Kinetica C# API  Version 7.1.10.0
InsertRecordsFromFiles.cs
1 /*
2  * This file was autogenerated by the Kinetica schema processor.
3  *
4  * DO NOT EDIT DIRECTLY.
5  */
6 
7 using System.Collections.Generic;
8 
9 
10 
11 namespace kinetica
12 {
13 
49  public class InsertRecordsFromFilesRequest : KineticaData
50  {
51 
285  public struct CreateTableOptions
286  {
287 
291  public const string TYPE_ID = "type_id";
292 
311  public const string NO_ERROR_IF_EXISTS = "no_error_if_exists";
312  public const string TRUE = "true";
313  public const string FALSE = "false";
314 
347  public const string IS_REPLICATED = "is_replicated";
348 
355  public const string FOREIGN_KEYS = "foreign_keys";
356 
360  public const string FOREIGN_SHARD_KEY = "foreign_shard_key";
361 
402  public const string PARTITION_TYPE = "partition_type";
403 
407  public const string RANGE = "RANGE";
408 
412  public const string INTERVAL = "INTERVAL";
413 
417  public const string LIST = "LIST";
418 
422  public const string HASH = "HASH";
423 
427  public const string SERIES = "SERIES";
428 
434  public const string PARTITION_KEYS = "partition_keys";
435 
450  public const string PARTITION_DEFINITIONS = "partition_definitions";
451 
471  public const string IS_AUTOMATIC_PARTITION = "is_automatic_partition";
472 
476  public const string TTL = "ttl";
477 
480  public const string CHUNK_SIZE = "chunk_size";
481 
506  public const string IS_RESULT_TABLE = "is_result_table";
507 
511  public const string STRATEGY_DEFINITION = "strategy_definition";
512  } // end struct CreateTableOptions
513 
514 
1294  public struct Options
1295  {
1296 
1303  public const string BAD_RECORD_TABLE_NAME = "bad_record_table_name";
1304 
1309  public const string BAD_RECORD_TABLE_LIMIT = "bad_record_table_limit";
1310 
1318  public const string BAD_RECORD_TABLE_LIMIT_PER_INPUT = "bad_record_table_limit_per_input";
1319 
1322  public const string BATCH_SIZE = "batch_size";
1323 
1340  public const string COLUMN_FORMATS = "column_formats";
1341 
1374  public const string COLUMNS_TO_LOAD = "columns_to_load";
1375 
1380  public const string COLUMNS_TO_SKIP = "columns_to_skip";
1381 
1408  public const string COMPRESSION_TYPE = "compression_type";
1409 
1411  public const string NONE = "none";
1412 
1414  public const string AUTO = "auto";
1415 
1417  public const string GZIP = "gzip";
1418 
1420  public const string BZIP2 = "bzip2";
1421 
1425  public const string DATASOURCE_NAME = "datasource_name";
1426 
1460  public const string DEFAULT_COLUMN_FORMATS = "default_column_formats";
1461 
1488  public const string ERROR_HANDLING = "error_handling";
1489 
1493  public const string PERMISSIVE = "permissive";
1494 
1496  public const string IGNORE_BAD_RECORDS = "ignore_bad_records";
1497 
1501  public const string ABORT = "abort";
1502 
1541  public const string FILE_TYPE = "file_type";
1542 
1544  public const string AVRO = "avro";
1545 
1548  public const string DELIMITED_TEXT = "delimited_text";
1549 
1551  public const string GDB = "gdb";
1552 
1554  public const string JSON = "json";
1555 
1557  public const string PARQUET = "parquet";
1558 
1560  public const string SHAPEFILE = "shapefile";
1561 
1564  public const string GDAL_CONFIGURATION_OPTIONS = "gdal_configuration_options";
1565 
1603  public const string IGNORE_EXISTING_PK = "ignore_existing_pk";
1604 
1607  public const string TRUE = "true";
1608 
1611  public const string FALSE = "false";
1612 
1641  public const string INGESTION_MODE = "ingestion_mode";
1642 
1645  public const string FULL = "full";
1646 
1650  public const string DRY_RUN = "dry_run";
1651 
1655  public const string TYPE_INFERENCE_ONLY = "type_inference_only";
1656 
1659  public const string KAFKA_CONSUMERS_PER_RANK = "kafka_consumers_per_rank";
1660 
1664  public const string KAFKA_GROUP_ID = "kafka_group_id";
1665 
1681  public const string KAFKA_OFFSET_RESET_POLICY = "kafka_offset_reset_policy";
1682  public const string EARLIEST = "earliest";
1683  public const string LATEST = "latest";
1684 
1701  public const string KAFKA_OPTIMISTIC_INGEST = "kafka_optimistic_ingest";
1702 
1705  public const string KAFKA_SUBSCRIPTION_CANCEL_AFTER = "kafka_subscription_cancel_after";
1706 
1709  public const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT = "kafka_type_inference_fetch_timeout";
1710 
1712  public const string LAYER = "layer";
1713 
1771  public const string LOADING_MODE = "loading_mode";
1772 
1775  public const string HEAD = "head";
1776 
1788  public const string DISTRIBUTED_SHARED = "distributed_shared";
1789 
1812  public const string DISTRIBUTED_LOCAL = "distributed_local";
1813 
1816  public const string LOCAL_TIME_OFFSET = "local_time_offset";
1817 
1824  public const string MAX_RECORDS_TO_LOAD = "max_records_to_load";
1825 
1829  public const string NUM_TASKS_PER_RANK = "num_tasks_per_rank";
1830 
1837  public const string POLL_INTERVAL = "poll_interval";
1838 
1841  public const string PRIMARY_KEYS = "primary_keys";
1842 
1845  public const string SCHEMA_REGISTRY_SCHEMA_NAME = "schema_registry_schema_name";
1846 
1849  public const string SHARD_KEYS = "shard_keys";
1850 
1852  public const string SKIP_LINES = "skip_lines";
1853 
1869  public const string SUBSCRIBE = "subscribe";
1870 
1890  public const string TABLE_INSERT_MODE = "table_insert_mode";
1891 
1893  public const string SINGLE = "single";
1894 
1897  public const string TABLE_PER_FILE = "table_per_file";
1898 
1906  public const string TEXT_COMMENT_STRING = "text_comment_string";
1907 
1914  public const string TEXT_DELIMITER = "text_delimiter";
1915 
1934  public const string TEXT_ESCAPE_CHARACTER = "text_escape_character";
1935 
1953  public const string TEXT_HAS_HEADER = "text_has_header";
1954 
1963  public const string TEXT_HEADER_PROPERTY_DELIMITER = "text_header_property_delimiter";
1964 
1971  public const string TEXT_NULL_STRING = "text_null_string";
1972 
1986  public const string TEXT_QUOTE_CHARACTER = "text_quote_character";
1987 
1995  public const string TEXT_SEARCH_COLUMNS = "text_search_columns";
1996 
2000  public const string TEXT_SEARCH_MIN_COLUMN_LENGTH = "text_search_min_column_length";
2001 
2017  public const string TRUNCATE_STRINGS = "truncate_strings";
2018 
2034  public const string TRUNCATE_TABLE = "truncate_table";
2035 
2056  public const string TYPE_INFERENCE_MODE = "type_inference_mode";
2057 
2060  public const string ACCURACY = "accuracy";
2061 
2065  public const string SPEED = "speed";
2066 
2099  public const string UPDATE_ON_EXISTING_PK = "update_on_existing_pk";
2100  } // end struct Options
2101 
2102 
2113  public string table_name { get; set; }
2114 
2147  public IList<string> filepaths { get; set; } = new List<string>();
2148 
2151  public IDictionary<string, IDictionary<string, string>> modify_columns { get; set; } = new Dictionary<string, IDictionary<string, string>>();
2152 
2384  public IDictionary<string, string> create_table_options { get; set; } = new Dictionary<string, string>();
2385 
3163  public IDictionary<string, string> options { get; set; } = new Dictionary<string, string>();
3164 
3165 
3169  public InsertRecordsFromFilesRequest() { }
4204  public InsertRecordsFromFilesRequest( string table_name,
4205  IList<string> filepaths,
4206  IDictionary<string, IDictionary<string, string>> modify_columns = null,
4207  IDictionary<string, string> create_table_options = null,
4208  IDictionary<string, string> options = null)
4209  {
4210  this.table_name = table_name ?? "";
4211  this.filepaths = filepaths ?? new List<string>();
4212  this.modify_columns = modify_columns ?? new Dictionary<string, IDictionary<string, string>>();
4213  this.create_table_options = create_table_options ?? new Dictionary<string, string>();
4214  this.options = options ?? new Dictionary<string, string>();
4215  } // end constructor
4216 
4217  } // end class InsertRecordsFromFilesRequest
4218 
4219 
4220 
4224  public class InsertRecordsFromFilesResponse : KineticaData
4225  {
4226 
4229  public string table_name { get; set; }
4230 
4234  public string type_id { get; set; }
4235 
4238  public string type_definition { get; set; }
4239 
4242  public string type_label { get; set; }
4243 
4246  public IDictionary<string, IList<string>> type_properties { get; set; } = new Dictionary<string, IList<string>>();
4247 
4250  public long count_inserted { get; set; }
4251 
4254  public long count_skipped { get; set; }
4255 
4258  public long count_updated { get; set; }
4259 
4261  public IDictionary<string, string> info { get; set; } = new Dictionary<string, string>();
4262  public IList<string> files { get; set; } = new List<string>();
4263 
4264  } // end class InsertRecordsFromFilesResponse
4265 
4266 
4267 
4268 
4269 } // end namespace kinetica
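A minimal usage sketch follows, assuming a reachable Kinetica instance; the connection URL, schema-qualified table name, and KiFS path are placeholders. It uses only the option constants defined above and the per-parameter overload of Kinetica.insertRecordsFromFiles referenced in the member descriptions below.

    using System;
    using System.Collections.Generic;
    using kinetica;

    public static class InsertFromFilesExample
    {
        public static void Run()
        {
            // Placeholder URL; point this at your own Kinetica instance.
            Kinetica kdb = new Kinetica("http://localhost:9191");

            // Create the target table if it does not already exist.
            var createTableOptions = new Dictionary<string, string>
            {
                { InsertRecordsFromFilesRequest.CreateTableOptions.NO_ERROR_IF_EXISTS,
                  InsertRecordsFromFilesRequest.CreateTableOptions.TRUE }
            };

            // Treat the source as delimited text and skip malformed records.
            var options = new Dictionary<string, string>
            {
                { InsertRecordsFromFilesRequest.Options.FILE_TYPE,
                  InsertRecordsFromFilesRequest.Options.DELIMITED_TEXT },
                { InsertRecordsFromFilesRequest.Options.ERROR_HANDLING,
                  InsertRecordsFromFilesRequest.Options.IGNORE_BAD_RECORDS }
            };

            // Placeholder table name and KiFS path.
            InsertRecordsFromFilesResponse response = kdb.insertRecordsFromFiles(
                "example_schema.example_table",
                new List<string> { "kifs://data/example.csv" },
                null,                // modify_columns (not implemented yet)
                createTableOptions,
                options);

            Console.WriteLine("Inserted: " + response.count_inserted +
                              ", skipped: " + response.count_skipped);
        }
    }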
const string TRUNCATE_TABLE
If set to true, truncates the table specified by table_name prior to loading the file(s).
const string AUTO
Auto detect compression type
const string BAD_RECORD_TABLE_LIMIT_PER_INPUT
For subscriptions, a positive integer indicating the maximum number of records that can be written to...
CreateTableOptions
Options from /create/table, allowing the structure of the table to be defined independently of the da...
const string IGNORE_BAD_RECORDS
Malformed records are skipped.
const string PARTITION_KEYS
Comma-separated list of partition keys, which are the columns or column expressions by which records ...
const string TABLE_PER_FILE
Insert records from each file into a new table corresponding to that file.
const string TEXT_HEADER_PROPERTY_DELIMITER
Specifies the delimiter for column properties in the header row (if present).
const string FOREIGN_KEYS
Semicolon-separated list of foreign keys, of the format '(source_column_name [, ...]) references target_table_name(primary_key_column_name [, ...]) [as foreign_key_name]'.
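As an illustration only (the schema, table, and column names are hypothetical), an entry following this format, added to a create_table_options dictionary such as the one in the sketch after the listing above, might look like:

    // Format: '(source_column_name [, ...]) references target_table_name(primary_key_column_name [, ...]) [as foreign_key_name]'
    createTableOptions[InsertRecordsFromFilesRequest.CreateTableOptions.FOREIGN_KEYS] =
        "(customer_id) references example_schema.customer(id) as fk_customer";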
const string TEXT_ESCAPE_CHARACTER
Specifies the character that is used to escape other characters in the source data.
const string BATCH_SIZE
Number of records to insert per batch when inserting data.
const string BAD_RECORD_TABLE_NAME
Name of a table to which records that were rejected are written.
const string GDAL_CONFIGURATION_OPTIONS
Comma-separated list of GDAL configuration options for the specific request: key=value
const string DISTRIBUTED_SHARED
The head node coordinates loading data by worker processes across all nodes from shared files availab...
long count_updated
[Not yet implemented] Number of records updated within the target table.
const string IS_RESULT_TABLE
Indicates whether the table is a memory-only table.
InsertRecordsFromFilesRequest()
Constructs an InsertRecordsFromFilesRequest object with default parameters.
const string DRY_RUN
Does not load data, but walks through the source data and determines the number of valid records...
const string KAFKA_CONSUMERS_PER_RANK
Number of Kafka consumer threads per rank (valid range 1-6).
const string FOREIGN_SHARD_KEY
Foreign shard key of the format 'source_column references shard_by_column from target_table(primary_k...
const string TTL
Sets the TTL of the table specified in table_name.
const string KAFKA_SUBSCRIPTION_CANCEL_AFTER
Sets the Kafka subscription lifespan (in minutes).
const string TEXT_SEARCH_MIN_COLUMN_LENGTH
Set the minimum column size for strings to apply the 'text_search' property to.
const string COLUMNS_TO_SKIP
Specifies a comma-delimited list of columns from the source data to skip.
const string COLUMN_FORMATS
For each target column specified, applies the column-property-bound format to the source data loaded ...
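A hedged sketch of the value this option takes: a JSON string mapping target column names to property-bound format strings. The column names and format strings below are hypothetical; see the /insert/records/fromfiles endpoint documentation for the authoritative syntax.

    // Hypothetical date column "d" and time column "t" in the source data.
    options[InsertRecordsFromFilesRequest.Options.COLUMN_FORMATS] =
        "{ \"d\": { \"date\": \"%Y.%m.%d\" }, \"t\": { \"time\": \"%H:%M:%S\" } }";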
const string MAX_RECORDS_TO_LOAD
Limit the number of records to load in this request: if this number is larger than batch_size...
const string KAFKA_GROUP_ID
The group id to be used when consuming data from a Kafka topic (valid only for Kafka datasource subsc...
long count_skipped
Number of records skipped, when not running in abort error handling mode.
const string LAYER
Geo file layer name(s): comma-separated.
const string SPEED
Scans data and picks the widest possible column types so that 'all' values will fit with minimum data...
IDictionary< string, IList< string > > type_properties
A mapping of each target table column name to an array of column properties associated with that colu...
long count_inserted
Number of records inserted into the target table.
const string TRUNCATE_STRINGS
If set to true, truncate string values that are longer than the column's type size.
const string FALSE
Reject new records when primary keys match existing records
const string TEXT_DELIMITER
Specifies the character delimiting field values in the source data and field names in the header (if ...
const string BAD_RECORD_TABLE_LIMIT
A positive integer indicating the maximum number of records that can be written to the bad-record-tab...
const string TYPE_INFERENCE_MODE
Optimize type inferencing for either speed or accuracy.
const string SINGLE
Insert all records into a single table.
const string SKIP_LINES
Skip this number of lines from the beginning of the file.
const string SCHEMA_REGISTRY_SCHEMA_NAME
Name of the Avro schema in the schema registry to use when reading Avro records.
const string POLL_INTERVAL
Number of seconds between attempts to load external files into the table.
string type_definition
A JSON string describing the columns of the target table
const string TEXT_SEARCH_COLUMNS
Add the 'text_search' property to internally inferred string columns.
const string COLUMNS_TO_LOAD
Specifies a comma-delimited list of columns from the source data to load.
const string IS_REPLICATED
Affects the distribution scheme for the table's data.
const string ACCURACY
Scans data to get exactly-typed & sized columns for all data scanned.
const string FILE_TYPE
Specifies the type of the file(s) whose records will be inserted.
const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT
Maximum time to collect Kafka messages before type inferencing on the set of them.
IDictionary< string, IDictionary< string, string > > modify_columns
Not implemented yet.
const string HEAD
The head node loads all data.
const string TEXT_QUOTE_CHARACTER
Specifies the character that should be interpreted as a field value quoting character in the source d...
const string TEXT_NULL_STRING
Specifies the character string that should be interpreted as a null value in the source data...
const string TEXT_COMMENT_STRING
Specifies the character string that should be interpreted as a comment line prefix in the source data...
const string NUM_TASKS_PER_RANK
Number of tasks per rank for reading files.
const string FULL
Run a type inference on the source data (if needed) and ingest
const string SUBSCRIBE
Continuously poll the data source to check for new data and load it into the table.
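A sketch of a file-based subscription, building on the options dictionary from the example after the listing; the data source name is a placeholder and 60 seconds is an arbitrary polling interval:

    options[InsertRecordsFromFilesRequest.Options.DATASOURCE_NAME] = "example_datasource";
    options[InsertRecordsFromFilesRequest.Options.SUBSCRIBE] =
        InsertRecordsFromFilesRequest.Options.TRUE;
    options[InsertRecordsFromFilesRequest.Options.POLL_INTERVAL] = "60";  // seconds between polls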
const string TYPE_INFERENCE_ONLY
Infer the type of the source data and return, without ingesting any data.
const string PARQUET
Apache Parquet file format
IList< string > filepaths
A list of file paths from which data will be sourced; for paths in KiFS, use the URI prefix of kifs...
const string KAFKA_OFFSET_RESET_POLICY
Policy to determine whether the Kafka data consumption starts either at earliest offset or latest off...
const string TRUE
Upsert new records when primary keys match existing records
InsertRecordsFromFilesResponse
A set of results returned by Kinetica.insertRecordsFromFiles(string,IList{string},IDictionary{string, IDictionary{string, string}},IDictionary{string, string},IDictionary{string, string}).
IDictionary< string, string > info
Additional information.
const string INGESTION_MODE
Whether to do a full load, dry run, or perform a type inference on the source data.
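For example, to validate the source data without loading it, the ingestion mode can be combined with accuracy-oriented type inference (a sketch using only constants defined in this file):

    options[InsertRecordsFromFilesRequest.Options.INGESTION_MODE] =
        InsertRecordsFromFilesRequest.Options.DRY_RUN;
    options[InsertRecordsFromFilesRequest.Options.TYPE_INFERENCE_MODE] =
        InsertRecordsFromFilesRequest.Options.ACCURACY;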
const string IGNORE_EXISTING_PK
Specifies the record collision error-suppression policy for inserting into a table with a primary key...
const string TYPE_ID
ID of a currently registered type.
const string UPDATE_ON_EXISTING_PK
Specifies the record collision policy for inserting into a table with a primary key.
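For instance, to upsert rather than reject incoming records on a primary-key collision (a sketch using the options dictionary from the example after the listing):

    // TRUE here means: upsert when primary keys match existing records.
    options[InsertRecordsFromFilesRequest.Options.UPDATE_ON_EXISTING_PK] =
        InsertRecordsFromFilesRequest.Options.TRUE;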
const string DELIMITED_TEXT
Delimited text file format; e.g., CSV, TSV, PSV, etc.
const string PRIMARY_KEYS
Comma separated list of column names to set as primary keys, when not specified in the type...
string type_label
The user-defined description associated with the target table's structure
IDictionary< string, string > create_table_options
Options from /create/table, allowing the structure of the table to be defined independently of the da...
const string KAFKA_OPTIMISTIC_INGEST
Enable optimistic ingestion where Kafka topic offsets and table data are committed independently to a...
const string CHUNK_SIZE
Indicates the number of records per chunk to be used for this table.
IDictionary< string, string > options
Optional parameters.
string type_id
ID of the currently registered table structure type for the target table
const string PARTITION_DEFINITIONS
Comma-separated list of partition definitions, whose format depends on the choice of partition_type...
const string PERMISSIVE
Records with missing columns are populated with nulls if possible; otherwise, the malformed records a...
const string LOCAL_TIME_OFFSET
Apply an offset to Avro local timestamp columns.
KineticaData - class to help with Avro Encoding for Kinetica
Definition: KineticaData.cs:14
const string ERROR_HANDLING
Specifies how errors should be handled upon insertion.
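A sketch of routing rejected records to a side table instead of aborting the load; the bad-record table name and limit below are placeholders:

    options[InsertRecordsFromFilesRequest.Options.ERROR_HANDLING] =
        InsertRecordsFromFilesRequest.Options.IGNORE_BAD_RECORDS;
    options[InsertRecordsFromFilesRequest.Options.BAD_RECORD_TABLE_NAME] = "example_schema.bad_records";
    options[InsertRecordsFromFilesRequest.Options.BAD_RECORD_TABLE_LIMIT] = "5000";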
const string LOADING_MODE
Scheme for distributing the extraction and loading of data from the source data file(s).
string table_name
Name of the table into which the data will be inserted, in [schema_name.]table_name format.
const string DATASOURCE_NAME
Name of an existing external data source from which data file(s) specified in filepaths will be loade...
InsertRecordsFromFilesRequest
A set of parameters for Kinetica.insertRecordsFromFiles(string,IList{string},IDictionary{string, IDictionary{string, string}},IDictionary{string, string},IDictionary{string, string}).
const string IS_AUTOMATIC_PARTITION
If true, a new partition will be created for values which don't fall into an existing partition...
const string STRATEGY_DEFINITION
The tier strategy for the table and its columns.
const string ABORT
Stops current insertion and aborts entire operation when an error is encountered. ...
const string NO_ERROR_IF_EXISTS
If true, prevents an error from occurring if the table already exists and is of the given type...
const string TABLE_INSERT_MODE
Insertion scheme to use when inserting records from multiple shapefiles.
const string DEFAULT_COLUMN_FORMATS
Specifies the default format to be applied to source data loaded into columns with the corresponding ...
const string SHARD_KEYS
Comma separated list of column names to set as shard keys, when not specified in the type...
const string DISTRIBUTED_LOCAL
A single worker process on each node loads all files that are available to it.
InsertRecordsFromFilesRequest(string table_name, IList< string > filepaths, IDictionary< string, IDictionary< string, string >> modify_columns=null, IDictionary< string, string > create_table_options=null, IDictionary< string, string > options=null)
Constructs an InsertRecordsFromFilesRequest object with the specified parameters. ...
const string TEXT_HAS_HEADER
Indicates whether the source data contains a header row.
const string COMPRESSION_TYPE
Source data compression type. Supported values: NONE: No compression.