Kinetica C# API, Version 7.2.3.0
InsertRecordsFromFiles.cs
/*
 * This file was autogenerated by the Kinetica schema processor.
 *
 * DO NOT EDIT DIRECTLY.
 */

using System.Collections.Generic;

namespace kinetica
{
    public class InsertRecordsFromFilesRequest : KineticaData
    {
        public struct CreateTableOptions
        {
            public const string TYPE_ID = "type_id";

            public const string NO_ERROR_IF_EXISTS = "no_error_if_exists";

            public const string TRUE = "true";
            public const string FALSE = "false";

            public const string IS_REPLICATED = "is_replicated";

            public const string FOREIGN_KEYS = "foreign_keys";

            public const string FOREIGN_SHARD_KEY = "foreign_shard_key";

            public const string PARTITION_TYPE = "partition_type";

            public const string RANGE = "RANGE";

            public const string INTERVAL = "INTERVAL";

            public const string LIST = "LIST";

            public const string HASH = "HASH";

            public const string SERIES = "SERIES";

            public const string PARTITION_KEYS = "partition_keys";

            public const string PARTITION_DEFINITIONS = "partition_definitions";

            public const string IS_AUTOMATIC_PARTITION = "is_automatic_partition";

            public const string TTL = "ttl";

            public const string CHUNK_SIZE = "chunk_size";

            public const string CHUNK_COLUMN_MAX_MEMORY = "chunk_column_max_memory";

            public const string CHUNK_MAX_MEMORY = "chunk_max_memory";

            public const string IS_RESULT_TABLE = "is_result_table";

            public const string STRATEGY_DEFINITION = "strategy_definition";

            public const string COMPRESSION_CODEC = "compression_codec";
        } // end struct CreateTableOptions

        public struct Options
        {
            public const string BAD_RECORD_TABLE_NAME = "bad_record_table_name";

            public const string BAD_RECORD_TABLE_LIMIT = "bad_record_table_limit";

            public const string BAD_RECORD_TABLE_LIMIT_PER_INPUT = "bad_record_table_limit_per_input";

            public const string BATCH_SIZE = "batch_size";

            public const string COLUMN_FORMATS = "column_formats";

            public const string COLUMNS_TO_LOAD = "columns_to_load";

            public const string COLUMNS_TO_SKIP = "columns_to_skip";

            public const string COMPRESSION_TYPE = "compression_type";

            public const string NONE = "none";

            public const string AUTO = "auto";

            public const string GZIP = "gzip";

            public const string BZIP2 = "bzip2";

            public const string DATASOURCE_NAME = "datasource_name";

            public const string DEFAULT_COLUMN_FORMATS = "default_column_formats";

            public const string ERROR_HANDLING = "error_handling";

            public const string PERMISSIVE = "permissive";

            public const string IGNORE_BAD_RECORDS = "ignore_bad_records";

            public const string ABORT = "abort";

            public const string FILE_TYPE = "file_type";

            public const string AVRO = "avro";

            public const string DELIMITED_TEXT = "delimited_text";

            public const string GDB = "gdb";

            public const string JSON = "json";

            public const string PARQUET = "parquet";

            public const string SHAPEFILE = "shapefile";

            public const string FLATTEN_COLUMNS = "flatten_columns";

            public const string TRUE = "true";

            public const string FALSE = "false";

            public const string GDAL_CONFIGURATION_OPTIONS = "gdal_configuration_options";

            public const string IGNORE_EXISTING_PK = "ignore_existing_pk";

            public const string INGESTION_MODE = "ingestion_mode";

            public const string FULL = "full";

            public const string DRY_RUN = "dry_run";

            public const string TYPE_INFERENCE_ONLY = "type_inference_only";

            public const string KAFKA_CONSUMERS_PER_RANK = "kafka_consumers_per_rank";

            public const string KAFKA_GROUP_ID = "kafka_group_id";

            public const string KAFKA_OFFSET_RESET_POLICY = "kafka_offset_reset_policy";

            public const string EARLIEST = "earliest";
            public const string LATEST = "latest";

            public const string KAFKA_OPTIMISTIC_INGEST = "kafka_optimistic_ingest";

            public const string KAFKA_SUBSCRIPTION_CANCEL_AFTER = "kafka_subscription_cancel_after";

            public const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT = "kafka_type_inference_fetch_timeout";

            public const string LAYER = "layer";

            public const string LOADING_MODE = "loading_mode";

            public const string HEAD = "head";

            public const string DISTRIBUTED_SHARED = "distributed_shared";

            public const string DISTRIBUTED_LOCAL = "distributed_local";

            public const string LOCAL_TIME_OFFSET = "local_time_offset";

            public const string MAX_RECORDS_TO_LOAD = "max_records_to_load";

            public const string NUM_TASKS_PER_RANK = "num_tasks_per_rank";

            public const string POLL_INTERVAL = "poll_interval";

            public const string PRIMARY_KEYS = "primary_keys";

            public const string SCHEMA_REGISTRY_CONNECTION_RETRIES = "schema_registry_connection_retries";

            public const string SCHEMA_REGISTRY_CONNECTION_TIMEOUT = "schema_registry_connection_timeout";

            public const string SCHEMA_REGISTRY_MAX_CONSECUTIVE_CONNECTION_FAILURES = "schema_registry_max_consecutive_connection_failures";

            public const string MAX_CONSECUTIVE_INVALID_SCHEMA_FAILURE = "max_consecutive_invalid_schema_failure";

            public const string SCHEMA_REGISTRY_SCHEMA_NAME = "schema_registry_schema_name";

            public const string SHARD_KEYS = "shard_keys";

            public const string SKIP_LINES = "skip_lines";

            public const string START_OFFSETS = "start_offsets";

            public const string SUBSCRIBE = "subscribe";

            public const string TABLE_INSERT_MODE = "table_insert_mode";

            public const string SINGLE = "single";

            public const string TABLE_PER_FILE = "table_per_file";

            public const string TEXT_COMMENT_STRING = "text_comment_string";

            public const string TEXT_DELIMITER = "text_delimiter";

            public const string TEXT_ESCAPE_CHARACTER = "text_escape_character";

            public const string TEXT_HAS_HEADER = "text_has_header";

            public const string TEXT_HEADER_PROPERTY_DELIMITER = "text_header_property_delimiter";

            public const string TEXT_NULL_STRING = "text_null_string";

            public const string TEXT_QUOTE_CHARACTER = "text_quote_character";

            public const string TEXT_SEARCH_COLUMNS = "text_search_columns";

            public const string TEXT_SEARCH_MIN_COLUMN_LENGTH = "text_search_min_column_length";

            public const string TRUNCATE_STRINGS = "truncate_strings";

            public const string TRUNCATE_TABLE = "truncate_table";

            public const string TYPE_INFERENCE_MAX_RECORDS_READ = "type_inference_max_records_read";

            public const string TYPE_INFERENCE_MODE = "type_inference_mode";

            public const string ACCURACY = "accuracy";

            public const string SPEED = "speed";

            public const string UPDATE_ON_EXISTING_PK = "update_on_existing_pk";
        } // end struct Options

        public string table_name { get; set; }

        public IList<string> filepaths { get; set; } = new List<string>();

        public IDictionary<string, IDictionary<string, string>> modify_columns { get; set; } = new Dictionary<string, IDictionary<string, string>>();

        public IDictionary<string, string> create_table_options { get; set; } = new Dictionary<string, string>();

        public IDictionary<string, string> options { get; set; } = new Dictionary<string, string>();

        public InsertRecordsFromFilesRequest() { }

        public InsertRecordsFromFilesRequest( string table_name,
                                              IList<string> filepaths,
                                              IDictionary<string, IDictionary<string, string>> modify_columns = null,
                                              IDictionary<string, string> create_table_options = null,
                                              IDictionary<string, string> options = null)
        {
            this.table_name = table_name ?? "";
            this.filepaths = filepaths ?? new List<string>();
            this.modify_columns = modify_columns ?? new Dictionary<string, IDictionary<string, string>>();
            this.create_table_options = create_table_options ?? new Dictionary<string, string>();
            this.options = options ?? new Dictionary<string, string>();
        } // end constructor
    } // end class InsertRecordsFromFilesRequest

    public class InsertRecordsFromFilesResponse : KineticaData
    {
        public string table_name { get; set; }

        public string type_id { get; set; }

        public string type_definition { get; set; }

        public string type_label { get; set; }

        public IDictionary<string, IList<string>> type_properties { get; set; } = new Dictionary<string, IList<string>>();

        public long count_inserted { get; set; }

        public long count_skipped { get; set; }

        public long count_updated { get; set; }

        public IDictionary<string, string> info { get; set; } = new Dictionary<string, string>();

        public IList<string> files { get; set; } = new List<string>();
    } // end class InsertRecordsFromFilesResponse
} // end namespace kinetica
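As a usage sketch only (not part of the generated file): a request is typically built with the nested CreateTableOptions and Options constants above and submitted through a connected client. The connection URL, the insertRecordsFromFiles() wrapper name, the table names example.orders / example.orders_bad, and the kifs:// path below are all assumptions for illustration, based on the general pattern of the generated C# client.

using System;
using System.Collections.Generic;
using kinetica;

public static class LoadCsvExample
{
    public static void Main()
    {
        // Assumed connection setup; adjust the URL (and credentials) for your cluster.
        Kinetica kdb = new Kinetica("http://localhost:9191");

        // Create-table options: build the target table as replicated if it
        // does not already exist.
        var createTableOptions = new Dictionary<string, string>
        {
            { InsertRecordsFromFilesRequest.CreateTableOptions.NO_ERROR_IF_EXISTS,
              InsertRecordsFromFilesRequest.CreateTableOptions.TRUE },
            { InsertRecordsFromFilesRequest.CreateTableOptions.IS_REPLICATED,
              InsertRecordsFromFilesRequest.CreateTableOptions.TRUE }
        };

        // Load options: delimited-text input, skip malformed records, and
        // route rejects to a bad-record table.
        var options = new Dictionary<string, string>
        {
            { InsertRecordsFromFilesRequest.Options.FILE_TYPE,
              InsertRecordsFromFilesRequest.Options.DELIMITED_TEXT },
            { InsertRecordsFromFilesRequest.Options.ERROR_HANDLING,
              InsertRecordsFromFilesRequest.Options.IGNORE_BAD_RECORDS },
            { InsertRecordsFromFilesRequest.Options.BAD_RECORD_TABLE_NAME, "example.orders_bad" }
        };

        var request = new InsertRecordsFromFilesRequest(
            "example.orders",                              // hypothetical target table
            new List<string> { "kifs://data/orders.csv" }, // hypothetical KiFS path
            null,                                          // modify_columns (not implemented yet)
            createTableOptions,
            options);

        // Endpoint wrapper name assumed from the client's naming convention.
        InsertRecordsFromFilesResponse response = kdb.insertRecordsFromFiles(request);

        Console.WriteLine($"Inserted: {response.count_inserted}, skipped: {response.count_skipped}");
    }
}

The brief member descriptions that follow are taken from the generated reference and cover the individual constants and fields used above.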
const string IS_AUTOMATIC_PARTITION
If TRUE, a new partition will be created for values which don't fall into an existing partition.
const string LAYER
Geo file layer name(s), comma-separated.
const string TRUNCATE_STRINGS
If set to TRUE, truncate string values that are longer than the column's type size.
const string ERROR_HANDLING
Specifies how errors should be handled upon insertion.
const string MAX_CONSECUTIVE_INVALID_SCHEMA_FAILURE
Maximum number of records to skip due to schema-related errors before failing.
IDictionary< string, IDictionary< string, string > > modify_columns
Not implemented yet.
const string MAX_RECORDS_TO_LOAD
Limit the number of records to load in this request: if this number is larger than BATCH_SIZE,...
const string UPDATE_ON_EXISTING_PK
Specifies the record collision policy for inserting into a table with a primary key.
const string COMPRESSION_TYPE
Source data compression type.
const string SKIP_LINES
Skip a number of lines from the beginning of the file.
string type_definition
A JSON string describing the columns of the target table
InsertRecordsFromFilesRequest()
Constructs an InsertRecordsFromFilesRequest object with default parameters.
const string FOREIGN_KEYS
Semicolon-separated list of foreign keys, of the format '(source_column_name [, .....
const string CHUNK_MAX_MEMORY
Indicates the target maximum data size for all columns in a chunk to be used for this table.
IList< string > filepaths
A list of file paths from which data will be sourced; for paths in KiFS, use the URI prefix of kifs:/...
const string HEAD
The head node loads all data.
const string TEXT_QUOTE_CHARACTER
Specifies the character that should be interpreted as a field value quoting character in the source d...
const string CHUNK_COLUMN_MAX_MEMORY
Indicates the target maximum data size for each column in a chunk to be used for this table.
const string FLATTEN_COLUMNS
Specifies how to handle nested columns.
const string GDAL_CONFIGURATION_OPTIONS
Comma-separated list of GDAL configuration options for the specific request, in key=value format.
const string TRUE
Upsert new records when primary keys match existing records
const string COLUMN_FORMATS
For each target column specified, applies the column-property-bound format to the source data loaded ...
const string FALSE
Reject new records when primary keys match existing records
const string IS_REPLICATED
Affects the distribution scheme for the table's data.
const string AUTO
Auto detect compression type
const string BATCH_SIZE
Number of records to insert per batch when inserting data.
const string BAD_RECORD_TABLE_LIMIT_PER_INPUT
For subscriptions, a positive integer indicating the maximum number of records that can be written to...
const string TTL
Sets the TTL of the table specified in table_name.
const string FOREIGN_SHARD_KEY
Foreign shard key of the format 'source_column references shard_by_column from target_table(primary_k...
const string ACCURACY
Scans data to get exactly-typed & sized columns for all data scanned.
const string COMPRESSION_CODEC
The default compression codec for this table's columns.
IDictionary< string, string > options
Optional parameters.
const string SPEED
Scans data and picks the widest possible column types so that 'all' values will fit with minimum data...
const string PERMISSIVE
Records with missing columns are populated with nulls if possible; otherwise, the malformed records a...
const string SHARD_KEYS
Comma separated list of column names to set as shard keys, when not specified in the type.
const string INGESTION_MODE
Whether to do a full load, dry run, or perform a type inference on the source data.
const string TABLE_INSERT_MODE
Insertion scheme to use when inserting records from multiple shapefiles.
const string TEXT_ESCAPE_CHARACTER
Specifies the character that is used to escape other characters in the source data.
const string KAFKA_GROUP_ID
The group id to be used when consuming data from a Kafka topic (valid only for Kafka datasource subsc...
string type_id
ID of the currently registered table structure type for the target table
const string TABLE_PER_FILE
Insert records from each file into a new table corresponding to that file.
const string PARTITION_DEFINITIONS
Comma-separated list of partition definitions, whose format depends on the choice of PARTITION_TYPE.
IDictionary< string, string > create_table_options
Options from Kinetica.createTable, allowing the structure of the table to be defined independently of...
const string TEXT_SEARCH_COLUMNS
Add 'text_search' property to internally inferenced string columns.
const string TEXT_SEARCH_MIN_COLUMN_LENGTH
Set the minimum column size for strings to apply the 'text_search' property to.
const string TEXT_HAS_HEADER
Indicates whether the source data contains a header row.
IDictionary< string, string > info
Additional information.
const string DELIMITED_TEXT
Delimited text file format; e.g., CSV, TSV, PSV, etc.
const string KAFKA_OPTIMISTIC_INGEST
Enable optimistic ingestion where Kafka topic offsets and table data are committed independently to a...
const string ABORT
Stops current insertion and aborts entire operation when an error is encountered.
long count_updated
[Not yet implemented] Number of records updated within the target table.
const string TEXT_COMMENT_STRING
Specifies the character string that should be interpreted as a comment line prefix in the source data...
const string TEXT_DELIMITER
Specifies the character delimiting field values in the source data and field names in the header (if ...
const string SCHEMA_REGISTRY_MAX_CONSECUTIVE_CONNECTION_FAILURES
Maximum number of records to skip due to Schema Registry connection failures before failing.
const string KAFKA_CONSUMERS_PER_RANK
Number of Kafka consumer threads per rank (valid range 1-6).
const string TYPE_INFERENCE_MODE
Optimize type inferencing for either speed or accuracy.
const string LOADING_MODE
Scheme for distributing the extraction and loading of data from the source data file(s).
const string BAD_RECORD_TABLE_NAME
Name of a table to which records that were rejected are written.
const string STRATEGY_DEFINITION
The tier strategy for the table and its columns.
const string PARQUET
Apache Parquet file format
const string SCHEMA_REGISTRY_SCHEMA_NAME
Name of the Avro schema in the schema registry to use when reading Avro records.
const string LOCAL_TIME_OFFSET
Apply an offset to Avro local timestamp columns.
const string SUBSCRIBE
Continuously poll the data source to check for new data and load it into the table.
const string BAD_RECORD_TABLE_LIMIT
A positive integer indicating the maximum number of records that can be written to the bad-record-tab...
const string IGNORE_BAD_RECORDS
Malformed records are skipped.
const string DRY_RUN
Does not load data, but walks through the source data and determines the number of valid records,...
const string TEXT_HEADER_PROPERTY_DELIMITER
Specifies the delimiter for column properties in the header row (if present).
const string POLL_INTERVAL
Number of seconds between attempts to load external files into the table.
const string FILE_TYPE
Specifies the type of the file(s) whose records will be inserted.
const string KAFKA_OFFSET_RESET_POLICY
Policy to determine whether the Kafka data consumption starts either at earliest offset or latest off...
string table_name
Name of the table into which the data will be inserted, in [schema_name.
const string SCHEMA_REGISTRY_CONNECTION_RETRIES
Confluent Schema Registry connection retries.
const string FULL
Run a type inference on the source data (if needed) and ingest
const string TEXT_NULL_STRING
Specifies the character string that should be interpreted as a null value in the source data.
IDictionary< string, IList< string > > type_properties
A mapping of each target table column name to an array of column properties associated with that colu...
const string PRIMARY_KEYS
Comma separated list of column names to set as primary keys, when not specified in the type.
const string COLUMNS_TO_SKIP
Specifies a comma-delimited list of columns from the source data to skip.
const string DISTRIBUTED_LOCAL
A single worker process on each node loads all files that are available to it.
const string NUM_TASKS_PER_RANK
Number of tasks for reading file per rank.
const string TYPE_ID
ID of a currently registered type.
const string SINGLE
Insert all records into a single table.
string type_label
The user-defined description associated with the target table's structure
long count_skipped
Number of records skipped, when not running in ABORT error handling mode.
const string IS_RESULT_TABLE
Indicates whether the table is a memory-only table.
const string DATASOURCE_NAME
Name of an existing external data source from which data file(s) specified in filepaths will be loade...
const string NO_ERROR_IF_EXISTS
If TRUE, prevents an error from occurring if the table already exists and is of the given type.
const string IGNORE_EXISTING_PK
Specifies the record collision error-suppression policy for inserting into a table with a primary key...
const string TYPE_INFERENCE_ONLY
Infer the type of the source data and return, without ingesting any data.
const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT
Maximum time to collect Kafka messages before type inferencing on the set of them.
const string PARTITION_KEYS
Comma-separated list of partition keys, which are the columns or column expressions by which records ...
const string TRUNCATE_TABLE
If set to TRUE, truncates the table specified by table_name prior to loading the file(s).
const string CHUNK_SIZE
Indicates the number of records per chunk to be used for this table.
const string COLUMNS_TO_LOAD
Specifies a comma-delimited list of columns from the source data to load.
long count_inserted
Number of records inserted into the target table.
const string START_OFFSETS
Starting offsets by partition to fetch from Kafka.
const string DISTRIBUTED_SHARED
The head node coordinates loading data by worker processes across all nodes from shared files availab...
InsertRecordsFromFilesRequest(string table_name, IList< string > filepaths, IDictionary< string, IDictionary< string, string >> modify_columns=null, IDictionary< string, string > create_table_options=null, IDictionary< string, string > options=null)
Constructs an InsertRecordsFromFilesRequest object with the specified parameters.
const string DEFAULT_COLUMN_FORMATS
Specifies the default format to be applied to source data loaded into columns with the corresponding ...
const string KAFKA_SUBSCRIPTION_CANCEL_AFTER
Sets the Kafka subscription lifespan (in minutes).
const string SCHEMA_REGISTRY_CONNECTION_TIMEOUT
Confluent Schema Registry connection timeout (in seconds).
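Several of the Options constants described above control validation rather than loading. As a second hedged sketch, again with hypothetical table and path names and the same assumed insertRecordsFromFiles() wrapper as in the earlier example, a dry run can be used to walk the source data and infer column types without inserting anything:

using System.Collections.Generic;
using kinetica;

public static class DryRunExample
{
    // Validates delimited-text source data without loading it; all names are hypothetical.
    public static InsertRecordsFromFilesResponse RunDryRun(Kinetica kdb)
    {
        var options = new Dictionary<string, string>
        {
            { InsertRecordsFromFilesRequest.Options.FILE_TYPE,
              InsertRecordsFromFilesRequest.Options.DELIMITED_TEXT },
            { InsertRecordsFromFilesRequest.Options.TEXT_HAS_HEADER,
              InsertRecordsFromFilesRequest.Options.TRUE },
            { InsertRecordsFromFilesRequest.Options.INGESTION_MODE,
              InsertRecordsFromFilesRequest.Options.DRY_RUN },
            { InsertRecordsFromFilesRequest.Options.TYPE_INFERENCE_MODE,
              InsertRecordsFromFilesRequest.Options.ACCURACY }
        };

        var request = new InsertRecordsFromFilesRequest(
            "example.orders",                              // hypothetical target table
            new List<string> { "kifs://data/orders.csv" }, // hypothetical KiFS path
            null,
            null,
            options);

        // No records are inserted in dry-run mode; inspect the response's
        // count fields and info map for what the load would have done.
        return kdb.insertRecordsFromFiles(request);
    }
}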