Kinetica C# API Version 7.2.3.0
CreateTableExternal.cs
/*
 * This file was autogenerated by the Kinetica schema processor.
 *
 * DO NOT EDIT DIRECTLY.
 */

using System.Collections.Generic;

namespace kinetica
{
    public class CreateTableExternalRequest : KineticaData
    {
        public struct CreateTableOptions
        {
            public const string TYPE_ID = "type_id";
            public const string NO_ERROR_IF_EXISTS = "no_error_if_exists";
            public const string TRUE = "true";
            public const string FALSE = "false";
            public const string IS_REPLICATED = "is_replicated";
            public const string FOREIGN_KEYS = "foreign_keys";
            public const string FOREIGN_SHARD_KEY = "foreign_shard_key";
            public const string PARTITION_TYPE = "partition_type";
            public const string RANGE = "RANGE";
            public const string INTERVAL = "INTERVAL";
            public const string LIST = "LIST";
            public const string HASH = "HASH";
            public const string SERIES = "SERIES";
            public const string PARTITION_KEYS = "partition_keys";
            public const string PARTITION_DEFINITIONS = "partition_definitions";
            public const string IS_AUTOMATIC_PARTITION = "is_automatic_partition";
            public const string TTL = "ttl";
            public const string CHUNK_SIZE = "chunk_size";
            public const string CHUNK_COLUMN_MAX_MEMORY = "chunk_column_max_memory";
            public const string CHUNK_MAX_MEMORY = "chunk_max_memory";
            public const string IS_RESULT_TABLE = "is_result_table";
            public const string STRATEGY_DEFINITION = "strategy_definition";
            public const string COMPRESSION_CODEC = "compression_codec";
        } // end struct CreateTableOptions

        public struct Options
        {
            public const string BAD_RECORD_TABLE_NAME = "bad_record_table_name";
            public const string BAD_RECORD_TABLE_LIMIT = "bad_record_table_limit";
            public const string BAD_RECORD_TABLE_LIMIT_PER_INPUT = "bad_record_table_limit_per_input";
            public const string BATCH_SIZE = "batch_size";
            public const string COLUMN_FORMATS = "column_formats";
            public const string COLUMNS_TO_LOAD = "columns_to_load";
            public const string COLUMNS_TO_SKIP = "columns_to_skip";
            public const string COMPRESSION_TYPE = "compression_type";
            public const string NONE = "none";
            public const string AUTO = "auto";
            public const string GZIP = "gzip";
            public const string BZIP2 = "bzip2";
            public const string DATASOURCE_NAME = "datasource_name";
            public const string DEFAULT_COLUMN_FORMATS = "default_column_formats";
            public const string DATALAKE_CATALOG = "datalake_catalog";
            public const string DATALAKE_PATH = "datalake_path";
            public const string DATALAKE_SNAPSHOT = "datalake_snapshot";
            public const string ERROR_HANDLING = "error_handling";
            public const string PERMISSIVE = "permissive";
            public const string IGNORE_BAD_RECORDS = "ignore_bad_records";
            public const string ABORT = "abort";
            public const string EXTERNAL_TABLE_TYPE = "external_table_type";
            public const string MATERIALIZED = "materialized";
            public const string LOGICAL = "logical";
            public const string FILE_TYPE = "file_type";
            public const string AVRO = "avro";
            public const string DELIMITED_TEXT = "delimited_text";
            public const string GDB = "gdb";
            public const string JSON = "json";
            public const string PARQUET = "parquet";
            public const string SHAPEFILE = "shapefile";
            public const string FLATTEN_COLUMNS = "flatten_columns";
            public const string TRUE = "true";
            public const string FALSE = "false";
            public const string GDAL_CONFIGURATION_OPTIONS = "gdal_configuration_options";
            public const string IGNORE_EXISTING_PK = "ignore_existing_pk";
            public const string INGESTION_MODE = "ingestion_mode";
            public const string FULL = "full";
            public const string DRY_RUN = "dry_run";
            public const string TYPE_INFERENCE_ONLY = "type_inference_only";
            public const string JDBC_FETCH_SIZE = "jdbc_fetch_size";
            public const string KAFKA_CONSUMERS_PER_RANK = "kafka_consumers_per_rank";
            public const string KAFKA_GROUP_ID = "kafka_group_id";
            public const string KAFKA_OFFSET_RESET_POLICY = "kafka_offset_reset_policy";
            public const string EARLIEST = "earliest";
            public const string LATEST = "latest";
            public const string KAFKA_OPTIMISTIC_INGEST = "kafka_optimistic_ingest";
            public const string KAFKA_SUBSCRIPTION_CANCEL_AFTER = "kafka_subscription_cancel_after";
            public const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT = "kafka_type_inference_fetch_timeout";
            public const string LAYER = "layer";
            public const string LOADING_MODE = "loading_mode";
            public const string HEAD = "head";
            public const string DISTRIBUTED_SHARED = "distributed_shared";
            public const string DISTRIBUTED_LOCAL = "distributed_local";
            public const string LOCAL_TIME_OFFSET = "local_time_offset";
            public const string MAX_RECORDS_TO_LOAD = "max_records_to_load";
            public const string NUM_TASKS_PER_RANK = "num_tasks_per_rank";
            public const string POLL_INTERVAL = "poll_interval";
            public const string PRIMARY_KEYS = "primary_keys";
            public const string REFRESH_METHOD = "refresh_method";
            public const string MANUAL = "manual";
            public const string ON_START = "on_start";
            public const string SCHEMA_REGISTRY_CONNECTION_RETRIES = "schema_registry_connection_retries";
            public const string SCHEMA_REGISTRY_CONNECTION_TIMEOUT = "schema_registry_connection_timeout";
            public const string SCHEMA_REGISTRY_MAX_CONSECUTIVE_CONNECTION_FAILURES = "schema_registry_max_consecutive_connection_failures";
            public const string MAX_CONSECUTIVE_INVALID_SCHEMA_FAILURE = "max_consecutive_invalid_schema_failure";
            public const string SCHEMA_REGISTRY_SCHEMA_NAME = "schema_registry_schema_name";
            public const string SHARD_KEYS = "shard_keys";
            public const string SKIP_LINES = "skip_lines";
            public const string START_OFFSETS = "start_offsets";
            public const string SUBSCRIBE = "subscribe";
            public const string TABLE_INSERT_MODE = "table_insert_mode";
            public const string SINGLE = "single";
            public const string TABLE_PER_FILE = "table_per_file";
            public const string TEXT_COMMENT_STRING = "text_comment_string";
            public const string TEXT_DELIMITER = "text_delimiter";
            public const string TEXT_ESCAPE_CHARACTER = "text_escape_character";
            public const string TEXT_HAS_HEADER = "text_has_header";
            public const string TEXT_HEADER_PROPERTY_DELIMITER = "text_header_property_delimiter";
            public const string TEXT_NULL_STRING = "text_null_string";
            public const string TEXT_QUOTE_CHARACTER = "text_quote_character";
            public const string TEXT_SEARCH_COLUMNS = "text_search_columns";
            public const string TEXT_SEARCH_MIN_COLUMN_LENGTH = "text_search_min_column_length";
            public const string TRIM_SPACE = "trim_space";
            public const string TRUNCATE_STRINGS = "truncate_strings";
            public const string TRUNCATE_TABLE = "truncate_table";
            public const string TYPE_INFERENCE_MAX_RECORDS_READ = "type_inference_max_records_read";
            public const string TYPE_INFERENCE_MODE = "type_inference_mode";
            public const string ACCURACY = "accuracy";
            public const string SPEED = "speed";
            public const string REMOTE_QUERY = "remote_query";
            public const string REMOTE_QUERY_FILTER_COLUMN = "remote_query_filter_column";
            public const string REMOTE_QUERY_INCREASING_COLUMN = "remote_query_increasing_column";
            public const string REMOTE_QUERY_PARTITION_COLUMN = "remote_query_partition_column";
            public const string UPDATE_ON_EXISTING_PK = "update_on_existing_pk";
        } // end struct Options

        public string table_name { get; set; }

        public IList<string> filepaths { get; set; } = new List<string>();

        public IDictionary<string, IDictionary<string, string>> modify_columns { get; set; } = new Dictionary<string, IDictionary<string, string>>();

        public IDictionary<string, string> create_table_options { get; set; } = new Dictionary<string, string>();

        public IDictionary<string, string> options { get; set; } = new Dictionary<string, string>();
        public CreateTableExternalRequest() { }

        public CreateTableExternalRequest( string table_name,
                                           IList<string> filepaths,
                                           IDictionary<string, IDictionary<string, string>> modify_columns = null,
                                           IDictionary<string, string> create_table_options = null,
                                           IDictionary<string, string> options = null )
        {
            this.table_name = table_name ?? "";
            this.filepaths = filepaths ?? new List<string>();
            this.modify_columns = modify_columns ?? new Dictionary<string, IDictionary<string, string>>();
            this.create_table_options = create_table_options ?? new Dictionary<string, string>();
            this.options = options ?? new Dictionary<string, string>();
        } // end constructor
    } // end class CreateTableExternalRequest

    public class CreateTableExternalResponse : KineticaData
    {
        public string table_name { get; set; }

        public string type_id { get; set; }

        public string type_definition { get; set; }

        public string type_label { get; set; }

        public IDictionary<string, IList<string>> type_properties { get; set; } = new Dictionary<string, IList<string>>();

        public long count_inserted { get; set; }

        public long count_skipped { get; set; }

        public long count_updated { get; set; }

        public IDictionary<string, string> info { get; set; } = new Dictionary<string, string>();

        public IList<string> files { get; set; } = new List<string>();
    } // end class CreateTableExternalResponse
} // end namespace kinetica
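For context, here is a minimal usage sketch, assuming a reachable Kinetica instance and an existing data source; the URL, data source, table, and file names are all hypothetical, and the createTableExternal() convenience overload is assumed to mirror the request constructor above (if it is not available in your version, construct a CreateTableExternalRequest and submit that instead).

using System.Collections.Generic;
using kinetica;

// Sketch: create an external table backed by CSV files on an existing data source.
// All names here ("example_ds", "example.ext_orders", "orders/*.csv") are hypothetical.
Kinetica db = new Kinetica("http://localhost:9191");

var createTableOptions = new Dictionary<string, string>
{
    // Tolerate re-running this request if the table already exists with this structure
    { CreateTableExternalRequest.CreateTableOptions.NO_ERROR_IF_EXISTS,
      CreateTableExternalRequest.CreateTableOptions.TRUE }
};

var options = new Dictionary<string, string>
{
    { CreateTableExternalRequest.Options.DATASOURCE_NAME, "example_ds" },
    { CreateTableExternalRequest.Options.FILE_TYPE,
      CreateTableExternalRequest.Options.DELIMITED_TEXT },
    // Keep a refreshable local copy of the data in the database
    { CreateTableExternalRequest.Options.EXTERNAL_TABLE_TYPE,
      CreateTableExternalRequest.Options.MATERIALIZED }
};

CreateTableExternalResponse response = db.createTableExternal(
    "example.ext_orders",
    new List<string> { "orders/*.csv" },
    null,                 // modify_columns (not implemented yet)
    createTableOptions,
    options );

Brief summaries of the members follow.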
const string DEFAULT_COLUMN_FORMATS: Specifies the default format to be applied to source data loaded into columns with the corresponding ...
const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT: Maximum time to collect Kafka messages before type inferencing on the set of them.
IDictionary<string, IList<string>> type_properties: A mapping of each external table column name to an array of column properties associated with that co...
const string DELIMITED_TEXT: Delimited text file format; e.g., CSV, TSV, PSV, etc.
const string SCHEMA_REGISTRY_CONNECTION_RETRIES: Confluent Schema Registry connection retries.
const string KAFKA_OPTIMISTIC_INGEST: Enable optimistic ingestion where Kafka topic offsets and table data are committed independently to a...
const string FALSE: Reject new records when primary keys match existing records
const string TEXT_SEARCH_COLUMNS: Add 'text_search' property to internally inferenced string columns.
const string ERROR_HANDLING: Specifies how errors should be handled upon insertion.
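For example, to skip malformed records while keeping them for later inspection, ERROR_HANDLING can be combined with the bad-record-table options; a sketch extending the options dictionary from the example above (table name and limit are illustrative):

options[CreateTableExternalRequest.Options.ERROR_HANDLING] =
    CreateTableExternalRequest.Options.IGNORE_BAD_RECORDS;
// Rejected records land in a side table instead of aborting the load (name illustrative)
options[CreateTableExternalRequest.Options.BAD_RECORD_TABLE_NAME] = "example.ext_orders_bad";
options[CreateTableExternalRequest.Options.BAD_RECORD_TABLE_LIMIT] = "10000";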
const string IS_REPLICATED: Affects the distribution scheme for the table's data.
const string BAD_RECORD_TABLE_LIMIT: A positive integer indicating the maximum number of records that can be written to the bad-record-tab...
const string SHARD_KEYS: Comma separated list of column names to set as shard keys, when not specified in the type.
const string REFRESH_METHOD: Method by which the table can be refreshed from its source data.
const string TEXT_SEARCH_MIN_COLUMN_LENGTH: Set the minimum column size for strings to apply the 'text_search' property to.
const string TEXT_QUOTE_CHARACTER: Specifies the character that should be interpreted as a field value quoting character in the source d...
const string IGNORE_BAD_RECORDS: Malformed records are skipped.
const string MANUAL: Refresh only occurs when manually requested by invoking the refresh action of Kinetica....
const string MAX_CONSECUTIVE_INVALID_SCHEMA_FAILURE: Max records to skip due to schema related errors, before failing
const string TEXT_HAS_HEADER: Indicates whether the source data contains a header row.
const string SCHEMA_REGISTRY_MAX_CONSECUTIVE_CONNECTION_FAILURES: Max records to skip due to Schema Registry connection failures, before failing
const string FILE_TYPE: Specifies the type of the file(s) whose records will be inserted.
long count_inserted: Number of records inserted into the external table.
const string DATASOURCE_NAME: Name of an existing external data source from which data file(s) specified in filepaths will be loade...
const string COLUMNS_TO_SKIP: Specifies a comma-delimited list of columns from the source data to skip.
const string HEAD: The head node loads all data.
const string TRUNCATE_TABLE: If set to TRUE, truncates the table specified by table_name prior to loading the file(s).
const string KAFKA_GROUP_ID: The group id to be used when consuming data from a Kafka topic (valid only for Kafka datasource subsc...
const string SINGLE: Insert all records into a single table.
const string SCHEMA_REGISTRY_SCHEMA_NAME: Name of the Avro schema in the schema registry to use when reading Avro records.
const string REMOTE_QUERY_FILTER_COLUMN: Name of column to be used for splitting REMOTE_QUERY into multiple sub-queries using the data distrib...
const string SCHEMA_REGISTRY_CONNECTION_TIMEOUT: Confluent Schema Registry connection timeout (in seconds).
const string PARQUET: Apache Parquet file format
const string DRY_RUN: Does not load data, but walks through the source data and determines the number of valid records,...
const string IGNORE_EXISTING_PK: Specifies the record collision error-suppression policy for inserting into a table with a primary key...
IDictionary<string, IDictionary<string, string>> modify_columns: Not implemented yet.
const string SPEED: Scans data and picks the widest possible column types so that 'all' values will fit with minimum data...
const string IS_AUTOMATIC_PARTITION: If TRUE, a new partition will be created for values which don't fall into an existing partition.
const string EXTERNAL_TABLE_TYPE: Specifies whether the external table holds a local copy of the external data.
const string LOGICAL: External data will not be loaded into the database; the data will be retrieved from the source upon s...
IDictionary<string, string> options: Optional parameters.
const string REMOTE_QUERY_INCREASING_COLUMN: Column on subscribed remote query result that will increase for new records (e.g.,...
const string PERMISSIVE: Records with missing columns are populated with nulls if possible; otherwise, the malformed records a...
const string DATALAKE_CATALOG: Name of an existing datalake (Iceberg) catalog used in loading files
const string TEXT_DELIMITER: Specifies the character delimiting field values in the source data and field names in the header (if ...
string type_definition: A JSON string describing the columns of the created external table
CreateTableExternalRequest(): Constructs a CreateTableExternalRequest object with default parameters.
const string DATALAKE_SNAPSHOT: Snapshot ID of datalake (Iceberg) object
const string AUTO: Auto detect compression type
string type_id: ID of the currently registered table structure type for this external table
const string REMOTE_QUERY: Remote SQL query from which data will be sourced
const string LOCAL_TIME_OFFSET: Apply an offset to Avro local timestamp columns.
const string LOADING_MODE: Scheme for distributing the extraction and loading of data from the source data file(s).
const string SUBSCRIBE: Continuously poll the data source to check for new data and load it into the table.
const string TEXT_ESCAPE_CHARACTER: Specifies the character that is used to escape other characters in the source data.
const string BATCH_SIZE: Number of records to insert per batch when inserting data.
long count_updated: [Not yet implemented] Number of records updated within the external table.
const string BAD_RECORD_TABLE_LIMIT_PER_INPUT: For subscriptions, a positive integer indicating the maximum number of records that can be written to...
const string PRIMARY_KEYS: Comma separated list of column names to set as primary keys, when not specified in the type.
const string TABLE_INSERT_MODE: Insertion scheme to use when inserting records from multiple shapefiles.
IDictionary<string, string> create_table_options: Options from Kinetica.createTable, allowing the structure of the table to be defined independently of...
const string START_OFFSETS: Starting offsets by partition to fetch from Kafka.
const string TEXT_COMMENT_STRING: Specifies the character string that should be interpreted as a comment line prefix in the source data...
string table_name: Name of the table to be created, in [schema_name.]table_name format, using standard name resolution rules ...
IList<string> filepaths: A list of file paths from which data will be sourced; For paths in KiFS, use the URI prefix of kifs:/...
const string COMPRESSION_TYPE: Source data compression type.
const string ON_START: Refresh table on database startup and when manually requested by invoking the refresh action of Kinet...
const string TABLE_PER_FILE: Insert records from each file into a new table corresponding to that file.
const string TRUNCATE_STRINGS: If set to TRUE, truncate string values that are longer than the column's type size.
const string BZIP2: bzip2 file compression.
const string DATALAKE_PATH: Path of datalake (Iceberg) object
const string SHAPEFILE: ShapeFile file format
const string PARTITION_KEYS: Comma-separated list of partition keys, which are the columns or column expressions by which records ...
long count_skipped: Number of records skipped, when not running in ABORT error handling mode.
const string UPDATE_ON_EXISTING_PK: Specifies the record collision policy for inserting into a table with a primary key.
IDictionary<string, string> info: Additional information.
const string KAFKA_OFFSET_RESET_POLICY: Policy to determine whether the Kafka data consumption starts either at earliest offset or latest off...
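The Kafka-related options combine into a streaming subscription; a sketch, assuming an existing Kafka data source (names and values illustrative):

// Continuously ingest from a Kafka data source, starting at the earliest offset
var kafkaOptions = new Dictionary<string, string>
{
    { CreateTableExternalRequest.Options.DATASOURCE_NAME, "example_kafka_ds" },
    { CreateTableExternalRequest.Options.SUBSCRIBE,
      CreateTableExternalRequest.Options.TRUE },
    { CreateTableExternalRequest.Options.KAFKA_GROUP_ID, "example_group" },
    { CreateTableExternalRequest.Options.KAFKA_OFFSET_RESET_POLICY,
      CreateTableExternalRequest.Options.EARLIEST },
    // Documented valid range for consumers per rank is 1-6
    { CreateTableExternalRequest.Options.KAFKA_CONSUMERS_PER_RANK, "2" }
};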
const string ACCURACY: Scans data to get exactly-typed & sized columns for all data scanned.
const string TEXT_NULL_STRING: Specifies the character string that should be interpreted as a null value in the source data.
const string TRIM_SPACE: If set to TRUE, remove leading or trailing space from fields.
const string FOREIGN_SHARD_KEY: Foreign shard key of the format 'source_column references shard_by_column from target_table(primary_k...
const string KAFKA_CONSUMERS_PER_RANK: Number of Kafka consumer threads per rank (valid range 1-6).
const string TYPE_INFERENCE_MODE: Optimize type inferencing for either speed or accuracy.
const string LAYER: Geo files layer(s) name(s): comma separated.
const string REMOTE_QUERY_PARTITION_COLUMN: Alias name for REMOTE_QUERY_FILTER_COLUMN.
const string CHUNK_COLUMN_MAX_MEMORY: Indicates the target maximum data size for each column in a chunk to be used for this table.
const string FOREIGN_KEYS: Semicolon-separated list of foreign keys, of the format '(source_column_name [, .....
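A sketch of both foreign-key options, extending the createTableOptions dictionary from the first example and following the formats quoted in the two entries above; all table and column names are hypothetical:

// Declare a foreign key and a foreign shard key at table-creation time (names illustrative)
createTableOptions[CreateTableExternalRequest.CreateTableOptions.FOREIGN_KEYS] =
    "(customer_id) references example.customer(id)";
createTableOptions[CreateTableExternalRequest.CreateTableOptions.FOREIGN_SHARD_KEY] =
    "customer_id references id from example.customer(id)";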
const string MATERIALIZED: Loads a copy of the external data into the database, refreshed on demand
const string POLL_INTERVAL: If TRUE, the number of seconds between attempts to load external files into the table.
const string COLUMNS_TO_LOAD: Specifies a comma-delimited list of columns from the source data to load.
const string INGESTION_MODE: Whether to do a full load, dry run, or perform a type inference on the source data.
const string CHUNK_MAX_MEMORY: Indicates the target maximum data size for all columns in a chunk to be used for this table.
const string TRUE: Upsert new records when primary keys match existing records
const string GZIP: gzip file compression.
const string TYPE_ID: ID of a currently registered type.
const string TEXT_HEADER_PROPERTY_DELIMITER: Specifies the delimiter for column properties in the header row (if present).
const string TTL: Sets the TTL of the table specified in table_name.
const string GDAL_CONFIGURATION_OPTIONS: Comma separated list of gdal conf options, for the specific requests: key=value
const string KAFKA_SUBSCRIPTION_CANCEL_AFTER: Sets the Kafka subscription lifespan (in minutes).
const string PARTITION_TYPE: Partitioning scheme to use.
const string STRATEGY_DEFINITION: The tier strategy for the table and its columns.
const string NUM_TASKS_PER_RANK: Number of tasks for reading file per rank.
const string IS_RESULT_TABLE: Indicates whether the table is a memory-only table.
const string TYPE_INFERENCE_ONLY: Infer the type of the source data and return, without ingesting any data.
const string CHUNK_SIZE: Indicates the number of records per chunk to be used for this table.
const string SKIP_LINES: Skip a number of lines from the beginning of the file.
const string NO_ERROR_IF_EXISTS: If TRUE, prevents an error from occurring if the table already exists and is of the given type.
const string ABORT: Stops current insertion and aborts entire operation when an error is encountered.
const string DISTRIBUTED_SHARED: The head node coordinates loading data by worker processes across all nodes from shared files availab...
const string BAD_RECORD_TABLE_NAME: Name of a table to which records that were rejected are written.
const string PARTITION_DEFINITIONS: Comma-separated list of partition definitions, whose format depends on the choice of PARTITION_TYPE.
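The three partition options work together; in this sketch (again extending createTableOptions) the key expression and definition string are assumptions modeled on Kinetica's range-partitioning examples, so consult the partitioning documentation for the exact definition grammar:

// Range-partition the table by order year (definition syntax illustrative, not authoritative)
createTableOptions[CreateTableExternalRequest.CreateTableOptions.PARTITION_TYPE] =
    CreateTableExternalRequest.CreateTableOptions.RANGE;
createTableOptions[CreateTableExternalRequest.CreateTableOptions.PARTITION_KEYS] =
    "YEAR(order_date)";
createTableOptions[CreateTableExternalRequest.CreateTableOptions.PARTITION_DEFINITIONS] =
    "order_2023 min(2023) max(2024), order_2024 min(2024) max(2025)";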
const string COMPRESSION_CODEC: The default compression codec for this table's columns.
const string COLUMN_FORMATS: For each target column specified, applies the column-property-bound format to the source data loaded ...
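A sketch of the two format options together, extending the options dictionary from the first example; the column name and format strings are illustrative, and the JSON shapes (a column-to-property-format map for COLUMN_FORMATS, a property-to-format map for DEFAULT_COLUMN_FORMATS) are assumptions based on the endpoint's documentation:

// Override the date format for one column, with a fallback for all other date columns
options[CreateTableExternalRequest.Options.COLUMN_FORMATS] =
    "{\"order_date\": {\"date\": \"%Y.%m.%d\"}}";
options[CreateTableExternalRequest.Options.DEFAULT_COLUMN_FORMATS] =
    "{\"date\": \"%m/%d/%Y\"}";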
const string DISTRIBUTED_LOCAL: A single worker process on each node loads all files that are available to it.
const string JDBC_FETCH_SIZE: The JDBC fetch size, which determines how many rows to fetch per round trip.
CreateTableExternalRequest(string table_name, IList<string> filepaths, IDictionary<string, IDictionary<string, string>> modify_columns = null, IDictionary<string, string> create_table_options = null, IDictionary<string, string> options = null): Constructs a CreateTableExternalRequest object with the specified parameters.
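Once the call returns, the response fields shown in the listing above carry the load statistics; a sketch of consuming them, continuing the first example (with using System; in scope):

// Report what the load did, using the response from the earlier sketch
Console.WriteLine($"Created {response.table_name} (type {response.type_id})");
Console.WriteLine($"Inserted: {response.count_inserted}, skipped: {response.count_skipped}");
foreach (string file in response.files)
    Console.WriteLine($"  loaded: {file}");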
const string FLATTEN_COLUMNS: Specifies how to handle nested columns.
const string FULL: Run a type inference on the source data (if needed) and ingest
string type_label: The user-defined description associated with the table's structure
const string MAX_RECORDS_TO_LOAD: Limit the number of records to load in this request: if this number is larger than BATCH_SIZE,...