Kinetica C# API  Version 7.1.10.0
CreateTableExternal.cs
/*
 * This file was autogenerated by the Kinetica schema processor.
 *
 * DO NOT EDIT DIRECTLY.
 */

using System.Collections.Generic;


namespace kinetica
{
    public class CreateTableExternalRequest : KineticaData
    {
        public struct CreateTableOptions
        {
            public const string TYPE_ID = "type_id";

            public const string NO_ERROR_IF_EXISTS = "no_error_if_exists";
            public const string TRUE = "true";
            public const string FALSE = "false";

            public const string IS_REPLICATED = "is_replicated";

            public const string FOREIGN_KEYS = "foreign_keys";

            public const string FOREIGN_SHARD_KEY = "foreign_shard_key";

            public const string PARTITION_TYPE = "partition_type";

            public const string RANGE = "RANGE";

            public const string INTERVAL = "INTERVAL";

            public const string LIST = "LIST";

            public const string HASH = "HASH";

            public const string SERIES = "SERIES";

            public const string PARTITION_KEYS = "partition_keys";

            public const string PARTITION_DEFINITIONS = "partition_definitions";

            public const string IS_AUTOMATIC_PARTITION = "is_automatic_partition";

            public const string TTL = "ttl";

            public const string CHUNK_SIZE = "chunk_size";

            public const string IS_RESULT_TABLE = "is_result_table";

            public const string STRATEGY_DEFINITION = "strategy_definition";
        } // end struct CreateTableOptions

        public struct Options
        {
            public const string BAD_RECORD_TABLE_NAME = "bad_record_table_name";

            public const string BAD_RECORD_TABLE_LIMIT = "bad_record_table_limit";

            public const string BAD_RECORD_TABLE_LIMIT_PER_INPUT = "bad_record_table_limit_per_input";

            public const string BATCH_SIZE = "batch_size";

            public const string COLUMN_FORMATS = "column_formats";

            public const string COLUMNS_TO_LOAD = "columns_to_load";

            public const string COLUMNS_TO_SKIP = "columns_to_skip";

            public const string COMPRESSION_TYPE = "compression_type";

            public const string NONE = "none";

            public const string AUTO = "auto";

            public const string GZIP = "gzip";

            public const string BZIP2 = "bzip2";

            public const string DATASOURCE_NAME = "datasource_name";

            public const string DEFAULT_COLUMN_FORMATS = "default_column_formats";

            public const string ERROR_HANDLING = "error_handling";

            public const string PERMISSIVE = "permissive";

            public const string IGNORE_BAD_RECORDS = "ignore_bad_records";

            public const string ABORT = "abort";

            public const string EXTERNAL_TABLE_TYPE = "external_table_type";

            public const string MATERIALIZED = "materialized";

            public const string LOGICAL = "logical";

            public const string FILE_TYPE = "file_type";

            public const string AVRO = "avro";

            public const string DELIMITED_TEXT = "delimited_text";

            public const string GDB = "gdb";

            public const string JSON = "json";

            public const string PARQUET = "parquet";

            public const string SHAPEFILE = "shapefile";

            public const string GDAL_CONFIGURATION_OPTIONS = "gdal_configuration_options";

            public const string IGNORE_EXISTING_PK = "ignore_existing_pk";

            public const string TRUE = "true";

            public const string FALSE = "false";

            public const string INGESTION_MODE = "ingestion_mode";

            public const string FULL = "full";

            public const string DRY_RUN = "dry_run";

            public const string TYPE_INFERENCE_ONLY = "type_inference_only";

            public const string JDBC_FETCH_SIZE = "jdbc_fetch_size";

            public const string KAFKA_CONSUMERS_PER_RANK = "kafka_consumers_per_rank";

            public const string KAFKA_GROUP_ID = "kafka_group_id";

            public const string KAFKA_OFFSET_RESET_POLICY = "kafka_offset_reset_policy";
            public const string EARLIEST = "earliest";
            public const string LATEST = "latest";

            public const string KAFKA_OPTIMISTIC_INGEST = "kafka_optimistic_ingest";

            public const string KAFKA_SUBSCRIPTION_CANCEL_AFTER = "kafka_subscription_cancel_after";

            public const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT = "kafka_type_inference_fetch_timeout";

            public const string LAYER = "layer";

            public const string LOADING_MODE = "loading_mode";

            public const string HEAD = "head";

            public const string DISTRIBUTED_SHARED = "distributed_shared";

            public const string DISTRIBUTED_LOCAL = "distributed_local";

            public const string LOCAL_TIME_OFFSET = "local_time_offset";

            public const string MAX_RECORDS_TO_LOAD = "max_records_to_load";

            public const string NUM_TASKS_PER_RANK = "num_tasks_per_rank";

            public const string POLL_INTERVAL = "poll_interval";

            public const string PRIMARY_KEYS = "primary_keys";

            public const string REFRESH_METHOD = "refresh_method";

            public const string MANUAL = "manual";

            public const string ON_START = "on_start";

            public const string SCHEMA_REGISTRY_SCHEMA_NAME = "schema_registry_schema_name";

            public const string SHARD_KEYS = "shard_keys";

            public const string SKIP_LINES = "skip_lines";

            public const string SUBSCRIBE = "subscribe";

            public const string TABLE_INSERT_MODE = "table_insert_mode";

            public const string SINGLE = "single";

            public const string TABLE_PER_FILE = "table_per_file";

            public const string TEXT_COMMENT_STRING = "text_comment_string";

            public const string TEXT_DELIMITER = "text_delimiter";

            public const string TEXT_ESCAPE_CHARACTER = "text_escape_character";

            public const string TEXT_HAS_HEADER = "text_has_header";

            public const string TEXT_HEADER_PROPERTY_DELIMITER = "text_header_property_delimiter";

            public const string TEXT_NULL_STRING = "text_null_string";

            public const string TEXT_QUOTE_CHARACTER = "text_quote_character";

            public const string TEXT_SEARCH_COLUMNS = "text_search_columns";

            public const string TEXT_SEARCH_MIN_COLUMN_LENGTH = "text_search_min_column_length";

            public const string TRUNCATE_STRINGS = "truncate_strings";

            public const string TRUNCATE_TABLE = "truncate_table";

            public const string TYPE_INFERENCE_MODE = "type_inference_mode";

            public const string ACCURACY = "accuracy";

            public const string SPEED = "speed";

            public const string REMOTE_QUERY = "remote_query";

            public const string REMOTE_QUERY_FILTER_COLUMN = "remote_query_filter_column";

            public const string REMOTE_QUERY_INCREASING_COLUMN = "remote_query_increasing_column";

            public const string REMOTE_QUERY_PARTITION_COLUMN = "remote_query_partition_column";

            public const string UPDATE_ON_EXISTING_PK = "update_on_existing_pk";
        } // end struct Options

        public string table_name { get; set; }

        public IList<string> filepaths { get; set; } = new List<string>();

        public IDictionary<string, IDictionary<string, string>> modify_columns { get; set; } = new Dictionary<string, IDictionary<string, string>>();

        public IDictionary<string, string> create_table_options { get; set; } = new Dictionary<string, string>();

        public IDictionary<string, string> options { get; set; } = new Dictionary<string, string>();

        public CreateTableExternalRequest() { }

        public CreateTableExternalRequest( string table_name,
                                           IList<string> filepaths,
                                           IDictionary<string, IDictionary<string, string>> modify_columns = null,
                                           IDictionary<string, string> create_table_options = null,
                                           IDictionary<string, string> options = null)
        {
            this.table_name = table_name ?? "";
            this.filepaths = filepaths ?? new List<string>();
            this.modify_columns = modify_columns ?? new Dictionary<string, IDictionary<string, string>>();
            this.create_table_options = create_table_options ?? new Dictionary<string, string>();
            this.options = options ?? new Dictionary<string, string>();
        } // end constructor

    } // end class CreateTableExternalRequest

    public class CreateTableExternalResponse : KineticaData
    {
        public string table_name { get; set; }

        public string type_id { get; set; }

        public string type_definition { get; set; }

        public string type_label { get; set; }

        public IDictionary<string, IList<string>> type_properties { get; set; } = new Dictionary<string, IList<string>>();

        public long count_inserted { get; set; }

        public long count_skipped { get; set; }

        public long count_updated { get; set; }

        public IDictionary<string, string> info { get; set; } = new Dictionary<string, string>();
        public IList<string> files { get; set; } = new List<string>();

    } // end class CreateTableExternalResponse

} // end namespace kinetica
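
Usage sketch

The following is a minimal sketch of how the request and response classes above fit together, using the nested option constants. The server URL, table name, and KiFS file path are placeholder assumptions; error handling is omitted for brevity.

using System;
using System.Collections.Generic;
using kinetica;

public static class CreateTableExternalExample
{
    public static void Main()
    {
        // Connect to a Kinetica instance (URL is an assumption).
        Kinetica db = new Kinetica("http://localhost:9191");

        // Table-structure options (struct CreateTableOptions): replicate the
        // table and don't error if it already exists with this type.
        var createTableOptions = new Dictionary<string, string>
        {
            { CreateTableExternalRequest.CreateTableOptions.IS_REPLICATED,
              CreateTableExternalRequest.CreateTableOptions.TRUE },
            { CreateTableExternalRequest.CreateTableOptions.NO_ERROR_IF_EXISTS,
              CreateTableExternalRequest.CreateTableOptions.TRUE }
        };

        // Load options (struct Options): ingest a CSV with a header row,
        // skipping malformed records instead of aborting the whole load.
        var options = new Dictionary<string, string>
        {
            { CreateTableExternalRequest.Options.FILE_TYPE,
              CreateTableExternalRequest.Options.DELIMITED_TEXT },
            { CreateTableExternalRequest.Options.TEXT_HAS_HEADER,
              CreateTableExternalRequest.Options.TRUE },
            { CreateTableExternalRequest.Options.ERROR_HANDLING,
              CreateTableExternalRequest.Options.IGNORE_BAD_RECORDS }
        };

        // Hypothetical table name and KiFS path, for illustration only.
        var request = new CreateTableExternalRequest(
            table_name: "example.ext_orders",
            filepaths: new List<string> { "kifs://data/orders.csv" },
            modify_columns: null,   // not implemented yet (see member docs)
            create_table_options: createTableOptions,
            options: options);

        CreateTableExternalResponse response = db.createTableExternal(request);
        Console.WriteLine("inserted: " + response.count_inserted
                          + ", skipped: " + response.count_skipped);
    }
}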
Member documentation

class CreateTableExternalRequest
    A set of parameters for Kinetica.createTableExternal(string,IList{string},IDictionary{string, IDictionary{string, string}},IDictionary{string, string},IDictionary{string, string}).

    CreateTableExternalRequest()
        Constructs a CreateTableExternalRequest object with default parameters.

    CreateTableExternalRequest(string table_name, IList<string> filepaths, IDictionary<string, IDictionary<string, string>> modify_columns=null, IDictionary<string, string> create_table_options=null, IDictionary<string, string> options=null)
        Constructs a CreateTableExternalRequest object with the specified parameters.

    string table_name
        Name of the table to be created, in [schema_name.

    IList<string> filepaths
        A list of file paths from which data will be sourced; for paths in KiFS, use the URI prefix of kifs...

    IDictionary<string, IDictionary<string, string>> modify_columns
        Not implemented yet.

    IDictionary<string, string> create_table_options
        Options from /create/table, allowing the structure of the table to be defined independently of the da...

    IDictionary<string, string> options
        Optional parameters.

struct CreateTableExternalRequest.CreateTableOptions
    Options from /create/table, allowing the structure of the table to be defined independently of the da...

    const string TYPE_ID
        ID of a currently registered type.

    const string NO_ERROR_IF_EXISTS
        If true, prevents an error from occurring if the table already exists and is of the given type...

    const string IS_REPLICATED
        Affects the distribution scheme for the table's data.

    const string FOREIGN_KEYS
        Semicolon-separated list of foreign keys, of the format '(source_column_name [, ...]) references target_table_name(primary_key_column_name [, ...]) [as foreign_key_name]'.

    const string FOREIGN_SHARD_KEY
        Foreign shard key of the format 'source_column references shard_by_column from target_table(primary_k...

    const string PARTITION_TYPE
        Partitioning scheme to use.

    const string INTERVAL
        Use interval partitioning.

    const string PARTITION_KEYS
        Comma-separated list of partition keys, which are the columns or column expressions by which records ...

    const string PARTITION_DEFINITIONS
        Comma-separated list of partition definitions, whose format depends on the choice of partition_type...

    const string IS_AUTOMATIC_PARTITION
        If true, a new partition will be created for values which don't fall into an existing partition...

    const string TTL
        Sets the TTL of the table specified in table_name.

    const string CHUNK_SIZE
        Indicates the number of records per chunk to be used for this table.

    const string IS_RESULT_TABLE
        Indicates whether the table is a memory-only table.

    const string STRATEGY_DEFINITION
        The tier strategy for the table and its columns.

struct CreateTableExternalRequest.Options

    const string BAD_RECORD_TABLE_NAME
        Name of a table to which records that were rejected are written.

    const string BAD_RECORD_TABLE_LIMIT
        A positive integer indicating the maximum number of records that can be written to the bad-record-tab...

    const string BAD_RECORD_TABLE_LIMIT_PER_INPUT
        For subscriptions, a positive integer indicating the maximum number of records that can be written to...

    const string BATCH_SIZE
        Number of records to insert per batch when inserting data.

    const string COLUMN_FORMATS
        For each target column specified, applies the column-property-bound format to the source data loaded ...

    const string COLUMNS_TO_LOAD
        Specifies a comma-delimited list of columns from the source data to load.

    const string COLUMNS_TO_SKIP
        Specifies a comma-delimited list of columns from the source data to skip.

    const string COMPRESSION_TYPE
        Source data compression type. Supported values: NONE (no compression), AUTO, GZIP, BZIP2.

    const string AUTO
        Auto-detect the compression type.

    const string GZIP
        gzip file compression.

    const string BZIP2
        bzip2 file compression.

    const string DATASOURCE_NAME
        Name of an existing external data source from which data file(s) specified in filepaths will be loade...

    const string DEFAULT_COLUMN_FORMATS
        Specifies the default format to be applied to source data loaded into columns with the corresponding ...

    const string ERROR_HANDLING
        Specifies how errors should be handled upon insertion.

    const string PERMISSIVE
        Records with missing columns are populated with nulls if possible; otherwise, the malformed records a...

    const string IGNORE_BAD_RECORDS
        Malformed records are skipped.

    const string ABORT
        Stops the current insertion and aborts the entire operation when an error is encountered. ...

    const string EXTERNAL_TABLE_TYPE
        Specifies whether the external table holds a local copy of the external data.

    const string MATERIALIZED
        Loads a copy of the external data into the database, refreshed on demand.

    const string LOGICAL
        External data will not be loaded into the database; the data will be retrieved from the source upon s...

    const string FILE_TYPE
        Specifies the type of the file(s) whose records will be inserted.

    const string DELIMITED_TEXT
        Delimited text file format; e.g., CSV, TSV, PSV, etc.

    const string PARQUET
        Apache Parquet file format.

    const string SHAPEFILE
        ShapeFile file format.

    const string GDAL_CONFIGURATION_OPTIONS
        Comma-separated list of GDAL configuration options for the specific request: key=value.

    const string IGNORE_EXISTING_PK
        Specifies the record collision error-suppression policy for inserting into a table with a primary key...

    const string UPDATE_ON_EXISTING_PK
        Specifies the record collision policy for inserting into a table with a primary key.

    const string TRUE
        Upsert new records when primary keys match existing records.

    const string FALSE
        Reject new records when primary keys match existing records.

    const string INGESTION_MODE
        Whether to do a full load, dry run, or perform a type inference on the source data.

    const string FULL
        Run a type inference on the source data (if needed) and ingest.

    const string DRY_RUN
        Does not load data, but walks through the source data and determines the number of valid records...

    const string TYPE_INFERENCE_ONLY
        Infer the type of the source data and return, without ingesting any data.

    const string JDBC_FETCH_SIZE
        The JDBC fetch size, which determines how many rows to fetch per round trip.

    const string KAFKA_CONSUMERS_PER_RANK
        Number of Kafka consumer threads per rank (valid range 1-6).

    const string KAFKA_GROUP_ID
        The group id to be used when consuming data from a Kafka topic (valid only for Kafka datasource subsc...

    const string KAFKA_OFFSET_RESET_POLICY
        Policy to determine whether the Kafka data consumption starts either at earliest offset or latest off...

    const string KAFKA_OPTIMISTIC_INGEST
        Enable optimistic ingestion where Kafka topic offsets and table data are committed independently to a...

    const string KAFKA_SUBSCRIPTION_CANCEL_AFTER
        Sets the Kafka subscription lifespan (in minutes).

    const string KAFKA_TYPE_INFERENCE_FETCH_TIMEOUT
        Maximum time to collect Kafka messages before type inferencing on the set of them.

    const string LAYER
        Geo files layer(s) name(s), comma separated.

    const string LOADING_MODE
        Scheme for distributing the extraction and loading of data from the source data file(s).

    const string HEAD
        The head node loads all data.

    const string DISTRIBUTED_SHARED
        The head node coordinates loading data by worker processes across all nodes from shared files availab...

    const string DISTRIBUTED_LOCAL
        A single worker process on each node loads all files that are available to it.

    const string LOCAL_TIME_OFFSET
        Apply an offset to Avro local timestamp columns.

    const string MAX_RECORDS_TO_LOAD
        Limit the number of records to load in this request: if this number is larger than batch_size...

    const string NUM_TASKS_PER_RANK
        Number of tasks per rank for reading files.

    const string POLL_INTERVAL
        Number of seconds between attempts to load external files into the table.

    const string PRIMARY_KEYS
        Comma-separated list of column names to set as primary keys, when not specified in the type...

    const string REFRESH_METHOD
        Method by which the table can be refreshed from its source data.

    const string MANUAL
        Refresh only occurs when manually requested by invoking the refresh action of Kinetica.alterTable(string,string,string,IDictionary{string, string}) on this table.

    const string ON_START
        Refresh table on database startup and when manually requested by invoking the refresh action of Kinet...

    const string SCHEMA_REGISTRY_SCHEMA_NAME
        Name of the Avro schema in the schema registry to use when reading Avro records.

    const string SHARD_KEYS
        Comma-separated list of column names to set as shard keys, when not specified in the type...

    const string SKIP_LINES
        Skip a number of lines from the beginning of the file.

    const string SUBSCRIBE
        Continuously poll the data source to check for new data and load it into the table.

    const string TABLE_INSERT_MODE
        Insertion scheme to use when inserting records from multiple shapefiles.

    const string SINGLE
        Insert all records into a single table.

    const string TABLE_PER_FILE
        Insert records from each file into a new table corresponding to that file.

    const string TEXT_COMMENT_STRING
        Specifies the character string that should be interpreted as a comment line prefix in the source data...

    const string TEXT_DELIMITER
        Specifies the character delimiting field values in the source data and field names in the header (if ...

    const string TEXT_ESCAPE_CHARACTER
        Specifies the character that is used to escape other characters in the source data.

    const string TEXT_HAS_HEADER
        Indicates whether the source data contains a header row.

    const string TEXT_HEADER_PROPERTY_DELIMITER
        Specifies the delimiter for column properties in the header row (if present).

    const string TEXT_NULL_STRING
        Specifies the character string that should be interpreted as a null value in the source data...

    const string TEXT_QUOTE_CHARACTER
        Specifies the character that should be interpreted as a field value quoting character in the source d...

    const string TEXT_SEARCH_COLUMNS
        Add 'text_search' property to internally inferred string columns.

    const string TEXT_SEARCH_MIN_COLUMN_LENGTH
        Set the minimum column size for strings to apply the 'text_search' property to.

    const string TRUNCATE_STRINGS
        If set to true, truncate string values that are longer than the column's type size.

    const string TRUNCATE_TABLE
        If set to true, truncates the table specified by table_name prior to loading the file(s).

    const string TYPE_INFERENCE_MODE
        Optimize type inferencing for either speed or accuracy.

    const string ACCURACY
        Scans data to get exactly-typed & sized columns for all data scanned.

    const string SPEED
        Scans data and picks the widest possible column types so that 'all' values will fit with minimum data...

    const string REMOTE_QUERY
        Remote SQL query from which data will be sourced.

    const string REMOTE_QUERY_FILTER_COLUMN
        Name of column to be used for splitting remote_query into multiple sub-queries using the data distrib...

    const string REMOTE_QUERY_INCREASING_COLUMN
        Column on subscribed remote query result that will increase for new records (e.g., TIMESTAMP).

    const string REMOTE_QUERY_PARTITION_COLUMN
        Alias name for remote_query_filter_column.

class CreateTableExternalResponse
    A set of results returned by Kinetica.createTableExternal(string,IList{string},IDictionary{string, IDictionary{string, string}},IDictionary{string, string},IDictionary{string, string}).

    string type_id
        ID of the currently registered table structure type for this external table.

    string type_definition
        A JSON string describing the columns of the created external table.

    string type_label
        The user-defined description associated with the table's structure.

    IDictionary<string, IList<string>> type_properties
        A mapping of each external table column name to an array of column properties associated with that co...

    long count_inserted
        Number of records inserted into the external table.

    long count_skipped
        Number of records skipped, when not running in abort error handling mode.

    long count_updated
        [Not yet implemented] Number of records updated within the external table.

    IDictionary<string, string> info
        Additional information.

class KineticaData
    Class to help with Avro encoding for Kinetica. Definition: KineticaData.cs:14.
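
The streaming-related constants above combine in the same way as the file-load options. The following sketch, in which the data source name and consumer group id are hypothetical, assembles an options map for a continuous Kafka subscription that starts from the earliest available offset and cancels itself after 24 hours.

using System.Collections.Generic;
using kinetica;

// Sketch only: "kafka_orders_ds" and "orders_group" are placeholder names.
var streamingOptions = new Dictionary<string, string>
{
    // Pull records from an existing Kafka data source...
    { CreateTableExternalRequest.Options.DATASOURCE_NAME, "kafka_orders_ds" },
    // ...continuously, rather than as a one-shot load.
    { CreateTableExternalRequest.Options.SUBSCRIBE,
      CreateTableExternalRequest.Options.TRUE },
    // Start consumption at the earliest available topic offset.
    { CreateTableExternalRequest.Options.KAFKA_OFFSET_RESET_POLICY,
      CreateTableExternalRequest.Options.EARLIEST },
    // Consumer group id used when reading the topic.
    { CreateTableExternalRequest.Options.KAFKA_GROUP_ID, "orders_group" },
    // End the subscription after 24 hours (value is in minutes).
    { CreateTableExternalRequest.Options.KAFKA_SUBSCRIPTION_CANCEL_AFTER, "1440" }
};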