##############################################
# Autogenerated GPUdb Python API file. 
# 
# *****Do NOT modify this file***** 
# 
##############################################
# ---------------------------------------------------------------------------
# gpudb.py - The Python API to interact with a GPUdb server.
#
# Copyright (c) 2014 GIS Federal
# ---------------------------------------------------------------------------
from __future__ import print_function
try:
    from io import BytesIO
except ImportError:
    from cStringIO import StringIO as BytesIO
try:
    import httplib
except ImportError:
    import http.client as httplib
import base64
import os, sys
import json
import random
import uuid
try:
    from collections.abc import Iterator # Python >= 3.3
except ImportError:
    from collections import Iterator
from decimal import Decimal
if sys.version_info[0] >= 3: # checking the major component
    long = int
    basestring = str
    class unicode:
        pass
# ---------------------------------------------------------------------------
# The absolute path of this gpudb.py module for importing local packages
gpudb_module_path = __file__
if gpudb_module_path[len(gpudb_module_path)-3:] == "pyc": # strip the trailing 'c' from a .pyc path to point back at gpudb.py
    gpudb_module_path = gpudb_module_path[0:len(gpudb_module_path)-1]
if os.path.islink(gpudb_module_path): # allow symlinks to gpudb.py
    gpudb_module_path = os.readlink(gpudb_module_path)
if not os.path.isabs(gpudb_module_path): # take care of relative symlinks
    gpudb_module_path = os.path.join(os.path.dirname(__file__), gpudb_module_path)
gpudb_module_path = os.path.dirname(os.path.abspath(gpudb_module_path))
# Search for our bundled modules first; this avoids needing imp tricks or virtualenvs.
if (gpudb_module_path + "/packages") not in sys.path :
    sys.path.insert(1, gpudb_module_path + "/packages")
# ---------------------------------------------------------------------------
# Local imports after adding our module search path
from avro import schema, datafile, io
if sys.version_info >= (2, 7):
    import collections
else:
    import ordereddict as collections # a separate package
# Override some python3 avro things
if sys.version_info >= (3, 0):
    schema.parse = schema.Parse
    schema.RecordSchema.fields_dict = schema.RecordSchema.field_map
have_snappy = False
try:
    import snappy
    have_snappy = True
except ImportError:
    have_snappy = False
from tabulate import tabulate
# Some string constants used throughout the program
class C:
    """Some string constants used throughout the program."""
    _fields = "fields"
    # /show/table response
    _table_descriptions = "table_descriptions"
    _collection   = "COLLECTION"
    _view         = "VIEW"
    _replicated   = "REPLICATED"
    _join         = "JOIN"
    _result_table = "RESULT_TABLE"
    _total_full_size = "total_full_size"
    # /show/system/properties response
    _property_map = "property_map"
    _gaia_version = "version.gpudb_core_version"
# end class C
# ---------------------------------------------------------------------------
# _ConnectionToken - Private wrapper class to manage connection logic
# ---------------------------------------------------------------------------
class _ConnectionToken(object):
    """Internal wrapper class to handle multiple server logic."""
    def __init__(self, host, port, connection):
        assert (type(host) is str), "Expected a string host address, got: '"+str(host)+"'"
        # host may take the form of :
        #  - "https://user:password@domain.com:port/path/"
        if host.startswith("http://") :    # Allow http://, but remove it.
            host = host[7:]
        elif host.startswith("https://") : # Allow https://, but remove it.
            host = host[8:]
            connection = "HTTPS" # force it
        # Parse the username and password, if supplied.
        host_at_sign_pos = host.find('@')
        if host_at_sign_pos != -1 :
            user_pass = host[:host_at_sign_pos]
            host = host[host_at_sign_pos+1:]
            user_pass_list = user_pass.split(':')
            username = user_pass_list[0]
            if len(user_pass_list) > 1 :
                password = user_pass_list[1]
        url_path = ""
        # Find the URL /path/ and remove it to get the ip address.
        host_path_pos = host.find('/')
        if host_path_pos != -1:
            url_path = host[host_path_pos:]
            if url_path[-1] == '/':
                url_path = url_path[:-1]
            host = host[:host_path_pos]
        # Override default port if specified in ip address
        host_port_pos = host.find(':')
        if host_port_pos != -1 :
            port = host[host_port_pos+1:]
            host = host[:host_port_pos]
        # Port does not have to be provided if using standard HTTP(S) ports.
        if (port == None) or len(str(port)) == 0:
            if connection == 'HTTP' :
                port = 80
            elif connection == 'HTTPS' :
                port = 443
        # Validate port
        try :
            port = int(port)
        except (TypeError, ValueError) :
            assert False, "Expected a numeric port, got: '" + str(port) + "'"
        assert (port > 0) and (port < 65536), "Expected a valid port (1-65535), got: '"+str(port)+"'"
        assert (len(host) > 0), "Expected a valid host address, got an empty string."
        assert (connection in ["HTTP", "HTTPS"]), "Expected connection to be 'HTTP' or 'HTTPS', got: '"+str(connection)+"'"
        self._host       = host
        self._port       = int(port)
        self._connection = connection
        self._gpudb_url_path = url_path
# end class _ConnectionToken
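# A couple of illustrative parses (the hosts below are hypothetical):
#
#     _ConnectionToken( "https://user:pass@gpudb.example.com/gpudb", None, "HTTP" )
#         # -> host "gpudb.example.com", port 443 (HTTPS default), connection
#         #    forced to "HTTPS", URL path "/gpudb"
#     _ConnectionToken( "127.0.0.1:9191", None, "HTTP" )
#         # -> host "127.0.0.1", port 9191, connection "HTTP", empty URL path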
# ---------------------------------------------------------------------------
# Utility Functions
# ---------------------------------------------------------------------------
class _Util(object):
    @staticmethod
    def is_ok( response_object ):
        """Returns True if the response object's status is OK."""
        return (response_object['status_info']['status'] == 'OK')
    # end is_ok
    @staticmethod
    def get_error_msg( response_object ):
        """Returns the error message for the query, if any.  None otherwise."""
        if (response_object['status_info']['status'] != 'ERROR'):
            return None
        return response_object['status_info']['message']
    # end get_error_msg
    @staticmethod
    def is_list_or_dict( arg ):
        """Returns whether the given argument either a list or a dict
        (or an OrderedDict).
        """
        return ( isinstance( arg, list )
                 or isinstance( arg, dict )
                 or isinstance( arg, collections.OrderedDict ) )
    # end is_list_or_dict
    if sys.version_info[0] >= 3: # checking the major component
        # Declaring the python 3 version of this static method
        @staticmethod
        def str_to_bytes(value):
            # Convert each character to its byte value
            return bytes( ord(b) for b in value )
        # end str_to_bytes
    else:
        # Declaring the python 2 version of this static method
        @staticmethod
        def str_to_bytes(value):
            if isinstance(value, unicode):
                data = bytes()
                for c in value:
                    data += chr(ord(c))
                return data
            else:
                # The python 2 output
                return value
        # end str_to_bytes
    # end py 2 vs. 3
    @staticmethod
    def ensure_bytes(value):
        if isinstance(value, bytes) and not isinstance(value, str):
            return value
        elif isinstance(value, basestring):
            return _Util.str_to_bytes(value)
        else:
            raise Exception("Unhandled data type: " + str(type(value)))
    # end ensure_bytes
    @staticmethod
    def bytes_to_str(value):
        return ''.join([chr(b) for b in value])
    # end bytes_to_str
    @staticmethod
    def ensure_str(value):
        if isinstance(value, basestring):
            return value
        elif isinstance(value, bytes):
            return _Util.bytes_to_str(value)
        else:
            raise Exception("Unhandled data type: " + str(type(value)))
    # end ensure_str
    @staticmethod
    def convert_dict_bytes_to_str(value):
        for key in list(value):
            val = value[key]
            if isinstance(val, bytes) and not isinstance(val, str):
                value[key] = ''.join([chr(b) for b in val])
            elif isinstance(val, dict):
                value[key] = _Util.convert_dict_bytes_to_str(val)
        return value
    # end convert_dict_bytes_to_str
    @staticmethod
    def decode_binary_data( SCHEMA, encoded_data ):
        """Given a schema and binary encoded data, decode it.
        """
        encoded_data = _Util.ensure_bytes( encoded_data )
        output = BytesIO( encoded_data )
        bd = io.BinaryDecoder( output )
        reader = io.DatumReader( SCHEMA )
        out = reader.read( bd ) # read, given a decoder
        return out
    # end decode_binary_data
    @staticmethod
    def encode_binary_data( SCHEMA, raw_data, encoding = "binary" ):
        """Given a schema and raw data, encode it.
        """
        output = BytesIO()
        be = io.BinaryEncoder( output )
        # Create a 'record' (datum) writer
        writer = io.DatumWriter( SCHEMA )
        writer.write( raw_data, be )
        result = None
        if encoding.lower() == 'json':
            result = _Util.ensure_str( output.getvalue() )
        else:
            result = output.getvalue()
        return result
    # end encode_binary_data
# end class _Util
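# A minimal round-trip sketch with the helpers above (the schema and values
# here are invented for illustration):
#
#     point_schema = schema.parse(
#         '{"type": "record", "name": "point",'
#         ' "fields": [ {"name": "x", "type": "double"},'
#         '             {"name": "y", "type": "double"} ]}' )
#     encoded = _Util.encode_binary_data( point_schema, {"x": 1.0, "y": 2.0} )
#     decoded = _Util.decode_binary_data( point_schema, encoded )
#     # decoded -> {'x': 1.0, 'y': 2.0}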
# ---------------------------------------------------------------------------
# Utility Classes
# ---------------------------------------------------------------------------
class AttrDict(dict):
    """Converts a dictionary into a class object such that the entries in the
    dict can be accessed using dot '.' notation.
    """
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self
# end class AttrDict
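# Usage sketch:
#
#     d = AttrDict( {"status": "OK", "count": 3} )
#     d.status   # -> "OK", equivalent to d["status"]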
# ---------------------------------------------------------------------------
# GPUdbException - Exception for GPUdb Issues
# ---------------------------------------------------------------------------
class GPUdbException( Exception ):
    def __init__( self, value ):
        self.value = value
    # end __init__
    def __str__( self ):
        return repr( self.value )
    # end __str__
    
# end class GPUdbException
# ---------------------------------------------------------------------------
# GPUdbColumnProperty - Class to Handle GPUdb Column Properties
# ---------------------------------------------------------------------------
class GPUdbColumnProperty(object):
    """Column properties used for GPUdb record data types.  The properties
    are class-level read-only properties, so the user can use them as such::
        GPUdbColumnProperty.prop_name
    """
    DATA = "data"
    """str: Default property for all numeric and string type columns; makes the
    column available for GPU queries.
    """
    TEXT_SEARCH = "text_search"
    """str: Valid only for 'string' columns. Enables full text search for string
    columns. Can be set independently of *data* and *store_only*.
    """
    STORE_ONLY = "store_only"
    """str: Persist the column value but do not make it available to queries (e.g.
    :meth:`.filter`); i.e., it is mutually exclusive with the 'data' property. Any
    'bytes' type column must have a 'store_only' property. This property
    reduces system memory usage.
    """
    DISK_OPTIMIZED = "disk_optimized"
    """str: Works in conjunction with the 'data' property for string columns. This
    property reduces system disk usage by disabling reverse string lookups.
    Queries like :meth:`.filter`, :meth:`.filter_by_list`, and
    :meth:`.filter_by_value` work as usual but :meth:`.aggregate_unique`,
    :meth:`.aggregate_group_by` and :meth:`.get_records_by_column` are not
    allowed on columns with this property.
    """
    TIMESTAMP = "timestamp"
    """str: Valid only for 'long' columns. Indicates that this field represents a
    timestamp and will be provided in milliseconds since the Unix epoch:
    00:00:00 Jan 1 1970.  Dates represented by a timestamp must fall between
    the year 1000 and the year 2900.
    """
    DECIMAL = "decimal"
    """str: Valid only for 'string' columns.  It represents a SQL type NUMERIC(19,
    4) data type.  There can be up to 15 digits before the decimal point and up
    to four digits in the fractional part.  The value can be positive or
    negative (indicated by a minus sign at the beginning).  This property is
    mutually exclusive with the 'text_search' property.
    """
    DATE = "date"
    """str: Valid only for 'string' columns.  Indicates that this field represents
    a date and will be provided in the format 'YYYY-MM-DD'.  The allowable
    range is 1000-01-01 through 2900-01-01.  This property is mutually
    exclusive with the *text_search* property.
    """
    TIME = "time"
    """str: Valid only for 'string' columns.  Indicates that this field represents
    a time-of-day and will be provided in the format 'HH:MM:SS.mmm'.  The
    allowable range is 00:00:00.000 through 23:59:59.999.  This property is
    mutually exclusive with the *text_search* property.
    """
    DATETIME = "datetime"
    """str: Valid only for 'string' columns.  Indicates that this field represents
    a datetime and will be provided in the format 'YYYY-MM-DD HH:MM:SS.mmm'.
    The allowable range is 1000-01-01 00:00:00.000 through 2900-01-01
    23:59:59.999.  This property is mutually exclusive with the *text_search*
    property.
    """
    CHAR1 = "char1"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 1
    character. This property cannot be combined with *text_search*
    """
    CHAR2 = "char2"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 2
    characters. This property cannot be combined with *text_search*
    """
    CHAR4 = "char4"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 4
    characters. This property cannot be combined with *text_search*
    """
    CHAR8 = "char8"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 8
    characters. This property cannot be combined with *text_search*
    """
    CHAR16 = "char16"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 16
    characters. This property cannot be combined with *text_search*
    """
    CHAR32 = "char32"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 32
    characters. This property cannot be combined with *text_search*
    """
    CHAR64 = "char64"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 64
    characters. This property cannot be combined with *text_search*
    """
    CHAR128 = "char128"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 128
    characters. This property cannot be combined with *text_search*
    """
    CHAR256 = "char256"
    """str: This property provides optimized memory, disk and query performance
    for string columns. Strings with this property must be no longer than 256
    characters. This property cannot be combined with *text_search*
    """
    INT8 = "int8"
    """str: This property provides optimized memory and query performance for int
    columns. Ints with this property must be between -128 and +127 (inclusive)
    """
    INT16 = "int16"
    """str: This property provides optimized memory and query performance for int
    columns. Ints with this property must be between -32768 and +32767
    (inclusive)
    """
    IPV4 = "ipv4"
    """str: This property provides optimized memory, disk and query performance
    for string columns representing IPv4 addresses (i.e. 192.168.1.1). Strings
    with this property must be of the form: A.B.C.D where A, B, C and D are in
    the range of 0-255.
    """
    WKT = "wkt"
    """str: Valid only for 'string' and 'bytes' columns. Indicates that this field
    contains geospatial geometry objects in Well-Known Text (WKT) or Well-Known
    Binary (WKB) format.
    """
    PRIMARY_KEY = "primary_key"
    """str: This property indicates that this column will be part of (or the
    entire) primary key.
    """
    SHARD_KEY = "shard_key"
    """str: This property indicates that this column will be part of (or the
    entire) shard key.
    """
    NULLABLE = "nullable"
    """str: This property indicates that this column is nullable.  However,
    setting this property is insufficient for making the column nullable.  The
    user must declare the type of the column as a union between its regular
    type and 'null' in the avro schema for the record type in input parameter
    *type_definition*.  For example, if a column is of type integer and is
    nullable, then the entry for the column in the avro schema must be: ['int',
    'null'].
    The C++, C#, Java, and Python APIs have built-in conveniences that set up
    the avro schema automatically.  In those languages, one can use this
    property as usual and not have to worry about the avro schema for the
    record.
    """
    DICT = "dict"
    """str: This property indicates that this column should be dictionary encoded.
    It can only be used in conjunction with string columns marked with a charN
    property. This property is appropriate for columns where the cardinality
    (the number of unique values) is expected to be low, and can save a large
    amount of memory.
    """ 
# end class GPUdbColumnProperty
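# For example, the shorthand column definitions accepted by GPUdbRecordType
# below pair a type with zero or more of these properties (illustrative):
#
#     [ "city",       "string", GPUdbColumnProperty.CHAR32    ]
#     [ "event_time", "long",   GPUdbColumnProperty.TIMESTAMP ]
#
# and, per the NULLABLE note above, a hand-written avro schema for a nullable
# int column must declare a union type:
#
#     {"name": "score", "type": ["int", "null"]}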
# ---------------------------------------------------------------------------
# GPUdbRecordColumn - Class to Handle GPUdb Record Column Data Types
# ---------------------------------------------------------------------------
class GPUdbRecordColumn(object):
    """Represents a column in a GPUdb record object (:class:`.GPUdbRecordType`).
    """
    class _ColumnType(object):
        """A class acting as an enum for the data types allowed for a column."""
        INT    = "int"
        LONG   = "long"
        FLOAT  = "float"
        DOUBLE = "double"
        STRING = "string"
        BYTES  = "bytes"
    # end class _ColumnType
    # The allowed data types
    _allowed_data_types = [ _ColumnType.INT,
                            _ColumnType.LONG,
                            _ColumnType.FLOAT,
                            _ColumnType.DOUBLE,
                            _ColumnType.STRING,
                            _ColumnType.BYTES
    ]
    # All non-numeric data types
    _non_numeric_data_types = [ _ColumnType.STRING,
                                _ColumnType.BYTES
    ]
    # All allowed numeric data types
    _numeric_data_types = [ _ColumnType.INT,
                            _ColumnType.LONG,
                            _ColumnType.FLOAT,
                            _ColumnType.DOUBLE
    ]
    # All allowed integral numeric data types
    _numeric_integral_data_types = [ _ColumnType.INT,
                                    _ColumnType.LONG
    ]
    # All allowed decimal numeric data types
    _numeric_decimal_data_types = [ _ColumnType.FLOAT,
                                    _ColumnType.DOUBLE
    ]
    def __init__( self, name, column_type, column_properties = None, is_nullable = False ):
        """Construct a GPUdbRecordColumn object.
        Parameters:
            name (str)
                The name of the column, must be a non-empty string.
            column_type (str)
                The data type of the column.  Must be one of int, long,
                float, double, string, bytes.
            column_properties (list)
                Optional list of properties for the column.
            is_nullable (bool)
                Optional boolean flag indicating whether the column is
                nullable.
        """
        # Validate and save the stringified name
        if (not name):
            raise GPUdbException( "The name of the column must be a non-empty string; given " + repr(name) )
        self._name = name
        # Validate and save the data type
        if column_type not in self._allowed_data_types:
            raise GPUdbException( "Data type must be one of " + str(self._allowed_data_types) +
                              "; given " + str(column_type) )
        self._column_type = column_type
        # Validate and save the column properties
        if not column_properties: # it's ok to not have any
            column_properties = []
        if not isinstance( column_properties, list ):
            raise GPUdbException( "'column_properties' must be a list; given " + str(type(column_properties)) )
        # Sort and stringify the column properties so that the order for a given set of
        # properties is always the same--handy for equivalency checks
        self._column_properties = sorted( column_properties )
        # Check for nullability
        self._is_nullable = False # default value
        if (GPUdbColumnProperty.NULLABLE in self.column_properties):
            self._is_nullable = True
        # Check the optional 'is_nullable' argument
        if is_nullable not in [True, False]:
            raise GPUdbException( "'is_nullable' must be a boolean value; given " + repr(type(is_nullable)) )
        if is_nullable:
            self._is_nullable = True
            # Enter the 'nullable' property into the list of properties, even though
            # GPUdb doesn't actually use it (taking care not to add a duplicate)
            if (GPUdbColumnProperty.NULLABLE not in self._column_properties):
                self._column_properties.append( GPUdbColumnProperty.NULLABLE )
                # Re-sort for equivalency tests down the road
                self._column_properties = sorted( self._column_properties )
            # end inner if
        # end if
    # end __init__
    @property
    def name(self):  # read-only name
        """The name of the column."""
        return self._name
    # end name
    @property
    def column_type(self):  # read-only column_type
        """The data type of the column."""
        return self._column_type
    # end column_type
    @property
    def column_properties(self): # read-only column_properties
        """The properties of the column."""
        return self._column_properties
    # end column_properties
    @property
    def is_nullable(self):  # read-only is_nullable
        """The nullability of the column."""
        return self._is_nullable
    # end is_nullable
    def __eq__( self, other ):
        if isinstance(other, self.__class__):
            if ( self._name != other.name ):
                return False
            if ( self._column_type != other.column_type ):
                return False
            if ( self._is_nullable != other.is_nullable ):
                return False
            if ( self._column_properties == other.column_properties ):
                return True
            # The column properties are tricky; need to disregard
            # 'data' and 'text_search'
            disregarded_props = [ GPUdbColumnProperty.TEXT_SEARCH, GPUdbColumnProperty.DATA ]
            LHS_column_properties = [ prop for prop in self._column_properties
                                      if prop not in disregarded_props ]
            RHS_column_properties = [ prop for prop in other.column_properties
                                      if prop not in disregarded_props ]
            if (LHS_column_properties == RHS_column_properties):
                return True
            return False # Column properties did not match
        else:
            return False
    # end __eq__
    def __ne__(self, other):
        return not self.__eq__(other) 
    # end __ne__
# end class GPUdbRecordColumn
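# Construction sketches (the column names are illustrative):
#
#     col1 = GPUdbRecordColumn( "x", "int", [ GPUdbColumnProperty.INT8 ] )
#     col2 = GPUdbRecordColumn( "name", "string", is_nullable = True )
#     col2.column_properties   # -> ['nullable'] (added automatically)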
# ---------------------------------------------------------------------------
# GPUdbRecordType - Class to Handle GPUdb Record Data Types
# ---------------------------------------------------------------------------
class GPUdbRecordType(object):
    """Represent the data type for a given record in GPUdb.  Has convenience
    functions for creating the type in GPUdb (among others).
    """
    def __init__( self, columns = None, label = "",
                  schema_string = None, column_properties = None ):
        """Create a GPUdbRecordType object which represents the data type for
        a given record for GPUdb.
        Parameters:
            columns (list)
                A list of :class:`.GPUdbRecordColumn` objects. Either this argument
                or the schema_string argument must be given.
            label (str)
                Optional string label for the column.
            schema_string (str)
                The JSON string containing the schema for the type.
                Either this argument or the columns argument must
                be given.
            column_properties (dict)
                Optional dict that lists the properties for the
                columns of the type.  Meant to be used in conjunction
                with schema_string only; will be ignored if
                columns is given.
        """
        # Validate and save the label
        if not isinstance( label, basestring ):
            raise GPUdbException( "Column label must be a string; given " + str(type( label )) )
        self._label = label
        # The server always uses this hardcoded name, which trumps any label
        self.name = "type_name"
        # Either columns or schema_string must be given, but not both!
        if (columns is None) and (schema_string is None):
            raise GPUdbException( "Either columns or schema_string must be given; neither was!" )
        elif (columns is not None) and (schema_string is not None):
            raise GPUdbException( "Either columns or schema_string must be given, but not both!" )
        # Construct the object from the given columns
        if columns is not None:
            self.__initiate_from_columns( columns )
        else:
            self.__initiate_from_schema_string( schema_string, column_properties )
        # The type hasn't been registered with GPUdb yet
        self._type_id = None
    # end __init__
    def __initiate_from_columns( self, columns ):
        """Private method that constructs the object using the given columns.
        Parameters:
            columns (list)
                A list of GPUdbRecordColumn objects or a list with the following
                format: [name, type, ...] where ... is optional properties. For
                example, ['x', 'int', 'int8']
        """
        # Validate the columns
        if not columns: # Must NOT be empty
            raise GPUdbException( "Non-empty list of columns must be given.  Given none." )
        if not isinstance( columns, list ): # Must be a list
            raise GPUdbException( "Non-empty list of columns must be given.  Given " + str(type( columns )) )
        # Check if the list contains only GPUdbRecordColumns, then nothing to do
        if all( isinstance( x, GPUdbRecordColumn ) for x in columns ):
            self._columns = columns
        else: # unroll the information contained within
            # If the caller provided a single list of arguments, wrap it in a list
            # of lists so we can iterate over it properly
            columns = columns if all( isinstance( elm, list ) for elm in columns ) else [ columns ]
            # Unroll the information about the column(s) and create GPUdbRecordColumn objects
            self._columns = []
            for col_info in columns:
                # Arguments 3 and beyond--these are properties--must be combined into one list argument
                if len( col_info ) > 2:
                    self._columns.append( GPUdbRecordColumn( col_info[0], col_info[1], col_info[2:] ) )
                elif len( col_info ) < 2:
                    # Need at least two elements: the name and the type
                    raise GPUdbException( "Need a list with the column name, type, and optional properties; "
                                          "given '%s'" % col_info )
                else:
                    self._columns.append( GPUdbRecordColumn( *col_info ) )
        # end if-else
        # Column property container
        self._column_properties = {}
        # Avro schema string field container
        fields = []
        # Validate each column and deduce its properties
        for col in self._columns:
            # Check that each element is a GPUdbRecordColumn object
            if not isinstance( col, GPUdbRecordColumn ):
                raise GPUdbException( "columns must contain only GPUdbRecordColumn objects.  Given " + str(type( col )) )
            # Extract the column's properties, if any
            if col.column_properties:
                self._column_properties[ col.name ] = sorted( col.column_properties )
            # done handling column props
            # Create the field for the schema string
            field_type = '"{_type}"'.format( _type = col.column_type )
            # Handle nullable fields
            if col.is_nullable:
                field_type = ('[{_type}, "null"]'.format( _type = field_type ))
            field = ('{{"name": "{_name}", "type": {_type} }}'.format( _name = col.name, _type = field_type ))
            fields.append( field )
        # end for loop
        # Put the fields together
        fields = ", ".join( fields )
        # Generate the avro schema string
        schema_string = """
        {{
            "type" : "record",
            "name" : "{_label}",
            "fields" : [ {_fields} ]
        }}
        """.format( _label  = self.name,
                    _fields = fields )
        schema_string = schema_string.replace( " ", "" ).replace( "\n", "" )
        # Generate the avro schema and save it
        self._record_schema = schema.parse( schema_string )
        # Save this version of the schema string so that it is standard
        self._schema_string = json.dumps( self._record_schema.to_json() )
        return
    # end __initiate_from_columns
    def __initiate_from_schema_string( self, schema_string, column_properties = None ):
        """Private method that constructs the object using the given schema string.
        Parameters:
            schema_string (str)
                The schema string for the record type.
            column_properties (dict)
                An optional dict containing property information for
                some or all of the columns.
        """
        # Validate the schema string
        if not schema_string: # Must NOT be empty!
            raise GPUdbException( "A schema string must be given.  Given none." )
        # Try to parse the schema string, this would also help us validate it
        self._record_schema = schema.parse( schema_string )
        # Rename the schema with a generic name just like the database
        self._record_schema._props[ "name" ] = self.name
        # If no exception was thrown above, then save the schema string
        self._schema_string = json.dumps( self._record_schema.to_json() )
        # Save the column properties, if any
        self._column_properties = column_properties if column_properties else {}
        # Now, deduce the columns from the schema string
        schema_json = self._record_schema.to_json()
        columns = []
        for field in schema_json["fields"]:
            # Get the field's type
            field_type = field["type"]
            # Is the type nullable?
            is_nullable = False
            if ( isinstance( field_type, list )
                 and ("null" in field_type) ):
                is_nullable = True
                # Then, also get the scalar type of the field
                field_type = field_type[ 0 ]
            # end if
            field_name = field["name"]
            
            # Get any properties for the column
            col_props = None
            if (self._column_properties and (field_name in self._column_properties)):
                col_props = self._column_properties[ field_name ]
            # end if
            # Create the column object and add it to the list
            column = GPUdbRecordColumn( field["name"], field_type, col_props,
                                        is_nullable = is_nullable )
            columns.append( column )
        # end for
        # Save the columns
        self._columns = columns
        return
    # end __initiate_from_schema_string
    @property
    def columns(self): # read-only columns
        """A list of columns for the record type."""
        return self._columns
    # end columns
    @property
    def label(self): # read-only label
        """A label for the record type."""
        return self._label
    # end label
    @property
    def schema_string(self): # read-only schema string
        """The schema string for the record type."""
        return self._schema_string
    # end schema_string
    @property
    def record_schema(self): # read-only avro schema
        """The avro schema for the record type."""
        return self._record_schema
    # end record_schema
    @property
    def column_properties(self): # read-only column properties
        """The properties for the type's columns."""
        return self._column_properties
    # end column_properties
    @property
    def type_id(self): # read-only ID for the type
        """The ID for the type, if it has already been registered
        with GPUdb."""
        if not self._type_id:
            raise GPUdbException( "The record type has not been registered with GPUdb yet." )
        return self._type_id
    # end type_id
    def create_type( self, gpudb, options = None ):
        """Create the record type in GPUdb so that users can create
        tables using this type.
        Parameters:
            gpudb (GPUdb)
                A GPUdb object to connect to a GPUdb server.
            options (dict)
                Optional dictionary containing options for the /create/type call.
        Returns:
            The type ID.
        """
        # Validate the GPUdb handle
        if not isinstance( gpudb, GPUdb ):
            raise GPUdbException( "'gpudb' must be a GPUdb object; given " + str(type( gpudb )) )
        if not options:
            options = {}
        response = gpudb.create_type( self._schema_string, self._label, self._column_properties, options )
        if not _Util.is_ok( response ): # problem creating the type
            raise GPUdbException( _Util.get_error_msg( response ) )
        self._type_id = response[ "type_id" ]
        return self._type_id 
    # end create_type
    def __eq__( self, other ):
        if isinstance(other, self.__class__):
            # Match all but the column properties (which need special treatment)
            # (must use the dict constructor to support python 2.6)
            lhs_ = dict( [ (k, v) for (k, v) in self.__dict__.items()
                           if (k != "_column_properties") ] )
            rhs_ = dict( [ (k, v) for (k, v) in other.__dict__.items()
                           if (k != "_column_properties") ] )
            if (lhs_ != rhs_): # some mismatch
                return False
            # So, other properties matched.  Now compare the properties
            # (need to disregard 'data' and 'text_search')
            disregarded_props = [ GPUdbColumnProperty.TEXT_SEARCH, GPUdbColumnProperty.DATA ]
            # Get the sanitized column properties
            lhs_col_props = {}
            for name, props in self._column_properties.items():
                sanitized_props = [ prop for prop in props  if (prop not in disregarded_props) ]
                if sanitized_props:
                    lhs_col_props[ name ] = sanitized_props
            # end loop
            # Get the sanitized column properties
            rhs_col_props = {}
            for name, props in other.column_properties.items():
                sanitized_props = [ prop for prop in props  if (prop not in disregarded_props) ]
                if sanitized_props:
                    rhs_col_props[ name ] = sanitized_props
            # end loop
            if (lhs_col_props == rhs_col_props):
                return True # distilled props matched
            return False # properties did not match
        else:
            return False
    # end __eq__
    def __ne__(self, other):
        return not self.__eq__(other) 
    # end __ne__
# end class GPUdbRecordType
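# Two ways to build the same type (illustrative; `db` would be a connected
# GPUdb handle):
#
#     t  = GPUdbRecordType( columns = [ ["x", "int"],
#                                       ["msg", "string", "char64"] ] )
#     t2 = GPUdbRecordType( schema_string = t.schema_string,
#                           column_properties = t.column_properties )
#     # type_id = t.create_type( db )   # registers the type with the server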
# ---------------------------------------------------------------------------
# GPUdbRecord - Class to Handle GPUdb Record Data
# ---------------------------------------------------------------------------
class GPUdbRecord( object ):
    """Represent the data for a given record in GPUdb.  Has convenience
    functions for encoding/decoding the data.
    """
    @staticmethod
    def decode_binary_data( record_type_schema_string, binary_data ):
        """Decode binary encoded data (generally returned by GPUdb) using
        the schema for the data.  Return the decoded data.
        Parameters:
            record_type_schema_string (str)
                The schema string for the record type.
            binary_data (obj or list)
                The binary encoded data.  Could be a single object or
                a list of data.
        Returns:
            The decoded data (a single object or a list)
        """
        # Create an avro schema from the schema string
        record_schema = schema.parse( record_type_schema_string )
        # Decode the single data object
        if not isinstance( binary_data, list ):
            return _Util.decode_binary_data( record_schema, binary_data )
        # end if
        # Decode the list of data
        decoded_data = []
        for binary_datum in binary_data:
            decoded_data.append( _Util.decode_binary_data( record_schema, binary_datum ) )
        # end for
        return decoded_data 
    # end decode_binary_data
    @staticmethod
    def decode_json_string_data( json_string_data ):
        """Decode binary encoded data in string form (generally returned by GPUdb).
        Return the decoded data.
        Parameters:
            json_string_data (str)
                The stringified json encoded data.  Could be
                a single object or a list of data.
        Returns:
            The decoded data (a single object or a list)
        """
        # Decode the single data object
        if not isinstance( json_string_data, list ):
            json_string_data = json_string_data.replace( "\\U", "\\u")
            json_string_data = _Util.ensure_str( json_string_data )
            decoded_datum = json.loads( json_string_data )
            return decoded_datum
        # end if
        # Decode the list of data
        decoded_data = []
        for json_datum in json_string_data:
            json_datum = json_datum.replace( "\\U", "\\u")
            json_datum = _Util.ensure_str( json_datum )
            decoded_datum = json.loads( json_datum,
                                        object_pairs_hook = collections.OrderedDict )
            decoded_data.append( decoded_datum )
        # end for
        return decoded_data 
    # end decode_json_string_data
    @staticmethod
    def convert_data_col_major_to_row_major( col_major_data, col_major_schema_str ):
        """Given some column major data, convert it to row major data.
        Parameters:
            col_major_data (OrderedDict)
                An OrderedDict of arrays containing the data by column names.
            col_major_schema_str (str)
                A JSON schema string describing the column major data.
        Returns:
            A list of GPUdbRecord objects.
        """
        if not isinstance( col_major_data, collections.OrderedDict ):
            raise GPUdbException( "Argument 'col_major_data' must be an OrderedDict;"
                                  " given %s" % str( type( col_major_data ) ) )
        try:
            schema_json = json.loads( col_major_schema_str )
        except Exception as e:
            raise GPUdbException( "Could not parse 'col_major_schema_str': "
                                  "%s" % str(e) )
        # Create the schema for each record from the column-major format's schema
        columns = []
        for col_name, field in zip(col_major_data.keys(), schema_json[ C._fields ]):
            field_type = field[ "type" ][ "items" ]
            if isinstance( field_type, (str, unicode) ):
                columns.append( [ col_name, field_type ] )
            elif (isinstance( field_type, list ) and ("null" in field_type )):
                # The column is nullable
                columns.append( [ col_name, field_type[0], GPUdbColumnProperty.NULLABLE ] )
            else:
                raise GPUdbException( "Unknown column type: {0}".format( field_type ) )
        # end loop
        # Create a record type
        record_type = GPUdbRecordType( columns )
        # Create the records
        records = []
        for record in zip( *col_major_data.values() ):
            records.append( GPUdbRecord( record_type, list( record ) ) )
        # end loop
        return records 
    # end convert_data_col_major_to_row_major
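    # A sketch of the conversion (the column names and schema are invented):
    #
    #     col_data = collections.OrderedDict( [ ("x", [1, 2]), ("y", [0.5, 1.5]) ] )
    #     col_schema = ( '{"fields": ['
    #                    ' {"name": "x", "type": {"type": "array", "items": "int"}},'
    #                    ' {"name": "y", "type": {"type": "array", "items": "double"}} ]}' )
    #     records = GPUdbRecord.convert_data_col_major_to_row_major( col_data, col_schema )
    #     records[0].data   # -> OrderedDict([('x', 1), ('y', 0.5)])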
    def __init__( self, record_type, column_values ):
        """Create a GPUdbRecord object which holds the data for
        a given record.
        Parameters:
            record_type (GPUdbRecordType)
                A :class:`.GPUdbRecordType` object that describes the columns
                of this record.
            column_values (dict or list)
                Either a dict or a list that contains the values for
                the columns.  In either case, must contain values for
                ALL columns.  If a list, then the columns must be in the
                correct order.
        """
        # Validate and save the record type
        if not isinstance( record_type, GPUdbRecordType ):
            raise GPUdbException( "'record_type' must be a GPUdbRecordType; given " + str(type( record_type )) )
        self._record_type = record_type
        # Validate the column values
        if not _Util.is_list_or_dict( column_values ):
            # Must be a list or a dict
            raise GPUdbException( "Columns must be one of the following: list, dict, OrderedDict.  "
                                  "Given " + str(type( column_values )) )
        if not column_values: # Must NOT be empty
            raise GPUdbException( "Column values must be given.  Given none." )
        # The column values must be saved in the order they're declared in the type
        self._column_values = collections.OrderedDict()
        # Get the expected number of columns based on the data type provided
        num_columns = len( self._record_type.columns )
        # Check that there are correct number of values
        if (len( column_values ) != num_columns ):
            raise GPUdbException( "Given list of column values does not have the correct (%d) "
                              "number of values; it has %d" % (num_columns, len( column_values )) )
        # Check and save the column values
        # --------------------------------
        # Case 1: The values are given in a list
        if isinstance( column_values, list ):
            # Check that the order of the columns is ok
            # (we can only check string vs. numeric types, really;
            # we can also check for nulls)
            for i in range(0, num_columns):
                column_name = self._record_type.columns[ i ].name
                # The given value for this column
                column_val = column_values[ i ]
                # Check that the value is of the given type, save the value if it is
                if self.__is_valid_column_value( column_val, self._record_type.columns[ i ] ):
                    self._column_values[ column_name ] = column_val
            # end for loop
        else: # the values are given either in a dict or an OrderedDict
            # Check that the column names given match those of the record's type
            given_column_names = set( column_values.keys() )
            record_type_column_names = set( [c.name for c in self._record_type.columns] )
            if ( given_column_names != record_type_column_names ):
                if (given_column_names - record_type_column_names):
                    raise GPUdbException( "Given column names do not match that of the record type.  "
                                      "Extra column names are: " + str( (given_column_names - record_type_column_names) ))
                else:
                    raise GPUdbException( "Given column names do not match that of the record type.  "
                                      "Missing column names are: " + str( (record_type_column_names - given_column_names) ))
            # end if
            # We will disregard the order in which the column values were listed
            # in column_values (this should help the user somewhat)
            for i in range(0, num_columns):
                column_name = self._record_type.columns[ i ].name
                column_val  = column_values[ column_name ]
                # Check that the value is of the given type, save the value if it is
                if self.__is_valid_column_value( column_val, self._record_type.columns[ i ] ):
                    self._column_values[ column_name ] = column_val
        # end checking and save column values
        # Encode the record into binary and save it
        # -----------------------------------------
        self._binary_encoded_data = _Util.encode_binary_data( self._record_type.record_schema,
                                                              self._column_values )
    # end __init__
    @property
    def record_type(self): # read-only record type
        """The type for this record."""
        return self._record_type
    # end record_type
    @property
    def column_values(self): # read-only column_values
        """The values for this record."""
        return self._column_values
    # end column_values
    @property
    def data(self): # read-only column_values, just a convenient name
        """The values for this record."""
        return self._column_values
    # end data
    @property
    def binary_data(self): # read-only binary_data
        """The binary encoded values for this record."""
        return self._binary_encoded_data
    # end binary_data
    @property
    def json_data_string(self): # JSON encoded column_values in a string
        """The stringified JSON encoded values for this record."""
        return json.dumps( _Util.convert_dict_bytes_to_str(self._column_values) )
    # end json_data_string
    def keys( self ):
        """Return a list of the column names of the record.
        """
        return self.data.keys() 
    # end keys
    def values( self ):
        """Return a list of the values of the record.
        """
        return self.data.values() 
    # end values
    def insert_record( self, gpudb, table_name, encoding = "binary", options = None ):
        """Insert this record into GPUdb.
        Parameters:
            gpudb (GPUdb)
                A :class:`.GPUdb` client handle.
            table_name (str)
                The name of the table into which we need to insert the record.
            encoding (str)
                Optional encoding with which to perform the insertion.  Default
                is binary encoding.
            options (dict)
                Optional parameter.  If given, use the options for the insertion
                function.
        Returns:
            The response from GPUdb.
        """
        # Validate the GPUdb handle
        if not isinstance( gpudb, GPUdb ):
            raise GPUdbException( "'gpudb' must be a GPUdb object; given " + str( type( gpudb ) ) )
        if not options:
            options = {}
        # Based on the encoding, format the data appropriately
        if (encoding == "binary"):
            data = [ self._binary_encoded_data ]
        elif (encoding == "json"):
            data = [ json.dumps( _Util.convert_dict_bytes_to_str( self._column_values ) ) ]
        else:
            raise GPUdbException( "Unknown encoding: " + str( encoding ) )
        # Insert the record
        response = gpudb.insert_records( table_name = table_name,
                                         data = data,
                                         list_encoding = encoding,
                                         options = options )
        return response 
    # end insert_record
    def __is_valid_column_value( self, column_value, column, do_throw = True ):
        """Private function that validates the given value for a column.
        Parameters:
            column_value
                The value for the given column
            column (GPUdbRecordColumn)
                A :class:`.GPUdbRecordColumn` object that has information about
                the column.  This is used to validate the column value.
            do_throw (bool)
                Throw an exception for invalid columns
        Returns:
            True if the value can be validated, False otherwise.
        """
        if not isinstance( column, GPUdbRecordColumn ):
            raise GPUdbException( "'column' must be a GPUdbRecordColumn object; given "
                              + str(type( column )) )
        # Check that the value is of the given type
        # -----------------------------------------
        column_type = column.column_type
        if column_value is None: # Handle null values
            if not column.is_nullable: # but the column is not nullable
                if do_throw:
                    raise GPUdbException( "Non-nullable column '%s' given a null value" % column.name )
                else:
                    return False
        # Numeric types:
        elif (column_type in GPUdbRecordColumn._numeric_data_types):
            if not (isinstance( column_value, (int, long, float)) and not isinstance( column_value, bool ) ):
                if do_throw:
                    raise GPUdbException( ("Column '%s' must be a numeric type (one of int, long, float); "
                                       "given " % column.name )
                                      + str(type( column_value )) )
                else:
                    return False
        else: # string/bytes type
            if not isinstance( column_value, (str, Decimal, unicode, bytes) ):
                if do_throw:
                    raise GPUdbException( ("Column '%s' must be string or bytes; given " % column.name)
                                      + str(type( column_value )) )
                else:
                    return False
        # end if-else checking type-correctness
        # The value checks out; it is valid
        return True
    # end __is_valid_column_value
    def __eq__( self, other ):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        else:
            return False
    # end __eq__
    def __ne__(self, other):
        return not self.__eq__(other) 
    # end __ne__
# end class GPUdbRecord
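# End-to-end sketch (assumes `db` is a connected GPUdb handle and that the
# hypothetical table "points" exists with a matching type):
#
#     rec_type = GPUdbRecordType( columns = [ ["x", "double"], ["y", "double"] ] )
#     rec = GPUdbRecord( rec_type, {"x": 1.0, "y": 2.0} )
#     response = rec.insert_record( db, "points" )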
# ---------------------------------------------------------------------------
# GPUdb - Lightweight client class to interact with a GPUdb server.
# ---------------------------------------------------------------------------
class GPUdb(object):
    def __init__(self, host = "127.0.0.1", port = "9191",
                       encoding = "BINARY", connection = 'HTTP',
                       username = "", password = "", timeout = None,
                       **kwargs ):
        """
        Construct a new GPUdb client instance.
        Parameters:
            host (str)
                The IP address of the GPUdb server. May be provided as a list
                to support HA.  Also, can include the port following a colon
                (the *port* argument then should be unused).  Host may take
                the form "https://user:password@domain.com:port/path/".
            port (str)
                The port of the GPUdb server at the given IP address. May be
                provided as a list in conjunction with host; but if using the
                same port for all hosts, then a single port value is OK.  Also,
                can be omitted entirely if the host already contains the port.
                If the *host* does include a port, then this argument will be
                ignored.
            encoding (str)
                Type of Avro encoding to use, "BINARY", "JSON" or "SNAPPY".
            connection (str)
                Connection type, currently only "HTTP" or "HTTPS" supported.
                May be provided as a list in conjunction with host; but if using
                the same connection type for all hosts, then a single value is OK.
            username (str)
                An optional http username.
            password (str)
                The http password for the username.
            timeout (int)
                HTTP request timeout in seconds. Defaults to global socket
                timeout.
        """
        if type(host) is list:
            if not type(port) is list:
                port = [port]*len(host)
            if not type(connection) is list:
                connection = [connection]*len(host)
            assert len(host) == len(port) == len(connection), \
                "Host, port and connection lists must have the same number of items"
        else:
            assert not (type(port) is list) and not (type(connection) is list), \
                "Host is not a list, so port and connection must not be lists either"
            host = [host]
            port = [port]
            connection = [connection]
        assert (encoding in ["BINARY", "JSON", "SNAPPY"]), "Expected encoding to be either 'BINARY', 'JSON' or 'SNAPPY' got: '"+str(encoding)+"'"
        if (encoding == 'SNAPPY' and not have_snappy):
            print('SNAPPY encoding specified but python-snappy is not installed; reverting to BINARY')
            encoding = 'BINARY'
        self._conn_tokens = tuple( _ConnectionToken(h, p, c)
                                   for h, p, c in zip(host, port, connection) )
        # Pick a random starting server (note: randint's upper bound is inclusive)
        self._current_conn_token_index = random.randint(0, len(self._conn_tokens) - 1)
        self.encoding   = encoding
        self.username   = username
        self.password   = password
        self.timeout    = timeout
        # Set up the credentials to be used per POST
        self.auth = None
        if len(self.username) != 0:
            if sys.version_info[0] >= 3: # Python 3.x
                # base64 encode the username and password
                self.auth = ('%s:%s' % (self.username, self.password) )
                self.auth = _Util.str_to_bytes( self.auth )
                self.auth = base64.b64encode( self.auth ).decode( "ascii" )
                self.auth = ("Basic %s" % self.auth)
            else: # Python 2.x
                self.auth = base64.b64encode( '%s:%s' % (self.username, self.password) )
                self.auth = ("Basic %s" % self.auth)
        # end if
        self.client_to_object_encoding_map = {
            "BINARY": "binary",
            "SNAPPY": "binary",
            "JSON":   "json",
        }
        # Load all gpudb schemas
        self.load_gpudb_schemas()
        # Get the multi-head ingestion related hidden input parameter, if given
        using_multihead_ingestion = kwargs.get( "using_multihead_ingestion", None )
        # Make sure that a connection to the server can be established
        if not using_multihead_ingestion:
            server_status_response = self.show_system_status()
            if not _Util.is_ok( server_status_response ):
                raise GPUdbException( _Util.get_error_msg( server_status_response ) )
        # Check version compatibility with the server
        # -------------------------------------------
        if not using_multihead_ingestion:
            system_props = self.show_system_properties()
            server_version = system_props[ C._property_map ][ C._gaia_version ]
            # Extract the version for both server and client: major.minor.revision (ignore ABI)
            server_version = ".".join( server_version.split( "." )[ 0 : 3 ] )
            client_version = ".".join( self.api_version.split( "." )[ 0 : 3 ] )
            if (server_version != client_version):
                print ( "Warning: Client version ({0}) does not match that of the server ({1})"
                        "".format( client_version, server_version ) )
    # end __init__
    def _get_current_conn_token( self ):
        """Returns the connection information for the current server."""
        return self._conn_tokens[self._current_conn_token_index]
    def get_version_info( self ):
        """Return the version information for this API."""
        return self.api_version 
    # end get_version_info
    def get_host( self ):
        """Return the host this client is talking to."""
        return self._get_current_conn_token()._host 
    # end get_host
    def get_port( self ):
        """Return the port the host is listening to."""
        return self._get_current_conn_token()._port 
    # end get_port
    def get_url( self ):
        """Return the url of the host this client is listening to."""
        return "{host}:{port}".format( host = self.get_host(),
                                       port = self.get_port() ) 
    # end get_url
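    # For example, a client built against the (hypothetical) host "127.0.0.1"
    # and port "9191" would return "127.0.0.1:9191" from get_url().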
    @property
    def host(self):
        return self.get_host()
    @host.setter
    def host(self, value):
        self._get_current_conn_token()._host = value
    @property
    def port(self):
        return self.get_port()
    @port.setter
    def port(self, value):
        self._get_current_conn_token()._port = value
    @property
    def gpudb_url_path(self):
        return self._get_current_conn_token()._gpudb_url_path
    @gpudb_url_path.setter
    def gpudb_url_path(self, value):
        self._get_current_conn_token()._gpudb_url_path = value
    @property
    def connection(self):
        return self._get_current_conn_token()._connection
    @connection.setter
    def connection(self, value):
        self._get_current_conn_token()._connection = value
    # members
    _current_conn_token_index = 0
    _conn_tokens   = ()          # Collection of parsed url entities
    timeout       = None        # HTTP request timeout (None=default socket timeout)
    encoding      = "BINARY"    # Input encoding: 'BINARY', 'JSON' or 'SNAPPY'.
    username      = ""          # Input username or empty string for none.
    password      = ""          # Input password or empty string for none.
    api_version   = "6.1.0.3"
    # constants
    END_OF_SET = -9999
    """(int) Used for indicating that all of the records (till the end of the
    set are desired)--generally used for /get/records/\* functions.
    """
    # Some other schemas for internal work
    logger_request_schema_str = """
        {
            "type" : "record",
            "name" : "logger_request",
            "fields" : [
                {"name" : "ranks", "type" : {"type" : "array", "items" : "int"}},
                {"name" : "log_levels", "type" : {"type" : "map", "values" : "string"}}
            ]
        }
    """.replace("\n", "").replace(" ", "")
    logger_response_schema_str = """
        {
            "type" : "record",
            "name" : "logger_response",
            "fields" : [
                {"name" : "status" , "type" : "string"},
                {"name" : "log_levels", "type" : {"type" : "map", "values" : "string"}}
            ]
        }
    """.replace("\n", "").replace(" ", "")
    # -----------------------------------------------------------------------
    # Helper functions
    # -----------------------------------------------------------------------
    def __post_to_gpudb_read(self, body_data, endpoint):
        """
        Create an HTTP connection, POST the request, and return the server's response.
        Parameters:
            body_data : Data to POST to GPUdb server.
            endpoint  : Server path to POST to, e.g. "/add".
        """
        if self.encoding == 'BINARY':
            headers = {"Content-type": "application/octet-stream",
                       "Accept": "application/octet-stream"}
        elif self.encoding == 'JSON':
            headers = {"Content-type": "application/json",
                       "Accept": "application/json"}
        elif self.encoding == 'SNAPPY':
            headers = {"Content-type": "application/x-snappy",
                       "Accept": "application/x-snappy"}
            body_data = snappy.compress(body_data)
        # Set the authentication header, if needed
        if self.auth:
            headers["Authorization"] = self.auth
        # NOTE: Creating a new httplib.HTTPConnection is surprisingly just as
        #       fast as reusing a persistent one and has the advantage of
        #       fully retrying from scratch if the connection fails.
        initial_index = self._current_conn_token_index
        cond = True
        error = None
        while cond:
            loop_error = None
            conn_token = self._get_current_conn_token()
            url_path = (conn_token._gpudb_url_path + endpoint)
            try:
                if (conn_token._connection == 'HTTP'):
                    conn = httplib.HTTPConnection(host=conn_token._host,
                                                  port=conn_token._port,
                                                  timeout=self.timeout)
                elif (conn_token._connection == 'HTTPS'):
                    conn = httplib.HTTPSConnection(host=conn_token._host,
                                                   port=conn_token._port,
                                                   timeout=self.timeout)
            except Exception:
                loop_error = "Error connecting to: '%s' on port %d" % (conn_token._host, conn_token._port)
            if not loop_error:
                try:
                    conn.request("POST", url_path, body_data, headers)
                except Exception:
                    loop_error = "Error posting to: '%s:%d%s'" % (conn_token._host, conn_token._port, url_path)
            if not loop_error:
                try:
                    resp = conn.getresponse()
                    resp_data = resp.read()
                    resp_time = resp.getheader('x-request-time-secs', None)
                except Exception: # some error occurred; return a message
                    loop_error = GPUdbException( "Timeout Error: No response received from %s:%s" % (conn_token._host, conn_token._port) )
                # end except
            if loop_error:
                self._current_conn_token_index = \
                    (self._current_conn_token_index + 1) % len(self._conn_tokens)
            error = loop_error
            cond = error and (self._current_conn_token_index != initial_index)
        if error:
            if not isinstance( error, GPUdbException ):
                error = GPUdbException( str(error) )
            raise error
        return resp_data, resp_time
    # end __post_to_gpudb_read
    def __client_to_object_encoding( self ):
        """Returns object encoding for queries based on the GPUdb client's
        encoding.
        """
        return self.client_to_object_encoding_map[ self.encoding ]
    # end client_to_object_encoding
    def __read_orig_datum(self, SCHEMA, encoded_datum, encoding=None):
        """
        Decode the binary or JSON encoded datum using the avro schema and return a dict.
        Parameters:
            SCHEMA        : A parsed schema from avro.schema.parse().
            encoded_datum : Binary or JSON encoded data.
            encoding      : Type of avro encoding, either "BINARY" or "JSON",
                            None uses the encoding this class was initialized with.
        """
        if encoding is None:
            encoding = self.encoding
        if (encoding == 'BINARY') or (encoding == 'SNAPPY'):
            return _Util.decode_binary_data( SCHEMA, encoded_datum )
        elif encoding == 'JSON':
            data_str = json.loads( _Util.ensure_str(encoded_datum).replace('\\U','\\u') )
            return data_str
    # end __read_orig_datum
    def __read_datum(self, SCHEMA, encoded_datum, encoding=None, response_time=None):
        """
        Decode a gpudb_response message, then decode the payload it contains.
        Parameters:
            SCHEMA : The parsed schema from avro.schema.parse() that the gpudb_response contains.
            encoded_datum : A BINARY or JSON encoded gpudb_response message.
        Returns:
            An OrderedDict of the decoded gpudb_response message's data with the
            gpudb_response put into the "status_info" field.
        """
        # Parse the gpudb_response message
        REP_SCHEMA = self.gpudb_schemas["gpudb_response"]["RSP_SCHEMA"]
        resp = self.__read_orig_datum(REP_SCHEMA, encoded_datum, encoding)
        #now parse the actual response if there is no error
        #NOTE: DATA_SCHEMA should be equivalent to SCHEMA but is NOT for get_set_sorted
        stype = resp['data_type']
        if stype == 'none':
            out = collections.OrderedDict()
        else:
            if self.encoding == 'JSON':
                out = self.__read_orig_datum(SCHEMA, resp['data_str'], 'JSON')
            elif (self.encoding == 'BINARY') or (self.encoding == 'SNAPPY'):
                out = self.__read_orig_datum(SCHEMA, resp['data'], 'BINARY')
        del resp['data']
        del resp['data_str']
        out['status_info'] = resp
        if (response_time is not None):
            out['status_info']['response_time'] = float(response_time)
        return out
    # end __read_datum
    def __get_schemas(self, base_name):
        """
        Get a tuple of parsed and cached request and reply schemas.
        Parameters:
            base_name : Base schema name used to look up the "<base_name>_request" and "<base_name>_response" schemas.
        """
        REQ_SCHEMA = self.gpudb_schemas[base_name]["REQ_SCHEMA"]
        RSP_SCHEMA = self.gpudb_schemas[base_name]["RSP_SCHEMA"]
        return (REQ_SCHEMA, RSP_SCHEMA)
    # end __get_schemas
    def __get_endpoint(self, base_name):
        """
        Get the endpoint for a given query.
        Parameters:
            base_name : Base schema name used to look up the "<base_name>_request" and "<base_name>_response" schemas.
        """
        return self.gpudb_schemas[base_name]["ENDPOINT"]
    # end __get_endpoint
    def __post_then_get(self, REQ_SCHEMA, REP_SCHEMA, datum, endpoint):
        """
        Encode the datum dict using the REQ_SCHEMA, POST to GPUdb server and
        decode the reply using the REP_SCHEMA.
        Parameters:
            REQ_SCHEMA : The parsed schema from avro.schema.parse() of the request.
            REP_SCHEMA : The parsed schema from avro.schema.parse() of the reply.
            datum      : Request dict matching the REQ_SCHEMA.
            endpoint   : Server path to POST to, e.g. "/add".
        """
        encoded_datum = self.encode_datum(REQ_SCHEMA, datum)
        response,response_time  = self.__post_to_gpudb_read(encoded_datum, endpoint)
        return self.__read_datum(REP_SCHEMA, response, None, response_time)
    # end __post_then_get
    def __sanitize_dicts( self, _dict ):
        if not isinstance( _dict, (dict, collections.OrderedDict) ):
            return
        # Iterate over a copy of the keys so that we can modify the dict
        for key in list(_dict.keys()):
            val = _dict[ key ]
            if isinstance( val, bool ):
                if val: # true
                    _dict[ key ] = 'true'
                else:
                    _dict[ key ] = 'false'
            elif isinstance( val, (dict, collections.OrderedDict) ):
                _dict[ key ] = self.__sanitize_dicts( _dict[ key ] )
        # end loop
        return _dict
    # end sanitize_dicts
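    # For example, __sanitize_dicts( {"flag": True, "opts": {"deep": False}} )
    # rewrites the booleans in place, yielding:
    #   {"flag": "true", "opts": {"deep": "false"}}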
    def encode_datum(self, SCHEMA, datum, encoding = None):
        """
        Returns the datum dict encoded as Avro binary or JSON using its schema.
        Parameters:
            SCHEMA (str or avro.Schema)
                A parsed schema object from avro.schema.parse() or a
                string containing the schema.
            datum (dict)
                A dict of key-value pairs containing the data to encode (the
                entries must match the schema).
            encoding (str)
                Optional encoding override, "BINARY", "JSON" or "SNAPPY";
                None uses the encoding this class was initialized with.
        """
        # Convert the string to a parsed schema object (if needed)
        if isinstance( SCHEMA, basestring ):
            SCHEMA = schema.parse( SCHEMA )
        if encoding is None:
            encoding = self.encoding
        else:
            encoding = encoding.upper()
        # Encode the datum using the requested encoding
        if encoding == 'BINARY' or encoding == 'SNAPPY':
            return _Util.encode_binary_data( SCHEMA, datum, encoding )
        elif encoding == 'JSON':
            return json.dumps( _Util.convert_dict_bytes_to_str( datum ) )
    # end encode_datum
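    # A minimal round-trip sketch (the record schema below is hypothetical):
    #   sch  = schema.parse( '{"type":"record","name":"point","fields":'
    #                        '[{"name":"x","type":"double"}]}' )
    #   blob = db.encode_datum( sch, {"x": 1.5}, encoding = "BINARY" )
    #   _Util.decode_binary_data( sch, blob )   # recovers {"x": 1.5}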
    # ------------- Convenience Functions ------------------------------------
    def read_trigger_msg(self, encoded_datum):
        RSP_SCHEMA = self.gpudb_schemas[ "trigger_notification" ]["RSP_SCHEMA"]
        return self.__read_orig_datum(RSP_SCHEMA, encoded_datum, 'BINARY')
    def logger(self, ranks, log_levels):
        """Convenience function to change log levels of some
        or all GPUdb ranks.
        """
        REQ_SCHEMA     = schema.parse( self.logger_request_schema_str )
        REP_SCHEMA     = schema.parse( self.logger_response_schema_str )
        datum = collections.OrderedDict()
        datum["ranks"]      = ranks
        datum["log_levels"] = log_levels
        print('Using host: %s\n' % (self.host))
        return self.__post_then_get(REQ_SCHEMA, REP_SCHEMA, datum, "/logger") 
    # end logger
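    # For example (rank numbers and logger name are hypothetical):
    #   db.logger( ranks = [0, 1], log_levels = {"some.logger": "DEBUG"} )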
    # Helper function to emulate old /add (single object insert) capability
    def insert_object(self, set_id, object_data, params=None):
        if (params):
            return self.insert_records(set_id, [object_data], None, params)
        else:
            return self.insert_records(set_id, [object_data], None, {"return_record_ids":"true"})
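    # For example (table and fields hypothetical):
    #   db.insert_object( "my_table", {"x": 1.5, "y": 2.5} )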
    # Helper for dynamic schema responses
    def parse_dynamic_response(self, retobj, do_print=False, convert_nulls = True):
        if (retobj['status_info']['status'] == 'ERROR'):
            print('Error: ', retobj['status_info']['message'])
            return retobj
        my_schema = schema.parse(retobj['response_schema_str'])
        fields = json.loads(retobj['response_schema_str'])['fields']
        nullable = [not isinstance(x['type']['items'], basestring) for x in fields]
        if len(retobj['binary_encoded_response']) > 0:
            data = retobj['binary_encoded_response']
            decoded = _Util.decode_binary_data( my_schema, data )
            #translate the column names
            column_lookup = decoded['column_headers']
            translated = collections.OrderedDict()
            for i,(n,column_name) in enumerate(zip(nullable,column_lookup)):
                if (n and convert_nulls): # nullable - replace None with '<NULL>'
                    col = [x if x is not None else '<NULL>' for x in decoded['column_%d'%(i+1)]]
                else:
                    col = decoded['column_%d'%(i+1)]
                translated[column_name] = col
            retobj['response'] = translated
        else:
            retobj['response'] = collections.OrderedDict()
            d_resp = json.loads(retobj['json_encoded_response'])
            column_lookup = d_resp['column_headers']
            for i,(n,column_name) in enumerate(zip(nullable,column_lookup)):
                column_index_name = 'column_%d'%(i+1)
                #double/float conversion here
                #get the datatype of the underlying data
                data_type = my_schema.fields_dict[column_index_name].type.items.type
                if (data_type == 'double' or data_type == 'float'):
                    retobj['response'][column_name] = [float(x) for x in d_resp[column_index_name]]
                else:
                    retobj['response'][column_name] = d_resp[column_index_name]
                if (n and convert_nulls): # nullable
                    retobj['response'][column_name] = [x if x is not None else '<NULL>' for x in retobj['response'][column_name]]
        if (do_print):
            print(tabulate(retobj['response'],headers='keys',tablefmt='psql'))
        return retobj
    # end parse_dynamic_response
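    # A sketch of typical use (table and column names hypothetical):
    #   resp = db.aggregate_group_by( "my_table", ["x", "count(*)"], 0, 10 )
    #   resp = db.parse_dynamic_response( resp, do_print = True )
    #   # resp['response'] is then an OrderedDict keyed by column name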
    # ------------- END convenience functions ------------------------------------
    # -----------------------------------------------------------------------
    # Begin autogenerated functions
    # -----------------------------------------------------------------------
    def load_gpudb_schemas( self ):
        """Saves all request and response schemas for GPUdb queries
           in a lookup table (lookup by query name).
        """
        self.gpudb_schemas = {}
        name = "gpudb_response"
        RSP_SCHEMA_STR = """{"type":"record","name":"gpudb_response","fields":[{"name":"status","type":"string"},{"name":"message","type":"string"},{"name":"data_type","type":"string"},{"name":"data","type":"bytes"},{"name":"data_str","type":"string"}]}"""
        self.gpudb_schemas[ name ] = { "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ) }
        name = "trigger_notification"
        RSP_SCHEMA_STR = """{"type":"record","name":"trigger_notification","fields":[{"name":"trigger_id","type":"string"},{"name":"set_id","type":"string"},{"name":"object_id","type":"string"},{"name":"object_data","type":"bytes"}]}"""
        self.gpudb_schemas[ name ] = { "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ) }
        name = "admin_alter_configuration"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_alter_configuration_request","fields":[{"name":"config_string","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_alter_configuration_response","fields":[{"name":"status","type":"string"}]}"""
        ENDPOINT = "/admin/alter/configuration"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_alter_jobs"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_alter_jobs_request","fields":[{"name":"job_ids","type":{"type":"array","items":"int"}},{"name":"action","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_alter_jobs_response","fields":[{"name":"job_ids","type":{"type":"array","items":"int"}},{"name":"action","type":"string"},{"name":"status","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/admin/alter/jobs"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_offline"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_offline_request","fields":[{"name":"offline","type":"boolean"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_offline_response","fields":[{"name":"is_offline","type":"boolean"}]}"""
        ENDPOINT = "/admin/offline"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_show_configuration"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_show_configuration_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_show_configuration_response","fields":[{"name":"config_string","type":"string"}]}"""
        ENDPOINT = "/admin/show/configuration"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_show_jobs"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_show_jobs_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_show_jobs_response","fields":[{"name":"job_id","type":{"type":"array","items":"int"}},{"name":"status","type":{"type":"array","items":"string"}},{"name":"endpoint_name","type":{"type":"array","items":"string"}},{"name":"time_received","type":{"type":"array","items":"long"}},{"name":"auth_id","type":{"type":"array","items":"string"}},{"name":"user_data","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/admin/show/jobs"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_show_shards"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_show_shards_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_show_shards_response","fields":[{"name":"version","type":"long"},{"name":"rank","type":{"type":"array","items":"int"}},{"name":"tom","type":{"type":"array","items":"int"}}]}"""
        ENDPOINT = "/admin/show/shards"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_shutdown"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_shutdown_request","fields":[{"name":"exit_type","type":"string"},{"name":"authorization","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_shutdown_response","fields":[{"name":"exit_status","type":"string"}]}"""
        ENDPOINT = "/admin/shutdown"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_verify_db"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_verify_db_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_verify_db_response","fields":[{"name":"verified_ok","type":"boolean"},{"name":"error_list","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/admin/verifydb"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_convex_hull"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_convex_hull_request","fields":[{"name":"table_name","type":"string"},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_convex_hull_response","fields":[{"name":"x_vector","type":{"type":"array","items":"double"}},{"name":"y_vector","type":{"type":"array","items":"double"}},{"name":"count","type":"int"},{"name":"is_valid","type":"boolean"}]}"""
        ENDPOINT = "/aggregate/convexhull"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_group_by"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_group_by_request","fields":[{"name":"table_name","type":"string"},{"name":"column_names","type":{"type":"array","items":"string"}},{"name":"offset","type":"long"},{"name":"limit","type":"long"},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_group_by_response","fields":[{"name":"response_schema_str","type":"string"},{"name":"binary_encoded_response","type":"bytes"},{"name":"json_encoded_response","type":"string"},{"name":"total_number_of_records","type":"long"},{"name":"has_more_records","type":"boolean"}]}"""
        ENDPOINT = "/aggregate/groupby"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_histogram"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_histogram_request","fields":[{"name":"table_name","type":"string"},{"name":"column_name","type":"string"},{"name":"start","type":"double"},{"name":"end","type":"double"},{"name":"interval","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_histogram_response","fields":[{"name":"counts","type":{"type":"array","items":"double"}},{"name":"start","type":"double"},{"name":"end","type":"double"}]}"""
        ENDPOINT = "/aggregate/histogram"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_k_means"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_k_means_request","fields":[{"name":"table_name","type":"string"},{"name":"column_names","type":{"type":"array","items":"string"}},{"name":"k","type":"int"},{"name":"tolerance","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_k_means_response","fields":[{"name":"means","type":{"type":"array","items":{"type":"array","items":"double"}}},{"name":"counts","type":{"type":"array","items":"long"}},{"name":"rms_dists","type":{"type":"array","items":"double"}},{"name":"count","type":"long"},{"name":"rms_dist","type":"double"},{"name":"tolerance","type":"double"},{"name":"num_iters","type":"int"}]}"""
        ENDPOINT = "/aggregate/kmeans"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_min_max"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_min_max_request","fields":[{"name":"table_name","type":"string"},{"name":"column_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_min_max_response","fields":[{"name":"min","type":"double"},{"name":"max","type":"double"}]}"""
        ENDPOINT = "/aggregate/minmax"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_min_max_geometry"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_min_max_geometry_request","fields":[{"name":"table_name","type":"string"},{"name":"column_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_min_max_geometry_response","fields":[{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"}]}"""
        ENDPOINT = "/aggregate/minmax/geometry"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_statistics"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_statistics_request","fields":[{"name":"table_name","type":"string"},{"name":"column_name","type":"string"},{"name":"stats","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_statistics_response","fields":[{"name":"stats","type":{"type":"map","values":"double"}}]}"""
        ENDPOINT = "/aggregate/statistics"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_statistics_by_range"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_statistics_by_range_request","fields":[{"name":"table_name","type":"string"},{"name":"select_expression","type":"string"},{"name":"column_name","type":"string"},{"name":"value_column_name","type":"string"},{"name":"stats","type":"string"},{"name":"start","type":"double"},{"name":"end","type":"double"},{"name":"interval","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_statistics_by_range_response","fields":[{"name":"stats","type":{"type":"map","values":{"type":"array","items":"double"}}}]}"""
        ENDPOINT = "/aggregate/statistics/byrange"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_unique"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_unique_request","fields":[{"name":"table_name","type":"string"},{"name":"column_name","type":"string"},{"name":"offset","type":"long"},{"name":"limit","type":"long"},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_unique_response","fields":[{"name":"table_name","type":"string"},{"name":"response_schema_str","type":"string"},{"name":"binary_encoded_response","type":"bytes"},{"name":"json_encoded_response","type":"string"},{"name":"has_more_records","type":"boolean"}]}"""
        ENDPOINT = "/aggregate/unique"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "aggregate_unpivot"
        REQ_SCHEMA_STR = """{"type":"record","name":"aggregate_unpivot_request","fields":[{"name":"table_name","type":"string"},{"name":"variable_column_name","type":"string"},{"name":"value_column_name","type":"string"},{"name":"pivoted_columns","type":{"type":"array","items":"string"}},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"aggregate_unpivot_response","fields":[{"name":"table_name","type":"string"},{"name":"response_schema_str","type":"string"},{"name":"binary_encoded_response","type":"bytes"},{"name":"json_encoded_response","type":"string"},{"name":"total_number_of_records","type":"long"},{"name":"has_more_records","type":"boolean"}]}"""
        ENDPOINT = "/aggregate/unpivot"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "alter_system_properties"
        REQ_SCHEMA_STR = """{"type":"record","name":"alter_system_properties_request","fields":[{"name":"property_updates_map","type":{"type":"map","values":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"alter_system_properties_response","fields":[{"name":"updated_properties_map","type":{"type":"map","values":"string"}}]}"""
        ENDPOINT = "/alter/system/properties"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "alter_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"alter_table_request","fields":[{"name":"table_name","type":"string"},{"name":"action","type":"string"},{"name":"value","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"alter_table_response","fields":[{"name":"table_name","type":"string"},{"name":"action","type":"string"},{"name":"value","type":"string"},{"name":"type_id","type":"string"},{"name":"type_definition","type":"string"},{"name":"properties","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"label","type":"string"}]}"""
        ENDPOINT = "/alter/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "alter_table_metadata"
        REQ_SCHEMA_STR = """{"type":"record","name":"alter_table_metadata_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"metadata_map","type":{"type":"map","values":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"alter_table_metadata_response","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"metadata_map","type":{"type":"map","values":"string"}}]}"""
        ENDPOINT = "/alter/table/metadata"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "alter_user"
        REQ_SCHEMA_STR = """{"type":"record","name":"alter_user_request","fields":[{"name":"name","type":"string"},{"name":"action","type":"string"},{"name":"value","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"alter_user_response","fields":[{"name":"name","type":"string"}]}"""
        ENDPOINT = "/alter/user"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "append_records"
        REQ_SCHEMA_STR = """{"type":"record","name":"append_records_request","fields":[{"name":"table_name","type":"string"},{"name":"source_table_name","type":"string"},{"name":"field_map","type":{"type":"map","values":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"append_records_response","fields":[{"name":"table_name","type":"string"}]}"""
        ENDPOINT = "/append/records"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "clear_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"clear_table_request","fields":[{"name":"table_name","type":"string"},{"name":"authorization","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"clear_table_response","fields":[{"name":"table_name","type":"string"}]}"""
        ENDPOINT = "/clear/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "clear_table_monitor"
        REQ_SCHEMA_STR = """{"type":"record","name":"clear_table_monitor_request","fields":[{"name":"topic_id","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"clear_table_monitor_response","fields":[{"name":"topic_id","type":"string"}]}"""
        ENDPOINT = "/clear/tablemonitor"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "clear_trigger"
        REQ_SCHEMA_STR = """{"type":"record","name":"clear_trigger_request","fields":[{"name":"trigger_id","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"clear_trigger_response","fields":[{"name":"trigger_id","type":"string"}]}"""
        ENDPOINT = "/clear/trigger"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_join_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_join_table_request","fields":[{"name":"join_table_name","type":"string"},{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"column_names","type":{"type":"array","items":"string"}},{"name":"expressions","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_join_table_response","fields":[{"name":"join_table_name","type":"string"},{"name":"count","type":"long"}]}"""
        ENDPOINT = "/create/jointable"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_proc"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_proc_request","fields":[{"name":"proc_name","type":"string"},{"name":"execution_mode","type":"string"},{"name":"files","type":{"type":"map","values":"bytes"}},{"name":"command","type":"string"},{"name":"args","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_proc_response","fields":[{"name":"proc_name","type":"string"}]}"""
        ENDPOINT = "/create/proc"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_projection"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_projection_request","fields":[{"name":"table_name","type":"string"},{"name":"projection_name","type":"string"},{"name":"column_names","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_projection_response","fields":[{"name":"projection_name","type":"string"}]}"""
        ENDPOINT = "/create/projection"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_role"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_role_request","fields":[{"name":"name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_role_response","fields":[{"name":"name","type":"string"}]}"""
        ENDPOINT = "/create/role"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_table_request","fields":[{"name":"table_name","type":"string"},{"name":"type_id","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_table_response","fields":[{"name":"table_name","type":"string"},{"name":"type_id","type":"string"},{"name":"is_collection","type":"boolean"}]}"""
        ENDPOINT = "/create/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_table_monitor"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_table_monitor_request","fields":[{"name":"table_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_table_monitor_response","fields":[{"name":"topic_id","type":"string"},{"name":"table_name","type":"string"},{"name":"type_schema","type":"string"}]}"""
        ENDPOINT = "/create/tablemonitor"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_trigger_by_area"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_trigger_by_area_request","fields":[{"name":"request_id","type":"string"},{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"x_column_name","type":"string"},{"name":"x_vector","type":{"type":"array","items":"double"}},{"name":"y_column_name","type":"string"},{"name":"y_vector","type":{"type":"array","items":"double"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_trigger_by_area_response","fields":[{"name":"trigger_id","type":"string"}]}"""
        ENDPOINT = "/create/trigger/byarea"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_trigger_by_range"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_trigger_by_range_request","fields":[{"name":"request_id","type":"string"},{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"column_name","type":"string"},{"name":"min","type":"double"},{"name":"max","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_trigger_by_range_response","fields":[{"name":"trigger_id","type":"string"}]}"""
        ENDPOINT = "/create/trigger/byrange"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_type"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_type_request","fields":[{"name":"type_definition","type":"string"},{"name":"label","type":"string"},{"name":"properties","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_type_response","fields":[{"name":"type_id","type":"string"},{"name":"type_definition","type":"string"},{"name":"label","type":"string"},{"name":"properties","type":{"type":"map","values":{"type":"array","items":"string"}}}]}"""
        ENDPOINT = "/create/type"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_union"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_union_request","fields":[{"name":"table_name","type":"string"},{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"input_column_names","type":{"type":"array","items":{"type":"array","items":"string"}}},{"name":"output_column_names","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_union_response","fields":[{"name":"table_name","type":"string"}]}"""
        ENDPOINT = "/create/union"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_user_external"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_user_external_request","fields":[{"name":"name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_user_external_response","fields":[{"name":"name","type":"string"}]}"""
        ENDPOINT = "/create/user/external"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "create_user_internal"
        REQ_SCHEMA_STR = """{"type":"record","name":"create_user_internal_request","fields":[{"name":"name","type":"string"},{"name":"password","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"create_user_internal_response","fields":[{"name":"name","type":"string"}]}"""
        ENDPOINT = "/create/user/internal"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "delete_proc"
        REQ_SCHEMA_STR = """{"type":"record","name":"delete_proc_request","fields":[{"name":"proc_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"delete_proc_response","fields":[{"name":"proc_name","type":"string"}]}"""
        ENDPOINT = "/delete/proc"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "delete_records"
        REQ_SCHEMA_STR = """{"type":"record","name":"delete_records_request","fields":[{"name":"table_name","type":"string"},{"name":"expressions","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"delete_records_response","fields":[{"name":"count_deleted","type":"long"},{"name":"counts_deleted","type":{"type":"array","items":"long"}}]}"""
        ENDPOINT = "/delete/records"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "delete_role"
        REQ_SCHEMA_STR = """{"type":"record","name":"delete_role_request","fields":[{"name":"name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"delete_role_response","fields":[{"name":"name","type":"string"}]}"""
        ENDPOINT = "/delete/role"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "delete_user"
        REQ_SCHEMA_STR = """{"type":"record","name":"delete_user_request","fields":[{"name":"name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"delete_user_response","fields":[{"name":"name","type":"string"}]}"""
        ENDPOINT = "/delete/user"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "execute_proc"
        REQ_SCHEMA_STR = """{"type":"record","name":"execute_proc_request","fields":[{"name":"proc_name","type":"string"},{"name":"params","type":{"type":"map","values":"string"}},{"name":"bin_params","type":{"type":"map","values":"bytes"}},{"name":"input_table_names","type":{"type":"array","items":"string"}},{"name":"input_column_names","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"output_table_names","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"execute_proc_response","fields":[{"name":"run_id","type":"string"}]}"""
        ENDPOINT = "/execute/proc"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"expression","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_area"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_area_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"x_column_name","type":"string"},{"name":"x_vector","type":{"type":"array","items":"double"}},{"name":"y_column_name","type":"string"},{"name":"y_vector","type":{"type":"array","items":"double"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_area_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byarea"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_area_geometry"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_area_geometry_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_name","type":"string"},{"name":"x_vector","type":{"type":"array","items":"double"}},{"name":"y_vector","type":{"type":"array","items":"double"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_area_geometry_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byarea/geometry"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_box"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_box_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"x_column_name","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"y_column_name","type":"string"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_box_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/bybox"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_box_geometry"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_box_geometry_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_name","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_box_geometry_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/bybox/geometry"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_geometry"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_geometry_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_name","type":"string"},{"name":"input_wkt","type":"string"},{"name":"operation","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_geometry_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/bygeometry"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_list"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_list_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_values_map","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_list_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/bylist"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_radius"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_radius_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"x_column_name","type":"string"},{"name":"x_center","type":"double"},{"name":"y_column_name","type":"string"},{"name":"y_center","type":"double"},{"name":"radius","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_radius_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byradius"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_radius_geometry"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_radius_geometry_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_name","type":"string"},{"name":"x_center","type":"double"},{"name":"y_center","type":"double"},{"name":"radius","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_radius_geometry_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byradius/geometry"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_range"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_range_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_name","type":"string"},{"name":"lower_bound","type":"double"},{"name":"upper_bound","type":"double"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_range_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byrange"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_series"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_series_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"track_id","type":"string"},{"name":"target_track_ids","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_series_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byseries"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_string"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_string_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"expression","type":"string"},{"name":"mode","type":"string"},{"name":"column_names","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_string_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/bystring"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_table_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"column_name","type":"string"},{"name":"source_table_name","type":"string"},{"name":"source_table_column_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_table_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/bytable"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "filter_by_value"
        REQ_SCHEMA_STR = """{"type":"record","name":"filter_by_value_request","fields":[{"name":"table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"is_string","type":"boolean"},{"name":"value","type":"double"},{"name":"value_str","type":"string"},{"name":"column_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"filter_by_value_response","fields":[{"name":"count","type":"long"}]}"""
        ENDPOINT = "/filter/byvalue"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "get_records"
        REQ_SCHEMA_STR = """{"type":"record","name":"get_records_request","fields":[{"name":"table_name","type":"string"},{"name":"offset","type":"long"},{"name":"limit","type":"long"},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"get_records_response","fields":[{"name":"table_name","type":"string"},{"name":"type_name","type":"string"},{"name":"type_schema","type":"string"},{"name":"records_binary","type":{"type":"array","items":"bytes"}},{"name":"records_json","type":{"type":"array","items":"string"}},{"name":"total_number_of_records","type":"long"},{"name":"has_more_records","type":"boolean"}]}"""
        ENDPOINT = "/get/records"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "get_records_by_column"
        REQ_SCHEMA_STR = """{"type":"record","name":"get_records_by_column_request","fields":[{"name":"table_name","type":"string"},{"name":"column_names","type":{"type":"array","items":"string"}},{"name":"offset","type":"long"},{"name":"limit","type":"long"},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"get_records_by_column_response","fields":[{"name":"table_name","type":"string"},{"name":"response_schema_str","type":"string"},{"name":"binary_encoded_response","type":"bytes"},{"name":"json_encoded_response","type":"string"},{"name":"total_number_of_records","type":"long"},{"name":"has_more_records","type":"boolean"}]}"""
        ENDPOINT = "/get/records/bycolumn"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "get_records_by_series"
        REQ_SCHEMA_STR = """{"type":"record","name":"get_records_by_series_request","fields":[{"name":"table_name","type":"string"},{"name":"world_table_name","type":"string"},{"name":"offset","type":"int"},{"name":"limit","type":"int"},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"get_records_by_series_response","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"type_names","type":{"type":"array","items":"string"}},{"name":"type_schemas","type":{"type":"array","items":"string"}},{"name":"list_records_binary","type":{"type":"array","items":{"type":"array","items":"bytes"}}},{"name":"list_records_json","type":{"type":"array","items":{"type":"array","items":"string"}}}]}"""
        ENDPOINT = "/get/records/byseries"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "get_records_from_collection"
        REQ_SCHEMA_STR = """{"type":"record","name":"get_records_from_collection_request","fields":[{"name":"table_name","type":"string"},{"name":"offset","type":"long"},{"name":"limit","type":"long"},{"name":"encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"get_records_from_collection_response","fields":[{"name":"table_name","type":"string"},{"name":"type_names","type":{"type":"array","items":"string"}},{"name":"records_binary","type":{"type":"array","items":"bytes"}},{"name":"records_json","type":{"type":"array","items":"string"}},{"name":"record_ids","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/get/records/fromcollection"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "grant_permission_system"
        REQ_SCHEMA_STR = """{"type":"record","name":"grant_permission_system_request","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"grant_permission_system_response","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"}]}"""
        ENDPOINT = "/grant/permission/system"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "grant_permission_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"grant_permission_table_request","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"},{"name":"table_name","type":"string"},{"name":"filter_expression","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"grant_permission_table_response","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"},{"name":"table_name","type":"string"},{"name":"filter_expression","type":"string"}]}"""
        ENDPOINT = "/grant/permission/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "grant_role"
        REQ_SCHEMA_STR = """{"type":"record","name":"grant_role_request","fields":[{"name":"role","type":"string"},{"name":"member","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"grant_role_response","fields":[{"name":"role","type":"string"},{"name":"member","type":"string"}]}"""
        ENDPOINT = "/grant/role"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "has_proc"
        REQ_SCHEMA_STR = """{"type":"record","name":"has_proc_request","fields":[{"name":"proc_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"has_proc_response","fields":[{"name":"proc_name","type":"string"},{"name":"proc_exists","type":"boolean"}]}"""
        ENDPOINT = "/has/proc"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "has_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"has_table_request","fields":[{"name":"table_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"has_table_response","fields":[{"name":"table_name","type":"string"},{"name":"table_exists","type":"boolean"}]}"""
        ENDPOINT = "/has/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "has_type"
        REQ_SCHEMA_STR = """{"type":"record","name":"has_type_request","fields":[{"name":"type_id","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"has_type_response","fields":[{"name":"type_id","type":"string"},{"name":"type_exists","type":"boolean"}]}"""
        ENDPOINT = "/has/type"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "insert_records"
        REQ_SCHEMA_STR = """{"type":"record","name":"insert_records_request","fields":[{"name":"table_name","type":"string"},{"name":"list","type":{"type":"array","items":"bytes"}},{"name":"list_str","type":{"type":"array","items":"string"}},{"name":"list_encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"insert_records_response","fields":[{"name":"record_ids","type":{"type":"array","items":"string"}},{"name":"count_inserted","type":"int"},{"name":"count_updated","type":"int"}]}"""
        ENDPOINT = "/insert/records"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "insert_records_random"
        REQ_SCHEMA_STR = """{"type":"record","name":"insert_records_random_request","fields":[{"name":"table_name","type":"string"},{"name":"count","type":"long"},{"name":"options","type":{"type":"map","values":{"type":"map","values":"double"}}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"insert_records_random_response","fields":[{"name":"table_name","type":"string"},{"name":"count","type":"long"}]}"""
        ENDPOINT = "/insert/records/random"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "insert_symbol"
        REQ_SCHEMA_STR = """{"type":"record","name":"insert_symbol_request","fields":[{"name":"symbol_id","type":"string"},{"name":"symbol_format","type":"string"},{"name":"symbol_data","type":"bytes"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"insert_symbol_response","fields":[{"name":"symbol_id","type":"string"}]}"""
        ENDPOINT = "/insert/symbol"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "kill_proc"
        REQ_SCHEMA_STR = """{"type":"record","name":"kill_proc_request","fields":[{"name":"run_id","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"kill_proc_response","fields":[{"name":"run_ids","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/kill/proc"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "lock_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"lock_table_request","fields":[{"name":"table_name","type":"string"},{"name":"lock_type","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"lock_table_response","fields":[{"name":"lock_type","type":"string"}]}"""
        ENDPOINT = "/lock/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "merge_records"
        REQ_SCHEMA_STR = """{"type":"record","name":"merge_records_request","fields":[{"name":"table_name","type":"string"},{"name":"source_table_names","type":{"type":"array","items":"string"}},{"name":"field_maps","type":{"type":"array","items":{"type":"map","values":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"merge_records_response","fields":[{"name":"table_name","type":"string"}]}"""
        ENDPOINT = "/merge/records"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "admin_replace_tom"
        REQ_SCHEMA_STR = """{"type":"record","name":"admin_replace_tom_request","fields":[{"name":"old_rank_tom","type":"long"},{"name":"new_rank_tom","type":"long"}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"admin_replace_tom_response","fields":[{"name":"old_rank_tom","type":"long"},{"name":"new_rank_tom","type":"long"}]}"""
        ENDPOINT = "/replace/tom"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "revoke_permission_system"
        REQ_SCHEMA_STR = """{"type":"record","name":"revoke_permission_system_request","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"revoke_permission_system_response","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"}]}"""
        ENDPOINT = "/revoke/permission/system"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "revoke_permission_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"revoke_permission_table_request","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"},{"name":"table_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"revoke_permission_table_response","fields":[{"name":"name","type":"string"},{"name":"permission","type":"string"},{"name":"table_name","type":"string"}]}"""
        ENDPOINT = "/revoke/permission/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "revoke_role"
        REQ_SCHEMA_STR = """{"type":"record","name":"revoke_role_request","fields":[{"name":"role","type":"string"},{"name":"member","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"revoke_role_response","fields":[{"name":"role","type":"string"},{"name":"member","type":"string"}]}"""
        ENDPOINT = "/revoke/role"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_proc"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_proc_request","fields":[{"name":"proc_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_proc_response","fields":[{"name":"proc_names","type":{"type":"array","items":"string"}},{"name":"execution_modes","type":{"type":"array","items":"string"}},{"name":"files","type":{"type":"array","items":{"type":"map","values":"bytes"}}},{"name":"commands","type":{"type":"array","items":"string"}},{"name":"args","type":{"type":"array","items":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"array","items":{"type":"map","values":"string"}}}]}"""
        ENDPOINT = "/show/proc"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_proc_status"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_proc_status_request","fields":[{"name":"run_id","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_proc_status_response","fields":[{"name":"proc_names","type":{"type":"map","values":"string"}},{"name":"params","type":{"type":"map","values":{"type":"map","values":"string"}}},{"name":"bin_params","type":{"type":"map","values":{"type":"map","values":"bytes"}}},{"name":"input_table_names","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"input_column_names","type":{"type":"map","values":{"type":"map","values":{"type":"array","items":"string"}}}},{"name":"output_table_names","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":{"type":"map","values":"string"}}},{"name":"overall_statuses","type":{"type":"map","values":"string"}},{"name":"statuses","type":{"type":"map","values":{"type":"map","values":"string"}}},{"name":"messages","type":{"type":"map","values":{"type":"map","values":"string"}}},{"name":"results","type":{"type":"map","values":{"type":"map","values":{"type":"map","values":"string"}}}},{"name":"bin_results","type":{"type":"map","values":{"type":"map","values":{"type":"map","values":"bytes"}}}},{"name":"timings","type":{"type":"map","values":{"type":"map","values":{"type":"map","values":"long"}}}}]}"""
        ENDPOINT = "/show/proc/status"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_security"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_security_request","fields":[{"name":"names","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_security_response","fields":[{"name":"types","type":{"type":"map","values":"string"}},{"name":"roles","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"permissions","type":{"type":"map","values":{"type":"array","items":{"type":"map","values":"string"}}}}]}"""
        ENDPOINT = "/show/security"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_system_properties"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_system_properties_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_system_properties_response","fields":[{"name":"property_map","type":{"type":"map","values":"string"}}]}"""
        ENDPOINT = "/show/system/properties"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_system_status"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_system_status_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_system_status_response","fields":[{"name":"status_map","type":{"type":"map","values":"string"}}]}"""
        ENDPOINT = "/show/system/status"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_system_timing"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_system_timing_request","fields":[{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_system_timing_response","fields":[{"name":"endpoints","type":{"type":"array","items":"string"}},{"name":"time_in_ms","type":{"type":"array","items":"float"}},{"name":"jobIds","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/show/system/timing"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_table"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_table_request","fields":[{"name":"table_name","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_table_response","fields":[{"name":"table_name","type":"string"},{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"table_descriptions","type":{"type":"array","items":{"type":"array","items":"string"}}},{"name":"type_ids","type":{"type":"array","items":"string"}},{"name":"type_schemas","type":{"type":"array","items":"string"}},{"name":"type_labels","type":{"type":"array","items":"string"}},{"name":"properties","type":{"type":"array","items":{"type":"map","values":{"type":"array","items":"string"}}}},{"name":"additional_info","type":{"type":"array","items":{"type":"map","values":"string"}}},{"name":"sizes","type":{"type":"array","items":"long"}},{"name":"full_sizes","type":{"type":"array","items":"long"}},{"name":"join_sizes","type":{"type":"array","items":"double"}},{"name":"total_size","type":"long"},{"name":"total_full_size","type":"long"}]}"""
        ENDPOINT = "/show/table"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_table_metadata"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_table_metadata_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_table_metadata_response","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"metadata_maps","type":{"type":"array","items":{"type":"map","values":"string"}}}]}"""
        ENDPOINT = "/show/table/metadata"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_tables_by_type"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_tables_by_type_request","fields":[{"name":"type_id","type":"string"},{"name":"label","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_tables_by_type_response","fields":[{"name":"table_names","type":{"type":"array","items":"string"}}]}"""
        ENDPOINT = "/show/tables/bytype"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_triggers"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_triggers_request","fields":[{"name":"trigger_ids","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_triggers_response","fields":[{"name":"trigger_map","type":{"type":"map","values":{"type":"map","values":"string"}}}]}"""
        ENDPOINT = "/show/triggers"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "show_types"
        REQ_SCHEMA_STR = """{"type":"record","name":"show_types_request","fields":[{"name":"type_id","type":"string"},{"name":"label","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"show_types_response","fields":[{"name":"type_ids","type":{"type":"array","items":"string"}},{"name":"type_schemas","type":{"type":"array","items":"string"}},{"name":"labels","type":{"type":"array","items":"string"}},{"name":"properties","type":{"type":"array","items":{"type":"map","values":{"type":"array","items":"string"}}}}]}"""
        ENDPOINT = "/show/types"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "update_records"
        REQ_SCHEMA_STR = """{"type":"record","name":"update_records_request","fields":[{"name":"table_name","type":"string"},{"name":"expressions","type":{"type":"array","items":"string"}},{"name":"new_values_maps","type":{"type":"array","items":{"type":"map","values":["string","null"]}}},{"name":"records_to_insert","type":{"type":"array","items":"bytes"}},{"name":"records_to_insert_str","type":{"type":"array","items":"string"}},{"name":"record_encoding","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"update_records_response","fields":[{"name":"count_updated","type":"long"},{"name":"counts_updated","type":{"type":"array","items":"long"}},{"name":"count_inserted","type":"long"},{"name":"counts_inserted","type":{"type":"array","items":"long"}}]}"""
        ENDPOINT = "/update/records"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "update_records_by_series"
        REQ_SCHEMA_STR = """{"type":"record","name":"update_records_by_series_request","fields":[{"name":"table_name","type":"string"},{"name":"world_table_name","type":"string"},{"name":"view_name","type":"string"},{"name":"reserved","type":{"type":"array","items":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"update_records_by_series_response","fields":[{"name":"count","type":"int"}]}"""
        ENDPOINT = "/update/records/byseries"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_image"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_image_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"world_table_names","type":{"type":"array","items":"string"}},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"geometry_column_name","type":"string"},{"name":"track_ids","type":{"type":"array","items":{"type":"array","items":"string"}}},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"projection","type":"string"},{"name":"bg_color","type":"long"},{"name":"style_options","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_image_response","fields":[{"name":"width","type":"double"},{"name":"height","type":"double"},{"name":"bg_color","type":"long"},{"name":"image_data","type":"bytes"}]}"""
        ENDPOINT = "/visualize/image"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_image_chart"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_image_chart_request","fields":[{"name":"table_name","type":"string"},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"bg_color","type":"string"},{"name":"style_options","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_image_chart_response","fields":[{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"bg_color","type":"string"},{"name":"image_data","type":"bytes"},{"name":"axes_info","type":{"type":"map","values":{"type":"array","items":"string"}}}]}"""
        ENDPOINT = "/visualize/image/chart"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_image_classbreak"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_image_classbreak_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"world_table_names","type":{"type":"array","items":"string"}},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"geometry_column_name","type":"string"},{"name":"track_ids","type":{"type":"array","items":{"type":"array","items":"string"}}},{"name":"cb_column_name","type":"string"},{"name":"cb_vals","type":{"type":"array","items":"string"}},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"projection","type":"string"},{"name":"bg_color","type":"long"},{"name":"style_options","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_image_classbreak_response","fields":[{"name":"width","type":"double"},{"name":"height","type":"double"},{"name":"bg_color","type":"long"},{"name":"image_data","type":"bytes"}]}"""
        ENDPOINT = "/visualize/image/classbreak"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_image_heatmap"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_image_heatmap_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"value_column_name","type":"string"},{"name":"geometry_column_name","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"projection","type":"string"},{"name":"style_options","type":{"type":"map","values":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_image_heatmap_response","fields":[{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"bg_color","type":"long"},{"name":"image_data","type":"bytes"}]}"""
        ENDPOINT = "/visualize/image/heatmap"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_image_labels"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_image_labels_request","fields":[{"name":"table_name","type":"string"},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"x_offset","type":"string"},{"name":"y_offset","type":"string"},{"name":"text_string","type":"string"},{"name":"font","type":"string"},{"name":"text_color","type":"string"},{"name":"text_angle","type":"string"},{"name":"text_scale","type":"string"},{"name":"draw_box","type":"string"},{"name":"draw_leader","type":"string"},{"name":"line_width","type":"string"},{"name":"line_color","type":"string"},{"name":"fill_color","type":"string"},{"name":"leader_x_column_name","type":"string"},{"name":"leader_y_column_name","type":"string"},{"name":"filter","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"projection","type":"string"},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_image_labels_response","fields":[{"name":"width","type":"double"},{"name":"height","type":"double"},{"name":"bg_color","type":"long"},{"name":"image_data","type":"bytes"}]}"""
        ENDPOINT = "/visualize/image/labels"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_video"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_video_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"world_table_names","type":{"type":"array","items":"string"}},{"name":"track_ids","type":{"type":"array","items":{"type":"array","items":"string"}}},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"geometry_column_name","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"projection","type":"string"},{"name":"bg_color","type":"long"},{"name":"time_intervals","type":{"type":"array","items":{"type":"array","items":"double"}}},{"name":"video_style","type":"string"},{"name":"session_key","type":"string"},{"name":"style_options","type":{"type":"map","values":{"type":"array","items":"string"}}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_video_response","fields":[{"name":"width","type":"double"},{"name":"height","type":"double"},{"name":"bg_color","type":"long"},{"name":"num_frames","type":"int"},{"name":"session_key","type":"string"},{"name":"data","type":{"type":"array","items":"bytes"}}]}"""
        ENDPOINT = "/visualize/video"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT }
        name = "visualize_video_heatmap"
        REQ_SCHEMA_STR = """{"type":"record","name":"visualize_video_heatmap_request","fields":[{"name":"table_names","type":{"type":"array","items":"string"}},{"name":"x_column_name","type":"string"},{"name":"y_column_name","type":"string"},{"name":"min_x","type":"double"},{"name":"max_x","type":"double"},{"name":"min_y","type":"double"},{"name":"max_y","type":"double"},{"name":"time_intervals","type":{"type":"array","items":{"type":"array","items":"double"}}},{"name":"width","type":"int"},{"name":"height","type":"int"},{"name":"projection","type":"string"},{"name":"video_style","type":"string"},{"name":"session_key","type":"string"},{"name":"style_options","type":{"type":"map","values":"string"}},{"name":"options","type":{"type":"map","values":"string"}}]}"""
        RSP_SCHEMA_STR = """{"type":"record","name":"visualize_video_heatmap_response","fields":[{"name":"width","type":"double"},{"name":"height","type":"double"},{"name":"bg_color","type":"long"},{"name":"num_frames","type":"int"},{"name":"session_key","type":"string"},{"name":"data","type":{"type":"array","items":"bytes"}}]}"""
        ENDPOINT = "/visualize/video/heatmap"
        self.gpudb_schemas[ name ] = { "REQ_SCHEMA_STR" : REQ_SCHEMA_STR,
                                       "RSP_SCHEMA_STR" : RSP_SCHEMA_STR,
                                       "REQ_SCHEMA" : schema.parse( REQ_SCHEMA_STR ),
                                       "RSP_SCHEMA" : schema.parse( RSP_SCHEMA_STR ),
                                       "ENDPOINT" : ENDPOINT } 
    # end load_gpudb_schemas
    # begin admin_alter_configuration
    def admin_alter_configuration( self, config_string = None, options = {} ):
        """Update the system config file.  Updates to the config file are only
        permitted when the system is stopped.
        Parameters:
            config_string (str)
                Updated contents of the config file.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            status (str)
                Status of the requested configuration update.
        """
        assert isinstance( config_string, (basestring)), "admin_alter_configuration(): Argument 'config_string' must be (one) of type(s) '(basestring)'; given %s" % type( config_string ).__name__
        assert isinstance( options, (dict)), "admin_alter_configuration(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_alter_configuration" )
        obj = collections.OrderedDict()
        obj['config_string'] = config_string
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/alter/configuration' ) ) 
    # end admin_alter_configuration
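    # Example (illustrative sketch): fetch the current config with
    # admin_show_configuration(), then write it back.  Assumes an established
    # GPUdb handle `db` (hypothetical) and a stopped system, since updates
    # are only permitted while the system is stopped:
    #
    #   cfg = db.admin_show_configuration().config_string
    #   rsp = db.admin_alter_configuration( config_string = cfg )
    #   print( rsp.status )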
    # begin admin_alter_jobs
    def admin_alter_jobs( self, job_ids = None, action = None, options = {} ):
        """Perform the requested action on a list of one or more job(s). Based on
        the type of job and the current state of execution, the action may not
        be successfully executed. The final result of the attempted actions for
        each specified job is returned in the status array of the response. See
        `Job Manager <../../../gpudbAdmin/job_manager.html>`_ for more
        information.
        Parameters:
            job_ids (list of ints)
                Jobs to be modified.  The user can provide a single element
                (which will be automatically promoted to a list internally) or
                a list.
            action (str)
                Action to be performed on the jobs specified by job_ids.
                Allowed values are:
                * cancel
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            job_ids (list of ints)
                Jobs on which the action was performed.
            action (str)
                Action requested on the jobs.
            status (list of str)
                Status of the requested action for each job.
        """
        job_ids = job_ids if isinstance( job_ids, list ) else ( [] if (job_ids is None) else [ job_ids ] )
        assert isinstance( action, (basestring)), "admin_alter_jobs(): Argument 'action' must be (one) of type(s) '(basestring)'; given %s" % type( action ).__name__
        assert isinstance( options, (dict)), "admin_alter_jobs(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_alter_jobs" )
        obj = collections.OrderedDict()
        obj['job_ids'] = job_ids
        obj['action'] = action
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/alter/jobs' ) ) 
    # end admin_alter_jobs
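    # Example (illustrative sketch): cancel a running job.  Assumes a GPUdb
    # handle `db`; the job id 123 is hypothetical:
    #
    #   rsp = db.admin_alter_jobs( job_ids = [ 123 ], action = 'cancel' )
    #   for job_id, status in zip( rsp.job_ids, rsp.status ):
    #       print( job_id, status )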
    # begin admin_offline
    def admin_offline( self, offline = None, options = {} ):
        """Take the system offline. When the system is offline, no user operations
        can be performed with the exception of a system shutdown.
        Parameters:
            offline (bool)
                Set to true if desired state is offline.
                Allowed values are:
                * true
                * false
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **flush_to_disk** --
                  Flush to disk when going offline
                  Allowed values are:
                  * true
                  * false
        Returns:
            A dict with the following entries--
            is_offline (bool)
                Returns true if the system is offline, or false otherwise.
        """
        assert isinstance( offline, (bool)), "admin_offline(): Argument 'offline' must be (one) of type(s) '(bool)'; given %s" % type( offline ).__name__
        assert isinstance( options, (dict)), "admin_offline(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_offline" )
        obj = collections.OrderedDict()
        obj['offline'] = offline
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/offline' ) ) 
    # end admin_offline
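    # Example (illustrative sketch): take the system offline, flushing to
    # disk first.  Assumes a GPUdb handle `db`:
    #
    #   rsp = db.admin_offline( offline = True,
    #                           options = { "flush_to_disk": "true" } )
    #   print( rsp.is_offline )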
    # begin admin_show_configuration
    def admin_show_configuration( self, options = {} ):
        """Show the current system configuration file.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            config_string (str)
                Contents of the config file.
        """
        assert isinstance( options, (dict)), "admin_show_configuration(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_show_configuration" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/show/configuration' ) ) 
    # end admin_show_configuration
    # begin admin_show_jobs
    def admin_show_jobs( self, options = {} ):
        """Get a list of the current jobs in GPUdb.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * show_details
        Returns:
            A dict with the following entries--
            job_id (list of ints)
            status (list of str)
            endpoint_name (list of str)
            time_received (list of longs)
            auth_id (list of str)
            user_data (list of str)
        """
        assert isinstance( options, (dict)), "admin_show_jobs(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_show_jobs" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/show/jobs' ) ) 
    # end admin_show_jobs
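    # Example (illustrative sketch): list the current jobs.  Assumes a GPUdb
    # handle `db`; the "true" value for show_details is an assumption, as the
    # generated docs list only the key:
    #
    #   rsp = db.admin_show_jobs( options = { "show_details": "true" } )
    #   for job_id, endpoint in zip( rsp.job_id, rsp.endpoint_name ):
    #       print( job_id, endpoint )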
    # begin admin_show_shards
    def admin_show_shards( self, options = {} ):
        """Show the mapping of shards to the corresponding rank and tom.  The
        response message contains list of 16384 (total number of shards in the
        system) Rank and TOM numbers corresponding to each shard.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            version (long)
                Current shard array version number.
            rank (list of ints)
                Array of ranks indexed by the shard number.
            tom (list of ints)
                Array of toms to which the corresponding shard belongs.
        """
        assert isinstance( options, (dict)), "admin_show_shards(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_show_shards" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/show/shards' ) ) 
    # end admin_show_shards
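    # Example (illustrative sketch): look up which rank and TOM own shard 0.
    # Assumes a GPUdb handle `db`:
    #
    #   rsp = db.admin_show_shards()
    #   print( rsp.version )
    #   print( rsp.rank[ 0 ], rsp.tom[ 0 ] )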
    # begin admin_shutdown
    def admin_shutdown( self, exit_type = None, authorization = None, options = {}
                        ):
        """Exits the database server application.
        Parameters:
            exit_type (str)
                Reserved for future use. User can pass an empty string.
            authorization (str)
                No longer used. User can pass an empty string.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            exit_status (str)
                'OK' immediately before a successful exit.
        """
        assert isinstance( exit_type, (basestring)), "admin_shutdown(): Argument 'exit_type' must be (one) of type(s) '(basestring)'; given %s" % type( exit_type ).__name__
        assert isinstance( authorization, (basestring)), "admin_shutdown(): Argument 'authorization' must be (one) of type(s) '(basestring)'; given %s" % type( authorization ).__name__
        assert isinstance( options, (dict)), "admin_shutdown(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_shutdown" )
        obj = collections.OrderedDict()
        obj['exit_type'] = exit_type
        obj['authorization'] = authorization
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/shutdown' ) ) 
    # end admin_shutdown
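    # Example (illustrative sketch): shut the server down; both arguments
    # take empty strings per the docstring.  Assumes a GPUdb handle `db`:
    #
    #   rsp = db.admin_shutdown( exit_type = '', authorization = '' )
    #   print( rsp.exit_status )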
    # begin admin_verify_db
    def admin_verify_db( self, options = {} ):
        """Verify database is in a consistent state.  When inconsistencies or
        errors are found, the verified_ok flag in the response is set to false
        and the list of errors found is provided in the error_list.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * rebuild_on_error
                * verify_persist
        Returns:
            A dict with the following entries--
            verified_ok (bool)
                True if no errors were found, false otherwise.  Default value
                is 'false'.
            error_list (list of str)
                List of errors found while validating the database internal
                state.  Default value is an empty list ( [] ).
        """
        assert isinstance( options, (dict)), "admin_verify_db(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_verify_db" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/admin/verifydb' ) ) 
    # end admin_verify_db
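    # Example (illustrative sketch): verify the database and report any
    # errors found.  Assumes a GPUdb handle `db`:
    #
    #   rsp = db.admin_verify_db()
    #   if not rsp.verified_ok:
    #       for err in rsp.error_list:
    #           print( err )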
    # begin aggregate_convex_hull
    def aggregate_convex_hull( self, table_name = None, x_column_name = None,
                               y_column_name = None, options = {} ):
        """Calculates and returns the convex hull for the values in a table
        specified by input parameter *table_name*.
        Parameters:
            table_name (str)
                Name of table on which the operation will be performed. Must be
                an existing table.  It cannot be a collection.
            x_column_name (str)
                Name of the column containing the x coordinates of the points
                for the operation being performed.
            y_column_name (str)
                Name of the column containing the y coordinates of the points
                for the operation being performed.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            x_vector (list of floats)
                Array of x coordinates of the resulting convex set.
            y_vector (list of floats)
                Array of y coordinates of the resulting convex set.
            count (int)
                Count of the number of points in the convex set.
            is_valid (bool)
        """
        assert isinstance( table_name, (basestring)), "aggregate_convex_hull(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( x_column_name, (basestring)), "aggregate_convex_hull(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "aggregate_convex_hull(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( options, (dict)), "aggregate_convex_hull(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_convex_hull" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/convexhull' ) ) 
    # end aggregate_convex_hull
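    # Example (illustrative sketch): compute the convex hull of a point set.
    # Assumes a GPUdb handle `db` and a hypothetical table 'points' with
    # numeric columns 'x' and 'y':
    #
    #   rsp = db.aggregate_convex_hull( table_name = 'points',
    #                                   x_column_name = 'x',
    #                                   y_column_name = 'y' )
    #   hull = list( zip( rsp.x_vector, rsp.y_vector ) )  # hull vertices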
    # begin aggregate_group_by
    def aggregate_group_by( self, table_name = None, column_names = None, offset =
                            None, limit = 1000, encoding = 'binary', options =
                            {} ):
        """Calculates unique combinations (groups) of values for the given columns
        in a given table/view/collection and computes aggregates on each unique
        combination. This is somewhat analogous to an SQL-style SELECT...GROUP
        BY.
        Any column(s) can be grouped on, and all column types except
        unrestricted-length strings may be used for computing applicable
        aggregates; columns marked as `store-only
        <../../../concepts/types.html#data-handling>`_ are unable to be used in
        grouping or aggregation.
        The results can be paged via the input parameter *offset* and input
        parameter *limit*. For example, to get 10 groups with the
        largest counts the inputs would be: limit=10,
        options={"sort_order":"descending", "sort_by":"value"}.
        Input parameter *options* can be used to customize the behavior of
        this call, e.g. filtering or sorting the results.
        To group by columns 'x' and 'y' and compute the number of objects
        within each group, use:  column_names=['x','y','count(*)'].
        To also compute the sum of 'z' over each group, use:
        column_names=['x','y','count(*)','sum(z)'].
        Available `aggregation functions
        <../../../concepts/expressions.html#aggregate-expressions>`_ are:
        count(*), sum, min, max, avg, mean, stddev, stddev_pop, stddev_samp,
        var, var_pop, var_samp, arg_min, arg_max and count_distinct.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        If a *result_table* name is specified in the input parameter *options*,
        the results are stored in a new table with that name--no results are
        returned in the response.  Both the table name and resulting column
        names must adhere to `standard naming conventions
        <../../../concepts/tables.html#table>`_; column/aggregation expressions
        will need to be aliased.  If the source table's `shard key
        <../../../concepts/tables.html#shard-keys>`_ is used as the grouping
        column(s), the result table will be sharded; in all other cases it will
        be replicated.  Sorting will properly function only if the result table
        is replicated or if there is only one processing node and should not be
        relied upon in other cases.  Not available when any of the values of
        input parameter *column_names* is an unrestricted-length string.
        Parameters:
            table_name (str)
                Name of the table on which the operation will be performed.
                Must be an existing table/view/collection.
            column_names (list of str)
                List of one or more column names, expressions, and aggregate
                expressions.  The user can provide a single element (which will
                be automatically promoted to a list internally) or a list.
            offset (long)
                A non-negative integer indicating the number of initial results to
                skip (this can be useful for paging through the results).  The
                minimum allowed value is 0. The maximum allowed value is
                MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned, or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 1000.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * **binary** --
                  Indicates that the returned records should be binary encoded.
                * **json** --
                  Indicates that the returned records should be json encoded.
                  The default value is 'binary'.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **collection_name** --
                    Name of a collection which is to contain the table
                    specified in *result_table*. If the collection provided is
                    non-existent, the collection will be automatically created.
                    If empty, then the table will be a top-level table.
                    Additionally this option is invalid if input parameter
                    *table_name* is a collection.
                  * **expression** --
                    Filter expression to apply to the table prior to computing
                    the aggregate group by.
                  * **having** --
                    Filter expression to apply to the aggregated results.
                  * **sort_order** --
                    String indicating how the returned values should be sorted
                    - ascending or descending.
                    Allowed values are:
                    * **ascending** --
                      Indicates that the returned values should be sorted in
                      ascending order.
                    * **descending** --
                      Indicates that the returned values should be sorted in
                      descending order.
                      The default value is 'ascending'.
                  * **sort_by** --
                    String determining how the results are sorted.
                    Allowed values are:
                    * **key** --
                      Indicates that the returned values should be sorted by
                      key, which corresponds to the grouping columns. If you
                      have multiple grouping columns (and are sorting by key),
                      it will first sort the first grouping column, then the
                      second grouping column, etc.
                    * **value** --
                      Indicates that the returned values should be sorted by
                      value, which corresponds to the aggregates. If you have
                      multiple aggregates (and are sorting by value), it will
                      first sort by the first aggregate, then the second
                      aggregate, etc.
                      The default value is 'value'.
                  * **result_table** --
                    The name of the table used to store the results. Has the
                    same naming restrictions as `tables
                    <../../../concepts/tables.html>`_. Column names (group-by
                    and aggregate fields) need to be given aliases e.g.
                    ["FChar256 as fchar256", "sum(FDouble) as sfd"].  If
                    present, no results are returned in the response.  This
                    option is not available if one of the grouping attributes
                    is an unrestricted string (i.e., not charN) type.
                  * **result_table_persist** --
                    If *true*, then the result table specified in
                    *result_table* will be persisted and will not expire unless
                    a *ttl* is specified.  If *false*, then the result table
                    will be an in-memory table and will expire unless a *ttl*
                    is specified.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
                  * **result_table_force_replicated** --
                    Force the result table to be replicated (ignores any
                    sharding). Must be used in combination with the
                    *result_table* option.
                  * **result_table_generate_pk** --
                    If 'true' then set a primary key for the result table. Must
                    be used in combination with the *result_table* option.
                  * **ttl** --
                    Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                    specified in *result_table*.
                  * **chunk_size** --
                    Indicates the chunk size to be used for the result table.
                    Must be used in combination with the *result_table* option.
                  * **materialize_on_gpu** --
                    If *true* then the columns of the groupby result table will
                    be cached on the GPU. Must be used in combination with the
                    *result_table* option.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
        Returns:
            A dict with the following entries--
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            total_number_of_records (long)
                Total/Filtered number of records.
            has_more_records (bool)
                Too many records. Returned a partial set.
        """
        assert isinstance( table_name, (basestring)), "aggregate_group_by(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        column_names = column_names if isinstance( column_names, list ) else ( [] if (column_names is None) else [ column_names ] )
        assert isinstance( offset, (int, long, float)), "aggregate_group_by(): Argument 'offset' must be (one) of type(s) '(int, long, float)'; given %s" % type( offset ).__name__
        assert isinstance( limit, (int, long, float)), "aggregate_group_by(): Argument 'limit' must be (one) of type(s) '(int, long, float)'; given %s" % type( limit ).__name__
        assert isinstance( encoding, (basestring)), "aggregate_group_by(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "aggregate_group_by(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_group_by" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_names'] = column_names
        obj['offset'] = offset
        obj['limit'] = limit
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/groupby' ) ) 
    # end aggregate_group_by
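    # Example (illustrative sketch): the 10 regions with the largest sales,
    # JSON-encoded.  Assumes a GPUdb handle `db` and a hypothetical table
    # 'sales' with columns 'region' and 'amount':
    #
    #   rsp = db.aggregate_group_by( table_name = 'sales',
    #                                column_names = [ 'region', 'count(*)',
    #                                                 'sum(amount) as total' ],
    #                                offset = 0, limit = 10,
    #                                encoding = 'json',
    #                                options = { "sort_by": "value",
    #                                            "sort_order": "descending" } )
    #   print( rsp.total_number_of_records )
    #   print( rsp.json_encoded_response )  # dynamic-schema payload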
    # begin aggregate_histogram
    def aggregate_histogram( self, table_name = None, column_name = None, start =
                             None, end = None, interval = None, options = {} ):
        """Performs a histogram calculation given a table, a column, and an
        interval function. The input parameter *interval* is used to produce
        bins of that size and the result, computed over the records falling
        within each bin, is returned.  For each bin, the start value is
        inclusive, but the end value is exclusive--except for the very last bin
        for which the end value is also inclusive.  The value returned for each
        bin is the number of records in it, except when a column name is
        provided as a *value_column* in input parameter *options*.  In this
        latter case the sum of the values corresponding to the *value_column*
        is used as the result instead.
        Parameters:
            table_name (str)
                Name of the table on which the operation will be performed.
                Must be an existing table or collection.
            column_name (str)
                Name of a column or an expression of one or more column names
                over which the histogram will be calculated.
            start (float)
                Lower end value of the histogram interval, inclusive.
            end (float)
                Upper end value of the histogram interval, inclusive.
            interval (float)
                The size of each bin within the start and end parameters.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **value_column** --
                  The name of the column to use when calculating the bin values
                  (values are summed).  The column must be a numerical type
                  (int, double, long, float).
        Returns:
            A dict with the following entries--
            counts (list of floats)
                The array of calculated values that represents the histogram
                data points.
            start (float)
                Value of input parameter *start*.
            end (float)
                Value of input parameter *end*.
        """
        assert isinstance( table_name, (basestring)), "aggregate_histogram(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( column_name, (basestring)), "aggregate_histogram(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( start, (int, long, float)), "aggregate_histogram(): Argument 'start' must be (one) of type(s) '(int, long, float)'; given %s" % type( start ).__name__
        assert isinstance( end, (int, long, float)), "aggregate_histogram(): Argument 'end' must be (one) of type(s) '(int, long, float)'; given %s" % type( end ).__name__
        assert isinstance( interval, (int, long, float)), "aggregate_histogram(): Argument 'interval' must be (one) of type(s) '(int, long, float)'; given %s" % type( interval ).__name__
        assert isinstance( options, (dict)), "aggregate_histogram(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_histogram" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_name'] = column_name
        obj['start'] = start
        obj['end'] = end
        obj['interval'] = interval
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/histogram' ) ) 
    # end aggregate_histogram
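    # Example (illustrative sketch): ten equal-width bins over [0, 100].
    # Assumes a GPUdb handle `db` and a hypothetical table 'sales' with a
    # numeric column 'amount':
    #
    #   rsp = db.aggregate_histogram( table_name = 'sales',
    #                                 column_name = 'amount',
    #                                 start = 0, end = 100, interval = 10 )
    #   print( rsp.counts )  # one count per bin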
    # begin aggregate_k_means
    def aggregate_k_means( self, table_name = None, column_names = None, k = None,
                           tolerance = None, options = {} ):
        """This endpoint runs the k-means algorithm - a heuristic algorithm that
        attempts to do k-means clustering.  An ideal k-means clustering
        algorithm selects k points such that the sum of the mean squared
        distances of each member of the set to the nearest of the k points is
        minimized.  The k-means algorithm, however, does not necessarily produce
        such an ideal clustering.  It begins with a randomly selected set of k
        points and then refines the location of the points iteratively and
        settles to a local minimum.  Various parameters and options are
        provided to control the heuristic search.
        Parameters:
            table_name (str)
                Name of the table on which the operation will be performed.
                Must be an existing table or collection.
            column_names (list of str)
                List of column names on which the operation would be performed.
                If n columns are provided then each of the k result points will
                have n dimensions corresponding to the n columns.  The user can
                provide a single element (which will be automatically promoted
                to a list internally) or a list.
            k (int)
                The number of mean points to be determined by the algorithm.
            tolerance (float)
                Stop iterating when the distances between successive points
                are less than the given tolerance.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **whiten** --
                  When set to 1 each of the columns is first normalized by its
                  stdv - default is not to whiten.
                * **max_iters** --
                  Number of times to try to hit the tolerance limit before
                  giving up - default is 10.
                * **num_tries** --
                  Number of times to run the k-means algorithm with different
                  randomly selected starting points - helps avoid a local
                  minimum. Default is 1.
        Returns:
            A dict with the following entries--
            means (list of lists of floats)
                The k-mean values found.
            counts (list of longs)
                The number of elements in the cluster closest to the corresponding
                k-means values.
            rms_dists (list of floats)
                The root mean squared distance of the elements in the cluster
                for each of the k-means values.
            count (long)
                The total count of all the clusters - will be the size of the
                input table.
            rms_dist (float)
                The sum of all the rms_dists - the value the k-means algorithm
                is attempting to minimize.
            tolerance (float)
                The distance between the last two iterations of the algorithm
                before it quit.
            num_iters (int)
                The number of iterations the algorithm executed before it quit.
        """
        assert isinstance( table_name, (basestring)), "aggregate_k_means(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        column_names = column_names if isinstance( column_names, list ) else ( [] if (column_names is None) else [ column_names ] )
        assert isinstance( k, (int, long, float)), "aggregate_k_means(): Argument 'k' must be (one) of type(s) '(int, long, float)'; given %s" % type( k ).__name__
        assert isinstance( tolerance, (int, long, float)), "aggregate_k_means(): Argument 'tolerance' must be (one) of type(s) '(int, long, float)'; given %s" % type( tolerance ).__name__
        assert isinstance( options, (dict)), "aggregate_k_means(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_k_means" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_names'] = column_names
        obj['k'] = k
        obj['tolerance'] = tolerance
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/kmeans' ) ) 
    # end aggregate_k_means
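    # Example (illustrative sketch): cluster 2-D points into 3 groups.
    # Assumes a GPUdb handle `db` and a hypothetical table 'points' with
    # numeric columns 'x' and 'y':
    #
    #   rsp = db.aggregate_k_means( table_name = 'points',
    #                               column_names = [ 'x', 'y' ],
    #                               k = 3, tolerance = 0.01,
    #                               options = { "max_iters": "50",
    #                                           "num_tries": "3" } )
    #   for mean, count in zip( rsp.means, rsp.counts ):
    #       print( mean, count )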
    # begin aggregate_min_max
    def aggregate_min_max( self, table_name = None, column_name = None, options = {}
                           ):
        """Calculates and returns the minimum and maximum values of a particular
        column in a table.
        Parameters:
            table_name (str)
                Name of the table on which the operation will be performed.
                Must be an existing table.
            column_name (str)
                Name of a column or an expression of one or more columns on
                which the min-max will be calculated.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            min (float)
                Minimum value of the input parameter *column_name*.
            max (float)
                Maximum value of the input parameter *column_name*.
        """
        assert isinstance( table_name, (basestring)), "aggregate_min_max(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( column_name, (basestring)), "aggregate_min_max(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( options, (dict)), "aggregate_min_max(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_min_max" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_name'] = column_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/minmax' ) ) 
    # end aggregate_min_max
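    # Example (illustrative sketch): range of a numeric column.  Assumes a
    # GPUdb handle `db` and a hypothetical table 'sales' with column 'amount':
    #
    #   rsp = db.aggregate_min_max( table_name = 'sales',
    #                               column_name = 'amount' )
    #   print( rsp.min, rsp.max )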
    # begin aggregate_min_max_geometry
    def aggregate_min_max_geometry( self, table_name = None, column_name = None,
                                    options = {} ):
        """Calculates and returns the minimum and maximum x- and y-coordinates of
        a particular geospatial geometry column in a table.
        Parameters:
            table_name (str)
                Name of the table on which the operation will be performed.
                Must be an existing table.
            column_name (str)
                Name of a geospatial geometry column on which the min-max will
                be calculated.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            min_x (float)
                Minimum x-coordinate value of the input parameter
                *column_name*.
            max_x (float)
                Maximum x-coordinate value of the input parameter
                *column_name*.
            min_y (float)
                Minimum y-coordinate value of the input parameter
                *column_name*.
            max_y (float)
                Maximum y-coordinate value of the input parameter
                *column_name*.
        """
        assert isinstance( table_name, (basestring)), "aggregate_min_max_geometry(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( column_name, (basestring)), "aggregate_min_max_geometry(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( options, (dict)), "aggregate_min_max_geometry(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_min_max_geometry" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_name'] = column_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/minmax/geometry' ) ) 
    # end aggregate_min_max_geometry
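    # Example (illustrative sketch): bounding box of a geometry column.
    # Assumes a GPUdb handle `db` and a hypothetical table 'shapes' with a
    # geometry column 'geom':
    #
    #   rsp = db.aggregate_min_max_geometry( table_name = 'shapes',
    #                                        column_name = 'geom' )
    #   bbox = ( rsp.min_x, rsp.min_y, rsp.max_x, rsp.max_y )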
    # begin aggregate_statistics
    def aggregate_statistics( self, table_name = None, column_name = None, stats =
                              None, options = {} ):
        """Calculates the requested statistics of the given column(s) in a given
        table.
        The available statistics are *count* (number of total objects), *mean*,
        *stdv* (standard deviation), *variance*, *skew*, *kurtosis*, *sum*,
        *min*, *max*, *weighted_average*, *cardinality* (unique count),
        *estimated_cardinality*, *percentile* and *percentile_rank*.
        Estimated cardinality is calculated by using the hyperloglog
        approximation technique.
        Percentiles and percentile ranks are approximate and are calculated
        using the t-digest algorithm. They must include the desired
        *percentile*/*percentile_rank*. To compute multiple percentiles each
        value must be specified separately (i.e.
        'percentile(75.0),percentile(99.0),percentile_rank(1234.56),percentile_rank(-5)').
        The weighted average statistic requires a *weight_column_name* to be
        specified in input parameter *options*. The weighted average is then
        defined as the sum of the products of input parameter *column_name*
        times the *weight_column_name* values divided by the sum of the
        *weight_column_name* values.
        Additional columns can be used in the calculation of statistics via the
        *additional_column_names* option.  Values in these columns will be
        included in the overall aggregate calculation--individual aggregates
        will not be calculated per additional column.  For instance, requesting
        the *count* & *mean* of input parameter *column_name* x and
        *additional_column_names* y & z, where x holds the numbers 1-10, y
        holds 11-20, and z holds 21-30, would return the total number of x, y,
        & z values (30), and the single average value across all x, y, & z
        values (15.5).
        The response includes a list of key/value pairs of each statistic
        requested and its corresponding value.
        Parameters:
            table_name (str)
                Name of the table on which the statistics operation will be
                performed.
            column_name (str)
                Name of the primary column for which the statistics are to be
                calculated.
            stats (str)
                Comma separated list of the statistics to calculate, e.g.
                "sum,mean".
                Allowed values are:
                * **count** --
                  Number of objects (independent of the given column(s)).
                * **mean** --
                  Arithmetic mean (average), equivalent to sum/count.
                * **stdv** --
                  Sample standard deviation (denominator is count-1).
                * **variance** --
                  Unbiased sample variance (denominator is count-1).
                * **skew** --
                  Skewness (third standardized moment).
                * **kurtosis** --
                  Kurtosis (fourth standardized moment).
                * **sum** --
                  Sum of all values in the column(s).
                * **min** --
                  Minimum value of the column(s).
                * **max** --
                  Maximum value of the column(s).
                * **weighted_average** --
                  Weighted arithmetic mean (using the option
                  *weight_column_name* as the weighting column).
                * **cardinality** --
                  Number of unique values in the column(s).
                * **estimated_cardinality** --
                  Estimate (via hyperloglog technique) of the number of unique
                  values in the column(s).
                * **percentile** --
                  Estimate (via t-digest) of the given percentile of the
                  column(s) (percentile(50.0) will be an approximation of the
                  median).
                * **percentile_rank** --
                  Estimate (via t-digest) of the percentile rank of the given
                  value in the column(s) (if the given value is the median of
                  the column(s), percentile_rank(<median>) will return
                  approximately 50.0).
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **additional_column_names** --
                    A list of comma separated column names over which
                    statistics can be accumulated along with the primary
                    column.  All columns listed and input parameter
                    *column_name* must be of the same type.  Must not include
                    the column specified in input parameter *column_name* and
                    no column can be listed twice.
                  * **weight_column_name** --
                    Name of column used as weighting attribute for the weighted
                    average statistic.
        Returns:
            A dict with the following entries--
            stats (dict of str to floats)
                (statistic name, double value) pairs of the requested
                statistics, including the total count by default.
        """
        assert isinstance( table_name, (basestring)), "aggregate_statistics(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( column_name, (basestring)), "aggregate_statistics(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( stats, (basestring)), "aggregate_statistics(): Argument 'stats' must be (one) of type(s) '(basestring)'; given %s" % type( stats ).__name__
        assert isinstance( options, (dict)), "aggregate_statistics(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_statistics" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_name'] = column_name
        obj['stats'] = stats
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/statistics' ) ) 
    # end aggregate_statistics
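    # Example (illustrative sketch): count, mean, and approximate median of a
    # column.  Assumes a GPUdb handle `db` and a hypothetical table 'sales'
    # with numeric column 'amount':
    #
    #   rsp = db.aggregate_statistics( table_name = 'sales',
    #                                  column_name = 'amount',
    #                                  stats = 'count,mean,percentile(50.0)' )
    #   print( rsp.stats[ 'mean' ] )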
    # begin aggregate_statistics_by_range
    def aggregate_statistics_by_range( self, table_name = None, select_expression =
                                       '', column_name = None, value_column_name
                                       = None, stats = None, start = None, end =
                                       None, interval = None, options = {} ):
        """Divides the given set into bins and calculates statistics of the values
        of a value-column in each bin.  The bins are based on the values of a
        given binning-column.  The statistics that may be requested are mean,
        stdv (standard deviation), variance, skew, kurtosis, sum, min, max,
        first, last and weighted average. In addition to the requested
        statistics the count of total samples in each bin is returned. This
        counts vector is just the histogram of the column used to divide the
        set members into bins. The weighted average statistic requires a
        weight_column to be specified in input parameter *options*. The
        weighted average is then defined as the sum of the products of the
        value column times the weight column divided by the sum of the weight
        column.
        There are two methods for binning the set members. In the first, which
        can be used for numeric valued binning-columns, a min, max and interval
        are specified. The number of bins, nbins, is the integer upper bound of
        (max-min)/interval. Values that fall in the range
        [min+n\*interval,min+(n+1)\*interval) are placed in the nth bin where n
        ranges from 0..nbins-2. The final bin is [min+(nbins-1)\*interval,max].
        In the second method, the *bin_values* key of input parameter
        *options* specifies a list of binning-column values. Records whose
        binning-column value matches the nth member of the bin_values list
        are placed in the nth bin. When a
        list is provided the binning-column must be of type string or int.
        Parameters:
            table_name (str)
                Name of the table on which the ranged-statistics operation will
                be performed.
            select_expression (str)
                For a non-empty expression statistics are calculated for those
                records for which the expression is true.  Default value is ''.
            column_name (str)
                Name of the binning-column used to divide the set samples into
                bins.
            value_column_name (str)
                Name of the value-column for which statistics are to be
                computed.
            stats (str)
                A comma separated list of the statistics to
                calculate, e.g. 'sum,mean'. Available statistics: mean, stdv
                (standard deviation), variance, skew, kurtosis, sum.
            start (float)
                The lower bound of the binning-column.
            end (float)
                The upper bound of the binning-column.
            interval (float)
                The interval of a bin. Set members fall into bin i if the
                binning-column falls in the range [start+interval``*``i,
                start+interval``*``(i+1)).
            options (dict of str to str)
                Map of optional parameters:  Default value is an empty dict (
                {} ).
                Allowed keys are:
                * **additional_column_names** --
                  A list of comma separated value-column names over which
                  statistics can be accumulated along with the primary
                  value_column.
                * **bin_values** --
                  A list of comma separated binning-column values. Values that
                  match the nth bin_values value are placed in the nth bin.
                * **weight_column_name** --
                  Name of the column used as weighting column for the
                  weighted_average statistic.
                * **order_column_name** --
                  Name of the column used for candlestick charting techniques.
        Returns:
            A dict with the following entries--
            stats (dict of str to lists of floats)
                A map with a key for each statistic in the stats input
                parameter having a value that is a vector of the corresponding
                value-column bin statistics. In addition, the key 'count' has
                a value that is a histogram of the binning-column.
        """
        assert isinstance( table_name, (basestring)), "aggregate_statistics_by_range(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( select_expression, (basestring)), "aggregate_statistics_by_range(): Argument 'select_expression' must be (one) of type(s) '(basestring)'; given %s" % type( select_expression ).__name__
        assert isinstance( column_name, (basestring)), "aggregate_statistics_by_range(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( value_column_name, (basestring)), "aggregate_statistics_by_range(): Argument 'value_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( value_column_name ).__name__
        assert isinstance( stats, (basestring)), "aggregate_statistics_by_range(): Argument 'stats' must be (one) of type(s) '(basestring)'; given %s" % type( stats ).__name__
        assert isinstance( start, (int, long, float)), "aggregate_statistics_by_range(): Argument 'start' must be (one) of type(s) '(int, long, float)'; given %s" % type( start ).__name__
        assert isinstance( end, (int, long, float)), "aggregate_statistics_by_range(): Argument 'end' must be (one) of type(s) '(int, long, float)'; given %s" % type( end ).__name__
        assert isinstance( interval, (int, long, float)), "aggregate_statistics_by_range(): Argument 'interval' must be (one) of type(s) '(int, long, float)'; given %s" % type( interval ).__name__
        assert isinstance( options, (dict)), "aggregate_statistics_by_range(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_statistics_by_range" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['select_expression'] = select_expression
        obj['column_name'] = column_name
        obj['value_column_name'] = value_column_name
        obj['stats'] = stats
        obj['start'] = start
        obj['end'] = end
        obj['interval'] = interval
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/statistics/byrange' ) ) 
    # end aggregate_statistics_by_range
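    # Example (illustrative sketch): weekly sums and means of 'amount' binned
    # by 'day' over a 30-day window.  Assumes a GPUdb handle `db` and a
    # hypothetical table 'sales':
    #
    #   rsp = db.aggregate_statistics_by_range( table_name = 'sales',
    #                                           column_name = 'day',
    #                                           value_column_name = 'amount',
    #                                           stats = 'sum,mean',
    #                                           start = 0, end = 30,
    #                                           interval = 7 )
    #   print( rsp.stats[ 'count' ] )  # histogram of the binning column
    #   print( rsp.stats[ 'mean' ] )   # per-bin means of the value column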
    # begin aggregate_unique
    def aggregate_unique( self, table_name = None, column_name = None, offset =
                          None, limit = 10000, encoding = 'binary', options = {}
                          ):
        """Returns all the unique values from a particular column (specified by
        input parameter *column_name*) of a particular table or collection
        (specified by input parameter *table_name*). If input parameter
        *column_name* is a numeric column the values will be in output
        parameter *binary_encoded_response*. Otherwise if input parameter
        *column_name* is a string column the values will be in output parameter
        *json_encoded_response*.  The results can be paged via the input
        parameter *offset* and input parameter *limit*.
        Columns marked as `store-only
        <../../../concepts/types.html#data-handling>`_ are unable to be used
        with this function.
        To get the first 10 unique values sorted in descending order input
        parameter *options* would be::
        {"limit":"10","sort_order":"descending"}.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        If a *result_table* name is specified in the input parameter *options*,
        the results are stored in a new table with that name--no results are
        returned in the response.  Both the table name and resulting column
        name must adhere to `standard naming conventions
        <../../../concepts/tables.html#table>`_; any column expression will
        need to be aliased.  If the source table's `shard key
        <../../../concepts/tables.html#shard-keys>`_ is used as the input
        parameter *column_name*, the result table will be sharded; in all other
        cases it will be replicated.  Sorting will properly function only if
        the result table is replicated or if there is only one processing node
        and should not be relied upon in other cases.  Not available if input
        parameter *table_name* is a collection or when the value of input
        parameter *column_name* is an unrestricted-length string.
        Parameters:
            table_name (str)
                Name of an existing table/collection on which the operation
                will be performed.
            column_name (str)
                Name of the column or an expression containing one or more
                column names on which the unique function would be applied.
            offset (long)
                A non-negative integer indicating the number of initial results to
                skip (this can be useful for paging through the results).  The
                minimum allowed value is 0. The maximum allowed value is
                MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned, or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 10000.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * **binary** --
                  Indicates that the returned records should be binary encoded.
                * **json** --
                  Indicates that the returned records should be json encoded.
                  The default value is 'binary'.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **collection_name** --
                    Name of a collection which is to contain the table
                    specified in *result_table*. If the collection provided is
                    non-existent, the collection will be automatically created.
                    If empty, then the table will be a top-level table.
                    Additionally this option is invalid if input parameter
                    *table_name* is a collection.
                  * **expression** --
                    Optional filter expression to apply to the table.
                  * **sort_order** --
                    String indicating how the returned values should be sorted.
                    Allowed values are:
                    * ascending
                    * descending
                    The default value is 'ascending'.
                  * **result_table** --
                    The name of the table used to store the results. If
                    present, no results are returned in the response. Has the
                    same naming restrictions as `tables
                    <../../../concepts/tables.html>`_.  Not available if input
                    parameter *table_name* is a collection or when input
                    parameter *column_name* is an unrestricted-length string.
                  * **result_table_persist** --
                    If *true*, then the result table specified in
                    *result_table* will be persisted and will not expire unless
                    a *ttl* is specified.   If *false*, then the result table
                    will be an in-memory table and will expire unless a *ttl*
                    is specified.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
                  * **result_table_force_replicated** --
                    Force the result table to be replicated (ignores any
                    sharding). Must be used in combination with the
                    *result_table* option.
                  * **result_table_generate_pk** --
                    If 'true' then set a primary key for the result table. Must
                    be used in combination with the *result_table* option.
                  * **ttl** --
                    Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                    specified in *result_table*.
                  * **chunk_size** --
                    Indicates the chunk size to be used for the result table.
                    Must be used in combination with the *result_table* option.
        Returns:
            A dict with the following entries--
            table_name (str)
                The same table name as was passed in the parameter list.
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            has_more_records (bool)
                Indicates whether there were too many records to return, in
                which case only a partial set was returned.
        """
        assert isinstance( table_name, (basestring)), "aggregate_unique(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( column_name, (basestring)), "aggregate_unique(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( offset, (int, long, float)), "aggregate_unique(): Argument 'offset' must be (one) of type(s) '(int, long, float)'; given %s" % type( offset ).__name__
        assert isinstance( limit, (int, long, float)), "aggregate_unique(): Argument 'limit' must be (one) of type(s) '(int, long, float)'; given %s" % type( limit ).__name__
        assert isinstance( encoding, (basestring)), "aggregate_unique(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "aggregate_unique(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_unique" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_name'] = column_name
        obj['offset'] = offset
        obj['limit'] = limit
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/unique' ) ) 
    # end aggregate_unique
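    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and that a table 'employees' with a
    # 'department' column exists; both names are hypothetical.
    #
    #   response = db.aggregate_unique(
    #       table_name  = 'employees',
    #       column_name = 'department',
    #       offset      = 0,
    #       limit       = 100,
    #       encoding    = 'json',
    #       options     = {'sort_order': 'descending'} )
    #   print( response['json_encoded_response'] )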
    # begin aggregate_unpivot
    def aggregate_unpivot( self, table_name = None, variable_column_name = '',
                           value_column_name = '', pivoted_columns = None,
                           encoding = 'binary', options = {} ):
        """Rotate the column values into rows values.
        The aggregate unpivot is used to normalize tables that are built for
        cross tabular reporting purposes. The unpivot operator rotates the
        column values for all the pivoted columns. A variable column, value
        column and all columns from the source table except the unpivot columns
        are projected into the result table. The variable column and value
        columns in the result table indicate the pivoted column name and values
        respectively.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        Parameters:
            table_name (str)
                Name of the table on which the operation will be performed.
                Must be an existing table/view.
            variable_column_name (str)
                Specifies the variable/parameter column name.  Default value is
                ''.
            value_column_name (str)
                Specifies the value column name.  Default value is ''.
            pivoted_columns (list of str)
                List of one or more values, typically the column names of the
                input table. All the pivoted columns must have the same data
                type.  The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * **binary** --
                  Indicates that the returned records should be binary encoded.
                * **json** --
                  Indicates that the returned records should be JSON encoded.
                  The default value is 'binary'.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **collection_name** --
                    Name of a collection which is to contain the table
                    specified in *result_table*. If the collection provided is
                    non-existent, the collection will be automatically created.
                    If empty, then the table will be a top-level table.
                  * **result_table** --
                    The name of the table used to store the results. Has the
                    same naming restrictions as `tables
                    <../../../concepts/tables.html>`_. If present, no results
                    are returned in the response.
                  * **result_table_persist** --
                    If *true*, then the result table specified in
                    *result_table* will be persisted and will not expire unless
                    a *ttl* is specified.   If *false*, then the result table
                    will be an in-memory table and will expire unless a *ttl*
                    is specified.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
                  * **expression** --
                    Filter expression to apply to the table prior to unpivot
                    processing.
                  * **order_by** --
                    Comma-separated list of the columns to be sorted by; e.g.
                    'timestamp asc, x desc'.  The columns specified must be
                    present in input table.  If any alias is given for any
                    column name, the alias must be used, rather than the
                    original column name.
                  * **chunk_size** --
                    Indicates the chunk size to be used for the result table.
                    Must be used in combination with the *result_table* option.
                  * **limit** --
                    The number of records to keep.
                  * **ttl** --
                    Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                    specified in *result_table*.
        Returns:
            A dict with the following entries--
            table_name (str)
                Typically shows the result-table name if provided in the
                request (ignore otherwise).
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            total_number_of_records (long)
                Total/Filtered number of records.
            has_more_records (bool)
                Indicates whether there were too many records to return, in
                which case only a partial set was returned.
        """
        assert isinstance( table_name, (basestring)), "aggregate_unpivot(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( variable_column_name, (basestring)), "aggregate_unpivot(): Argument 'variable_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( variable_column_name ).__name__
        assert isinstance( value_column_name, (basestring)), "aggregate_unpivot(): Argument 'value_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( value_column_name ).__name__
        pivoted_columns = pivoted_columns if isinstance( pivoted_columns, list ) else ( [] if (pivoted_columns is None) else [ pivoted_columns ] )
        assert isinstance( encoding, (basestring)), "aggregate_unpivot(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "aggregate_unpivot(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "aggregate_unpivot" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['variable_column_name'] = variable_column_name
        obj['value_column_name'] = value_column_name
        obj['pivoted_columns'] = pivoted_columns
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/aggregate/unpivot' ) ) 
    # end aggregate_unpivot
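    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and a table 'quarterly_sales' whose columns
    # 'q1' through 'q4' share one data type; all names are hypothetical.
    #
    #   response = db.aggregate_unpivot(
    #       table_name           = 'quarterly_sales',
    #       variable_column_name = 'quarter',
    #       value_column_name    = 'sales',
    #       pivoted_columns      = ['q1', 'q2', 'q3', 'q4'],
    #       encoding             = 'json',
    #       options              = {'result_table': 'sales_unpivoted'} )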
    # begin alter_system_properties
    def alter_system_properties( self, property_updates_map = None, options = {} ):
        """The :meth:`.alter_system_properties` endpoint is primarily used to
        simplify the testing of the system and is not expected to be used
        during normal execution.  Commands are given through the input
        parameter *property_updates_map* whose keys are commands and values are
        strings representing integer values (for example '8000') or boolean
        values ('true' or 'false').
        Parameters:
            property_updates_map (dict of str to str)
                Map containing the properties of the system to be updated.
                Error if empty.
                Allowed keys are:
                * **sm_omp_threads** --
                  Set the number of OpenMP threads that will be used to service
                  filter & aggregation requests against collections to the
                  specified integer value.
                * **kernel_omp_threads** --
                  Set the number of kernel OpenMP threads to the specified
                  integer value.
                * **concurrent_kernel_execution** --
                  Enables concurrent kernel execution if the value is *true*
                  and disables it if the value is *false*.
                  Allowed values are:
                  * true
                  * false
                * **chunk_size** --
                  Sets the chunk size of all new sets to the specified integer
                  value.
                * **execution_mode** --
                  Sets the execution mode for kernel executions to the
                  specified string value. Possible values are host, device,
                  default (engine decides), or an integer value that indicates
                  the maximum chunk size to execute on the host.
                * **flush_to_disk** --
                  Flushes any changes to any tables to the persistent store.
                  These changes include updates to the vector store, object
                  store, and text search store. The value string is ignored.
                * **clear_cache** --
                  Clears cached results.  Useful to allow repeated timing of
                  endpoints. The value string is ignored.
                * **communicator_test** --
                  Invoke the communicator test and report timing results. The
                  value string is a comma-separated list of <key>=<value>
                  expressions.  Valid expressions are: num_transactions=<num>,
                  where num is the number of request-reply transactions to
                  invoke per test; message_size=<bytes>, where bytes is the
                  size of the messages to send, in bytes; and
                  check_values=<enabled>, where, if enabled is true, the
                  values of the messages received are verified.
                * **set_message_timers_enabled** --
                  Enables the communicator test to collect additional timing
                  statistics when the value string is *true*; disables the
                  collection when the value string is *false*.
                  Allowed values are:
                  * true
                  * false
                * **bulk_add_test** --
                  Invoke the bulk add test and report timing results. The
                  value string is ignored.
                * **network_speed** --
                  Invoke the network speed test and report timing results.
                  The value string is a semicolon-separated list of
                  <key>=<value>
                  expressions.  Valid expressions are: seconds=<time> where
                  time is the time in seconds to run the test; data_size=<size>
                  where size is the size in bytes of the block to be
                  transferred; threads=<number of threads>;
                  to_ranks=<space-separated list of ranks> where the list of
                  ranks is the ranks that rank 0 will send data to and get data
                  from. If to_ranks is unspecified then all worker ranks are
                  used.
                * **request_timeout** --
                  Number of minutes after which filtering (e.g.,
                  :meth:`.filter`) and aggregating (e.g.,
                  :meth:`.aggregate_group_by`) queries will time out.
                * **max_get_records_size** --
                  The maximum number of records the database will serve for a
                  given data retrieval call.
                * **memory_allocation_limit_mb** --
                  Set the memory allocation limit for all rank processes, in
                  megabytes; 0 means no limit. Overrides any individual rank
                  memory allocation limits.
                * **enable_audit** --
                  Enable or disable auditing.
                * **audit_headers** --
                  Enable or disable auditing of request headers.
                * **audit_body** --
                  Enable or disable auditing of request bodies.
                * **audit_data** --
                  Enable or disable auditing of request data.
                * **enable_job_manager** --
                  Enable JobManager to enforce processing of requests in the
                  order received.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            updated_properties_map (dict of str to str)
                Map of the values that were updated; for speed tests, a map of
                the values measured to the measurements.
        """
        assert isinstance( property_updates_map, (dict)), "alter_system_properties(): Argument 'property_updates_map' must be (one) of type(s) '(dict)'; given %s" % type( property_updates_map ).__name__
        assert isinstance( options, (dict)), "alter_system_properties(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "alter_system_properties" )
        obj = collections.OrderedDict()
        obj['property_updates_map'] = self.__sanitize_dicts( property_updates_map )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/alter/system/properties' ) ) 
    # end alter_system_properties
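    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance with administrative access.  Note that all
    # values must be passed as strings, per the docstring above.
    #
    #   response = db.alter_system_properties(
    #       property_updates_map = {'concurrent_kernel_execution': 'true',
    #                               'chunk_size': '1000000'},
    #       options = {} )
    #   print( response['updated_properties_map'] )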
    # begin alter_table
    def alter_table( self, table_name = None, action = None, value = None, options =
                     {} ):
        """Apply various modifications to a table, view, or collection.  The
        available
        modifications include the following:
        Create or delete an `index
        <../../../concepts/indexes.html#column-index>`_ on a
        particular column. This can speed up certain operations when using
        expressions
        containing equality or relational operators on indexed columns. This
        only
        applies to tables.
        Set the `time-to-live (TTL) <../../../concepts/ttl.html>`_. This can be
        applied
        to tables, views, or collections.  When applied to collections, every
        contained
        table & view that is not protected will have its TTL set to the given
        value.
        Set the global access mode (i.e. locking) for a table. The mode can be
        set to
        'no_access', 'read_only', 'write_only' or 'read_write'.
        Change the `protection <../../../concepts/protection.html>`_ mode to
        prevent or
        allow automatic expiration. This can be applied to tables, views, and
        collections.
        Allow homogeneous tables within a collection.
        Manage a table's columns--a column can be added, removed, or have its
        `type and properties <../../../concepts/types.html>`_ modified.
        Set or unset `compression <../../../concepts/compression.html>`_ for a
        column.
        Parameters:
            table_name (str)
                Table on which the operation will be performed. Must be an
                existing table, view, or collection.
            action (str)
                Modification operation to be applied
                Allowed values are:
                * **allow_homogeneous_tables** --
                  Sets whether homogeneous tables are allowed in the given
                  collection. This action is only valid if input parameter
                  *table_name* is a collection. The input parameter *value*
                  must be either 'true' or 'false'.
                * **create_index** --
                  Creates an `index
                  <../../../concepts/indexes.html#column-index>`_ on the column
                  name specified in input parameter *value*. If this column is
                  already indexed, an error will be returned.
                * **delete_index** --
                  Deletes an existing `index
                  <../../../concepts/indexes.html#column-index>`_ on the column
                  name specified in input parameter *value*. If this column
                  does not have indexing turned on, an error will be returned.
                * **move_to_collection** --
                  Moves a table into a collection input parameter *value*.
                * **protected** --
                  Sets whether the given input parameter *table_name* should be
                  `protected <../../../concepts/protection.html>`_ or not. The
                  input parameter *value* must be either 'true' or 'false'.
                * **rename_table** --
                  Renames a table, view or collection to input parameter
                  *value*. Has the same naming restrictions as `tables
                  <../../../concepts/tables.html>`_.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the table,
                  view, or collection specified in input parameter
                  *table_name*.
                * **add_column** --
                  Adds the column specified in input parameter *value* to the
                  table specified in input parameter *table_name*.  Use
                  *column_type* and *column_properties* in input parameter
                  *options* to set the column's type and properties,
                  respectively.
                * **change_column** --
                  Changes type and properties of the column specified in input
                  parameter *value*.  Use *column_type* and *column_properties*
                  in input parameter *options* to set the column's type and
                  properties, respectively.
                * **set_column_compression** --
                  Modifies the `compression
                  <../../../concepts/compression.html>`_ setting on the column
                  specified in input parameter *value*.
                * **delete_column** --
                  Deletes the column specified in input parameter *value* from
                  the table specified in input parameter *table_name*.
                * **create_foreign_key** --
                  Creates a `foreign key
                  <../../../concepts/tables.html#foreign-key>`_ using the
                  format 'source_column references
                  target_table(primary_key_column) [ as <foreign_key_name> ]'.
                * **delete_foreign_key** --
                  Deletes a `foreign key
                  <../../../concepts/tables.html#foreign-key>`_.  The input
                  parameter *value* should be the <foreign_key_name> specified
                  when creating the key or the complete string used to define
                  it.
                * **set_global_access_mode** --
                  Sets the global access mode (i.e. locking) for the table
                  specified in input parameter *table_name*. Specify the access
                  mode in input parameter *value*. Valid modes are 'no_access',
                  'read_only', 'write_only' and 'read_write'.
            value (str)
                  The value of the modification. May be a column name, 'true'
                  or 'false', a TTL, or the global access mode depending on
                  input parameter *action*.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **column_default_value** --
                    When adding a column, set a default value for existing
                    records.
                  * **column_properties** --
                    When adding or changing a column, set the column properties
                    (strings, separated by a comma: data, store_only,
                    text_search, char8, int8 etc).
                  * **column_type** --
                    When adding or changing a column, set the column type
                    (strings, separated by a comma: int, double, string, null
                    etc).
                  * **compression_type** --
                    When setting column compression (*set_column_compression*
                    for input parameter *action*), compression type to use:
                    *none* (to use no compression) or a valid compression type.
                    Allowed values are:
                    * none
                    * snappy
                    * lz4
                    * lz4hc
                    The default value is 'snappy'.
                  * **copy_values_from_column** --
                    When adding or changing a column, enter a column name from
                    the same table being altered to use as a source for the
                    column being added/changed; values will be copied from this
                    source column into the new/modified column.
                  * **rename_column** --
                    When changing a column, specify new column name.
                  * **validate_change_column** --
                    When changing a column, validate the change before applying
                    it. If *true*, then validate all values. A value too large
                    (or too long) for the new type will prevent any change. If
                    *false*, then when a value is too large or long, it will be
                    truncated.
                    Allowed values are:
                    * **true** --
                      true
                    * **false** --
                      false
                      The default value is 'true'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Table on which the operation was performed.
            action (str)
                Modification operation that was performed.
            value (str)
                The value of the modification that was performed.
            type_id (str)
                The ID of the table's type (when changing a table, a new type
                may be created).
            type_definition (str)
                The type definition (when changing a table, a new type may be
                created).
            properties (dict of str to lists of str)
                The type properties (when changing a table, a new type may be
                created).
            label (str)
                The type label (when changing a table, a new type may be
                created).
        """
        assert isinstance( table_name, (basestring)), "alter_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( action, (basestring)), "alter_table(): Argument 'action' must be (one) of type(s) '(basestring)'; given %s" % type( action ).__name__
        assert isinstance( value, (basestring)), "alter_table(): Argument 'value' must be (one) of type(s) '(basestring)'; given %s" % type( value ).__name__
        assert isinstance( options, (dict)), "alter_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "alter_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['action'] = action
        obj['value'] = value
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/alter/table' ) ) 
    # end alter_table
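    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and an existing table 'employees'; the table
    # and column names are hypothetical.
    #
    #   # Index a column to speed up filters using equality or relational
    #   # operators on it:
    #   db.alter_table( table_name = 'employees',
    #                   action     = 'create_index',
    #                   value      = 'department' )
    #   # Add a column, copying its initial values from an existing column:
    #   db.alter_table( table_name = 'employees',
    #                   action     = 'add_column',
    #                   value      = 'bonus',
    #                   options    = {'column_type': 'int',
    #                                 'copy_values_from_column': 'salary'} )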
    # begin alter_table_metadata
    # end alter_table_metadata
    # begin alter_user
    def alter_user( self, name = None, action = None, value = None, options = None
                    ):
        """Alters a user.
        Parameters:
            name (str)
                Name of the user to be altered. Must be an existing user.
            action (str)
                Modification operation to be applied to the user.
                Allowed values are:
                * **set_password** --
                  Sets the password of the user. The user must be an internal
                  user.
            value (str)
                  The value of the modification, depending on input parameter
                  *action*.
            options (dict of str to str)
                  Optional parameters.
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
        """
        assert isinstance( name, (basestring)), "alter_user(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( action, (basestring)), "alter_user(): Argument 'action' must be (one) of type(s) '(basestring)'; given %s" % type( action ).__name__
        assert isinstance( value, (basestring)), "alter_user(): Argument 'value' must be (one) of type(s) '(basestring)'; given %s" % type( value ).__name__
        assert isinstance( options, (dict)), "alter_user(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "alter_user" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['action'] = action
        obj['value'] = value
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/alter/user' ) ) 
    # end alter_user
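    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and an existing internal user 'jsmith'; the
    # user name and password are hypothetical.  Note that `options` must be a
    # dict; the generated default of None would fail the type assertion.
    #
    #   db.alter_user( name    = 'jsmith',
    #                  action  = 'set_password',
    #                  value   = 'new_password',
    #                  options = {} )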
    # begin append_records
    def append_records( self, table_name = None, source_table_name = None, field_map
                        = None, options = {} ):
        """Append (or insert) all records from a source table (specified by input
        parameter *source_table_name*) to a particular target table (specified
        by input parameter *table_name*). The field map (specified by input
        parameter *field_map*) holds the user-specified map of target table
        column names with their mapped source column names.
        Parameters:
            table_name (str)
                The table name for the records to be appended. Must be an
                existing table.
            source_table_name (str)
                The source table name to get records from. Must be an existing
                table name.
            field_map (dict of str to str)
                Contains the mapping of column names from the target table
                (specified by input parameter *table_name*) as the keys, and
                corresponding column names from the source table (specified by
                input parameter *source_table_name*). Must be existing column
                names in the source and target tables, and their types must
                match.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **offset** --
                  A positive integer indicating the number of initial results
                  to skip from source table (specified by input parameter
                  *source_table_name*). Default is 0. The minimum allowed value
                  is 0. The maximum allowed value is MAX_INT.
                * **limit** --
                  A positive integer indicating the maximum number of results
                  to be returned from the source table (specified by input
                  parameter *source_table_name*), or END_OF_SET (-9999) to
                  indicate that the maximum number of results should be
                  returned.
                * **expression** --
                  Optional filter expression to apply to the source table
                  (specified by input parameter *source_table_name*). Empty by
                  default.
                * **order_by** --
                  Comma-separated list of the columns from the source table
                  (specified by input parameter *source_table_name*) to be
                  sorted by; e.g. 'timestamp asc, x desc'.  The columns
                  specified must be present in input parameter *field_map*.
                  If any alias is given for any column name, the alias must be
                  used, rather than the original column name.
                * **update_on_existing_pk** --
                  Specifies the record collision policy for inserting the
                  source table records (specified by input parameter
                  *source_table_name*) into the target table (specified by
                  input parameter *table_name*) table with a `primary key
                  <../../../concepts/tables.html#primary-keys>`_.  If set to
                  *true*, any existing target table record with primary key
                  values that match those of a source table record being
                  inserted will be replaced by that new record.  If set to
                  *false*, any existing target table record with primary key
                  values that match those of a source table record being
                  inserted will remain unchanged and the new record discarded.
                  If the specified table does not have a primary key, then this
                  option is ignored.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            table_name (str)
        """
        assert isinstance( table_name, (basestring)), "append_records(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( source_table_name, (basestring)), "append_records(): Argument 'source_table_name' must be (one) of type(s) '(basestring)'; given %s" % type( source_table_name ).__name__
        assert isinstance( field_map, (dict)), "append_records(): Argument 'field_map' must be (one) of type(s) '(dict)'; given %s" % type( field_map ).__name__
        assert isinstance( options, (dict)), "append_records(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "append_records" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['source_table_name'] = source_table_name
        obj['field_map'] = self.__sanitize_dicts( field_map )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/append/records' ) ) 
    # end append_records
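    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and existing tables 'employees' (target) and
    # 'new_hires' (source) with matching column types; all table and column
    # names are hypothetical.  Keys of `field_map` are target-table columns;
    # values are the corresponding source-table columns.
    #
    #   db.append_records(
    #       table_name        = 'employees',
    #       source_table_name = 'new_hires',
    #       field_map         = {'name': 'full_name', 'dept': 'department'},
    #       options           = {'update_on_existing_pk': 'true'} )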
    # begin clear_table
    def clear_table( self, table_name = '', authorization = '', options = {} ):
        """Clears (drops) one or all tables in the database cluster. The operation
        is synchronous meaning that the table will be cleared before the
        function returns. The response payload returns the status of the
        operation along with the name of the table that was cleared.
        Parameters:
            table_name (str)
                Name of the table to be cleared. Must be an existing table.
                Empty string clears all available tables.  Default value is ''.
            authorization (str)
                No longer used. User can pass an empty string.  Default value
                is ''.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **no_error_if_not_exists** --
                  If *true* and if the table specified in input parameter
                  *table_name* does not exist no error is returned. If *false*
                  and if the table specified in input parameter *table_name*
                  does not exist then an error is returned.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name* for a given table, or
                'ALL CLEARED' in case of clearing all tables.
        """
        assert isinstance( table_name, (basestring)), "clear_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( authorization, (basestring)), "clear_table(): Argument 'authorization' must be (one) of type(s) '(basestring)'; given %s" % type( authorization ).__name__
        assert isinstance( options, (dict)), "clear_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "clear_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['authorization'] = authorization
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/clear/table' ) ) 
    # end clear_table
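    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance; 'staging_data' is a hypothetical table name.
    # With the option below, no error is raised if the table does not exist.
    #
    #   db.clear_table( table_name = 'staging_data',
    #                   options    = {'no_error_if_not_exists': 'true'} )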
    # begin clear_table_monitor
    def clear_table_monitor( self, topic_id = None, options = {} ):
        """Deactivates a table monitor previously created with
        :meth:`.create_table_monitor`.
        Parameters:
            topic_id (str)
                The topic ID returned by :meth:`.create_table_monitor`.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            topic_id (str)
                Value of input parameter *topic_id*.
        """
        assert isinstance( topic_id, (basestring)), "clear_table_monitor(): Argument 'topic_id' must be (one) of type(s) '(basestring)'; given %s" % type( topic_id ).__name__
        assert isinstance( options, (dict)), "clear_table_monitor(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "clear_table_monitor" )
        obj = collections.OrderedDict()
        obj['topic_id'] = topic_id
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/clear/tablemonitor' ) ) 
    # end clear_table_monitor
    # begin clear_trigger
    def clear_trigger( self, trigger_id = None, options = {} ):
        """Clears or cancels the trigger identified by the specified handle. The
        output returns the handle of the trigger cleared as well as indicating
        success or failure of the trigger deactivation.
        Parameters:
            trigger_id (str)
                ID for the trigger to be deactivated.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            trigger_id (str)
                Value of input parameter *trigger_id*.
        """
        assert isinstance( trigger_id, (basestring)), "clear_trigger(): Argument 'trigger_id' must be (one) of type(s) '(basestring)'; given %s" % type( trigger_id ).__name__
        assert isinstance( options, (dict)), "clear_trigger(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "clear_trigger" )
        obj = collections.OrderedDict()
        obj['trigger_id'] = trigger_id
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/clear/trigger' ) ) 
    # end clear_trigger
    # begin create_join_table
    def create_join_table( self, join_table_name = None, table_names = [],
                           column_names = [], expressions = [], options = {} ):
        """Creates a table that is the result of a SQL JOIN.  For details see:
        `join concept documentation <../../../concepts/joins.html>`_.
        Parameters:
            join_table_name (str)
                Name of the join table to be created.  Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
            table_names (list of str)
                The list of table names composing the join.  Corresponds to a
                SQL statement FROM clause.  The user can provide a single
                element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            column_names (list of str)
                List of member table columns or column expressions to be
                included in the join. Columns can be prefixed with
                'table_id.column_name', where 'table_id' is the table name or
                alias.  Columns can be aliased via the syntax 'column_name as
                alias'. Wild cards '*' can be used to include all columns
                across member tables or 'table_id.*' for all of a single
                table's columns.  Columns and column expressions comprising the
                join must be uniquely named or aliased--therefore, the '*' wild
                card cannot be used if column names aren't unique across all
                tables.  The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
                Default value is an empty list ( [] ).
            expressions (list of str)
                An optional list of expressions to combine and filter the
                joined tables.  Corresponds to a SQL statement WHERE clause.
                For details see: `expressions
                <../../../concepts/expressions.html>`_.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the join. If the
                  collection provided is non-existent, the collection will be
                  automatically created. If empty, then the join will be at the
                  top level.
                * **max_query_dimensions** --
                  The maximum number of tables in a join that can be accessed
                  by a query and are not equated by a foreign-key to
                  primary-key equality predicate.
                * **optimize_lookups** --
                  Use more memory to speed up the joining of tables.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **refresh_method** --
                  Method by which the join can be refreshed when the data in
                  underlying member tables have changed.
                  Allowed values are:
                  * **manual** --
                    refresh only occurs when manually requested by calling this
                    endpoint with refresh option set to *refresh* or
                    *full_refresh*
                  * **on_query** --
                    incrementally refresh (refresh just those records added)
                    whenever a new query is issued and new data is inserted
                    into the base table.  A full refresh of all the records
                    occurs when a new query is issued and there have been
                    inserts to any non-base-tables since the last query
                  * **on_insert** --
                    incrementally refresh (refresh just those records added)
                    whenever new data is inserted into a base table.  A full
                    refresh of all the records occurs when a new query is
                    issued and there have been inserts to any non-base-tables
                    since the last query
                    The default value is 'manual'.
                * **refresh** --
                  Do a manual refresh of the join if it exists; otherwise, an
                  error is thrown.
                  Allowed values are:
                  * **no_refresh** --
                    don't refresh
                  * **refresh** --
                    incrementally refresh (refresh just those records added) if
                    new data has been inserted into the base table.  A full
                    refresh of all the records occurs if there have been
                    inserts to any non-base-tables since the last refresh
                  * **full_refresh** --
                    always refresh even if no new records have been added.
                    Only refresh method guaranteed to do a full refresh
                    (refresh all the records) if a delete or update has
                    occurred since the last refresh.
                    The default value is 'no_refresh'.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the join
                  table specified in input parameter *join_table_name*.
                * **no_count** --
                  Return a count of 0 for the join table for logging and for
                  show_table; this optimization is needed for large,
                  overlapped equi-join stencils.
        Returns:
            A dict with the following entries--
            join_table_name (str)
                Value of input parameter *join_table_name*.
            count (long)
                The number of records in the join table filtered by the given
                select expression.
        """
        assert isinstance( join_table_name, (basestring)), "create_join_table(): Argument 'join_table_name' must be (one) of type(s) '(basestring)'; given %s" % type( join_table_name ).__name__
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        column_names = column_names if isinstance( column_names, list ) else ( [] if (column_names is None) else [ column_names ] )
        expressions = expressions if isinstance( expressions, list ) else ( [] if (expressions is None) else [ expressions ] )
        assert isinstance( options, (dict)), "create_join_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_join_table" )
        obj = collections.OrderedDict()
        obj['join_table_name'] = join_table_name
        obj['table_names'] = table_names
        obj['column_names'] = column_names
        obj['expressions'] = expressions
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/jointable' ) ) 
    # end create_join_table
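    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and existing tables 'orders' and 'customers';
    # all table, alias, and column names are hypothetical.
    #
    #   db.create_join_table(
    #       join_table_name = 'orders_with_customers',
    #       table_names     = ['orders as o', 'customers as c'],
    #       column_names    = ['o.order_id', 'o.total', 'c.name as customer'],
    #       expressions     = ['o.customer_id = c.id'],
    #       options         = {'refresh_method': 'on_insert'} )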
    # begin create_proc
    def create_proc( self, proc_name = None, execution_mode = 'distributed', files =
                     {}, command = '', args = [], options = {} ):
        """Creates an instance (proc) of the user-defined function (UDF) specified
        by the given command, options, and files, and makes it available for
        execution.  For details on UDFs, see: `User-Defined Functions
        <../../../concepts/udf.html>`_
        Parameters:
            proc_name (str)
                Name of the proc to be created. Must not be the name of a
                currently existing proc.
            execution_mode (str)
                The execution mode of the proc.  Default value is
                'distributed'.
                Allowed values are:
                * **distributed** --
                  Input table data will be divided into data segments that are
                  distributed across all nodes in the cluster, and the proc
                  command will be invoked once per data segment in parallel.
                  Output table data from each invocation will be saved to the
                  same node as the corresponding input data.
                * **nondistributed** --
                  The proc command will be invoked only once per execution, and
                  will not have access to any input or output table data.
                  The default value is 'distributed'.
            files (dict of str to str)
                  A map of the files that make up the proc. The keys of the map
                  are file names, and the values are the binary contents of the
                  files. The file names may include subdirectory names (e.g.
                  'subdir/file') but must not resolve to a directory above the
                  root for the proc.  Default value is an empty dict ( {} ).
            command (str)
                  The command (excluding arguments) that will be invoked when
                  the proc is executed. It will be invoked from the directory
                  containing the proc input parameter *files* and may be any
                  command that can be resolved from that directory. It need not
                  refer to a file actually in that directory; for example, it
                  could be 'java' if the proc is a Java application; however,
                  any necessary external programs must be preinstalled on every
                  database node. If the command refers to a file in that
                  directory, it must be preceded with './' as per Linux
                  convention. If not specified, and exactly one file is
                  provided in input parameter *files*, that file will be
                  invoked.  Default value is ''.
            args (list of str)
                  An array of command-line arguments that will be passed to
                  input parameter *command* when the proc is executed.  The
                  user can provide a single element (which will be
                  automatically promoted to a list internally) or a list.
                  Default value is an empty list ( [] ).
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            proc_name (str)
                Value of input parameter *proc_name*.
        """
        assert isinstance( proc_name, (basestring)), "create_proc(): Argument 'proc_name' must be (one) of type(s) '(basestring)'; given %s" % type( proc_name ).__name__
        assert isinstance( execution_mode, (basestring)), "create_proc(): Argument 'execution_mode' must be (one) of type(s) '(basestring)'; given %s" % type( execution_mode ).__name__
        assert isinstance( files, (dict)), "create_proc(): Argument 'files' must be (one) of type(s) '(dict)'; given %s" % type( files ).__name__
        assert isinstance( command, (basestring)), "create_proc(): Argument 'command' must be (one) of type(s) '(basestring)'; given %s" % type( command ).__name__
        args = args if isinstance( args, list ) else ( [] if (args is None) else [ args ] )
        assert isinstance( options, (dict)), "create_proc(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_proc" )
        obj = collections.OrderedDict()
        obj['proc_name'] = proc_name
        obj['execution_mode'] = execution_mode
        obj['files'] = self.__sanitize_dicts( files )
        obj['command'] = command
        obj['args'] = args
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/proc' ) ) 
    # end create_proc
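    # A minimal usage sketch (not part of the generated API): assumes `db` is
    # a connected GPUdb instance and a local UDF script 'udf.py'; the proc
    # name, file name, and command are hypothetical, and any interpreter used
    # must be preinstalled on every database node.
    #
    #   with open( 'udf.py', 'rb' ) as f:
    #       proc_file = f.read()
    #   db.create_proc( proc_name      = 'my_udf',
    #                   execution_mode = 'distributed',
    #                   files          = {'udf.py': proc_file},
    #                   command        = 'python',
    #                   args           = ['udf.py'],
    #                   options        = {} )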
    # begin create_projection
    def create_projection( self, table_name = None, projection_name = None,
                           column_names = None, options = {} ):
        """Creates a new `projection <../../../concepts/projections.html>`_ of an
        existing table. A projection represents a subset of the columns
        (potentially including derived columns) of a table.
        Notes:
        A moving average can be calculated on a given column using the
        following syntax in the input parameter *column_names* parameter:
        'moving_average(column_name,num_points_before,num_points_after) as
        new_column_name'
        For each record in the moving_average function's 'column_name'
        parameter, it computes the average over the previous
        'num_points_before' records and the subsequent 'num_points_after'
        records.
        Note that moving average relies on *order_by*, and *order_by* requires
        that all the data being ordered resides on the same processing node, so
        it won't make sense to use *order_by* without moving average.
        Also, a projection can be created with a different `shard key
        <../../../concepts/tables.html#shard-keys>`_ than the source table.  By
        specifying *shard_key*, the projection will be sharded according to the
        specified columns, regardless of how the source table is sharded.  The
        source table can even be unsharded or replicated.
        Parameters:
            table_name (str)
                Name of the existing table on which the projection is to be
                applied.
            projection_name (str)
                Name of the projection to be created. Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
            column_names (list of str)
                List of columns from input parameter *table_name* to be
                included in the projection. Can include derived columns.
                Columns can be aliased via the syntax 'column_name as alias'.
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a `collection <../../../concepts/collections.html>`_
                  to which the projection is to be assigned as a child. If the
                  collection provided is non-existent, the collection will be
                  automatically created. If empty, then the projection will be
                  at the top level.
                * **expression** --
                  An optional filter `expression
                  <../../../concepts/expressions.html>`_ to be applied to the
                  source table prior to the projection.
                * **limit** --
                  The number of records to keep.
                * **order_by** --
                  Comma-separated list of the columns to be sorted by; e.g.
                  'timestamp asc, x desc'.  The columns specified must be
                  present in input parameter *column_names*.  If any alias is
                  given for any column name, the alias must be used, rather
                  than the original column name.
                * **materialize_on_gpu** --
                  If *true* then the columns of the projection will be cached
                  on the GPU.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **chunk_size** --
                  Indicates the chunk size to be used for this table.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the
                  projection specified in input parameter *projection_name*.
                * **shard_key** --
                  Comma-separated list of the columns to be sharded on; e.g.
                  'column1, column2'.  The columns specified must be present in
                  input parameter *column_names*.  If any alias is given for
                  any column name, the alias must be used, rather than the
                  original column name.
                * **persist** --
                  If *true*, then the projection specified in input parameter
                  *projection_name* will be persisted and will not expire
                  unless a *ttl* is specified.   If *false*, then the
                  projection will be an in-memory table and will expire unless
                  a *ttl* is specified.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            projection_name (str)
                Value of input parameter *projection_name*.
        """
        assert isinstance( table_name, (basestring)), "create_projection(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( projection_name, (basestring)), "create_projection(): Argument 'projection_name' must be (one) of type(s) '(basestring)'; given %s" % type( projection_name ).__name__
        column_names = column_names if isinstance( column_names, list ) else ( [] if (column_names is None) else [ column_names ] )
        assert isinstance( options, (dict)), "create_projection(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_projection" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['projection_name'] = projection_name
        obj['column_names'] = column_names
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/projection' ) ) 
    # end create_projection
    # begin create_role
    def create_role( self, name = None, options = {} ):
        """Creates a new role.
        Parameters:
            name (str)
                Name of the role to be created. Must contain only lowercase
                letters, digits, and underscores, and cannot begin with a
                digit. Must not be the same name as an existing user or role.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
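        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the role name is a hypothetical placeholder)::
            response = h_db.create_role( name = 'analyst', options = {} )
            print( response['name'] )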
        """
        assert isinstance( name, (basestring)), "create_role(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( options, (dict)), "create_role(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_role" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/role' ) ) 
    # end create_role
    # begin create_table
    def create_table( self, table_name = None, type_id = None, options = {} ):
        """Creates a new table or collection. If a new table is being created, the
        type of the table is given by input parameter *type_id*, which must the
        be the ID of a currently registered type (i.e. one created via
        :meth:`.create_type`). The table will be created inside a collection if
        the option *collection_name* is specified. If that collection does not
        already exist, it will be created.
        To create a new collection, specify the name of the collection in input
        parameter *table_name* and set the *is_collection* option to *true*;
        input parameter *type_id* will be ignored.
        Parameters:
            table_name (str)
                Name of the table to be created. An error for requests with
                an existing table of the same name and type ID may be
                suppressed by using the *no_error_if_exists* option.  See
                `Tables
                <../../../concepts/tables.html>`_ for naming restrictions.
            type_id (str)
                ID of a currently registered type. All objects added to the
                newly created table will be of this type.  Ignored if
                *is_collection* is *true*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **no_error_if_exists** --
                  If *true*, prevents an error from occurring if the table
                  already exists and is of the given type.  If a table with the
                  same ID but a different type exists, it is still an error.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **collection_name** --
                  Name of a collection which is to contain the newly created
                  table. If the collection provided is non-existent, the
                  collection will be automatically created. If empty, then the
                  newly created table will be a top-level table.
                * **is_collection** --
                  Indicates whether the new table to be created will be a
                  collection.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **disallow_homogeneous_tables** --
                  For a collection, indicates whether the collection prohibits
                  containment of multiple tables of exactly the same data type.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **is_replicated** --
                  For a table, indicates the `distribution scheme
                  <../../../concepts/tables.html#distribution>`_ for the
                  table's data.  If *true*, the table will be `replicated
                  <../../../concepts/tables.html#replication>`_.  If *false*, the
                  table will be `sharded
                  <../../../concepts/tables.html#sharding>`_ according to the
                  `shard key <../../../concepts/tables.html#shard-keys>`_
                  specified in the given input parameter *type_id*, or
                  `randomly sharded
                  <../../../concepts/tables.html#random-sharding>`_, if no
                  shard key is specified.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **foreign_keys** --
                  Semicolon-separated list of `foreign keys
                  <../../../concepts/tables.html#foreign-keys>`_, of the format
                  'source_column references target_table(primary_key_column) [
                  as <foreign_key_name> ]'.
                * **foreign_shard_key** --
                  Foreign shard key of the format 'source_column references
                  shard_by_column from target_table(primary_key_column)'.
                * **ttl** --
                  For a table, sets the `TTL <../../../concepts/ttl.html>`_ of
                  the table specified in input parameter *table_name*.
                * **chunk_size** --
                  Indicates the chunk size to be used for this table.
                * **is_result_table** --
                  For a table, indicates whether the table is an in-memory
                  table. A result table cannot contain store_only, text_search,
                  or string columns (charN columns are acceptable), and it will
                  not be retained if the server is restarted.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
            type_id (str)
                Value of input parameter *type_id*.
            is_collection (bool)
                Indicates if the created entity is a collection.
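        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle and that ``type_id`` came from a prior :meth:`.create_type`
        call; table and collection names are hypothetical placeholders)::
            response = h_db.create_table(
                table_name = 'trip_data',
                type_id = type_id,
                options = { 'collection_name': 'transport',
                            'no_error_if_exists': 'true' } )
            print( response['table_name'], response['is_collection'] )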
        """
        assert isinstance( table_name, (basestring)), "create_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( type_id, (basestring)), "create_table(): Argument 'type_id' must be (one) of type(s) '(basestring)'; given %s" % type( type_id ).__name__
        assert isinstance( options, (dict)), "create_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['type_id'] = type_id
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/table' ) ) 
    # end create_table
    # begin create_table_monitor
    def create_table_monitor( self, table_name = None, options = {} ):
        """Creates a monitor that watches for new records inserted into a
        particular table (identified by input parameter *table_name*) and
        forwards copies to subscribers via ZMQ. After this call completes,
        subscribe to the returned output parameter *topic_id* on the ZMQ table
        monitor port (default 9002). Each time an insert operation on the table
        completes, a multipart message is published for that topic; the first
        part contains only the topic ID, and each subsequent part contains one
        binary-encoded Avro object that was inserted. The monitor will continue
        to run (regardless of whether or not there are any subscribers) until
        deactivated with :meth:`.clear_table_monitor`.
        Parameters:
            table_name (str)
                Name of the table to monitor. Must not refer to a collection.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            topic_id (str)
                The ZMQ topic ID to subscribe to for inserted records.
            table_name (str)
                Value of input parameter *table_name*.
            type_schema (str)
                JSON Avro schema of the table, for use in decoding published
                records.
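        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the monitored table name is a hypothetical placeholder)::
            response = h_db.create_table_monitor( table_name = 'trip_data' )
            # Subscribe to this topic on the ZMQ table monitor port (default
            # 9002); decode records using response['type_schema']
            print( response['topic_id'] )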
        """
        assert isinstance( table_name, (basestring)), "create_table_monitor(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( options, (dict)), "create_table_monitor(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_table_monitor" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/tablemonitor' ) ) 
    # end create_table_monitor
    # begin create_trigger_by_area
    def create_trigger_by_area( self, request_id = None, table_names = None,
                                x_column_name = None, x_vector = None,
                                y_column_name = None, y_vector = None,
                                options = {} ):
        """Sets up an area trigger mechanism for two column_names for one or more
        tables. (This function is essentially the two-dimensional version of
        :meth:`.create_trigger_by_range`.) Once the trigger has been activated,
        any record added to the listed tables(s) via :meth:`.insert_records`
        with the chosen columns' values falling within the specified region
        will trip the trigger. All such records will be queued at the trigger
        port (by default '9001' but able to be retrieved via
        :meth:`.show_system_status`) for any listening client to collect.
        Active triggers can be cancelled by using the :meth:`.clear_trigger`
        endpoint or by clearing all relevant tables.
        The output returns the trigger handle as well as an indication of the
        success or failure of the trigger activation.
        Parameters:
            request_id (str)
                User-created ID for the trigger. The ID can be alphanumeric,
                contain symbols, and must contain at least one character.
            table_names (list of str)
                Names of the tables on which the trigger will be activated and
                maintained.  The user can provide a single element (which will
                be automatically promoted to a list internally) or a list.
            x_column_name (str)
                Name of a numeric column on which the trigger is activated.
                Usually 'x' for geospatial data points.
            x_vector (list of floats)
                The respective coordinate values for the region on which the
                trigger is activated. This usually translates to the
                x-coordinates of a geospatial region.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            y_column_name (str)
                Name of a second numeric column on which the trigger is
                activated. Usually 'y' for geospatial data points.
            y_vector (list of floats)
                The respective coordinate values for the region on which the
                trigger is activated. This usually translates to the
                y-coordinates of a geospatial region. Must be the same length
                as input parameter *x_vector*.  The user can provide a single
                element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            trigger_id (str)
                Value of input parameter *request_id*.
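        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the table name and region vertices are hypothetical)::
            # Trip the trigger for records whose (x, y) falls in a unit square
            response = h_db.create_trigger_by_area(
                request_id = 'area_trigger_1',
                table_names = [ 'trip_data' ],
                x_column_name = 'x',
                x_vector = [ 0.0, 1.0, 1.0, 0.0 ],
                y_column_name = 'y',
                y_vector = [ 0.0, 0.0, 1.0, 1.0 ] )
            print( response['trigger_id'] )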
        """
        assert isinstance( request_id, (basestring)), "create_trigger_by_area(): Argument 'request_id' must be (one) of type(s) '(basestring)'; given %s" % type( request_id ).__name__
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        assert isinstance( x_column_name, (basestring)), "create_trigger_by_area(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        x_vector = x_vector if isinstance( x_vector, list ) else ( [] if (x_vector is None) else [ x_vector ] )
        assert isinstance( y_column_name, (basestring)), "create_trigger_by_area(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        y_vector = y_vector if isinstance( y_vector, list ) else ( [] if (y_vector is None) else [ y_vector ] )
        assert isinstance( options, (dict)), "create_trigger_by_area(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_trigger_by_area" )
        obj = collections.OrderedDict()
        obj['request_id'] = request_id
        obj['table_names'] = table_names
        obj['x_column_name'] = x_column_name
        obj['x_vector'] = x_vector
        obj['y_column_name'] = y_column_name
        obj['y_vector'] = y_vector
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/trigger/byarea' ) ) 
    # end create_trigger_by_area
    # begin create_trigger_by_range
    def create_trigger_by_range( self, request_id = None, table_names = None,
                                 column_name = None, min = None, max = None,
                                 options = {} ):
        """Sets up a simple range trigger for a column_name for one or more
        tables. Once the trigger has been activated, any record added to the
        listed tables(s) via :meth:`.insert_records` with the chosen
        column_name's value falling within the specified range will trip the
        trigger. All such records will be queued at the trigger port (by
        default '9001' but able to be retrieved via
        :meth:`.show_system_status`) for any listening client to collect.
        Active triggers can be cancelled by using the :meth:`.clear_trigger`
        endpoint or by clearing all relevant tables.
        The output returns the trigger handle as well as an indication of the
        success or failure of the trigger activation.
        Parameters:
            request_id (str)
                User-created ID for the trigger. The ID can be alphanumeric,
                contain symbols, and must contain at least one character.
            table_names (list of str)
                Tables on which the trigger will be active.  The user can
                provide a single element (which will be automatically promoted
                to a list internally) or a list.
            column_name (str)
                Name of a numeric column on which the trigger is
                activated.
            min (float)
                The lower bound (inclusive) for the trigger range.
            max (float)
                The upper bound (inclusive) for the trigger range.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            trigger_id (str)
                Value of input parameter *request_id*.
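        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the table and column names are hypothetical)::
            # Trip the trigger for inserted fares between 0 and 50, inclusive
            response = h_db.create_trigger_by_range(
                request_id = 'range_trigger_1',
                table_names = [ 'trip_data' ],
                column_name = 'fare_amount',
                min = 0.0,
                max = 50.0 )
            print( response['trigger_id'] )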
        """
        assert isinstance( request_id, (basestring)), "create_trigger_by_range(): Argument 'request_id' must be (one) of type(s) '(basestring)'; given %s" % type( request_id ).__name__
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        assert isinstance( column_name, (basestring)), "create_trigger_by_range(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( min, (int, long, float)), "create_trigger_by_range(): Argument 'min' must be (one) of type(s) '(int, long, float)'; given %s" % type( min ).__name__
        assert isinstance( max, (int, long, float)), "create_trigger_by_range(): Argument 'max' must be (one) of type(s) '(int, long, float)'; given %s" % type( max ).__name__
        assert isinstance( options, (dict)), "create_trigger_by_range(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_trigger_by_range" )
        obj = collections.OrderedDict()
        obj['request_id'] = request_id
        obj['table_names'] = table_names
        obj['column_name'] = column_name
        obj['min'] = min
        obj['max'] = max
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/trigger/byrange' ) ) 
    # end create_trigger_by_range
    # begin create_type
    def create_type( self, type_definition = None, label = None,
                     properties = {}, options = {} ):
        """Creates a new type describing the layout or schema of a table. The type
        definition is a JSON string describing the fields (i.e. columns) of the
        type. Each field consists of a name and a data type. Supported data
        types are: double, float, int, long, string, and bytes. In addition,
        one or more properties can be specified for each column to customize
        the memory usage and query availability of that column.  Note that
        some properties are mutually exclusive--i.e. they cannot be specified
        for any given column simultaneously.  One example of a mutually
        exclusive pair is *data* and *store_only*.
        To set a *primary key* on one or more columns include the property
        'primary_key' on the desired column_names. If a primary key is
        specified, then a uniqueness constraint is enforced, in that only a
        single object can exist with a given primary key. When :meth:`inserting
        <.insert_records>` data into a table with a primary key, depending on
        the parameters in the request, incoming objects with primary keys that
        match existing objects will either overwrite (i.e. update) the existing
        object or will be skipped and not added into the set.
        Example of a type definition with some of the parameters::
                {"type":"record",
                "name":"point",
                "fields":[{"name":"msg_id","type":"string"},
                                {"name":"x","type":"double"},
                                {"name":"y","type":"double"},
                                {"name":"TIMESTAMP","type":"double"},
                                {"name":"source","type":"string"},
                                {"name":"group_id","type":"string"},
                                {"name":"OBJECT_ID","type":"string"}]
                }
        Properties::
                {"group_id":["store_only"],
                "msg_id":["store_only","text_search"]
                }
        Parameters:
            type_definition (str)
                A JSON string describing the columns of the type to be
                registered.
            label (str)
                A user-defined description string which can be used to
                differentiate between tables and types with otherwise identical
                schemas.
            properties (dict of str to lists of str)
                Each key-value pair specifies the properties to use for a given
                column where the key is the column name.  All keys used must be
                relevant column names for the given table.  Specifying any
                property overrides the default properties for that column
                (which is based on the column's data type).  Default value is
                an empty dict ( {} ).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            type_id (str)
                An identifier representing the created type. This type_id can
                be used in subsequent calls to :meth:`create a table
                <.create_table>`.
            type_definition (str)
                Value of input parameter *type_definition*.
            label (str)
                Value of input parameter *label*.
            properties (dict of str to lists of str)
                Value of input parameter *properties*.
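        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the record and field names are hypothetical)::
            import json
            type_definition = json.dumps(
                { 'type': 'record',
                  'name': 'point',
                  'fields': [ { 'name': 'x', 'type': 'double' },
                              { 'name': 'y', 'type': 'double' },
                              { 'name': 'msg_id', 'type': 'string' } ] } )
            response = h_db.create_type(
                type_definition = type_definition,
                label = 'point_type',
                properties = { 'msg_id': [ 'text_search' ] } )
            print( response['type_id'] )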
        """
        assert isinstance( type_definition, (basestring)), "create_type(): Argument 'type_definition' must be (one) of type(s) '(basestring)'; given %s" % type( type_definition ).__name__
        assert isinstance( label, (basestring)), "create_type(): Argument 'label' must be (one) of type(s) '(basestring)'; given %s" % type( label ).__name__
        assert isinstance( properties, (dict)), "create_type(): Argument 'properties' must be (one) of type(s) '(dict)'; given %s" % type( properties ).__name__
        assert isinstance( options, (dict)), "create_type(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_type" )
        obj = collections.OrderedDict()
        obj['type_definition'] = type_definition
        obj['label'] = label
        obj['properties'] = self.__sanitize_dicts( properties )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/type' ) ) 
    # end create_type
    # begin create_union
    def create_union( self, table_name = None, table_names = None,
                      input_column_names = None, output_column_names = None,
                      options = {} ):
        """Performs a `union <../../../concepts/unions.html>`_ (concatenation) of
        one or more existing tables or views, the results of which are stored
        in a new table. It is equivalent to the SQL UNION ALL operator.
        Non-charN 'string' and 'bytes' column types cannot be included in a
        union, nor can columns with the property 'store_only'. Though not
        explicitly unions, `intersect <../../../concepts/intersect.html>`_ and
        `except <../../../concepts/except.html>`_ are also available from this
        endpoint.
        Parameters:
            table_name (str)
                Name of the table to be created. Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
            table_names (list of str)
                The list of table names making up the union. Must contain the
                names of one or more existing tables.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            input_column_names (list of lists of str)
                The list of columns from each of the corresponding input
                tables.  The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            output_column_names (list of str)
                The list of names of the columns to be stored in the union.
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the union. If the
                  collection provided is non-existent, the collection will be
                  automatically created. If empty, then the union will be a
                  top-level table.
                * **materialize_on_gpu** --
                  If 'true', then the columns of the union will be cached on the
                  GPU.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **mode** --
                  If 'merge_views', then this operation will merge (i.e. union)
                  the provided views. All 'table_names' must be views from the
                  same underlying base table.
                  Allowed values are:
                  * **union_all** --
                    Retains all rows from the specified tables.
                  * **union** --
                    Retains all unique rows from the specified tables (synonym
                    for 'union_distinct').
                  * **union_distinct** --
                    Retains all unique rows from the specified tables.
                  * **except** --
                    Retains all unique rows from the first table that do not
                    appear in the second table (only works on 2 tables).
                  * **intersect** --
                    Retains all unique rows that appear in both of the
                    specified tables (only works on 2 tables).
                  * **merge_views** --
                    Merge two or more views (or views of views) of the same
                    base data set into a new view. If this mode is selected,
                    input parameter *input_column_names* AND input parameter
                    *output_column_names* must be empty. The resulting view
                    would match the results of a SQL OR operation, e.g., if
                    filter 1 creates a view using the expression 'x = 10' and
                    filter 2 creates a view using the expression 'x <= 10',
                    then the merge views operation creates a new view using the
                    expression 'x = 10 OR x <= 10'.
                  The default value is 'union_all'.
                * **chunk_size** --
                  Indicates the chunk size to be used for this table.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                  specified in input parameter *table_name*.
                * **persist** --
                  If *true*, then the union specified in input parameter
                  *table_name* will be persisted and will not expire unless a
                  *ttl* is specified.   If *false*, then the union will be an
                  in-memory table and will expire unless a *ttl* is specified
                  otherwise.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
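        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle and the two input tables are hypothetical tables with
        compatible column types)::
            response = h_db.create_union(
                table_name = 'all_trips',
                table_names = [ 'trips_2016', 'trips_2017' ],
                input_column_names = [ [ 'x', 'y' ], [ 'x', 'y' ] ],
                output_column_names = [ 'x', 'y' ],
                options = { 'mode': 'union_all' } )
            print( response['table_name'] )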
        """
        assert isinstance( table_name, (basestring)), "create_union(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        input_column_names = input_column_names if isinstance( input_column_names, list ) else ( [] if (input_column_names is None) else [ input_column_names ] )
        output_column_names = output_column_names if isinstance( output_column_names, list ) else ( [] if (output_column_names is None) else [ output_column_names ] )
        assert isinstance( options, (dict)), "create_union(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_union" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['table_names'] = table_names
        obj['input_column_names'] = input_column_names
        obj['output_column_names'] = output_column_names
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/union' ) ) 
    # end create_union
    # begin create_user_external
    def create_user_external( self, name = None, options = {} ):
        """Creates a new external user (a user whose credentials are managed by an
        external LDAP).
        Parameters:
            name (str)
                Name of the user to be created. Must exactly match the user's
                name in the external LDAP, prefixed with an @. Must not be the
                same name as an existing user.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
        """
        assert isinstance( name, (basestring)), "create_user_external(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( options, (dict)), "create_user_external(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_user_external" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/user/external' ) ) 
    # end create_user_external
    # begin create_user_internal
    def create_user_internal( self, name = None, password = None, options = {} ):
        """Creates a new internal user (a user whose credentials are managed by
        the database system).
        Parameters:
            name (str)
                Name of the user to be created. Must contain only lowercase
                letters, digits, and underscores, and cannot begin with a
                digit. Must not be the same name as an existing user or role.
            password (str)
                Initial password of the user to be created. May be an empty
                string for no password.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
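        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the user name and password are hypothetical placeholders)::
            response = h_db.create_user_internal(
                name = 'jdoe', password = 'secret', options = {} )
            print( response['name'] )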
        """
        assert isinstance( name, (basestring)), "create_user_internal(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( password, (basestring)), "create_user_internal(): Argument 'password' must be (one) of type(s) '(basestring)'; given %s" % type( password ).__name__
        assert isinstance( options, (dict)), "create_user_internal(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "create_user_internal" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['password'] = password
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/create/user/internal' ) ) 
    # end create_user_internal
    # begin delete_proc
    def delete_proc( self, proc_name = None, options = {} ):
        """Deletes a proc. Any currently running instances of the proc will be
        killed.
        Parameters:
            proc_name (str)
                Name of the proc to be deleted. Must be the name of a currently
                existing proc.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            proc_name (str)
                Value of input parameter *proc_name*.
        """
        assert isinstance( proc_name, (basestring)), "delete_proc(): Argument 'proc_name' must be (one) of type(s) '(basestring)'; given %s" % type( proc_name ).__name__
        assert isinstance( options, (dict)), "delete_proc(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "delete_proc" )
        obj = collections.OrderedDict()
        obj['proc_name'] = proc_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/delete/proc' ) ) 
    # end delete_proc
    # begin delete_records
    def delete_records( self, table_name = None, expressions = None, options = {} ):
        """Deletes record(s) matching the provided criteria from the given table.
        The record selection criteria can either be one or more input
        parameter *expressions* (matching multiple records) or a single record
        identified by the *record_id* option.  Note that the two selection
        criteria are mutually exclusive.  This operation cannot be run on a
        collection or a view.  The operation is synchronous, meaning that a
        response will not be available until the request is completely
        processed and all the matching records are deleted.
        Parameters:
            table_name (str)
                Name of the table from which to delete records. The set must be
                a currently existing table and not a collection or a view.
            expressions (list of str)
                A list of the actual predicates, one for each select; format
                should follow the guidelines provided `here
                <../../../concepts/expressions.html>`_. Specifying one or more
                input parameter *expressions* is mutually exclusive to
                specifying *record_id* in the input parameter *options*.  The
                user can provide a single element (which will be automatically
                promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **global_expression** --
                  An optional global expression to reduce the search space of
                  the input parameter *expressions*.
                * **record_id** --
                  A record id identifying a single record, obtained at the time
                  of :meth:`insertion of the record <.insert_records>` or by
                  calling :meth:`.get_records_from_collection` with the
                  *return_record_ids* option.
        Returns:
            A dict with the following entries--
            count_deleted (long)
                Total number of records deleted across all expressions.
            counts_deleted (list of longs)
                Total number of records deleted per expression.
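        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the table name and expression are hypothetical)::
            # Delete all records with a negative fare
            response = h_db.delete_records(
                table_name = 'trip_data',
                expressions = [ 'fare_amount < 0' ] )
            print( response['count_deleted'] )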
        """
        assert isinstance( table_name, (basestring)), "delete_records(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        expressions = expressions if isinstance( expressions, list ) else ( [] if (expressions is None) else [ expressions ] )
        assert isinstance( options, (dict)), "delete_records(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "delete_records" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['expressions'] = expressions
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/delete/records' ) ) 
    # end delete_records
    # begin delete_role
    def delete_role( self, name = None, options = {} ):
        """Deletes an existing role.
        Parameters:
            name (str)
                Name of the role to be deleted. Must be an existing role.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
        """
        assert isinstance( name, (basestring)), "delete_role(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( options, (dict)), "delete_role(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "delete_role" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/delete/role' ) ) 
    # end delete_role
    # begin delete_user
    def delete_user( self, name = None, options = {} ):
        """Deletes an existing user.
        Parameters:
            name (str)
                Name of the user to be deleted. Must be an existing user.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
        """
        assert isinstance( name, (basestring)), "delete_user(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( options, (dict)), "delete_user(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "delete_user" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/delete/user' ) ) 
    # end delete_user
    # begin execute_proc
    def execute_proc( self, proc_name = None, params = {}, bin_params = {},
                      input_table_names = [], input_column_names = {},
                      output_table_names = [], options = {} ):
        """Executes a proc. This endpoint is asynchronous and does not wait for
        the proc to complete before returning.
        Parameters:
            proc_name (str)
                Name of the proc to execute. Must be the name of a currently
                existing proc.
            params (dict of str to str)
                A map containing named parameters to pass to the proc. Each
                key/value pair specifies the name of a parameter and its value.
                Default value is an empty dict ( {} ).
            bin_params (dict of str to str)
                A map containing named binary parameters to pass to the proc.
                Each key/value pair specifies the name of a parameter and its
                value.  Default value is an empty dict ( {} ).
            input_table_names (list of str)
                Names of the tables containing data to be passed to the proc.
                Each name specified must be the name of a currently existing
                table. If no table names are specified, no data will be passed
                to the proc.  The user can provide a single element (which will
                be automatically promoted to a list internally) or a list.
                Default value is an empty list ( [] ).
            input_column_names (dict of str to lists of str)
                Map of table names from input parameter *input_table_names* to
                lists of names of columns from those tables that will be passed
                to the proc. Each column name specified must be the name of an
                existing column in the corresponding table. If a table name
                from input parameter *input_table_names* is not included, all
                columns from that table will be passed to the proc.  Default
                value is an empty dict ( {} ).
            output_table_names (list of str)
                Names of the tables to which output data from the proc will be
                written. If a specified table does not exist, it will
                automatically be created with the same schema as the
                corresponding table (by order) from input parameter
                *input_table_names*, excluding any primary and shard keys. If a
                specified table is a non-persistent result table, it must not
                have primary or shard keys. If no table names are specified, no
                output data can be returned from the proc.  The user can
                provide a single element (which will be automatically promoted
                to a list internally) or a list.  Default value is an empty
                list ( [] ).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **cache_input** --
                  A comma-delimited list of table names from input parameter
                  *input_table_names* from which input data will be cached for
                  use in subsequent calls to :meth:`.execute_proc` with the
                  *use_cached_input* option. Cached input data will be retained
                  until the proc status is cleared with the
                  :meth:`clear_complete <.show_proc_status>` option of
                  :meth:`.show_proc_status` and all proc instances using the
                  cached data have completed.
                * **use_cached_input** --
                  A comma-delimited list of run IDs (as returned from prior
                  calls to :meth:`.execute_proc`) of running or completed proc
                  instances from which input data cached using the
                  *cache_input* option will be used. Cached input data will not
                  be used for any tables specified in input parameter
                  *input_table_names*, but data from all other tables cached
                  for the specified run IDs will be passed to the proc. If the
                  same table was cached for multiple specified run IDs, the
                  cached data from the first run ID specified in the list that
                  includes that table will be used.
        Returns:
            A dict with the following entries--
            run_id (str)
                The run ID of the running proc instance. This may be passed to
                :meth:`.show_proc_status` to obtain status information, or
                :meth:`.kill_proc` to kill the proc instance.
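        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the proc and table names are hypothetical, and the proc must
        have been registered beforehand)::
            response = h_db.execute_proc(
                proc_name = 'normalize_fares',
                params = { 'scale': '1.1' },
                input_table_names = [ 'trip_data' ],
                output_table_names = [ 'trip_data_normalized' ] )
            # Poll status via show_proc_status() using the returned run ID
            print( response['run_id'] )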
        """
        assert isinstance( proc_name, (basestring)), "execute_proc(): Argument 'proc_name' must be (one) of type(s) '(basestring)'; given %s" % type( proc_name ).__name__
        assert isinstance( params, (dict)), "execute_proc(): Argument 'params' must be (one) of type(s) '(dict)'; given %s" % type( params ).__name__
        assert isinstance( bin_params, (dict)), "execute_proc(): Argument 'bin_params' must be (one) of type(s) '(dict)'; given %s" % type( bin_params ).__name__
        input_table_names = input_table_names if isinstance( input_table_names, list ) else ( [] if (input_table_names is None) else [ input_table_names ] )
        assert isinstance( input_column_names, (dict)), "execute_proc(): Argument 'input_column_names' must be (one) of type(s) '(dict)'; given %s" % type( input_column_names ).__name__
        output_table_names = output_table_names if isinstance( output_table_names, list ) else ( [] if (output_table_names is None) else [ output_table_names ] )
        assert isinstance( options, (dict)), "execute_proc(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "execute_proc" )
        obj = collections.OrderedDict()
        obj['proc_name'] = proc_name
        obj['params'] = self.__sanitize_dicts( params )
        obj['bin_params'] = self.__sanitize_dicts( bin_params )
        obj['input_table_names'] = input_table_names
        obj['input_column_names'] = self.__sanitize_dicts( input_column_names )
        obj['output_table_names'] = output_table_names
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/execute/proc' ) ) 
    # end execute_proc
    # begin filter
    def filter( self, table_name = None, view_name = '', expression = None,
                options = {} ):
        """Filters data based on the specified expression.  The results are stored
        in a `result set <../../../concepts/filtered_views.html>`_ with the
        given input parameter *view_name*.
        For details see `Expressions <../../../concepts/expressions.html>`_.
        The response message contains the number of points for which the
        expression evaluated to be true, which is equivalent to the size of the
        result view.
        Parameters:
            table_name (str)
                Name of the table to filter.  This may be the ID of a
                collection, table or a result set (for chaining queries).
                Collections may be filtered only if all tables within the
                collection have the same type ID.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            expression (str)
                The select expression to filter the specified table.  For
                details see `Expressions
                <../../../concepts/expressions.html>`_.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the newly created
                  view. If the collection provided is non-existent, the
                  collection will be automatically created. If empty, then the
                  newly created view will be top-level.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the view
                  specified in input parameter *view_name*.
        Returns:
            A dict with the following entries--
            count (long)
                The number of records that matched the given select expression.
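        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the table name and expression are hypothetical)::
            response = h_db.filter(
                table_name = 'trip_data',
                view_name = 'short_trips',
                expression = 'trip_distance < 2.0' )
            print( response['count'] )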
        """
        assert isinstance( table_name, (basestring)), "filter(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( expression, (basestring)), "filter(): Argument 'expression' must be (one) of type(s) '(basestring)'; given %s" % type( expression ).__name__
        assert isinstance( options, (dict)), "filter(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['expression'] = expression
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter' ) ) 
    # end filter
    # begin filter_by_area
    def filter_by_area( self, table_name = None, view_name = '',
                        x_column_name = None, x_vector = None,
                        y_column_name = None, y_vector = None, options = {} ):
        """Calculates which objects from a table are within a named area of
        interest (NAI/polygon). The operation is synchronous, meaning that a
        response will not be returned until all the matching objects are fully
        available. The response payload provides the count of the resulting
        set. A new resultant set (view) which satisfies the input NAI
        restriction specification is created with the name specified by input
        parameter *view_name*.
        Parameters:
            table_name (str)
                Name of the table to filter.  This may be the name of a
                collection, a table or a view (when chaining queries).
                Collections may be filtered only if all tables within the
                collection have the same type ID.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            x_column_name (str)
                Name of the column containing the x values to be filtered.
            x_vector (list of floats)
                List of x coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            y_column_name (str)
                Name of the column containing the y values to be filtered.
            y_vector (list of floats)
                List of y coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the area filter.
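        Example (a minimal sketch; assumes ``h_db`` is a connected GPUdb
        handle; the table name and triangle vertices are hypothetical)::
            # Keep records falling inside a triangular region
            response = h_db.filter_by_area(
                table_name = 'trip_data',
                view_name = 'trips_in_triangle',
                x_column_name = 'x',
                x_vector = [ 0.0, 10.0, 5.0 ],
                y_column_name = 'y',
                y_vector = [ 0.0, 0.0, 8.0 ] )
            print( response['count'] )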
        """
        assert isinstance( table_name, (basestring)), "filter_by_area(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_area(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( x_column_name, (basestring)), "filter_by_area(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        x_vector = x_vector if isinstance( x_vector, list ) else ( [] if (x_vector is None) else [ x_vector ] )
        assert isinstance( y_column_name, (basestring)), "filter_by_area(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        y_vector = y_vector if isinstance( y_vector, list ) else ( [] if (y_vector is None) else [ y_vector ] )
        assert isinstance( options, (dict)), "filter_by_area(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_area" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['x_column_name'] = x_column_name
        obj['x_vector'] = x_vector
        obj['y_column_name'] = y_column_name
        obj['y_vector'] = y_vector
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byarea' ) ) 
    # end filter_by_area
    # begin filter_by_area_geometry
    def filter_by_area_geometry( self, table_name = None, view_name = '',
                                 column_name = None, x_vector = None,
                                 y_vector = None, options = {} ):
        """Calculates which geospatial geometry objects from a table intersect a
        named area of interest (NAI/polygon). The operation is synchronous,
        meaning that a response will not be returned until all the matching
        objects are fully available. The response payload provides the count of
        the resulting set. A new resultant set (view) which satisfies the input
        NAI restriction specification is created with the name specified by
        input parameter *view_name*.
        Parameters:
            table_name (str)
                Name of the table to filter.  This may be the name of a
                collection, a table or a view (when chaining queries).
                Collections may be filtered only if all tables within the
                collection have the same type ID.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Must not be an already existing collection, table
                or view.  Default value is ''.
            column_name (str)
                Name of the geospatial geometry column to be filtered.
            x_vector (list of floats)
                List of x coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            y_vector (list of floats)
                List of y coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the area filter.
        """
        assert isinstance( table_name, (basestring)), "filter_by_area_geometry(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_area_geometry(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_area_geometry(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        x_vector = x_vector if isinstance( x_vector, list ) else ( [] if (x_vector is None) else [ x_vector ] )
        y_vector = y_vector if isinstance( y_vector, list ) else ( [] if (y_vector is None) else [ y_vector ] )
        assert isinstance( options, (dict)), "filter_by_area_geometry(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_area_geometry" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_name'] = column_name
        obj['x_vector'] = x_vector
        obj['y_vector'] = y_vector
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byarea/geometry' ) ) 
    # end filter_by_area_geometry
    # begin filter_by_box
    def filter_by_box( self, table_name = None, view_name = '', x_column_name =
                       None, min_x = None, max_x = None, y_column_name = None,
                       min_y = None, max_y = None, options = {} ):
        """Calculates how many objects within the given table lie in a rectangular
        box. The operation is synchronous, meaning that a response will not be
        returned until all the objects are fully available. The response
        payload provides the count of the resulting set. A new resultant set
        which satisfies the input NAI restriction specification is also created
        when the input parameter *view_name* is passed in as part of the input
        payload.
        Parameters:
            table_name (str)
                Name of the table on which the bounding box operation will be
                performed. Must be an existing table.
            view_name (str)
                Optional name of the result view that will be created
                containing the results of the query. Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
                Default value is ''.
            x_column_name (str)
                Name of the column on which to perform the bounding box query.
                Must be a valid numeric column.
            min_x (float)
                Lower bound for the column chosen by input parameter
                *x_column_name*.  Must be less than or equal to input parameter
                *max_x*.
            max_x (float)
                Upper bound for input parameter *x_column_name*.  Must be
                greater than or equal to input parameter *min_x*.
            y_column_name (str)
                Name of a column on which to perform the bounding box query.
                Must be a valid numeric column.
            min_y (float)
                Lower bound for input parameter *y_column_name*. Must be less
                than or equal to input parameter *max_y*.
            max_y (float)
                Upper bound for input parameter *y_column_name*. Must be
                greater than or equal to input parameter *min_y*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the box filter.
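        Example (illustrative sketch; ``db`` and all table/column names are
        hypothetical)::
            # Count records with 0 <= x <= 10 and -5 <= y <= 5.
            response = db.filter_by_box(
                table_name = 'points', view_name = 'points_in_box',
                x_column_name = 'x', min_x = 0.0, max_x = 10.0,
                y_column_name = 'y', min_y = -5.0, max_y = 5.0 )
            print( response.count )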
        """
        assert isinstance( table_name, (basestring)), "filter_by_box(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_box(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( x_column_name, (basestring)), "filter_by_box(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( min_x, (int, long, float)), "filter_by_box(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "filter_by_box(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( y_column_name, (basestring)), "filter_by_box(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( min_y, (int, long, float)), "filter_by_box(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "filter_by_box(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( options, (dict)), "filter_by_box(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_box" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['x_column_name'] = x_column_name
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['y_column_name'] = y_column_name
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/bybox' ) ) 
    # end filter_by_box
    # begin filter_by_box_geometry
    def filter_by_box_geometry( self, table_name = None, view_name = '', column_name
                                = None, min_x = None, max_x = None, min_y =
                                None, max_y = None, options = {} ):
        """Calculates which geospatial geometry objects from a table intersect a
        rectangular box. The operation is synchronous, meaning that a response
        will not be returned until all the objects are fully available. The
        response payload provides the count of the resulting set. A new
        resultant set which satisfies the input NAI restriction specification
        is also created when the input parameter *view_name* is passed in as part
        of the input payload.
        Parameters:
            table_name (str)
                Name of the table on which the bounding box operation will be
                performed. Must be an existing table.
            view_name (str)
                Optional name of the result view that will be created
                containing the results of the query. Must not be an already
                existing collection, table or view.  Default value is ''.
            column_name (str)
                Name of the geospatial geometry column to be filtered.
            min_x (float)
                Lower bound for the x-coordinate of the rectangular box.  Must
                be less than or equal to input parameter *max_x*.
            max_x (float)
                Upper bound for the x-coordinate of the rectangular box.  Must
                be greater than or equal to input parameter *min_x*.
            min_y (float)
                Lower bound for the y-coordinate of the rectangular box. Must
                be less than or equal to input parameter *max_y*.
            max_y (float)
                Upper bound for the y-coordinate of the rectangular box. Must
                be greater than or equal to input parameter *min_y*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the box filter.
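        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Keep geometries intersecting the box [0, 10] x [-5, 5].
            response = db.filter_by_box_geometry(
                table_name = 'geo_table', view_name = 'geo_in_box',
                column_name = 'geom',
                min_x = 0.0, max_x = 10.0, min_y = -5.0, max_y = 5.0 )
            print( response.count )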
        """
        assert isinstance( table_name, (basestring)), "filter_by_box_geometry(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_box_geometry(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_box_geometry(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( min_x, (int, long, float)), "filter_by_box_geometry(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "filter_by_box_geometry(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "filter_by_box_geometry(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "filter_by_box_geometry(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( options, (dict)), "filter_by_box_geometry(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_box_geometry" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_name'] = column_name
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/bybox/geometry' ) ) 
    # end filter_by_box_geometry
    # begin filter_by_geometry
    def filter_by_geometry( self, table_name = None, view_name = '', column_name =
                            None, input_wkt = '', operation = None, options = {}
                            ):
        """Applies a geometry filter against a geospatial geometry column in a
        given table, collection or view. The filtering geometry is provided by
        input parameter *input_wkt*.
        Parameters:
            table_name (str)
                Name of the table on which the filter by geometry will be
                performed.  Must be an existing table, collection or view
                containing a geospatial geometry column.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            column_name (str)
                Name of the column to be used in the filter. Must be a
                geospatial geometry column.
            input_wkt (str)
                A geometry in WKT format that will be used to filter the
                objects in input parameter *table_name*.  Default value is ''.
            operation (str)
                The geometric filtering operation to perform
                Allowed values are:
                * **contains** --
                  Matches records that contain the given WKT in input parameter
                  *input_wkt*, i.e. the given WKT is within the bounds of a
                  record's geometry.
                * **crosses** --
                  Matches records that cross the given WKT.
                * **disjoint** --
                  Matches records that are disjoint from the given WKT.
                * **equals** --
                  Matches records that are the same as the given WKT.
                * **intersects** --
                  Matches records that intersect the given WKT.
                * **overlaps** --
                  Matches records that overlap the given WKT.
                * **touches** --
                  Matches records that touch the given WKT.
                * **within** --
                  Matches records that are within the given WKT.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the geometry filter.
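        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Keep records whose geometry intersects a 10x10 square.
            response = db.filter_by_geometry(
                table_name = 'geo_table', view_name = 'geo_matches',
                column_name = 'geom',
                input_wkt = 'POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))',
                operation = 'intersects' )
            print( response.count )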
        """
        assert isinstance( table_name, (basestring)), "filter_by_geometry(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_geometry(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_geometry(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( input_wkt, (basestring)), "filter_by_geometry(): Argument 'input_wkt' must be (one) of type(s) '(basestring)'; given %s" % type( input_wkt ).__name__
        assert isinstance( operation, (basestring)), "filter_by_geometry(): Argument 'operation' must be (one) of type(s) '(basestring)'; given %s" % type( operation ).__name__
        assert isinstance( options, (dict)), "filter_by_geometry(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_geometry" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_name'] = column_name
        obj['input_wkt'] = input_wkt
        obj['operation'] = operation
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/bygeometry' ) ) 
    # end filter_by_geometry
    # begin filter_by_list
    def filter_by_list( self, table_name = None, view_name = '', column_values_map =
                        None, options = {} ):
        """Calculates which records from a table have values in the given list for
        the corresponding column. The operation is synchronous, meaning that a
        response will not be returned until all the objects are fully
        available. The response payload provides the count of the resulting
        set. A new resultant set (view) which satisfies the input filter
        specification is also created if the input parameter *view_name* is
        passed in as part of the request.
        For example, if a type definition has the columns 'x' and 'y', then a
        filter by list query with the column map {"x":["10.1", "2.3"],
        "y":["0.0", "-31.5", "42.0"]} will return the count of all data points
        whose x and y values match both in the respective x- and y-lists, e.g.,
        "x = 10.1 and y = 0.0", "x = 2.3 and y = -31.5", etc. However, a record
        with "x = 10.1 and y = -31.5" or "x = 2.3 and y = 0.0" would not be
        returned because the values in the given lists do not correspond.
        Parameters:
            table_name (str)
                Name of the table to filter.  This may be the ID of a
                collection, table or a result set (for chaining queries).
                Collections may be filtered only if all tables within the
                collection have the same type ID.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            column_values_map (dict of str to lists of str)
                List of values for the corresponding column in the table
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **filter_mode** --
                  String indicating the filter mode, either 'in_list' or
                  'not_in_list'.
                  Allowed values are:
                  * **in_list** --
                    The filter will match all items that are in the provided
                    list(s).
                  * **not_in_list** --
                    The filter will match all items that are not in the
                    provided list(s).
                  The default value is 'in_list'.
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the list filter.
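        Example (illustrative sketch using the column map discussed above;
        ``db`` and the table name are hypothetical)::
            response = db.filter_by_list(
                table_name = 'points', view_name = 'points_in_list',
                column_values_map = { 'x': [ '10.1', '2.3' ],
                                      'y': [ '0.0', '-31.5', '42.0' ] } )
            print( response.count )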
        """
        assert isinstance( table_name, (basestring)), "filter_by_list(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_list(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_values_map, (dict)), "filter_by_list(): Argument 'column_values_map' must be (one) of type(s) '(dict)'; given %s" % type( column_values_map ).__name__
        assert isinstance( options, (dict)), "filter_by_list(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_list" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_values_map'] = self.__sanitize_dicts( column_values_map )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/bylist' ) ) 
    # end filter_by_list
    # begin filter_by_radius
    def filter_by_radius( self, table_name = None, view_name = '', x_column_name =
                          None, x_center = None, y_column_name = None, y_center
                          = None, radius = None, options = {} ):
        """Calculates which objects from a table lie within a circle with the
        given radius and center point (i.e. circular NAI). The operation is
        synchronous, meaning that a response will not be returned until all the
        objects are fully available. The response payload provides the count of
        the resulting set. A new resultant set (view) which satisfies the input
        circular NAI restriction specification is also created if the input
        parameter *view_name* is passed in as part of the request.
        For track data, all track points that lie within the circle plus one
        point on either side of the circle (if the track goes beyond the
        circle) will be included in the result.
        Parameters:
            table_name (str)
                Name of the table on which the filter by radius operation will
                be performed.  Must be an existing table.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            x_column_name (str)
                Name of the column to be used for the x-coordinate (the
                longitude) of the center.
            x_center (float)
                Value of the longitude of the center. Must be within [-180.0,
                180.0].  The minimum allowed value is -180. The maximum allowed
                value is 180.
            y_column_name (str)
                Name of the column to be used for the y-coordinate (the
                latitude) of the center.
            y_center (float)
                Value of the latitude of the center. Must be within [-90.0,
                90.0].  The minimum allowed value is -90. The maximum allowed
                value is 90.
            radius (float)
                The radius of the circle within which the search will be
                performed. Must be a non-zero positive value. It is in meters;
                so, for example, a value of '42000' means 42 km.  The minimum
                allowed value is 0. The maximum allowed value is MAX_INT.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the radius filter.
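        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Count records within 42 km of (lon -77.0, lat 38.9).
            response = db.filter_by_radius(
                table_name = 'points', view_name = 'points_near_center',
                x_column_name = 'x', x_center = -77.0,
                y_column_name = 'y', y_center = 38.9,
                radius = 42000.0 )
            print( response.count )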
        """
        assert isinstance( table_name, (basestring)), "filter_by_radius(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_radius(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( x_column_name, (basestring)), "filter_by_radius(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( x_center, (int, long, float)), "filter_by_radius(): Argument 'x_center' must be (one) of type(s) '(int, long, float)'; given %s" % type( x_center ).__name__
        assert isinstance( y_column_name, (basestring)), "filter_by_radius(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( y_center, (int, long, float)), "filter_by_radius(): Argument 'y_center' must be (one) of type(s) '(int, long, float)'; given %s" % type( y_center ).__name__
        assert isinstance( radius, (int, long, float)), "filter_by_radius(): Argument 'radius' must be (one) of type(s) '(int, long, float)'; given %s" % type( radius ).__name__
        assert isinstance( options, (dict)), "filter_by_radius(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_radius" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['x_column_name'] = x_column_name
        obj['x_center'] = x_center
        obj['y_column_name'] = y_column_name
        obj['y_center'] = y_center
        obj['radius'] = radius
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byradius' ) ) 
    # end filter_by_radius
    # begin filter_by_radius_geometry
    def filter_by_radius_geometry( self, table_name = None, view_name = '',
                                   column_name = None, x_center = None, y_center
                                   = None, radius = None, options = {} ):
        """Calculates which geospatial geometry objects from a table intersect a
        circle with the given radius and center point (i.e. circular NAI). The
        operation is synchronous, meaning that a response will not be returned
        until all the objects are fully available. The response payload
        provides the count of the resulting set. A new resultant set (view)
        which satisfies the input circular NAI restriction specification is
        also created if the input parameter *view_name* is passed in as part of
        the request.
        Parameters:
            table_name (str)
                Name of the table on which the filter by radius operation will
                be performed.  Must be an existing table.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Must not be an already existing collection, table
                or view.  Default value is ''.
            column_name (str)
                Name of the geospatial geometry column to be filtered.
            x_center (float)
                Value of the longitude of the center. Must be within [-180.0,
                180.0].  The minimum allowed value is -180. The maximum allowed
                value is 180.
            y_center (float)
                Value of the latitude of the center. Must be within [-90.0,
                90.0].  The minimum allowed value is -90. The maximum allowed
                value is 90.
            radius (float)
                The radius of the circle within which the search will be
                performed. Must be a non-zero positive value. It is in meters;
                so, for example, a value of '42000' means 42 km.  The minimum
                allowed value is 0. The maximum allowed value is MAX_INT.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the radius filter.
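        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Keep geometries intersecting a 42 km circle around the point.
            response = db.filter_by_radius_geometry(
                table_name = 'geo_table', view_name = 'geo_near_center',
                column_name = 'geom',
                x_center = -77.0, y_center = 38.9, radius = 42000.0 )
            print( response.count )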
        """
        assert isinstance( table_name, (basestring)), "filter_by_radius_geometry(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_radius_geometry(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_radius_geometry(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( x_center, (int, long, float)), "filter_by_radius_geometry(): Argument 'x_center' must be (one) of type(s) '(int, long, float)'; given %s" % type( x_center ).__name__
        assert isinstance( y_center, (int, long, float)), "filter_by_radius_geometry(): Argument 'y_center' must be (one) of type(s) '(int, long, float)'; given %s" % type( y_center ).__name__
        assert isinstance( radius, (int, long, float)), "filter_by_radius_geometry(): Argument 'radius' must be (one) of type(s) '(int, long, float)'; given %s" % type( radius ).__name__
        assert isinstance( options, (dict)), "filter_by_radius_geometry(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_radius_geometry" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_name'] = column_name
        obj['x_center'] = x_center
        obj['y_center'] = y_center
        obj['radius'] = radius
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byradius/geometry' ) ) 
    # end filter_by_radius_geometry
    # begin filter_by_range
    def filter_by_range( self, table_name = None, view_name = '', column_name =
                         None, lower_bound = None, upper_bound = None, options =
                         {} ):
        """Calculates which objects from a table have a column that is within the
        given bounds. An object from the table identified by input parameter
        *table_name* is added to the view input parameter *view_name* if its
        column is within [input parameter *lower_bound*, input parameter
        *upper_bound*] (inclusive). The operation is synchronous. The response
        provides a count of the number of objects which passed the bound
        filter.  Although this functionality can also be accomplished with the
        standard filter function, this endpoint is more efficient.
        For track objects, the count reflects how many points fall within the
        given bounds (which may not include all the track points of any given
        track).
        Parameters:
            table_name (str)
                Name of the table on which the filter by range operation will
                be performed.  Must be an existing table.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            column_name (str)
                Name of a column on which the operation would be applied.
            lower_bound (float)
                Value of the lower bound (inclusive).
            upper_bound (float)
                Value of the upper bound (inclusive).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the range filter.
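        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Keep records with 0 <= x <= 100 (bounds inclusive).
            response = db.filter_by_range(
                table_name = 'points', view_name = 'points_in_range',
                column_name = 'x', lower_bound = 0.0, upper_bound = 100.0 )
            print( response.count )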
        """
        assert isinstance( table_name, (basestring)), "filter_by_range(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_range(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_range(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( lower_bound, (int, long, float)), "filter_by_range(): Argument 'lower_bound' must be (one) of type(s) '(int, long, float)'; given %s" % type( lower_bound ).__name__
        assert isinstance( upper_bound, (int, long, float)), "filter_by_range(): Argument 'upper_bound' must be (one) of type(s) '(int, long, float)'; given %s" % type( upper_bound ).__name__
        assert isinstance( options, (dict)), "filter_by_range(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_range" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_name'] = column_name
        obj['lower_bound'] = lower_bound
        obj['upper_bound'] = upper_bound
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byrange' ) ) 
    # end filter_by_range
    # begin filter_by_series
    def filter_by_series( self, table_name = None, view_name = '', track_id = None,
                          target_track_ids = None, options = {} ):
        """Filters objects matching all points of the given track (works only on
        track type data).  It allows users to specify a particular track to
        find all other points in the table that fall within specified
        ranges (spatial and temporal) of all points of the given track.
        Additionally, the user can specify another track to see if the two
        intersect (or go close to each other within the specified ranges). The
        user also has the flexibility of using different metrics for the
        spatial distance calculation: Euclidean (flat geometry) or Great Circle
        (spherical geometry to approximate the Earth's surface distances). The
        filtered points are stored in a newly created result set. The return
        value of the function is the number of points in the resultant set
        (view).
        This operation is synchronous, meaning that a response will not be
        returned until all the objects are fully available.
        Parameters:
            table_name (str)
                Name of the table on which the filter by track operation will
                be performed. Must be a currently existing table with track
                semantic type.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            track_id (str)
                The ID of the track which will act as the filtering points.
                Must be an existing track within the given table.
            target_track_ids (list of str)
                Up to one track ID to intersect with the "filter" track. If
                provided, it must be a valid track ID within the given set.
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **spatial_radius** --
                  A positive number passed as a string representing the radius
                  of the search area centered around each track point's
                  geospatial coordinates. The value is interpreted in meters.
                  Required parameter.
                * **time_radius** --
                  A positive number passed as a string representing the maximum
                  allowable time difference between the timestamps of a
                  filtered object and the given track's points. The value is
                  interpreted in seconds. Required parameter.
                * **spatial_distance_metric** --
                  A string representing the coordinate system to use for the
                  spatial search criteria. Acceptable values are 'euclidean'
                  and 'great_circle'. Optional parameter; default is
                  'euclidean'.
                  Allowed values are:
                  * euclidean
                  * great_circle
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the series filter.
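        Example (illustrative sketch; ``db``, the table name, and the track
        ID are hypothetical; note that 'spatial_radius' and 'time_radius'
        are required options)::
            # Find all points within 1 km and 1 hour of track 'track_1'.
            response = db.filter_by_series(
                table_name = 'tracks', view_name = 'near_track_1',
                track_id = 'track_1', target_track_ids = [],
                options = { 'spatial_radius': '1000',
                            'time_radius': '3600' } )
            print( response.count )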
        """
        assert isinstance( table_name, (basestring)), "filter_by_series(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_series(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( track_id, (basestring)), "filter_by_series(): Argument 'track_id' must be (one) of type(s) '(basestring)'; given %s" % type( track_id ).__name__
        target_track_ids = target_track_ids if isinstance( target_track_ids, list ) else ( [] if (target_track_ids is None) else [ target_track_ids ] )
        assert isinstance( options, (dict)), "filter_by_series(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_series" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['track_id'] = track_id
        obj['target_track_ids'] = target_track_ids
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byseries' ) ) 
    # end filter_by_series
    # begin filter_by_string
    def filter_by_string( self, table_name = None, view_name = '', expression =
                          None, mode = None, column_names = None, options = {}
                          ):
        """Calculates which objects from a table, collection, or view match a
        string expression for the given string columns. The option
        'case_sensitive' can be used to modify the behavior for all modes
        except 'search'. For 'search' mode details and limitations, see `Full
        Text Search <../../../concepts/full_text_search.html>`_.
        Parameters:
            table_name (str)
                Name of the table on which the filter operation will be
                performed.  Must be an existing table, collection or view.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            expression (str)
                The expression with which to filter the table.
            mode (str)
                The string filtering mode to apply. See below for details.
                Allowed values are:
                * **search** --
                  Full text search query with wildcards and boolean operators.
                  Note that for this mode, no column can be specified in input
                  parameter *column_names*; all string columns of the table
                  that have text search enabled will be searched.
                * **equals** --
                  Exact whole-string match (accelerated).
                * **contains** --
                  Partial substring match (not accelerated).  If the column is
                  a string type (non-charN) and the number of records is too
                  large, it will return 0.
                * **starts_with** --
                  Strings that start with the given expression (not
                  accelerated). If the column is a string type (non-charN) and
                  the number of records is too large, it will return 0.
                * **regex** --
                  Full regular expression search (not accelerated). If the
                  column is a string type (non-charN) and the number of records
                  is too large, it will return 0.
            column_names (list of str)
                List of columns on which to apply the filter. Ignored for
                'search' mode.  The user can provide a single element (which
                will be automatically promoted to a list internally) or a
                list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **case_sensitive** --
                  If 'false' then string filtering will ignore case. Does not
                  apply to 'search' mode.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'true'.
        Returns:
            A dict with the following entries--
            count (long)
                The number of records that passed the string filter.
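        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Case-insensitive substring match on the 'name' column.
            response = db.filter_by_string(
                table_name = 'people', view_name = 'name_matches',
                expression = 'smith', mode = 'contains',
                column_names = [ 'name' ],
                options = { 'case_sensitive': 'false' } )
            print( response.count )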
        """
        assert isinstance( table_name, (basestring)), "filter_by_string(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_string(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( expression, (basestring)), "filter_by_string(): Argument 'expression' must be (one) of type(s) '(basestring)'; given %s" % type( expression ).__name__
        assert isinstance( mode, (basestring)), "filter_by_string(): Argument 'mode' must be (one) of type(s) '(basestring)'; given %s" % type( mode ).__name__
        column_names = column_names if isinstance( column_names, list ) else ( [] if (column_names is None) else [ column_names ] )
        assert isinstance( options, (dict)), "filter_by_string(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_string" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['expression'] = expression
        obj['mode'] = mode
        obj['column_names'] = column_names
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/bystring' ) ) 
    # end filter_by_string
    # begin filter_by_table
    def filter_by_table( self, table_name = None, view_name = '', column_name =
                         None, source_table_name = None,
                         source_table_column_name = None, options = {} ):
        """Filters objects in one table based on objects in another table. The
        user must specify matching column types from the two tables (i.e. the
        target table from which objects will be filtered and the source table
        based on which the filter will be created); the column names need not
        be the same. If a input parameter *view_name* is specified, then the
        filtered objects will then be put in a newly created view. The
        operation is synchronous, meaning that a response will not be returned
        until all objects are fully available in the result view. The return
        value contains the count (i.e. the size) of the resulting view.
        Parameters:
            table_name (str)
                Name of the table whose data will be filtered. Must be an
                existing table.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            column_name (str)
                Name of the column by whose value the data will be filtered
                from the table designated by input parameter *table_name*.
            source_table_name (str)
                Name of the table whose data will be compared against in the
                table called input parameter *table_name*. Must be an existing
                table.
            source_table_column_name (str)
                Name of the column in the input parameter *source_table_name*
                whose values will be used as the filter for table input
                parameter *table_name*. Must be a geospatial geometry column if
                in 'spatial' mode; otherwise, must match the type of the input
                parameter *column_name*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **filter_mode** --
                  String indicating the filter mode, either *in_table* or
                  *not_in_table*.
                  Allowed values are:
                  * in_table
                  * not_in_table
                  The default value is 'in_table'.
                * **mode** --
                  Mode - should be either *spatial* or *normal*.
                  Allowed values are:
                  * normal
                  * spatial
                  The default value is 'normal'.
                * **buffer** --
                  Buffer size, in meters. Only relevant for *spatial* mode.
                * **buffer_method** --
                  Method used to buffer polygons.  Only relevant for *spatial*
                  mode.
                  Allowed values are:
                  * **geos** --
                    Use geos 1 edge per corner algorithm.
                  The default value is 'normal'.
                * **max_partition_size** --
                  Maximum number of points in a partition. Only relevant for
                  *spatial* mode.
                * **max_partition_score** --
                  Maximum number of points * edges in a partition. Only
                  relevant for *spatial* mode.
                * **x_column_name** --
                  Name of column containing x value of point being filtered in
                  *spatial* mode.
                * **y_column_name** --
                  Name of column containing y value of point being filtered in
                  *spatial* mode.
        Returns:
            A dict with the following entries--
            count (long)
                The number of records in input parameter *table_name* that have
                input parameter *column_name* values matching input parameter
                *source_table_column_name* values in input parameter
                *source_table_name*.
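        Example (illustrative sketch; ``db`` and all table/column names are
        hypothetical)::
            # Keep orders whose 'customer_id' appears in customers.'id'.
            response = db.filter_by_table(
                table_name = 'orders', view_name = 'orders_with_customers',
                column_name = 'customer_id',
                source_table_name = 'customers',
                source_table_column_name = 'id' )
            print( response.count )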
        """
        assert isinstance( table_name, (basestring)), "filter_by_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_table(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_table(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( source_table_name, (basestring)), "filter_by_table(): Argument 'source_table_name' must be (one) of type(s) '(basestring)'; given %s" % type( source_table_name ).__name__
        assert isinstance( source_table_column_name, (basestring)), "filter_by_table(): Argument 'source_table_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( source_table_column_name ).__name__
        assert isinstance( options, (dict)), "filter_by_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['column_name'] = column_name
        obj['source_table_name'] = source_table_name
        obj['source_table_column_name'] = source_table_column_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/bytable' ) ) 
    # end filter_by_table
    # begin filter_by_value
    def filter_by_value( self, table_name = None, view_name = '', is_string = None,
                         value = 0, value_str = '', column_name = None, options
                         = {} ):
        """Calculates which objects from a table has a particular value for a
        particular column. The input parameters provide a way to specify either
        a String or a Double valued column and a desired value for the column
        on which the filter is performed. The operation is synchronous, meaning
        that a response will not be returned until all the objects are fully
        available. The response payload provides the count of the resulting
        set. A new result view which satisfies the input filter restriction
        specification is also created if a view name is passed in as part of
        the input payload.  Although this functionality can also be
        accomplished with the standard filter function, this endpoint is more
        efficient.
        Parameters:
            table_name (str)
                Name of an existing table on which to perform the calculation.
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
            is_string (bool)
                Indicates whether the value being searched for is string or
                numeric.
            value (float)
                The value to search for.  Default value is 0.
            value_str (str)
                The string value to search for.  Default value is ''.
            column_name (str)
                Name of a column on which the filter by value would be applied.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (long)
                The number of records passing the value filter.
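        Example (illustrative sketch; ``db``, table, and column names are
        hypothetical)::
            # Match records whose 'state' column equals the string 'CA'.
            response = db.filter_by_value(
                table_name = 'people', view_name = 'ca_people',
                is_string = True, value_str = 'CA',
                column_name = 'state' )
            print( response.count )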
        """
        assert isinstance( table_name, (basestring)), "filter_by_value(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( view_name, (basestring)), "filter_by_value(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        assert isinstance( is_string, (bool)), "filter_by_value(): Argument 'is_string' must be (one) of type(s) '(bool)'; given %s" % type( is_string ).__name__
        assert isinstance( value, (int, long, float)), "filter_by_value(): Argument 'value' must be (one) of type(s) '(int, long, float)'; given %s" % type( value ).__name__
        assert isinstance( value_str, (basestring)), "filter_by_value(): Argument 'value_str' must be (one) of type(s) '(basestring)'; given %s" % type( value_str ).__name__
        assert isinstance( column_name, (basestring)), "filter_by_value(): Argument 'column_name' must be (one) of type(s) '(basestring)'; given %s" % type( column_name ).__name__
        assert isinstance( options, (dict)), "filter_by_value(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "filter_by_value" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['view_name'] = view_name
        obj['is_string'] = is_string
        obj['value'] = value
        obj['value_str'] = value_str
        obj['column_name'] = column_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/filter/byvalue' ) ) 
    # end filter_by_value
    # begin get_records
    def get_records( self, table_name = None, offset = 0, limit = 10000, encoding =
                     'binary', options = {} ):
        """Retrieves records from a given table, optionally filtered by an
        expression and/or sorted by a column. This operation can be performed
        on tables, views, or on homogeneous collections (collections containing
        tables of all the same type). Records can be returned encoded as binary
        or json.
        This operation supports paging through the data via the input
        parameters *offset* and *limit*. Note that when paging
        through a table, if the table (or the underlying table in case of a
        view) is updated (records are inserted, deleted or modified) the
        records retrieved may differ between calls based on the updates
        applied.
        Parameters:
            table_name (str)
                Name of the table from which the records will be fetched. Must
                be a table, view or homogeneous collection.
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).
                Default value is 0. The minimum allowed value is 0. The maximum
                allowed value is MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned, or END_OF_SET (-9999) to indicate that the
                maximum number of results should be returned.  Default value
                is 10000.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Default value is an empty dict ( {} ).
                Allowed keys are:
                * **expression** --
                  Optional filter expression to apply to the table.
                * **fast_index_lookup** --
                  Indicates if indexes should be used to perform the lookup for
                  a given expression if possible. Only applicable if there is
                  no sorting, the expression contains only equivalence
                  comparisons based on existing table indexes and the range of
                  requested values is from [0 to END_OF_SET].
                  Allowed values are:
                  * true
                  * false
                  The default value is 'true'.
                * **sort_by** --
                  Optional column that the data should be sorted by. Empty by
                  default (i.e. no sorting is applied).
                * **sort_order** --
                  String indicating how the returned values should be sorted -
                  ascending or descending. If sort_order is provided, sort_by
                  has to be provided.
                  Allowed values are:
                  * ascending
                  * descending
                  The default value is 'ascending'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
            type_name (str)
            type_schema (str)
                Avro schema of output parameter *records_binary* or output
                parameter *records_json*
            records_binary (list of str)
                If the input parameter *encoding* was 'binary', then this list
                contains the binary encoded records retrieved from the set,
                otherwise not populated.
            records_json (list of str)
                If the input parameter *encoding* was 'json', then this list
                contains the JSON encoded records retrieved from the set,
                otherwise not populated.
            total_number_of_records (long)
                Total/Filtered number of records.
            has_more_records (bool)
                Too many records. Returned a partial set.
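        Example (illustrative sketch; ``db`` and the table name are
        hypothetical)::
            # Fetch the first 1000 records as JSON, sorted by column 'x'.
            response = db.get_records(
                table_name = 'points', offset = 0, limit = 1000,
                encoding = 'json',
                options = { 'sort_by': 'x', 'sort_order': 'ascending' } )
            for record in response.records_json:
                print( record )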
        """
        assert isinstance( table_name, (basestring)), "get_records(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( offset, (int, long, float)), "get_records(): Argument 'offset' must be (one) of type(s) '(int, long, float)'; given %s" % type( offset ).__name__
        assert isinstance( limit, (int, long, float)), "get_records(): Argument 'limit' must be (one) of type(s) '(int, long, float)'; given %s" % type( limit ).__name__
        assert isinstance( encoding, (basestring)), "get_records(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "get_records(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "get_records" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['offset'] = offset
        obj['limit'] = limit
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/get/records' ) ) 
    # end get_records
    # begin get_records_by_column
    def get_records_by_column( self, table_name = None, column_names = None, offset
                               = None, limit = None, encoding = 'binary',
                               options = {} ):
        """For a given table, retrieves the values from the requested column(s).
        Maps of column name to the array of values as well as the column data
        type are returned. This endpoint supports pagination with the input
        parameters *offset* and *limit*.
        When using pagination, if the table (or the underlying table in the
        case of a view) is modified (records are inserted, updated, or deleted)
        during a call to the endpoint, the records or values retrieved may
        differ between calls based on the type of the update, e.g., the
        contiguity across pages cannot be relied upon.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        Parameters:
            table_name (str)
                Name of the table on which this operation will be performed.
                The table cannot be a parent set.
            column_names (list of str)
                The list of column values to retrieve.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).  The
                minimum allowed value is 0. The maximum allowed value is
                MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned (if not provided the default is 10000), or
                END_OF_SET (-9999) to indicate that the maximum number of
                results allowed by the server should be returned.
            encoding (str)
                Specifies the encoding for returned records; either 'binary' or
                'json'.  Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Default value is an empty dict ( {} ).
                Allowed keys are:
                * **expression** --
                  Optional filter expression to apply to the table.
                * **sort_by** --
                  Optional column that the data should be sorted by. Empty by
                  default (i.e. no sorting is applied).
                * **sort_order** --
                  String indicating how the returned values should be sorted -
                  ascending or descending. If sort_order is provided, sort_by
                  has to be provided.
                  Allowed values are:
                  * ascending
                  * descending
                  The default value is 'ascending'.
                * **order_by** --
                  Comma-separated list of the columns to be sorted by; e.g.
                  'timestamp asc, x desc'.  The columns specified must be
                  present in input parameter *column_names*.  If any alias is
                  given for any column name, the alias must be used, rather
                  than the original column name.
        Returns:
            A dict with the following entries--
            table_name (str)
                The same table name as was passed in the parameter list.
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            total_number_of_records (long)
                Total/Filtered number of records.
            has_more_records (bool)
                Too many records. Returned a partial set.
        """
        assert isinstance( table_name, (basestring)), "get_records_by_column(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        column_names = column_names if isinstance( column_names, list ) else ( [] if (column_names is None) else [ column_names ] )
        assert isinstance( offset, (int, long, float)), "get_records_by_column(): Argument 'offset' must be (one) of type(s) '(int, long, float)'; given %s" % type( offset ).__name__
        assert isinstance( limit, (int, long, float)), "get_records_by_column(): Argument 'limit' must be (one) of type(s) '(int, long, float)'; given %s" % type( limit ).__name__
        assert isinstance( encoding, (basestring)), "get_records_by_column(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "get_records_by_column(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "get_records_by_column" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['column_names'] = column_names
        obj['offset'] = offset
        obj['limit'] = limit
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/get/records/bycolumn' ) ) 
    # end get_records_by_column
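    # Usage sketch (illustrative names; assumes a connected handle `db`):
    # fetch two columns with a filter and a server-side sort.  The response
    # uses a dynamic schema, so the raw encoded payload is returned.
    #
    #   response = db.get_records_by_column(
    #       table_name = 'my_table', column_names = ['x', 'y'],
    #       offset = 0, limit = 100, encoding = 'json',
    #       options = {'expression': 'x > 0', 'sort_by': 'x'} )
    #   print( response['total_number_of_records'] )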
    # begin get_records_by_series
    def get_records_by_series( self, table_name = None, world_table_name = None,
                               offset = 0, limit = 250, encoding = 'binary',
                               options = {} ):
        """Retrieves the complete series/track records from the given input
        parameter *world_table_name* based on the partial track information
        contained in the input parameter *table_name*.
        This operation supports paging through the data via the input parameter
        *offset* and input parameter *limit* parameters.
        In contrast to :meth:`.get_records` this returns records grouped by
        series/track. So if input parameter *offset* is 0 and input parameter
        *limit* is 5 this operation would return the first 5 series/tracks in
        input parameter *table_name*. Each series/track will be returned sorted
        by their TIMESTAMP column.
        Parameters:
            table_name (str)
                Name of the collection/table/view for which series/tracks will
                be fetched.
            world_table_name (str)
                Name of the table containing the complete series/track
                information to be returned for the tracks present in the input
                parameter *table_name*. Typically this is used when retrieving
                series/tracks from a view (which contains partial
                series/tracks) but the user wants to retrieve the entire
                original series/tracks. Can be blank.
            offset (int)
                A non-negative integer indicating the number of initial
                series/tracks to skip (useful for paging through the results).
                Default value is 0. The minimum allowed value is 0. The maximum
                allowed value is MAX_INT.
            limit (int)
                A positive integer indicating the maximum number of
                series/tracks to be returned, or END_OF_SET (-9999) to indicate
                that the maximum number of results should be returned.  Default
                value is 250.
            encoding (str)
                Specifies the encoding for returned records; either 'binary' or
                'json'.  Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            table_names (list of str)
                The table name (one per series/track) of the returned
                series/tracks.
            type_names (list of str)
                The type IDs (one per series/track) of the returned
                series/tracks. This is useful when input parameter *table_name*
                is a collection and the returned series/tracks belong to tables
                with different types.
            type_schemas (list of str)
                The type schemas (one per series/track) of the returned
                series/tracks.
            list_records_binary (list of lists of str)
                If the encoding parameter of the request was 'binary' then this
                list-of-lists contains the binary encoded records for each
                object (inner list) in each series/track (outer list).
                Otherwise, empty list-of-lists.
            list_records_json (list of lists of str)
                If the encoding parameter of the request was 'json' then this
                list-of-lists contains the json encoded records for each object
                (inner list) in each series/track (outer list). Otherwise,
                empty list-of-lists.
        """
        assert isinstance( table_name, (basestring)), "get_records_by_series(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( world_table_name, (basestring)), "get_records_by_series(): Argument 'world_table_name' must be (one) of type(s) '(basestring)'; given %s" % type( world_table_name ).__name__
        assert isinstance( offset, (int, long, float)), "get_records_by_series(): Argument 'offset' must be (one) of type(s) '(int, long, float)'; given %s" % type( offset ).__name__
        assert isinstance( limit, (int, long, float)), "get_records_by_series(): Argument 'limit' must be (one) of type(s) '(int, long, float)'; given %s" % type( limit ).__name__
        assert isinstance( encoding, (basestring)), "get_records_by_series(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "get_records_by_series(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "get_records_by_series" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['world_table_name'] = world_table_name
        obj['offset'] = offset
        obj['limit'] = limit
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/get/records/byseries' ) ) 
    # end get_records_by_series
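    # Usage sketch (illustrative names): page through tracks five at a
    # time; each inner list of 'list_records_json' holds one track's
    # records, sorted by the TIMESTAMP column.
    #
    #   response = db.get_records_by_series(
    #       table_name = 'my_track_view', world_table_name = 'my_tracks',
    #       offset = 0, limit = 5, encoding = 'json' )
    #   for track in response['list_records_json']:
    #       print( len( track ), 'records in this track' )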
    # begin get_records_from_collection
    def get_records_from_collection( self, table_name = None, offset = 0,
                                     limit = 10000, encoding = 'binary',
                                     options = {} ):
        """Retrieves records from a collection. The operation can optionally
        return the record IDs which can be used in certain queries such as
        :meth:`.delete_records`.
        This operation supports paging through the data via the input parameter
        *offset* and input parameter *limit* parameters.
        Note that when using the Java API, it is not possible to retrieve
        records from join tables using this operation.
        Parameters:
            table_name (str)
                Name of the collection or table from which records are to be
                retrieved. Must be an existing collection or table.
            offset (long)
                A non-negative integer indicating the number of initial results
                to skip (this can be useful for paging through the results).
                Default value is 0. The minimum allowed value is 0. The maximum
                allowed value is MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned, or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 10000.
            encoding (str)
                Specifies the encoding for returned records; either 'binary' or
                'json'.  Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Default value is an empty dict ( {} ).
                Allowed keys are:
                * **return_record_ids** --
                  If 'true' then return the internal record ID along with each
                  returned record. Default is 'false'.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
            type_names (list of str)
                The type IDs of the corresponding records in output parameter
                *records_binary* or output parameter *records_json*. This is
                useful when input parameter *table_name* is a heterogeneous
                collection (collections containing tables of different types).
            records_binary (list of str)
                If the encoding parameter of the request was 'binary' then this
                list contains the binary encoded records retrieved from the
                table/collection. Otherwise, empty list.
            records_json (list of str)
                If the encoding parameter of the request was 'json', then this
                list contains the JSON encoded records retrieved from the
                table/collection. Otherwise, empty list.
            record_ids (list of str)
                If the 'return_record_ids' option of the request was 'true',
                then this list contains the internal ID for each object.
                Otherwise it will be empty.
        """
        assert isinstance( table_name, (basestring)), "get_records_from_collection(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( offset, (int, long, float)), "get_records_from_collection(): Argument 'offset' must be (one) of type(s) '(int, long, float)'; given %s" % type( offset ).__name__
        assert isinstance( limit, (int, long, float)), "get_records_from_collection(): Argument 'limit' must be (one) of type(s) '(int, long, float)'; given %s" % type( limit ).__name__
        assert isinstance( encoding, (basestring)), "get_records_from_collection(): Argument 'encoding' must be (one) of type(s) '(basestring)'; given %s" % type( encoding ).__name__
        assert isinstance( options, (dict)), "get_records_from_collection(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "get_records_from_collection" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['offset'] = offset
        obj['limit'] = limit
        obj['encoding'] = encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/get/records/fromcollection' ) ) 
    # end get_records_from_collection
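    # Usage sketch (illustrative names): retrieve records along with their
    # internal IDs, which can later be passed to delete_records().
    #
    #   response = db.get_records_from_collection(
    #       table_name = 'my_collection', offset = 0, limit = 1000,
    #       encoding = 'json', options = {'return_record_ids': 'true'} )
    #   ids = response['record_ids']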
    # begin grant_permission_system
    def grant_permission_system( self, name = None, permission = None,
                                 options = {} ):
        """Grants a system-level permission to a user or role.
        Parameters:
            name (str)
                Name of the user or role to which the permission will be
                granted. Must be an existing user or role.
            permission (str)
                Permission to grant to the user or role.
                Allowed values are:
                * **system_admin** --
                  Full access to all data and system functions.
                * **system_write** --
                  Read and write access to all tables.
                * **system_read** --
                  Read-only access to all tables.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
            permission (str)
                Value of input parameter *permission*.
        """
        assert isinstance( name, (basestring)), "grant_permission_system(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( permission, (basestring)), "grant_permission_system(): Argument 'permission' must be (one) of type(s) '(basestring)'; given %s" % type( permission ).__name__
        assert isinstance( options, (dict)), "grant_permission_system(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "grant_permission_system" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['permission'] = permission
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/grant/permission/system' ) ) 
    # end grant_permission_system
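    # Usage sketch (illustrative names): give an existing role read-only
    # access to all tables.
    #
    #   db.grant_permission_system( name = 'analysts',
    #                               permission = 'system_read',
    #                               options = {} )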
    # begin grant_permission_table
    def grant_permission_table( self, name = None, permission = None,
                                table_name = None, filter_expression = '',
                                options = {} ):
        """Grants a table-level permission to a user or role.
        Parameters:
            name (str)
                Name of the user or role to which the permission will be
                granted. Must be an existing user or role.
            permission (str)
                Permission to grant to the user or role.
                Allowed values are:
                * **table_admin** --
                  Full read/write and administrative access to the table.
                * **table_insert** --
                  Insert access to the table.
                * **table_update** --
                  Update access to the table.
                * **table_delete** --
                  Delete access to the table.
                * **table_read** --
                  Read access to the table.
            table_name (str)
                Name of the table to which the permission grants access. Must
                be an existing table, collection, or view. If a collection,
                the permission also applies to tables and views in the
                collection.
            filter_expression (str)
                Reserved for future use.  Default value is ''.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
            permission (str)
                Value of input parameter *permission*.
            table_name (str)
                Value of input parameter *table_name*.
            filter_expression (str)
                Value of input parameter *filter_expression*.
        """
        assert isinstance( name, (basestring)), "grant_permission_table(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( permission, (basestring)), "grant_permission_table(): Argument 'permission' must be (one) of type(s) '(basestring)'; given %s" % type( permission ).__name__
        assert isinstance( table_name, (basestring)), "grant_permission_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( filter_expression, (basestring)), "grant_permission_table(): Argument 'filter_expression' must be (one) of type(s) '(basestring)'; given %s" % type( filter_expression ).__name__
        assert isinstance( options, (dict)), "grant_permission_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "grant_permission_table" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['permission'] = permission
        obj['table_name'] = table_name
        obj['filter_expression'] = filter_expression
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/grant/permission/table' ) ) 
    # end grant_permission_table
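    # Usage sketch (illustrative names): allow a user to read one table;
    # granting on a collection would cascade to its member tables/views.
    #
    #   db.grant_permission_table( name = 'jdoe',
    #                              permission = 'table_read',
    #                              table_name = 'my_table',
    #                              options = {} )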
    # begin grant_role
    def grant_role( self, role = None, member = None, options = {} ):
        """Grants membership in a role to a user or role.
        Parameters:
            role (str)
                Name of the role in which membership will be granted. Must be
                an existing role.
            member (str)
                Name of the user or role that will be granted membership in
                input parameter *role*. Must be an existing user or role.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            role (str)
                Value of input parameter *role*.
            member (str)
                Value of input parameter *member*.
        """
        assert isinstance( role, (basestring)), "grant_role(): Argument 'role' must be (one) of type(s) '(basestring)'; given %s" % type( role ).__name__
        assert isinstance( member, (basestring)), "grant_role(): Argument 'member' must be (one) of type(s) '(basestring)'; given %s" % type( member ).__name__
        assert isinstance( options, (dict)), "grant_role(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "grant_role" )
        obj = collections.OrderedDict()
        obj['role'] = role
        obj['member'] = member
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/grant/role' ) ) 
    # end grant_role
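    # Usage sketch (illustrative names): add a user to an existing role.
    #
    #   db.grant_role( role = 'analysts', member = 'jdoe', options = {} )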
    # begin has_proc
    def has_proc( self, proc_name = None, options = {} ):
        """Checks the existence of a proc with the given name.
        Parameters:
            proc_name (str)
                Name of the proc to check for existence.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            proc_name (str)
                Value of input parameter *proc_name*.
            proc_exists (bool)
                Indicates whether the proc exists or not.
                Allowed values are:
                * true
                * false
        """
        assert isinstance( proc_name, (basestring)), "has_proc(): Argument 'proc_name' must be (one) of type(s) '(basestring)'; given %s" % type( proc_name ).__name__
        assert isinstance( options, (dict)), "has_proc(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "has_proc" )
        obj = collections.OrderedDict()
        obj['proc_name'] = proc_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/has/proc' ) ) 
    # end has_proc
    # begin has_table
    def has_table( self, table_name = None, options = {} ):
        """Checks for the existence of a table with the given name.
        Parameters:
            table_name (str)
                Name of the table to check for existence.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
            table_exists (bool)
                Indicates whether the table exists or not.
                Allowed values are:
                * true
                * false
        """
        assert isinstance( table_name, (basestring)), "has_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( options, (dict)), "has_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "has_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/has/table' ) ) 
    # end has_table
    # begin has_type
    def has_type( self, type_id = None, options = {} ):
        """Check for the existence of a type.
        Parameters:
            type_id (str)
                ID of the type returned in response to a :meth:`.create_type`
                request.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            type_id (str)
                Value of input parameter *type_id*.
            type_exists (bool)
                Indicates whether the type exists or not.
                Allowed values are:
                * true
                * false
        """
        assert isinstance( type_id, (basestring)), "has_type(): Argument 'type_id' must be (one) of type(s) '(basestring)'; given %s" % type( type_id ).__name__
        assert isinstance( options, (dict)), "has_type(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "has_type" )
        obj = collections.OrderedDict()
        obj['type_id'] = type_id
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/has/type' ) ) 
    # end has_type
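    # Usage sketch (illustrative names): the three has_* endpoints share
    # the same shape -- pass the name/ID in, read the *_exists flag back.
    #
    #   if db.has_table( table_name = 'my_table' )['table_exists']:
    #       print( 'table is present' )
    #   proc_ok = db.has_proc( proc_name = 'my_proc' )['proc_exists']
    #   type_ok = db.has_type( type_id = 'my_type_id' )['type_exists']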
    # begin insert_records
    def insert_records( self, table_name = None, data = None, list_encoding = None,
                        options = {} ):
        """Adds multiple records to the specified table. The operation is
        synchronous, meaning that a response will not be returned until all the
        records are fully inserted and available. The response payload provides
        the counts of the number of records actually inserted and/or updated,
        and can provide the unique identifier of each added record.
        The input parameter *options* parameter can be used to customize this
        function's behavior.
        The *update_on_existing_pk* option specifies the record collision
        policy for inserting into a table with a `primary key
        <../../../concepts/tables.html#primary-keys>`_, but is ignored if no
        primary key exists.
        The *return_record_ids* option indicates that the database should
        return the unique identifiers of inserted records.
        The *route_to_address* option directs that inserted records should be
        targeted for a particular database node.
        Parameters:
            table_name (str)
                Table to which the records are to be added. Must be an existing
                table.
            data (list of str)
                An array of *binary* or *json* encoded data for the records to
                be added.  The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            list_encoding (str)
                The encoding of the records to be inserted.  Default value is
                'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **update_on_existing_pk** --
                  Specifies the record collision policy for inserting into a
                  table with a `primary key
                  <../../../concepts/tables.html#primary-keys>`_.  If set to
                  *true*, any existing table record with primary key values
                  that match those of a record being inserted will be replaced
                  by that new record.  If set to *false*, any existing table
                  record with primary key values that match those of a record
                  being inserted will remain unchanged and the new record
                  discarded.  If the specified table does not have a primary
                  key, then this option is ignored.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **return_record_ids** --
                  If *true* then return the internal record id along with each
                  inserted record.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **route_to_address** --
                  Route to a specific rank/tom. Option not suitable for tables
                  using primary/shard keys
        Returns:
            A dict with the following entries--
            record_ids (list of str)
                An array containing the IDs with which the added records are
                identified internally.
            count_inserted (int)
                The number of records inserted.
            count_updated (int)
                The number of records updated.
        """
        assert isinstance( table_name, (basestring)), "insert_records(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        data = data if isinstance( data, list ) else ( [] if (data is None) else [ data ] )
        assert isinstance( list_encoding, (basestring, type( None ))), "insert_records(): Argument 'list_encoding' must be (one) of type(s) '(basestring, type( None ))'; given %s" % type( list_encoding ).__name__
        assert isinstance( options, (dict)), "insert_records(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "insert_records" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        list_encoding = list_encoding if list_encoding else self.__client_to_object_encoding()
        obj['list_encoding'] = list_encoding
        if (list_encoding == 'json'):
            obj['list_str'] = data
            obj['list'] = []
        elif (list_encoding == 'binary'):
            obj['list'] = data
            obj['list_str'] = []
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/insert/records' ) ) 
    # end insert_records
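    # Usage sketch (illustrative names): insert two JSON-encoded records
    # and ask for their internal IDs back.  The JSON strings must match
    # the table's type schema (here a hypothetical table with x and y).
    #
    #   response = db.insert_records(
    #       table_name = 'my_table',
    #       data = [ '{"x": 1.0, "y": 2.0}', '{"x": 3.0, "y": 4.0}' ],
    #       list_encoding = 'json',
    #       options = {'return_record_ids': 'true'} )
    #   print( response['count_inserted'], response['record_ids'] )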
    # begin insert_records_random
    def insert_records_random( self, table_name = None, count = None,
                               options = {} ):
        """Generates a specified number of random records and adds them to the
        given table. There is an optional parameter that allows the user to
        customize the ranges of the column values. It also allows the user to
        specify linear profiles for some or all columns in which case linear
        values are generated rather than random ones. Only individual tables
        are supported for this operation.
        This operation is synchronous, meaning that a response will not be
        returned until all random records are fully available.
        Parameters:
            table_name (str)
                Table to which random records will be added. Must be an
                existing table.  Also, must be an individual table, not a
                collection of tables, nor a view of a table.
            count (long)
                Number of records to generate.
            options (dict of str to dicts of str to floats)
                Optional parameter to pass in specifications for the randomness
                of the values.  This map is different from the *options*
                parameter of most other endpoints in that it is a map of string
                to map of string to doubles, while most others are maps of
                string to string.  In this map, the top level keys represent
                which column's parameters are being specified, while the
                internal keys represent which parameter is being specified.
                These parameters take on different meanings depending on the
                type of the column.  Below follows a more detailed description
                of the map:  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **seed** --
                  If provided, the internal random number generator will be
                  initialized with the given value.  The minimum is 0.  This
                  allows for the same set of random numbers to be generated
                  across invocations of this endpoint in case the user wants
                  to repeat the test.  Since input parameter *options* is a map
                  of maps, we need an internal map to provide the seed value.
                  For example, to pass 100 as the seed value through this
                  parameter, you need something equivalent to: 'options' =
                  {'seed': { 'value': 100 } }
                  Allowed keys are:
                  * **value** --
                    Pass the seed value here.
                * **all** --
                  This key indicates that the specifications relayed in the
                  internal map are to be applied to all columns of the records.
                  Allowed keys are:
                  * **min** --
                    For numerical columns, the minimum of the generated values
                    is set to this value.  Default is -99999.  For point,
                    shape, and track semantic types, min for numeric 'x' and
                    'y' columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default minimum possible values for these
                    columns in such cases are -180.0 and -90.0. For the
                    'TIMESTAMP' column, the default minimum corresponds to Jan
                    1, 2010.
                    For string columns, the minimum length of the randomly
                    generated strings is set to this value (default is 0). If
                    both minimum and maximum are provided, minimum must be less
                    than or equal to max. Value needs to be within [0, 200].
                    If the min is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **max** --
                    For numerical columns, the maximum of the generated values
                    is set to this value. Default is 99999. For point, shape,
                    and track semantic types, max for numeric 'x' and 'y'
                    columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default maximum possible values for these
                    columns in such cases are 180.0 and 90.0.
                    For string columns, the maximum length of the randomly
                    generated strings is set to this value (default is 200). If
                    both minimum and maximum are provided, *max* must be
                    greater than or equal to *min*. Value needs to be within
                    [0, 200].
                    If the *max* is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **interval** --
                    If specified, generate values for all columns evenly spaced
                    with the given interval value. If a max value is specified
                    for a given column the data is randomly generated between
                    min and max and decimated down to the interval. If no max
                    is provided the data is linearly generated starting at the
                    minimum value (instead of generating random data). For
                    non-decimated string-type columns the interval value is
                    ignored. Instead the values are generated following the
                    pattern: 'attrname_creationIndex#', i.e. the column name
                    suffixed with an underscore and a running counter (starting
                    at 0). For string types with limited size (e.g., char4) the
                    prefix is dropped. No nulls will be generated for nullable
                    columns.
                  * **null_percentage** --
                    If specified, then generate the given percentage of the
                    count as nulls for all nullable columns.  This option will
                    be ignored for non-nullable columns.  The value must be
                    within the range [0, 1.0].  The default value is 5% (0.05).
                  * **cardinality** --
                    If specified, limit the randomly generated values to a
                    fixed set. Not allowed on a column with interval specified,
                    and is not applicable to WKT or Track-specific columns. The
                    value must be greater than 0. This option is disabled by
                    default.
                * **attr_name** --
                  Use the desired column name in place of *attr_name*, and set
                  the following parameters for the column specified. This
                  overrides any parameter set by *all*.
                  Allowed keys are:
                  * **min** --
                    For numerical columns, the minimum of the generated values
                    is set to this value.  Default is -99999.  For point,
                    shape, and track semantic types, min for numeric 'x' and
                    'y' columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default minimum possible values for these
                    columns in such cases are -180.0 and -90.0. For the
                    'TIMESTAMP' column, the default minimum corresponds to Jan
                    1, 2010.
                    For string columns, the minimum length of the randomly
                    generated strings is set to this value (default is 0). If
                    both minimum and maximum are provided, minimum must be less
                    than or equal to max. Value needs to be within [0, 200].
                    If the min is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **max** --
                    For numerical columns, the maximum of the generated values
                    is set to this value. Default is 99999. For point, shape,
                    and track semantic types, max for numeric 'x' and 'y'
                    columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default maximum possible values for these
                    columns in such cases are 180.0 and 90.0.
                    For string columns, the maximum length of the randomly
                    generated strings is set to this value (default is 200). If
                    both minimum and maximum are provided, *max* must be
                    greater than or equal to *min*. Value needs to be within
                    [0, 200].
                    If the *max* is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **interval** --
                    If specified, generate values for this column evenly spaced
                    with the given interval value. If a max value is specified
                    for a given column the data is randomly generated between
                    min and max and decimated down to the interval. If no max
                    is provided the data is linearly generated starting at the
                    minimum value (instead of generating random data). For
                    non-decimated string-type columns the interval value is
                    ignored. Instead the values are generated following the
                    pattern: 'attrname_creationIndex#', i.e. the column name
                    suffixed with an underscore and a running counter (starting
                    at 0). For string types with limited size (e.g., char4) the
                    prefix is dropped. No nulls will be generated for nullable
                    columns.
                  * **null_percentage** --
                    If specified and if this column is nullable, then generate
                    the given percentage of the count as nulls.  This option
                    will result in an error if the column is not nullable.  The
                    value must be within the range [0, 1.0].  The default value
                    is 5% (0.05).
                  * **cardinality** --
                    If specified, limit the randomly generated values to a
                    fixed set. Not allowed on a column with interval specified,
                    and is not applicable to WKT or Track-specific columns. The
                    value must be greater than 0. This option is disabled by
                    default.
                * **track_length** --
                  This key-map pair is only valid for track type data sets (an
                  error is thrown otherwise).  No nulls would be generated for
                  nullable columns.
                  Allowed keys are:
                  * **min** --
                    Minimum possible length for generated series; default is
                    100 records per series. Must be an integral value within
                    the range [1, 500]. If both min and max are specified, min
                    must be less than or equal to max.
                  * **max** --
                    Maximum possible length for generated series; default is
                    500 records per series. Must be an integral value within
                    the range [1, 500]. If both min and max are specified, max
                    must be greater than or equal to min.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
            count (long)
                Value of input parameter *count*.
        """
        assert isinstance( table_name, (basestring)), "insert_records_random(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( count, (int, long, float)), "insert_records_random(): Argument 'count' must be (one) of type(s) '(int, long, float)'; given %s" % type( count ).__name__
        assert isinstance( options, (dict)), "insert_records_random(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "insert_records_random" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['count'] = count
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/insert/records/random' ) ) 
    # end insert_records_random
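    # Usage sketch (illustrative names): generate 1000 reproducible
    # records, fixing the RNG seed and constraining a hypothetical
    # column 'x' via the map-of-maps options format described above.
    #
    #   db.insert_records_random(
    #       table_name = 'my_table', count = 1000,
    #       options = { 'seed': { 'value': 100 },
    #                   'x':    { 'min': 0, 'max': 10 } } )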
    # begin insert_symbol
    def insert_symbol( self, symbol_id = None, symbol_format = None,
                       symbol_data = None, options = {} ):
        """Adds a symbol or icon (i.e. an image) to represent data points when
        data is rendered visually. Users must provide the symbol identifier
        (string), a format (currently supported: 'svg' and 'svg_path'), the
        data for the symbol, and any additional optional parameter (e.g.
        color). To have a symbol used for rendering create a table with a
        string column named 'SYMBOLCODE' (along with 'x' or 'y' for example).
        Then when the table is rendered (via `WMS <../../rest/wms_rest.html>`_)
        if the 'dosymbology' parameter is 'true' then the value of the
        'SYMBOLCODE' column is used to pick the symbol displayed for each
        point.
        Parameters:
            symbol_id (str)
                The id of the symbol being added. This is the same id that
                should be in the 'SYMBOLCODE' column for objects using this
                symbol.
            symbol_format (str)
                Specifies the symbol format. Must be either 'svg' or
                'svg_path'.
                Allowed values are:
                * svg
                * svg_path
            symbol_data (str)
                The actual symbol data. If input parameter *symbol_format* is
                'svg' then this should be the raw bytes representing an svg
                file. If input parameter *symbol_format* is 'svg_path' then this
                should be an svg path string, for example:
                'M25.979,12.896,5.979,12.896,5.979,19.562,25.979,19.562z'
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **color** --
                  If input parameter *symbol_format* is 'svg' this is ignored.
                  If input parameter *symbol_format* is 'svg_path' then this
                  option specifies the color (in RRGGBB hex format) of the
                  path. For example, to have the path rendered in red, use
                  'FF0000'. If 'color' is not provided then '00FF00' (i.e.
                  green) is used by default.
        Returns:
            A dict with the following entries--
            symbol_id (str)
                Value of input parameter *symbol_id*.
        """
        assert isinstance( symbol_id, (basestring)), "insert_symbol(): Argument 'symbol_id' must be (one) of type(s) '(basestring)'; given %s" % type( symbol_id ).__name__
        assert isinstance( symbol_format, (basestring)), "insert_symbol(): Argument 'symbol_format' must be (one) of type(s) '(basestring)'; given %s" % type( symbol_format ).__name__
        assert isinstance( symbol_data, (basestring)), "insert_symbol(): Argument 'symbol_data' must be (one) of type(s) '(basestring)'; given %s" % type( symbol_data ).__name__
        assert isinstance( options, (dict)), "insert_symbol(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "insert_symbol" )
        obj = collections.OrderedDict()
        obj['symbol_id'] = symbol_id
        obj['symbol_format'] = symbol_format
        obj['symbol_data'] = symbol_data
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/insert/symbol' ) ) 
    # end insert_symbol
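    # Usage sketch (illustrative names): register a red 'svg_path' symbol;
    # records whose SYMBOLCODE column equals 'my_symbol' would then render
    # with it when 'dosymbology' is enabled.
    #
    #   db.insert_symbol(
    #       symbol_id = 'my_symbol', symbol_format = 'svg_path',
    #       symbol_data = 'M25.979,12.896,5.979,12.896,5.979,19.562z',
    #       options = {'color': 'FF0000'} )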
    # begin kill_proc
    def kill_proc( self, run_id = '', options = {} ):
        """Kills a running proc instance.
        Parameters:
            run_id (str)
                The run ID of the running proc instance. If the run ID is not
                found or the proc instance has already completed, this does
                nothing. If not specified, all running proc instances will be
                killed.  Default value is ''.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            run_ids (list of str)
                List of run IDs of proc instances that were killed.
        """
        assert isinstance( run_id, (basestring)), "kill_proc(): Argument 'run_id' must be (one) of type(s) '(basestring)'; given %s" % type( run_id ).__name__
        assert isinstance( options, (dict)), "kill_proc(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "kill_proc" )
        obj = collections.OrderedDict()
        obj['run_id'] = run_id
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/kill/proc' ) ) 
    # end kill_proc
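    # Usage sketch: kill one proc instance by run ID, or every running
    # instance by omitting run_id ('12345' is an illustrative ID).
    #
    #   db.kill_proc( run_id = '12345' )
    #   db.kill_proc()   # kills all running proc instances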
    # begin lock_table
    def lock_table( self, table_name = None, lock_type = 'status', options = {} ):
        """Manages global access to a table's data.  By default a table has a
        input parameter *lock_type* of *read_write*, indicating all operations
        are permitted.  A user may request a *read_only* or a *write_only*
        lock, after which only read or write operations, respectively, are
        permitted on the table until the lock is removed.  When input parameter
        *lock_type* is *no_access* then no operations are permitted on the
        table.  The lock status can be queried by setting input parameter
        *lock_type* to *status*.
        Parameters:
            table_name (str)
                Name of the table to be locked. It must be a currently existing
                table, collection, or view.
            lock_type (str)
                The type of lock being applied to the table. Setting it to
                *status* will return the current lock status of the table
                without changing it.  Default value is 'status'.
                Allowed values are:
                * **status** --
                  Show locked status
                * **no_access** --
                  Allow no read/write operations
                * **read_only** --
                  Allow only read operations
                * **write_only** --
                  Allow only write operations
                * **read_write** --
                  Allow all read/write operations
                The default value is 'status'.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            lock_type (str)
                Returns the lock state of the table.
        """
        assert isinstance( table_name, (basestring)), "lock_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( lock_type, (basestring)), "lock_table(): Argument 'lock_type' must be (one) of type(s) '(basestring)'; given %s" % type( lock_type ).__name__
        assert isinstance( options, (dict)), "lock_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "lock_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['lock_type'] = lock_type
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/lock/table' ) ) 
    # end lock_table
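    # Usage sketch (illustrative names): query the current lock, then make
    # the table read-only.
    #
    #   status = db.lock_table( table_name = 'my_table' )['lock_type']
    #   db.lock_table( table_name = 'my_table', lock_type = 'read_only' )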
    # begin merge_records
    def merge_records( self, table_name = None, source_table_names = None,
                       field_maps = None, options = {} ):
        """Create a new empty result table (specified by input parameter
        *table_name*), and insert all records from source tables (specified by
        input parameter *source_table_names*) based on the field mapping
        information (specified by input parameter *field_maps*). The field map
        (specified by input parameter *field_maps*) holds the user specified
        maps of target table column names to source table columns. The array of
        input parameter *field_maps* must match one-to-one with the input
        parameter *source_table_names*, i.e., there must be a map present in input
        parameter *field_maps* for each table listed in input parameter
        *source_table_names*. Read more about Merge Records `here
        <../../../concepts/merge_records.html>`_.
        Parameters:
            table_name (str)
                The new result table name for the records to be merged.  Must
                NOT be an existing table.
            source_table_names (list of str)
                The list of source table names to get the records from. Must be
                existing table names.  The user can provide a single element
                (which will be automatically promoted to a list internally) or
                a list.
            field_maps (list of dicts of str to str)
                Contains a list of source/target column mappings, one mapping
                for each source table listed in input parameter
                *source_table_names* being merged into the target table
                specified by input parameter *table_name*.  Each mapping
                contains the target column names (as keys) that the data in the
                mapped source columns (as values) will be merged into.  All of
                the source columns being merged into a given target column must
                match in type, as that type will determine the type of the new
                target column.  The user can provide a single element (which
                will be automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the newly created
                  merged table specified by input parameter *table_name*. If
                  the collection provided is non-existent, the collection will
                  be automatically created. If empty, then the newly created
                  merged table will be a top-level table.
                * **is_replicated** --
                  Indicates the `distribution scheme
                  <../../../concepts/tables.html#distribution>`_ for the data
                  of the merged table specified in input parameter
                  *table_name*.  If true, the table will be `replicated
                  <../../../concepts/tables.html#replication>`_.  If false, the
                  table will be `randomly sharded
                  <../../../concepts/tables.html#random-sharding>`_.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the merged
                  table specified in input parameter *table_name*.
                * **chunk_size** --
                  Indicates the chunk size to be used for the merged table
                  specified in input parameter *table_name*.
        Returns:
            A dict with the following entries--
            table_name (str)
        """
        assert isinstance( table_name, (basestring)), "merge_records(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        source_table_names = source_table_names if isinstance( source_table_names, list ) else ( [] if (source_table_names is None) else [ source_table_names ] )
        field_maps = field_maps if isinstance( field_maps, list ) else ( [] if (field_maps is None) else [ field_maps ] )
        assert isinstance( options, (dict)), "merge_records(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "merge_records" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['source_table_names'] = source_table_names
        obj['field_maps'] = field_maps
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/merge/records' ) ) 
    # end merge_records
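    # Usage sketch (illustrative names): merge two source tables into a
    # new table, mapping each source's 'id' column into target column
    # 'id' -- one field map per source table, in the same order.
    #
    #   db.merge_records(
    #       table_name = 'merged_table',
    #       source_table_names = ['src_a', 'src_b'],
    #       field_maps = [ {'id': 'id'}, {'id': 'id'} ],
    #       options = {'is_replicated': 'false'} )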
    # begin admin_replace_tom
    def admin_replace_tom( self, old_rank_tom = None, new_rank_tom = None ):
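        """Internal/undocumented endpoint.  Judging from the parameter names
        and the '/replace/tom' URL, this appears to swap the TOM at
        *old_rank_tom* with the one at *new_rank_tom*; that reading is an
        inference, not confirmed by upstream documentation.
        """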
        assert isinstance( old_rank_tom, (int, long, float)), "admin_replace_tom(): Argument 'old_rank_tom' must be (one) of type(s) '(int, long, float)'; given %s" % type( old_rank_tom ).__name__
        assert isinstance( new_rank_tom, (int, long, float)), "admin_replace_tom(): Argument 'new_rank_tom' must be (one) of type(s) '(int, long, float)'; given %s" % type( new_rank_tom ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "admin_replace_tom" )
        obj = collections.OrderedDict()
        obj['old_rank_tom'] = old_rank_tom
        obj['new_rank_tom'] = new_rank_tom
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/replace/tom' ) )
    # end admin_replace_tom
    # begin revoke_permission_system
    def revoke_permission_system( self, name = None, permission = None,
                                  options = {} ):
        """Revokes a system-level permission from a user or role.
        Parameters:
            name (str)
                Name of the user or role from which the permission will be
                revoked. Must be an existing user or role.
            permission (str)
                Permission to revoke from the user or role.
                Allowed values are:
                * **system_admin** --
                  Full access to all data and system functions.
                * **system_write** --
                  Read and write access to all tables.
                * **system_read** --
                  Read-only access to all tables.
            options (dict of str to str)
                Optional parameters.
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
            permission (str)
                Value of input parameter *permission*.
        """
        assert isinstance( name, (basestring)), "revoke_permission_system(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( permission, (basestring)), "revoke_permission_system(): Argument 'permission' must be (one) of type(s) '(basestring)'; given %s" % type( permission ).__name__
        assert isinstance( options, (dict)), "revoke_permission_system(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "revoke_permission_system" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['permission'] = permission
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/revoke/permission/system' ) ) 
    # end revoke_permission_system
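    # Usage sketch (not part of the generated API): revoking a system-level
    # permission from a hypothetical user 'analyst' over a hypothetical
    # connection `db`.  Note that `options` must be a dict; the default of
    # None would fail the type assertion above.
    #
    #   rsp = db.revoke_permission_system( name = 'analyst',
    #                                      permission = 'system_read',
    #                                      options = {} )
    #   print( rsp.name, rsp.permission )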
    # begin revoke_permission_table
    def revoke_permission_table( self, name = None, permission = None, table_name =
                                 None, options = None ):
        """Revokes a table-level permission from a user or role.
        Parameters:
            name (str)
                Name of the user or role from which the permission will be
                revoked. Must be an existing user or role.
            permission (str)
                Permission to revoke from the user or role.
                Allowed values are:
                * **table_admin** --
                  Full read/write and administrative access to the table.
                * **table_insert** --
                  Insert access to the table.
                * **table_update** --
                  Update access to the table.
                * **table_delete** --
                  Delete access to the table.
                * **table_read** --
                  Read access to the table.
            table_name (str)
                Name of the table to which the permission grants access. Must
                be an existing table, collection, or view.
            options (dict of str to str)
                Optional parameters.
        Returns:
            A dict with the following entries--
            name (str)
                Value of input parameter *name*.
            permission (str)
                Value of input parameter *permission*.
            table_name (str)
                Value of input parameter *table_name*.
        """
        assert isinstance( name, (basestring)), "revoke_permission_table(): Argument 'name' must be (one) of type(s) '(basestring)'; given %s" % type( name ).__name__
        assert isinstance( permission, (basestring)), "revoke_permission_table(): Argument 'permission' must be (one) of type(s) '(basestring)'; given %s" % type( permission ).__name__
        assert isinstance( table_name, (basestring)), "revoke_permission_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( options, (dict)), "revoke_permission_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "revoke_permission_table" )
        obj = collections.OrderedDict()
        obj['name'] = name
        obj['permission'] = permission
        obj['table_name'] = table_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/revoke/permission/table' ) ) 
    # end revoke_permission_table
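    # Usage sketch (not part of the generated API): revoking read access on a
    # single table.  The names below are hypothetical.
    #
    #   rsp = db.revoke_permission_table( name = 'analyst',
    #                                     permission = 'table_read',
    #                                     table_name = 'my_table',
    #                                     options = {} )
    #   print( rsp.table_name )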
    # begin revoke_role
    def revoke_role( self, role = None, member = None, options = None ):
        """Revokes membership in a role from a user or role.
        Parameters:
            role (str)
                Name of the role in which membership will be revoked. Must be
                an existing role.
            member (str)
                Name of the user or role that will be revoked membership in
                input parameter *role*. Must be an existing user or role.
            options (dict of str to str)
                Optional parameters.
        Returns:
            A dict with the following entries--
            role (str)
                Value of input parameter *role*.
            member (str)
                Value of input parameter *member*.
        """
        assert isinstance( role, (basestring)), "revoke_role(): Argument 'role' must be (one) of type(s) '(basestring)'; given %s" % type( role ).__name__
        assert isinstance( member, (basestring)), "revoke_role(): Argument 'member' must be (one) of type(s) '(basestring)'; given %s" % type( member ).__name__
        assert isinstance( options, (dict)), "revoke_role(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "revoke_role" )
        obj = collections.OrderedDict()
        obj['role'] = role
        obj['member'] = member
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/revoke/role' ) ) 
    # end revoke_role
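    # Usage sketch (not part of the generated API): removing a hypothetical
    # user 'jdoe' from a hypothetical role 'analysts'.
    #
    #   rsp = db.revoke_role( role = 'analysts', member = 'jdoe', options = {} )
    #   print( rsp.role, rsp.member )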
    # begin show_proc
    def show_proc( self, proc_name = '', options = {} ):
        """Shows information about a proc.
        Parameters:
            proc_name (str)
                Name of the proc to show information about. If specified, must
                be the name of a currently existing proc. If not specified,
                information about all procs will be returned.  Default value is
                ''.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **include_files** --
                  If set to *true*, the files that make up the proc will be
                  returned. If set to *false*, the files will not be returned.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            proc_names (list of str)
                The proc names.
            execution_modes (list of str)
                The execution modes of the procs named in output parameter
                *proc_names*.
                Allowed values are:
                * @INNER_STRUCTURE
            files (list of dicts of str to str)
                Maps of the files that make up the procs named in output
                parameter *proc_names*.
            commands (list of str)
                The commands (excluding arguments) that will be invoked when
                the procs named in output parameter *proc_names* are executed.
            args (list of lists of str)
                Arrays of command-line arguments that will be passed to the
                procs named in output parameter *proc_names* when executed.
            options (list of dicts of str to str)
                The optional parameters for the procs named in output parameter
                *proc_names*.
        """
        assert isinstance( proc_name, (basestring)), "show_proc(): Argument 'proc_name' must be (one) of type(s) '(basestring)'; given %s" % type( proc_name ).__name__
        assert isinstance( options, (dict)), "show_proc(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_proc" )
        obj = collections.OrderedDict()
        obj['proc_name'] = proc_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/proc' ) ) 
    # end show_proc
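    # Usage sketch (not part of the generated API): listing all procs (the
    # empty default for proc_name returns every proc).  `db` is a
    # hypothetical connection handle.
    #
    #   rsp = db.show_proc( options = { 'include_files': 'false' } )
    #   for name, mode in zip( rsp.proc_names, rsp.execution_modes ):
    #       print( name, mode )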
    # begin show_proc_status
    def show_proc_status( self, run_id = '', options = {} ):
        """Shows the statuses of running or completed proc instances. Results are
        grouped by run ID (as returned from :meth:`.execute_proc`) and data
        segment ID (each invocation of the proc command on a data segment is
        assigned a data segment ID).
        Parameters:
            run_id (str)
                The run ID of a specific running or completed proc instance for
                which the status will be returned. If the run ID is not found,
                nothing will be returned. If not specified, the statuses of all
                running and completed proc instances will be returned.  Default
                value is ''.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **clear_complete** --
                  If set to *true*, the status of any proc instance that has
                  completed (either successfully or unsuccessfully) will be
                  cleared and no longer returned in subsequent calls.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            proc_names (dict of str to str)
                The proc names corresponding to the returned run IDs.
            params (dict of str to dicts of str to str)
                The string params passed to :meth:`.execute_proc` for the
                returned run IDs.
            bin_params (dict of str to dicts of str to str)
                The binary params passed to :meth:`.execute_proc` for the
                returned run IDs.
            input_table_names (dict of str to lists of str)
                The input table names passed to :meth:`.execute_proc` for the
                returned run IDs.
            input_column_names (dict of str to dicts of str to lists of str)
                The input column names passed to :meth:`.execute_proc` for the
                returned run IDs, supplemented with the column names for input
                tables not included in the input column name map.
            output_table_names (dict of str to lists of str)
                The output table names passed to :meth:`.execute_proc` for the
                returned run IDs.
            options (dict of str to dicts of str to str)
                The optional parameters passed to :meth:`.execute_proc` for the
                returned run IDs.
            overall_statuses (dict of str to str)
                Overall statuses for the returned run IDs. Note that these are
                rollups and individual statuses may differ between data
                segments for the same run ID; see output parameter *statuses*
                and output parameter *messages* for statuses from individual
                data segments.
            statuses (dict of str to dicts of str to str)
                Statuses for the returned run IDs, grouped by data segment ID.
            messages (dict of str to dicts of str to str)
                Messages containing additional status information for the
                returned run IDs, grouped by data segment ID.
            results (dict of str to dicts of str to dicts of str to str)
                String results for the returned run IDs, grouped by data
                segment ID.
            bin_results (dict of str to dicts of str to dicts of str to str)
                Binary results for the returned run IDs, grouped by data
                segment ID.
            timings (dict of str to dicts of str to dicts of str to longs)
                Timing information for the returned run IDs, grouped by data
                segment ID.
        """
        assert isinstance( run_id, (basestring)), "show_proc_status(): Argument 'run_id' must be (one) of type(s) '(basestring)'; given %s" % type( run_id ).__name__
        assert isinstance( options, (dict)), "show_proc_status(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_proc_status" )
        obj = collections.OrderedDict()
        obj['run_id'] = run_id
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/proc/status' ) ) 
    # end show_proc_status
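    # Usage sketch (not part of the generated API): checking the status of a
    # proc run and clearing it once complete.  The run ID would come from an
    # earlier :meth:`.execute_proc` call; the value here is hypothetical.
    #
    #   rsp = db.show_proc_status( run_id = '12345',
    #                              options = { 'clear_complete': 'true' } )
    #   print( rsp.overall_statuses )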
    # begin show_security
    def show_security( self, names = None, options = None ):
        """Shows security information relating to users and/or roles. If the
        caller is not a system administrator, only information relating to the
        caller and their roles is returned.
        Parameters:
            names (list of str)
                A list of names of users and/or roles about which security
                information is requested. If none are provided, information
                about all users and roles will be returned.  The user can
                provide a single element (which will be automatically promoted
                to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.
        Returns:
            A dict with the following entries--
            types (dict of str to str)
                Map of user/role name to the type of that user/role.
            roles (dict of str to lists of str)
                Map of user/role name to a list of names of roles of which that
                user/role is a member.
            permissions (dict of str to lists of dicts of str to str)
                Map of user/role name to a list of permissions directly granted
                to that user/role.
        """
        names = names if isinstance( names, list ) else ( [] if (names is None) else [ names ] )
        assert isinstance( options, (dict)), "show_security(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_security" )
        obj = collections.OrderedDict()
        obj['names'] = names
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/security' ) ) 
    # end show_security
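    # Usage sketch (not part of the generated API): querying security info for
    # a single hypothetical user; a bare string is auto-promoted to a list.
    #
    #   rsp = db.show_security( 'jdoe', options = {} )
    #   print( rsp.types, rsp.roles, rsp.permissions )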
    # begin show_system_properties
    def show_system_properties( self, options = {} ):
        """Returns server configuration and version related information to the
        caller. The admin tool uses it to present server-related information
        to the user.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **properties** --
                  A comma-separated list of names of the requested properties.
                  If not specified, all properties will be returned.
        Returns:
            A dict with the following entries--
            property_map (dict of str to str)
                A map of server configuration parameters and version
                information.
                Allowed keys are:
                * **conf.enable_worker_http_servers** --
                  Boolean value indicating whether the system is configured for
                  multi-head ingestion.
                  Allowed values are:
                  * **TRUE** --
                    Indicates that the system is configured for multi-head
                    ingestion.
                  * **FALSE** --
                    Indicates that the system is NOT configured for multi-head
                    ingestion.
                * **conf.worker_http_server_ips** --
                  Semicolon (';') separated string of IP addresses of all the
                  ingestion-enabled worker heads of the system.
                * **conf.worker_http_server_ports** --
                  Semicolon (';') separated string of the port numbers of all
                  the ingestion-enabled worker ranks of the system.
        """
        assert isinstance( options, (dict)), "show_system_properties(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_system_properties" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/system/properties' ) ) 
    # end show_system_properties
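    # Usage sketch (not part of the generated API): checking whether the
    # server is configured for multi-head ingestion.  `db` is hypothetical.
    #
    #   rsp = db.show_system_properties(
    #       options = { 'properties': 'conf.enable_worker_http_servers' } )
    #   print( rsp.property_map )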
    # begin show_system_status
    def show_system_status( self, options = {} ):
        """Provides server configuration and health related status to the caller.
        The admin tool uses it to present server-related information to the
        user.
        Parameters:
            options (dict of str to str)
                Optional parameters, currently unused.  Default value is an
                empty dict ( {} ).
        Returns:
            A dict with the following entries--
            status_map (dict of str to str)
                A map of server configuration and health related status.
        """
        assert isinstance( options, (dict)), "show_system_status(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_system_status" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/system/status' ) ) 
    # end show_system_status
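    # Usage sketch (not part of the generated API): polling overall server
    # health over a hypothetical connection `db`.
    #
    #   status = db.show_system_status()
    #   print( status.status_map )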
    # begin show_system_timing
    def show_system_timing( self, options = {} ):
        """Returns the last 100 database requests along with the request timing
        and internal job id. The admin tool uses it to present request timing
        information to the user.
        Parameters:
            options (dict of str to str)
                Optional parameters, currently unused.  Default value is an
                empty dict ( {} ).
        Returns:
            A dict with the following entries--
            endpoints (list of str)
                List of recently called endpoints, most recent first.
            time_in_ms (list of floats)
                List of time (in ms) of the recent requests.
            jobIds (list of str)
                List of the internal job IDs for the recent requests.
        """
        assert isinstance( options, (dict)), "show_system_timing(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_system_timing" )
        obj = collections.OrderedDict()
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/system/timing' ) ) 
    # end show_system_timing
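    # Usage sketch (not part of the generated API): inspecting recent request
    # timings over a hypothetical connection `db`.
    #
    #   timing = db.show_system_timing()
    #   for endpoint, ms in zip( timing.endpoints, timing.time_in_ms ):
    #       print( endpoint, ms )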
    # begin show_table
    def show_table( self, table_name = None, options = {} ):
        """Retrieves detailed information about a table, view, or collection,
        specified in input parameter *table_name*. If the supplied input
        parameter *table_name* is a collection, the call can return information
        about either the collection itself or the tables and views it contains.
        If input parameter *table_name* is empty, information about all
        collections and top-level tables and views can be returned.
        If the option *get_sizes* is set to *true*, then the sizes (objects and
        elements) of each table are returned (in output parameter *sizes* and
        output parameter *full_sizes*), along with the total number of objects
        in the requested table (in output parameter *total_size* and output
        parameter *total_full_size*).
        For a collection, setting the *show_children* option to *false* returns
        only information about the collection itself; setting *show_children*
        to *true* returns a list of tables and views contained in the
        collection, along with their corresponding detail.
        Parameters:
            table_name (str)
                Name of the table for which to retrieve the information. If
                blank, then information about all collections and top-level
                tables and views is returned.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **get_sizes** --
                  If *true*, the table sizes will be returned; otherwise, the
                  size fields will be left blank.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **show_children** --
                  If input parameter *table_name* is a collection, then *true*
                  will return information about the children of the collection,
                  and *false* will return information about the collection
                  itself. If input parameter *table_name* is a table or view,
                  *show_children* must be *false*. If input parameter
                  *table_name* is empty, then *show_children* must be *true*.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'true'.
                * **no_error_if_not_exists** --
                  If *false*, an error will be returned if the provided input
                  parameter *table_name* does not exist. If *true*, an empty
                  result will be returned instead.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **get_column_info** --
                  If *true* then column info (memory usage, etc) will be
                  returned.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A dict with the following entries--
            table_name (str)
                Value of input parameter *table_name*.
            table_names (list of str)
                If input parameter *table_name* is a table or view, then the
                single element of the array is input parameter *table_name*. If
                input parameter *table_name* is a collection and
                *show_children* is set to *true*, then this array is populated
                with the names of all tables and views contained by the given
                collection; if *show_children* is *false* then this array will
                only include the collection name itself. If input parameter
                *table_name* is an empty string, then the array contains the
                names of all collections and top-level tables.
            table_descriptions (list of lists of str)
                List of descriptions for the respective tables in output
                parameter *table_names*.
                Allowed values are:
                * COLLECTION
                * VIEW
                * REPLICATED
                * JOIN
                * RESULT_TABLE
            type_ids (list of str)
                Type ids of the respective tables in output parameter
                *table_names*.
            type_schemas (list of str)
                Type schemas of the respective tables in output parameter
                *table_names*.
            type_labels (list of str)
                Type labels of the respective tables in output parameter
                *table_names*.
            properties (list of dicts of str to lists of str)
                Property maps of the respective tables in output parameter
                *table_names*.
            additional_info (list of dicts of str to str)
                Additional information about the respective tables in output
                parameter *table_names*.
                Allowed values are:
                * @INNER_STRUCTURE
            sizes (list of longs)
                Empty array if the *get_sizes* option is *false*. Otherwise,
                sizes of the respective tables represented in output parameter
                *table_names*. For all but track data types, this is simply the
                number of total objects in a table. For track types, since each
                track semantically contains many individual objects, the output
                parameter *sizes* are the counts of conceptual tracks (each of
                which may be associated with multiple objects).
            full_sizes (list of longs)
                Empty array if the *get_sizes* option is *false*. Otherwise,
                number of total objects in the respective tables represented in
                output parameter *table_names*. For all but track data types,
                this is the same as output parameter *sizes*. For track types,
                since each track semantically contains many individual objects,
                output parameter *full_sizes* is the count of total objects.
            join_sizes (list of floats)
                Empty array if the *get_sizes* option is *false*. Otherwise,
                number of unfiltered objects in the cross product of the
                sub-tables in the joined-tables represented in output parameter
                *table_names*. For simple tables, this number will be the same
                as output parameter *sizes*.  For join-tables this value gives
                the number of joined-table rows that must be processed by any
                aggregate functions operating on the table.
            total_size (long)
                -1 if the *get_sizes* option is *false*. Otherwise, the sum of
                the elements of output parameter *sizes*.
            total_full_size (long)
                -1 if the *get_sizes* option is *false*. Otherwise, the sum of
                the elements of output parameter *full_sizes*.
        """
        assert isinstance( table_name, (basestring)), "show_table(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( options, (dict)), "show_table(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_table" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/table' ) ) 
    # end show_table
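    # Usage sketch (not part of the generated API): fetching table details and
    # sizes for a hypothetical table 'my_table'.
    #
    #   rsp = db.show_table( 'my_table', options = { 'get_sizes': 'true' } )
    #   print( rsp.total_size, rsp.table_descriptions )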
    # begin show_table_metadata
    # end show_table_metadata
    # begin show_tables_by_type
    def show_tables_by_type( self, type_id = None, label = None, options = {} ):
        """Gets names of the tables whose type matches the given criteria. Each
        table has a particular type. This type is composed of the type label,
        the schema of the table, and the semantic type of the table. This
        function allows a lookup of existing tables based on full or partial
        type information. The operation is synchronous.
        Parameters:
            type_id (str)
                Type id returned by a call to :meth:`.create_type`.
            label (str)
                Optional user supplied label which can be used instead of the
                type_id to retrieve all tables with the given label.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            table_names (list of str)
                List of tables matching the input criteria.
        """
        assert isinstance( type_id, (basestring)), "show_tables_by_type(): Argument 'type_id' must be (one) of type(s) '(basestring)'; given %s" % type( type_id ).__name__
        assert isinstance( label, (basestring)), "show_tables_by_type(): Argument 'label' must be (one) of type(s) '(basestring)'; given %s" % type( label ).__name__
        assert isinstance( options, (dict)), "show_tables_by_type(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_tables_by_type" )
        obj = collections.OrderedDict()
        obj['type_id'] = type_id
        obj['label'] = label
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/tables/bytype' ) ) 
    # end show_tables_by_type
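    # Usage sketch (not part of the generated API): looking up tables by a
    # type ID previously returned from :meth:`.create_type` (value
    # hypothetical).  Both type_id and label must be strings.
    #
    #   rsp = db.show_tables_by_type( type_id = known_type_id, label = '' )
    #   print( rsp.table_names )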
    # begin show_triggers
    def show_triggers( self, trigger_ids = None, options = {} ):
        """Retrieves information regarding the specified triggers or all existing
        triggers currently active.
        Parameters:
            trigger_ids (list of str)
                List of IDs of the triggers whose information is to be
                retrieved. An empty list means information will be retrieved on
                all active triggers.  The user can provide a single element
                (which will be automatically promoted to a list internally) or
                a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            trigger_map (dict of str to dicts of str to str)
                This dictionary contains (key, value) pairs of (trigger ID,
                information map/dictionary) where the key is a Unicode string
                representing a Trigger ID. The value is another embedded
                dictionary containing (key, value) pairs where the keys consist
                of 'table_name', 'type' and the parameter names relating to the
                trigger type, e.g. *nai*, *min*, *max*. The values are unicode
                strings (numeric values are also converted to strings)
                representing the value of the respective parameter. If a
                trigger is associated with multiple tables, then the string
                value for *table_name* contains a comma-separated list of table
                names.
        """
        trigger_ids = trigger_ids if isinstance( trigger_ids, list ) else ( [] if (trigger_ids is None) else [ trigger_ids ] )
        assert isinstance( options, (dict)), "show_triggers(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_triggers" )
        obj = collections.OrderedDict()
        obj['trigger_ids'] = trigger_ids
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/triggers' ) ) 
    # end show_triggers
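    # Usage sketch (not part of the generated API): listing all active
    # triggers (an empty ID list means all).  `db` is hypothetical.
    #
    #   rsp = db.show_triggers( trigger_ids = [] )
    #   for trig_id, info in rsp.trigger_map.items():
    #       print( trig_id, info.get( 'table_name' ) )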
    # begin show_types
    def show_types( self, type_id = None, label = None, options = {} ):
        """Retrieves information for the specified data type. Given a type ID, the
        database returns the data type schema, the label, and the semantic type
        along with the type ID. If the user provides any combination of label
        and semantic type, then the database returns the pertinent information
        for all data types that match the input criteria.
        Parameters:
            type_id (str)
                Type Id returned in response to a call to :meth:`.create_type`.
            label (str)
                Optional label string that was supplied by the user in a call
                to :meth:`.create_type`.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            type_ids (list of str)
            type_schemas (list of str)
            labels (list of str)
            properties (list of dicts of str to lists of str)
        """
        assert isinstance( type_id, (basestring)), "show_types(): Argument 'type_id' must be (one) of type(s) '(basestring)'; given %s" % type( type_id ).__name__
        assert isinstance( label, (basestring)), "show_types(): Argument 'label' must be (one) of type(s) '(basestring)'; given %s" % type( label ).__name__
        assert isinstance( options, (dict)), "show_types(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "show_types" )
        obj = collections.OrderedDict()
        obj['type_id'] = type_id
        obj['label'] = label
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/show/types' ) ) 
    # end show_types
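    # Usage sketch (not part of the generated API): retrieving the schema for
    # a type ID previously returned from :meth:`.create_type` (value
    # hypothetical).
    #
    #   rsp = db.show_types( type_id = known_type_id, label = '' )
    #   for tid, schema_str in zip( rsp.type_ids, rsp.type_schemas ):
    #       print( tid, schema_str )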
    # begin update_records
    def update_records( self, table_name = None, expressions = None, new_values_maps
                        = None, records_to_insert = [], records_to_insert_str =
                        [], record_encoding = 'binary', options = {} ):
        """Runs multiple predicate-based updates in a single call.  With the list
        of given expressions, any matching record's column values will be
        updated as provided in input parameter *new_values_maps*.  There is
        also an optional 'upsert' capability: if a particular predicate does
        not match any existing record, a new record can be inserted instead.
        Note that this operation can only be run on an original table and not
        on a collection or a result view.
        This operation can update primary key values.  By default only 'pure
        primary key' predicates are allowed when updating primary key values.
        If the primary key for a table is the column 'attr1', then the
        operation will only accept predicates of the form: "attr1 == 'foo'" if
        the attr1 column is being updated.  For a composite primary key (e.g.
        columns 'attr1' and 'attr2') then this operation will only accept
        predicates of the form: "(attr1 == 'foo') and (attr2 == 'bar')".
        That is, all primary key columns must appear in an equality predicate
        in the expressions.  Furthermore, each 'pure primary key' predicate
        must be unique within a given request.  These restrictions can be
        removed by utilizing some available options through input parameter
        *options*.
        Parameters:
            table_name (str)
                Table to be updated. Must be a currently existing table and not
                a collection or view.
            expressions (list of str)
                A list of the actual predicates, one for each update; format
                should follow the guidelines :meth:`here <.filter>`.  The user
                can provide a single element (which will be automatically
                promoted to a list internally) or a list.
            new_values_maps (list of dicts of str to str and/or None)
                List of new values for the matching records.  Each element is a
                map with (key, value) pairs where the keys are the names of the
                columns whose values are to be updated; the values are the new
                values.  The number of elements in the list should match the
                length of input parameter *expressions*.  The user can provide
                a single element (which will be automatically promoted to a
                list internally) or a list.
            records_to_insert (list of str)
                An *optional* list of new binary-avro encoded records to
                insert, one for each update.  If one of input parameter
                *expressions* does not yield a matching record to be updated,
                then the corresponding element from this list will be added to
                the table.  The user can provide a single element (which will
                be automatically promoted to a list internally) or a list.
                Default value is an empty list ( [] ).
            records_to_insert_str (list of str)
                An optional list of new json-avro encoded objects to insert,
                one for each update, to be added to the set if the particular
                update did not affect any objects.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            record_encoding (str)
                Identifies which of input parameter *records_to_insert* and
                input parameter *records_to_insert_str* should be used.
                Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **global_expression** --
                  An optional global expression to reduce the search space of
                  the predicates listed in input parameter *expressions*.
                * **bypass_safety_checks** --
                  When set to 'true', all predicates are available for primary
                  key updates.  Keep in mind that it is possible to destroy
                  data in this case, since a single predicate may match
                  multiple objects (potentially all records of a table), and
                  then updating all of those records to have the same primary
                  key will, due to the primary key uniqueness constraints,
                  effectively delete all but one of those updated records.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **update_on_existing_pk** --
                  Can be used to customize behavior when the updated primary
                  key value already exists as described in
                  :meth:`.insert_records`.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **record_id** --
                  ID of a single record to be updated (returned in the call to
                  :meth:`.insert_records` or
                  :meth:`.get_records_from_collection`).
        Returns:
            A dict with the following entries--
            count_updated (long)
                Total number of records updated.
            counts_updated (list of longs)
                Total number of records updated per predicate in input
                parameter *expressions*.
            count_inserted (long)
                Total number of records inserted (due to expressions not
                matching any existing records).
            counts_inserted (list of longs)
                Total number of records inserted per predicate in input
                parameter *expressions* (will be either 0 or 1 for each
                expression).
        """
        assert isinstance( table_name, (basestring)), "update_records(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        expressions = expressions if isinstance( expressions, list ) else ( [] if (expressions is None) else [ expressions ] )
        new_values_maps = new_values_maps if isinstance( new_values_maps, list ) else ( [] if (new_values_maps is None) else [ new_values_maps ] )
        records_to_insert = records_to_insert if isinstance( records_to_insert, list ) else ( [] if (records_to_insert is None) else [ records_to_insert ] )
        records_to_insert_str = records_to_insert_str if isinstance( records_to_insert_str, list ) else ( [] if (records_to_insert_str is None) else [ records_to_insert_str ] )
        assert isinstance( record_encoding, (basestring)), "update_records(): Argument 'record_encoding' must be (one) of type(s) '(basestring)'; given %s" % type( record_encoding ).__name__
        assert isinstance( options, (dict)), "update_records(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "update_records" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['expressions'] = expressions
        obj['new_values_maps'] = new_values_maps
        obj['records_to_insert'] = records_to_insert
        obj['records_to_insert_str'] = records_to_insert_str
        obj['record_encoding'] = record_encoding
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/update/records' ) ) 
    # end update_records
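    # Usage sketch (not part of the generated API): a single predicate-based
    # update over a hypothetical table; each expression is positionally
    # paired with its new-values map.
    #
    #   rsp = db.update_records(
    #       table_name = 'my_table',
    #       expressions = [ "attr1 == 'foo'" ],
    #       new_values_maps = [ { 'price': '42.0' } ],
    #       options = { 'global_expression': "category == 'book'" } )
    #   print( rsp.count_updated, rsp.counts_updated )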
    # begin update_records_by_series
    def update_records_by_series( self, table_name = None, world_table_name = None,
                                  view_name = '', reserved = [], options = {} ):
        """Updates the view specified by input parameter *table_name* to include
        full series (track) information from the input parameter
        *world_table_name* for the series (tracks) present in the input
        parameter *view_name*.
        Parameters:
            table_name (str)
                Name of the view on which the update operation will be
                performed. Must be an existing view.
            world_table_name (str)
                Name of the table containing the complete series (track)
                information.
            view_name (str)
                Optional name of the view containing the series (tracks) which
                have to be updated.  Default value is ''.
            reserved (list of str)
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
                Default value is an empty list ( [] ).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            count (int)
        """
        assert isinstance( table_name, (basestring)), "update_records_by_series(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( world_table_name, (basestring)), "update_records_by_series(): Argument 'world_table_name' must be (one) of type(s) '(basestring)'; given %s" % type( world_table_name ).__name__
        assert isinstance( view_name, (basestring)), "update_records_by_series(): Argument 'view_name' must be (one) of type(s) '(basestring)'; given %s" % type( view_name ).__name__
        reserved = reserved if isinstance( reserved, list ) else ( [] if (reserved is None) else [ reserved ] )
        assert isinstance( options, (dict)), "update_records_by_series(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "update_records_by_series" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['world_table_name'] = world_table_name
        obj['view_name'] = view_name
        obj['reserved'] = reserved
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/update/records/byseries' ) ) 
    # end update_records_by_series
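    # Usage sketch (not part of the generated API): pulling full track data
    # into a view from a hypothetical world table.
    #
    #   rsp = db.update_records_by_series( table_name = 'my_track_view',
    #                                      world_table_name = 'all_tracks' )
    #   print( rsp.count )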
    # begin visualize_image
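    # Note: visualize_image carries no docstring in the autogenerated source;
    # judging by its parameters (column names, bounding box, projection, and
    # style options), it renders table geometry/track data to an image.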
    def visualize_image( self, table_names = None, world_table_names = None,
                         x_column_name = None, y_column_name = None,
                         geometry_column_name = None, track_ids = None, min_x =
                         None, max_x = None, min_y = None, max_y = None, width =
                         None, height = None, projection = 'PLATE_CARREE',
                         bg_color = None, style_options = None, options = {} ):
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        world_table_names = world_table_names if isinstance( world_table_names, list ) else ( [] if (world_table_names is None) else [ world_table_names ] )
        assert isinstance( x_column_name, (basestring)), "visualize_image(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_image(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( geometry_column_name, (basestring)), "visualize_image(): Argument 'geometry_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( geometry_column_name ).__name__
        track_ids = track_ids if isinstance( track_ids, list ) else ( [] if (track_ids is None) else [ track_ids ] )
        assert isinstance( min_x, (int, long, float)), "visualize_image(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_image(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_image(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_image(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( width, (int, long, float)), "visualize_image(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_image(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( projection, (basestring)), "visualize_image(): Argument 'projection' must be (one) of type(s) '(basestring)'; given %s" % type( projection ).__name__
        assert isinstance( bg_color, (int, long, float)), "visualize_image(): Argument 'bg_color' must be (one) of type(s) '(int, long, float)'; given %s" % type( bg_color ).__name__
        assert isinstance( style_options, (dict)), "visualize_image(): Argument 'style_options' must be (one) of type(s) '(dict)'; given %s" % type( style_options ).__name__
        assert isinstance( options, (dict)), "visualize_image(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_image" )
        obj = collections.OrderedDict()
        obj['table_names'] = table_names
        obj['world_table_names'] = world_table_names
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['geometry_column_name'] = geometry_column_name
        obj['track_ids'] = track_ids
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['width'] = width
        obj['height'] = height
        obj['projection'] = projection
        obj['bg_color'] = bg_color
        obj['style_options'] = self.__sanitize_dicts( style_options )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/image' ) )
    # end visualize_image
    # begin visualize_image_chart
    def visualize_image_chart( self, table_name = None, x_column_name = None,
                               y_column_name = None, min_x = None, max_x = None,
                               min_y = None, max_y = None, width = None, height
                               = None, bg_color = None, style_options = None,
                               options = {} ):
        """Scatter plot is the only plot type currently supported. A non-numeric
        column can be specified as x or y column and jitters can be added to
        them to avoid excessive overlapping. All color values must be in the
        format RRGGBB or AARRGGBB (to specify the alpha value).
        The image is contained in the output parameter *image_data* field.
        Parameters:
            table_name (str)
                Name of the table containing the data to be drawn as a chart.
            x_column_name (str)
                Name of the column containing the data mapped to the x axis of
                a chart.
            y_column_name (str)
                Name of the column containing the data mapped to the y axis of
                a chart.
            min_x (float)
                Lower bound for the x column values. For a non-numeric x column,
                each x column item is mapped to an integral value starting from
                0.
            max_x (float)
                Upper bound for the x column values. For a non-numeric x column,
                each x column item is mapped to an integral value starting from
                0.
            min_y (float)
                Lower bound for the y column values. For a non-numeric y column,
                each y column item is mapped to an integral value starting from
                0.
            max_y (float)
                Upper bound for the y column values. For a non-numeric y column,
                each y column item is mapped to an integral value starting from
                0.
            width (int)
                Width of the generated image in pixels.
            height (int)
                Height of the generated image in pixels.
            bg_color (str)
                Background color of the generated image.
            style_options (dict of str to lists of str)
                Rendering style options for a chart.
                Allowed keys are:
                * **pointcolor** --
                  The color of points in the plot represented as a hexadecimal
                  number.
                * **pointsize** --
                  The size of points in the plot represented as number of
                  pixels.
                * **pointshape** --
                  The shape of points in the plot.
                  Allowed values are:
                  * none
                  * circle
                  * square
                  * diamond
                  * hollowcircle
                  * hollowsquare
                  * hollowdiamond
                  The default value is 'square'.
                * **cb_pointcolors** --
                  Point color class break information consisting of three
                  entries: class-break attribute, class-break values/ranges,
                  and point color values. This option overrides the pointcolor
                  option if both are provided. Class-break ranges are
                  represented in the form of "min:max". Class-break
                  values/ranges and point color values are separated by
                  cb_delimiter, e.g. {"price", "20:30;30:40;40:50",
                  "0xFF0000;0x00FF00;0x0000FF"}.
                * **cb_pointsizes** --
                  Point size class break information consisting of three
                  entries: class-break attribute, class-break values/ranges,
                  and point size values. This option overrides the pointsize
                  option if both are provided. Class-break ranges are
                  represented in the form of "min:max". Class-break
                  values/ranges and point size values are separated by
                  cb_delimiter, e.g. {"states", "NY;TX;CA", "3;5;7"}.
                * **cb_pointshapes** --
                  Point shape class break information consisting of three
                  entries: class-break attribute, class-break values/ranges,
                  and point shape names. This option overrides the pointshape
                  option if both are provided. Class-break ranges are
                  represented in the form of "min:max". Class-break
                  values/ranges and point shape names are separated by
                  cb_delimiter, e.g. {"states", "NY;TX;CA",
                  "circle;square;diamond"}.
                * **cb_delimiter** --
                  A character or string which separates per-class values in a
                  class-break style option string.
                * **x_order_by** --
                  An expression or aggregate expression by which non-numeric x
                  column values are sorted, e.g. "avg(price) descending".
                * **y_order_by** --
                  An expression or aggregate expression by which non-numeric y
                  column values are sorted, e.g. "avg(price)", which defaults
                  to "avg(price) ascending".
                * **jitter_x** --
                  Amplitude of horizontal jitter applied to non-numeric x
                  column values.
                * **jitter_y** --
                  Amplitude of vertical jitter applied to non-numeric y column
                  values.
                * **plot_all** --
                  If this option is set to "true", all non-numeric column
                  values are plotted, ignoring the min_x, max_x, min_y, and
                  max_y parameters.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A dict with the following entries--
            min_x (float)
                Lower bound for the x column values as provided in input
                parameter *min_x* or calculated for non-numeric columns when
                the plot_all option is used.
            max_x (float)
                Upper bound for the x column values as provided in input
                parameter *max_x* or calculated for non-numeric columns when
                the plot_all option is used.
            min_y (float)
                Lower bound for the y column values as provided in input
                parameter *min_y* or calculated for non-numeric columns when
                the plot_all option is used.
            max_y (float)
                Upper bound for the y column values as provided in input
                parameter *max_y* or calculated for non-numeric columns when
                the plot_all option is used.
            width (int)
                Width of the image as provided in input parameter *width*.
            height (int)
                Height of the image as provided in input parameter *height*.
            bg_color (str)
                Background color of the image as provided in input parameter
                *bg_color*.
            image_data (str)
                The generated image data.
            axes_info (dict of str to lists of str)
                Information returned for drawing labels for the axes associated
                with non-numeric columns.
                Allowed keys are:
                * **sorted_x_values** --
                  Sorted non-numeric x column value list for drawing x axis
                  label.
                * **location_x** --
                  X axis label positions of sorted_x_values in pixel
                  coordinates.
                * **sorted_y_values** --
                  Sorted non-numeric y column value list for drawing y axis
                  label.
                * **location_y** --
                  Y axis label positions of sorted_y_values in pixel
                  coordinates.
        """
        assert isinstance( table_name, (basestring)), "visualize_image_chart(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( x_column_name, (basestring)), "visualize_image_chart(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_image_chart(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( min_x, (int, long, float)), "visualize_image_chart(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_image_chart(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_image_chart(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_image_chart(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( width, (int, long, float)), "visualize_image_chart(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_image_chart(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( bg_color, (basestring)), "visualize_image_chart(): Argument 'bg_color' must be (one) of type(s) '(basestring)'; given %s" % type( bg_color ).__name__
        assert isinstance( style_options, (dict)), "visualize_image_chart(): Argument 'style_options' must be (one) of type(s) '(dict)'; given %s" % type( style_options ).__name__
        assert isinstance( options, (dict)), "visualize_image_chart(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_image_chart" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['width'] = width
        obj['height'] = height
        obj['bg_color'] = bg_color
        obj['style_options'] = self.__sanitize_dicts( style_options )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/image/chart' ) ) 
    # end visualize_image_chart
    # begin visualize_image_classbreak
    def visualize_image_classbreak( self, table_names = None, world_table_names =
                                    None, x_column_name = None, y_column_name =
                                    None, geometry_column_name = None, track_ids
                                    = None, cb_column_name = None, cb_vals =
                                    None, min_x = None, max_x = None, min_y =
                                    None, max_y = None, width = None, height =
                                    None, projection = 'PLATE_CARREE', bg_color
                                    = None, style_options = None, options = {}
                                    ):
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        world_table_names = world_table_names if isinstance( world_table_names, list ) else ( [] if (world_table_names is None) else [ world_table_names ] )
        assert isinstance( x_column_name, (basestring)), "visualize_image_classbreak(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_image_classbreak(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( geometry_column_name, (basestring)), "visualize_image_classbreak(): Argument 'geometry_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( geometry_column_name ).__name__
        track_ids = track_ids if isinstance( track_ids, list ) else ( [] if (track_ids is None) else [ track_ids ] )
        assert isinstance( cb_column_name, (basestring)), "visualize_image_classbreak(): Argument 'cb_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( cb_column_name ).__name__
        cb_vals = cb_vals if isinstance( cb_vals, list ) else ( [] if (cb_vals is None) else [ cb_vals ] )
        assert isinstance( min_x, (int, long, float)), "visualize_image_classbreak(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_image_classbreak(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_image_classbreak(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_image_classbreak(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( width, (int, long, float)), "visualize_image_classbreak(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_image_classbreak(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( projection, (basestring)), "visualize_image_classbreak(): Argument 'projection' must be (one) of type(s) '(basestring)'; given %s" % type( projection ).__name__
        assert isinstance( bg_color, (int, long, float)), "visualize_image_classbreak(): Argument 'bg_color' must be (one) of type(s) '(int, long, float)'; given %s" % type( bg_color ).__name__
        assert isinstance( style_options, (dict)), "visualize_image_classbreak(): Argument 'style_options' must be (one) of type(s) '(dict)'; given %s" % type( style_options ).__name__
        assert isinstance( options, (dict)), "visualize_image_classbreak(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_image_classbreak" )
        obj = collections.OrderedDict()
        obj['table_names'] = table_names
        obj['world_table_names'] = world_table_names
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['geometry_column_name'] = geometry_column_name
        obj['track_ids'] = track_ids
        obj['cb_column_name'] = cb_column_name
        obj['cb_vals'] = cb_vals
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['width'] = width
        obj['height'] = height
        obj['projection'] = projection
        obj['bg_color'] = bg_color
        obj['style_options'] = self.__sanitize_dicts( style_options )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/image/classbreak' ) )
    # end visualize_image_classbreak
    # begin visualize_image_heatmap
    def visualize_image_heatmap( self, table_names = None, x_column_name = None,
                                 y_column_name = None, value_column_name = None,
                                 geometry_column_name = None, min_x = None,
                                 max_x = None, min_y = None, max_y = None, width
                                 = None, height = None, projection =
                                 'PLATE_CARREE', style_options = None, options =
                                 {} ):
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        assert isinstance( x_column_name, (basestring)), "visualize_image_heatmap(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_image_heatmap(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( value_column_name, (basestring)), "visualize_image_heatmap(): Argument 'value_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( value_column_name ).__name__
        assert isinstance( geometry_column_name, (basestring)), "visualize_image_heatmap(): Argument 'geometry_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( geometry_column_name ).__name__
        assert isinstance( min_x, (int, long, float)), "visualize_image_heatmap(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_image_heatmap(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_image_heatmap(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_image_heatmap(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( width, (int, long, float)), "visualize_image_heatmap(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_image_heatmap(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( projection, (basestring)), "visualize_image_heatmap(): Argument 'projection' must be (one) of type(s) '(basestring)'; given %s" % type( projection ).__name__
        assert isinstance( style_options, (dict)), "visualize_image_heatmap(): Argument 'style_options' must be (one) of type(s) '(dict)'; given %s" % type( style_options ).__name__
        assert isinstance( options, (dict)), "visualize_image_heatmap(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_image_heatmap" )
        obj = collections.OrderedDict()
        obj['table_names'] = table_names
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['value_column_name'] = value_column_name
        obj['geometry_column_name'] = geometry_column_name
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['width'] = width
        obj['height'] = height
        obj['projection'] = projection
        obj['style_options'] = self.__sanitize_dicts( style_options )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/image/heatmap' ) )
    # end visualize_image_heatmap
    # begin visualize_image_labels
    def visualize_image_labels( self, table_name = None, x_column_name = None,
                                y_column_name = None, x_offset = '', y_offset =
                                '', text_string = None, font = '', text_color =
                                '', text_angle = '', text_scale = '', draw_box =
                                '', draw_leader = '', line_width = '',
                                line_color = '', fill_color = '',
                                leader_x_column_name = '', leader_y_column_name
                                = '', filter = '', min_x = None, max_x = None,
                                min_y = None, max_y = None, width = None, height
                                = None, projection = 'PLATE_CARREE', options =
                                {} ):
        assert isinstance( table_name, (basestring)), "visualize_image_labels(): Argument 'table_name' must be (one) of type(s) '(basestring)'; given %s" % type( table_name ).__name__
        assert isinstance( x_column_name, (basestring)), "visualize_image_labels(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_image_labels(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( x_offset, (basestring)), "visualize_image_labels(): Argument 'x_offset' must be (one) of type(s) '(basestring)'; given %s" % type( x_offset ).__name__
        assert isinstance( y_offset, (basestring)), "visualize_image_labels(): Argument 'y_offset' must be (one) of type(s) '(basestring)'; given %s" % type( y_offset ).__name__
        assert isinstance( text_string, (basestring)), "visualize_image_labels(): Argument 'text_string' must be (one) of type(s) '(basestring)'; given %s" % type( text_string ).__name__
        assert isinstance( font, (basestring)), "visualize_image_labels(): Argument 'font' must be (one) of type(s) '(basestring)'; given %s" % type( font ).__name__
        assert isinstance( text_color, (basestring)), "visualize_image_labels(): Argument 'text_color' must be (one) of type(s) '(basestring)'; given %s" % type( text_color ).__name__
        assert isinstance( text_angle, (basestring)), "visualize_image_labels(): Argument 'text_angle' must be (one) of type(s) '(basestring)'; given %s" % type( text_angle ).__name__
        assert isinstance( text_scale, (basestring)), "visualize_image_labels(): Argument 'text_scale' must be (one) of type(s) '(basestring)'; given %s" % type( text_scale ).__name__
        assert isinstance( draw_box, (basestring)), "visualize_image_labels(): Argument 'draw_box' must be (one) of type(s) '(basestring)'; given %s" % type( draw_box ).__name__
        assert isinstance( draw_leader, (basestring)), "visualize_image_labels(): Argument 'draw_leader' must be (one) of type(s) '(basestring)'; given %s" % type( draw_leader ).__name__
        assert isinstance( line_width, (basestring)), "visualize_image_labels(): Argument 'line_width' must be (one) of type(s) '(basestring)'; given %s" % type( line_width ).__name__
        assert isinstance( line_color, (basestring)), "visualize_image_labels(): Argument 'line_color' must be (one) of type(s) '(basestring)'; given %s" % type( line_color ).__name__
        assert isinstance( fill_color, (basestring)), "visualize_image_labels(): Argument 'fill_color' must be (one) of type(s) '(basestring)'; given %s" % type( fill_color ).__name__
        assert isinstance( leader_x_column_name, (basestring)), "visualize_image_labels(): Argument 'leader_x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( leader_x_column_name ).__name__
        assert isinstance( leader_y_column_name, (basestring)), "visualize_image_labels(): Argument 'leader_y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( leader_y_column_name ).__name__
        assert isinstance( filter, (basestring)), "visualize_image_labels(): Argument 'filter' must be (one) of type(s) '(basestring)'; given %s" % type( filter ).__name__
        assert isinstance( min_x, (int, long, float)), "visualize_image_labels(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_image_labels(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_image_labels(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_image_labels(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( width, (int, long, float)), "visualize_image_labels(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_image_labels(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( projection, (basestring)), "visualize_image_labels(): Argument 'projection' must be (one) of type(s) '(basestring)'; given %s" % type( projection ).__name__
        assert isinstance( options, (dict)), "visualize_image_labels(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_image_labels" )
        obj = collections.OrderedDict()
        obj['table_name'] = table_name
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['x_offset'] = x_offset
        obj['y_offset'] = y_offset
        obj['text_string'] = text_string
        obj['font'] = font
        obj['text_color'] = text_color
        obj['text_angle'] = text_angle
        obj['text_scale'] = text_scale
        obj['draw_box'] = draw_box
        obj['draw_leader'] = draw_leader
        obj['line_width'] = line_width
        obj['line_color'] = line_color
        obj['fill_color'] = fill_color
        obj['leader_x_column_name'] = leader_x_column_name
        obj['leader_y_column_name'] = leader_y_column_name
        obj['filter'] = filter
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['width'] = width
        obj['height'] = height
        obj['projection'] = projection
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/image/labels' ) )
    # end visualize_image_labels
    # begin visualize_video
    def visualize_video( self, table_names = None, world_table_names = None,
                         track_ids = None, x_column_name = None, y_column_name =
                         None, geometry_column_name = None, min_x = None, max_x
                         = None, min_y = None, max_y = None, width = None,
                         height = None, projection = 'PLATE_CARREE', bg_color =
                         None, time_intervals = None, video_style = None,
                         session_key = None, style_options = None, options = {}
                         ):
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        world_table_names = world_table_names if isinstance( world_table_names, list ) else ( [] if (world_table_names is None) else [ world_table_names ] )
        track_ids = track_ids if isinstance( track_ids, list ) else ( [] if (track_ids is None) else [ track_ids ] )
        assert isinstance( x_column_name, (basestring)), "visualize_video(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_video(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( geometry_column_name, (basestring)), "visualize_video(): Argument 'geometry_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( geometry_column_name ).__name__
        assert isinstance( min_x, (int, long, float)), "visualize_video(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_video(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_video(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_video(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        assert isinstance( width, (int, long, float)), "visualize_video(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_video(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( projection, (basestring)), "visualize_video(): Argument 'projection' must be (one) of type(s) '(basestring)'; given %s" % type( projection ).__name__
        assert isinstance( bg_color, (int, long, float)), "visualize_video(): Argument 'bg_color' must be (one) of type(s) '(int, long, float)'; given %s" % type( bg_color ).__name__
        time_intervals = time_intervals if isinstance( time_intervals, list ) else ( [] if (time_intervals is None) else [ time_intervals ] )
        assert isinstance( video_style, (basestring)), "visualize_video(): Argument 'video_style' must be (one) of type(s) '(basestring)'; given %s" % type( video_style ).__name__
        assert isinstance( session_key, (basestring)), "visualize_video(): Argument 'session_key' must be (one) of type(s) '(basestring)'; given %s" % type( session_key ).__name__
        assert isinstance( style_options, (dict)), "visualize_video(): Argument 'style_options' must be (one) of type(s) '(dict)'; given %s" % type( style_options ).__name__
        assert isinstance( options, (dict)), "visualize_video(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_video" )
        obj = collections.OrderedDict()
        obj['table_names'] = table_names
        obj['world_table_names'] = world_table_names
        obj['track_ids'] = track_ids
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['geometry_column_name'] = geometry_column_name
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['width'] = width
        obj['height'] = height
        obj['projection'] = projection
        obj['bg_color'] = bg_color
        obj['time_intervals'] = time_intervals
        obj['video_style'] = video_style
        obj['session_key'] = session_key
        obj['style_options'] = self.__sanitize_dicts( style_options )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/video' ) )
    # end visualize_video
    # begin visualize_video_heatmap
    def visualize_video_heatmap( self, table_names = None, x_column_name = None,
                                 y_column_name = None, min_x = None, max_x =
                                 None, min_y = None, max_y = None,
                                 time_intervals = None, width = None, height =
                                 None, projection = 'PLATE_CARREE', video_style
                                 = None, session_key = None, style_options =
                                 None, options = {} ):
        table_names = table_names if isinstance( table_names, list ) else ( [] if (table_names is None) else [ table_names ] )
        assert isinstance( x_column_name, (basestring)), "visualize_video_heatmap(): Argument 'x_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( x_column_name ).__name__
        assert isinstance( y_column_name, (basestring)), "visualize_video_heatmap(): Argument 'y_column_name' must be (one) of type(s) '(basestring)'; given %s" % type( y_column_name ).__name__
        assert isinstance( min_x, (int, long, float)), "visualize_video_heatmap(): Argument 'min_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_x ).__name__
        assert isinstance( max_x, (int, long, float)), "visualize_video_heatmap(): Argument 'max_x' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_x ).__name__
        assert isinstance( min_y, (int, long, float)), "visualize_video_heatmap(): Argument 'min_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( min_y ).__name__
        assert isinstance( max_y, (int, long, float)), "visualize_video_heatmap(): Argument 'max_y' must be (one) of type(s) '(int, long, float)'; given %s" % type( max_y ).__name__
        time_intervals = time_intervals if isinstance( time_intervals, list ) else ( [] if (time_intervals is None) else [ time_intervals ] )
        assert isinstance( width, (int, long, float)), "visualize_video_heatmap(): Argument 'width' must be (one) of type(s) '(int, long, float)'; given %s" % type( width ).__name__
        assert isinstance( height, (int, long, float)), "visualize_video_heatmap(): Argument 'height' must be (one) of type(s) '(int, long, float)'; given %s" % type( height ).__name__
        assert isinstance( projection, (basestring)), "visualize_video_heatmap(): Argument 'projection' must be (one) of type(s) '(basestring)'; given %s" % type( projection ).__name__
        assert isinstance( video_style, (basestring)), "visualize_video_heatmap(): Argument 'video_style' must be (one) of type(s) '(basestring)'; given %s" % type( video_style ).__name__
        assert isinstance( session_key, (basestring)), "visualize_video_heatmap(): Argument 'session_key' must be (one) of type(s) '(basestring)'; given %s" % type( session_key ).__name__
        assert isinstance( style_options, (dict)), "visualize_video_heatmap(): Argument 'style_options' must be (one) of type(s) '(dict)'; given %s" % type( style_options ).__name__
        assert isinstance( options, (dict)), "visualize_video_heatmap(): Argument 'options' must be (one) of type(s) '(dict)'; given %s" % type( options ).__name__
        (REQ_SCHEMA, REP_SCHEMA) = self.__get_schemas( "visualize_video_heatmap" )
        obj = collections.OrderedDict()
        obj['table_names'] = table_names
        obj['x_column_name'] = x_column_name
        obj['y_column_name'] = y_column_name
        obj['min_x'] = min_x
        obj['max_x'] = max_x
        obj['min_y'] = min_y
        obj['max_y'] = max_y
        obj['time_intervals'] = time_intervals
        obj['width'] = width
        obj['height'] = height
        obj['projection'] = projection
        obj['video_style'] = video_style
        obj['session_key'] = session_key
        obj['style_options'] = self.__sanitize_dicts( style_options )
        obj['options'] = self.__sanitize_dicts( options )
        return AttrDict( self.__post_then_get( REQ_SCHEMA, REP_SCHEMA, obj, '/visualize/video/heatmap' ) ) 
    # end visualize_video_heatmap
    # -----------------------------------------------------------------------
    # End autogenerated functions
    # -----------------------------------------------------------------------
# end class GPUdb
# ---------------------------------------------------------------------------
# Import GPUdbIngestor; try from an installed package first, if not, try local
# The import path is the same for both Python 2 and 3
try:
    from gpudb import GPUdbIngestor
except:
    if not gpudb_module_path in sys.path :
        sys.path.insert(1, gpudb_module_path)
    from gpudb_ingestor import GPUdbIngestor
# done importing GPUdbIngestor
# ---------------------------------------------------------------------------
# GPUdbTable - Class to Handle GPUdb Tables
# ---------------------------------------------------------------------------
class GPUdbTable( object ):
    @staticmethod
    def random_name():
        """Returns a randomly generated uuid-based name"""
        return str(uuid.uuid1()) 
    # end random_name
    @staticmethod
    def prefix_name( val ):
        """Returns a random name with the specified prefix"""
        return val + GPUdbTable.random_name() 
    # end prefix_name
    def __init__( self, _type, name = None, options = None, db = None,
                  read_only_table_count = None,
                  delete_temporary_views = True,
                  temporary_view_names = None,
                  create_views = True,
                  use_multihead_ingest = False,
                  multihead_ingest_batch_size = 10000,
                  flush_multi_head_ingest_per_insertion = False ):
        """
        Parameters:
            _type (GPUdbRecordType or list of lists of str)
                Either a :class:`.GPUdbRecordType` object which represents
                a type for the table, or a nested list of lists, where each
                internal list has the format of:
                ::
                    # Just the name and type
                    [ "name", "type (double, int etc.)" ]
                    # Name, type, and one column property
                    [ "name", "type (double, int etc.)", "nullable" ]
                    # Name, type, and multiple column properties
                    [ "name", "string", "char4", "nullable" ]
                Pass None for collections.  If creating a GPUdbTable
                object for a pre-existing table, then also pass None.
                If no table with the given name exists, then the given type
                will be created in GPUdb before creating the table.
            name (str)
                The name for the table.  If none is provided, a random
                name will be generated using :meth:`.random_name`.
            options (GPUdbTableOptions or dict)
                A :class:`.GPUdbTableOptions` object or a dict containing
                options for the table creation.
            db (GPUdb)
                A :class:`.GPUdb` object that allows the user to connect to
                the GPUdb server.
            read_only_table_count (int)
                For a known read-only table, provide its number of records.
                The table name must also be provided.
            delete_temporary_views (bool)
                If True, then for terminal queries--queries that cannot be
                chained--delete the temporary views upon completion. Defaults
                to True.
            create_views (bool)
                Indicates whether or not to create views for this table.
            temporary_view_names (list)
                Optional list of temporary view names (that ought
                to be deleted upon terminal queries)
            use_multihead_ingest (bool)
                Indicates whether or not to use multi-head ingestion, if
                available upon insertion.  Note that multi-head ingestion
                is more computationally intensive for sharded tables, and
                is probably advisable only under a heavy ingestion load.
                Choose carefully.
            multihead_ingest_batch_size (int)
                Used only in conjunction with *use_multihead_ingest*;
                ignored otherwise.  Sets the batch size to be used for the
                ingestor.  Must be greater than zero.  Default is 10,000.
                The multi-head ingestor automatically flushes the inserted
                records every *multihead_ingest_batch_size* records.  Any
                remaining records must be manually flushed by the user via
                :meth:`.flush_data_to_server`, or will be flushed
                automatically per :meth:`.insert_records` call if
                *flush_multi_head_ingest_per_insertion* is True.
            flush_multi_head_ingest_per_insertion (bool)
                Used only in conjunction with *use_multihead_ingest*;
                ignored otherwise.  If True, flushes the multi-head ingestor in
                every :meth:`.insert_records` call.  Otherwise, the multi-head
                ingestor flushes the data to the server when a worker queue
                reaches *multihead_ingest_batch_size* in size, and any
                remaining records will have to be manually flushed using
                :meth:`.flush_data_to_server`.  Defaults to False.
        Returns:
            A GPUdbTable object.
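        Example (a minimal sketch, not from the generated docs; assumes a
        GPUdb server reachable on localhost at port 9191):
        ::
            db = GPUdb( host = "localhost", port = "9191" )
            table = GPUdbTable( [ ["id", "int"],
                                  ["name", "string", "char16"] ],
                                name = "example_table", db = db )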
        """
        # The given DB handle must be a GPUdb instance
        if not isinstance( db, GPUdb ):
            raise GPUdbException( "Argument 'db' must be a GPUdb object; "
                                  "given %s" % type(db) )
        self.db = db
        # Save the options (maybe need to convert to a dict)
        if options:
            if isinstance( options, GPUdbTableOptions ):
                self.options = options
            elif isinstance( options, dict ):
                self.options = GPUdbTableOptions( options )
            else:
                raise GPUdbException( "Argument 'options' must be either a dict "
                                      "or a GPUdbTableOptions object; given '%s'"
                                      % type( options ) )
        else:
            self.options = GPUdbTableOptions()
        # Save the type (create it if necessary)
        self._type = _type
        if isinstance( _type, GPUdbRecordType):
            self.record_type = _type
        elif not _type:
            self.record_type = None
        else:
            self.record_type = GPUdbRecordType( _type )
        # Save passed-in arguments
        self._delete_temporary_views = delete_temporary_views
        self.create_views = create_views
        # Create and update the set of temporary table names
        self._temporary_view_names = set()
        if temporary_view_names:
            self._temporary_view_names.update( temporary_view_names )
        # The table is known to be read only
        if read_only_table_count is not None: # Integer value 0 accepted
            if not name: # name must be given!
                raise GPUdbException( "Table name must be provided with 'read_only_table_count'." )
            if not isinstance( read_only_table_count, (int, long) ):
                raise GPUdbException( "Argument 'read_only_table_count' must be an integer." )
            if (read_only_table_count < 0):
                raise GPUdbException( "Argument 'read_only_table_count' must be greater than "
                                      "or equal to zero; given %d" % read_only_table_count )
            # All checks pass; save the name and count
            self.name          = name
            self._count        = read_only_table_count
            self._is_read_only = True
            return # Nothing more to do
        # end if
        # NOT a known read-only table; need to either get info on it or create it
        # -----------------------------------------------------------------------
        # Create a random table name if none is given
        self.name = name if name else GPUdbTable.random_name()
        # Some default values (assuming it is not a read-only table)
        self._count = None
        self._is_read_only = False
        # Do different things based on whether the table already exists
        if self.db.has_table( self.name )["table_exists"]:
            # Check that the given type agrees with the existing table's type, if any given
            show_table_rsp = self.db.show_table( self.name, options = {"get_sizes": "true"} )
            if not _Util.is_ok( show_table_rsp ): # problem getting table info
                raise GPUdbException( "Problem getting info on the table: " + _Util.get_error_msg( show_table_rsp ) )
            if (len( show_table_rsp["type_schemas"] ) > 0): # not a collection
                table_type = GPUdbRecordType( None, "", show_table_rsp["type_schemas"][0],
                                              show_table_rsp["properties"][0] )
            else:
                table_type = None
            if ( self.record_type and not table_type ):
                # TODO: Decide if we should have this check or silently ignore the given type
                raise GPUdbException( "Table '%s' is an existing collection; so cannot be of the "
                                      "given type." % self.name )
            if ( self.record_type and (self.record_type != table_type) ):
                raise GPUdbException( "Table '%s' exists; existing table's type does "
                                      "not match the given type." % self.name )
            self.record_type = table_type
            # Check if the table is read-only or not
            if show_table_rsp[ C._table_descriptions ] in [ C._view, C._join, C._result_table ]:
                self._is_read_only = True
                self._count = show_table_rsp[ C._total_full_size ]
        else: # table does not already exist in GPUdb
            # Create the table (and the type)
            if self.options._is_collection: # Create a collection
                rsp_obj = self.db.create_table( self.name, "",
                                                self.options.as_dict() )
            elif self.record_type: # create a regular table
                self.record_type.create_type( self.db )
                rsp_obj = self.db.create_table( self.name, self.record_type.type_id,
                                                self.options.as_dict() )
            else: # Need to create a table (and hence a type), but no type was given
                raise GPUdbException( "Must provide a type to create a new table; none given." )
            if not _Util.is_ok( rsp_obj ): # problem creating the table
                raise GPUdbException( _Util.get_error_msg( rsp_obj ) )
        # end if-else
        # Set up multi-head ingestion, if needed
        self._multihead_ingestor = None
        if not isinstance( use_multihead_ingest, bool ):
            raise GPUdbException( "Argument 'use_multihead_ingest' must be "
                                  "a bool; given '%s'"
                                  % str( type( use_multihead_ingest ) ) )
        if use_multihead_ingest:
            # Check multihead_ingest_batch_size
            if ( not isinstance( multihead_ingest_batch_size, (int, long) )
                 or (multihead_ingest_batch_size < 1) ):
                raise GPUdbException( "Argument 'multihead_ingest_batch_size' "
                                      "must be an integer greater than zero; "
                                      "given: " + multihead_ingest_batch_size )
            self._multihead_ingestor = GPUdbIngestor( self.db, self.name,
                                                      self.record_type,
                                                      multihead_ingest_batch_size )
            # Save the per-insertion-call flushing setting
            self._flush_multi_head_ingest_per_insertion = flush_multi_head_ingest_per_insertion
            # Set the function used by multihead ingestor for encoding records
            self._record_encoding_function = lambda vals: GPUdbRecord( self.record_type, vals )
        else: # no multi-head ingestion
            # Set the function used by the regular insertion for encoding records
            self._record_encoding_function = lambda vals: self.__encode_data_for_insertion( vals )
        # end if
    # end __init__
    def __str__( self ):
        return self.name
    # end __str__
    def __len__( self ):
        """Return the current size of the table.  If it is a read-only table,
        then return the cached count; if not a read-only table, get the current
        size from GPUdb.
        """
        if self._is_read_only:
            return self._count
        
        # Not a read-only table; get the current size
        show_table_rsp = self.db.show_table( self.name, options = {"get_sizes": "true"} )
        if not _Util.is_ok( show_table_rsp ):
            return 0
        return show_table_rsp[ C._total_full_size ]
    # end __len__
    def size( self ):
        """Return the table's size/length/count.
        """
        return self.__len__() 
    # end size
    def __getitem__( self, key ):
        """Implement indexing and slicing for the table.
        """
        # A single integer--get a single record
        if isinstance( key, (int, long) ):
            if (key < 0):
                raise TypeError( "GPUdbTable does not support negative indexing" )
            return self.get_records( key, 1 )
        # end if
        # Handle slicing
        if isinstance( key, slice ):
            if key.step and (key.step != 1):
                raise TypeError( "GPUdbTable does not support slicing with steps" )
            if not isinstance(key.start, (int, long)) or not isinstance(key.stop, (int, long)):
                raise TypeError( "GPUdbTable slicing requires integers" )
            if (key.start < 0):
                raise TypeError( "GPUdbTable does not support negative indexing" )
            if ( (key.stop < 0) and (key.stop != self.db.END_OF_SET) ):
                raise TypeError( "GPUdbTable does not support negative indexing" )
            if ( (key.stop <= key.start) and (key.stop != self.db.END_OF_SET) ):
                raise IndexError( "GPUdbTable slice stop index must be greater than the start index" )
            limit = key.stop if (key.stop == self.db.END_OF_SET) \
                    else (key.stop - key.start)
            return self.get_records( key.start, limit )
        # end if
        raise TypeError( "GPUdbTable indexing/slicing requires integers" )
    # end __getitem__
    def __iter__( self ):
        """Return a table iterator for this table.  Defaults to the first
        10,000 records in the table.  If needing to access more records,
        please use the GPUdbTableIterator class directly.
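        Example:
        ::
            for record in table:
                print( record )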
        """
        return GPUdbTableIterator( self )
    # end __iter__
    def __process_view_name(self, view_name ):
        """Given a view name, process it as needed.
        Returns:
            The processed view name
        """
        # If no view name is given but views ought to be created, get a random name
        if not view_name:
            if self.create_views: # will create a view
                view_name = GPUdbTable.random_name()
            else: # won't create views
                view_name = ""
        # end if
        return view_name
    # end __process_view_name
    @property
    def table_name( self ):
        return self.name
    # end table_name
    @property
    def is_read_only( self ): # read-only attribute is_read_only
        """Is the table read-only, or can we modify it?
        """
        return self._is_read_only
    # end is_read_only
    @property
    def count( self ):  # read-only property count
        """Return the table's size/length/count.
        """
        return self.__len__()
    # end count
    def get_table_type( self ):
        """Return the table's (record) type."""
        return self.record_type 
    # end get_table_type
    def alias( self, alias ):
        """Create an alias string for this table.
        Parameters:
            alias (str)
                A string that contains the alias.
        Returns:
            A string with the format "this-table-name as alias".
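            For example, for a table named "example_table",
            ``table.alias( "t1" )`` returns "example_table as t1".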
        """
        if not isinstance( alias, (str, unicode) ):
           raise GPUdbException( "'alias' must be a string; given {0}"
                                 "".format( str( type( alias ) ) ) )
        return "{0} as {1}".format( self.name, alias ) 
    # end alias
    
    def create_view( self, view_name, count = None ):
        """Given a view name and a related response, create a new GPUdbTable object
        which is a read-only table with the intermediate tables automatically
        updated.
        Returns:
            A :class:`.GPUdbTable` object
        """
        # If the current table is read-only, add it to the list of intermediate
        # temporary table names
        if self.is_read_only:
            self._temporary_view_names.update( [ self.name ] )
        view = GPUdbTable( None, name = view_name,
                           read_only_table_count = count,
                           db = self.db,
                           temporary_view_names = self._temporary_view_names )
        return view 
    # end create_view
    def cleanup( self ):
        """Clear/drop all intermediate tables if settings allow it.
        Returns:
            self for enabling chaining method invocations.
        """
        # Clear/drop all temporary tables
        if self._delete_temporary_views:
            for view in list(self._temporary_view_names): # iterate over a copy
                self.db.clear_table( table_name = view )
                self._temporary_view_names.remove( view )
        else: # We're not allowed to delete intermediate tables!
            raise GPUdbException( "Not allowed to delete intermediate "
                                  "tables." )
        return self 
    # end cleanup
    def exists( self, options = {} ):
        """Checks for the existence of a table with the given name.
        Returns:
            A boolean flag indicating whether the table currently
            exists in the database.
        """
        response = self.db.has_table( self.name, options = options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response[ "table_exists" ] 
    # end exists
    def flush_data_to_server( self ):
        """If multi-head ingestion is enabled, then flush all records
        in the ingestors' worker queues so that they actually get
        inserted to the server database.
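        Example (a sketch; assumes this table was constructed with
        *use_multihead_ingest* set to True):
        ::
            table.insert_records( [1, 2, 3] )
            table.flush_data_to_server()  # push any queued records now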
        """
        if self._multihead_ingestor:
            self._multihead_ingestor.flush() 
    # end flush_data_to_server
    def __encode_data_for_insertion( self, values ):
        """Encode the given values with the database client's encoding
        and return the encoded data.
        """
        encoding = self.db._GPUdb__client_to_object_encoding()
        if encoding == "binary": # compare strings by value, not identity
            encoded_record = GPUdbRecord( self.record_type, values ).binary_data
        else: # JSON encoding
            encoded_record = GPUdbRecord( self.record_type, values ).json_data_string
        return encoded_record
    # end __encode_data_for_insertion
    def insert_records( self, *args, **kwargs ):
        """Insert one or more records.
        Parameters:
            args
                Values for all columns of a single record or multiple records.
                For a single record, use either of the following syntaxes:
                ::
                    insert_records( 1, 2, 3 )
                    insert_records( [1, 2, 3] )
                For multiple records, use either of the following syntaxes:
                ::
                    insert_records( [ [1, 2, 3], [4, 5, 6] ] )
                    insert_records(   [1, 2, 3], [4, 5, 6]   )
                Also, the user can use keyword arguments to pass in values:
                ::
                    # For a record type with two integers named 'a' and 'b':
                    insert_records( {"a":  1, "b":  1},
                                    {"a": 42, "b": 32} )
                Additionally, the user may provide options for the insertion
                operation.  For example:
                ::
                    insert_records( [1, 2, 3], [4, 5, 6],
                                    options = {"return_record_ids": "true"} )
            kwargs
                Values for all columns of a single record.  Mutually
                exclusive with args (i.e. cannot provide both) when kwargs
                contains column data.
                May also contain an 'options' keyword argument, which will
                be passed to the database for the insertion operation.
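                For example, using the keyword syntax together with options
                (a sketch; assumes integer columns 'a' and 'b'):
                ::
                    insert_records( a = 1, b = 2,
                                    options = {"return_record_ids": "true"} )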
        Returns:
            A :class:`.GPUdbTable` object with the insert_records()
            response fields converted to attributes and stored within.
        """
        # Extract any options that the user may have provided
        options = kwargs.get( "options", None )
        if options is not None: # if given, remove from kwargs
            kwargs.pop( "options" )
        else: # no option given; use an empty dict
            options = {}
        encoded_data = []
        # Process the input--single record or multiple records (or invalid syntax)?
        if args and kwargs:
            # Cannot give both args and kwargs
            raise GPUdbException( "Cannot specify both args and kwargs: either provide "
                                  "the column values for a single record "
                                  "in 'kwargs', or provide column values for any number "
                                  "of records in 'args'." )
        if kwargs:
            # Gave the column values for a single record in kwargs
            encoded_record = self._record_encoding_function( kwargs )
            encoded_data.append( encoded_record )
        elif not any( _Util.is_list_or_dict( i ) for i in args):
            # Column values not within a single list/dict: so it is a single record
            encoded_record = self._record_encoding_function( list(args) )
            encoded_data.append( encoded_record )
        elif not all( _Util.is_list_or_dict( i ) for i in args):
            # Some values are lists or dicts, but not all--this is an error case
            raise GPUdbException( "Arguments must be either contain no list, or contain only "
                                  "lists or dicts; i.e. it must not be a mix; "
                                  "given {0}".format( args ) )
        elif (len( args ) == 1):
            # A list/dict of length one given
            if any( isinstance(i, list) for i in args[0]):
                # At least one element within the list is also a list
                if not all( _Util.is_list_or_dict( i ) for i in args[0]):
                    # But not all elements are lists/dict; this is an error case
                    raise GPUdbException( "Arguments must be either a single list, multiple lists, "
                                          "a list of lists, or contain no lists; i.e. it must not be "
                                          "a mix of lists and non-lists; given a list with mixed "
                                          "elements: {0}".format( args ) )
                else:
                    # A list of lists/dicts--multiple records within a list
                    for col_vals in args[0]:
                        encoded_record = self._record_encoding_function( col_vals )
                        encoded_data.append( encoded_record )
                    # end for
                # end inner-most if-else
            else:
                # A single list--a single record
                encoded_record = self._record_encoding_function( *args )
                encoded_data.append( encoded_record )
            # end 2nd inner if-else
        else:
            # All arguments are either lists or dicts, so multiple records given
            for col_vals in args:
                encoded_record = self._record_encoding_function( col_vals )
                encoded_data.append( encoded_record )
            # end for
        # end if-else
        if not encoded_data: # No data given
            raise GPUdbException( "Must provide data for at least a single record; none given." )
        # Make the insertion call-- either with the multi-head ingestor or the regular way
        if self._multihead_ingestor:
            # Set the multi-head ingestor's options
            self._multihead_ingestor.options = options
            try:
                # Call the insertion function
                response = self._multihead_ingestor.insert_records( encoded_data )
                # Need to flush the records, per the setting
                if self._flush_multi_head_ingest_per_insertion:
                    self._multihead_ingestor.flush()
            except Exception as e:
                raise GPUdbException( str(e) )
        else:
            # Call the insert function and check the status
            response = self.db.insert_records( self.name, encoded_data,
                                               options = options )
            if not _Util.is_ok( response ):
                raise GPUdbException( _Util.get_error_msg( response ) )
        # end if-else
        return self 
    # end insert_records
    def insert_records_random( self, count = None, options = {} ):
        """Generates a specified number of random records and adds them to the
        given table. There is an optional parameter that allows the user to
        customize the ranges of the column values. It also allows the user to
        specify linear profiles for some or all columns in which case linear
        values are generated rather than random ones. Only individual tables
        are supported for this operation.
        This operation is synchronous, meaning that a response will not be
        returned until all random records are fully available.
        Parameters:
            count (long)
                Number of records to generate.
            options (dict of dicts of floats)
                Optional parameter to pass in specifications for the randomness
                of the values.  This map is different from the *options*
                parameter of most other endpoints in that it is a map of string
                to map of string to doubles, while most others are maps of
                string to string.  In this map, the top level keys represent
                which column's parameters are being specified, while the
                internal keys represent which parameter is being specified.
                These parameters take on different meanings depending on the
                type of the column.  Below follows a more detailed description
                of the map:  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **seed** --
                  If provided, the internal random number generator will be
                  initialized with the given value.  The minimum is 0.  This
                  allows for the same set of random numbers to be generated
                  across invocations of this endpoint, in case the user
                  wants to repeat the test.  Since input parameter *options*
                  is a map of maps, an internal map is needed to provide the
                  seed value.
                  For example, to pass 100 as the seed value through this
                  parameter, you need something equivalent to: 'options' =
                  {'seed': { 'value': 100 } }
                  Allowed keys are:
                  * **value** --
                    Pass the seed value here.
                * **all** --
                  This key indicates that the specifications relayed in the
                  internal map are to be applied to all columns of the records.
                  Allowed keys are:
                  * **min** --
                    For numerical columns, the minimum of the generated values
                    is set to this value.  Default is -99999.  For point,
                    shape, and track semantic types, min for numeric 'x' and
                    'y' columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default minimum possible values for these
                    columns in such cases are -180.0 and -90.0. For the
                    'TIMESTAMP' column, the default minimum corresponds to Jan
                    1, 2010.
                    For string columns, the minimum length of the randomly
                    generated strings is set to this value (default is 0). If
                    both minimum and maximum are provided, minimum must be less
                    than or equal to max. Value needs to be within [0, 200].
                    If the min is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **max** --
                    For numerical columns, the maximum of the generated values
                    is set to this value. Default is 99999. For point, shape,
                    and track semantic types, max for numeric 'x' and 'y'
                    columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default maximum possible values for these
                    columns in such cases are 180.0 and 90.0.
                    For string columns, the maximum length of the randomly
                    generated strings is set to this value (default is 200). If
                    both minimum and maximum are provided, *max* must be
                    greater than or equal to *min*. Value needs to be within
                    [0, 200].
                    If the *max* is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **interval** --
                    If specified, generate values for all columns evenly spaced
                    with the given interval value. If a max value is specified
                    for a given column the data is randomly generated between
                    min and max and decimated down to the interval. If no max
                    is provided the data is linearly generated starting at the
                    minimum value (instead of generating random data). For
                    non-decimated string-type columns the interval value is
                    ignored. Instead the values are generated following the
                    pattern: 'attrname_creationIndex#', i.e. the column name
                    suffixed with an underscore and a running counter (starting
                    at 0). For string types with limited size (e.g., char4) the
                    prefix is dropped. No nulls will be generated for nullable
                    columns.
                  * **null_percentage** --
                    If specified, then generate the given percentage of the
                    count as nulls for all nullable columns.  This option will
                    be ignored for non-nullable columns.  The value must be
                    within the range [0, 1.0].  The default value is 5% (0.05).
                  * **cardinality** --
                    If specified, limit the randomly generated values to a
                    fixed set. Not allowed on a column with interval specified,
                    and is not applicable to WKT or Track-specific columns. The
                    value must be greater than 0. This option is disabled by
                    default.
                * **attr_name** --
                  Set the following parameters for the column specified by the
                  key. This overrides any parameter set by *all*.
                  Allowed keys are:
                  * **min** --
                    For numerical columns, the minimum of the generated values
                    is set to this value.  Default is -99999.  For point,
                    shape, and track semantic types, min for numeric 'x' and
                    'y' columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default minimum possible values for these
                    columns in such cases are -180.0 and -90.0. For the
                    'TIMESTAMP' column, the default minimum corresponds to Jan
                    1, 2010.
                    For string columns, the minimum length of the randomly
                    generated strings is set to this value (default is 0). If
                    both minimum and maximum are provided, minimum must be less
                    than or equal to max. Value needs to be within [0, 200].
                    If the min is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **max** --
                    For numerical columns, the maximum of the generated values
                    is set to this value. Default is 99999. For point, shape,
                    and track semantic types, max for numeric 'x' and 'y'
                    columns needs to be within [-180, 180] and [-90, 90],
                    respectively. The default maximum possible values for these
                    columns in such cases are 180.0 and 90.0.
                    For string columns, the maximum length of the randomly
                    generated strings is set to this value (default is 200). If
                    both minimum and maximum are provided, *max* must be
                    greater than or equal to *min*. Value needs to be within
                    [0, 200].
                    If the *max* is outside the accepted ranges for string
                    columns and 'x' and 'y' columns for point/shape/track
                    types, then those parameters will not be set; however, an
                    error will not be thrown in such a case. It is the
                    responsibility of the user to use the *all* parameter
                    judiciously.
                  * **interval** --
                    If specified, generate values for all columns evenly spaced
                    with the given interval value. If a max value is specified
                    for a given column the data is randomly generated between
                    min and max and decimated down to the interval. If no max
                    is provided the data is linearly generated starting at the
                    minimum value (instead of generating random data). For
                    non-decimated string-type columns the interval value is
                    ignored. Instead the values are generated following the
                    pattern: 'attrname_creationIndex#', i.e. the column name
                    suffixed with an underscore and a running counter (starting
                    at 0). For string types with limited size (e.g., char4) the
                    prefix is dropped. No nulls will be generated for nullable
                    columns.
                  * **null_percentage** --
                    If specified and if this column is nullable, then generate
                    the given percentage of the count as nulls.  This option
                    will result in an error if the column is not nullable.  The
                    value must be within the range [0, 1.0].  The default value
                    is 5% (0.05).
                  * **cardinality** --
                    If specified, limit the randomly generated values to a
                    fixed set. Not allowed on a column with interval specified,
                    and is not applicable to WKT or Track-specific columns. The
                    value must be greater than 0. This option is disabled by
                    default.
                * **track_length** --
                  This key-map pair is only valid for track type data sets (an
                  error is thrown otherwise).  No nulls will be generated for
                  nullable columns.
                  Allowed keys are:
                  * **min** --
                    Minimum possible length for generated series; default is
                    100 records per series. Must be an integral value within
                    the range [1, 500]. If both min and max are specified, min
                    must be less than or equal to max.
                  * **max** --
                    Maximum possible length for generated series; default is
                    500 records per series. Must be an integral value within
                    the range [1, 500]. If both min and max are specified, max
                    must be greater than or equal to min.
        Returns:
            A GPUdbTable object with the insert_records_random() response fields
            converted to attributes (and stored within) with the following
            entries:
            table_name (str)
                Value of input parameter *table_name*.
            count (long)
                Value of input parameter *count*.
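        Example:
            A minimal usage sketch; the table handle 'table' and the column
            name 'x' below are illustrative assumptions, not part of this
            API::
                # Generate 1,000 reproducible records (fixed seed), with no
                # nulls anywhere and a custom value range for column 'x'
                options = { "seed": { "value": 100 },
                            "all":  { "null_percentage": 0.0 },
                            "x":    { "min": 0, "max": 50 } }
                table.insert_records_random( count = 1000, options = options )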
        """
        response = self.db.insert_records_random( self.name, count = count,
                                                  options = options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        # Return this table handle so that calls can be chained
        return self
    # end insert_records_random
    def get_records( self, offset = 0, limit = 10000,
                     encoding = 'binary', options = {} ):
        """Retrieves records from a given table, optionally filtered by an
        expression and/or sorted by a column. This operation can be performed
        on tables, views, or on homogeneous collections (collections containing
        tables of all the same type). Records can be returned encoded as binary
        or json.
        This operation supports paging through the data via the input parameter
        *offset* and input parameter *limit* parameters. Note that when paging
        through a table, if the table (or the underlying table in case of a
        view) is updated (records are inserted, deleted or modified) the
        records retrieved may differ between calls based on the updates
        applied.
        Decodes and returns the fetched records.
        Parameters:
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).
                Default value is 0. The minimum allowed value is 0. The maximum
                allowed value is MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned. Or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 10000.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str)
                Default value is an empty dict ( {} ).
                Allowed keys are:
                * **expression** --
                  Optional filter expression to apply to the table.
                * **fast_index_lookup** --
                  Indicates if indexes should be used to perform the lookup for
                  a given expression if possible. Only applicable if there is
                  no sorting, the expression contains only equivalence
                  comparisons based on existing table indexes, and the range of
                  requested values is [0, END_OF_SET]. The default value
                  is true.
                * **sort_by** --
                  Optional column that the data should be sorted by. Empty by
                  default (i.e. no sorting is applied).
                * **sort_order** --
                  String indicating how the returned values should be sorted -
                  ascending or descending. If sort_order is provided, sort_by
                  has to be provided.
                  Allowed values are:
                  * ascending
                  * descending
                  The default value is 'ascending'.
        Returns:
            A list of OrderedDict objects containing the record values.
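        Example:
            A paging sketch; the table handle 'table', the column name 'x',
            and the process() function are illustrative assumptions::
                offset = 0
                while True:
                    # Fetch the next batch, filtered server-side
                    records = table.get_records(
                                  offset = offset, limit = 1000,
                                  options = { "expression": "x > 0" } )
                    if not records:
                        break
                    process( records )       # handle this batch of records
                    offset += len( records )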
        """
        # Issue the /get/records query
        response = self.db.get_records( self.name, offset, limit, encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        # Decode the records as necessary
        if encoding == "binary":
            records = GPUdbRecord.decode_binary_data( response["type_schema"],
                                                      response["records_binary"] )
        else:
            records = GPUdbRecord.decode_json_string_data( response["records_json"] )
        # Return just the records; disregard the extra info within the response
        return records 
    # end get_records
    def get_records_by_column( self, column_names, offset = 0, limit = 10000,
                               encoding = 'binary', options = {},
                               print_data = False,
                               is_column_major = True ):
        """For a given table, retrieves the values of the given columns within a
        given range. It returns maps of column name to the vector of values for
        each supported data type (double, float, long, int and string). This
        operation supports the pagination feature, i.e. values that are retrieved
        are those associated with the indices between the start (offset) and
        end value (offset + limit) parameters (inclusive). If there are
        num_points values in the table then each of the indices between 0 and
        num_points-1 retrieves a unique value.
        Note that when using the pagination feature, if the table (or the
        underlying table in case of a view) is updated (records are inserted,
        deleted or modified) the records or values retrieved may differ between
        calls (discontiguous or overlap) based on the type of the update.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../concepts/dynamic_schemas.html>`_.
        Parameters:
            column_names (list of str)
                The list of column values to retrieve.
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).  The
                minimum allowed value is 0. The maximum allowed value is
                MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned (if not provided the default is 10000), or
                END_OF_SET (-9999) to indicate that the maximum number of
                results allowed by the server should be returned.
            encoding (str)
                Specifies the encoding for returned records; either 'binary' or
                'json'.  Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str)
                Default value is an empty dict ( {} ).
                Allowed keys are:
                * **expression** --
                  Optional filter expression to apply to the table.
                * **sort_by** --
                  Optional column that the data should be sorted by. Empty by
                  default (i.e. no sorting is applied).
                * **sort_order** --
                  String indicating how the returned values should be sorted -
                  ascending or descending. Default is 'ascending'. If
                  sort_order is provided, sort_by has to be provided.
                  Allowed values are:
                  * ascending
                  * descending
                  The default value is 'ascending'.
                * **order_by** --
                  Comma-separated list of the columns to be sorted by; e.g.
                  'timestamp asc, x desc'.  The columns specified must be
                  present in input parameter *column_names*.  If any alias is
                  given for any column name, the alias must be used, rather
                  than the original column name.
            print_data (bool)
                If True, print the fetched data to the console in a tabular
                format.  Default is False.
            is_column_major (bool)
                If True, then return the fetched values in a column-major
                format; otherwise, return them in a row-major format.  Default
                is True.
        Decodes the fetched records and saves them in the response class in an
        attribute called data.
        Returns:
            A dict of column name to column values for column-major data, or
            a list of OrderedDict objects for row-major data.
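        Example:
            A sketch of both output layouts; the table handle 'table' and the
            column names are illustrative assumptions::
                # Column-major (default): dict of column name -> list of values
                cols = table.get_records_by_column( [ "x", "y" ], limit = 100 )
                x_values = cols[ "x" ]
                # Row-major: one OrderedDict per record
                rows = table.get_records_by_column( [ "x", "y" ], limit = 100,
                                                    is_column_major = False )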
        """
        # Issue the /get/records/bycolumn query
        response = self.db.get_records_by_column( self.name, column_names,
                                                  offset, limit, encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        # Decode the records
        resp = self.db.parse_dynamic_response( response, convert_nulls = False,
                                               do_print = print_data )
        data = resp[ "response" ]
        if is_column_major:
            # Return just the records; disregard the extra info within the response
            return data
        # Else, need to cobble the data together to create records
        records = GPUdbRecord.convert_data_col_major_to_row_major( data, resp["response_schema_str"] )
        return records 
    # end get_records_by_column
    def get_records_by_series( self, world_table_name = None,
                               offset = 0, limit = 250, encoding = 'binary',
                               options = {} ):
        """Retrieves the complete series/track records from the given input
        parameter *world_table_name* based on the partial track information
        contained in the input parameter *table_name*.
        This operation supports paging through the data via the input parameter
        *offset* and input parameter *limit* parameters.
        In contrast to :meth:`.get_records` this returns records grouped by
        series/track. So if input parameter *offset* is 0 and input parameter
        *limit* is 5 this operation would return the first 5 series/tracks in
        input parameter *table_name*. Each series/track will be returned sorted
        by its TIMESTAMP column.
        Parameters:
            world_table_name (str)
                Name of the table containing the complete series/track
                information to be returned for the tracks present in the input
                parameter *table_name*. Typically this is used when retrieving
                series/tracks from a view (which contains partial
                series/tracks) but the user wants to retrieve the entire
                original series/tracks. Can be blank.
            offset (int)
                A positive integer indicating the number of initial
                series/tracks to skip (useful for paging through the results).
                Default value is 0. The minimum allowed value is 0. The maximum
                allowed value is MAX_INT.
            limit (int)
                A positive integer indicating the maximum number of
                series/tracks to be returned. Or END_OF_SET (-9999) to indicate
                that the max number of results should be returned.  Default
                value is 250.
            encoding (str)
                Specifies the encoding for returned records; either 'binary' or
                'json'.  Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            A list of OrderedDict objects containing the record values.
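        Example:
            A sketch expanding the partial tracks in a view to their complete
            original tracks; the handle 'track_view' and the table name
            'world_tracks' are illustrative assumptions::
                # 'track_view' holds partial tracks filtered from 'world_tracks'
                records = track_view.get_records_by_series(
                              world_table_name = "world_tracks",
                              offset = 0, limit = 5 )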
        """
        # Issue the /get/records/byseries query
        response = self.db.get_records_by_series( self.name,
                                                  world_table_name = world_table_name,
                                                  offset = offset, limit = limit,
                                                  encoding = encoding,
                                                  options = options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        # Decode the records as necessary; flatten them into a single list
        records = []
        if encoding == "binary":
            binary_encoded_tracks = response["list_records_binary"]
            type_schemas = response[ "type_schemas" ]
            # Decode one series at a time
            for binary_encoded_records, type_schema in zip(binary_encoded_tracks, type_schemas):
                # Decode all records for a given track
                series_records = GPUdbRecord.decode_binary_data( type_schema,
                                                                 binary_encoded_records )
                records.extend( series_records )
            # end loop
        else:
            json_encoded_tracks = response["list_records_json"]
            for json_encoded_records in json_encoded_tracks:
                records.extend( GPUdbRecord.decode_json_string_data( json_encoded_records ) )
            # end loop
        # end if-else
        # Return just the records; disregard the extra info within the response
        return records 
    # end get_records_by_series
    def get_records_from_collection( self, offset = 0, limit = 10000,
                                     encoding = 'binary', options = {} ):
        """Retrieves records from a collection. The operation can optionally
        return the record IDs which can be used in certain queries such as
        :meth:`.delete_records`.
        This operation supports paging through the data via the input parameter
        *offset* and input parameter *limit* parameters.
        Note that when using the Java API, it is not possible to retrieve
        records from join tables using this operation.
        Parameters:
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).
                Default value is 0. The minimum allowed value is 0. The maximum
                allowed value is MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned, or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 10000.
            encoding (str)
                Specifies the encoding for returned records; either 'binary' or
                'json'.  Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str)
                Default value is an empty dict ( {} ).
                Allowed keys are:
                * **return_record_ids** --
                  If 'true' then return the internal record ID along with each
                  returned record. Default is 'false'.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A list of OrderedDict objects containing the record values.
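        Example:
            A sketch that also fetches the internal record IDs; the collection
            handle 'collection' is an illustrative assumption::
                records = collection.get_records_from_collection(
                              limit = 100,
                              options = { "return_record_ids": "true" } )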
        """
        # Issue the /get/records/fromcollection query
        response = self.db.get_records_from_collection( self.name, offset, limit, encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        # Decode the records as necessary
        if encoding == "binary":
            records = []
            binary_encoded_records = response["records_binary"]
            type_ids = response[ "type_names" ]
            # Decode one record at a time
            for bin_record, type_id in zip(binary_encoded_records, type_ids):
                # We need to fetch the type schema string from GPUdb per record
                type_schema = self.db.show_types( type_id, "" )["type_schemas"][ 0 ]
                record = GPUdbRecord.decode_binary_data( type_schema,
                                                         bin_record )
                records.append( record )
        else:
            records = GPUdbRecord.decode_json_string_data( response["records_json"] )
        # Return just the records; disregard the extra info within the response
        return records 
    # end get_records_from_collection
    @staticmethod
    def create_join_table( db, join_table_name = None, table_names = [],
                           column_names = [], expressions = [], options = {} ):
        """Creates a table that is the result of a SQL JOIN.  For details see:
        `join concept documentation <../../../concepts/joins.html>`_.
        Parameters:
            db (GPUdb)
                Handle to the GPUdb client instance against which the join
                table creation request will be issued.
            join_table_name (str)
                Name of the join table to be created.  Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
            table_names (list of str)
                The list of table names composing the join.  Corresponds to a
                SQL statement FROM clause.  The user can provide a single
                element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            column_names (list of str)
                List of member table columns or column expressions to be
                included in the join. Columns can be prefixed with
                'table_id.column_name', where 'table_id' is the table name or
                alias.  Columns can be aliased via the syntax 'column_name as
                alias'. Wild cards '*' can be used to include all columns
                across member tables or 'table_id.*' for all of a single
                table's columns.  Columns and column expressions comprising the
                join must be uniquely named or aliased--therefore, the '*' wild
                card cannot be used if column names aren't unique across all
                tables.  The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
                Default value is an empty list ( [] ).
            expressions (list of str)
                An optional list of expressions to combine and filter the
                joined tables.  Corresponds to a SQL statement WHERE clause.
                For details see: `expressions
                <../../../concepts/expressions.html>`_.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the join. If the
                  collection provided is non-existent, the collection will be
                  automatically created. If empty, then the join will be at the
                  top level.
                * **max_query_dimensions** --
                  The maximum number of tables in a join that can be accessed
                  by a query and are not equated by a foreign-key to
                  primary-key equality predicate.
                * **optimize_lookups** --
                  Use more memory to speed up the joining of tables.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **refresh_method** --
                  Method by which the join can be refreshed when the data in
                  underlying member tables have changed.
                  Allowed values are:
                  * **manual** --
                    refresh only occurs when manually requested by calling this
                    endpoint with refresh option set to *refresh* or
                    *full_refresh*
                  * **on_query** --
                    incrementally refresh (refresh just those records added)
                    whenever a new query is issued and new data is inserted
                    into the base table.  A full refresh of all the records
                    occurs when a new query is issued and there have been
                    inserts to any non-base-tables since the last query
                  * **on_insert** --
                    incrementally refresh (refresh just those records added)
                    whenever new data is inserted into a base table.  A full
                    refresh of all the records occurs when a new query is
                    issued and there have been inserts to any non-base-tables
                    since the last query
                    The default value is 'manual'.
                * **refresh** --
                  Do a manual refresh of the join if it exists - throws an
                  error otherwise.
                  Allowed values are:
                  * **no_refresh** --
                    don't refresh
                  * **refresh** --
                    incrementally refresh (refresh just those records added) if
                    new data has been inserted into the base table.  A full
                    refresh of all the records occurs if there have been
                    inserts to any non-base-tables since the last refresh
                  * **full_refresh** --
                    always refresh even if no new records have been added.
                    Only refresh method guaranteed to do a full refresh
                    (refresh all the records) if a delete or update has
                    occurred since the last refresh.
                    The default value is 'no_refresh'.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the join
                  table specified in input parameter *join_table_name*.
                * **no_count** --
                  Return a count of 0 for the join table for logging and for
                  show_table.  Optimization needed for large overlapped
                  equi-join stencils.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
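        Example:
            A sketch of a two-table equi-join; all table names, aliases, and
            column names are illustrative assumptions::
                joined = GPUdbTable.create_join_table(
                             db,
                             join_table_name = "order_customer_join",
                             table_names     = [ "orders as o", "customers as c" ],
                             column_names    = [ "o.id as order_id", "c.name" ],
                             expressions     = [ "o.customer_id = c.id" ] )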
        """
        # Create a random table name if none is given
        join_table_name = join_table_name if join_table_name else GPUdbTable.random_name()
        # Normalize the input table names
        table_names = table_names if isinstance( table_names, list ) else [ table_names ]
        table_names = [ t.name if isinstance(t, GPUdbTable) else t for t in table_names ]
        # The given DB handle must be a GPUdb instance
        if not isinstance( db, GPUdb ):
            raise GPUdbException( "Argument 'db' must be a GPUdb object; "
                                  "given %s" % str( type( db ) ) )
        response = db.create_join_table( join_table_name, table_names,
                                         column_names, expressions, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return GPUdbTable( None, name = join_table_name, db = db ) 
    # end create_join_table
    @staticmethod
    def create_union( db, table_name = None, table_names = None,
                      input_column_names = None, output_column_names = None,
                      options = {} ):
        """Performs a `union <../../../concepts/unions.html>`_ (concatenation) of
        one or more existing tables or views, the results of which are stored
        in a new table. It is equivalent to the SQL UNION ALL operator.
        Non-charN 'string' and 'bytes' column types cannot be included in a
        union, nor can columns with the property 'store_only'. Though not
        explicitly unions, `intersect <../../../concepts/intersect.html>`_ and
        `except <../../../concepts/except.html>`_ are also available from this
        endpoint.
        Parameters:
            db (GPUdb)
                Handle to the GPUdb client instance against which the union
                creation request will be issued.
            table_name (str)
                Name of the table to be created. Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
            table_names (list of str)
                The list of table names making up the union. Must contain the
                names of one or more existing tables.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            input_column_names (list of lists of str)
                The list of columns from each of the corresponding input
                tables.  The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            output_column_names (list of str)
                The list of names of the columns to be stored in the union.
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the union. If the
                  collection provided is non-existent, the collection will be
                  automatically created. If empty, then the union will be a
                  top-level table.
                * **materialize_on_gpu** --
                  If 'true' then the columns of the union will be cached on the
                  GPU.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **mode** --
                  If 'merge_views' then this operation will merge (i.e. union)
                  the provided views. All 'table_names' must be views from the
                  same underlying base table.
                  Allowed values are:
                  * **union_all** --
                    Retains all rows from the specified tables.
                  * **union** --
                    Retains all unique rows from the specified tables (synonym
                    for 'union_distinct').
                  * **union_distinct** --
                    Retains all unique rows from the specified tables.
                  * **except** --
                    Retains all unique rows from the first table that do not
                    appear in the second table (only works on 2 tables).
                  * **intersect** --
                    Retains all unique rows that appear in both of the
                    specified tables (only works on 2 tables).
                  * **merge_views** --
                    Merge two or more views (or views of views) of the same
                    base data set into a new view. If this mode is selected
                    input parameter *input_column_names* AND input parameter
                    *output_column_names* must be empty. The resulting view
                    would match the results of a SQL OR operation, e.g., if
                    filter 1 creates a view using the expression 'x = 10' and
                    filter 2 creates a view using the expression 'x <= 10',
                    then the merge views operation creates a new view using the
                    expression 'x = 10 OR x <= 10'.
                    The default value is 'union_all'.
                * **chunk_size** --
                  Indicates the chunk size to be used for this table.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                  specified in input parameter *table_name*.
                * **persist** --
                  If *true*, then the union specified in input parameter
                  *table_name* will be persisted and will not expire unless a
                  *ttl* is specified.   If *false*, then the union will be an
                  in-memory table and will expire unless a *ttl* is specified
                  otherwise.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
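        Example:
            A sketch of a UNION ALL of two hypothetical tables with matching
            columns; all names are illustrative assumptions::
                combined = GPUdbTable.create_union(
                               db,
                               table_name          = "events_combined",
                               table_names         = [ "events_2016", "events_2017" ],
                               input_column_names  = [ [ "id", "ts" ], [ "id", "ts" ] ],
                               output_column_names = [ "id", "ts" ],
                               options             = { "mode": "union_all" } )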
        """
        # Create a random table name if none is given
        table_name = table_name if table_name else GPUdbTable.random_name()
        # Normalize the input table names
        table_names = table_names if isinstance( table_names, list ) else [ table_names ]
        table_names = [ t.name if isinstance(t, GPUdbTable) else t for t in table_names ]
        # The given DB handle must be a GPUdb instance
        if not isinstance( db, GPUdb ):
            raise GPUdbException( "Argument 'db' must be a GPUdb object; "
                                  "given %s" % str( type( db ) ) )
        response = db.create_union( table_name, table_names, input_column_names,
                                    output_column_names, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return GPUdbTable( None, name = table_name, db = db ) 
    # end create_union
    @staticmethod
    def merge_records( db, table_name = None, source_table_names = None,
                       field_maps = None, options = {} ):
        """Create a new empty result table (specified by input parameter
        *table_name*), and insert all records from source tables (specified by
        input parameter *source_table_names*) based on the field mapping
        information (specified by input parameter *field_maps*). The field map
        (specified by input parameter *field_maps*) holds the user specified
        maps of target table column names to source table columns. The array of
        input parameter *field_maps* must match one-to-one with the input
        parameter *source_table_names*, e.g., there's a map present in input
        parameter *field_maps* for each table listed in input parameter
        *source_table_names*. Read more about Merge Records `here
        <../../../concepts/merge_records.html>`_.
        Parameters:
            db (GPUdb)
                Handle to the GPUdb client instance against which the merge
                request will be issued.
            table_name (str)
                The new result table name for the records to be merged.  Must
                NOT be an existing table.
            source_table_names (list of str)
                The list of source table names to get the records from. Must be
                existing table names.  The user can provide a single element
                (which will be automatically promoted to a list internally) or
                a list.
            field_maps (list of dicts of str to str)
                Contains a list of source/target column mappings, one mapping
                for each source table listed in input parameter
                *source_table_names* being merged into the target table
                specified by input parameter *table_name*.  Each mapping
                contains the target column names (as keys) that the data in the
                mapped source columns (as values) will be merged into.  All of
                the source columns being merged into a given target column must
                match in type, as that type will determine the type of the new
                target column.  The user can provide a single element (which
                will be automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the newly created
                  merged table specified by input parameter *table_name*. If
                  the collection provided is non-existent, the collection will
                  be automatically created. If empty, then the newly created
                  merged table will be a top-level table.
                * **is_replicated** --
                  Indicates the `distribution scheme
                  <../../../concepts/tables.html#distribution>`_ for the data
                  of the merged table specified in input parameter
                  *table_name*.  If true, the table will be `replicated
                  <../../../concepts/tables.html#replication>`_.  If false, the
                  table will be `randomly sharded
                  <../../../concepts/tables.html#random-sharding>`_.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the merged
                  table specified in input parameter *table_name*.
                * **chunk_size** --
                  Indicates the chunk size to be used for the merged table
                  specified in input parameter *table_name*.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
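        Example:
            A sketch merging two hypothetical source tables into a new target
            table; all table and column names are illustrative assumptions::
                merged = GPUdbTable.merge_records(
                             db,
                             table_name         = "all_vehicles",
                             source_table_names = [ "cars", "trucks" ],
                             field_maps = [
                                 # target column (key) <- source column (value)
                                 { "id": "car_id",   "weight": "curb_weight"  },
                                 { "id": "truck_id", "weight": "gross_weight" } ] )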
        """
        # Create a random table name if none is given
        table_name = table_name if table_name else GPUdbTable.random_name()
        # Normalize the input table names
        source_table_names = source_table_names if isinstance( source_table_names, list ) else [ source_table_names ]
        source_table_names = [ t.name if isinstance(t, GPUdbTable) else t for t in source_table_names ]
        # The given DB handle must be a GPUdb instance
        if not isinstance( db, GPUdb ):
            raise GPUdbException( "Argument 'db' must be a GPUdb object; "
                                  "given %s" % str( type( db ) ) )
        response = db.merge_records( table_name, source_table_names, field_maps,
                                     options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return GPUdbTable( None, name = table_name, db = db ) 
    # end merge_records
    def aggregate_convex_hull( self, x_column_name = None, y_column_name = None,
                               options = {} ):
        """Calculates and returns the convex hull for the values in a table
        specified by input parameter *table_name*.
        Parameters:
            x_column_name (str)
                Name of the column containing the x coordinates of the points
                for the operation being performed.
            y_column_name (str)
                Name of the column containing the y coordinates of the points
                for the operation being performed.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            x_vector (list of floats)
                Array of x coordinates of the resulting convex set.
            y_vector (list of floats)
                Array of y coordinates of the resulting convex set.
            count (int)
                Count of the number of points in the convex set.
            is_valid (bool)
        Raises:
            GPUdbException -- 
                Upon an error from the server.
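        Example:
            A minimal sketch; the table handle 'table' and the column names
            are illustrative assumptions::
                hull = table.aggregate_convex_hull( x_column_name = "x",
                                                    y_column_name = "y" )
                # Pair up the returned coordinate vectors
                hull_points = list( zip( hull[ "x_vector" ], hull[ "y_vector" ] ) )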
        """
        response = self.db.aggregate_convex_hull( self.name, x_column_name,
                                                  y_column_name, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_convex_hull
    def aggregate_group_by( self, column_names = None, offset = None, limit =
                            1000, encoding = 'binary', options = {} ):
        """Calculates unique combinations (groups) of values for the given columns
        in a given table/view/collection and computes aggregates on each unique
        combination. This is somewhat analogous to an SQL-style SELECT...GROUP
        BY.
        Any column(s) can be grouped on, and all column types except
        unrestricted-length strings may be used for computing applicable
        aggregates; columns marked as `store-only
        <../../../concepts/types.html#data-handling>`_ are unable to be used in
        grouping or aggregation.
        The results can be paged via the input parameter *offset* and input
        parameter *limit* parameters. For example, to get 10 groups with the
        largest counts the inputs would be: limit=10,
        options={"sort_order":"descending", "sort_by":"value"}.
        Input parameter *options* can be used to customize behavior of this
        call e.g. filtering or sorting the results.
        To group by columns 'x' and 'y' and compute the number of objects
        within each group, use:  column_names=['x','y','count(*)'].
        To also compute the sum of 'z' over each group, use:
        column_names=['x','y','count(*)','sum(z)'].
        Available `aggregation functions
        <../../../concepts/expressions.html#aggregate-expressions>`_ are:
        count(*), sum, min, max, avg, mean, stddev, stddev_pop, stddev_samp,
        var, var_pop, var_samp, arg_min, arg_max and count_distinct.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        If a *result_table* name is specified in the input parameter *options*,
        the results are stored in a new table with that name--no results are
        returned in the response.  Both the table name and resulting column
        names must adhere to `standard naming conventions
        <../../../concepts/tables.html#table>`_; column/aggregation expressions
        will need to be aliased.  If the source table's `shard key
        <../../../concepts/tables.html#shard-keys>`_ is used as the grouping
        column(s), the result table will be sharded; in all other cases it will
        be replicated.  Sorting will properly function only if the result table
        is replicated or if there is only one processing node and should not be
        relied upon in other cases.  Not available when any of the values of
        input parameter *column_names* is an unrestricted-length string.
        Parameters:
            column_names (list of str)
                List of one or more column names, expressions, and aggregate
                expressions.  The user can provide a single element (which will
                be automatically promoted to a list internally) or a list.
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).  The
                minimum allowed value is 0. The maximum allowed value is
                MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned, or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 1000.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * **binary** --
                  Indicates that the returned records should be binary encoded.
                * **json** --
                  Indicates that the returned records should be json encoded.
                  The default value is 'binary'.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **collection_name** --
                    Name of a collection which is to contain the table
                    specified in *result_table*. If the collection provided is
                    non-existent, the collection will be automatically created.
                    If empty, then the table will be a top-level table.
                    Additionally this option is invalid if input parameter
                    *table_name* is a collection.
                  * **expression** --
                    Filter expression to apply to the table prior to computing
                    the aggregate group by.
                  * **having** --
                    Filter expression to apply to the aggregated results.
                  * **sort_order** --
                    String indicating how the returned values should be sorted
                    - ascending or descending.
                    Allowed values are:
                    * **ascending** --
                      Indicates that the returned values should be sorted in
                      ascending order.
                    * **descending** --
                      Indicates that the returned values should be sorted in
                      descending order.
                      The default value is 'ascending'.
                  * **sort_by** --
                    String determining how the results are sorted.
                    Allowed values are:
                    * **key** --
                      Indicates that the returned values should be sorted by
                      key, which corresponds to the grouping columns. If you
                      have multiple grouping columns (and are sorting by key),
                      it will first sort the first grouping column, then the
                      second grouping column, etc.
                    * **value** --
                      Indicates that the returned values should be sorted by
                      value, which corresponds to the aggregates. If you have
                      multiple aggregates (and are sorting by value), it will
                      first sort by the first aggregate, then the second
                      aggregate, etc.
                      The default value is 'value'.
                  * **result_table** --
                    The name of the table used to store the results. Has the
                    same naming restrictions as `tables
                    <../../../concepts/tables.html>`_. Column names (group-by
                    and aggregate fields) need to be given aliases e.g.
                    ["FChar256 as fchar256", "sum(FDouble) as sfd"].  If
                    present, no results are returned in the response.  This
                    option is not available if one of the grouping attributes
                    is an unrestricted string (i.e., not charN) type.
                  * **result_table_persist** --
                    If *true*, then the result table specified in
                    *result_table* will be persisted and will not expire unless
                    a *ttl* is specified.   If *false*, then the result table
                    will be an in-memory table and will expire unless a *ttl*
                    is specified otherwise.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
                  * **result_table_force_replicated** --
                    Force the result table to be replicated (ignores any
                    sharding). Must be used in combination with the
                    *result_table* option.
                  * **result_table_generate_pk** --
                    If 'true' then set a primary key for the result table. Must
                    be used in combination with the *result_table* option.
                  * **ttl** --
                    Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                    specified in *result_table*.
                  * **chunk_size** --
                    Indicates the chunk size to be used for the result table.
                    Must be used in combination with the *result_table* option.
                  * **materialize_on_gpu** --
                    If *true* then the columns of the groupby result table will
                    be cached on the GPU. Must be used in combination with the
                    *result_table* option.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
        Returns:
            A read-only GPUdbTable object if input options has "result_table";
            otherwise the response from the server, which is a dict containing
            the following entries--
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            total_number_of_records (long)
                Total/Filtered number of records.
            has_more_records (bool)
                Too many records. Returned a partial set.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
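        Example:
            A sketch fetching the ten groups with the largest counts; the
            table handle 'table' and the column names are illustrative
            assumptions::
                result = table.aggregate_group_by(
                             column_names = [ "x", "y", "count(*)", "sum(z)" ],
                             offset = 0, limit = 10,
                             options = { "sort_order": "descending",
                                         "sort_by": "value" } )
                groups = result[ "data" ]    # decoded, column-major values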
        """
        if "result_table" in options:
            result_table = options[ "result_table" ]
        else:
            result_table = None
        response = self.db.aggregate_group_by( self.name, column_names, offset,
                                               limit, encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        if result_table:
            # Create a read-only table for the result table
            return self.create_view( result_table, response[ "total_number_of_records" ] )
        # Decode the returned records
        response = self.db.parse_dynamic_response( response, convert_nulls = False )
        # Save the decoded data in a field called 'data' and delete the raw 
        # data related fields
        response[ "data" ] = response[ "response" ]
        del response[ "response" ]
        del response[ "binary_encoded_response" ]
        del response[ "json_encoded_response" ]
        return response 
    # end aggregate_group_by
    def aggregate_histogram( self, column_name = None, start = None, end = None,
                             interval = None, options = {} ):
        """Performs a histogram calculation given a table, a column, and an
        interval function. The input parameter *interval* is used to produce
        bins of that size and the result, computed over the records falling
        within each bin, is returned.  For each bin, the start value is
        inclusive, but the end value is exclusive--except for the very last bin
        for which the end value is also inclusive.  The value returned for each
        bin is the number of records in it, except when a column name is
        provided as a *value_column* in input parameter *options*.  In this
        latter case the sum of the values corresponding to the *value_column*
        is used as the result instead.
        Parameters:
            column_name (str)
                Name of a column or an expression of one or more column names
                over which the histogram will be calculated.
            start (float)
                Lower end value of the histogram interval, inclusive.
            end (float)
                Upper end value of the histogram interval, inclusive.
            interval (float)
                The size of each bin within the start and end parameters.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **value_column** --
                  The name of the column to use when calculating the bin values
                  (values are summed).  The column must be a numerical type
                  (int, double, long, float).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            counts (list of floats)
                The array of calculated values that represents the histogram
                data points.
            start (float)
                Value of input parameter *start*.
            end (float)
                Value of input parameter *end*.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.aggregate_histogram( self.name, column_name, start,
                                                end, interval, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_histogram
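    # A hedged usage sketch: ten equal-width bins over a hypothetical numeric
    # column 'x' of an existing GPUdbTable instance 'my_table'; the range and
    # bin width below are illustrative only.
    #
    #   resp = my_table.aggregate_histogram( column_name = "x", start = 0.0,
    #                                        end = 100.0, interval = 10.0 )
    #   print( resp[ "counts" ] )  # one count per bin, lowest bin first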
    def aggregate_k_means( self, column_names = None, k = None, tolerance =
                           None, options = {} ):
        """This endpoint runs the k-means algorithm - a heuristic algorithm that
        attempts to do k-means clustering.  An ideal k-means clustering
        algorithm selects k points such that the sum of the mean squared
        distances of each member of the set to the nearest of the k points is
        minimized.  The k-means algorithm, however, does not necessarily produce
        such an ideal cluster.  It begins with a randomly selected set of k
        points and then refines the location of the points iteratively and
        settles to a local minimum.  Various parameters and options are
        provided to control the heuristic search.
        Parameters:
            column_names (list of str)
                List of column names on which the operation would be performed.
                If n columns are provided then each of the k result points will
                have n dimensions corresponding to the n columns.  The user can
                provide a single element (which will be automatically promoted
                to a list internally) or a list.
            k (int)
                The number of mean points to be determined by the algorithm.
            tolerance (float)
                Stop iterating when the distance between successive points is
                less than the given tolerance.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **whiten** --
                  When set to 1, each of the columns is first normalized by
                  its standard deviation - the default is not to whiten.
                * **max_iters** --
                  Number of times to try to hit the tolerance limit before
                  giving up - default is 10.
                * **num_tries** --
                  Number of times to run the k-means algorithm with different
                  randomly selected starting points - helps avoid a local
                  minimum. The default is 1.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            means (list of lists of floats)
                The k-mean values found.
            counts (list of longs)
                The number of elements in the cluster closest to the
                corresponding k-means values.
            rms_dists (list of floats)
                The root mean squared distance of the elements in the cluster
                for each of the k-means values.
            count (long)
                The total count of all the clusters - will be the size of the
                input table.
            rms_dist (float)
                The sum of all the rms_dists - the value the k-means algorithm
                is attempting to minimize.
            tolerance (float)
                The distance between the last two iterations of the algorithm
                before it quit.
            num_iters (int)
                The number of iterations the algorithm executed before it quit.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.aggregate_k_means( self.name, column_names, k,
                                              tolerance, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_k_means
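    # A hedged usage sketch: clustering two hypothetical numeric columns into
    # three clusters, retrying from several random starting points to reduce
    # the chance of a poor local minimum (the *num_tries* option above).
    #
    #   resp = my_table.aggregate_k_means(
    #       column_names = [ "x", "y" ], k = 3, tolerance = 0.01,
    #       options = { "num_tries": "5" } )
    #   print( resp[ "means" ] )     # k points, one per cluster
    #   print( resp[ "rms_dist" ] )  # objective value being minimized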
    def aggregate_min_max( self, column_name = None, options = {} ):
        """Calculates and returns the minimum and maximum values of a particular
        column in a table.
        Parameters:
            column_name (str)
                Name of a column or an expression of one or more columns on
                which the min-max will be calculated.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            min (float)
                Minimum value of the input parameter *column_name*.
            max (float)
                Maximum value of the input parameter *column_name*.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.aggregate_min_max( self.name, column_name, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_min_max
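    # A hedged usage sketch: min/max of a hypothetical numeric column 'x'.
    # aggregate_min_max_geometry() below works the same way for geometry
    # columns, returning min_x/max_x/min_y/max_y instead of min/max.
    #
    #   resp = my_table.aggregate_min_max( column_name = "x" )
    #   print( resp[ "min" ], resp[ "max" ] )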
    def aggregate_min_max_geometry( self, column_name = None, options = {} ):
        """Calculates and returns the minimum and maximum x- and y-coordinates of
        a particular geospatial geometry column in a table.
        Parameters:
            column_name (str)
                Name of a geospatial geometry column on which the min-max will
                be calculated.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            min_x (float)
                Minimum x-coordinate value of the input parameter
                *column_name*.
            max_x (float)
                Maximum x-coordinate value of the input parameter
                *column_name*.
            min_y (float)
                Minimum y-coordinate value of the input parameter
                *column_name*.
            max_y (float)
                Maximum y-coordinate value of the input parameter
                *column_name*.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.aggregate_min_max_geometry( self.name, column_name,
                                                       options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_min_max_geometry
    def aggregate_statistics( self, column_name = None, stats = None, options =
                              {} ):
        """Calculates the requested statistics of the given column(s) in a given
        table.
        The available statistics are *count* (number of total objects), *mean*,
        *stdv* (standard deviation), *variance*, *skew*, *kurtosis*, *sum*,
        *min*, *max*, *weighted_average*, *cardinality* (unique count),
        *estimated_cardinality*, *percentile* and *percentile_rank*.
        Estimated cardinality is calculated by using the hyperloglog
        approximation technique.
        Percentiles and percentile ranks are approximate and are calculated
        using the t-digest algorithm. They must include the desired
        *percentile*/*percentile_rank*. To compute multiple percentiles each
        value must be specified separately (i.e.
        'percentile(75.0),percentile(99.0),percentile_rank(1234.56),percentile_rank(-5)').
        The weighted average statistic requires a *weight_column_name* to be
        specified in input parameter *options*. The weighted average is then
        defined as the sum of the products of input parameter *column_name*
        times the *weight_column_name* values divided by the sum of the
        *weight_column_name* values.
        Additional columns can be used in the calculation of statistics via the
        *additional_column_names* option.  Values in these columns will be
        included in the overall aggregate calculation--individual aggregates
        will not be calculated per additional column.  For instance, requesting
        the *count* & *mean* of input parameter *column_name* x and
        *additional_column_names* y & z, where x holds the numbers 1-10, y
        holds 11-20, and z holds 21-30, would return the total number of x, y,
        & z values (30), and the single average value across all x, y, & z
        values (15.5).
        The response includes a list of key/value pairs of each statistic
        requested and its corresponding value.
        Parameters:
            column_name (str)
                Name of the primary column for which the statistics are to be
                calculated.
            stats (str)
                Comma separated list of the statistics to calculate, e.g.
                "sum,mean".
                Allowed values are:
                * **count** --
                  Number of objects (independent of the given column(s)).
                * **mean** --
                  Arithmetic mean (average), equivalent to sum/count.
                * **stdv** --
                  Sample standard deviation (denominator is count-1).
                * **variance** --
                  Unbiased sample variance (denominator is count-1).
                * **skew** --
                  Skewness (third standardized moment).
                * **kurtosis** --
                  Kurtosis (fourth standardized moment).
                * **sum** --
                  Sum of all values in the column(s).
                * **min** --
                  Minimum value of the column(s).
                * **max** --
                  Maximum value of the column(s).
                * **weighted_average** --
                  Weighted arithmetic mean (using the option
                  *weight_column_name* as the weighting column).
                * **cardinality** --
                  Number of unique values in the column(s).
                * **estimated_cardinality** --
                  Estimate (via hyperloglog technique) of the number of unique
                  values in the column(s).
                * **percentile** --
                  Estimate (via t-digest) of the given percentile of the
                  column(s) (percentile(50.0) will be an approximation of the
                  median).
                * **percentile_rank** --
                  Estimate (via t-digest) of the percentile rank of the given
                  value in the column(s) (if the given value is the median of
                  the column(s), percentile_rank(<median>) will return
                  approximately 50.0).
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **additional_column_names** --
                    A list of comma separated column names over which
                    statistics can be accumulated along with the primary
                    column.  All columns listed and input parameter
                    *column_name* must be of the same type.  Must not include
                    the column specified in input parameter *column_name* and
                    no column can be listed twice.
                  * **weight_column_name** --
                    Name of column used as weighting attribute for the weighted
                    average statistic.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            stats (dict of str to floats)
                (statistic name, double value) pairs of the requested
                statistics, including the total count by default.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.aggregate_statistics( self.name, column_name, stats,
                                                 options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_statistics
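    # A hedged usage sketch: several statistics in one call, including an
    # approximate median via percentile(50.0); the column name 'x' is
    # illustrative only.
    #
    #   resp = my_table.aggregate_statistics(
    #       column_name = "x", stats = "count,mean,stdv,percentile(50.0)" )
    #   print( resp[ "stats" ] )  # dict of statistic name -> float value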
    def aggregate_statistics_by_range( self, select_expression = '', column_name
                                       = None, value_column_name = None, stats =
                                       None, start = None, end = None, interval
                                       = None, options = {} ):
        """Divides the given set into bins and calculates statistics of the values
        of a value-column in each bin.  The bins are based on the values of a
        given binning-column.  The statistics that may be requested are mean,
        stdv (standard deviation), variance, skew, kurtosis, sum, min, max,
        first, last and weighted average. In addition to the requested
        statistics, the count of total samples in each bin is returned. This
        counts vector is just the histogram of the column used to divide the
        set members into bins. The weighted average statistic requires a
        weight_column to be specified in input parameter *options*. The
        weighted average is then defined as the sum of the products of the
        value column times the weight column divided by the sum of the weight
        column.
        There are two methods for binning the set members. In the first, which
        can be used for numeric valued binning-columns, a min, max and interval
        are specified. The number of bins, nbins, is the integer upper bound of
        (max-min)/interval. Values that fall in the range
        [min+n\*interval,min+(n+1)\*interval) are placed in the nth bin where n
        ranges from 0..nbins-2. The final bin is [min+(nbins-1)\*interval,max].
        In the second method, input parameter *options* bin_values specifies a
        list of binning column values. Binning-columns whose value matches the
        nth member of the bin_values list are placed in the nth bin. When a
        list is provided the binning-column must be of type string or int.
        Parameters:
            select_expression (str)
                For a non-empty expression statistics are calculated for those
                records for which the expression is true.  Default value is ''.
            column_name (str)
                Name of the binning-column used to divide the set samples into
                bins.
            value_column_name (str)
                Name of the value-column for which statistics are to be
                computed.
            stats (str)
                A comma-separated list of the statistics to calculate, e.g.
                'sum,mean'. Available statistics: mean, stdv
                (standard deviation), variance, skew, kurtosis, sum.
            start (float)
                The lower bound of the binning-column.
            end (float)
                The upper bound of the binning-column.
            interval (float)
                The interval of a bin. Set members fall into bin i if the
                binning-column falls in the range [start+interval``*``i,
                start+interval``*``(i+1)).
            options (dict of str to str)
                Map of optional parameters:  Default value is an empty dict (
                {} ).
                Allowed keys are:
                * **additional_column_names** --
                  A list of comma separated value-column names over which
                  statistics can be accumulated along with the primary
                  value_column.
                * **bin_values** --
                  A list of comma separated binning-column values. Values that
                  match the nth bin_values value are placed in the nth bin.
                * **weight_column_name** --
                  Name of the column used as weighting column for the
                  weighted_average statistic.
                * **order_column_name** --
                  Name of the column used for candlestick charting techniques.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            stats (dict of str to lists of floats)
                A map with a key for each statistic in the stats input
                parameter having a value that is a vector of the corresponding
                value-column bin statistics. In addition, the key 'count' has
                a value that is a histogram of the binning-column.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.aggregate_statistics_by_range( self.name,
                                                          select_expression,
                                                          column_name,
                                                          value_column_name,
                                                          stats, start, end,
                                                          interval, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end aggregate_statistics_by_range
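    # A hedged usage sketch: per-bin mean of a hypothetical 'price' column,
    # binning on a hypothetical 'ts' column in ten equal intervals; all names
    # and ranges are illustrative only.
    #
    #   resp = my_table.aggregate_statistics_by_range(
    #       column_name = "ts", value_column_name = "price",
    #       stats = "mean", start = 0.0, end = 100.0, interval = 10.0 )
    #   print( resp[ "stats" ][ "mean" ] )   # one mean per bin
    #   print( resp[ "stats" ][ "count" ] )  # histogram of the binning column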
    def aggregate_unique( self, column_name = None, offset = None, limit =
                          10000, encoding = 'binary', options = {} ):
        """Returns all the unique values from a particular column (specified by
        input parameter *column_name*) of a particular table or collection
        (specified by input parameter *table_name*). If input parameter
        *column_name* is a numeric column the values will be in output
        parameter *binary_encoded_response*. Otherwise if input parameter
        *column_name* is a string column the values will be in output parameter
        *json_encoded_response*.  The results can be paged via the input
        parameter *offset* and input parameter *limit* parameters.
        Columns marked as `store-only
        <../../../concepts/types.html#data-handling>`_ are unable to be used
        with this function.
        To get the first 10 unique values sorted in descending order, the
        input parameter *options* would be::
        {"limit":"10","sort_order":"descending"}.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        If a *result_table* name is specified in the input parameter *options*,
        the results are stored in a new table with that name--no results are
        returned in the response.  Both the table name and resulting column
        name must adhere to `standard naming conventions
        <../../../concepts/tables.html#table>`_; any column expression will
        need to be aliased.  If the source table's `shard key
        <../../../concepts/tables.html#shard-keys>`_ is used as the input
        parameter *column_name*, the result table will be sharded, in all other
        cases it will be replicated.  Sorting will properly function only if
        the result table is replicated or if there is only one processing node
        and should not be relied upon in other cases.  Not available if input
        parameter *table_name* is a collection or when the value of input
        parameter *column_name* is an unrestricted-length string.
        Parameters:
            column_name (str)
                Name of the column or an expression containing one or more
                column names on which the unique function would be applied.
            offset (long)
                A positive integer indicating the number of initial results to
                skip (this can be useful for paging through the results).  The
                minimum allowed value is 0. The maximum allowed value is
                MAX_INT.
            limit (long)
                A positive integer indicating the maximum number of results to
                be returned. Or END_OF_SET (-9999) to indicate that the max
                number of results should be returned.  Default value is 10000.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * **binary** --
                  Indicates that the returned records should be binary encoded.
                * **json** --
                  Indicates that the returned records should be json encoded.
                  The default value is 'binary'.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **collection_name** --
                    Name of a collection which is to contain the table
                    specified in *result_table*. If the collection provided is
                    non-existent, the collection will be automatically created.
                    If empty, then the table will be a top-level table.
                    Additionally this option is invalid if input parameter
                    *table_name* is a collection.
                  * **expression** --
                    Optional filter expression to apply to the table.
                  * **sort_order** --
                    String indicating how the returned values should be sorted.
                    Allowed values are:
                    * ascending
                    * descending
                    The default value is 'ascending'.
                  * **result_table** --
                    The name of the table used to store the results. If
                    present, no results are returned in the response. Has the
                    same naming restrictions as `tables
                    <../../../concepts/tables.html>`_.  Not available if input
                    parameter *table_name* is a collection or when input
                    parameter *column_name* is an unrestricted-length string.
                  * **result_table_persist** --
                    If *true*, then the result table specified in
                    *result_table* will be persisted and will not expire unless
                    a *ttl* is specified.   If *false*, then the result table
                    will be an in-memory table and will expire unless a *ttl*
                    is specified otherwise.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
                  * **result_table_force_replicated** --
                    Force the result table to be replicated (ignores any
                    sharding). Must be used in combination with the
                    *result_table* option.
                  * **result_table_generate_pk** --
                    If 'true' then set a primary key for the result table. Must
                    be used in combination with the *result_table* option.
                  * **ttl** --
                    Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                    specified in *result_table*.
                  * **chunk_size** --
                    Indicates the chunk size to be used for the result table.
                    Must be used in combination with the *result_table* option.
        Returns:
            A read-only GPUdbTable object if input options has "result_table";
            otherwise the response from the server, which is a dict containing
            the following entries--
            table_name (str)
                The same table name as was passed in the parameter list.
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            has_more_records (bool)
                Too many records. Returned a partial set.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        if "result_table" in options:
            result_table = options[ "result_table" ]
        else:
            result_table = None
        response = self.db.aggregate_unique( self.name, column_name, offset,
                                             limit, encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        if result_table:
            # Create a read-only table for the result table
            return self.create_view( result_table )
        # Decode the returned records
        response = self.db.parse_dynamic_response( response, convert_nulls = False )
        # Save the decoded data in a field called 'data' and delete the raw 
        # data related fields
        response[ "data" ] = response[ "response" ]
        del response[ "response" ]
        del response[ "binary_encoded_response" ]
        del response[ "json_encoded_response" ]
        return response 
    # end aggregate_unique
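    # A hedged usage sketch: the first ten distinct values of a hypothetical
    # 'state' column, sorted descending as in the docstring example above.
    #
    #   resp = my_table.aggregate_unique(
    #       column_name = "state", offset = 0, limit = 10,
    #       options = { "sort_order": "descending" } )
    #   print( resp[ "data" ] )  # decoded unique values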
    def aggregate_unpivot( self, variable_column_name = '', value_column_name =
                           '', pivoted_columns = None, encoding = 'binary',
                           options = {} ):
        """Rotate the column values into rows values.
        The aggregate unpivot is used to normalize tables that are built for
        cross tabular reporting purposes. The unpivot operator rotates the
        column values for all the pivoted columns. A variable column, value
        column and all columns from the source table except the unpivot columns
        are projected into the result table. The variable column and value
        columns in the result table indicate the pivoted column name and values
        respectively.
        The response is returned as a dynamic schema. For details see: `dynamic
        schemas documentation <../../../api/index.html#dynamic-schemas>`_.
        Parameters:
            variable_column_name (str)
                Specifies the variable/parameter column name.  Default value is
                ''.
            value_column_name (str)
                Specifies the value column name.  Default value is ''.
            pivoted_columns (list of str)
                List of one or more values typically the column names of the
                input table. All the columns in the source table must have the
                same data type.  The user can provide a single element (which
                will be automatically promoted to a list internally) or a list.
            encoding (str)
                Specifies the encoding for returned records.  Default value is
                'binary'.
                Allowed values are:
                * **binary** --
                  Indicates that the returned records should be binary encoded.
                * **json** --
                  Indicates that the returned records should be json encoded.
                  The default value is 'binary'.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **collection_name** --
                    Name of a collection which is to contain the table
                    specified in *result_table*. If the collection provided is
                    non-existent, the collection will be automatically created.
                    If empty, then the table will be a top-level table.
                  * **result_table** --
                    The name of the table used to store the results. Has the
                    same naming restrictions as `tables
                    <../../../concepts/tables.html>`_. If present, no results
                    are returned in the response.
                  * **result_table_persist** --
                    If *true*, then the result table specified in
                    *result_table* will be persisted and will not expire unless
                    a *ttl* is specified.   If *false*, then the result table
                    will be an in-memory table and will expire unless a *ttl*
                    is specified otherwise.
                    Allowed values are:
                    * true
                    * false
                    The default value is 'false'.
                  * **expression** --
                    Filter expression to apply to the table prior to unpivot
                    processing.
                  * **order_by** --
                    Comma-separated list of the columns to be sorted by; e.g.
                    'timestamp asc, x desc'.  The columns specified must be
                    present in input table.  If any alias is given for any
                    column name, the alias must be used, rather than the
                    original column name.
                  * **chunk_size** --
                    Indicates the chunk size to be used for the result table.
                    Must be used in combination with the *result_table* option.
                  * **limit** --
                    The number of records to keep.
                  * **ttl** --
                    Sets the `TTL <../../../concepts/ttl.html>`_ of the table
                    specified in *result_table*.
        Returns:
            A read-only GPUdbTable object if input options has "result_table";
            otherwise the response from the server, which is a dict containing
            the following entries--
            table_name (str)
                Typically shows the result-table name if provided in the
                request (ignore otherwise).
            response_schema_str (str)
                Avro schema of output parameter *binary_encoded_response* or
                output parameter *json_encoded_response*.
            binary_encoded_response (str)
                Avro binary encoded response.
            json_encoded_response (str)
                Avro JSON encoded response.
            total_number_of_records (long)
                Total/Filtered number of records.
            has_more_records (bool)
                Too many records. Returned a partial set.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        if "result_table" in options:
            result_table = options[ "result_table" ]
        else:
            result_table = None
        response = self.db.aggregate_unpivot( self.name, variable_column_name,
                                              value_column_name,
                                              pivoted_columns, encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        if result_table:
            # Create a read-only table for the result table
            return self.create_view( result_table )
        # Decode the returned records
        response = self.db.parse_dynamic_response( response, convert_nulls = False )
        # Save the decoded data in a field called 'data' and delete the raw 
        # data related fields
        response[ "data" ] = response[ "response" ]
        del response[ "response" ]
        del response[ "binary_encoded_response" ]
        del response[ "json_encoded_response" ]
        return response 
    # end aggregate_unpivot
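    # A hedged usage sketch: unpivoting hypothetical per-quarter sales columns
    # into (quarter, sales) rows; all column names are illustrative only.
    #
    #   resp = my_table.aggregate_unpivot(
    #       variable_column_name = "quarter", value_column_name = "sales",
    #       pivoted_columns = [ "q1_sales", "q2_sales", "q3_sales" ] )
    #   print( resp[ "data" ] )  # rotated records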
    def alter_table( self, action = None, value = None, options = {} ):
        """Apply various modifications to a table, view, or collection.  The
        available
        modifications include the following:
        Create or delete an `index
        <../../../concepts/indexes.html#column-index>`_ on a
        particular column. This can speed up certain operations when using
        expressions
        containing equality or relational operators on indexed columns. This
        only
        applies to tables.
        Set the `time-to-live (TTL) <../../../concepts/ttl.html>`_. This can be
        applied
        to tables, views, or collections.  When applied to collections, every
        contained
        table & view that is not protected will have its TTL set to the given
        value.
        Set the global access mode (i.e. locking) for a table. The mode can be
        set to
        'no_access', 'read_only', 'write_only' or 'read_write'.
        Change the `protection <../../../concepts/protection.html>`_ mode to
        prevent or
        allow automatic expiration. This can be applied to tables, views, and
        collections.
        Allow homogeneous tables within a collection.
        Manage a table's columns--a column can be added, removed, or have its
        `type and properties <../../../concepts/types.html>`_ modified.
        Set or unset `compression <../../../concepts/compression.html>`_ for a
        column.
        Parameters:
            action (str)
                Modification operation to be applied
                Allowed values are:
                * **allow_homogeneous_tables** --
                  Sets whether homogeneous tables are allowed in the given
                  collection. This action is only valid if input parameter
                  *table_name* is a collection. The input parameter *value*
                  must be either 'true' or 'false'.
                * **create_index** --
                  Creates an `index
                  <../../../concepts/indexes.html#column-index>`_ on the column
                  name specified in input parameter *value*. If this column is
                  already indexed, an error will be returned.
                * **delete_index** --
                  Deletes an existing `index
                  <../../../concepts/indexes.html#column-index>`_ on the column
                  name specified in input parameter *value*. If this column
                  does not have indexing turned on, an error will be returned.
                * **move_to_collection** --
                  Moves a table into a collection input parameter *value*.
                * **protected** --
                  Sets whether the given input parameter *table_name* should be
                  `protected <../../../concepts/protection.html>`_ or not. The
                  input parameter *value* must be either 'true' or 'false'.
                * **rename_table** --
                  Renames a table, view or collection to input parameter
                  *value*. Has the same naming restrictions as `tables
                  <../../../concepts/tables.html>`_.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the table,
                  view, or collection specified in input parameter
                  *table_name*.
                * **add_column** --
                  Adds the column specified in input parameter *value* to the
                  table specified in input parameter *table_name*.  Use
                  *column_type* and *column_properties* in input parameter
                  *options* to set the column's type and properties,
                  respectively.
                * **change_column** --
                  Changes type and properties of the column specified in input
                  parameter *value*.  Use *column_type* and *column_properties*
                  in input parameter *options* to set the column's type and
                  properties, respectively.
                * **set_column_compression** --
                  Modifies the `compression
                  <../../../concepts/compression.html>`_ setting on the column
                  specified in input parameter *value*.
                * **delete_column** --
                  Deletes the column specified in input parameter *value* from
                  the table specified in input parameter *table_name*.
                * **create_foreign_key** --
                  Creates a `foreign key
                  <../../../concepts/tables.html#foreign-key>`_ using the
                  format 'source_column references
                  target_table(primary_key_column) [ as <foreign_key_name> ]'.
                * **delete_foreign_key** --
                  Deletes a `foreign key
                  <../../../concepts/tables.html#foreign-key>`_.  The input
                  parameter *value* should be the <foreign_key_name> specified
                  when creating the key or the complete string used to define
                  it.
                * **set_global_access_mode** --
                  Sets the global access mode (i.e. locking) for the table
                  specified in input parameter *table_name*. Specify the access
                  mode in input parameter *value*. Valid modes are 'no_access',
                  'read_only', 'write_only' and 'read_write'.
            value (str)
                  The value of the modification. May be a column name, 'true'
                  or 'false', a TTL, or the global access mode depending on
                  input parameter *action*.
            options (dict of str to str)
                  Optional parameters.  Default value is an empty dict ( {} ).
                  Allowed keys are:
                  * **column_default_value** --
                    When adding a column, set a default value for existing
                    records.
                  * **column_properties** --
                    When adding or changing a column, set the column properties
                    (strings, separated by a comma: data, store_only,
                    text_search, char8, int8 etc).
                  * **column_type** --
                    When adding or changing a column, set the column type
                    (strings, separated by a comma: int, double, string, null
                    etc).
                  * **compression_type** --
                    When setting column compression (*set_column_compression*
                    for input parameter *action*), compression type to use:
                    *none* (to use no compression) or a valid compression type.
                    Allowed values are:
                    * none
                    * snappy
                    * lz4
                    * lz4hc
                    The default value is 'snappy'.
                  * **copy_values_from_column** --
                    When adding or changing a column, enter a column name from
                    the same table being altered to use as a source for the
                    column being added/changed; values will be copied from this
                    source column into the new/modified column.
                  * **rename_column** --
                    When changing a column, specify new column name.
                  * **validate_change_column** --
                    When changing a column, validate the change before applying
                    it. If *true*, then validate all values. A value too large
                    (or too long) for the new type will prevent any change. If
                    *false*, then when a value is too large or long, it will be
                    truncated.
                     Allowed values are:
                     * true
                     * false
                     The default value is 'true'.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            table_name (str)
                Table on which the operation was performed.
            action (str)
                Modification operation that was performed.
            value (str)
                The value of the modification that was performed.
            type_id (str)
                return the type_id (when changing a table, a new type may be
                created)
            type_definition (str)
                return the type_definition  (when changing a table, a new type
                may be created)
            properties (dict of str to lists of str)
                return the type properties  (when changing a table, a new type
                may be created)
            label (str)
                return the type label  (when changing a table, a new type may
                be created)
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.alter_table( self.name, action, value, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end alter_table
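    # A hedged usage sketch: indexing a hypothetical 'x' column, then adding
    # an integer column with a default value via the options documented above;
    # the column names are illustrative only.
    #
    #   my_table.alter_table( action = "create_index", value = "x" )
    #   my_table.alter_table( action = "add_column", value = "flag",
    #                         options = { "column_type": "int",
    #                                     "column_default_value": "0" } )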
    def append_records( self, source_table_name = None, field_map = None,
                        options = {} ):
        """Append (or insert) all records from a source table (specified by input
        parameter *source_table_name*) to a particular target table (specified
        by input parameter *table_name*). The field map (specified by input
        parameter *field_map*) holds the user specified map of target table
        column names with their mapped source column names.
        Parameters:
            source_table_name (str)
                The source table name to get records from. Must be an existing
                table name.
            field_map (dict of str to str)
                Contains the mapping of column names from the target table
                (specified by input parameter *table_name*) as the keys, and
                corresponding column names from the source table (specified by
                input parameter *source_table_name*). Must be existing column
                names in source table and target table, and their types must be
                matched.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **offset** --
                  A positive integer indicating the number of initial results
                  to skip from source table (specified by input parameter
                  *source_table_name*). Default is 0. The minimum allowed value
                  is 0. The maximum allowed value is MAX_INT.
                * **limit** --
                  A positive integer indicating the maximum number of results
                  to be returned from source table (specified by input
                  parameter *source_table_name*). Or END_OF_SET (-9999) to
                  indicate that the max number of results should be returned.
                * **expression** --
                  Optional filter expression to apply to the source table
                  (specified by input parameter *source_table_name*). Empty by
                  default.
                * **order_by** --
                  Comma-separated list of the columns to be sorted from source
                  table (specified by input parameter *source_table_name*) by;
                  e.g. 'timestamp asc, x desc'.  The columns specified must be
                  present in input parameter *field_map*.  If any alias is
                  given for any column name, the alias must be used, rather
                  than the original column name.
                * **update_on_existing_pk** --
                  Specifies the record collision policy for inserting the
                  source table records (specified by input parameter
                  *source_table_name*) into the target table (specified by
                  input parameter *table_name*) table with a `primary key
                  <../../../concepts/tables.html#primary-keys>`_.  If set to
                  *true*, any existing target table record with primary key
                  values that match those of a source table record being
                  inserted will be replaced by that new record.  If set to
                  *false*, any existing target table record with primary key
                  values that match those of a source table record being
                  inserted will remain unchanged and the new record discarded.
                  If the specified table does not have a primary key, then this
                  option is ignored.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            table_name (str)
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.append_records( self.name, source_table_name,
                                           field_map, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end append_records
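    # A hedged usage sketch: copying records from a hypothetical staging
    # table, mapping its 'src_id' column onto this table's 'id' column and
    # replacing rows that collide on the primary key; all names are
    # illustrative only.
    #
    #   my_table.append_records(
    #       source_table_name = "staging_table",
    #       field_map = { "id": "src_id", "x": "x" },
    #       options = { "update_on_existing_pk": "true" } )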
    def clear( self, authorization = '', options = {} ):
        """Clears (drops) one or all tables in the database cluster. The operation
        is synchronous, meaning that the table will be cleared before the
        function returns. The response payload returns the status of the
        operation along with the name of the table that was cleared.
        Parameters:
            authorization (str)
                No longer used. User can pass an empty string.  Default value
                is ''.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **no_error_if_not_exists** --
                  If *true* and if the table specified in input parameter
                  *table_name* does not exist no error is returned. If *false*
                  and if the table specified in input parameter *table_name*
                  does not exist then an error is returned.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            table_name (str)
                Value of input parameter *table_name* for a given table, or
                'ALL CLEARED' in case of clearing all tables.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.clear_table( self.name, authorization, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end clear
    def create_projection( self, column_names = None, options = {},
                           projection_name = None ):
        """Creates a new `projection <../../../concepts/projections.html>`_ of an
        existing table. A projection represents a subset of the columns
        (potentially including derived columns) of a table.
        Notes:
        A moving average can be calculated on a given column using the
        following syntax in the input parameter *column_names* parameter:
        'moving_average(column_name,num_points_before,num_points_after) as
        new_column_name'
        For each record in the moving_average function's 'column_name'
        parameter, it computes the average over the previous
        'num_points_before' records and the subsequent 'num_points_after'
        records.
        Note that moving average relies on *order_by*, and *order_by* requires
        that all the data being ordered resides on the same processing node, so
        it won't make sense to use *order_by* without moving average.
        Also, a projection can be created with a different `shard key
        <../../../concepts/tables.html#shard-keys>`_ than the source table.  By
        specifying *shard_key*, the projection will be sharded according to the
        specified columns, regardless of how the source table is sharded.  The
        source table can even be unsharded or replicated.
        Parameters:
            column_names (list of str)
                List of columns from input parameter *table_name* to be
                included in the projection. Can include derived columns. Can be
                specified as aliased via the syntax 'column_name as alias'.
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a `collection <../../../concepts/collections.html>`_
                  to which the projection is to be assigned as a child. If the
                  collection provided is non-existent, the collection will be
                  automatically created. If empty, then the projection will be
                  at the top level.
                * **expression** --
                  An optional filter `expression
                  <../../../concepts/expressions.html>`_ to be applied to the
                  source table prior to the projection.
                * **limit** --
                  The number of records to keep.
                * **order_by** --
                  Comma-separated list of the columns to be sorted by; e.g.
                  'timestamp asc, x desc'.  The columns specified must be
                  present in input parameter *column_names*.  If any alias is
                  given for any column name, the alias must be used, rather
                  than the original column name.
                * **materialize_on_gpu** --
                  If *true* then the columns of the projection will be cached
                  on the GPU.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **chunk_size** --
                  Indicates the chunk size to be used for this table.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the
                  projection specified in input parameter *projection_name*.
                * **shard_key** --
                  Comma-separated list of the columns to be sharded on; e.g.
                  'column1, column2'.  The columns specified must be present in
                  input parameter *column_names*.  If any alias is given for
                  any column name, the alias must be used, rather than the
                  original column name.
                * **persist** --
                  If *true*, then the projection specified in input parameter
                  *projection_name* will be persisted and will not expire
                  unless a *ttl* is specified.   If *false*, then the
                  projection will be an in-memory table and will expire unless
                  a *ttl* is specified otherwise.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
            projection_name (str)
                  Name of the projection to be created. Has the same naming
                  restrictions as `tables <../../../concepts/tables.html>`_.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        projection_name = self.__process_view_name( projection_name )
        response = self.db.create_projection( self.name, projection_name,
                                              column_names, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( projection_name ) 
    # end create_projection
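    # A hedged usage sketch: a persisted projection with a derived, aliased
    # column; note the parameter order (column_names, options,
    # projection_name) and that the call returns a read-only GPUdbTable.
    # All names are illustrative only.
    #
    #   proj = my_table.create_projection(
    #       column_names = [ "x", "x * 2 as x_doubled" ],
    #       options = { "persist": "true" },
    #       projection_name = "my_projection" )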
    def create_table_monitor( self, options = {} ):
        """Creates a monitor that watches for new records inserted into a
        particular table (identified by input parameter *table_name*) and
        forwards copies to subscribers via ZMQ. After this call completes,
        subscribe to the returned output parameter *topic_id* on the ZMQ table
        monitor port (default 9002). Each time an insert operation on the table
        completes, a multipart message is published for that topic; the first
        part contains only the topic ID, and each subsequent part contains one
        binary-encoded Avro object that was inserted. The monitor will continue
        to run (regardless of whether or not there are any subscribers) until
        deactivated with :meth:`.clear_table_monitor`.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            topic_id (str)
                The ZMQ topic ID to subscribe to for inserted records.
            table_name (str)
                Value of input parameter *table_name*.
            type_schema (str)
                JSON Avro schema of the table, for use in decoding published
                records.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.create_table_monitor( self.name, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end create_table_monitor
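    # A hedged usage sketch: creating a monitor and extracting the two pieces
    # a ZMQ subscriber needs--the topic ID to subscribe to and the Avro
    # schema for decoding the published records.
    #
    #   resp = my_table.create_table_monitor()
    #   topic_id    = resp[ "topic_id" ]     # subscribe to this ZMQ topic
    #   type_schema = resp[ "type_schema" ]  # decode inserts with this schema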
    def delete_records( self, expressions = None, options = {} ):
        """Deletes record(s) matching the provided criteria from the given table.
        The record selection criteria can either be one or more input
        parameter *expressions* (matching multiple records) or a single record
        identified by the *record_id* option.  Note that the two selection
        criteria are mutually exclusive.  This operation cannot be run on a
        collection or a view.  The operation is synchronous, meaning that a
        response will not be available until the request is completely
        processed and all the matching records are deleted.
        Parameters:
            expressions (list of str)
                A list of the actual predicates, one for each select; format
                should follow the guidelines provided `here
                <../../../concepts/expressions.html>`_. Specifying one or more
                input parameter *expressions* is mutually exclusive to
                specifying *record_id* in the input parameter *options*.  The
                user can provide a single element (which will be automatically
                promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **global_expression** --
                  An optional global expression to reduce the search space of
                  the input parameter *expressions*.
                * **record_id** --
                  A record id identifying a single record, obtained at the time
                  of :meth:`insertion of the record <.insert_records>` or by
                  calling :meth:`.get_records_from_collection` with the
                  *return_record_ids* option.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            count_deleted (long)
                Total number of records deleted across all expressions.
            counts_deleted (list of longs)
                Total number of records deleted per expression.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
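        Example:
            A minimal sketch (assumes ``my_table`` is a GPUdbTable object
            bound to an existing table with a numeric column ``x``)::

                # A single expression is promoted to a list internally
                response = my_table.delete_records( expressions = "x > 100" )
                print( response[ "count_deleted" ] )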
        """
        response = self.db.delete_records( self.name, expressions, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end delete_records
    def filter( self, expression = None, options = {}, view_name = '' ):
        """Filters data based on the specified expression.  The results are stored
        in a `result set <../../../concepts/filtered_views.html>`_ with the
        given input parameter *view_name*.
        For details see `Expressions <../../../concepts/expressions.html>`_.
        The response message contains the number of points for which the
        expression evaluated to be true, which is equivalent to the size of the
        result view.
        Parameters:
            expression (str)
                The select expression to filter the specified table.  For
                details see `Expressions
                <../../../concepts/expressions.html>`_.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **collection_name** --
                  Name of a collection which is to contain the newly created
                  view. If the collection provided is non-existent, the
                  collection will be automatically created. If empty, then the
                  newly created view will be top-level.
                * **ttl** --
                  Sets the `TTL <../../../concepts/ttl.html>`_ of the view
                  specified in input parameter *view_name*.
            view_name (str)
                If provided, then this will be the name of the view
                containing the results. Has the same naming restrictions as
                `tables <../../../concepts/tables.html>`_.  Default value is
                ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
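        Example:
            A minimal sketch (assumes ``my_table`` is a GPUdbTable object
            whose type has numeric columns ``x`` and ``y``)::

                view = my_table.filter( expression = "(x > 0) and (y <= 10)",
                                        view_name = "my_filtered_view" )
                # 'view' is itself a read-only GPUdbTable, so filters can be
                # chained, e.g. view.filter( expression = "x < 5" )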
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter( self.name, view_name, expression, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter
    def filter_by_area( self, x_column_name = None, x_vector = None,
                        y_column_name = None, y_vector = None, options = {},
                        view_name = '' ):
        """Calculates which objects from a table are within a named area of
        interest (NAI/polygon). The operation is synchronous, meaning that a
        response will not be returned until all the matching objects are fully
        available. The response payload provides the count of the resulting
        set. A new resultant set (view) which satisfies the input NAI
        restriction specification is created with the name input parameter
        *view_name* passed in as part of the input.
        Parameters:
            x_column_name (str)
                Name of the column containing the x values to be filtered.
            x_vector (list of floats)
                List of x coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            y_column_name (str)
                Name of the column containing the y values to be filtered.
            y_vector (list of floats)
                List of y coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
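        Example:
            A minimal sketch (assumes ``my_table`` has numeric columns ``x``
            and ``y``); the vectors below trace a unit square::

                view = my_table.filter_by_area( x_column_name = "x",
                                                x_vector = [0.0, 1.0, 1.0, 0.0],
                                                y_column_name = "y",
                                                y_vector = [0.0, 0.0, 1.0, 1.0],
                                                view_name = "in_unit_square" )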
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_area( self.name, view_name, x_column_name,
                                           x_vector, y_column_name, y_vector,
                                           options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_area
    def filter_by_area_geometry( self, column_name = None, x_vector = None,
                                 y_vector = None, options = {}, view_name = ''
                                 ):
        """Calculates which geospatial geometry objects from a table intersect a
        named area of interest (NAI/polygon). The operation is synchronous,
        meaning that a response will not be returned until all the matching
        objects are fully available. The response payload provides the count of
        the resulting set. A new resultant set (view) which satisfies the input
        NAI restriction specification is created with the name input parameter
        *view_name* passed in as part of the input.
        Parameters:
            column_name (str)
                Name of the geospatial geometry column to be filtered.
            x_vector (list of floats)
                List of x coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            y_vector (list of floats)
                List of y coordinates of the vertices of the polygon
                representing the area to be filtered.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Must not be an already existing collection, table
                or view.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_area_geometry( self.name, view_name,
                                                    column_name, x_vector,
                                                    y_vector, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_area_geometry
    def filter_by_box( self, x_column_name = None, min_x = None, max_x = None,
                       y_column_name = None, min_y = None, max_y = None, options
                       = {}, view_name = '' ):
        """Calculates how many objects within the given table lie in a rectangular
        box. The operation is synchronous, meaning that a response will not be
        returned until all the objects are fully available. The response
        payload provides the count of the resulting set. A new resultant set
        which satisfies the input NAI restriction specification is also created
        when an input parameter *view_name* is passed in as part of the input
        payload.
        Parameters:
            x_column_name (str)
                Name of the column on which to perform the bounding box query.
                Must be a valid numeric column.
            min_x (float)
                Lower bound for the column chosen by input parameter
                *x_column_name*.  Must be less than or equal to input parameter
                *max_x*.
            max_x (float)
                Upper bound for input parameter *x_column_name*.  Must be
                greater than or equal to input parameter *min_x*.
            y_column_name (str)
                Name of a column on which to perform the bounding box query.
                Must be a valid numeric column.
            min_y (float)
                Lower bound for input parameter *y_column_name*. Must be less
                than or equal to input parameter *max_y*.
            max_y (float)
                Upper bound for input parameter *y_column_name*. Must be
                greater than or equal to input parameter *min_y*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                Optional name of the result view that will be created
                containing the results of the query. Has the same naming
                restrictions as `tables <../../../concepts/tables.html>`_.
                Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
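        Example:
            A minimal sketch (assumes ``my_table`` has numeric columns ``x``
            and ``y``)::

                view = my_table.filter_by_box( x_column_name = "x",
                                               min_x = 0.0, max_x = 10.0,
                                               y_column_name = "y",
                                               min_y = -5.0, max_y = 5.0,
                                               view_name = "in_box" )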
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_box( self.name, view_name, x_column_name,
                                          min_x, max_x, y_column_name, min_y,
                                          max_y, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_box
    def filter_by_box_geometry( self, column_name = None, min_x = None, max_x =
                                None, min_y = None, max_y = None, options = {},
                                view_name = '' ):
        """Calculates which geospatial geometry objects from a table intersect a
        rectangular box. The operation is synchronous, meaning that a response
        will not be returned until all the objects are fully available. The
        response payload provides the count of the resulting set. A new
        resultant set which satisfies the input NAI restriction specification
        is also created when an input parameter *view_name* is passed in as part
        of the input payload.
        Parameters:
            column_name (str)
                Name of the geospatial geometry column to be filtered.
            min_x (float)
                Lower bound for the x-coordinate of the rectangular box.  Must
                be less than or equal to input parameter *max_x*.
            max_x (float)
                Upper bound for the x-coordinate of the rectangular box.  Must
                be greater than or equal to input parameter *min_x*.
            min_y (float)
                Lower bound for the y-coordinate of the rectangular box. Must
                be less than or equal to input parameter *max_y*.
            max_y (float)
                Upper bound for the y-coordinate of the rectangular box. Must
                be greater than or equal to input parameter *min_y*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                Optional name of the result view that will be created
                containing the results of the query. Must not be an already
                existing collection, table or view.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_box_geometry( self.name, view_name,
                                                   column_name, min_x, max_x,
                                                   min_y, max_y, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_box_geometry
    def filter_by_geometry( self, column_name = None, input_wkt = '', operation
                            = None, options = {}, view_name = '' ):
        """Applies a geometry filter against a geospatial geometry column in a
        given table, collection or view. The filtering geometry is provided by
        input parameter *input_wkt*.
        Parameters:
            column_name (str)
                Name of the column to be used in the filter. Must be a
                geospatial geometry column.
            input_wkt (str)
                A geometry in WKT format that will be used to filter the
                objects in input parameter *table_name*.  Default value is ''.
            operation (str)
                The geometric filtering operation to perform
                Allowed values are:
                * **contains** --
                  Matches records that contain the given WKT in input parameter
                  *input_wkt*, i.e. the given WKT is within the bounds of a
                  record's geometry.
                * **crosses** --
                  Matches records that cross the given WKT.
                * **disjoint** --
                  Matches records that are disjoint from the given WKT.
                * **equals** --
                  Matches records that are the same as the given WKT.
                * **intersects** --
                  Matches records that intersect the given WKT.
                * **overlaps** --
                  Matches records that overlap the given WKT.
                * **touches** --
                  Matches records that touch the given WKT.
                * **within** --
                  Matches records that are within the given WKT.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view
                containing the results. Has the same naming restrictions as
                `tables <../../../concepts/tables.html>`_.  Default value is
                ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
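        Example:
            A minimal sketch (assumes ``my_table`` has a geospatial geometry
            column named ``geom``)::

                square_wkt = "POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))"
                view = my_table.filter_by_geometry( column_name = "geom",
                                                    input_wkt = square_wkt,
                                                    operation = "intersects",
                                                    view_name = "hits_square" )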
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_geometry( self.name, view_name,
                                               column_name, input_wkt,
                                               operation, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_geometry
    def filter_by_list( self, column_values_map = None, options = {}, view_name
                        = '' ):
        """Calculates which records from a table have values in the given list for
        the corresponding column. The operation is synchronous, meaning that a
        response will not be returned until all the objects are fully
        available. The response payload provides the count of the resulting
        set. A new resultant set (view) which satisfies the input filter
        specification is also created if an input parameter *view_name* is
        passed in as part of the request.
        For example, if a type definition has the columns 'x' and 'y', then a
        filter by list query with the column map {"x":["10.1", "2.3"],
        "y":["0.0", "-31.5", "42.0"]} will return the count of all data points
        whose x and y values match both in the respective x- and y-lists, e.g.,
        "x = 10.1 and y = 0.0", "x = 2.3 and y = -31.5", etc. However, a record
        with "x = 10.1 and y = -31.5" or "x = 2.3 and y = 0.0" would not be
        returned because the values in the given lists do not correspond.
        Parameters:
            column_values_map (dict of str to lists of str)
                Map of column names to lists of values to match in the
                corresponding columns of the table.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **filter_mode** --
                  String indicating the filter mode, either 'in_list' or
                  'not_in_list'.
                  Allowed values are:
                  * **in_list** --
                    The filter will match all items that are in the provided
                    list(s).
                  * **not_in_list** --
                    The filter will match all items that are not in the
                    provided list(s).
                  The default value is 'in_list'.
            view_name (str)
                If provided, then this will be the name of the view
                containing the results. Has the same naming restrictions as
                `tables <../../../concepts/tables.html>`_.  Default value is
                ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
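        Example:
            A minimal sketch of the x/y example above (assumes ``my_table``
            has columns ``x`` and ``y``; note that the values are passed as
            strings)::

                view = my_table.filter_by_list(
                    column_values_map = { "x": ["10.1", "2.3"],
                                          "y": ["0.0", "-31.5", "42.0"] },
                    view_name = "matching_points" )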
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_list( self.name, view_name,
                                           column_values_map, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_list
    def filter_by_radius( self, x_column_name = None, x_center = None,
                          y_column_name = None, y_center = None, radius = None,
                          options = {}, view_name = '' ):
        """Calculates which objects from a table lie within a circle with the
        given radius and center point (i.e. circular NAI). The operation is
        synchronous, meaning that a response will not be returned until all the
        objects are fully available. The response payload provides the count of
        the resulting set. A new resultant set (view) which satisfies the input
        circular NAI restriction specification is also created if an input
        parameter *view_name* is passed in as part of the request.
        For track data, all track points that lie within the circle plus one
        point on either side of the circle (if the track goes beyond the
        circle) will be included in the result.
        Parameters:
            x_column_name (str)
                Name of the column to be used for the x-coordinate (the
                longitude) of the center.
            x_center (float)
                Value of the longitude of the center. Must be within [-180.0,
                180.0].  The minimum allowed value is -180. The maximum allowed
                value is 180.
            y_column_name (str)
                Name of the column to be used for the y-coordinate (the
                latitude) of the center.
            y_center (float)
                Value of the latitude of the center. Must be within [-90.0,
                90.0].  The minimum allowed value is -90. The maximum allowed
                value is 90.
            radius (float)
                The radius of the circle within which the search will be
                performed. Must be a non-zero positive value. It is in meters;
                so, for example, a value of '42000' means 42 km.  The minimum
                allowed value is 0. The maximum allowed value is MAX_INT.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
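        Example:
            A minimal sketch (assumes ``my_table`` has longitude and latitude
            columns ``lon`` and ``lat``); this selects records within 42 km
            of the given center::

                view = my_table.filter_by_radius( x_column_name = "lon",
                                                  x_center = -77.0,
                                                  y_column_name = "lat",
                                                  y_center = 38.9,
                                                  radius = 42000.0,
                                                  view_name = "nearby" )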
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_radius( self.name, view_name,
                                             x_column_name, x_center,
                                             y_column_name, y_center, radius,
                                             options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_radius
    def filter_by_radius_geometry( self, column_name = None, x_center = None,
                                   y_center = None, radius = None, options = {},
                                   view_name = '' ):
        """Calculates which geospatial geometry objects from a table intersect a
        circle with the given radius and center point (i.e. circular NAI). The
        operation is synchronous, meaning that a response will not be returned
        until all the objects are fully available. The response payload
        provides the count of the resulting set. A new resultant set (view)
        which satisfies the input circular NAI restriction specification is
        also created if an input parameter *view_name* is passed in as part of
        the request.
        Parameters:
            column_name (str)
                Name of the geospatial geometry column to be filtered.
            x_center (float)
                Value of the longitude of the center. Must be within [-180.0,
                180.0].  The minimum allowed value is -180. The maximum allowed
                value is 180.
            y_center (float)
                Value of the latitude of the center. Must be within [-90.0,
                90.0].  The minimum allowed value is -90. The maximum allowed
                value is 90.
            radius (float)
                The radius of the circle within which the search will be
                performed. Must be a non-zero positive value. It is in meters;
                so, for example, a value of '42000' means 42 km.  The minimum
                allowed value is 0. The maximum allowed value is MAX_INT.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Must not be an already existing collection, table
                or view.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_radius_geometry( self.name, view_name,
                                                      column_name, x_center,
                                                      y_center, radius, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_radius_geometry
    def filter_by_range( self, column_name = None, lower_bound = None,
                         upper_bound = None, options = {}, view_name = '' ):
        """Calculates which objects from a table have a column that is within the
        given bounds. An object from the table identified by input parameter
        *table_name* is added to the view input parameter *view_name* if its
        column is within [input parameter *lower_bound*, input parameter
        *upper_bound*] (inclusive). The operation is synchronous. The response
        provides a count of the number of objects which passed the bound
        filter.  Although this functionality can also be accomplished with the
        standard filter function, this specialized function is more efficient.
        For track objects, the count reflects how many points fall within the
        given bounds (which may not include all the track points of any given
        track).
        Parameters:
            column_name (str)
                Name of a column on which the operation would be applied.
            lower_bound (float)
                Value of the lower bound (inclusive).
            upper_bound (float)
                Value of the upper bound (inclusive).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
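        Example:
            A minimal sketch (assumes ``my_table`` has a numeric column
            ``x``); this keeps records with 0 <= x <= 100, inclusive::

                view = my_table.filter_by_range( column_name = "x",
                                                 lower_bound = 0.0,
                                                 upper_bound = 100.0,
                                                 view_name = "x_in_range" )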
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_range( self.name, view_name, column_name,
                                            lower_bound, upper_bound, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_range
    def filter_by_series( self, track_id = None, target_track_ids = None,
                          options = {}, view_name = '' ):
        """Filters objects matching all points of the given track (works only on
        track type data).  It allows users to specify a particular track to
        find all other points in the table that fall within specified
        ranges (spatial and temporal) of all points of the given track.
        Additionally, the user can specify another track to see if the two
        intersect (or go close to each other within the specified ranges). The
        user also has the flexibility of using different metrics for the
        spatial distance calculation: Euclidean (flat geometry) or Great Circle
        (spherical geometry to approximate the Earth's surface distances). The
        filtered points are stored in a newly created result set. The return
        value of the function is the number of points in the resultant set
        (view).
        This operation is synchronous, meaning that a response will not be
        returned until all the objects are fully available.
        Parameters:
            track_id (str)
                The ID of the track which will act as the filtering points.
                Must be an existing track within the given table.
            target_track_ids (list of str)
                Up to one track ID to intersect with the "filter" track. If one
                is provided, it must be a valid track ID within the given set.
                The user can provide a single element (which will be
                automatically promoted to a list internally) or a list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **spatial_radius** --
                  A positive number passed as a string representing the radius
                  of the search area centered around each track point's
                  geospatial coordinates. The value is interpreted in meters.
                  Required parameter.
                * **time_radius** --
                  A positive number passed as a string representing the maximum
                  allowable time difference between the timestamps of a
                  filtered object and the given track's points. The value is
                  interpreted in seconds. Required parameter.
                * **spatial_distance_metric** --
                  A string representing the coordinate system to use for the
                  spatial search criteria. Acceptable values are 'euclidean'
                  and 'great_circle'. Optional parameter; default is
                  'euclidean'.
                  Allowed values are:
                  * euclidean
                  * great_circle
            view_name (str)
                If provided, then this will be the name of the view
                containing the results. Has the same naming restrictions as
                `tables <../../../concepts/tables.html>`_.  Default value is
                ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
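        Example:
            A minimal sketch (assumes ``my_table`` is a track-type table
            containing a track with ID "TRACK_0"; note that the required
            radii are passed as strings)::

                view = my_table.filter_by_series(
                    track_id = "TRACK_0",
                    options = { "spatial_radius": "500",
                                "time_radius": "60",
                                "spatial_distance_metric": "great_circle" },
                    view_name = "near_track_0" )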
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_series( self.name, view_name, track_id,
                                             target_track_ids, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_series
    def filter_by_string( self, expression = None, mode = None, column_names =
                          None, options = {}, view_name = '' ):
        """Calculates which objects from a table, collection, or view match a
        string expression for the given string columns. The option
        'case_sensitive' can be used to modify the behavior for all modes
        except 'search'. For 'search' mode details and limitations, see `Full
        Text Search <../../../concepts/full_text_search.html>`_.
        Parameters:
            expression (str)
                The expression with which to filter the table.
            mode (str)
                The string filtering mode to apply. See below for details.
                Allowed values are:
                * **search** --
                  Full text search query with wildcards and boolean operators.
                  Note that for this mode, no column can be specified in input
                  parameter *column_names*; all string columns of the table
                  that have text search enabled will be searched.
                * **equals** --
                  Exact whole-string match (accelerated).
                * **contains** --
                  Partial substring match (not accelerated).  If the column is
                  a string type (non-charN) and the number of records is too
                  large, it will return 0.
                * **starts_with** --
                  Strings that start with the given expression (not
                  accelerated). If the column is a string type (non-charN) and
                  the number of records is too large, it will return 0.
                * **regex** --
                  Full regular expression search (not accelerated). If the
                  column is a string type (non-charN) and the number of records
                  is too large, it will return 0.
            column_names (list of str)
                List of columns on which to apply the filter. Ignored for
                'search' mode.  The user can provide a single element (which
                will be automatically promoted to a list internally) or a
                list.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **case_sensitive** --
                  If 'false' then string filtering will ignore case. Does not
                  apply to 'search' mode.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'true'.
            view_name (str)
                If provided, then this will be the name of the view
                containing the results. Has the same naming restrictions as
                `tables <../../../concepts/tables.html>`_.  Default value is
                ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
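        Example:
            A minimal sketch (assumes ``my_table`` has a string column
            ``last_name``); a case-insensitive prefix match::

                view = my_table.filter_by_string(
                    expression = "smi",
                    mode = "starts_with",
                    column_names = "last_name",  # promoted to a list
                    options = { "case_sensitive": "false" },
                    view_name = "smi_names" )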
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_string( self.name, view_name, expression,
                                             mode, column_names, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_string
    def filter_by_table( self, column_name = None, source_table_name = None,
                         source_table_column_name = None, options = {},
                         view_name = '' ):
        """Filters objects in one table based on objects in another table. The
        user must specify matching column types from the two tables (i.e. the
        target table from which objects will be filtered and the source table
        based on which the filter will be created); the column names need not
        be the same. If an input parameter *view_name* is specified, then the
        filtered objects will then be put in a newly created view. The
        operation is synchronous, meaning that a response will not be returned
        until all objects are fully available in the result view. The return
        value contains the count (i.e. the size) of the resulting view.
        Parameters:
            column_name (str)
                Name of the column by whose value the data will be filtered
                from the table designated by input parameter *table_name*.
            source_table_name (str)
                Name of the table whose data will be compared against in the
                table called input parameter *table_name*. Must be an existing
                table.
            source_table_column_name (str)
                Name of the column in the input parameter *source_table_name*
                whose values will be used as the filter for table input
                parameter *table_name*. Must be a geospatial geometry column if
                in 'spatial' mode; otherwise, must match the type of the input
                parameter *column_name*.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **filter_mode** --
                  String indicating the filter mode, either *in_table* or
                  *not_in_table*.
                  Allowed values are:
                  * in_table
                  * not_in_table
                  The default value is 'in_table'.
                * **mode** --
                  Mode - should be either *spatial* or *normal*.
                  Allowed values are:
                  * normal
                  * spatial
                  The default value is 'normal'.
                * **buffer** --
                  Buffer size, in meters. Only relevant for *spatial* mode.
                * **buffer_method** --
                  Method used to buffer polygons.  Only relevant for *spatial*
                  mode.
                  Allowed values are:
                  * **geos** --
                    Use the geos 1-edge-per-corner algorithm.
                  The default value is 'normal'.
                * **max_partition_size** --
                  Maximum number of points in a partition. Only relevant for
                  *spatial* mode.
                * **max_partition_score** --
                  Maximum number of points * edges in a partition. Only
                  relevant for *spatial* mode.
                * **x_column_name** --
                  Name of column containing x value of point being filtered in
                  *spatial* mode.
                * **y_column_name** --
                  Name of column containing y value of point being filtered in
                  *spatial* mode.
            view_name (str)
                If provided, then this will be the name of the view
                containing the results. Has the same naming restrictions as
                `tables <../../../concepts/tables.html>`_.  Default value is
                ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
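        Example:
            A minimal sketch (assumes ``my_table`` has a column ``id`` and
            that an existing table "whitelist" has a column ``allowed_id``
            of matching type)::

                view = my_table.filter_by_table(
                    column_name = "id",
                    source_table_name = "whitelist",
                    source_table_column_name = "allowed_id",
                    options = { "filter_mode": "in_table" },
                    view_name = "allowed_rows" )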
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_table( self.name, view_name, column_name,
                                            source_table_name,
                                            source_table_column_name, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_table
    def filter_by_value( self, is_string = None, value = 0, value_str = '',
                         column_name = None, options = {}, view_name = '' ):
        """Calculates which objects from a table has a particular value for a
        particular column. The input parameters provide a way to specify either
        a String or a Double valued column and a desired value for the column
        on which the filter is performed. The operation is synchronous, meaning
        that a response will not be returned until all the objects are fully
        available. The response payload provides the count of the resulting
        set. A new result view which satisfies the input filter restriction
        specification is also created with a view name passed in as part of the
        input payload.  Although this functionality can also be accomplished
        with the standard filter function, this specialized function is more
        efficient.
        Parameters:
            is_string (bool)
                Indicates whether the value being searched for is string or
                numeric.
            value (float)
                The value to search for.  Default value is 0.
            value_str (str)
                The string value to search for.  Default value is ''.
            column_name (str)
                Name of a column on which the filter by value would be applied.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
            view_name (str)
                If provided, then this will be the name of the view containing
                the results. Has the same naming restrictions as `tables
                <../../../concepts/tables.html>`_.  Default value is ''.
        Returns:
            A read-only GPUdbTable object.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
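        Example:
            A minimal sketch (assumes ``my_table`` has a string column
            ``state``); for a numeric column, set ``is_string`` to False
            and pass the number via ``value`` instead::

                view = my_table.filter_by_value( is_string = True,
                                                 value_str = "NY",
                                                 column_name = "state",
                                                 view_name = "ny_rows" )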
        """
        view_name = self.__process_view_name( view_name )
        response = self.db.filter_by_value( self.name, view_name, is_string,
                                            value, value_str, column_name,
                                            options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return self.create_view( view_name, response[ "count" ] ) 
    # end filter_by_value
    def lock_table( self, lock_type = 'status', options = {} ):
        """Manages global access to a table's data.  By default a table has a
        input parameter *lock_type* of *read_write*, indicating all operations
        are permitted.  A user may request a *read_only* or a *write_only*
        lock, after which only read or write operations, respectively, are
        permitted on the table until the lock is removed.  When input parameter
        *lock_type* is *no_access* then no operations are permitted on the
        table.  The lock status can be queried by setting input parameter
        *lock_type* to *status*.
        Parameters:
            lock_type (str)
                The type of lock being applied to the table. Setting it to
                *status* will return the current lock status of the table
                without changing it.  Default value is 'status'.
                Allowed values are:
                * **status** --
                  Show locked status
                * **no_access** --
                  Allow no read/write operations
                * **read_only** --
                  Allow only read operations
                * **write_only** --
                  Allow only write operations
                * **read_write** --
                  Allow all read/write operations
                The default value is 'status'.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            lock_type (str)
                Returns the lock state of the table.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
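        Example:
            A minimal sketch (assumes ``my_table`` is a GPUdbTable object
            bound to an existing table)::

                # Query the current lock status without changing it
                status = my_table.lock_table()[ "lock_type" ]
                # Make the table read-only, then restore full access
                my_table.lock_table( lock_type = "read_only" )
                my_table.lock_table( lock_type = "read_write" )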
        """
        response = self.db.lock_table( self.name, lock_type, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end lock_table
    def revoke_permission_table( self, permission = None, table_name = None,
                                 options = None ):
        """Revokes a table-level permission from a user or role.
        Parameters:
            permission (str)
                Permission to revoke from the user or role.
                Allowed values are:
                * **table_admin** --
                  Full read/write and administrative access to the table.
                * **table_insert** --
                  Insert access to the table.
                * **table_update** --
                  Update access to the table.
                * **table_delete** --
                  Delete access to the table.
                * **table_read** --
                  Read access to the table.
            table_name (str)
                Name of the table to which the permission grants access. Must
                be an existing table, collection, or view.
            options (dict of str to str)
                Optional parameters.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            name (str)
                Value of input parameter *name*.
            permission (str)
                Value of input parameter *permission*.
            table_name (str)
                Value of input parameter *table_name*.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
        """
        response = self.db.revoke_permission_table( self.name, permission,
                                                    table_name, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end revoke_permission_table
    def show_table( self, options = {} ):
        """Retrieves detailed information about a table, view, or collection,
        specified in input parameter *table_name*. If the supplied input
        parameter *table_name* is a collection, the call can return information
        about either the collection itself or the tables and views it contains.
        If input parameter *table_name* is empty, information about all
        collections and top-level tables and views can be returned.
        If the option *get_sizes* is set to *true*, then the sizes (objects and
        elements) of each table are returned (in output parameter *sizes* and
        output parameter *full_sizes*), along with the total number of objects
        in the requested table (in output parameter *total_size* and output
        parameter *total_full_size*).
        For a collection, setting the *show_children* option to *false* returns
        only information about the collection itself; setting *show_children*
        to *true* returns a list of tables and views contained in the
        collection, along with their corresponding detail.
        Parameters:
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **get_sizes** --
                  If *true*, the table sizes will be returned; otherwise they
                  will be left blank.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **show_children** --
                  If input parameter *table_name* is a collection, then *true*
                  will return information about the children of the collection,
                  and *false* will return information about the collection
                  itself. If input parameter *table_name* is a table or view,
                  *show_children* must be *false*. If input parameter
                  *table_name* is empty, then *show_children* must be *true*.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'true'.
                * **no_error_if_not_exists** --
                  If *false*, an error will be returned if the provided input
                  parameter *table_name* does not exist. If *true*, an empty
                  result will be returned instead.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **get_column_info** --
                  If *true* then column info (memory usage, etc) will be
                  returned.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
        Returns:
            The response from the server which is a dict containing the
            following entries--
            table_name (str)
                Value of input parameter *table_name*.
            table_names (list of str)
                If input parameter *table_name* is a table or view, then the
                single element of the array is input parameter *table_name*. If
                input parameter *table_name* is a collection and
                *show_children* is set to *true*, then this array is populated
                with the names of all tables and views contained by the given
                collection; if *show_children* is *false* then this array will
                only include the collection name itself. If input parameter
                *table_name* is an empty string, then the array contains the
                names of all collections and top-level tables.
            table_descriptions (list of lists of str)
                List of descriptions for the respective tables in output
                parameter *table_names*.
                Allowed values are:
                * COLLECTION
                * VIEW
                * REPLICATED
                * JOIN
                * RESULT_TABLE
            type_ids (list of str)
                Type ids of the respective tables in output parameter
                *table_names*.
            type_schemas (list of str)
                Type schemas of the respective tables in output parameter
                *table_names*.
            type_labels (list of str)
                Type labels of the respective tables in output parameter
                *table_names*.
            properties (list of dicts of str to lists of str)
                Property maps of the respective tables in output parameter
                *table_names*.
            additional_info (list of dicts of str to str)
                Additional information about the respective tables in output
                parameter *table_names*.
                Allowed values are:
                * @INNER_STRUCTURE
            sizes (list of longs)
                Empty array if the *get_sizes* option is *false*. Otherwise,
                sizes of the respective tables represented in output parameter
                *table_names*. For all but track data types, this is simply the
                number of total objects in a table. For track types, since each
                track semantically contains many individual objects, the output
                parameter *sizes* are the counts of conceptual tracks (each of
                which may be associated with multiple objects).
            full_sizes (list of longs)
                Empty array if the *get_sizes* option is *false*. Otherwise,
                number of total objects in the respective tables represented in
                output parameter *table_names*. For all but track data types,
                this is the same as output parameter *sizes*. For track types,
                since each track semantically contains many individual objects,
                output parameter *full_sizes* is the count of total objects.
            join_sizes (list of floats)
                Empty array if the *get_sizes* option is *false*. Otherwise,
                number of unfiltered objects in the cross product of the
                sub-tables in the joined-tables represented in output parameter
                *table_names*. For simple tables, this number will be the same
                as output parameter *sizes*.  For join-tables this value gives
                the number of joined-table rows that must be processed by any
                aggregate functions operating on the table.
            total_size (long)
                -1 if the *get_sizes* option is *false*. Otherwise, the sum of
                the elements of output parameter *sizes*.
            total_full_size (long)
                -1 if the *get_sizes* option is *false*. Otherwise, the sum of
                the elements of output parameter *full_sizes*.
        Raises:
            GPUdbException -- 
                Upon an error from the server.
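        Example:
            A minimal sketch (assumes ``my_table`` is a GPUdbTable object
            bound to an existing table)::

                response = my_table.show_table( options = { "get_sizes": "true" } )
                print( response[ "total_size" ] )        # total object count
                print( response[ "type_schemas" ][ 0 ] )  # table's Avro schema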
        """
        response = self.db.show_table( self.name, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end show_table
    def update_records( self, expressions = None, new_values_maps = None,
                        records_to_insert = [], records_to_insert_str = [],
                        record_encoding = 'binary', options = {} ):
        """Runs multiple predicate-based updates in a single call.  With the list
        of given expressions, any matching record's column values will be
        updated as provided in input parameter *new_values_maps*.  There is
        also an optional 'upsert' capability where if a particular predicate
        doesn't match any existing record, then a new record can be inserted.
        Note that this operation can only be run on an original table and not
        on a collection or a result view.
        This operation can update primary key values.  By default only 'pure
        primary key' predicates are allowed when updating primary key values.
        If the primary key for a table is the column 'attr1', then the
        operation will only accept predicates of the form: "attr1 == 'foo'" if
        the attr1 column is being updated.  For a composite primary key (e.g.
        columns 'attr1' and 'attr2') then this operation will only accept
        predicates of the form: "(attr1 == 'foo') and (attr2 == 'bar')".
        In other words, all primary key columns must appear in an equality
        predicate in the expressions.  Furthermore, each 'pure primary key'
        predicate must be unique within a given request.  These restrictions
        can be removed by utilizing some available options through input
        parameter *options*.
        Parameters:
            expressions (list of str)
                A list of the actual predicates, one for each update; format
                should follow the guidelines :meth:`here <.filter>`.  The user
                can provide a single element (which will be automatically
                promoted to a list internally) or a list.
            new_values_maps (list of dicts of str to str and/or None)
                List of new values for the matching records.  Each element is a
                map with (key, value) pairs where the keys are the names of the
                columns whose values are to be updated; the values are the new
                values.  The number of elements in the list should match the
                length of input parameter *expressions*.  The user can provide
                a single element (which will be automatically promoted to a
                list internally) or a list.
            records_to_insert (list of str)
                An *optional* list of new binary-avro encoded records to
                insert, one for each update.  If one of input parameter
                *expressions* does not yield a matching record to be updated,
                then the corresponding element from this list will be added to
                the table.  The user can provide a single element (which will
                be automatically promoted to a list internally) or a list.
                Default value is an empty list ( [] ).
            records_to_insert_str (list of str)
                An optional list of new json-avro encoded objects to insert,
                one for each update, to be added to the set if the particular
                update did not affect any objects.  The user can provide a
                single element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            record_encoding (str)
                Identifies which of input parameter *records_to_insert* and
                input parameter *records_to_insert_str* should be used.
                Default value is 'binary'.
                Allowed values are:
                * binary
                * json
                The default value is 'binary'.
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
                Allowed keys are:
                * **global_expression** --
                  An optional global expression to reduce the search space of
                  the predicates listed in input parameter *expressions*.
                * **bypass_safety_checks** --
                  When set to 'true', all predicates are available for primary
                  key updates.  Keep in mind that it is possible to destroy
                  data in this case, since a single predicate may match
                  multiple objects (potentially all records of a table), and
                  then updating all of those records to have the same primary
                  key will, due to the primary key uniqueness constraints,
                  effectively delete all but one of those updated records.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **update_on_existing_pk** --
                  Can be used to customize behavior when the updated primary
                  key value already exists as described in
                  :meth:`.insert_records`.
                  Allowed values are:
                  * true
                  * false
                  The default value is 'false'.
                * **record_id** --
                  ID of a single record to be updated (returned in the call to
                  :meth:`.insert_records` or
                  :meth:`.get_records_from_collection`).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            count_updated (long)
                Total number of records updated.
            counts_updated (list of longs)
                Total number of records updated per predicate in input
                parameter *expressions*.
            count_inserted (long)
                Total number of records inserted (due to expressions not
                matching any existing records).
            counts_inserted (list of longs)
                Total number of records inserted per predicate in input
                parameter *expressions* (will be either 0 or 1 for each
                expression).
        Raises:
            GPUdbException -- 
                Upon an error from the server.
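
        Example (a minimal sketch; the table variable, column names, and
        values are assumptions)::

            # Update the 'status' column of the records matching each
            # predicate; no insertion records are supplied here
            response = table.update_records(
                expressions = [ "id = 1", "id = 2" ],
                new_values_maps = [ { "status": "active" },
                                    { "status": "inactive" } ] )
            print( response[ "count_updated" ] )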
        """
        response = self.db.update_records( self.name, expressions,
                                           new_values_maps, records_to_insert,
                                           records_to_insert_str,
                                           record_encoding, options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end update_records
    def update_records_by_series( self, world_table_name = None, view_name = '',
                                  reserved = [], options = {} ):
        """Updates the view specified by input parameter *table_name* to include
        full series (track) information from the input parameter
        *world_table_name* for the series (tracks) present in the input
        parameter *view_name*.
        Parameters:
            world_table_name (str)
                Name of the table containing the complete series (track)
                information.
            view_name (str)
                Optional name of the view containing the series (tracks) which
                have to be updated.  Default value is ''.
            reserved (list of str)
                Reserved for future use.  The user can provide a single
                element (which will be automatically promoted to a list
                internally) or a list.  Default value is an empty list ( [] ).
            options (dict of str to str)
                Optional parameters.  Default value is an empty dict ( {} ).
        Returns:
            The response from the server which is a dict containing the
            following entries--
            count (int)
        Raises:
            GPUdbException -- 
                Upon an error from the server.
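
        Example (a minimal sketch; the view object and table name are
        assumptions)::

            # 'track_view' is a GPUdbTable wrapping a view holding partial
            # tracks; pull in the full tracks from the world table
            track_view.update_records_by_series(
                world_table_name = "world_tracks" )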
        """
        response = self.db.update_records_by_series( self.name,
                                                     world_table_name,
                                                     view_name, reserved,
                                                     options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end update_records_by_series
    def visualize_image_labels( self, x_column_name = None, y_column_name =
                                None, x_offset = '', y_offset = '', text_string
                                = None, font = '', text_color = '', text_angle =
                                '', text_scale = '', draw_box = '', draw_leader
                                = '', line_width = '', line_color = '',
                                fill_color = '', leader_x_column_name = '',
                                leader_y_column_name = '', filter = '', min_x =
                                None, max_x = None, min_y = None, max_y = None,
                                width = None, height = None, projection =
                                'PLATE_CARREE', options = {} ):
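        """Thin wrapper that forwards all arguments, along with this table's
        name, to the corresponding ``GPUdb.visualize_image_labels`` method and
        returns the raw response.

        Raises:
            GPUdbException --
                Upon an error from the server.
        """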
        response = self.db.visualize_image_labels( self.name, x_column_name,
                                                   y_column_name, x_offset,
                                                   y_offset, text_string, font,
                                                   text_color, text_angle,
                                                   text_scale, draw_box,
                                                   draw_leader, line_width,
                                                   line_color, fill_color,
                                                   leader_x_column_name,
                                                   leader_y_column_name, filter,
                                                   min_x, max_x, min_y, max_y,
                                                   width, height, projection,
                                                   options )
        if not _Util.is_ok( response ):
            raise GPUdbException( _Util.get_error_msg( response ) )
        return response 
    # end visualize_image_labels
# end class GPUdbTable
# ---------------------------------------------------------------------------
# GPUdbTableIterator - Iterator Class to iterate over records in a table
# ---------------------------------------------------------------------------
class GPUdbTableIterator( Iterator ):
    """Iterates over a chunk of records of a given table.  Once the initial
    chunk of records have been iterated over, a new iterator object must
    be instantiated since there is no way to guarantee that getting another
    chunk would yield the 'next' set of records without duplicates or skipping
    over records.  GPUdb does not guarantee any order or returned records via
    /get/records/\*.
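
    Example (a minimal sketch; the table name is an assumption)::

        for record in GPUdbTableIterator( "my_table", limit = 100, db = db ):
            print( record )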
    """
    def __init__( self, table, offset = 0, limit = 10000, db = None ):
        """Initiate the iterator with the given table, offset, and limit.
        Parameters:
            table (GPUdbTable)
                A GPUdbTable object or a name of a table
            offset (int)
                An integer value greater than or equal to 0.
            limit (int)
                An integer value greater than or equal to 1.
            db (GPUdb)
                Optional GPUdb object
        """
        # Validate and set the offset
        if not isinstance( offset, (int, long) ) or (offset < 0):
            raise GPUdbException( "Offset must be >= 0; given {0}"
                                  "".format( offset ) )
        self.offset = offset
        if not isinstance( limit, (int, long) ) or (limit < 1):
            raise GPUdbException( "Limit must be >= 1; given {0}"
                                  "".format( limit ) )
        self.limit = limit
        # Save the table name and the GPUdb object
        if isinstance( table, GPUdbTable ):
            self.table = table
        elif isinstance( table, (str, unicode) ):
            if not isinstance( db, GPUdb ):
                raise GPUdbException( "Argument 'db' must be a GPUdb object "
                                      "if 'table' is the table name; given "
                                      "{0}".format( type( db ) ) )
            # Create the table object
            self.table = GPUdbTable( None, table, db = db )
        else:
            raise GPUdbException( "Argument 'table' must be a GPUdbTable object"
                                  " or a string; given {0}".format( table ) )
        self.cursor = 0
        # Call /get/records to get the batch of records
        records = self.table.get_records( offset = self.offset,
                                          limit  = self.limit )
        self.records = records
    # end __init__
    def __iter__( self ):
        return self
    def next( self ): # For python2
        return self.__next__()
    # end next
    def __next__( self ): # For python3
        if (self.cursor == len( self.records ) ):
            raise StopIteration()
        cursor = self.cursor
        self.cursor += 1
        return self.records[ cursor ]
    # end __next__
# end class GPUdbTableIterator
# ---------------------------------------------------------------------------
# GPUdbTableOptions - Class to handle GPUdb table creation options
# ---------------------------------------------------------------------------
class GPUdbTableOptions(object):
    """
    Encapsulates the various options used to create a table.  The same object
    can be used on multiple tables, and option-setting calls can be chained
    together:
    ::
        opts = GPUdbTableOptions.default().collection_name('coll_name')
        table1 = GPUdbTable( None, options = opts )
        table2 = GPUdbTable( None, options = opts.is_replicated( True ) )
    """
    __no_error_if_exists          = "no_error_if_exists"
    __collection_name             = "collection_name"
    __is_collection               = "is_collection"
    __disallow_homogeneous_tables = "disallow_homogeneous_tables"
    __is_replicated               = "is_replicated"
    __foreign_keys                = "foreign_keys"
    __foreign_shard_key           = "foreign_shard_key"
    __ttl                         = "ttl"
    __chunk_size                  = "chunk_size"
    __is_result_table             = "is_result_table"
    _supported_options = [ __no_error_if_exists,
                           __collection_name,
                           __is_collection,
                           __disallow_homogeneous_tables,
                           __is_replicated,
                           __foreign_keys,
                           __foreign_shard_key,
                           __ttl,
                           __chunk_size,
                           __is_result_table
    ]
    @staticmethod
    def default():
        return GPUdbTableOptions()
    def __init__(self, _dict = None):
        """Create a default set of options for create_table().
        Parameters:
            _dict (dict)
                Optional dictionary with options already loaded.
        Returns:
            A GPUdbTableOptions object.
        """
        # Set default values
        self._no_error_if_exists          = False
        self._collection_name             = None
        self._is_collection               = False
        self._disallow_homogeneous_tables = False
        self._is_replicated               = False
        self._foreign_keys                = None
        self._foreign_shard_key           = None
        self._ttl                         = None
        self._chunk_size                  = None
        self._is_result_table             = None
        if (_dict is None):
            return # nothing to do
        if not isinstance( _dict, dict ):
            raise GPUdbException( "Argument '_dict' must be a dict; given '%s'."
                                  % type( _dict ) )
        # Else, _dict is a dict; extract options from within it
        # Check for invalid options
        unsupported_options = set( _dict.keys() ).difference( self._supported_options )
        if unsupported_options:
            raise GPUdbException( "Invalid options: %s" % unsupported_options )
        # Extract and save each option
        for (key, val) in _dict.items():
            getattr( self, key )( val )
    # end __init__
    def as_json(self):
        """Return the options as a JSON for using directly in create_table()"""
        result = {}
        if self._is_replicated is not None:
            result[ self.__is_replicated      ] = "true" if self._is_replicated else "false"
        if self._is_result_table is not None:
            result[ self.__is_result_table      ] = "true" if self._is_result_table else "false"
        if self._collection_name is not None:
            result[ self.__collection_name    ] = str( self._collection_name )
        if self._no_error_if_exists is not None:
            result[ self.__no_error_if_exists ] = "true" if self._no_error_if_exists else "false"
        if self._chunk_size is not None:
            result[ self.__chunk_size         ] = str( self._chunk_size )
        if self._is_collection is not None:
            result[ self.__is_collection      ] = "true" if self._is_collection else "false"
        if self._foreign_keys is not None:
            result[ self.__foreign_keys       ] = str( self._foreign_keys )
        if self._foreign_shard_key is not None:
            result[ self.__foreign_shard_key  ] = str( self._foreign_shard_key )
        if self._ttl is not None:
            result[ self.__ttl                ] = str( self._ttl )
        if self._disallow_homogeneous_tables is not None:
            result[ self.__disallow_homogeneous_tables ] = "true" if self._disallow_homogeneous_tables else "false"
        return result
    # end as_json
    def as_dict(self):
        """Return the options as a dict for using directly in create_table()"""
        return self.as_json()
    # end as_dict
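    # Example (an illustrative sketch): chaining setters and inspecting the
    # resulting request options.  Boolean options are rendered as the strings
    # 'true'/'false' and other values are stringified, per as_json() above:
    #
    #     opts = GPUdbTableOptions().collection_name( "coll" ).ttl( 20 )
    #     opts.as_dict()[ "collection_name" ]   # 'coll'
    #     opts.as_dict()[ "ttl" ]               # '20'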
    def no_error_if_exists(self, val):
        if isinstance( val, bool ):
            self._no_error_if_exists = val
        elif isinstance( val, basestring ) and (val.lower() in ["true", "false"]):
            self._no_error_if_exists = (val.lower() == "true")
        else:
            raise GPUdbException( "Value for 'no_error_if_exists' must be "
                                  "boolean or one of ['true', 'false']; "
                                  "given " + repr( val ) )
        return self
    # end no_error_if_exists
    def collection_name(self, val):
        if (val and not isinstance( val, basestring )):
            raise GPUdbException( "'collection_name' must be a string value; given '%s'" % val )
        self._collection_name = val
        return self
    # end collection_name
    def is_collection(self, val):
        if isinstance( val, bool ):
            self._is_collection = val
        elif isinstance( val, basestring ) and (val.lower() in ["true", "false"]):
            self._is_collection = (val.lower() == "true")
        else:
            raise GPUdbException( "Value for 'is_collection' must be "
                                  "boolean or one of ['true', 'false']; "
                                  "given " + repr( val ) )
        return self
    # end is_collection
    def disallow_homogeneous_tables(self, val):
        if isinstance( val, bool ):
            self._disallow_homogeneous_tables = val
        elif isinstance( val, basestring ) and (val.lower() in ["true", "false"]):
            self._disallow_homogeneous_tables = (val.lower() == "true")
        else:
            raise GPUdbException( "Value for 'disallow_homogeneous_tables' must be "
                                  "boolean or one of ['true', 'false']; "
                                  "given " + repr( val ) )
        return self
    # end disallow_homogeneous_tables
    def is_replicated(self, val):
        if isinstance( val, bool ):
            self._is_replicated = val
        elif isinstance( val, basestring ) and (val.lower() in ["true", "false"]):
            self._is_replicated = (val.lower() == "true")
        else:
            raise GPUdbException( "Value for 'is_replicated' must be "
                                  "boolean or one of ['true', 'false']; "
                                  "given " + repr( val ) )
        return self
    # end is_replicated
    def is_result_table(self, val):
        if isinstance( val, bool ):
            self._is_result_table = val
        elif isinstance( val, basestring ) and (val.lower() in ["true", "false"]):
            self._is_result_table = (val.lower() == "true")
        else:
            raise GPUdbException( "Value for 'is_result_table' must be "
                                  "boolean or one of ['true', 'false']; "
                                  "given " + repr( val ) )
        return self
    # end is_result_table
    def foreign_keys(self, val):
        self._foreign_keys = val
        return self
    # end foreign_keys
    def foreign_shard_key(self, val):
        self._foreign_shard_key = val
        return self
    # end foreign_shard_key
    def ttl(self, val):
        self._ttl = val
        return self
    # end ttl
    def chunk_size(self, val):
        self._chunk_size = val
        return self
    # end chunk_size
# end class GPUdbTableOptions