@@ -0,0 +1,160 @@
```python
from six import StringIO

import ibis.common as com
import ibis.util as util
import ibis.expr.operations as ops
import ibis.sql.compiler as comp

from .identifiers import quote_identifier
from .operations import _operation_registry, _name_expr


def build_ast(expr, context=None, params=None):
    builder = ClickhouseQueryBuilder(expr, context=context, params=params)
    return builder.get_result()


def _get_query(expr, context):
    ast = build_ast(expr, context)
    query = ast.queries[0]

    return query


def to_sql(expr, context=None):
    query = _get_query(expr, context)
    return query.compile()


class ClickhouseSelectBuilder(comp.SelectBuilder):

    @property
    def _select_class(self):
        return ClickhouseSelect

    def _convert_group_by(self, exprs):
        return exprs


class ClickhouseQueryBuilder(comp.QueryBuilder):

    select_builder = ClickhouseSelectBuilder

    @property
    def _make_context(self):
        return ClickhouseQueryContext


class ClickhouseQueryContext(comp.QueryContext):

    def _to_sql(self, expr, ctx):
        return to_sql(expr, context=ctx)


class ClickhouseSelect(comp.Select):

    @property
    def translator(self):
        return ClickhouseExprTranslator

    @property
    def table_set_formatter(self):
        return ClickhouseTableSetFormatter
    def format_group_by(self):
        if not self.group_by:
            # There is no aggregation, nothing to see here
            return None

        columns = ['`{0}`'.format(expr.get_name())
                   for expr in self.group_by]
        lines = ['GROUP BY {0}'.format(', '.join(columns))]

        if self.having:
            trans_exprs = [self._translate(expr) for expr in self.having]
            lines.append('HAVING {0}'.format(' AND '.join(trans_exprs)))

        return '\n'.join(lines)


class ClickhouseTableSetFormatter(comp.TableSetFormatter):

    _join_names = {
        ops.InnerJoin: 'ALL INNER JOIN',
        ops.LeftJoin: 'ALL LEFT JOIN',
        ops.AnyInnerJoin: 'ANY INNER JOIN',
        ops.AnyLeftJoin: 'ANY LEFT JOIN'
    }

    def get_result(self):
        # We have to unravel the join stack; the nesting order can be
        # arbitrary, so we do a depth-first search, push the join tokens
        # and predicates onto a flat list, then format them below.
        op = self.expr.op()

        if isinstance(op, ops.Join):
            self._walk_join_tree(op)
        else:
            self.join_tables.append(self._format_table(self.expr))

        buf = StringIO()
        buf.write(self.join_tables[0])
        for jtype, table, preds in zip(self.join_types, self.join_tables[1:],
                                       self.join_predicates):
            buf.write('\n')
            buf.write(util.indent('{0} {1}'.format(jtype, table), self.indent))

            if len(preds):
                buf.write('\n')
                fmt_preds = map(self._format_predicate, preds)
                fmt_preds = util.indent('USING ' + ', '.join(fmt_preds),
                                        self.indent * 2)
                buf.write(fmt_preds)

        return buf.getvalue()

    def _validate_join_predicates(self, predicates):
        for pred in predicates:
            op = pred.op()
            if not isinstance(op, ops.Equals):
                raise com.TranslationError('Non-equality join predicates are '
                                           'not supported')

            left_on, right_on = op.args
            if left_on.get_name() != right_on.get_name():
                raise com.TranslationError('Joining on different column '
                                           'names is not supported')

    def _format_predicate(self, predicate):
        column = predicate.op().args[0]
        return quote_identifier(column.get_name(), force=True)

    def _quote_identifier(self, name):
        return quote_identifier(name)


class ClickhouseExprTranslator(comp.ExprTranslator):

    _registry = _operation_registry
    _context_class = ClickhouseQueryContext

    def name(self, translated, name, force=True):
        return _name_expr(translated,
                          quote_identifier(name, force=force))


compiles = ClickhouseExprTranslator.compiles
rewrites = ClickhouseExprTranslator.rewrites


@rewrites(ops.FloorDivide)
def _floor_divide(expr):
    left, right = expr.op().args
    return left.div(right).floor()
```
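
For orientation, a minimal sketch of how this module's entry point is exercised. The table and schema here are made up for illustration, and the import path is inferred from the test modules later in this diff:

```python
import ibis
from ibis.clickhouse.compiler import to_sql  # path inferred from the tests

# A hypothetical unbound table, just to drive the compiler.
t = ibis.table([('a', 'int64'), ('b', 'string')], name='example')
expr = t.group_by('b').aggregate([t.a.sum().name('total')])

# build_ast wraps the expression in a ClickhouseQueryBuilder;
# to_sql compiles the first query of the resulting AST.
print(to_sql(expr))
```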
@@ -0,0 +1,115 @@
```python
_identifiers = frozenset({
    'add', 'aggregate', 'all', 'alter', 'and', 'as', 'asc', 'between',
    'by', 'cached', 'case', 'cast', 'change', 'class', 'column', 'columns',
    'comment', 'create', 'cross', 'data', 'database', 'databases', 'date',
    'datetime', 'desc', 'describe', 'distinct', 'div', 'double', 'drop',
    'else', 'end', 'escaped', 'exists', 'explain', 'external', 'fields',
    'fileformat', 'first', 'float', 'format', 'from', 'full', 'function',
    'functions', 'group', 'having', 'if', 'in', 'inner', 'inpath', 'insert',
    'int', 'integer', 'intermediate', 'interval', 'into', 'is', 'join',
    'last', 'left', 'like', 'limit', 'lines', 'load', 'location', 'metadata',
    'not', 'null', 'offset', 'on', 'or', 'order', 'outer', 'partition',
    'partitioned', 'partitions', 'real', 'refresh', 'regexp', 'rename',
    'replace', 'returns', 'right', 'row', 'schema', 'schemas', 'select',
    'set', 'show', 'stats', 'stored', 'string', 'symbol', 'table', 'tables',
    'then', 'to', 'union', 'use', 'using', 'values', 'view', 'when', 'where',
    'with'
})


def quote_identifier(name, quotechar='`', force=False):
    if force or ' ' in name or name in _identifiers:
        return '{0}{1}{0}'.format(quotechar, name)
    else:
        return name
```
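
The quoting rule is easiest to see by example; these values follow directly from the function above and match the quoting tests later in this diff:

```python
quote_identifier('table')               # '`table`'  (reserved word)
quote_identifier('has a space')         # '`has a space`'
quote_identifier('my_col')              # 'my_col'   (passes through)
quote_identifier('my_col', force=True)  # '`my_col`'
```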
@@ -0,0 +1,41 @@
```python
import os

import ibis
import pytest


CLICKHOUSE_HOST = os.environ.get('IBIS_CLICKHOUSE_HOST', 'localhost')
CLICKHOUSE_PORT = int(os.environ.get('IBIS_CLICKHOUSE_PORT', 9000))
CLICKHOUSE_USER = os.environ.get('IBIS_CLICKHOUSE_USER', 'default')
CLICKHOUSE_PASS = os.environ.get('IBIS_CLICKHOUSE_PASS', '')
IBIS_TEST_CLICKHOUSE_DB = os.environ.get('IBIS_TEST_DATA_DB', 'ibis_testing')


@pytest.fixture(scope='module')
def con():
    return ibis.clickhouse.connect(
        host=CLICKHOUSE_HOST,
        port=CLICKHOUSE_PORT,  # was parsed from the env above but never used
        user=CLICKHOUSE_USER,
        password=CLICKHOUSE_PASS,
        database=IBIS_TEST_CLICKHOUSE_DB,
    )


@pytest.fixture(scope='module')
def db(con):
    return con.database()


@pytest.fixture(scope='module')
def alltypes(db):
    return db.functional_alltypes


@pytest.fixture(scope='module')
def df(alltypes):
    return alltypes.execute()


@pytest.fixture
def translate():
    from ibis.clickhouse.compiler import ClickhouseExprTranslator
    return lambda expr: ClickhouseExprTranslator(expr).get_result()
```
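
The `translate` fixture compiles a single expression without touching a live server, which is what the translation assertions in the test modules below rely on. A minimal sketch of a test written against it (the expected string is inferred from the reduction tests that follow):

```python
def test_sum_translation(alltypes, translate):
    # translate() compiles just the aggregate expression, not a full SELECT
    assert translate(alltypes.double_col.sum()) == 'sum(`double_col`)'
```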
@@ -0,0 +1,330 @@
```python
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from operator import methodcaller
from ibis import literal as L

pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


@pytest.mark.parametrize(('reduction', 'func_translated'), [
    ('sum', 'sum'),
    ('count', 'count'),
    ('mean', 'avg'),
    ('max', 'max'),
    ('min', 'min'),
    ('std', 'stddevSamp'),
    ('var', 'varSamp')
])
def test_reduction_where(con, alltypes, translate, reduction, func_translated):
    template = '{0}If(`double_col`, `bigint_col` < 70)'
    expected = template.format(func_translated)

    method = getattr(alltypes.double_col, reduction)
    cond = alltypes.bigint_col < 70
    expr = method(where=cond)

    assert translate(expr) == expected
    # np.float was only ever an alias for the builtin float
    assert isinstance(con.execute(expr), (float, np.uint))


def test_std_var_pop(con, alltypes, translate):
    cond = alltypes.bigint_col < 70
    expr1 = alltypes.double_col.std(where=cond, how='pop')
    expr2 = alltypes.double_col.var(where=cond, how='pop')

    assert translate(expr1) == 'stddevPopIf(`double_col`, `bigint_col` < 70)'
    assert translate(expr2) == 'varPopIf(`double_col`, `bigint_col` < 70)'
    assert isinstance(con.execute(expr1), float)
    assert isinstance(con.execute(expr2), float)


@pytest.mark.parametrize('reduction', ['sum', 'count', 'max', 'min'])
def test_reduction_invalid_where(con, alltypes, reduction):
    condbad_literal = L('T')

    with pytest.raises(TypeError):
        fn = methodcaller(reduction, where=condbad_literal)
        fn(alltypes.double_col)


# @pytest.mark.parametrize(
#     ('func', 'pandas_func'),
#     [
#         # tier and histogram
#         (
#             lambda d: d.bucket([0, 10, 25, 50, 100]),
#             lambda s: pd.cut(
#                 s, [0, 10, 25, 50, 100], right=False, labels=False,
#             )
#         ),
#         (
#             lambda d: d.bucket([0, 10, 25, 50], include_over=True),
#             lambda s: pd.cut(
#                 s, [0, 10, 25, 50, np.inf], right=False, labels=False
#             )
#         ),
#         (
#             lambda d: d.bucket([0, 10, 25, 50], close_extreme=False),
#             lambda s: pd.cut(s, [0, 10, 25, 50], right=False, labels=False),
#         ),
#         (
#             lambda d: d.bucket(
#                 [0, 10, 25, 50], closed='right', close_extreme=False
#             ),
#             lambda s: pd.cut(
#                 s, [0, 10, 25, 50],
#                 include_lowest=False,
#                 right=True,
#                 labels=False,
#             )
#         ),
#         (
#             lambda d: d.bucket([10, 25, 50, 100], include_under=True),
#             lambda s: pd.cut(
#                 s, [0, 10, 25, 50, 100], right=False, labels=False
#             ),
#         ),
#     ]
# )
# def test_bucket(alltypes, df, func, pandas_func):
#     expr = func(alltypes.double_col)
#     result = expr.execute()
#     expected = pandas_func(df.double_col)
#     tm.assert_series_equal(result, expected, check_names=False)


# def test_category_label(alltypes, df):
#     t = alltypes
#     d = t.double_col
#
#     bins = [0, 10, 25, 50, 100]
#     labels = ['a', 'b', 'c', 'd']
#     bucket = d.bucket(bins)
#     expr = bucket.label(labels)
#     result = expr.execute().astype('category', ordered=True)
#     result.name = 'double_col'
#
#     expected = pd.cut(df.double_col, bins, labels=labels, right=False)
#
#     tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(('func', 'pandas_func'), [
    (lambda t, cond: t.bool_col.count(),
     lambda df, cond: df.bool_col.count()),
    # (lambda t, cond: t.bool_col.nunique(),
    #  lambda df, cond: df.bool_col.nunique()),
    (lambda t, cond: t.bool_col.approx_nunique(),
     lambda df, cond: df.bool_col.nunique()),
    # group_concat
    # (lambda t, cond: t.bool_col.any(),
    #  lambda df, cond: df.bool_col.any()),
    # (lambda t, cond: t.bool_col.all(),
    #  lambda df, cond: df.bool_col.all()),
    # (lambda t, cond: t.bool_col.notany(),
    #  lambda df, cond: ~df.bool_col.any()),
    # (lambda t, cond: t.bool_col.notall(),
    #  lambda df, cond: ~df.bool_col.all()),
    (lambda t, cond: t.double_col.sum(),
     lambda df, cond: df.double_col.sum()),
    (lambda t, cond: t.double_col.mean(),
     lambda df, cond: df.double_col.mean()),
    (lambda t, cond: t.int_col.approx_median(),
     lambda df, cond: df.int_col.median()),
    (lambda t, cond: t.double_col.min(),
     lambda df, cond: df.double_col.min()),
    (lambda t, cond: t.double_col.max(),
     lambda df, cond: df.double_col.max()),
    (lambda t, cond: t.double_col.var(),
     lambda df, cond: df.double_col.var()),
    (lambda t, cond: t.double_col.std(),
     lambda df, cond: df.double_col.std()),
    (lambda t, cond: t.double_col.var(how='sample'),
     lambda df, cond: df.double_col.var(ddof=1)),
    (lambda t, cond: t.double_col.std(how='pop'),
     lambda df, cond: df.double_col.std(ddof=0)),
    (lambda t, cond: t.bool_col.count(where=cond),
     lambda df, cond: df.bool_col[cond].count()),
    # (lambda t, cond: t.bool_col.nunique(where=cond),
    #  lambda df, cond: df.bool_col[cond].nunique()),
    # (lambda t, cond: t.bool_col.approx_nunique(where=cond),
    #  lambda df, cond: df.bool_col[cond].nunique()),
    (lambda t, cond: t.double_col.sum(where=cond),
     lambda df, cond: df.double_col[cond].sum()),
    (lambda t, cond: t.double_col.mean(where=cond),
     lambda df, cond: df.double_col[cond].mean()),
    (lambda t, cond: t.int_col.approx_median(where=cond),
     lambda df, cond: df.int_col[cond].median()),
    (lambda t, cond: t.double_col.min(where=cond),
     lambda df, cond: df.double_col[cond].min()),
    (lambda t, cond: t.double_col.max(where=cond),
     lambda df, cond: df.double_col[cond].max()),
    (lambda t, cond: t.double_col.var(where=cond),
     lambda df, cond: df.double_col[cond].var()),
    (lambda t, cond: t.double_col.std(where=cond),
     lambda df, cond: df.double_col[cond].std()),
    (lambda t, cond: t.double_col.var(where=cond, how='sample'),
     lambda df, cond: df.double_col[cond].var()),
    (lambda t, cond: t.double_col.std(where=cond, how='pop'),
     lambda df, cond: df.double_col[cond].std(ddof=0))
])
def test_aggregations(alltypes, df, func, pandas_func, translate):
    table = alltypes.limit(100)
    count = table.count().execute()
    df = df.head(int(count))

    cond = table.string_col.isin(['1', '7'])
    mask = cond.execute().astype('bool')
    expr = func(table, cond)

    result = expr.execute()
    expected = pandas_func(df, mask)

    np.testing.assert_allclose(result, expected)


# def test_group_concat(alltypes, df):
#     expr = alltypes.string_col.group_concat()
#     result = expr.execute()
#     expected = ','.join(df.string_col.dropna())
#     assert result == expected


# TODO: requires CountDistinct to support a condition
# def test_distinct_aggregates(alltypes, df, translate):
#     expr = alltypes.limit(100).double_col.nunique()
#     result = expr.execute()
#
#     assert translate(expr) == 'uniq(`double_col`)'
#     assert result == df.head(100).double_col.nunique()


@pytest.mark.parametrize('op', [
    methodcaller('sum'),
    methodcaller('mean'),
    methodcaller('min'),
    methodcaller('max'),
    methodcaller('std'),
    methodcaller('var')
])
def test_boolean_reduction(alltypes, op, df):
    result = op(alltypes.bool_col).execute()
    assert result == op(df.bool_col)


def test_anonymous_aggregate(alltypes, df, translate):
    t = alltypes
    expr = t[t.double_col > t.double_col.mean()]
    result = expr.execute().set_index('id')
    expected = df[df.double_col > df.double_col.mean()].set_index('id')
    tm.assert_frame_equal(result, expected, check_like=True)


# The expected SQL strings for these window-function tests were never
# filled in, so they remain disabled.
# def test_rank(con):
#     t = con.table('functional_alltypes')
#     expr = t.double_col.rank()
#     sqla_expr = expr.compile()
#     result = str(sqla_expr.compile(compile_kwargs=dict(literal_binds=True)))
#     expected = """\
#     assert result == expected


# def test_percent_rank(con):
#     t = con.table('functional_alltypes')
#     expr = t.double_col.percent_rank()
#     sqla_expr = expr.compile()
#     result = str(sqla_expr.compile(compile_kwargs=dict(literal_binds=True)))
#     expected = """\
#     assert result == expected


# def test_ntile(con):
#     t = con.table('functional_alltypes')
#     expr = t.double_col.ntile(7)
#     sqla_expr = expr.compile()
#     result = str(sqla_expr.compile(compile_kwargs=dict(literal_binds=True)))
#     expected = """\
#     assert result == expected


def test_boolean_summary(alltypes):
    expr = alltypes.bool_col.summary()
    result = expr.execute()
    expected = pd.DataFrame(
        [[7300, 0, 0, 1, 3650, 0.5, 2]],
        columns=[
            'count',
            'nulls',
            'min',
            'max',
            'sum',
            'mean',
            'approx_nunique',
        ]
    )
    tm.assert_frame_equal(result, expected, check_column_type=False,
                          check_dtype=False)
```
@@ -0,0 +1,172 @@
```python
import pytest
import pandas as pd

import ibis
import ibis.config as config
import ibis.expr.types as ir

from ibis import literal as L
from ibis.compat import StringIO


pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


def test_get_table_ref(db):
    table = db.functional_alltypes
    assert isinstance(table, ir.TableExpr)

    table = db['functional_alltypes']
    assert isinstance(table, ir.TableExpr)


def test_run_sql(con, db):
    query = 'SELECT * FROM {0}.`functional_alltypes`'.format(db.name)
    table = con.sql(query)

    fa = con.table('functional_alltypes')
    assert isinstance(table, ir.TableExpr)
    assert table.schema() == fa.schema()

    expr = table.limit(10)
    result = expr.execute()
    assert len(result) == 10


def test_get_schema(con, db):
    t = con.table('functional_alltypes')
    schema = con.get_schema('functional_alltypes', database=db.name)
    assert t.schema() == schema


def test_result_as_dataframe(con, alltypes):
    expr = alltypes.limit(10)

    ex_names = expr.schema().names
    result = con.execute(expr)

    assert isinstance(result, pd.DataFrame)
    assert result.columns.tolist() == ex_names
    assert len(result) == 10


def test_array_default_limit(con, alltypes):
    result = con.execute(alltypes.float_col, limit=100)
    assert len(result) == 100


def test_limit_overrides_expr(con, alltypes):
    result = con.execute(alltypes.limit(10), limit=5)
    assert len(result) == 5


def test_limit_equals_none_no_limit(alltypes):
    with config.option_context('sql.default_limit', 10):
        result = alltypes.execute(limit=None)
        assert len(result) > 10


def test_verbose_log_queries(con, db):
    queries = []

    def logger(x):
        queries.append(x)

    with config.option_context('verbose', True):
        with config.option_context('verbose_log', logger):
            con.table('functional_alltypes', database=db.name)

    expected = 'DESC {0}.`functional_alltypes`'.format(db.name)

    assert len(queries) == 1
    assert queries[0] == expected


def test_sql_query_limits(alltypes):
    table = alltypes
    with config.option_context('sql.default_limit', 100000):
        # table has 7300 rows
        assert len(table.execute()) == 7300
        # comply with limit arg for TableExpr
        assert len(table.execute(limit=10)) == 10
        # state hasn't changed
        assert len(table.execute()) == 7300
        # non-TableExpr ignores default_limit
        assert table.count().execute() == 7300
        # non-TableExpr doesn't observe limit arg
        assert table.count().execute(limit=10) == 7300
    with config.option_context('sql.default_limit', 20):
        # TableExpr observes default limit setting
        assert len(table.execute()) == 20
        # explicit limit= overrides default
        assert len(table.execute(limit=15)) == 15
        assert len(table.execute(limit=23)) == 23
        # non-TableExpr ignores default_limit
        assert table.count().execute() == 7300
        # non-TableExpr doesn't observe limit arg
        assert table.count().execute(limit=10) == 7300
    # eliminating default_limit doesn't break anything
    with config.option_context('sql.default_limit', None):
        assert len(table.execute()) == 7300
        assert len(table.execute(limit=15)) == 15
        assert len(table.execute(limit=10000)) == 7300
        assert table.count().execute() == 7300
        assert table.count().execute(limit=10) == 7300


def test_expr_compile_verify(alltypes):
    expr = alltypes.double_col.sum()

    assert isinstance(expr.compile(), str)
    assert expr.verify()


def test_api_compile_verify(alltypes):
    t = alltypes.timestamp_col

    supported = t.year()
    unsupported = t.rank()

    assert ibis.clickhouse.verify(supported)
    assert not ibis.clickhouse.verify(unsupported)


def test_database_repr(db):
    assert db.name in repr(db)


def test_database_default_current_database(con):
    db = con.database()
    assert db.name == con.current_database


def test_embedded_identifier_quoting(alltypes):
    t = alltypes

    expr = (t[[(t.double_col * 2).name('double(fun)')]]
            ['double(fun)'].sum())
    expr.execute()


def test_table_info(alltypes):
    buf = StringIO()
    alltypes.info(buf=buf)

    # getvalue() can never be None; check that something was written
    assert buf.getvalue()


def test_execute_exprs_no_table_ref(con):
    cases = [
        (L(1) + L(2), 3)
    ]

    for expr, expected in cases:
        result = con.execute(expr)
        assert result == expected

    # ExprList
    exlist = ibis.api.expr_list([L(1).name('a'),
                                 ibis.now().name('b'),
                                 L(2).log().name('c')])
    con.execute(exlist)
```
@@ -0,0 +1,28 @@
```python
import ibis
import pytest


pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


def test_column_ref_quoting(translate):
    schema = [('has a space', 'double')]
    table = ibis.table(schema)
    assert translate(table['has a space']) == '`has a space`'


def test_identifier_quoting(translate):
    schema = [('date', 'double'), ('table', 'string')]
    table = ibis.table(schema)
    assert translate(table['date']) == '`date`'
    assert translate(table['table']) == '`table`'


# TODO: fix aliased-expression translation
# def test_named_expression(alltypes, translate):
#     a, b = alltypes.get_columns(['int_col', 'float_col'])
#     expr = ((a - b) * a).name('expr')
#
#     expected = '(`int_col` - `float_col`) * `int_col` AS `expr`'
#     assert translate(expr) == expected
```
@@ -0,0 +1,53 @@
```python
import pytest
from pandas import Timestamp

import ibis
from ibis import literal as L


pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


@pytest.mark.parametrize('expr', [
    L(Timestamp('2015-01-01 12:34:56')),
    L(Timestamp('2015-01-01 12:34:56').to_pydatetime()),
    ibis.timestamp('2015-01-01 12:34:56')
])
def test_timestamp_literals(con, translate, expr):
    expected = "toDateTime('2015-01-01 12:34:56')"

    assert translate(expr) == expected
    assert con.execute(expr) == Timestamp('2015-01-01 12:34:56')


@pytest.mark.parametrize(('value', 'expected'), [
    ('simple', "'simple'"),
    ("I can't", "'I can\\'t'"),
    ('An "escape"', "'An \"escape\"'")
])
def test_string_literals(con, translate, value, expected):
    expr = ibis.literal(value)
    assert translate(expr) == expected
    # TODO: execution disabled due to a clickhouse-driver escaping problem
    # assert con.execute(expr) == expected


@pytest.mark.parametrize(('value', 'expected'), [
    (5, '5'),
    (1.5, '1.5'),
])
def test_number_literals(con, translate, value, expected):
    expr = ibis.literal(value)
    assert translate(expr) == expected
    assert con.execute(expr) == value


@pytest.mark.parametrize(('value', 'expected'), [
    (True, '1'),
    (False, '0'),
])
def test_boolean_literals(con, translate, value, expected):
    expr = ibis.literal(value)
    assert translate(expr) == expected
    assert con.execute(expr) == value
```
@@ -0,0 +1,275 @@
```python
import pytest
import operator
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from datetime import date, datetime

import ibis
import ibis.expr.datatypes as dt
from ibis import literal as L


pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


# def test_not(alltypes):
#     t = alltypes.limit(10)
#     expr = t.projection([(~t.double_col.isnull()).name('double_col')])
#     result = expr.execute().double_col
#     expected = ~t.execute().double_col.isnull()
#     tm.assert_series_equal(result, expected)


# @pytest.mark.parametrize('op', [operator.invert, operator.neg])
# def test_not_and_negate_bool(con, op, df):
#     t = con.table('functional_alltypes').limit(10)
#     expr = t.projection([op(t.bool_col).name('bool_col')])
#     result = expr.execute().bool_col
#     expected = op(df.head(10).bool_col)
#     tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(('left', 'right', 'type'), [
    (L('2017-04-01'), date(2017, 4, 2), dt.date),
    (date(2017, 4, 2), L('2017-04-01'), dt.date),
    (L('2017-04-01 01:02:33'), datetime(2017, 4, 1, 1, 3, 34), dt.timestamp),
    (datetime(2017, 4, 1, 1, 3, 34), L('2017-04-01 01:02:33'), dt.timestamp)
])
@pytest.mark.parametrize('op', [
    operator.eq,
    operator.ne,
    operator.lt,
    operator.le,
    operator.gt,
    operator.ge,
])
def test_string_temporal_compare(con, op, left, right, type):
    expr = op(left, right)
    result = con.execute(expr)
    left_raw = con.execute(L(left).cast(type))
    right_raw = con.execute(L(right).cast(type))
    expected = op(left_raw, right_raw)
    assert result == expected


@pytest.mark.parametrize(('func', 'left', 'right', 'expected'), [
    (operator.add, L(3), L(4), 7),
    (operator.sub, L(3), L(4), -1),
    (operator.mul, L(3), L(4), 12),
    (operator.truediv, L(12), L(4), 3),
    (operator.pow, L(12), L(2), 144),
    (operator.mod, L(12), L(5), 2),
    (operator.truediv, L(7), L(2), 3.5),
    (operator.floordiv, L(7), L(2), 3),
    (lambda x, y: x.floordiv(y), L(7), 2, 3),
    (lambda x, y: x.rfloordiv(y), L(2), 7, 3)
])
def test_binary_arithmetic(con, func, left, right, expected):
    expr = func(left, right)
    result = con.execute(expr)
    assert result == expected


@pytest.mark.parametrize(('op', 'expected'), [
    (lambda a, b: a + b, '`int_col` + `tinyint_col`'),
    (lambda a, b: a - b, '`int_col` - `tinyint_col`'),
    (lambda a, b: a * b, '`int_col` * `tinyint_col`'),
    (lambda a, b: a / b, '`int_col` / `tinyint_col`'),
    (lambda a, b: a ** b, 'pow(`int_col`, `tinyint_col`)'),
    (lambda a, b: a < b, '`int_col` < `tinyint_col`'),
    (lambda a, b: a <= b, '`int_col` <= `tinyint_col`'),
    (lambda a, b: a > b, '`int_col` > `tinyint_col`'),
    (lambda a, b: a >= b, '`int_col` >= `tinyint_col`'),
    (lambda a, b: a == b, '`int_col` = `tinyint_col`'),
    (lambda a, b: a != b, '`int_col` != `tinyint_col`')
])
def test_binary_infix_operators(con, alltypes, translate, op, expected):
    a, b = alltypes.int_col, alltypes.tinyint_col
    expr = op(a, b)
    assert translate(expr) == expected
    assert len(con.execute(expr))


# TODO: test boolean operators, e.g.
# (h & bool_col, '`h` AND (`a` > 0)'),
# (h | bool_col, '`h` OR (`a` > 0)'),
# (h ^ bool_col, 'xor(`h`, (`a` > 0))')


@pytest.mark.parametrize(('op', 'expected'), [
    (lambda a, b, c: (a + b) + c,
     '(`int_col` + `tinyint_col`) + `double_col`'),
    (lambda a, b, c: a.log() + c,
     'log(`int_col`) + `double_col`'),
    (lambda a, b, c: (b + (-(a + c))),
     '`tinyint_col` + (-(`int_col` + `double_col`))')
])
def test_binary_infix_parenthesization(con, alltypes, translate, op, expected):
    a = alltypes.int_col
    b = alltypes.tinyint_col
    c = alltypes.double_col

    expr = op(a, b, c)
    assert translate(expr) == expected
    assert len(con.execute(expr))


def test_between(con, alltypes, translate):
    expr = alltypes.int_col.between(0, 10)
    assert translate(expr) == '`int_col` BETWEEN 0 AND 10'
    assert len(con.execute(expr))


@pytest.mark.parametrize(('left', 'right'), [
    (L('2017-03-31').cast(dt.date), date(2017, 4, 2)),
    (date(2017, 3, 31), L('2017-04-02').cast(dt.date))
])
def test_string_temporal_compare_between_dates(con, left, right):
    expr = ibis.timestamp('2017-04-01').cast(dt.date).between(left, right)
    result = con.execute(expr)
    assert result


@pytest.mark.parametrize(('left', 'right'), [
    (L('2017-03-31 00:02:33').cast(dt.timestamp),
     datetime(2017, 4, 1, 1, 3, 34)),
    (datetime(2017, 3, 31, 0, 2, 33),
     L('2017-04-01 01:03:34').cast(dt.timestamp))
])
def test_string_temporal_compare_between_datetimes(con, left, right):
    expr = ibis.timestamp('2017-04-01 00:02:34').between(left, right)
    result = con.execute(expr)
    assert result


def test_field_in_literals(con, alltypes, translate):
    expr = alltypes.string_col.isin(['foo', 'bar', 'baz'])
    assert translate(expr) == "`string_col` IN ('foo', 'bar', 'baz')"
    assert len(con.execute(expr))

    expr = alltypes.string_col.notin(['foo', 'bar', 'baz'])
    assert translate(expr) == "`string_col` NOT IN ('foo', 'bar', 'baz')"
    assert len(con.execute(expr))


@pytest.mark.parametrize('column', [
    'int_col',
    'float_col',
    'bool_col'
])
def test_negate(con, alltypes, translate, column):
    # ClickHouse represents booleans as UInt8, so negation works here too
    expr = -getattr(alltypes, column)
    assert translate(expr) == '-`{0}`'.format(column)
    assert len(con.execute(expr))


@pytest.mark.parametrize('field', [
    'tinyint_col',
    'smallint_col',
    'int_col',
    'bigint_col',
    'float_col',
    'double_col',
    'year',
    'month',
])
def test_negate_non_boolean(con, alltypes, field, df):
    t = alltypes.limit(10)
    expr = t.projection([(-t[field]).name(field)])
    result = expr.execute()[field]
    expected = -df.head(10)[field]
    tm.assert_series_equal(result, expected)


# def test_negate_boolean(con, alltypes, df):
#     t = alltypes.limit(10)
#     expr = t.projection([(~t.bool_col).name('bool_col')])
#     result = expr.execute().bool_col
#     expected = ~df.head(10).bool_col
#     tm.assert_series_equal(result, expected)


def test_negate_literal(con):
    expr = -L(5.245)
    assert round(con.execute(expr), 3) == -5.245


@pytest.mark.parametrize(('op', 'pandas_op'), [
    (lambda t: (t.double_col > 20).ifelse(10, -20),
     lambda df: pd.Series(np.where(df.double_col > 20, 10, -20),
                          dtype='int16')),
    (lambda t: (t.double_col > 20).ifelse(10, -20).abs(),
     lambda df: (pd.Series(np.where(df.double_col > 20, 10, -20))
                 .abs()
                 .astype('uint16'))),
])
def test_ifelse(alltypes, df, op, pandas_op, translate):
    expr = op(alltypes)
    result = expr.execute()
    result.name = None
    expected = pandas_op(df)

    tm.assert_series_equal(result, expected)


def test_simple_case(con, alltypes, translate):
    t = alltypes
    expr = (t.string_col.case()
            .when('foo', 'bar')
            .when('baz', 'qux')
            .else_('default')
            .end())

    expected = """CASE `string_col`
  WHEN 'foo' THEN 'bar'
  WHEN 'baz' THEN 'qux'
  ELSE 'default'
END"""
    assert translate(expr) == expected
    assert len(con.execute(expr))


def test_search_case(con, alltypes, translate):
    t = alltypes
    expr = (ibis.case()
            .when(t.float_col > 0, t.int_col * 2)
            .when(t.float_col < 0, t.int_col)
            .else_(0)
            .end())

    expected = """CASE
  WHEN `float_col` > 0 THEN `int_col` * 2
  WHEN `float_col` < 0 THEN `int_col`
  ELSE 0
END"""
    assert translate(expr) == expected
    assert len(con.execute(expr))


# TODO: ClickHouse raises an incompatible-type error for this bucket CASE
# def test_bucket_to_case(con, alltypes, translate):
#     buckets = [0, 10, 25, 50]
#
#     expr1 = alltypes.float_col.bucket(buckets)
#     expected1 = """\
# CASE
#   WHEN (`float_col` >= 0) AND (`float_col` < 10) THEN 0
#   WHEN (`float_col` >= 10) AND (`float_col` < 25) THEN 1
#   WHEN (`float_col` >= 25) AND (`float_col` <= 50) THEN 2
#   ELSE Null
# END"""
#
#     assert translate(expr1) == expected1
#     assert len(con.execute(expr1))
```
@@ -0,0 +1,17 @@
```python
import pytest
import pandas as pd


pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse


def test_column_types(alltypes):
    df = alltypes.execute()
    assert df.tinyint_col.dtype.name == 'int8'
    assert df.smallint_col.dtype.name == 'int16'
    assert df.int_col.dtype.name == 'int32'
    assert df.bigint_col.dtype.name == 'int64'
    assert df.float_col.dtype.name == 'float32'
    assert df.double_col.dtype.name == 'float64'
    # pd.api.types is the public home of this check;
    # pd.core.common.is_datetime64_dtype is private and has been removed
    assert pd.api.types.is_datetime64_dtype(df.timestamp_col.dtype)
```
@@ -0,0 +1,81 @@
```python
pandas_to_clickhouse = {
    'object': 'String',
    'uint64': 'UInt64',
    'uint32': 'UInt32',
    'uint16': 'UInt16',
    'float64': 'Float64',
    'float32': 'Float32',
    'uint8': 'UInt8',
    'int64': 'Int64',
    'int32': 'Int32',
    'int16': 'Int16',
    'int8': 'Int8',
    'bool': 'UInt8',
    'datetime64[D]': 'Date',
    'datetime64[ns]': 'DateTime'
}

clickhouse_to_pandas = {
    'UInt8': 'uint8',
    'UInt16': 'uint16',
    'UInt32': 'uint32',
    'UInt64': 'uint64',
    'Int8': 'int8',
    'Int16': 'int16',
    'Int32': 'int32',
    'Int64': 'int64',
    'Float64': 'float64',
    'Float32': 'float32',
    'String': 'object',
    'FixedString': 'object',  # TODO
    'Null': 'object',
    'Date': 'datetime64[ns]',
    'DateTime': 'datetime64[ns]',
    'Nullable(UInt8)': 'float32',
    'Nullable(UInt16)': 'float32',
    'Nullable(UInt32)': 'float32',
    'Nullable(UInt64)': 'float64',
    'Nullable(Int8)': 'float32',
    'Nullable(Int16)': 'float32',
    'Nullable(Int32)': 'float32',
    'Nullable(Int64)': 'float64',
    'Nullable(Float32)': 'float32',
    'Nullable(Float64)': 'float64',
    'Nullable(String)': 'object',
    'Nullable(FixedString)': 'object',  # TODO
    'Nullable(Date)': 'Date',
    'Nullable(DateTime)': 'DateTime'
}

ibis_to_clickhouse = {
    'null': 'Null',
    'int8': 'Int8',
    'int16': 'Int16',
    'int32': 'Int32',
    'int64': 'Int64',
    'float': 'Float32',
    'double': 'Float64',
    'string': 'String',
    'boolean': 'UInt8',
    'date': 'Date',
    'timestamp': 'DateTime',
    'decimal': 'UInt64'  # see yandex/clickhouse#253
}

clickhouse_to_ibis = {
    'Null': 'null',
    'UInt64': 'int64',
    'UInt32': 'int32',
    'UInt16': 'int16',
    'UInt8': 'int8',
    'Int64': 'int64',
    'Int32': 'int32',
    'Int16': 'int16',
    'Int8': 'int8',
    'Float64': 'double',
    'Float32': 'float',
    'String': 'string',
    'FixedString': 'string',
    'Date': 'date',
    'DateTime': 'timestamp'
}
```
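
As a sanity check on the tables above, a hypothetical helper (not part of this diff) showing how `clickhouse_to_pandas` might be consulted, falling back to the inner type for `Nullable(...)` wrappers that are not listed explicitly:

```python
import re


def to_pandas_dtype(clickhouse_type):
    # Exact matches cover the explicit Nullable(...) entries above.
    if clickhouse_type in clickhouse_to_pandas:
        return clickhouse_to_pandas[clickhouse_type]
    # Otherwise unwrap Nullable(T) and look up T instead.
    match = re.match(r'Nullable\((\w+)\)', clickhouse_type)
    if match:
        return clickhouse_to_pandas.get(match.group(1), 'object')
    return 'object'


to_pandas_dtype('Int32')            # 'int32'
to_pandas_dtype('Nullable(Int32)')  # 'float32' (NaN stands in for NULL)
```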