From f0e23c2f23fd7befdcb32814b18a63ed3edea157 Mon Sep 17 00:00:00 2001
From: Matvey Arye
Date: Tue, 17 Apr 2018 12:12:28 -0400
Subject: [PATCH] Optimize planning times when hypertables have many chunks

This planner optimization reduces planning times when a hypertable has
many chunks. It does this by expanding hypertable chunks manually,
eliding the `expand_inherited_tables` logic used by PG.

Slow planning times were previously seen because
`expand_inherited_tables` expands all chunks of a hypertable, without
regard to constraints present in the query. Then, `get_relation_info`
is called on all chunks before constraint exclusion. Getting the
statistics on many chunks ends up being expensive because
RelationGetNumberOfBlocks has to open the file for each relation. This
gets even worse under high concurrency.

This logic solves this by expanding only the chunks needed to fulfil
the query instead of all chunks. In effect, it moves chunk exclusion up
in the planning process. But, we actually don't use constraint
exclusion here, but rather a variant of range exclusion implemented by
HypertableRestrictInfo.
---
 src/CMakeLists.txt | 9 +-
 src/chunk.c | 60 +-
 src/chunk.h | 1 +
 src/constraint_aware_append.c | 11 +-
 src/dimension_slice.c | 83 +++
 src/dimension_slice.h | 1 +
 src/hypertable_restrict_info.c | 358 ++++++++++
 src/hypertable_restrict_info.h | 22 +
 src/init.c | 5 -
 src/parse_analyze.c | 121 ----
 src/parse_rewrite.c | 206 ------
 src/parse_rewrite.h | 11 -
 src/plan_expand_hypertable.c | 253 +++++++
 src/plan_expand_hypertable.h | 27 +
 src/planner.c | 137 +++-
 src/planner_import.c | 108 +++
 src/planner_import.h | 18 +
 test/expected/append.out | 1 -
 test/expected/append_unoptimized.out | 1 -
 test/expected/append_x_diff.out | 40 +-
 test/expected/insert.out | 2 +-
 test/expected/partitioning.out | 16 +-
 .../plan_expand_hypertable_optimized.out | 643 ++++++++++++++++++
 .../plan_expand_hypertable_results_diff.out | 6 +
 test/expected/sql_query.out | 24 +-
 test/sql/CMakeLists.txt | 2 +
 .../include/plan_expand_hypertable_load.sql | 51 ++
 .../include/plan_expand_hypertable_query.sql | 88 +++
 test/sql/plan_expand_hypertable_optimized.sql | 4 +
 .../plan_expand_hypertable_results_diff.sql | 35 +
 30 files changed, 1945 insertions(+), 399 deletions(-)
 create mode 100644 src/hypertable_restrict_info.c
 create mode 100644 src/hypertable_restrict_info.h
 delete mode 100644 src/parse_analyze.c
 delete mode 100644 src/parse_rewrite.c
 delete mode 100644 src/parse_rewrite.h
 create mode 100644 src/plan_expand_hypertable.c
 create mode 100644 src/plan_expand_hypertable.h
 create mode 100644 src/planner_import.c
 create mode 100644 src/planner_import.h
 create mode 100644 test/expected/plan_expand_hypertable_optimized.out
 create mode 100644 test/expected/plan_expand_hypertable_results_diff.out
 create mode 100644 test/sql/include/plan_expand_hypertable_load.sql
 create mode 100644 test/sql/include/plan_expand_hypertable_query.sql
 create mode 100644 test/sql/plan_expand_hypertable_optimized.sql
 create mode 100644 test/sql/plan_expand_hypertable_results_diff.sql

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index ddea956172b..7e011b6c160 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -56,10 +56,12 @@ set(HEADERS
 hypertable_cache.h
 hypertable.h
 hypertable_insert.h
+ hypertable_restrict_info.h
 indexing.h
- parse_rewrite.h
 partitioning.h
 planner_utils.h
+ planner_import.h
+ plan_expand_hypertable.h
 process_utility.h
 scanner.h
 subspace_store.h
@@ -93,13 +95,14 @@ set(SOURCES
 hypertable.c
 hypertable_cache.c
hypertable_insert.c + hypertable_restrict_info.c indexing.c init.c - parse_analyze.c - parse_rewrite.c partitioning.c planner.c + planner_import.c planner_utils.c + plan_expand_hypertable.c process_utility.c scanner.c sort_transform.c diff --git a/src/chunk.c b/src/chunk.c index 44aa674256b..6cf884a2987 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -762,7 +762,7 @@ chunk_is_complete(ChunkScanCtx *scanctx, Chunk *chunk) if (scanctx->space->num_dimensions != chunk->constraints->num_dimension_constraints) return false; - scanctx->data = chunk; + scanctx->data = lappend(scanctx->data, chunk); return true; } @@ -772,10 +772,21 @@ chunk_is_complete(ChunkScanCtx *scanctx, Chunk *chunk) static Chunk * chunk_scan_ctx_get_chunk(ChunkScanCtx *ctx) { - ctx->data = NULL; + ctx->data = NIL; chunk_scan_ctx_foreach_chunk(ctx, chunk_is_complete, 1); + return (ctx->data == NIL ? NULL : linitial(ctx->data)); +} + +/* Finds all chunks that have a complete set of constraints. */ +static List * +chunk_scan_ctx_get_chunk_list(ChunkScanCtx *ctx) +{ + ctx->data = NIL; + + chunk_scan_ctx_foreach_chunk(ctx, chunk_is_complete, 0); + return ctx->data; } @@ -837,6 +848,51 @@ chunk_find(Hyperspace *hs, Point *p) return chunk; } +List * +chunk_find_all_oids(Hyperspace *hs, List *dimension_vecs, LOCKMODE lockmode) +{ + List *chunk_list, + *oid_list = NIL; + ChunkScanCtx ctx; + ListCell *lc; + + /* The scan context will keep the state accumulated during the scan */ + chunk_scan_ctx_init(&ctx, hs, NULL); + + /* Do not abort the scan when one chunk is found */ + ctx.early_abort = false; + + /* Scan all dimensions for slices enclosing the point */ + foreach(lc, dimension_vecs) + { + DimensionVec *vec = lfirst(lc); + + dimension_slice_and_chunk_constraint_join(&ctx, vec); + } + + /* Get a list of chunks that each have N matching dimension constraints */ + chunk_list = chunk_scan_ctx_get_chunk_list(&ctx); + + chunk_scan_ctx_destroy(&ctx); + + foreach(lc, chunk_list) + { + Chunk *chunk = lfirst(lc); + + /* Fill in the rest of the chunk's data from the chunk table */ + chunk_fill_stub(chunk, false); + + /* chunk constraints left unfilled */ + + if (lockmode != NoLock) + LockRelationOid(chunk->table_id, lockmode); + + oid_list = lappend_oid(oid_list, chunk->table_id); + } + + return oid_list; +} + Chunk * chunk_copy(Chunk *chunk) { diff --git a/src/chunk.h b/src/chunk.h index 446307d9a8d..097469f163f 100644 --- a/src/chunk.h +++ b/src/chunk.h @@ -66,6 +66,7 @@ extern Chunk *chunk_create(Hypertable *ht, Point *p, const char *schema, const c extern Chunk *chunk_create_stub(int32 id, int16 num_constraints); extern void chunk_free(Chunk *chunk); extern Chunk *chunk_find(Hyperspace *hs, Point *p); +extern List *chunk_find_all_oids(Hyperspace *hs, List *dimension_vecs, LOCKMODE lockmode); extern Chunk *chunk_copy(Chunk *chunk); extern Chunk *chunk_get_by_name(const char *schema_name, const char *table_name, int16 num_constraints, bool fail_if_not_found); extern Chunk *chunk_get_by_relid(Oid relid, int16 num_constraints, bool fail_if_not_found); diff --git a/src/constraint_aware_append.c b/src/constraint_aware_append.c index 9673700a289..9ef17a55b8f 100644 --- a/src/constraint_aware_append.c +++ b/src/constraint_aware_append.c @@ -420,11 +420,14 @@ constraint_aware_append_path_create(PlannerInfo *root, Hypertable *ht, Path *sub break; } - appinfo = linitial(root->append_rel_list); - relid = root->simple_rte_array[appinfo->child_relid]->relid; + if (list_length(root->append_rel_list) > 1) + { + appinfo = linitial(root->append_rel_list); + 
relid = root->simple_rte_array[appinfo->child_relid]->relid; - if (relid == ht->main_table_relid) - root->append_rel_list = list_delete_first(root->append_rel_list); + if (relid == ht->main_table_relid) + root->append_rel_list = list_delete_first(root->append_rel_list); + } return &path->cpath.path; } diff --git a/src/dimension_slice.c b/src/dimension_slice.c index 430be6ff08c..e580ac37efa 100644 --- a/src/dimension_slice.c +++ b/src/dimension_slice.c @@ -6,6 +6,9 @@ #include #include #include +#include +#include +#include #include "catalog.h" #include "dimension_slice.h" @@ -166,6 +169,84 @@ dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit) return dimension_vec_sort(&slices); } +/* + * Look for all ranges where value > lower_bound and value < upper_bound + * + */ +DimensionVec * +dimension_slice_scan_range_limit(int32 dimension_id, StrategyNumber start_strategy, int64 start_value, StrategyNumber end_strategy, int64 end_value, int limit) +{ + ScanKeyData scankey[3]; + DimensionVec *slices = dimension_vec_create(limit > 0 ? limit : DIMENSION_VEC_DEFAULT_SIZE); + int nkeys = 1; + + /* + * Perform an index scan for slices matching the dimension's ID and which + * enclose the coordinate. + */ + ScanKeyInit(&scankey[0], Anum_dimension_slice_dimension_id_range_start_range_end_idx_dimension_id, + BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(dimension_id)); + if (start_strategy != InvalidStrategy) + { + Oid opno = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT8OID, INT8OID, start_strategy); + Oid proc = get_opcode(opno); + + Assert(OidIsValid(proc)); + + ScanKeyInit(&scankey[nkeys++], + Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_start, + start_strategy, + proc, + Int64GetDatum(start_value)); + } + if (end_strategy != InvalidStrategy) + { + Oid opno = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT8OID, INT8OID, end_strategy); + Oid proc = get_opcode(opno); + + Assert(OidIsValid(proc)); + + /* + * range_end is stored as exclusive, so add 1 to the value being + * searched. Also avoid overflow + */ + if (end_value != PG_INT64_MAX) + { + end_value++; + + /* + * If getting as input INT64_MAX-1, need to remap the incremented + * value back to INT64_MAX-1 + */ + end_value = REMAP_LAST_COORDINATE(end_value); + } + else + { + /* + * The point with INT64_MAX gets mapped to INT64_MAX-1 so + * incrementing that gets you to INT_64MAX + */ + end_value = PG_INT64_MAX; + } + + ScanKeyInit(&scankey[nkeys++], + Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_end, + end_strategy, + proc, + Int64GetDatum(end_value)); + } + + dimension_slice_scan_limit_internal(DIMENSION_SLICE_DIMENSION_ID_RANGE_START_RANGE_END_IDX, + scankey, + nkeys, + dimension_vec_tuple_found, + &slices, + limit, + AccessShareLock); + + return dimension_vec_sort(&slices); +} + /* * Scan for slices that collide/overlap with the given range. 
* @@ -408,6 +489,7 @@ dimension_slice_cut(DimensionSlice *to_cut, DimensionSlice *other, int64 coord) { /* Cut "before" the coordinate */ to_cut->fd.range_start = other->fd.range_end; + return true; } else if (other->fd.range_start > coord && @@ -415,6 +497,7 @@ dimension_slice_cut(DimensionSlice *to_cut, DimensionSlice *other, int64 coord) { /* Cut "after" the coordinate */ to_cut->fd.range_end = other->fd.range_start; + return true; } diff --git a/src/dimension_slice.h b/src/dimension_slice.h index 1c91d609f36..cfc1b8590cf 100644 --- a/src/dimension_slice.h +++ b/src/dimension_slice.h @@ -25,6 +25,7 @@ typedef struct DimensionVec DimensionVec; typedef struct Hypercube Hypercube; extern DimensionVec *dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit); +extern DimensionVec *dimension_slice_scan_range_limit(int32 dimension_id, StrategyNumber start_strategy, int64 start_value, StrategyNumber end_strategy, int64 end_value, int limit); extern DimensionVec *dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, int64 range_end, int limit); extern Hypercube *dimension_slice_point_scan(Hyperspace *space, int64 point[]); extern DimensionSlice *dimension_slice_scan_for_existing(DimensionSlice *slice); diff --git a/src/hypertable_restrict_info.c b/src/hypertable_restrict_info.c new file mode 100644 index 00000000000..8040fce7abb --- /dev/null +++ b/src/hypertable_restrict_info.c @@ -0,0 +1,358 @@ +#include +#include +#include +#include +#include +#include + +#include "hypertable_restrict_info.h" +#include "dimension.h" +#include "utils.h" +#include "dimension_slice.h" +#include "chunk.h" +#include "dimension_vector.h" +#include "partitioning.h" + +typedef struct DimensionRestrictInfo +{ + Dimension *dimension; +} DimensionRestrictInfo; + +typedef struct DimensionRestrictInfoOpen +{ + DimensionRestrictInfo base; + int64 lower_bound; /* internal time representation */ + StrategyNumber lower_strategy; + int64 upper_bound; /* internal time representation */ + StrategyNumber upper_strategy; +} DimensionRestrictInfoOpen; + +typedef struct DimensionRestrictInfoClosed +{ + DimensionRestrictInfo base; + int32 value; /* hash value */ + StrategyNumber strategy; /* either Invalid or equal */ +} DimensionRestrictInfoClosed; + +static DimensionRestrictInfoOpen * +dimension_restrict_info_open_create(Dimension *d) +{ + DimensionRestrictInfoOpen *new = palloc(sizeof(DimensionRestrictInfoOpen)); + + new->base.dimension = d; + new->lower_strategy = InvalidStrategy; + new->upper_strategy = InvalidStrategy; + return new; +} + +static DimensionRestrictInfoClosed * +dimension_restrict_info_closed_create(Dimension *d) +{ + DimensionRestrictInfoClosed *new = palloc(sizeof(DimensionRestrictInfoClosed)); + + new->base.dimension = d; + new->strategy = InvalidStrategy; + return new; +} + +static DimensionRestrictInfo * +dimension_restrict_info_create(Dimension *d) +{ + switch (d->type) + { + case DIMENSION_TYPE_OPEN: + return (DimensionRestrictInfo *) dimension_restrict_info_open_create(d); + case DIMENSION_TYPE_CLOSED: + return (DimensionRestrictInfo *) dimension_restrict_info_closed_create(d); + default: + elog(ERROR, "unknown dimension type"); + } +} + +static bool +dimension_restrict_info_open_add(DimensionRestrictInfoOpen *dri, StrategyNumber strategy, Const *c) +{ + int64 value = time_value_to_internal(c->constvalue, c->consttype); + + switch (strategy) + { + case BTLessEqualStrategyNumber: + case BTLessStrategyNumber: + if (dri->upper_strategy == InvalidStrategy || value < 
dri->upper_bound) + { + dri->upper_strategy = strategy; + dri->upper_bound = value; + } + return true; + case BTGreaterEqualStrategyNumber: + case BTGreaterStrategyNumber: + if (dri->lower_strategy == InvalidStrategy || value > dri->lower_bound) + { + dri->lower_strategy = strategy; + dri->lower_bound = value; + } + return true; + case BTEqualStrategyNumber: + dri->lower_bound = value; + dri->upper_bound = value; + dri->lower_strategy = BTGreaterEqualStrategyNumber; + dri->upper_strategy = BTLessEqualStrategyNumber; + return true; + default: + return false; + } +} + +static bool +dimension_restrict_info_closed_add(DimensionRestrictInfoClosed *dri, StrategyNumber strategy, Const *c) +{ + int64 value = partitioning_func_apply(dri->base.dimension->partitioning, c->constvalue); + + switch (strategy) + { + case BTEqualStrategyNumber: + dri->value = value; + dri->strategy = strategy; + return true; + default: + return false; + } +} + + +static bool +dimension_restrict_info_add(DimensionRestrictInfo *dri, int strategy, Const *c) +{ + switch (dri->dimension->type) + { + case DIMENSION_TYPE_OPEN: + return dimension_restrict_info_open_add((DimensionRestrictInfoOpen *) dri, strategy, c); + case DIMENSION_TYPE_CLOSED: + return dimension_restrict_info_closed_add((DimensionRestrictInfoClosed *) dri, strategy, c); + default: + elog(ERROR, "unknown dimension type"); + } +} + +static DimensionVec * +dimension_restrict_info_open_slices(DimensionRestrictInfoOpen *dri) +{ + /* basic idea: slice_end > lower_bound && slice_start < upper_bound */ + return dimension_slice_scan_range_limit(dri->base.dimension->fd.id, dri->upper_strategy, dri->upper_bound, dri->lower_strategy, dri->lower_bound, 0); +} + +static DimensionVec * +dimension_restrict_info_closed_slices(DimensionRestrictInfoClosed *dri) +{ + if (dri->strategy == BTEqualStrategyNumber) + { + /* slice_end >= value && slice_start <= value */ + return dimension_slice_scan_range_limit(dri->base.dimension->fd.id, + BTLessEqualStrategyNumber, + dri->value, + BTGreaterEqualStrategyNumber, + dri->value, + 0); + } + else + { + /* get all slices */ + return dimension_slice_scan_range_limit(dri->base.dimension->fd.id, + InvalidStrategy, + -1, + InvalidStrategy, + -1, + 0); + } +} + +static DimensionVec * +dimension_restrict_info_slices(DimensionRestrictInfo *dri) +{ + switch (dri->dimension->type) + { + case DIMENSION_TYPE_OPEN: + return dimension_restrict_info_open_slices((DimensionRestrictInfoOpen *) dri); + case DIMENSION_TYPE_CLOSED: + return dimension_restrict_info_closed_slices((DimensionRestrictInfoClosed *) dri); + default: + elog(ERROR, "unknown dimension type"); + } +} + +typedef struct HypertableRestrictInfo +{ + int16 max_attr; + int num_base_restrictions; /* number of base restrictions + * successfully added */ + DimensionRestrictInfo *dimension_restriction[FLEXIBLE_ARRAY_MEMBER]; /* Sparse. 
key is + * attrOffset of the + * dimension */ +} HypertableRestrictInfo; + + +HypertableRestrictInfo * +hypertable_restrict_info_create(RelOptInfo *rel, Hypertable *ht) +{ + HypertableRestrictInfo *res = palloc0(sizeof(HypertableRestrictInfo) + sizeof(DimensionRestrictInfo *) * rel->max_attr); + int i; + + res->max_attr = rel->max_attr; + for (i = 0; i < ht->space->num_dimensions; i++) + { + DimensionRestrictInfo *dri = dimension_restrict_info_create(&ht->space->dimensions[i]); + + res->dimension_restriction[AttrNumberGetAttrOffset(ht->space->dimensions[i].column_attno)] = dri; + } + + return res; +} + +static DimensionRestrictInfo * +hypertable_restrict_info_get(HypertableRestrictInfo *hri, int attno) +{ + Assert(attno > 0 && AttrNumberGetAttrOffset(attno) < hri->max_attr); + return hri->dimension_restriction[AttrNumberGetAttrOffset(attno)]; +} + +static bool +hypertable_restrict_info_add_op_expr(HypertableRestrictInfo *hri, PlannerInfo *root, OpExpr *clause) +{ + Expr *leftop, + *rightop, + *expr; + DimensionRestrictInfo *dri; + Var *v; + Const *c; + Oid op_oid; + RangeTblEntry *rte; + Oid columntype; + TypeCacheEntry *tce; + int strategy; + Oid lefttype, + righttype; + + if (list_length(clause->args) != 2) + return false; + + /* Same as constraint_exclusion */ + if (contain_mutable_functions((Node *) clause)) + return false; + + leftop = (Expr *) get_leftop((Expr *) clause); + if (IsA(leftop, RelabelType)) + leftop = ((RelabelType *) leftop)->arg; + rightop = (Expr *) get_rightop((Expr *) clause); + if (IsA(rightop, RelabelType)) + rightop = ((RelabelType *) rightop)->arg; + + if (IsA(leftop, Var)) + { + v = (Var *) leftop; + expr = rightop; + op_oid = clause->opno; + } + else if (IsA(rightop, Var)) + { + v = (Var *) rightop; + expr = leftop; + op_oid = get_commutator(clause->opno); + } + else + return false; + + dri = hypertable_restrict_info_get(hri, v->varattno); + /* the attribute is not a dimension */ + if (dri == NULL) + return false; + + expr = (Expr *) eval_const_expressions(root, (Node *) expr); + + if (!IsA(expr, Const) ||!OidIsValid(op_oid) || !op_strict(op_oid)) + return false; + + c = (Const *) expr; + + rte = rt_fetch(v->varno, root->parse->rtable); + + columntype = get_atttype(rte->relid, dri->dimension->column_attno); + tce = lookup_type_cache(columntype, TYPECACHE_BTREE_OPFAMILY); + + if (!op_in_opfamily(op_oid, tce->btree_opf)) + return false; + + get_op_opfamily_properties(op_oid, + tce->btree_opf, + false, + &strategy, + &lefttype, + &righttype); + + return dimension_restrict_info_add(dri, strategy, c); +} + +static void +hypertable_restrict_info_add_restrict_info(HypertableRestrictInfo *hri, PlannerInfo *root, RestrictInfo *ri) +{ + Expr *e = ri->clause; + + if (!IsA(e, OpExpr)) + return; + + if (hypertable_restrict_info_add_op_expr(hri, root, (OpExpr *) e)) + hri->num_base_restrictions++; +} + +void +hypertable_restrict_info_add(HypertableRestrictInfo *hri, + PlannerInfo *root, + List *base_restrict_infos) +{ + ListCell *lc; + + foreach(lc, base_restrict_infos) + { + RestrictInfo *ri = lfirst(lc); + + hypertable_restrict_info_add_restrict_info(hri, root, ri); + } +} + +bool +hypertable_restrict_info_has_restrictions(HypertableRestrictInfo *hri) +{ + return hri->num_base_restrictions > 0; +} + +List * +hypertable_restrict_info_get_chunk_oids(HypertableRestrictInfo *hri, Hypertable *ht, LOCKMODE lockmode) +{ + int i; + List *dimension_vecs = NIL; + + for (i = 0; i < hri->max_attr; i++) + { + DimensionRestrictInfo *dri = hri->dimension_restriction[i]; + DimensionVec 
*dv; + + if (NULL == dri) + continue; + + dv = dimension_restrict_info_slices(dri); + + Assert(dv->num_slices >= 0); + + /* + * If there are no matching slices in any single dimension, the result + * will be empty + */ + if (dv->num_slices == 0) + return NIL; + + dimension_vecs = lappend(dimension_vecs, dv); + + } + + Assert(list_length(dimension_vecs) == ht->space->num_dimensions); + return chunk_find_all_oids(ht->space, dimension_vecs, lockmode); +} diff --git a/src/hypertable_restrict_info.h b/src/hypertable_restrict_info.h new file mode 100644 index 00000000000..c9af33bdee8 --- /dev/null +++ b/src/hypertable_restrict_info.h @@ -0,0 +1,22 @@ +#ifndef TIMESCALEDB_HYPERTABLE_RESTRICT_INFO_H +#define TIMESCALEDB_HYPERTABLE_RESTRICT_INFO_H + +#include "hypertable.h" + + +/* HypertableRestrictInfo represents restrictions on a hypertable. It uses + * range exclusion logic to figure out which chunks can match the description */ +typedef struct HypertableRestrictInfo HypertableRestrictInfo; + +extern HypertableRestrictInfo *hypertable_restrict_info_create(RelOptInfo *rel, Hypertable *ht); + +/* Add restrictions based on a List of RestrictInfo */ +extern void hypertable_restrict_info_add(HypertableRestrictInfo *hri, PlannerInfo *root, List *base_restrict_infos); + +/* Some restrictions were added */ +extern bool hypertable_restrict_info_has_restrictions(HypertableRestrictInfo *hri); + +/* Get a list of chunk oids for chunks whose constraints match the restriction clauses */ +extern List *hypertable_restrict_info_get_chunk_oids(HypertableRestrictInfo *hri, Hypertable *ht, LOCKMODE lockmode); + +#endif /* TIMESCALEDB_HYPERTABLE_RESTRICT_INFO_H */ diff --git a/src/init.c b/src/init.c index 900345aa70a..9ce8be1c1b9 100644 --- a/src/init.c +++ b/src/init.c @@ -36,9 +36,6 @@ extern void _process_utility_fini(void); extern void _event_trigger_init(void); extern void _event_trigger_fini(void); -extern void _parse_analyze_init(void); -extern void _parse_analyze_fini(void); - extern void PGDLLEXPORT _PG_init(void); extern void PGDLLEXPORT _PG_fini(void); @@ -58,7 +55,6 @@ _PG_init(void) _planner_init(); _event_trigger_init(); _process_utility_init(); - _parse_analyze_init(); _guc_init(); } @@ -70,7 +66,6 @@ _PG_fini(void) * document any exceptions. */ _guc_fini(); - _parse_analyze_fini(); _process_utility_fini(); _event_trigger_fini(); _planner_fini(); diff --git a/src/parse_analyze.c b/src/parse_analyze.c deleted file mode 100644 index 457ec039fa8..00000000000 --- a/src/parse_analyze.c +++ /dev/null @@ -1,121 +0,0 @@ -#include -#include -#include - -#include "cache.h" -#include "hypertable.h" -#include "hypertable_cache.h" -#include "extension.h" -#include "parse_rewrite.h" - -void _parse_analyze_init(void); -void _parse_analyze_fini(void); - -static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL; - -typedef struct HypertableQueryCtx -{ - Query *parse; - Query *parent; - CmdType cmdtype; - Cache *hcache; - Hypertable *hentry; -} HypertableQueryCtx; - -/* - * Identify queries on a hypertable by walking the query tree. If the query is - * indeed on a hypertable, setup the necessary state and/or make modifications - * to the query tree. 
- */ -static bool -hypertable_query_walker(Node *node, void *context) -{ - if (node == NULL) - return false; - - if (IsA(node, RangeTblEntry)) - { - RangeTblEntry *rte = (RangeTblEntry *) node; - HypertableQueryCtx *ctx = (HypertableQueryCtx *) context; - - if (rte->rtekind == RTE_RELATION) - { - Hypertable *hentry = hypertable_cache_get_entry(ctx->hcache, rte->relid); - - if (hentry != NULL) - ctx->hentry = hentry; - } - - return false; - } - - if (IsA(node, Query)) - { - bool result; - HypertableQueryCtx *ctx = (HypertableQueryCtx *) context; - CmdType old = ctx->cmdtype; - Query *query = (Query *) node; - Query *oldparent = ctx->parent; - - /* adjust context */ - ctx->cmdtype = query->commandType; - ctx->parent = query; - - result = query_tree_walker(ctx->parent, hypertable_query_walker, - context, QTW_EXAMINE_RTES); - - /* restore context */ - ctx->cmdtype = old; - ctx->parent = oldparent; - - return result; - } - - return expression_tree_walker(node, hypertable_query_walker, context); -} - -static void -timescaledb_post_parse_analyze(ParseState *pstate, Query *query) -{ - if (NULL != prev_post_parse_analyze_hook) - /* Call any earlier hooks */ - prev_post_parse_analyze_hook(pstate, query); - - if (extension_is_loaded()) - { - HypertableQueryCtx context = { - .parse = query, - .parent = query, - .cmdtype = query->commandType, - .hcache = hypertable_cache_pin(), - .hentry = NULL, - }; - - /* The query for explains is in the utility statement */ - if (query->commandType == CMD_UTILITY && - IsA(query->utilityStmt, ExplainStmt)) - query = (Query *) ((ExplainStmt *) query->utilityStmt)->query; - - hypertable_query_walker((Node *) query, &context); - - /* note assumes 1 hypertable per query */ - if (NULL != context.hentry) - parse_rewrite_query(pstate, query, context.hentry); - - cache_release(context.hcache); - } -} - -void -_parse_analyze_init(void) -{ - prev_post_parse_analyze_hook = post_parse_analyze_hook; - post_parse_analyze_hook = timescaledb_post_parse_analyze; -} - -void -_parse_analyze_fini(void) -{ - post_parse_analyze_hook = prev_post_parse_analyze_hook; - prev_post_parse_analyze_hook = NULL; -} diff --git a/src/parse_rewrite.c b/src/parse_rewrite.c deleted file mode 100644 index 2479cdceb78..00000000000 --- a/src/parse_rewrite.c +++ /dev/null @@ -1,206 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cache.h" -#include "hypertable.h" -#include "partitioning.h" -#include "parse_rewrite.h" -#include "compat.h" - -typedef struct AddPartFuncQualCtx -{ - ParseState *pstate; - Query *parse; - Hypertable *hentry; -} AddPartFuncQualCtx; - - -/* - * Returns the partitioning info for a var if the var is a partitioning - * column. If the var is not a partitioning column return NULL. - */ -static inline PartitioningInfo * -get_partitioning_info_for_partition_column_var(Var *var_expr, AddPartFuncQualCtx *context) -{ - Hypertable *ht = context->hentry; - RangeTblEntry *rte = rt_fetch(var_expr->varno, context->parse->rtable); - char *varname; - Dimension *dim; - - if (rte->relid != ht->main_table_relid) - return NULL; - - varname = get_rte_attribute_name(rte, var_expr->varattno); - - dim = hyperspace_get_dimension_by_name(ht->space, DIMENSION_TYPE_CLOSED, varname); - - if (dim != NULL) - return dim->partitioning; - - return NULL; -} - -/* - * Creates an expression for partioning_func(var_expr, partitioning_mod) = - * partitioning_func(const_expr, partitioning_mod). 
This function makes a copy - * of all nodes given in input. - */ -static Expr * -create_partition_func_equals_const(ParseState *pstate, PartitioningInfo *pi, Var *var_expr, Const *const_expr) -{ - Expr *op_expr; - List *func_name = partitioning_func_qualified_name(&pi->partfunc); - Node *var_node; - Node *const_node; - List *args_func_var; - List *args_func_const; - FuncCall *fc_var; - FuncCall *fc_const; - Node *f_var; - Node *f_const; - - var_node = (Node *) copyObject(var_expr); - const_node = (Node *) copyObject(const_expr); - - args_func_var = list_make1(var_node); - args_func_const = list_make1(const_node); - - fc_var = makeFuncCall(func_name, args_func_var, -1); - fc_const = makeFuncCall(func_name, args_func_const, -1); - - f_var = ParseFuncOrColumnCompat(pstate, - func_name, - args_func_var, - fc_var, - -1); - - assign_expr_collations(pstate, f_var); - - f_const = ParseFuncOrColumnCompat(pstate, - func_name, - args_func_const, - fc_const, - -1); - - op_expr = make_op_compat(pstate, - list_make2(makeString("pg_catalog"), makeString("=")), - f_var, - f_const, - -1); - - return op_expr; -} - -static Node * -add_partitioning_func_qual_mutator(Node *node, AddPartFuncQualCtx *context) -{ - if (node == NULL) - return NULL; - - /* - * Detect partitioning_column = const. If not fall-thru. If detected, - * replace with partitioning_column = const AND - * partitioning_func(partition_column) = partitioning_func(const) - */ - if (IsA(node, OpExpr)) - { - OpExpr *exp = (OpExpr *) node; - - if (list_length(exp->args) == 2) - { - /* only look at var op const or const op var; */ - Node *left = (Node *) linitial(exp->args); - Node *right = (Node *) lsecond(exp->args); - Var *var_expr = NULL; - Node *other_expr = NULL; - - if (IsA(left, Var)) - { - var_expr = (Var *) left; - other_expr = right; - } - else if (IsA(right, Var)) - { - var_expr = (Var *) right; - other_expr = left; - } - - if (var_expr != NULL) - { - if (!IsA(other_expr, Const)) - { - /* try to simplify the non-var expression */ - other_expr = eval_const_expressions(NULL, other_expr); - } - if (IsA(other_expr, Const)) - { - /* have a var and const, make sure the op is = */ - Const *const_expr = (Const *) other_expr; - Oid eq_oid = OpernameGetOprid(list_make2(makeString("pg_catalog"), makeString("=")), exprType(left), exprType(right)); - - if (eq_oid == exp->opno) - { - /* - * I now have a var = const. Make sure var is a - * partitioning column - */ - PartitioningInfo *pi = - get_partitioning_info_for_partition_column_var(var_expr, - context); - - if (pi != NULL) - { - /* The var is a partitioning column */ - Expr *partitioning_clause = - create_partition_func_equals_const(context->pstate, pi, var_expr, const_expr); - - return (Node *) make_andclause(list_make2(node, partitioning_clause)); - - } - } - } - } - } - } - - return expression_tree_mutator(node, add_partitioning_func_qual_mutator, - (void *) context); -} - -/* - * This function does a transformation that allows postgres's native constraint - * exclusion to exclude space partititions when the query contains equivalence - * qualifiers on the space partition key. 
- * - * This function goes through the upper-level qual of a parse tree and finds - * quals of the form: - * partitioning_column = const - * It transforms them into the qual: - * partitioning_column = const AND - * partitioning_func(partition_column, partitioning_mod) = - * partitioning_func(const, partitioning_mod) - * - * This tranformation helps because the check constraint on a table is of the - * form CHECK(partitioning_func(partition_column, partitioning_mod) BETWEEN X - * AND Y). - */ -void -parse_rewrite_query(ParseState *pstate, Query *parse, Hypertable *ht) -{ - AddPartFuncQualCtx context = { - .pstate = pstate, - .parse = parse, - .hentry = ht, - }; - - parse->jointree->quals = add_partitioning_func_qual_mutator(parse->jointree->quals, &context); -} diff --git a/src/parse_rewrite.h b/src/parse_rewrite.h deleted file mode 100644 index d3ee0d8fd42..00000000000 --- a/src/parse_rewrite.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef TIMESCALEDB_PARSE_REWRITE_H -#define TIMESCALEDB_PARSE_REWRITE_H - -#include -#include - -typedef struct Hypertable Hypertable; - -extern void parse_rewrite_query(ParseState *pstate, Query *parse, Hypertable *ht); - -#endif /* TIMESCALEDB_PARSE_REWRITE_H */ diff --git a/src/plan_expand_hypertable.c b/src/plan_expand_hypertable.c new file mode 100644 index 00000000000..843b5a3f278 --- /dev/null +++ b/src/plan_expand_hypertable.c @@ -0,0 +1,253 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "plan_expand_hypertable.h" +#include "hypertable.h" +#include "hypertable_restrict_info.h" +#include "planner_import.h" +#include "compat.h" + + + +typedef struct CollectQualCtx +{ + PlannerInfo *root; + RelOptInfo *rel; + List *result; +} CollectQualCtx; + + +static bool +collect_quals_walker(Node *node, CollectQualCtx *ctx) +{ + if (node == NULL) + return false; + + if (IsA(node, FromExpr)) + { + FromExpr *f = (FromExpr *) node; + ListCell *lc; + + foreach(lc, (List *) f->quals) + { + Node *qual = (Node *) lfirst(lc); + RestrictInfo *restrictinfo; + + Relids relids = pull_varnos(qual); + + if (bms_num_members(relids) != 1 || !bms_is_member(ctx->rel->relid, relids)) + continue; +#if PG96 + restrictinfo = make_restrictinfo((Expr *) qual, + true, + false, + false, + relids, + NULL, + NULL); +#else + restrictinfo = make_restrictinfo((Expr *) qual, + true, + false, + false, + ctx->root->qual_security_level, + relids, + NULL, + NULL); +#endif + ctx->result = lappend(ctx->result, restrictinfo); + } + } + + return expression_tree_walker(node, collect_quals_walker, ctx); +} + +/* Since baserestrictinfo is not yet set by the planner, we have to derive + * it ourselves. It's safe for us to miss some restrict info clauses (this + * will just results in more chunks being included) so this does not need + * to be as comprehensive as the PG native derivation. This is inspired + * by the derivation in `deconstruct_recurse` in PG */ + +static List * +get_restrict_info(PlannerInfo *root, RelOptInfo *rel) +{ + CollectQualCtx ctx = { + .root = root, + .rel = rel, + .result = NIL, + }; + + collect_quals_walker((Node *) root->parse->jointree, &ctx); + + return ctx.result; +} + +static List * +find_children_oids(HypertableRestrictInfo *hri, Hypertable *ht, LOCKMODE lockmode) +{ + List *result; + + /* + * optimization: using the HRI only makes sense if we are not using all + * the chunks, otherwise using the cached inheritance hierarchy is faster. 
+ */ + if (!hypertable_restrict_info_has_restrictions(hri)) + return find_all_inheritors(ht->main_table_relid, lockmode, NULL);; + + /* always include parent again, just as find_all_inheritors does */ + result = list_make1_oid(ht->main_table_relid); + + /* add chunks */ + result = list_concat(result, + hypertable_restrict_info_get_chunk_oids(hri, + ht, + lockmode)); + return result; +} + +bool +plan_expand_hypertable_valid_hypertable(Hypertable *ht, Query *parse, Index rti, RangeTblEntry *rte) +{ + if (ht == NULL || + /* inheritance enabled */ + rte->inh == false || + /* row locks not necessary */ + parse->rowMarks != NIL || + /* not update and/or delete */ + 0 != parse->resultRelation) + return false; + + return true; +} + +/* Inspired by expand_inherited_rtentry but expands + * a hypertable chunks into an append rekationship */ +void +plan_expand_hypertable_chunks(Hypertable *ht, + PlannerInfo *root, + Oid relation_objectid, + bool inhparent, + RelOptInfo *rel) +{ + RangeTblEntry *rte = rt_fetch(rel->relid, root->parse->rtable); + List *inh_oids; + Oid parent_oid = relation_objectid; + ListCell *l; + Relation oldrelation = heap_open(parent_oid, NoLock); + LOCKMODE lockmode = AccessShareLock; + Query *parse = root->parse; + Index rti = rel->relid; + List *appinfos = NIL; + HypertableRestrictInfo *hri; + PlanRowMark *oldrc; + List *restrictinfo; + + /* double check our permissions are valid */ + Assert(rti != parse->resultRelation); + oldrc = get_plan_rowmark(root->rowMarks, rti); + if (oldrc && RowMarkRequiresRowShareLock(oldrc->markType)) + { + elog(ERROR, "Unexpected permissions requested"); + } + + + /* mark the parent as an append relation */ + rte->inh = true; + + /* + * rel->baserestrictinfo is not yet set at this point in the planner. So + * do a simple version of that deduction here. + */ + restrictinfo = get_restrict_info(root, rel); + + /* + * This is where the magic happens: use our HypertableRestrictInfo + * infrastructure to deduce the appropriate chunks using our range + * exclusion + */ + hri = hypertable_restrict_info_create(rel, ht); + hypertable_restrict_info_add(hri, root, restrictinfo); + inh_oids = find_children_oids(hri, ht, lockmode); + + /* + * the simple_*_array structures have already been set, we need to add the + * children to them + */ + root->simple_rel_array_size += list_length(inh_oids); + root->simple_rel_array = repalloc(root->simple_rel_array, root->simple_rel_array_size * sizeof(RelOptInfo *)); + root->simple_rte_array = repalloc(root->simple_rte_array, root->simple_rel_array_size * sizeof(RangeTblEntry *)); + + + foreach(l, inh_oids) + { + Oid child_oid = lfirst_oid(l); + Relation newrelation; + RangeTblEntry *childrte; + Index child_rtindex; + AppendRelInfo *appinfo; + + /* Open rel if needed; we already have required locks */ + if (child_oid != parent_oid) + newrelation = heap_open(child_oid, NoLock); + else + newrelation = oldrelation; + + /* chunks cannot be temp tables */ + Assert(!RELATION_IS_OTHER_TEMP(newrelation)); + + /* + * Build an RTE for the child, and attach to query's rangetable list. + * We copy most fields of the parent's RTE, but replace relation OID + * and relkind, and set inh = false. Also, set requiredPerms to zero + * since all required permissions checks are done on the original RTE. + * Likewise, set the child's securityQuals to empty, because we only + * want to apply the parent's RLS conditions regardless of what RLS + * properties individual children may have. 
(This is an intentional + * choice to make inherited RLS work like regular permissions checks.) + * The parent securityQuals will be propagated to children along with + * other base restriction clauses, so we don't need to do it here. + */ + childrte = copyObject(rte); + childrte->relid = child_oid; + childrte->relkind = newrelation->rd_rel->relkind; + childrte->inh = false; + /* clear the magic bit */ + childrte->ctename = NULL; + childrte->requiredPerms = 0; + childrte->securityQuals = NIL; + parse->rtable = lappend(parse->rtable, childrte); + child_rtindex = list_length(parse->rtable); + root->simple_rte_array[child_rtindex] = childrte; + root->simple_rel_array[child_rtindex] = NULL; + +#if !PG96 + Assert(childrte->relkind != RELKIND_PARTITIONED_TABLE); +#endif + + appinfo = makeNode(AppendRelInfo); + appinfo->parent_relid = rti; + appinfo->child_relid = child_rtindex; + appinfo->parent_reltype = oldrelation->rd_rel->reltype; + appinfo->child_reltype = newrelation->rd_rel->reltype; + make_inh_translation_list(oldrelation, newrelation, child_rtindex, + &appinfo->translated_vars); + appinfo->parent_reloid = parent_oid; + appinfos = lappend(appinfos, appinfo); + + + /* Close child relations, but keep locks */ + if (child_oid != parent_oid) + heap_close(newrelation, NoLock); + } + + heap_close(oldrelation, NoLock); + + root->append_rel_list = list_concat(root->append_rel_list, appinfos); +} diff --git a/src/plan_expand_hypertable.h b/src/plan_expand_hypertable.h new file mode 100644 index 00000000000..e6d59e3c67a --- /dev/null +++ b/src/plan_expand_hypertable.h @@ -0,0 +1,27 @@ +#ifndef TIMESCALEDB_PLAN_EXPAND_HYPERTABLE_H +#define TIMESCALEDB_PLAN_EXPAND_HYPERTABLE_H + +#include "hypertable.h" + +/* This planner optimization reduces planning times when a hypertable has many chunks. + * It does this by expanding hypertable chunks manually, eliding the `expand_inherited_tables` + * logic used by PG. + * + * Slow planning time were previously seen because `expand_inherited_tables` expands all chunks of + * a hypertable, without regard to constraints present in the query. Then, `get_relation_info` is + * called on all chunks before constraint exclusion. Getting the statistics on many chunks ends + * up being expensive because RelationGetNumberOfBlocks has to open the file for each relation. + * This gets even worse under high concurrency. + * + * This logic solves this by expanding only the chunks needed to fulfil the query instead of all chunks. + * In effect, it moves chunk exclusion up in the planning process. But, we actually don't use constraint + * exclusion here, but rather a variant of range exclusion implemented by HypertableRestrictInfo. 
+ * */ + +/* Can we use hypertable expansion here */ +extern bool plan_expand_hypertable_valid_hypertable(Hypertable *ht, Query *parse, Index rti, RangeTblEntry *rte); + +/* Do the expansion */ +extern void plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, Oid relation_objectid, bool inhparent, RelOptInfo *rel); + +#endif /* TIMESCALEDB_PLAN_EXPAND_HYPERTABLE_H */ diff --git a/src/planner.c b/src/planner.c index f3546fbb48b..9332be2931a 100644 --- a/src/planner.c +++ b/src/planner.c @@ -8,9 +8,17 @@ #include #include #include +#include +#include +#include +#include #include "compat-msvc-enter.h" #include +#include +#include +#include +#include #include "compat-msvc-exit.h" #include "hypertable_cache.h" @@ -22,12 +30,18 @@ #include "planner_utils.h" #include "hypertable_insert.h" #include "constraint_aware_append.h" +#include "partitioning.h" +#include "dimension_slice.h" +#include "dimension_vector.h" +#include "chunk.h" +#include "plan_expand_hypertable.h" void _planner_init(void); void _planner_fini(void); static planner_hook_type prev_planner_hook; static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook; +static get_relation_info_hook_type prev_get_relation_info_hook; typedef struct ModifyTableWalkerCtx { @@ -142,12 +156,80 @@ modifytable_plan_walker(Plan **planptr, void *pctx) } } +#define CTE_NAME_HYPERTABLES "hypertable_parent" + +static void +mark_rte_hypertable_parent(RangeTblEntry *rte) +{ + rte->inh = false; + + /* + * CTE name is never used for regular tables so use that as a signal that + * we performed the substitution. + */ + Assert(rte->ctename == NULL); + rte->ctename = CTE_NAME_HYPERTABLES; +} + +static bool +is_rte_hypertable(RangeTblEntry *rte) +{ + return rte->inh == false && rte->ctename != NULL && strcmp(rte->ctename, CTE_NAME_HYPERTABLES) == 0; +} + +/* This turns off inheritance on hypertables where we will do chunk + * expansion ourselves. This prevents postgres from expanding the inheritance + * tree itself. We will expand the chunks in timescaledb_get_relation_info_hook. */ +static bool +turn_off_inheritance_walker(Node *node, Cache *hc) +{ + if (node == NULL) + return false; + + if (IsA(node, Query)) + { + Query *query = (Query *) node; + ListCell *lc; + int rti = 1; + + foreach(lc, query->rtable) + { + RangeTblEntry *rte = lfirst(lc); + Hypertable *ht = hypertable_cache_get_entry(hc, rte->relid); + + if (rte->inh && plan_expand_hypertable_valid_hypertable(ht, query, rti, rte)) + mark_rte_hypertable_parent(rte); + + rti++; + } + + return query_tree_walker(query, turn_off_inheritance_walker, hc, 0); + } + + return expression_tree_walker(node, turn_off_inheritance_walker, hc); +} + static PlannedStmt * timescaledb_planner(Query *parse, int cursor_opts, ParamListInfo bound_params) { PlannedStmt *plan_stmt = NULL; + + if (extension_is_loaded() && !guc_disable_optimizations && parse->resultRelation == 0) + { + Cache *hc = hypertable_cache_pin(); + + /* + * turn of inheritance on hypertables we will expand ourselves in + * timescaledb_get_relation_info_hook + */ + turn_off_inheritance_walker((Node *) parse, hc); + + cache_release(hc); + } + + if (prev_planner_hook != NULL) { /* Call any earlier hooks */ @@ -197,7 +279,7 @@ should_optimize_append(const Path *path) /* * If there are clauses that have mutable functions, this path is ripe for - * execution-time optimization + * execution-time optimization. 
*/ foreach(lc, rel->baserestrictinfo) { @@ -228,7 +310,6 @@ is_append_parent(RelOptInfo *rel, RangeTblEntry *rte) rte->relkind == RELKIND_RELATION; } - static void timescaledb_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, @@ -323,6 +404,54 @@ timescaledb_set_rel_pathlist(PlannerInfo *root, cache_release(hcache); } +/* This hook is meant to editorialize about the information + * the planner gets about a relation. We hijack it here + * to also expand the append relation for hypertables. */ +static void +timescaledb_get_relation_info_hook(PlannerInfo *root, + Oid relation_objectid, + bool inhparent, + RelOptInfo *rel) +{ + RangeTblEntry *rte; + + if (prev_get_relation_info_hook != NULL) + prev_get_relation_info_hook(root, relation_objectid, inhparent, rel); + + if (!extension_is_loaded()) + return; + + rte = rt_fetch(rel->relid, root->parse->rtable); + + /* + * We expand the hypertable chunks into an append relation. Previously, in + * `turn_off_inheritance_walker` we suppressed this expansion. This hook + * is really the first one that's called after the initial planner setup + * and so it's convenient to do the expansion here. Note that this is + * after the usual expansion happens in `expand_inherited_tables` (called + * in `subquery_planner`). Note also that `get_relation_info` (the + * function that calls this hook at the end) is the expensive function to + * run on many chunks so the expansion really cannot be called before this + * hook. + */ + if (is_rte_hypertable(rte)) + { + + Cache *hcache = hypertable_cache_pin(); + Hypertable *ht = hypertable_cache_get_entry(hcache, rte->relid); + + Assert(ht != NULL); + + plan_expand_hypertable_chunks(ht, + root, + relation_objectid, + inhparent, + rel); + + cache_release(hcache); + } +} + void _planner_init(void) { @@ -330,6 +459,9 @@ _planner_init(void) planner_hook = timescaledb_planner; prev_set_rel_pathlist_hook = set_rel_pathlist_hook; set_rel_pathlist_hook = timescaledb_set_rel_pathlist; + + prev_get_relation_info_hook = get_relation_info_hook; + get_relation_info_hook = timescaledb_get_relation_info_hook; } void @@ -337,4 +469,5 @@ _planner_fini(void) { planner_hook = prev_planner_hook; set_rel_pathlist_hook = prev_set_rel_pathlist_hook; + get_relation_info_hook = prev_get_relation_info_hook; } diff --git a/src/planner_import.c b/src/planner_import.c new file mode 100644 index 00000000000..ef18e0ff641 --- /dev/null +++ b/src/planner_import.c @@ -0,0 +1,108 @@ +/* + * This file contains functions copied verbatim from the PG core planner. + * These function had to be copied since they were declared static in the core planner, but we need them for our + * manipulations. 
+ */ +#include +#include +#include + +#include "planner_import.h" + + +/* copied verbatim from prepunion.c */ +void +make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, + List **translated_vars) +{ + List *vars = NIL; + TupleDesc old_tupdesc = RelationGetDescr(oldrelation); + TupleDesc new_tupdesc = RelationGetDescr(newrelation); + int oldnatts = old_tupdesc->natts; + int newnatts = new_tupdesc->natts; + int old_attno; + + for (old_attno = 0; old_attno < oldnatts; old_attno++) + { + Form_pg_attribute att; + char *attname; + Oid atttypid; + int32 atttypmod; + Oid attcollation; + int new_attno; + + att = old_tupdesc->attrs[old_attno]; + if (att->attisdropped) + { + /* Just put NULL into this list entry */ + vars = lappend(vars, NULL); + continue; + } + attname = NameStr(att->attname); + atttypid = att->atttypid; + atttypmod = att->atttypmod; + attcollation = att->attcollation; + + /* + * When we are generating the "translation list" for the parent table + * of an inheritance set, no need to search for matches. + */ + if (oldrelation == newrelation) + { + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (old_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); + continue; + } + + /* + * Otherwise we have to search for the matching column by name. + * There's no guarantee it'll have the same column position, because + * of cases like ALTER TABLE ADD COLUMN and multiple inheritance. + * However, in simple cases it will be the same column number, so try + * that before we go groveling through all the columns. + * + * Note: the test for (att = ...) != NULL cannot fail, it's just a + * notational device to include the assignment into the if-clause. + */ + if (old_attno < newnatts && + (att = new_tupdesc->attrs[old_attno]) != NULL && + !att->attisdropped && + strcmp(attname, NameStr(att->attname)) == 0) + new_attno = old_attno; + else + { + for (new_attno = 0; new_attno < newnatts; new_attno++) + { + att = new_tupdesc->attrs[new_attno]; + if (!att->attisdropped && + strcmp(attname, NameStr(att->attname)) == 0) + break; + } + if (new_attno >= newnatts) + elog(ERROR, "could not find inherited attribute \"%s\" of relation \"%s\"", + attname, RelationGetRelationName(newrelation)); + } + + /* Found it, check type and collation match */ + if (atttypid != att->atttypid || atttypmod != att->atttypmod) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type", + attname, RelationGetRelationName(newrelation)); + if (attcollation != att->attcollation) + elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's collation", + attname, RelationGetRelationName(newrelation)); + + vars = lappend(vars, makeVar(newvarno, + (AttrNumber) (new_attno + 1), + atttypid, + atttypmod, + attcollation, + 0)); + } + + *translated_vars = vars; +} diff --git a/src/planner_import.h b/src/planner_import.h new file mode 100644 index 00000000000..1490ce611ef --- /dev/null +++ b/src/planner_import.h @@ -0,0 +1,18 @@ +#ifndef TIMESCALEDB_PLANNER_IMPORT_H +#define TIMESCALEDB_PLANNER_IMPORT_H + +#include +#include +#include + +/* + * This file contains functions copied verbatim from the PG core planner. + * These function had to be copied since they were declared static in the core planner, but we need them for our + * manipulations. 
+ */ + +extern void make_inh_translation_list(Relation oldrelation, Relation newrelation, + Index newvarno, + List **translated_vars); + +#endif /* TIMESCALEDB_PLANNER_IMPORT_H */ diff --git a/test/expected/append.out b/test/expected/append.out index 0b50adf9656..b63ecb50c98 100644 --- a/test/expected/append.out +++ b/test/expected/append.out @@ -158,7 +158,6 @@ psql:include/append.sql:68: NOTICE: Stable function now_s() called! EXPLAIN (costs off) SELECT * FROM append_test WHERE time > now_i() - interval '2 months' ORDER BY time; -psql:include/append.sql:75: NOTICE: Immutable function now_i() called! psql:include/append.sql:75: NOTICE: Immutable function now_i() called! QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------ diff --git a/test/expected/append_unoptimized.out b/test/expected/append_unoptimized.out index 795b5ec7a2a..0fd676ece2b 100644 --- a/test/expected/append_unoptimized.out +++ b/test/expected/append_unoptimized.out @@ -178,7 +178,6 @@ psql:include/append.sql:68: NOTICE: Stable function now_s() called! EXPLAIN (costs off) SELECT * FROM append_test WHERE time > now_i() - interval '2 months' ORDER BY time; -psql:include/append.sql:75: NOTICE: Immutable function now_i() called! psql:include/append.sql:75: NOTICE: Immutable function now_i() called! QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ diff --git a/test/expected/append_x_diff.out b/test/expected/append_x_diff.out index eff4fddba12..cbb8cac5714 100644 --- a/test/expected/append_x_diff.out +++ b/test/expected/append_x_diff.out @@ -109,13 +109,13 @@ 168,169d149 < psql:include/append.sql:68: NOTICE: Stable function now_s() called! < psql:include/append.sql:68: NOTICE: Stable function now_s() called! -183,184c163,164 +182,183c162,163 < QUERY PLAN < ------------------------------------------------------------------------------------------------------------------------------ --- > QUERY PLAN > ------------------------------------------------------------------------------------------------------------------------------------ -187,202c167,175 +186,201c166,174 < -> Append < -> Seq Scan on append_test < Filter: ("time" > ('Tue Aug 22 10:00:00 2017 PDT'::timestamp with time zone - '@ 2 mons'::interval)) @@ -142,13 +142,13 @@ > -> Bitmap Index Scan on _hyper_1_3_chunk_append_test_time_idx > Index Cond: ("time" > ('Tue Aug 22 10:00:00 2017 PDT'::timestamp with time zone - '@ 2 mons'::interval)) > (10 rows) -211,212c184,185 +210,211c183,184 < QUERY PLAN < ------------------------------------------------------------------- --- > QUERY PLAN > ------------------------------------------------------------------------- -215,224c188,198 +214,223c187,197 < -> Append < -> Seq Scan on append_test < Filter: ("time" > (now_v() - '@ 2 mons'::interval)) @@ -171,10 +171,10 @@ > -> Seq Scan on _hyper_1_3_chunk > Filter: ("time" > (now_v() - '@ 2 mons'::interval)) > (12 rows) -250,251d223 +249,250d222 < psql:include/append.sql:94: NOTICE: Stable function now_s() called! < psql:include/append.sql:94: NOTICE: Stable function now_s() called! 
-259,271c231,241 +258,270c230,240 < QUERY PLAN < ------------------------------------------------------------------------------------------- < Merge Append @@ -200,30 +200,30 @@ > -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk > Index Cond: ("time" > (now_s() - '@ 2 mons'::interval)) > (7 rows) -299a270 +298a269 > psql:include/append.sql:110: NOTICE: Stable function now_s() called! -306c277,279 +305c276,278 < -> Result --- > -> Custom Scan (ConstraintAwareAppend) > Hypertable: append_test > Chunks left after exclusion: 2 -308,311d280 +307,310d279 < -> Seq Scan on append_test < Filter: ("time" > (now_s() - '@ 4 mons'::interval)) < -> Index Scan using _hyper_1_1_chunk_append_test_time_idx on _hyper_1_1_chunk < Index Cond: ("time" > (now_s() - '@ 4 mons'::interval)) -316c285 +315c284 < (14 rows) --- > (12 rows) -328,329c297,298 +327,328c296,297 < QUERY PLAN < ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- --- > QUERY PLAN > ----------------------------------------------------------------------- -334,338c303,306 +333,337c302,305 < -> Result < -> Append < -> Seq Scan on append_test @@ -234,36 +234,36 @@ > Hypertable: append_test > Chunks left after exclusion: 0 > (7 rows) -383,384c351,353 +382,383c350,352 < QUERY PLAN < ------------------------------------------------------------------------------------------------------- --- > psql:include/append.sql:149: NOTICE: Stable function now_s() called! > QUERY PLAN > --------------------------------------------------------------------------------------------------------------- -390c359,361 +389c358,360 < -> Result --- > -> Custom Scan (ConstraintAwareAppend) > Hypertable: append_test > Chunks left after exclusion: 3 -392,394c363 +391,393c362 < -> Seq Scan on append_test < Filter: ((colorid > 0) AND ("time" > (now_s() - '@ 400 days'::interval))) < -> Index Scan using _hyper_1_1_chunk_append_test_time_idx on _hyper_1_1_chunk --- > -> Index Scan Backward using _hyper_1_1_chunk_append_test_time_idx on _hyper_1_1_chunk -397c366 +396c365 < -> Index Scan using _hyper_1_2_chunk_append_test_time_idx on _hyper_1_2_chunk --- > -> Index Scan Backward using _hyper_1_2_chunk_append_test_time_idx on _hyper_1_2_chunk -400c369 +399c368 < -> Index Scan using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk --- > -> Index Scan Backward using _hyper_1_3_chunk_append_test_time_idx on _hyper_1_3_chunk -435a405 +434a404 > psql:include/append.sql:166: NOTICE: Stable function now_s() called! -475,476c445,448 +474,475c444,447 < QUERY PLAN < -------------------------------------------------------------------------------------------- --- @@ -271,7 +271,7 @@ > psql:include/append.sql:187: NOTICE: Stable function now_s() called! 
> QUERY PLAN > -------------------------------------------------------------------------------------------------- -479,497c451,467 +478,496c450,466 < -> Append < -> Seq Scan on append_test a < Filter: ("time" > (now_s() - '@ 3 hours'::interval)) diff --git a/test/expected/insert.out b/test/expected/insert.out index 9851eb57c55..4f6c1f6d71e 100644 --- a/test/expected/insert.out +++ b/test/expected/insert.out @@ -275,8 +275,8 @@ VALUES ('2001-02-01', 98, 'dev1'), SELECT * FROM date_col_test WHERE time > '2001-01-01'; time | temp | device ------------+------+-------- - 02-01-2001 | 98 | dev1 03-02-2001 | 98 | dev1 + 02-01-2001 | 98 | dev1 (2 rows) CREATE TABLE many_partitions_test_1m (time timestamp, temp float8, device text NOT NULL); diff --git a/test/expected/partitioning.out b/test/expected/partitioning.out index d9cd947dda7..dec2b8cfad6 100644 --- a/test/expected/partitioning.out +++ b/test/expected/partitioning.out @@ -30,15 +30,15 @@ SELECT * FROM test.show_constraintsp('_timescaledb_internal._hyper_1_%_chunk'); -- Make sure constraint exclusion works on device column EXPLAIN (verbose, costs off) SELECT * FROM part_legacy WHERE device = 1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------- Append -> Seq Scan on public.part_legacy Output: part_legacy."time", part_legacy.temp, part_legacy.device - Filter: ((part_legacy.device = 1) AND (_timescaledb_internal.get_partition_for_key(part_legacy.device) = 1516350201)) + Filter: (part_legacy.device = 1) -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.temp, _hyper_1_1_chunk.device - Filter: ((_hyper_1_1_chunk.device = 1) AND (_timescaledb_internal.get_partition_for_key(_hyper_1_1_chunk.device) = 1516350201)) + Filter: (_hyper_1_1_chunk.device = 1) (7 rows) CREATE TABLE part_new(time timestamptz, temp float, device int); @@ -74,15 +74,15 @@ SELECT * FROM test.show_constraintsp('_timescaledb_internal._hyper_2_%_chunk'); -- Make sure constraint exclusion works on device column EXPLAIN (verbose, costs off) SELECT * FROM part_new WHERE device = 1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------- Append -> Seq Scan on public.part_new Output: part_new."time", part_new.temp, part_new.device - Filter: ((part_new.device = 1) AND (_timescaledb_internal.get_partition_hash(part_new.device) = 242423622)) + Filter: (part_new.device = 1) -> Seq Scan on _timescaledb_internal._hyper_2_3_chunk Output: _hyper_2_3_chunk."time", _hyper_2_3_chunk.temp, _hyper_2_3_chunk.device - Filter: ((_hyper_2_3_chunk.device = 1) AND (_timescaledb_internal.get_partition_hash(_hyper_2_3_chunk.device) = 242423622)) + Filter: (_hyper_2_3_chunk.device = 1) (7 rows) CREATE TABLE part_new_convert1(time timestamptz, temp float8, device int); diff --git a/test/expected/plan_expand_hypertable_optimized.out b/test/expected/plan_expand_hypertable_optimized.out new file mode 100644 index 00000000000..a600cf4431a --- /dev/null +++ b/test/expected/plan_expand_hypertable_optimized.out @@ -0,0 +1,643 @@ +SET timescaledb.disable_optimizations= 'off'; +\set PREFIX 'EXPLAIN (costs off) ' +\ir include/plan_expand_hypertable_load.sql +--single 
time dimension +CREATE TABLE hyper ("time_broken" bigint NOT NULL, "value" integer); +ALTER TABLE hyper +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper', 'time', chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:8: NOTICE: adding NOT NULL constraint to column "time" + create_hypertable +------------------- + +(1 row) + +INSERT INTO hyper SELECT g, g FROM generate_series(0,1000) g; +--insert a point with INT_MAX_64 +INSERT INTO hyper (time, value) SELECT 9223372036854775807::bigint, 0; +--time and space +CREATE TABLE hyper_w_space ("time_broken" bigint NOT NULL, "device_id" text, "value" integer); +ALTER TABLE hyper_w_space +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; +SELECT create_hypertable('hyper_w_space', 'time', 'device_id', 2, chunk_time_interval => 10); +psql:include/plan_expand_hypertable_load.sql:24: NOTICE: adding NOT NULL constraint to column "time" + create_hypertable +------------------- + +(1 row) + +INSERT INTO hyper_w_space (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; +CREATE VIEW hyper_w_space_view AS (SELECT * FROM hyper_w_space); +--with timestamp and space +CREATE TABLE tag (id serial PRIMARY KEY, name text); +CREATE TABLE hyper_ts ("time_broken" timestamptz NOT NULL, "device_id" text, tag_id INT REFERENCES tag(id), "value" integer); +ALTER TABLE hyper_ts +DROP COLUMN time_broken, +ADD COLUMN time TIMESTAMPTZ; +SELECT create_hypertable('hyper_ts', 'time', 'device_id', 2, chunk_time_interval => '10 seconds'::interval); +psql:include/plan_expand_hypertable_load.sql:39: NOTICE: adding NOT NULL constraint to column "time" + create_hypertable +------------------- + +(1 row) + +INSERT INTO tag(name) SELECT 'tag'||g FROM generate_series(0,10) g; +INSERT INTO hyper_ts (time, device_id, tag_id, value) SELECT to_timestamp(g), 'dev' || g, (random() /10)+1, g FROM generate_series(0,30) g; +--one in the future +INSERT INTO hyper_ts (time, device_id, tag_id, value) VALUES ('2100-01-01 02:03:04 PST', 'dev101', 1, 0); +SET client_min_messages = 'error'; +ANALYZE; +RESET client_min_messages; +\ir include/plan_expand_hypertable_query.sql +--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; +--test upper bounds +:PREFIX SELECT * FROM hyper WHERE time < 10 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time < 11 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" < 11) + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 11) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" < 11) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 10 ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" = 10) + -> Seq Scan on _hyper_1_2_chunk + Filter: ("time" = 10) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 >= time ORDER BY value; + QUERY PLAN +------------------------------------------ + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: (10 >= "time") + -> Seq Scan on _hyper_1_1_chunk + Filter: (10 >= "time") + -> Seq Scan on _hyper_1_2_chunk + Filter: (10 >= "time") +(9 
rows) + +--test lower bounds +:PREFIX SELECT * FROM hyper WHERE time >= 10 and time < 20 ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: (("time" >= 10) AND ("time" < 20)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 10) AND ("time" < 20)) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE 10 < time and 20 >= time ORDER BY value; + QUERY PLAN +---------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ((10 < "time") AND (20 >= "time")) + -> Seq Scan on _hyper_1_2_chunk + Filter: ((10 < "time") AND (20 >= "time")) + -> Seq Scan on _hyper_1_3_chunk + Filter: ((10 < "time") AND (20 >= "time")) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9 and time < 20 ORDER BY value; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: (("time" >= 9) AND ("time" < 20)) + -> Seq Scan on _hyper_1_1_chunk + Filter: (("time" >= 9) AND ("time" < 20)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" >= 9) AND ("time" < 20)) +(9 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9 and time < 20 ORDER BY value; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: (("time" > 9) AND ("time" < 20)) + -> Seq Scan on _hyper_1_2_chunk + Filter: (("time" > 9) AND ("time" < 20)) +(7 rows) + +--test empty result +:PREFIX SELECT * FROM hyper WHERE time < 0; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on hyper + Filter: ("time" < 0) +(3 rows) + +--test expression evaluation +:PREFIX SELECT * FROM hyper WHERE time < (5*2)::smallint; + QUERY PLAN +------------------------------------------- + Append + -> Seq Scan on hyper + Filter: ("time" < '10'::smallint) + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < '10'::smallint) +(5 rows) + +--test logic at INT64_MAX +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" = '9223372036854775807'::bigint) + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775807'::bigint) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" = '9223372036854775806'::bigint) + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" = '9223372036854775806'::bigint) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time >= 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +----------------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" >= '9223372036854775807'::bigint) + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" >= '9223372036854775807'::bigint) +(7 rows) + +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775807::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" > '9223372036854775807'::bigint) +(5 rows) + +:PREFIX SELECT * FROM hyper 
WHERE time > 9223372036854775806::bigint ORDER BY value; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: hyper.value + -> Append + -> Seq Scan on hyper + Filter: ("time" > '9223372036854775806'::bigint) + -> Seq Scan on _hyper_1_102_chunk + Filter: ("time" > '9223372036854775806'::bigint) +(7 rows) + +--cte +:PREFIX WITH cte AS( + SELECT * FROM hyper WHERE time < 10 +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: cte.value + CTE cte + -> Append + -> Seq Scan on hyper + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) + -> CTE Scan on cte +(9 rows) + +--subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper WHERE time < 10); + QUERY PLAN +-------------------------------------------- + Result + SubPlan 1 + -> Append + -> Seq Scan on hyper + Filter: ("time" < 10) + -> Seq Scan on _hyper_1_1_chunk + Filter: ("time" < 10) +(7 rows) + +--no space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_104_chunk + Filter: ("time" < 10) + -> Seq Scan on _hyper_2_103_chunk + Filter: ("time" < 10) +(9 rows) + +--valid space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(7 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev5' = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND ('dev5'::text = device_id)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(7 rows) + +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev'||(2+3) = device_id ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND ('dev5'::text = device_id)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ('dev5'::text = device_id)) +(7 rows) + +--only space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE 'dev5' = device_id ORDER BY value; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_106_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_107_chunk + Filter: ('dev5'::text = device_id) + -> Seq Scan on _hyper_2_103_chunk + Filter: ('dev5'::text = device_id) +(11 rows) + +--unhandled space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id > 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq 
Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id > 'dev5'::text)) +(9 rows) + +--use of OR +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND (device_id = 'dev5' or device_id = 'dev6') ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_104_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND ((device_id = 'dev5'::text) OR (device_id = 'dev6'::text))) +(9 rows) + +--cte +:PREFIX WITH cte AS( + SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' +) +SELECT * FROM cte ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------ + Sort + Sort Key: cte.value + CTE cte + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) + -> CTE Scan on cte +(9 rows) + +--subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper_w_space WHERE time < 10 and device_id = 'dev5'); + QUERY PLAN +------------------------------------------------------------------------ + Result + SubPlan 1 + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(7 rows) + +--view +:PREFIX SELECT * FROM hyper_w_space_view WHERE time < 10 and device_id = 'dev5' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: hyper_w_space.value + -> Append + -> Seq Scan on hyper_w_space + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) + -> Seq Scan on _hyper_2_103_chunk + Filter: (("time" < 10) AND (device_id = 'dev5'::text)) +(7 rows) + +--timestamps +--these should work since they are immutable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969 PST'::timestamptz ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_111_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_110_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_111_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_110_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp AT TIME ZONE 
'PST' ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_111_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) + -> Seq Scan on _hyper_3_110_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) +(9 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_3_110_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(7 rows) + +--these should not work since they use stable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp ORDER BY value; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_110_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_111_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_112_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_113_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_114_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_115_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < 'Wed Dec 31 16:00:10 1969'::timestamp without time zone) +(21 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE time < ('Wed Dec 31 16:00:10 1969'::timestamp::timestamptz) ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_110_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_111_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_112_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_113_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_114_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + 
-> Seq Scan on _hyper_3_115_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_116_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) + -> Seq Scan on _hyper_3_117_chunk + Filter: ("time" < ('Wed Dec 31 16:00:10 1969'::timestamp without time zone)::timestamp with time zone) +(21 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE NOW() < time ORDER BY value; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: (now() < "time") + -> Seq Scan on _hyper_3_110_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_111_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_112_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_113_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_114_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_115_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_116_chunk + Filter: (now() < "time") + -> Seq Scan on _hyper_3_117_chunk + Filter: (now() < "time") +(21 rows) + +--joins +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Nested Loop + -> Seq Scan on tag + Filter: (id = 1) + -> Append + -> Seq Scan on hyper_ts + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text) AND (tag_id = 1)) + -> Seq Scan on _hyper_3_110_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text) AND (tag_id = 1)) +(10 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) or (time < to_timestamp(10) and device_id = 'dev1') ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Append + -> Seq Scan on hyper_ts + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + SubPlan 1 + -> Seq Scan on tag + Filter: (id = 1) + -> Seq Scan on _hyper_3_110_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_111_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_112_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_113_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_114_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_115_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on 
_hyper_3_116_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + -> Seq Scan on _hyper_3_117_chunk + Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) +(24 rows) + +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.name='tag1') and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Nested Loop + Join Filter: (hyper_ts.tag_id = tag.id) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) + -> Append + -> Seq Scan on hyper_ts + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_3_110_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(11 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Hash Join + Hash Cond: (tag.id = hyper_ts.tag_id) + -> Seq Scan on tag + -> Hash + -> Append + -> Seq Scan on hyper_ts + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_3_110_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(11 rows) + +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE tag.name = 'tag1' and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: hyper_ts.value + -> Nested Loop + Join Filter: (hyper_ts.tag_id = tag.id) + -> Seq Scan on tag + Filter: (name = 'tag1'::text) + -> Append + -> Seq Scan on hyper_ts + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) + -> Seq Scan on _hyper_3_110_chunk + Filter: (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text)) +(11 rows) + diff --git a/test/expected/plan_expand_hypertable_results_diff.out b/test/expected/plan_expand_hypertable_results_diff.out new file mode 100644 index 00000000000..e1594f8cf9c --- /dev/null +++ b/test/expected/plan_expand_hypertable_results_diff.out @@ -0,0 +1,6 @@ +\set ECHO errors + ?column? 
+---------- + Done +(1 row) + diff --git a/test/expected/sql_query.out b/test/expected/sql_query.out index 043400d1c09..818a4795075 100644 --- a/test/expected/sql_query.out +++ b/test/expected/sql_query.out @@ -70,14 +70,13 @@ EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE devi Append -> Seq Scan on public."two_Partitions" Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool - Filter: (("two_Partitions".device_id = 'dev2'::text) AND (_timescaledb_internal.get_partition_hash("two_Partitions".device_id) = 405750566)) + Filter: ("two_Partitions".device_id = 'dev2'::text) -> Bitmap Heap Scan on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool Recheck Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text) - Filter: (_timescaledb_internal.get_partition_hash(_hyper_1_4_chunk.device_id) = 405750566) -> Bitmap Index Scan on "_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" Index Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text) -(10 rows) +(9 rows) EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE device_id = 'dev'||'2'; QUERY PLAN @@ -85,14 +84,13 @@ EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE devi Append -> Seq Scan on public."two_Partitions" Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool - Filter: (("two_Partitions".device_id = 'dev2'::text) AND (_timescaledb_internal.get_partition_hash("two_Partitions".device_id) = 405750566)) + Filter: ("two_Partitions".device_id = 'dev2'::text) -> Bitmap Heap Scan on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool Recheck Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text) - Filter: (_timescaledb_internal.get_partition_hash(_hyper_1_4_chunk.device_id) = 405750566) -> Bitmap Index Scan on "_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" Index Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text) -(10 rows) +(9 rows) EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE 'dev'||'2' = device_id; QUERY PLAN @@ -100,14 +98,13 @@ EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE 'dev Append -> Seq Scan on public."two_Partitions" Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool - Filter: (('dev2'::text = "two_Partitions".device_id) AND (_timescaledb_internal.get_partition_hash("two_Partitions".device_id) = 405750566)) + Filter: ('dev2'::text = "two_Partitions".device_id) -> Bitmap Heap Scan on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool Recheck Cond: ('dev2'::text = _hyper_1_4_chunk.device_id) - Filter: (_timescaledb_internal.get_partition_hash(_hyper_1_4_chunk.device_id) = 405750566) -> Bitmap Index Scan on 
"_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" Index Cond: ('dev2'::text = _hyper_1_4_chunk.device_id) -(10 rows) +(9 rows) --test integer partition key CREATE TABLE "int_part"(time timestamp, object_id int, temp float); @@ -136,19 +133,18 @@ SELECT * FROM "int_part" WHERE object_id = 1; --make sure this touches only one partititon EXPLAIN (verbose ON, costs off) SELECT * FROM "int_part" WHERE object_id = 1; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Append -> Seq Scan on public.int_part Output: int_part."time", int_part.object_id, int_part.temp - Filter: ((int_part.object_id = 1) AND (_timescaledb_internal.get_partition_hash(int_part.object_id) = 242423622)) + Filter: (int_part.object_id = 1) -> Bitmap Heap Scan on _timescaledb_internal._hyper_2_5_chunk Output: _hyper_2_5_chunk."time", _hyper_2_5_chunk.object_id, _hyper_2_5_chunk.temp Recheck Cond: (_hyper_2_5_chunk.object_id = 1) - Filter: (_timescaledb_internal.get_partition_hash(_hyper_2_5_chunk.object_id) = 242423622) -> Bitmap Index Scan on _hyper_2_5_chunk_int_part_object_id_time_idx Index Cond: (_hyper_2_5_chunk.object_id = 1) -(10 rows) +(9 rows) --TODO: handle this later? --EXPLAIN (verbose ON, costs off) SELECT * FROM "two_Partitions" WHERE device_id IN ('dev2', 'dev21'); diff --git a/test/sql/CMakeLists.txt b/test/sql/CMakeLists.txt index a8fac36e2f2..841717ea050 100644 --- a/test/sql/CMakeLists.txt +++ b/test/sql/CMakeLists.txt @@ -33,6 +33,8 @@ set(TEST_FILES partitioning.sql pg_dump.sql plain.sql + plan_expand_hypertable_optimized.sql + plan_expand_hypertable_results_diff.sql reindex.sql relocate_extension.sql reloptions.sql diff --git a/test/sql/include/plan_expand_hypertable_load.sql b/test/sql/include/plan_expand_hypertable_load.sql new file mode 100644 index 00000000000..84b260f32ae --- /dev/null +++ b/test/sql/include/plan_expand_hypertable_load.sql @@ -0,0 +1,51 @@ +--single time dimension +CREATE TABLE hyper ("time_broken" bigint NOT NULL, "value" integer); + +ALTER TABLE hyper +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; + +SELECT create_hypertable('hyper', 'time', chunk_time_interval => 10); + +INSERT INTO hyper SELECT g, g FROM generate_series(0,1000) g; + +--insert a point with INT_MAX_64 +INSERT INTO hyper (time, value) SELECT 9223372036854775807::bigint, 0; + + + +--time and space +CREATE TABLE hyper_w_space ("time_broken" bigint NOT NULL, "device_id" text, "value" integer); + +ALTER TABLE hyper_w_space +DROP COLUMN time_broken, +ADD COLUMN time BIGINT; + +SELECT create_hypertable('hyper_w_space', 'time', 'device_id', 2, chunk_time_interval => 10); + +INSERT INTO hyper_w_space (time, device_id, value) SELECT g, 'dev' || g, g FROM generate_series(0,30) g; + +CREATE VIEW hyper_w_space_view AS (SELECT * FROM hyper_w_space); + + +--with timestamp and space +CREATE TABLE tag (id serial PRIMARY KEY, name text); +CREATE TABLE hyper_ts ("time_broken" timestamptz NOT NULL, "device_id" text, tag_id INT REFERENCES tag(id), "value" integer); + +ALTER TABLE hyper_ts +DROP COLUMN time_broken, +ADD COLUMN time TIMESTAMPTZ; + +SELECT create_hypertable('hyper_ts', 'time', 'device_id', 2, chunk_time_interval => '10 seconds'::interval); + +INSERT INTO tag(name) SELECT 'tag'||g FROM generate_series(0,10) g; +INSERT INTO hyper_ts (time, device_id, tag_id, value) SELECT to_timestamp(g), 'dev' || g, (random() /10)+1, g 
FROM generate_series(0,30) g; + +--one in the future +INSERT INTO hyper_ts (time, device_id, tag_id, value) VALUES ('2100-01-01 02:03:04 PST', 'dev101', 1, 0); + + + +SET client_min_messages = 'error'; +ANALYZE; +RESET client_min_messages; diff --git a/test/sql/include/plan_expand_hypertable_query.sql b/test/sql/include/plan_expand_hypertable_query.sql new file mode 100644 index 00000000000..839015daec7 --- /dev/null +++ b/test/sql/include/plan_expand_hypertable_query.sql @@ -0,0 +1,88 @@ +--we want to see how our logic excludes chunks +--and not how much work constraint_exclusion does +SET constraint_exclusion = 'off'; + +--test upper bounds +:PREFIX SELECT * FROM hyper WHERE time < 10 ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time < 11 ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time = 10 ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE 10 >= time ORDER BY value; + +--test lower bounds +:PREFIX SELECT * FROM hyper WHERE time >= 10 and time < 20 ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE 10 < time and 20 >= time ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time >= 9 and time < 20 ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time > 9 and time < 20 ORDER BY value; + +--test empty result +:PREFIX SELECT * FROM hyper WHERE time < 0; + +--test expression evaluation +:PREFIX SELECT * FROM hyper WHERE time < (5*2)::smallint; + +--test logic at INT64_MAX +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775807::bigint ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time = 9223372036854775806::bigint ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time >= 9223372036854775807::bigint ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775807::bigint ORDER BY value; +:PREFIX SELECT * FROM hyper WHERE time > 9223372036854775806::bigint ORDER BY value; + +--cte +:PREFIX WITH cte AS( + SELECT * FROM hyper WHERE time < 10 +) +SELECT * FROM cte ORDER BY value; + +--subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper WHERE time < 10); + +--no space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 ORDER BY value; + +--valid space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' ORDER BY value; +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev5' = device_id ORDER BY value; +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and 'dev'||(2+3) = device_id ORDER BY value; + +--only space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE 'dev5' = device_id ORDER BY value; + +--unhandled space constraint +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 and device_id > 'dev5' ORDER BY value; + +--use of OR +:PREFIX SELECT * FROM hyper_w_space WHERE time < 10 AND (device_id = 'dev5' or device_id = 'dev6') ORDER BY value; + +--cte +:PREFIX WITH cte AS( + SELECT * FROM hyper_w_space WHERE time < 10 and device_id = 'dev5' +) +SELECT * FROM cte ORDER BY value; + +--subquery +:PREFIX SELECT 0 = ANY (SELECT value FROM hyper_w_space WHERE time < 10 and device_id = 'dev5'); + +--view +:PREFIX SELECT * FROM hyper_w_space_view WHERE time < 10 and device_id = 'dev5' ORDER BY value; + + +--timestamps +--these should work since they are immutable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969 PST'::timestamptz ORDER BY value; +:PREFIX SELECT * FROM hyper_ts WHERE time < to_timestamp(10) ORDER BY value; +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp AT TIME ZONE 'PST' ORDER BY value; +:PREFIX SELECT * FROM hyper_ts 
WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + +--these should not work since they use stable functions +:PREFIX SELECT * FROM hyper_ts WHERE time < 'Wed Dec 31 16:00:10 1969'::timestamp ORDER BY value; +:PREFIX SELECT * FROM hyper_ts WHERE time < ('Wed Dec 31 16:00:10 1969'::timestamp::timestamptz) ORDER BY value; +:PREFIX SELECT * FROM hyper_ts WHERE NOW() < time ORDER BY value; + +--joins +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) or (time < to_timestamp(10) and device_id = 'dev1') ORDER BY value; +:PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.name='tag1') and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; +:PREFIX SELECT * FROM hyper_ts JOIN tag on (hyper_ts.tag_id = tag.id ) WHERE tag.name = 'tag1' and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; + + diff --git a/test/sql/plan_expand_hypertable_optimized.sql b/test/sql/plan_expand_hypertable_optimized.sql new file mode 100644 index 00000000000..60d98f8c4a4 --- /dev/null +++ b/test/sql/plan_expand_hypertable_optimized.sql @@ -0,0 +1,4 @@ +SET timescaledb.disable_optimizations= 'off'; +\set PREFIX 'EXPLAIN (costs off) ' +\ir include/plan_expand_hypertable_load.sql +\ir include/plan_expand_hypertable_query.sql diff --git a/test/sql/plan_expand_hypertable_results_diff.sql b/test/sql/plan_expand_hypertable_results_diff.sql new file mode 100644 index 00000000000..931a7452ea0 --- /dev/null +++ b/test/sql/plan_expand_hypertable_results_diff.sql @@ -0,0 +1,35 @@ +\set ECHO errors +\set TEST_BASE_NAME plan_expand_hypertable +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff %s %s', :'TEST_RESULTS_OPTIMIZED', :'TEST_RESULTS_UNOPTIMIZED') as "DIFF_CMD" +\gset + + +\o /dev/null +SET client_min_messages = 'error'; +\ir :TEST_LOAD_NAME +RESET client_min_messages; +\o + +--generate the results into two different files +SET client_min_messages = 'fatal'; +\set ECHO none +--make output contain query results +\set PREFIX '' +\o :TEST_RESULTS_OPTIMIZED +SET timescaledb.disable_optimizations= 'off'; +\ir :TEST_QUERY_NAME +\o +\o :TEST_RESULTS_UNOPTIMIZED +SET timescaledb.disable_optimizations= 'on'; +\ir :TEST_QUERY_NAME +\o +RESET client_min_messages; + +:DIFF_CMD + +SELECT 'Done'; \ No newline at end of file