diff --git a/src/bgw/job.c b/src/bgw/job.c
index 03c8292ffda..b720fd72ce3 100644
--- a/src/bgw/job.c
+++ b/src/bgw/job.c
@@ -4,51 +4,52 @@
  * LICENSE-APACHE for a copy of the license.
  */
 #include
-#include
-#include
+
 #include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
+#include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
+#include
 #include
-#include
-#include
 #include
+#include
 #include
-#include
+#include
+#include
 
-#include "job.h"
+#include "bgw_policy/chunk_stats.h"
+#include "bgw_policy/policy.h"
+#include "bgw/scheduler.h"
+#include "compat/compat.h"
 #include "config.h"
-#include "scanner.h"
+#include "cross_module_fn.h"
+#include "debug_assert.h"
 #include "extension.h"
-#include "compat/compat.h"
 #include "job_stat.h"
+#include "job.h"
+#include "jsonb_utils.h"
 #include "license_guc.h"
+#include "scan_iterator.h"
+#include "scanner.h"
 #include "utils.h"
+
 #ifdef USE_TELEMETRY
 #include "telemetry/telemetry.h"
 #endif
-#include "bgw_policy/chunk_stats.h"
-#include "bgw_policy/policy.h"
-#include "scan_iterator.h"
-#include "bgw/scheduler.h"
-
-#include
-#include "jsonb_utils.h"
-#include "debug_assert.h"
 
 static scheduler_test_hook_type scheduler_test_hook = NULL;
 static char *job_entrypoint_function_name = "ts_bgw_job_entrypoint";
@@ -504,33 +505,6 @@ ts_bgw_job_find_by_proc_and_hypertable_id(const char *proc_name, const char *pro
 	return list_data.list;
 }
 
-List *
-ts_bgw_job_find_by_proc(const char *proc_name, const char *proc_schema)
-{
-	Catalog *catalog = ts_catalog_get();
-	ScanKeyData scankey[2];
-	AccumData list_data = {
-		.list = NIL,
-		.alloc_size = sizeof(BgwJob),
-	};
-	ScannerCtx scanctx = {
-		.table = catalog_get_table_id(catalog, BGW_JOB),
-		.index = catalog_get_index(ts_catalog_get(), BGW_JOB, BGW_JOB_PROC_HYPERTABLE_ID_IDX),
-		.data = &list_data,
-		.scankey = scankey,
-		.nkeys = sizeof(scankey) / sizeof(*scankey),
-		.tuple_found = bgw_job_accum_tuple_found,
-		.lockmode = AccessShareLock,
-		.scandirection = ForwardScanDirection,
-	};
-
-	init_scan_by_proc_schema(&scankey[0], proc_schema);
-	init_scan_by_proc_name(&scankey[1], proc_name);
-
-	ts_scanner_scan(&scanctx);
-	return list_data.list;
-}
-
 List *
 ts_bgw_job_find_by_hypertable_id(int32 hypertable_id)
 {
diff --git a/src/bgw/job.h b/src/bgw/job.h
index 39ee9bf5fc7..1423039be3c 100644
--- a/src/bgw/job.h
+++ b/src/bgw/job.h
@@ -35,7 +35,6 @@
 extern BackgroundWorkerHandle *ts_bgw_job_start(BgwJob *job, Oid user_oid);
 extern List *ts_bgw_job_get_all(size_t alloc_size, MemoryContext mctx);
 extern List *ts_bgw_job_get_scheduled(size_t alloc_size, MemoryContext mctx);
-extern TSDLLEXPORT List *ts_bgw_job_find_by_proc(const char *proc_name, const char *proc_schema);
 extern TSDLLEXPORT List *ts_bgw_job_find_by_hypertable_id(int32 hypertable_id);
 extern TSDLLEXPORT List *ts_bgw_job_find_by_proc_and_hypertable_id(const char *proc_name,
                                                                    const char *proc_schema,
diff --git a/src/chunk.c b/src/chunk.c
index 4ab12b0c209..7dac0b1e0ac 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -5,8 +5,8 @@
  */
 #include
-#include
 #include
+#include
 #include
 #include
 #include
@@ -47,17 +47,17 @@
 #include "chunk_scan.h"
 #include "compat/compat.h"
 #include "cross_module_fn.h"
-#include "debug_point.h"
 #include "debug_assert.h"
-#include "dimension.h"
+#include "debug_point.h"
 #include "dimension_slice.h"
 #include "dimension_vector.h"
+#include "dimension.h"
 #include "errors.h"
 #include "export.h"
 #include "extension.h"
 #include "hypercube.h"
-#include "hypertable.h"
 #include "hypertable_cache.h"
+#include "hypertable.h"
 #include "osm_callbacks.h"
 #include "partitioning.h"
 #include "process_utility.h"
@@ -2533,36 +2533,6 @@ ts_chunk_get_by_id(int32 id, bool fail_if_not_found)
 							   displaykey);
 }
 
-/*
- * Number of chunks created after given chunk.
- * If chunk2.id > chunk1.id then chunk2 is created after chunk1
- */
-int
-ts_chunk_num_of_chunks_created_after(const Chunk *chunk)
-{
-	ScanKeyData scankey[1];
-
-	/*
-	 * Try to find chunks with a greater Id then a given chunk
-	 */
-	ScanKeyInit(&scankey[0],
-				Anum_chunk_idx_id,
-				BTGreaterStrategyNumber,
-				F_INT4GT,
-				Int32GetDatum(chunk->fd.id));
-
-	return chunk_scan_internal(CHUNK_ID_INDEX,
-							   scankey,
-							   1,
-							   NULL,
-							   NULL,
-							   NULL,
-							   0,
-							   ForwardScanDirection,
-							   AccessShareLock,
-							   CurrentMemoryContext);
-}
-
 /*
  * Simple scans provide lightweight ways to access chunk information without the
  * overhead of getting a full chunk (i.e., no extra metadata, like constraints,
@@ -2710,19 +2680,6 @@ ts_chunk_get_hypertable_id_by_reloid(Oid reloid)
 	return 0;
 }
 
-/*
- * Returns the compressed chunk id. The original chunk must exist.
- */
-int32
-ts_chunk_get_compressed_chunk_id(int32 chunk_id)
-{
-	FormData_chunk form;
-	PG_USED_FOR_ASSERTS_ONLY bool result =
-		chunk_simple_scan_by_id(chunk_id, &form, /* missing_ok = */ false);
-	Assert(result);
-	return form.compressed_chunk_id;
-}
-
 FormData_chunk
 ts_chunk_get_formdata(int32 chunk_id)
 {
diff --git a/src/chunk.h b/src/chunk.h
index a50e422e3dc..441cd314c32 100644
--- a/src/chunk.h
+++ b/src/chunk.h
@@ -177,14 +177,12 @@ extern TSDLLEXPORT Chunk *ts_chunk_get_by_relid(Oid relid, bool fail_if_not_foun
 extern TSDLLEXPORT void ts_chunk_free(Chunk *chunk);
 extern bool ts_chunk_exists(const char *schema_name, const char *table_name);
 extern TSDLLEXPORT int32 ts_chunk_get_hypertable_id_by_reloid(Oid reloid);
-extern TSDLLEXPORT int32 ts_chunk_get_compressed_chunk_id(int32 chunk_id);
 extern TSDLLEXPORT FormData_chunk ts_chunk_get_formdata(int32 chunk_id);
 extern TSDLLEXPORT Oid ts_chunk_get_relid(int32 chunk_id, bool missing_ok);
 extern Oid ts_chunk_get_schema_id(int32 chunk_id, bool missing_ok);
 extern bool ts_chunk_get_id(const char *schema, const char *table, int32 *chunk_id,
                             bool missing_ok);
 extern bool ts_chunk_exists_relid(Oid relid);
-extern TSDLLEXPORT int ts_chunk_num_of_chunks_created_after(const Chunk *chunk);
 extern TSDLLEXPORT bool ts_chunk_exists_with_compression(int32 hypertable_id);
 extern void ts_chunk_recreate_all_constraints_for_dimension(Hypertable *ht, int32 dimension_id);
 extern int ts_chunk_delete_by_hypertable_id(int32 hypertable_id);
diff --git a/src/dimension_vector.c b/src/dimension_vector.c
index 272195bada1..10c2f146d2a 100644
--- a/src/dimension_vector.c
+++ b/src/dimension_vector.c
@@ -14,18 +14,6 @@ cmp_slices(const void *left, const void *right)
 	return ts_dimension_slice_cmp(left_slice, right_slice);
 }
 
-/*
- * identical to cmp_slices except for reversed arguments to ts_dimension_slice_cmp
- */
-static int
-cmp_slices_reverse(const void *left, const void *right)
-{
-	const DimensionSlice *left_slice = *((DimensionSlice **) left);
-	const DimensionSlice *right_slice = *((DimensionSlice **) right);
-
-	return -ts_dimension_slice_cmp(left_slice, right_slice);
-}
-
 static int
 cmp_coordinate_and_slice(const void *left, const void *right)
 {
@@ -73,17 +61,6 @@ ts_dimension_vec_sort(DimensionVec **vecptr)
 	return vec;
 }
 
-DimensionVec *
-ts_dimension_vec_sort_reverse(DimensionVec **vecptr)
-{
-	DimensionVec *vec = *vecptr;
-
-	if (vec->num_slices > 1)
-		qsort(vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices_reverse);
-
-	return vec;
-}
-
 DimensionVec *
 ts_dimension_vec_add_slice(DimensionVec **vecptr, DimensionSlice *slice)
 {
diff --git a/src/dimension_vector.h b/src/dimension_vector.h
index c4bacc72ae6..3ca0a065973 100644
--- a/src/dimension_vector.h
+++ b/src/dimension_vector.h
@@ -28,7 +28,6 @@ typedef struct DimensionVec
 extern DimensionVec *ts_dimension_vec_create(int32 initial_num_slices);
 extern DimensionVec *ts_dimension_vec_sort(DimensionVec **vec);
-extern DimensionVec *ts_dimension_vec_sort_reverse(DimensionVec **vec);
 extern DimensionVec *ts_dimension_vec_add_slice_sort(DimensionVec **vec, DimensionSlice *slice);
 extern DimensionVec *ts_dimension_vec_add_slice(DimensionVec **vecptr, DimensionSlice *slice);
 extern DimensionVec *ts_dimension_vec_add_unique_slice(DimensionVec **vecptr,
diff --git a/src/hypertable.c b/src/hypertable.c
index 3cab2247240..65f12bba3fe 100644
--- a/src/hypertable.c
+++ b/src/hypertable.c
@@ -4,6 +4,7 @@
 * LICENSE-APACHE for a copy of the license.
 */
 #include
+
 #include
 #include
 #include
@@ -28,6 +29,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -36,36 +38,36 @@
 #include
 #include
 #include
-#include
 #include "hypertable.h"
-#include "ts_catalog/compression_settings.h"
-#include "ts_catalog/catalog.h"
-#include "ts_catalog/metadata.h"
-#include "hypercube.h"
-#include "dimension.h"
-#include "chunk.h"
+
+#include "bgw_policy/policy.h"
 #include "chunk_adaptive.h"
-#include "subspace_store.h"
-#include "hypertable_cache.h"
-#include "trigger.h"
-#include "scanner.h"
+#include "chunk.h"
+#include "compat/compat.h"
+#include "copy.h"
+#include "cross_module_fn.h"
+#include "debug_assert.h"
 #include "dimension_slice.h"
 #include "dimension_vector.h"
-#include "indexing.h"
-#include "guc.h"
+#include "dimension.h"
+#include "error_utils.h"
 #include "errors.h"
-#include "copy.h"
-#include "utils.h"
-#include "bgw_policy/policy.h"
-#include "ts_catalog/continuous_agg.h"
+#include "guc.h"
+#include "hypercube.h"
+#include "hypertable_cache.h"
+#include "indexing.h"
 #include "license_guc.h"
-#include "cross_module_fn.h"
-#include "scan_iterator.h"
-#include "debug_assert.h"
 #include "osm_callbacks.h"
-#include "error_utils.h"
-#include "compat/compat.h"
+#include "scan_iterator.h"
+#include "scanner.h"
+#include "subspace_store.h"
+#include "trigger.h"
+#include "ts_catalog/catalog.h"
+#include "ts_catalog/compression_settings.h"
+#include "ts_catalog/continuous_agg.h"
+#include "ts_catalog/metadata.h"
+#include "utils.h"
 
 Oid
 ts_rel_get_owner(Oid relid)
@@ -929,29 +931,6 @@ ts_hypertable_get_by_name(const char *schema, const char *name)
 	return ht;
 }
 
-void
-ts_hypertable_scan_by_name(ScanIterator *iterator, const char *schema, const char *name)
-{
-	iterator->ctx.index = catalog_get_index(ts_catalog_get(), HYPERTABLE, HYPERTABLE_NAME_INDEX);
-
-	/* both cannot be NULL inputs */
-	Assert(name != NULL || schema != NULL);
-
-	if (name)
-		ts_scan_iterator_scan_key_init(iterator,
-									   Anum_hypertable_name_idx_table,
-									   BTEqualStrategyNumber,
-									   F_NAMEEQ,
-									   CStringGetDatum(name));
-
-	if (schema)
-		ts_scan_iterator_scan_key_init(iterator,
-									   Anum_hypertable_name_idx_schema,
-									   BTEqualStrategyNumber,
-									   F_NAMEEQ,
-									   CStringGetDatum(schema));
-}
-
 Hypertable *
 ts_hypertable_get_by_id(int32 hypertable_id)
 {
@@ -1443,43 +1422,6 @@ insert_blocker_trigger_add(Oid relid)
 	return objaddr.objectId;
 }
 
-TS_FUNCTION_INFO_V1(ts_hypertable_insert_blocker_trigger_add);
-
-/*
- * This function is exposed to drop the old blocking trigger on legacy hypertables.
- * We can't do it from SQL code, because internal triggers cannot be dropped from SQL.
- * After the legacy internal trigger is dropped, we add the new, visible trigger.
- *
- * In case the hypertable's root table has data in it, we bail out with an
- * error instructing the user to fix the issue first.
- */
-Datum
-ts_hypertable_insert_blocker_trigger_add(PG_FUNCTION_ARGS)
-{
-	Oid relid = PG_GETARG_OID(0);
-
-	ts_hypertable_permissions_check(relid, GetUserId());
-
-	if (ts_table_has_tuples(relid, AccessShareLock))
-		ereport(ERROR,
-				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-				 errmsg("hypertable \"%s\" has data in the root table", get_rel_name(relid)),
-				 errdetail("Migrate the data from the root table to chunks before running the "
-						   "UPDATE again."),
-				 errhint("Data can be migrated as follows:\n"
-						 "> BEGIN;\n"
-						 "> SET timescaledb.restoring = 'off';\n"
-						 "> INSERT INTO \"%1$s\" SELECT * FROM ONLY \"%1$s\";\n"
-						 "> SET timescaledb.restoring = 'on';\n"
-						 "> TRUNCATE ONLY \"%1$s\";\n"
-						 "> SET timescaledb.restoring = 'off';\n"
-						 "> COMMIT;",
-						 get_rel_name(relid))));
-
-	/* Add the new trigger */
-	PG_RETURN_OID(insert_blocker_trigger_add(relid));
-}
-
 static Datum
 create_hypertable_datum(FunctionCallInfo fcinfo, const Hypertable *ht, bool created,
                         bool is_generic)
diff --git a/src/hypertable.h b/src/hypertable.h
index e213f44087b..b4117321d8a 100644
--- a/src/hypertable.h
+++ b/src/hypertable.h
@@ -151,8 +151,6 @@ extern TSDLLEXPORT int64 ts_hypertable_get_open_dim_max_value(const Hypertable *
 extern TSDLLEXPORT bool ts_hypertable_has_compression_table(const Hypertable *ht);
 extern TSDLLEXPORT void ts_hypertable_formdata_fill(FormData_hypertable *fd, const TupleInfo *ti);
-extern TSDLLEXPORT void ts_hypertable_scan_by_name(ScanIterator *iterator, const char *schema,
-                                                   const char *name);
 
 #define hypertable_scan(schema, table, tuple_found, data, lockmode) \
 	ts_hypertable_scan_with_memory_context(schema, \
diff --git a/src/jsonb_utils.c b/src/jsonb_utils.c
index 1c05576c445..f9a5f37d444 100644
--- a/src/jsonb_utils.c
+++ b/src/jsonb_utils.c
@@ -4,18 +4,15 @@
 * LICENSE-APACHE for a copy of the license.
 */
 #include
-#include
+#include
+#include
 #include
 #include
 #include
 
 #include "compat/compat.h"
-
-#include
-
 #include "export.h"
-
 #include "jsonb_utils.h"
 
 static void ts_jsonb_add_pair(JsonbParseState *state, JsonbValue *key, JsonbValue *value);
@@ -129,14 +126,6 @@ ts_jsonb_add_interval(JsonbParseState *state, const char *key, Interval *interva
 	ts_jsonb_add_value(state, key, &json_value);
 }
 
-void
-ts_jsonb_add_numeric(JsonbParseState *state, const char *key, const Numeric value)
-{
-	JsonbValue json_value = { .type = jbvNumeric, .val.numeric = value };
-
-	ts_jsonb_add_value(state, key, &json_value);
-}
-
 void
 ts_jsonb_add_value(JsonbParseState *state, const char *key, JsonbValue *value)
 {
@@ -188,27 +177,6 @@ ts_jsonb_get_str_field(const Jsonb *jsonb, const char *key)
 	return text_to_cstring(DatumGetTextP(result));
 }
 
-TimestampTz
-ts_jsonb_get_time_field(const Jsonb *jsonb, const char *key, bool *field_found)
-{
-	Datum time_datum;
-	char *time_str = ts_jsonb_get_str_field(jsonb, key);
-
-	if (time_str == NULL)
-	{
-		*field_found = false;
-		return DT_NOBEGIN;
-	}
-
-	time_datum = DirectFunctionCall3(timestamptz_in,
-									 /* str= */ CStringGetDatum(time_str),
-									 /* unused */ Int32GetDatum(-1),
-									 /* typmod= */ Int32GetDatum(-1));
-
-	*field_found = true;
-	return DatumGetTimestampTz(time_datum);
-}
-
 bool
 ts_jsonb_get_bool_field(const Jsonb *json, const char *key, bool *field_found)
 {
diff --git a/src/jsonb_utils.h b/src/jsonb_utils.h
index 98ecc6f7347..ad5d0689bea 100644
--- a/src/jsonb_utils.h
+++ b/src/jsonb_utils.h
@@ -21,16 +21,12 @@ extern TSDLLEXPORT void ts_jsonb_add_int32(JsonbParseState *state, const char *k
                                            const int32 value);
 extern TSDLLEXPORT void ts_jsonb_add_int64(JsonbParseState *state, const char *key,
                                            const int64 value);
-extern TSDLLEXPORT void ts_jsonb_add_numeric(JsonbParseState *state, const char *key,
-                                             const Numeric value);
 extern TSDLLEXPORT void ts_jsonb_set_value_by_type(JsonbValue *value, Oid typeid, Datum datum);
 extern void ts_jsonb_add_value(JsonbParseState *state, const char *key, JsonbValue *value);
 
 extern TSDLLEXPORT char *ts_jsonb_get_str_field(const Jsonb *jsonb, const char *key);
 extern TSDLLEXPORT Interval *ts_jsonb_get_interval_field(const Jsonb *jsonb, const char *key);
-extern TSDLLEXPORT TimestampTz ts_jsonb_get_time_field(const Jsonb *jsonb, const char *key,
-                                                       bool *field_found);
 extern TSDLLEXPORT bool ts_jsonb_get_bool_field(const Jsonb *json, const char *key,
                                                 bool *field_found);
 extern TSDLLEXPORT int32 ts_jsonb_get_int32_field(const Jsonb *json, const char *key,
diff --git a/src/ts_catalog/array_utils.c b/src/ts_catalog/array_utils.c
index ec78d899b73..dea2c96a140 100644
--- a/src/ts_catalog/array_utils.c
+++ b/src/ts_catalog/array_utils.c
@@ -4,6 +4,7 @@
 * LICENSE-APACHE for a copy of the license.
 */
 #include
+
 #include
 #include
 #include
@@ -11,9 +12,9 @@
 #include
 #include
-#include
-#include
 #include "array_utils.h"
+#include "compat/compat.h"
+#include "debug_assert.h"
 
 /*
 * Array helper function for internal catalog arrays.
@@ -283,39 +284,3 @@ ts_array_add_element_bool(ArrayType *arr, bool value)
 		return DatumGetArrayTypeP(d);
 	}
 }
-
-extern TSDLLEXPORT ArrayType *
-ts_array_create_from_list_text(List *values)
-{
-	if (!values)
-		return NULL;
-
-	List *datums = NIL;
-	ListCell *lc;
-	foreach (lc, values)
-	{
-		datums = lappend(datums, (void *) CStringGetTextDatum(lfirst(lc)));
-	}
-
-	Assert(datums);
-	return construct_array((Datum *) datums->elements,
-						   datums->length,
-						   TEXTOID,
-						   -1,
-						   false,
-						   TYPALIGN_INT);
-}
-
-extern TSDLLEXPORT ArrayType *
-ts_array_create_from_list_bool(List *values)
-{
-	if (!values)
-		return NULL;
-
-	return construct_array((Datum *) values->elements,
-						   values->length,
-						   BOOLOID,
-						   1,
-						   true,
-						   TYPALIGN_CHAR);
-}
diff --git a/src/ts_catalog/array_utils.h b/src/ts_catalog/array_utils.h
index 2366057a516..799a81f509f 100644
--- a/src/ts_catalog/array_utils.h
+++ b/src/ts_catalog/array_utils.h
@@ -6,6 +6,8 @@
 #pragma once
 
 #include
+
+#include
 #include
 
 #include "export.h"
@@ -31,6 +33,3 @@ extern TSDLLEXPORT ArrayType *ts_array_add_element_text(ArrayType *arr, const ch
 
 extern TSDLLEXPORT ArrayType *ts_array_replace_text(ArrayType *arr, const char *old,
                                                     const char *new);
-
-extern TSDLLEXPORT ArrayType *ts_array_create_from_list_bool(List *values);
-extern TSDLLEXPORT ArrayType *ts_array_create_from_list_text(List *values);
diff --git a/src/ts_catalog/compression_chunk_size.c b/src/ts_catalog/compression_chunk_size.c
index 67536debb7d..7aeca116e0e 100644
--- a/src/ts_catalog/compression_chunk_size.c
+++ b/src/ts_catalog/compression_chunk_size.c
@@ -5,10 +5,10 @@
 */
 #include
 
-#include "ts_catalog/compression_chunk_size.h"
-#include "ts_catalog/catalog.h"
-#include "scanner.h"
 #include "scan_iterator.h"
+#include "scanner.h"
+#include "ts_catalog/catalog.h"
+#include "ts_catalog/compression_chunk_size.h"
 
 static void
 init_scan_by_uncompressed_chunk_id(ScanIterator *iterator, int32 uncompressed_chunk_id)
@@ -37,95 +37,3 @@ ts_compression_chunk_size_delete(int32 uncompressed_chunk_id)
 	}
 	return count;
 }
-
-TotalSizes
-ts_compression_chunk_size_totals()
-{
-	TotalSizes sizes = { 0 };
-	ScanIterator iterator =
-		ts_scan_iterator_create(COMPRESSION_CHUNK_SIZE, AccessExclusiveLock, CurrentMemoryContext);
-
-	ts_scanner_foreach(&iterator)
-	{
-		bool nulls[Natts_compression_chunk_size];
-		Datum values[Natts_compression_chunk_size];
-		FormData_compression_chunk_size fd;
-		bool should_free;
-		HeapTuple tuple = ts_scan_iterator_fetch_heap_tuple(&iterator, false, &should_free);
-
-		heap_deform_tuple(tuple, ts_scan_iterator_tupledesc(&iterator), values, nulls);
-		memset(&fd, 0, sizeof(FormData_compression_chunk_size));
-
-		Assert(!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_uncompressed_heap_size)]);
-		Assert(
-			!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_uncompressed_index_size)]);
-		Assert(
-			!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_uncompressed_toast_size)]);
-		Assert(!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_compressed_heap_size)]);
-		Assert(!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_compressed_index_size)]);
-		Assert(!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_compressed_toast_size)]);
-		fd.uncompressed_heap_size = DatumGetInt64(
-			values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_uncompressed_heap_size)]);
-		fd.uncompressed_index_size = DatumGetInt64(
-			values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_uncompressed_index_size)]);
-		fd.uncompressed_toast_size = DatumGetInt64(
-			values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_uncompressed_toast_size)]);
-		fd.compressed_heap_size = DatumGetInt64(
-			values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_compressed_heap_size)]);
-		fd.compressed_index_size = DatumGetInt64(
-			values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_compressed_index_size)]);
-		fd.compressed_toast_size = DatumGetInt64(
-			values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_compressed_toast_size)]);
-
-		sizes.uncompressed_heap_size += fd.uncompressed_heap_size;
-		sizes.uncompressed_index_size += fd.uncompressed_index_size;
-		sizes.uncompressed_toast_size += fd.uncompressed_toast_size;
-		sizes.compressed_heap_size += fd.compressed_heap_size;
-		sizes.compressed_index_size += fd.compressed_index_size;
-		sizes.compressed_toast_size += fd.compressed_toast_size;
-
-		if (should_free)
-			heap_freetuple(tuple);
-	}
-
-	return sizes;
-}
-
-/* Return the pre-compression row count for the chunk */
-int64
-ts_compression_chunk_size_row_count(int32 uncompressed_chunk_id)
-{
-	int found_cnt = 0;
-	int64 rowcnt = 0;
-	ScanIterator iterator =
-		ts_scan_iterator_create(COMPRESSION_CHUNK_SIZE, AccessShareLock, CurrentMemoryContext);
-	init_scan_by_uncompressed_chunk_id(&iterator, uncompressed_chunk_id);
-	ts_scanner_foreach(&iterator)
-	{
-		bool nulls[Natts_compression_chunk_size];
-		Datum values[Natts_compression_chunk_size];
-		bool should_free;
-		HeapTuple tuple = ts_scan_iterator_fetch_heap_tuple(&iterator, false, &should_free);
-
-		heap_deform_tuple(tuple, ts_scan_iterator_tupledesc(&iterator), values, nulls);
-		if (!nulls[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_pre_compression)])
-			rowcnt = DatumGetInt64(values[AttrNumberGetAttrOffset(
-				Anum_compression_chunk_size_numrows_pre_compression)]);
-		if (should_free)
-			heap_freetuple(tuple);
-		found_cnt++;
-	}
-	if (found_cnt != 1)
-	{
-		/* We do not want to error here because this runs as part of VACUUM
-		 * and we want that to successfully finish even if metadata for a
-		 * chunk is incomplete here.
-		 */
-		rowcnt = 0;
-		elog(WARNING,
-			 "no unique record for chunk with id %d in %s",
-			 uncompressed_chunk_id,
-			 COMPRESSION_CHUNK_SIZE_TABLE_NAME);
-	}
-	return rowcnt;
-}
diff --git a/src/ts_catalog/compression_chunk_size.h b/src/ts_catalog/compression_chunk_size.h
index a88bdb54d13..ab097dd2969 100644
--- a/src/ts_catalog/compression_chunk_size.h
+++ b/src/ts_catalog/compression_chunk_size.h
@@ -9,16 +9,3 @@
 #include
 
 extern TSDLLEXPORT int ts_compression_chunk_size_delete(int32 uncompressed_chunk_id);
-
-typedef struct TotalSizes
-{
-	int64 uncompressed_heap_size;
-	int64 uncompressed_toast_size;
-	int64 uncompressed_index_size;
-	int64 compressed_heap_size;
-	int64 compressed_toast_size;
-	int64 compressed_index_size;
-} TotalSizes;
-
-extern TSDLLEXPORT TotalSizes ts_compression_chunk_size_totals(void);
-extern TSDLLEXPORT int64 ts_compression_chunk_size_row_count(int32 uncompressed_chunk_id);
diff --git a/tsl/src/chunk_api.c b/tsl/src/chunk_api.c
index c291e739cc6..782d4b1b9fc 100644
--- a/tsl/src/chunk_api.c
+++ b/tsl/src/chunk_api.c
@@ -4,6 +4,7 @@
 * LICENSE-TIMESCALE for a copy of the license.
 */
 #include
+
 #include
 #include
 #include
@@ -28,17 +29,16 @@
 #include
 #include
 
-#include "compat/compat.h"
-#include "chunk.h"
 #include "chunk_api.h"
-#include "errors.h"
+#include "chunk.h"
+#include "compat/compat.h"
 #include "error_utils.h"
+#include "errors.h"
 #include "hypercube.h"
 #include "hypertable_cache.h"
-#include "utils.h"
-
 #include "ts_catalog/array_utils.h"
 #include "ts_catalog/catalog.h"
+#include "utils.h"
 
 /*
 * These values come from the pg_type table.
 */
@@ -406,43 +406,6 @@ enum Anum_chunk_relstats
 	_Anum_chunk_relstats_max,
 };
 
-/*
- * Construct a tuple for the get_chunk_relstats SQL function.
- */
-static HeapTuple
-chunk_get_single_stats_tuple(Chunk *chunk, TupleDesc tupdesc)
-{
-	HeapTuple ctup;
-	Form_pg_class pgcform;
-	Datum values[_Anum_chunk_relstats_max];
-	bool nulls[_Anum_chunk_relstats_max] = { false };
-	Datum reltuples;
-
-	ctup = SearchSysCache1(RELOID, ObjectIdGetDatum(chunk->table_id));
-
-	if (!HeapTupleIsValid(ctup))
-		elog(ERROR,
-			 "pg_class entry for chunk \"%s.%s\" not found",
-			 NameStr(chunk->fd.schema_name),
-			 NameStr(chunk->fd.table_name));
-
-	pgcform = (Form_pg_class) GETSTRUCT(ctup);
-
-	values[AttrNumberGetAttrOffset(Anum_chunk_relstats_chunk_id)] = Int32GetDatum(chunk->fd.id);
-	values[AttrNumberGetAttrOffset(Anum_chunk_relstats_hypertable_id)] =
-		Int32GetDatum(chunk->fd.hypertable_id);
-	values[AttrNumberGetAttrOffset(Anum_chunk_relstats_num_pages)] =
-		Int32GetDatum(pgcform->relpages);
-	reltuples = Float4GetDatum(pgcform->reltuples > 0 ? pgcform->reltuples : 0);
-	values[AttrNumberGetAttrOffset(Anum_chunk_relstats_num_tuples)] = reltuples;
-	values[AttrNumberGetAttrOffset(Anum_chunk_relstats_num_allvisible)] =
-		Int32GetDatum(pgcform->relallvisible);
-
-	ReleaseSysCache(ctup);
-
-	return heap_form_tuple(tupdesc, values, nulls);
-}
-
 enum Anum_chunk_colstats
 {
 	Anum_chunk_colstats_chunk_id = 1,
@@ -496,485 +459,6 @@ enum OpArrayTypeIdx
 
 #define LargSubarrayForOpArray(op_string_array) (&(op_string_array)[ENCODED_OP_LARG_NAME])
 #define RargSubarrayForOpArray(op_string_array) (&(op_string_array)[ENCODED_OP_RARG_NAME])
 
-static void
-convert_type_oid_to_strings(Oid type_id, Datum *result_strings)
-{
-	Form_pg_type type;
-	Form_pg_namespace namespace;
-	HeapTuple namespace_tuple;
-	HeapTuple type_tuple;
-
-	type_tuple = SearchSysCache1(TYPEOID, type_id);
-	Assert(HeapTupleIsValid(type_tuple));
-	type = (Form_pg_type) GETSTRUCT(type_tuple);
-	result_strings[ENCODED_TYPE_NAME] = PointerGetDatum(pstrdup(NameStr(type->typname)));
-
-	namespace_tuple = SearchSysCache1(NAMESPACEOID, type->typnamespace);
-	Assert(HeapTupleIsValid(namespace_tuple));
-	namespace = (Form_pg_namespace) GETSTRUCT(namespace_tuple);
-	result_strings[ENCODED_TYPE_NAMESPACE] = PointerGetDatum(pstrdup(NameStr(namespace->nspname)));
-	ReleaseSysCache(namespace_tuple);
-	ReleaseSysCache(type_tuple);
-}
-
-static void
-convert_op_oid_to_strings(Oid op_id, Datum *result_strings)
-{
-	Form_pg_operator operator;
-	Form_pg_namespace namespace;
-	HeapTuple operator_tuple;
-	HeapTuple namespace_tuple;
-
-	operator_tuple = SearchSysCache1(OPEROID, op_id);
-	Assert(HeapTupleIsValid(operator_tuple));
-	operator=(Form_pg_operator) GETSTRUCT(operator_tuple);
-	result_strings[ENCODED_OP_NAME] = PointerGetDatum(pstrdup(NameStr(operator->oprname)));
-
-	namespace_tuple = SearchSysCache1(NAMESPACEOID, operator->oprnamespace);
-	Assert(HeapTupleIsValid(namespace_tuple));
-	namespace = (Form_pg_namespace) GETSTRUCT(namespace_tuple);
-	result_strings[ENCODED_OP_NAMESPACE] =
-		PointerGetDatum(pstrdup(NameStr(namespace->nspname)));
-	ReleaseSysCache(namespace_tuple);
-
-	convert_type_oid_to_strings(operator->oprleft, LargSubarrayForOpArray(result_strings));
-	convert_type_oid_to_strings(operator->oprright, RargSubarrayForOpArray(result_strings));
-
-	ReleaseSysCache(operator_tuple);
-}
-
-static void
-collect_colstat_slots(const HeapTuple tuple, const Form_pg_statistic formdata, Datum *values,
-					  bool *nulls)
-{
-	/* Fetching the values and/or numbers fields for a slot requires knowledge of which fields are
-	 * present for each kind of stats. Note that this doesn't support custom statistics.
-	 */
-	static const int statistic_kind_slot_fields[STATISTIC_KIND_BOUNDS_HISTOGRAM + 1] = {
-		0,
-		ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS, /* MCV */
-		ATTSTATSSLOT_VALUES,                        /* HISTOGRAM */
-		ATTSTATSSLOT_NUMBERS,                       /* CORRELATION */
-		ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS, /* MCELEM */
-		ATTSTATSSLOT_NUMBERS,                       /* DECHIST */
-		/* ATTSTATSSLOT_VALUES is not always present for all HISTOGRAM operators.. */
-		ATTSTATSSLOT_NUMBERS, /* RANGE_LENGTH_HISTOGRAM */
-		ATTSTATSSLOT_VALUES   /* BOUNDS_HISTOGRAM */
-	};
-
-	int i;
-	Datum slotkind[STATISTIC_NUM_SLOTS];
-	Datum op_strings[STRINGS_PER_OP_OID * STATISTIC_NUM_SLOTS];
-	Datum slot_collation[STATISTIC_NUM_SLOTS];
-	Datum value_type_strings[STRINGS_PER_TYPE_OID * STATISTIC_NUM_SLOTS];
-	ArrayType *array;
-	int nopstrings = 0;
-	int nvalstrings = 0;
-
-	for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
-	{
-		int16 kind = ((int16 *) (&formdata->stakind1))[i];
-		Datum slot_op = ObjectIdGetDatum(((Oid *) &formdata->staop1)[i]);
-		AttStatsSlot stat_slot;
-		int slot_fields;
-		const int numbers_idx = AttrNumberGetAttrOffset(Anum_chunk_colstats_slot1_numbers) + i;
-		const int values_idx = AttrNumberGetAttrOffset(Anum_chunk_colstats_slot1_values) + i;
-
-		slot_collation[i] = ObjectIdGetDatum(((Oid *) &formdata->stacoll1)[i]);
-
-		slotkind[i] = ObjectIdGetDatum(kind);
-
-		/*
-		 * As per comments in pg_statistic_d.h, "kind" codes from 0 - 99 are reserved
-		 * for assignment by the core PostgreSQL project. Beyond that are for PostGIS
-		 * and other projects
-		 */
-#define PG_STATS_KINDS_MAX 99
-
-		if (!OidIsValid(kind) || kind > PG_STATS_KINDS_MAX)
-		{
-			nulls[numbers_idx] = true;
-			nulls[values_idx] = true;
-			continue;
-		}
-
-		/* slot_op can be invalid for some "kinds" like STATISTIC_KIND_BOUNDS_HISTOGRAM */
-		if (OidIsValid(slot_op))
-		{
-			convert_op_oid_to_strings(slot_op, op_strings + nopstrings);
-			nopstrings += STRINGS_PER_OP_OID;
-		}
-
-		if (kind > STATISTIC_KIND_BOUNDS_HISTOGRAM)
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("unable to fetch user defined statistics from data nodes")));
-
-		slot_fields = statistic_kind_slot_fields[kind];
-
-		get_attstatsslot(&stat_slot, tuple, kind, InvalidOid, slot_fields);
-
-		if (slot_fields & ATTSTATSSLOT_NUMBERS)
-		{
-			Datum *stanumbers = palloc(sizeof(Datum) * stat_slot.nnumbers);
-			int j;
-
-			for (j = 0; j < stat_slot.nnumbers; j++)
-				stanumbers[j] = Float4GetDatum(stat_slot.numbers[j]);
-
-			array = construct_array(stanumbers,
-									stat_slot.nnumbers,
-									FLOAT4OID,
-									FLOAT4_TYPELEN,
-									FLOAT4_TYPEBYVAL,
-									FLOAT4_TYPEALIGN);
-			values[numbers_idx] = PointerGetDatum(array);
-		}
-		else
-			nulls[numbers_idx] = true;
-
-		if (slot_fields & ATTSTATSSLOT_VALUES)
-		{
-			Datum *encoded_value_ary = palloc0(sizeof(Datum) * stat_slot.nvalues);
-			HeapTuple type_tuple = SearchSysCache1(TYPEOID, stat_slot.valuetype);
-			Form_pg_type type;
-			int k;
-
-			Assert(HeapTupleIsValid(type_tuple));
-			type = (Form_pg_type) GETSTRUCT(type_tuple);
-			convert_type_oid_to_strings(stat_slot.valuetype, value_type_strings + nvalstrings);
-			nvalstrings += STRINGS_PER_TYPE_OID;
-
-			for (k = 0; k < stat_slot.nvalues; ++k)
-				encoded_value_ary[k] = OidFunctionCall1(type->typoutput, stat_slot.values[k]);
-
-			array = construct_array(encoded_value_ary,
-									stat_slot.nvalues,
-									CSTRINGOID,
-									CSTRING_TYPELEN,
-									CSTRING_TYPEBYVAL,
-									CSTRING_TYPEALIGN);
-			values[values_idx] = PointerGetDatum(array);
-
-			ReleaseSysCache(type_tuple);
-		}
-		else
-			nulls[values_idx] = true;
-
-		free_attstatsslot(&stat_slot);
-	}
-
-	array = construct_array(slotkind,
-							STATISTIC_NUM_SLOTS,
-							INT4OID,
-							INT4_TYPELEN,
-							INT4_TYPEBYVAL,
-							INT4_TYPEALIGN);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_slot_kinds)] = PointerGetDatum(array);
-	array = construct_array(op_strings,
-							nopstrings,
-							CSTRINGOID,
-							CSTRING_TYPELEN,
-							CSTRING_TYPEBYVAL,
-							CSTRING_TYPEALIGN);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_slot_op_strings)] = PointerGetDatum(array);
-	array = construct_array(slot_collation,
-							STATISTIC_NUM_SLOTS,
-							OIDOID,
-							OID_TYPELEN,
-							OID_TYPEBYVAL,
-							OID_TYPEALIGN);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_slot_collations)] = PointerGetDatum(array);
-	array = construct_array(value_type_strings,
-							nvalstrings,
-							CSTRINGOID,
-							CSTRING_TYPELEN,
-							CSTRING_TYPEBYVAL,
-							CSTRING_TYPEALIGN);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_slot_valtype_strings)] =
-		PointerGetDatum(array);
-}
-
-/*
- * Construct a tuple for the get_chunk_relstats SQL function.
- */
-static HeapTuple
-chunk_get_single_colstats_tuple(Chunk *chunk, int column, TupleDesc tupdesc)
-{
-	HeapTuple ctup;
-	Form_pg_statistic pgsform;
-	Datum values[_Anum_chunk_colstats_max];
-	bool nulls[_Anum_chunk_colstats_max] = { false };
-	bool dropped;
-
-	if (DatumGetBool(DirectFunctionCall1(row_security_active, ObjectIdGetDatum(chunk->table_id))))
-		return NULL;
-
-	ctup = SearchSysCache2(ATTNUM, ObjectIdGetDatum(chunk->table_id), column);
-	if (!HeapTupleIsValid(ctup))
-		return NULL;
-
-	dropped = ((Form_pg_attribute) GETSTRUCT(ctup))->attisdropped;
-	ReleaseSysCache(ctup);
-
-	if (dropped)
-		return NULL;
-
-	if (!DatumGetBool(DirectFunctionCall3(has_column_privilege_id_attnum,
-										  ObjectIdGetDatum(chunk->table_id),
-										  Int16GetDatum(column),
-										  PointerGetDatum(cstring_to_text("SELECT")))))
-		return NULL;
-
-	ctup = SearchSysCache3(STATRELATTINH, ObjectIdGetDatum(chunk->table_id), column, false);
-
-	/* pg_statistics will not have an entry for an unanalyzed table */
-	if (!HeapTupleIsValid(ctup))
-		return NULL;
-
-	pgsform = (Form_pg_statistic) GETSTRUCT(ctup);
-
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_chunk_id)] = Int32GetDatum(chunk->fd.id);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_hypertable_id)] =
-		Int32GetDatum(chunk->fd.hypertable_id);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_column_id)] = Int32GetDatum(column);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_nullfrac)] =
-		Float4GetDatum(pgsform->stanullfrac);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_width)] = Int32GetDatum(pgsform->stawidth);
-	values[AttrNumberGetAttrOffset(Anum_chunk_colstats_distinct)] =
-		Float4GetDatum(pgsform->stadistinct);
-
-	collect_colstat_slots(ctup, pgsform, values, nulls);
-
-	ReleaseSysCache(ctup);
-
-	return heap_form_tuple(tupdesc, values, nulls);
-}
-
-/*
- * StatsProcessContext filters out duplicate stats from replica chunks.
- *
- * When processing chunk stats from data nodes, we might receive the same
- * stats from multiple data nodes when native replication is enabled. With the
- * StatsProcessContext we can filter out the duplicates, and ensure we only
- * add the stats once. Without the filtering, we will get errors (e.g., unique
- * violations).
- *
- * We could elide the filtering if we requested stats only for the chunks that
- * are "primary", but that requires the ability to specify the specific remote
- * chunks to retrieve stats for rather than specifying "all chunks" for the
- * given hypertable.
- */
-typedef struct ChunkAttKey
-{
-	Oid chunk_relid;
-	Index attnum;
-} ChunkAttKey;
-
-typedef struct StatsProcessContext
-{
-	HTAB *htab;
-	MemoryContext per_tuple_mcxt;
-} StatsProcessContext;
-
-static void *
-chunk_api_generate_relstats_context(List *oids)
-{
-	return list_copy(oids);
-}
-
-static HeapTuple
-chunk_api_fetch_next_relstats_tuple(FuncCallContext *funcctx)
-{
-	List *chunk_oids = (List *) funcctx->user_fctx;
-	Oid relid;
-	Chunk *chunk;
-
-	if (chunk_oids == NIL)
-		return NULL;
-
-	relid = linitial_oid(chunk_oids);
-	chunk = ts_chunk_get_by_relid(relid, true);
-
-	return chunk_get_single_stats_tuple(chunk, funcctx->tuple_desc);
-}
-
-static void
-chunk_api_iterate_relstats_context(FuncCallContext *funcctx)
-{
-	List *chunk_oids = (List *) funcctx->user_fctx;
-	MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
-	chunk_oids = list_delete_first(chunk_oids);
-	funcctx->user_fctx = chunk_oids;
-	MemoryContextSwitchTo(oldcontext);
-}
-
-typedef struct ColStatContext
-{
-	List *chunk_oids;
-	int col_id;
-	int nattrs;
-} ColStatContext;
-
-static void *
-chunk_api_generate_colstats_context(List *oids, Oid ht_relid)
-{
-	ColStatContext *ctx = palloc0(sizeof(ColStatContext));
-
-	ctx->chunk_oids = list_copy(oids);
-	ctx->col_id = 1;
-	ctx->nattrs = ts_get_relnatts(ht_relid);
-
-	return ctx;
-}
-
-static HeapTuple
-chunk_api_fetch_next_colstats_tuple(FuncCallContext *funcctx)
-{
-	ColStatContext *ctx = funcctx->user_fctx;
-	HeapTuple tuple = NULL;
-
-	while (tuple == NULL && ctx->chunk_oids != NIL)
-	{
-		Oid relid = linitial_oid(ctx->chunk_oids);
-		Chunk *chunk = ts_chunk_get_by_relid(relid, true);
-
-		tuple = chunk_get_single_colstats_tuple(chunk, ctx->col_id, funcctx->tuple_desc);
-
-		/* This loop looks a bit odd as col_id tracks the postgres column id, which is indexed
-		 * starting at 1 */
-		while (tuple == NULL && ctx->col_id < ctx->nattrs)
-		{
-			++ctx->col_id;
-			tuple = chunk_get_single_colstats_tuple(chunk, ctx->col_id, funcctx->tuple_desc);
-		}
-
-		if (tuple == NULL)
-		{
-			MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-			ctx->chunk_oids = list_delete_first(ctx->chunk_oids);
-			ctx->col_id = 1;
-			MemoryContextSwitchTo(oldcontext);
-		}
-	}
-
-	return tuple;
-}
-
-static void
-chunk_api_iterate_colstats_context(FuncCallContext *funcctx)
-{
-	ColStatContext *ctx = funcctx->user_fctx;
-	MemoryContext oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
-	++ctx->col_id;
-	if (AttrNumberGetAttrOffset(ctx->col_id) >= ctx->nattrs)
-	{
-		ctx->chunk_oids = list_delete_first(ctx->chunk_oids);
-		ctx->col_id = 1;
-	}
-	MemoryContextSwitchTo(oldcontext);
-}
-
-#define GET_CHUNK_RELSTATS_NAME "get_chunk_relstats"
-#define GET_CHUNK_COLSTATS_NAME "get_chunk_colstats"
-
-/*
- * Get relation stats or column stats for chunks.
- *
- * This function takes a hypertable or chunk as input (regclass). In case of a
- * hypertable, it will get the relstats for all the chunks in the hypertable,
- * otherwise only the given chunk.
- */
-static Datum
-chunk_api_get_chunk_stats(FunctionCallInfo fcinfo, bool col_stats)
-{
-	FuncCallContext *funcctx;
-	HeapTuple tuple;
-
-	if (SRF_IS_FIRSTCALL())
-	{
-		Oid relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
-		MemoryContext oldcontext;
-		TupleDesc tupdesc;
-		Cache *hcache;
-		Hypertable *ht;
-		List *chunk_oids = NIL;
-		Oid ht_relid = InvalidOid;
-
-		if (!OidIsValid(relid))
-			ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid table")));
-
-		ht = ts_hypertable_cache_get_cache_and_entry(relid, CACHE_FLAG_MISSING_OK, &hcache);
-
-		if (NULL == ht)
-		{
-			Chunk *chunk = ts_chunk_get_by_relid(relid, false);
-
-			if (NULL == chunk)
-				ereport(ERROR,
-						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("must be a hypertable or chunk")));
-
-			chunk_oids = list_make1_oid(chunk->table_id);
-
-			/* We'll need the hypertable if fetching column stats */
-			if (col_stats)
-				ht = ts_hypertable_get_by_id(chunk->fd.hypertable_id);
-		}
-		else
-		{
-			chunk_oids = find_inheritance_children(relid, NoLock);
-		}
-
-		if (ht)
-			ht_relid = ht->main_table_relid;
-		ts_cache_release(hcache);
-
-		funcctx = SRF_FIRSTCALL_INIT();
-		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
-
-		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
-			ereport(ERROR,
-					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("function returning record called in context "
-							"that cannot accept type record")));
-
-		/* Save the chunk oid list on the multi-call memory context so that it
-		 * survives across multiple calls to this function (until SRF is
-		 * done). */
-		funcctx->user_fctx = col_stats ? chunk_api_generate_colstats_context(chunk_oids, ht_relid) :
-										 chunk_api_generate_relstats_context(chunk_oids);
-		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
-		MemoryContextSwitchTo(oldcontext);
-	}
-
-	funcctx = SRF_PERCALL_SETUP();
-	tuple = col_stats ? chunk_api_fetch_next_colstats_tuple(funcctx) :
-						chunk_api_fetch_next_relstats_tuple(funcctx);
-
-	if (tuple == NULL)
-		SRF_RETURN_DONE(funcctx);
-
-	if (col_stats)
-		chunk_api_iterate_colstats_context(funcctx);
-	else
-		chunk_api_iterate_relstats_context(funcctx);
-
-	SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
-}
-
-Datum
-chunk_api_get_chunk_colstats(PG_FUNCTION_ARGS)
-{
-	return chunk_api_get_chunk_stats(fcinfo, true);
-}
-
-Datum
-chunk_api_get_chunk_relstats(PG_FUNCTION_ARGS)
-{
-	return chunk_api_get_chunk_stats(fcinfo, false);
-}
-
 Datum
 chunk_create_empty_table(PG_FUNCTION_ARGS)
 {
diff --git a/tsl/src/chunk_api.h b/tsl/src/chunk_api.h
index f1a240317ca..5b46e09b97e 100644
--- a/tsl/src/chunk_api.h
+++ b/tsl/src/chunk_api.h
@@ -13,6 +13,4 @@
 extern Datum chunk_status(PG_FUNCTION_ARGS);
 extern Datum chunk_show(PG_FUNCTION_ARGS);
 extern Datum chunk_create(PG_FUNCTION_ARGS);
-extern Datum chunk_api_get_chunk_relstats(PG_FUNCTION_ARGS);
-extern Datum chunk_api_get_chunk_colstats(PG_FUNCTION_ARGS);
 extern Datum chunk_create_empty_table(PG_FUNCTION_ARGS);
diff --git a/tsl/src/nodes/CMakeLists.txt b/tsl/src/nodes/CMakeLists.txt
index d3eecdba8ad..d58012a76fd 100644
--- a/tsl/src/nodes/CMakeLists.txt
+++ b/tsl/src/nodes/CMakeLists.txt
@@ -1,6 +1,5 @@
 set(SOURCES)
 target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES})
-add_subdirectory(compress_dml)
 add_subdirectory(decompress_chunk)
 add_subdirectory(frozen_chunk_dml)
 add_subdirectory(gapfill)
diff --git a/tsl/src/nodes/compress_dml/CMakeLists.txt b/tsl/src/nodes/compress_dml/CMakeLists.txt
deleted file mode 100644
index 16d04338a35..00000000000
--- a/tsl/src/nodes/compress_dml/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/compress_dml.c)
-target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES})
diff --git a/tsl/src/nodes/compress_dml/compress_dml.c b/tsl/src/nodes/compress_dml/compress_dml.c
deleted file mode 100644
index 0d65134d10d..00000000000
--- a/tsl/src/nodes/compress_dml/compress_dml.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * This file and its contents are licensed under the Timescale License.
- * Please see the included NOTICE for copyright information and
- * LICENSE-TIMESCALE for a copy of the license.
- */
-
-#include
-#include
-#include
-#include
-
-#include "compat/compat.h"
-#include "chunk.h"
-#include "hypertable.h"
-#include "compress_dml.h"
-#include "utils.h"
-
-/*Path, Plan and State node for processing dml on compressed chunks
- * For now, this just blocks updates/deletes on compressed chunks
- * since trigger based approach does not work
- */
-
-static Path *compress_chunk_dml_path_create(Path *subpath, Oid chunk_relid);
-static Plan *compress_chunk_dml_plan_create(PlannerInfo *root, RelOptInfo *relopt,
-											CustomPath *best_path, List *tlist, List *clauses,
-											List *custom_plans);
-static Node *compress_chunk_dml_state_create(CustomScan *scan);
-
-static void compress_chunk_dml_begin(CustomScanState *node, EState *estate, int eflags);
-static TupleTableSlot *compress_chunk_dml_exec(CustomScanState *node);
-static void compress_chunk_dml_end(CustomScanState *node);
-static void compress_chunk_dml_rescan(CustomScanState *node);
-
-static CustomPathMethods compress_chunk_dml_path_methods = {
-	.CustomName = "CompressChunkDml",
-	.PlanCustomPath = compress_chunk_dml_plan_create,
-};
-
-static CustomScanMethods compress_chunk_dml_plan_methods = {
-	.CustomName = "CompressChunkDml",
-	.CreateCustomScanState = compress_chunk_dml_state_create,
-};
-
-static CustomExecMethods compress_chunk_dml_state_methods = {
-	.CustomName = COMPRESS_CHUNK_DML_STATE_NAME,
-	.BeginCustomScan = compress_chunk_dml_begin,
-	.EndCustomScan = compress_chunk_dml_end,
-	.ExecCustomScan = compress_chunk_dml_exec,
-	.ReScanCustomScan = compress_chunk_dml_rescan,
-};
-
-static void
-compress_chunk_dml_begin(CustomScanState *node, EState *estate, int eflags)
-{
-	CustomScan *cscan = castNode(CustomScan, node->ss.ps.plan);
-	Plan *subplan = linitial(cscan->custom_plans);
-	node->custom_ps = list_make1(ExecInitNode(subplan, estate, eflags));
-}
-
-/*
- * nothing to reset for rescan in dml blocker
- */
-static void
-compress_chunk_dml_rescan(CustomScanState *node)
-{
-}
-
-/* we cannot update/delete rows if we have a compressed chunk. so
- * throw an error. Note this subplan will return 0 tuples as the chunk is empty
- * and all rows are saved in the compressed chunk.
- */
-static TupleTableSlot *
-compress_chunk_dml_exec(CustomScanState *node)
-{
-	CompressChunkDmlState *state = (CompressChunkDmlState *) node;
-	Oid chunk_relid = state->chunk_relid;
-	elog(ERROR,
-		 "cannot update/delete rows from chunk \"%s\" as it is compressed",
-		 get_rel_name(chunk_relid));
-	return NULL;
-}
-
-static void
-compress_chunk_dml_end(CustomScanState *node)
-{
-	PlanState *substate = linitial(node->custom_ps);
-	ExecEndNode(substate);
-}
-
-static Path *
-compress_chunk_dml_path_create(Path *subpath, Oid chunk_relid)
-{
-	CompressChunkDmlPath *path = (CompressChunkDmlPath *) palloc0(sizeof(CompressChunkDmlPath));
-
-	memcpy(&path->cpath.path, subpath, sizeof(Path));
-	path->cpath.path.type = T_CustomPath;
-	path->cpath.path.pathtype = T_CustomScan;
-	path->cpath.path.parent = subpath->parent;
-	path->cpath.path.pathtarget = subpath->pathtarget;
-	path->cpath.methods = &compress_chunk_dml_path_methods;
-	path->cpath.custom_paths = list_make1(subpath);
-	path->chunk_relid = chunk_relid;
-
-	return &path->cpath.path;
-}
-
-static Plan *
-compress_chunk_dml_plan_create(PlannerInfo *root, RelOptInfo *relopt, CustomPath *best_path,
-							   List *tlist, List *clauses, List *custom_plans)
-{
-	CompressChunkDmlPath *cdpath = (CompressChunkDmlPath *) best_path;
-	CustomScan *cscan = makeNode(CustomScan);
-
-	Assert(list_length(custom_plans) == 1);
-
-	cscan->methods = &compress_chunk_dml_plan_methods;
-	cscan->custom_plans = custom_plans;
-	cscan->scan.scanrelid = relopt->relid;
-	cscan->scan.plan.targetlist = tlist;
-	cscan->custom_scan_tlist = NIL;
-	cscan->custom_private = list_make1_oid(cdpath->chunk_relid);
-	return &cscan->scan.plan;
-}
-
-static Node *
-compress_chunk_dml_state_create(CustomScan *scan)
-{
-	CompressChunkDmlState *state;
-
-	state = (CompressChunkDmlState *) newNode(sizeof(CompressChunkDmlState), T_CustomScanState);
-	state->chunk_relid = linitial_oid(scan->custom_private);
-	state->cscan_state.methods = &compress_chunk_dml_state_methods;
-	return (Node *) state;
-}
-
-Path *
-compress_chunk_dml_generate_paths(Path *subpath, Chunk *chunk)
-{
-	Assert(chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID);
-	return compress_chunk_dml_path_create(subpath, chunk->table_id);
-}
diff --git a/tsl/src/nodes/compress_dml/compress_dml.h b/tsl/src/nodes/compress_dml/compress_dml.h
deleted file mode 100644
index 376e8b151a6..00000000000
--- a/tsl/src/nodes/compress_dml/compress_dml.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file and its contents are licensed under the Timescale License.
- * Please see the included NOTICE for copyright information and
- * LICENSE-TIMESCALE for a copy of the license.
- */
-#pragma once
-
-#include
-#include
-#include
-
-#include "hypertable.h"
-
-typedef struct CompressChunkDmlPath
-{
-	CustomPath cpath;
-	Oid chunk_relid;
-} CompressChunkDmlPath;
-
-typedef struct CompressChunkDmlState
-{
-	CustomScanState cscan_state;
-	Oid chunk_relid;
-} CompressChunkDmlState;
-
-Path *compress_chunk_dml_generate_paths(Path *subpath, Chunk *chunk);
-
-#define COMPRESS_CHUNK_DML_STATE_NAME "CompressChunkDmlState"
diff --git a/tsl/src/planner.c b/tsl/src/planner.c
index 1d063a3f11a..d3c4aa52687 100644
--- a/tsl/src/planner.c
+++ b/tsl/src/planner.c
@@ -4,24 +4,24 @@
 * LICENSE-TIMESCALE for a copy of the license.
 */
 #include
+
 #include
 #include
-#include
-#include
 #include
 #include
+#include
+#include
 
-#include "nodes/skip_scan/skip_scan.h"
 #include "chunk.h"
 #include "compat/compat.h"
 #include "continuous_aggs/planner.h"
 #include "guc.h"
 #include "hypertable_cache.h"
 #include "hypertable.h"
-#include "nodes/compress_dml/compress_dml.h"
-#include "nodes/frozen_chunk_dml/frozen_chunk_dml.h"
 #include "nodes/decompress_chunk/decompress_chunk.h"
+#include "nodes/frozen_chunk_dml/frozen_chunk_dml.h"
 #include "nodes/gapfill/gapfill.h"
+#include "nodes/skip_scan/skip_scan.h"
 #include "nodes/vector_agg/plan.h"
 #include "planner.h"