diff --git a/CHANGELOG.md b/CHANGELOG.md index 964c98818a3..1bfd4ede614 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,10 @@ accidentally triggering the load of a previous DB version.** **Major Features** **Minor Features** +* #1666 Support drop_chunks API for continuous aggregates **Bugfixes** -* #1648 Drop chunks for materialized hypertable +* #1648 Drop chunks from materialized hypertable * #1665 Add ignore_invalidation_older_than to timescaledb_information.continuous_aggregates view * #1668 Cannot add dimension if hypertable has empty chunks * #1674 Fix time_bucket_gapfill's interaction with GROUP BY diff --git a/src/chunk.c b/src/chunk.c index dea75e38694..b03dc18af06 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -2392,11 +2392,14 @@ ts_chunk_drop_process_materialization(Oid hypertable_relid, ts_cache_release(hcache); } +/* Continuous agg materialization hypertables can be dropped + * only if a user explicitly specifies the table name + */ List * ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, Oid newer_than_type, bool cascade, CascadeToMaterializationOption cascades_to_materializations, - int32 log_level) + int32 log_level, bool user_supplied_table_name) { uint64 i = 0; uint64 num_chunks = 0; @@ -2413,9 +2416,18 @@ ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_tha switch (ts_continuous_agg_hypertable_status(hypertable_id)) { case HypertableIsMaterialization: + if (user_supplied_table_name == false) + { + elog(ERROR, "cannot drop chunks on a continuous aggregate materialization table"); + } + has_continuous_aggs = false; + break; case HypertableIsMaterializationAndRaw: - elog(ERROR, "cannot drop_chunks on a continuous aggregate materialization table"); - pg_unreachable(); + if (user_supplied_table_name == false) + { + elog(ERROR, "cannot drop chunks on a continuous aggregate materialization table"); + } + has_continuous_aggs = true; break; case 
HypertableIsRawTable: if (cascades_to_materializations == CASCADE_TO_MATERIALIZATION_UNKNOWN) @@ -2485,7 +2497,8 @@ ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_tha older_than_type, newer_than_type, cascade, - log_level); + log_level, + user_supplied_table_name); } return dropped_chunk_names; } @@ -2550,6 +2563,7 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS) bool cascade, verbose; CascadeToMaterializationOption cascades_to_materializations; int elevel; + bool user_supplied_table_name = true; /* * When past the first call of the SRF, dropping has already been completed, @@ -2585,10 +2599,27 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS) if (table_name != NULL) { if (ht_oids == NIL) - ereport(ERROR, - (errcode(ERRCODE_TS_HYPERTABLE_NOT_EXIST), - errmsg("hypertable \"%s\" does not exist", NameStr(*table_name)))); + { + ContinuousAgg *ca = NULL; + ca = ts_continuous_agg_find_userview_name(schema_name ? NameStr(*schema_name) : NULL, + NameStr(*table_name)); + if (ca == NULL) + ereport(ERROR, + (errcode(ERRCODE_TS_HYPERTABLE_NOT_EXIST), + errmsg("\"%s\" is not a hypertable or a continuous aggregate view", + NameStr(*table_name)), + errhint("It is only possible to drop chunks from a hypertable or " + "continuous aggregate view"))); + else + { + int32 matid = ca->data.mat_hypertable_id; + Hypertable *mat_ht = ts_hypertable_get_by_id(matid); + ht_oids = lappend_oid(ht_oids, mat_ht->main_table_relid); + } + } } + else + user_supplied_table_name = false; /* Initial multi function call setup */ funcctx = SRF_FIRSTCALL_INIT(); @@ -2657,7 +2688,8 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS) newer_than_type, cascade, cascades_to_materializations, - elevel); + elevel, + user_supplied_table_name); dc_names = list_concat(dc_names, dc_temp); MemoryContextSwitchTo(oldcontext); diff --git a/src/chunk.h b/src/chunk.h index 9d2e7a9ccc0..51d49977e15 100644 --- a/src/chunk.h +++ b/src/chunk.h @@ -139,7 +139,7 @@ extern TSDLLEXPORT List * ts_chunk_do_drop_chunks(Oid 
table_relid, Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, Oid newer_than_type, bool cascade, CascadeToMaterializationOption cascades_to_materializations, - int32 log_level); + int32 log_level, bool user_supplied_table_name); extern TSDLLEXPORT Chunk * ts_chunk_get_chunks_in_time_range(Oid table_relid, Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, Oid newer_than_type, char *caller_name, diff --git a/src/continuous_agg.c b/src/continuous_agg.c index 7e9b60e6957..eace781378d 100644 --- a/src/continuous_agg.c +++ b/src/continuous_agg.c @@ -401,6 +401,41 @@ ts_continuous_agg_find_by_view_name(const char *schema, const char *name) return ca; } +ContinuousAgg * +ts_continuous_agg_find_userview_name(const char *schema, const char *name) +{ + ScanIterator iterator = + ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext); + ContinuousAgg *ca = NULL; + int count = 0; + const char *chkschema = schema; + + ts_scanner_foreach(&iterator) + { + ContinuousAggViewType vtyp; + FormData_continuous_agg *data = + (FormData_continuous_agg *) GETSTRUCT(ts_scan_iterator_tuple(&iterator)); + if (schema == NULL) + { + /* only user visible views will be returned */ + Oid relid = RelnameGetRelid(NameStr(data->user_view_name)); + if (relid == InvalidOid) + continue; + chkschema = NameStr(data->user_view_schema); + } + + vtyp = ts_continuous_agg_view_type(data, chkschema, name); + if (vtyp == ContinuousAggUserView) + { + ca = palloc0(sizeof(*ca)); + continuous_agg_init(ca, data); + count++; + } + } + Assert(count <= 1); + return ca; +} + ContinuousAgg * ts_continuous_agg_find_by_job_id(int32 job_id) { @@ -783,3 +818,51 @@ ts_continuous_agg_get_user_view_oid(ContinuousAgg *agg) elog(ERROR, "could not find user view for continuous agg"); return view_relid; } + +static int32 +find_raw_hypertable_for_materialization(int32 mat_hypertable_id) +{ + short count = 0; + int32 htid = INVALID_HYPERTABLE_ID; + ScanIterator iterator = + 
ts_scan_iterator_create(CONTINUOUS_AGG, RowExclusiveLock, CurrentMemoryContext); + + init_scan_by_mat_hypertable_id(&iterator, mat_hypertable_id); + ts_scanner_foreach(&iterator) + { + TupleInfo *ti = ts_scan_iterator_tuple_info(&iterator); + HeapTuple tuple = ti->tuple; + Form_continuous_agg form = (Form_continuous_agg) GETSTRUCT(tuple); + htid = form->raw_hypertable_id; + count++; + } + Assert(count <= 1); + ts_scan_iterator_close(&iterator); + return htid; +} + +/* Continuous aggregate materialization hypertables inherit integer_now func + * from the raw hypertable (unless it was explicitly reset for cont. aggregate). + * Walk the materialization hypertable ->raw hypertable tree till + * we find a hypertable that has integer_now_func set. + */ +TSDLLEXPORT Dimension * +ts_continous_agg_find_integer_now_func_by_materialization_id(int32 mat_htid) +{ + int32 raw_htid = mat_htid; + Dimension *par_dim = NULL; + while (raw_htid != INVALID_HYPERTABLE_ID) + { + Hypertable *raw_ht = ts_hypertable_get_by_id(raw_htid); + Dimension *open_dim = hyperspace_get_open_dimension(raw_ht->space, 0); + if (strlen(NameStr(open_dim->fd.integer_now_func)) != 0 && + strlen(NameStr(open_dim->fd.integer_now_func_schema)) != 0) + { + par_dim = open_dim; + break; + } + mat_htid = raw_htid; + raw_htid = find_raw_hypertable_for_materialization(mat_htid); + } + return par_dim; +} diff --git a/src/continuous_agg.h b/src/continuous_agg.h index 7f57c1a70f9..c1f5e13e5fb 100644 --- a/src/continuous_agg.h +++ b/src/continuous_agg.h @@ -90,5 +90,8 @@ extern void ts_continuous_agg_rename_view(char *old_schema, char *name, char *ne extern TSDLLEXPORT int32 ts_number_of_continuous_aggs(void); extern Oid ts_continuous_agg_get_user_view_oid(ContinuousAgg *agg); +extern TSDLLEXPORT Dimension * +ts_continous_agg_find_integer_now_func_by_materialization_id(int32 mat_htid); +extern ContinuousAgg *ts_continuous_agg_find_userview_name(const char *schema, const char *name); #endif /* TIMESCALEDB_CONTINUOUS_AGG_H 
*/ diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c index 3bc3040cd61..b30fe2b3482 100644 --- a/src/cross_module_fn.c +++ b/src/cross_module_fn.c @@ -347,7 +347,8 @@ static void continuous_agg_drop_chunks_by_chunk_id_default(int32 raw_hypertable_id, Chunk **chunks, Size num_chunks, Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, - Oid newer_than_type, bool cascade, int32 log_level) + Oid newer_than_type, bool cascade, int32 log_level, + bool user_supplied_table_name) { error_no_default_fn_community(); } diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h index bbdd4f413e6..1b1a2211e3a 100644 --- a/src/cross_module_fn.h +++ b/src/cross_module_fn.h @@ -71,7 +71,8 @@ typedef struct CrossModuleFunctions Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, Oid newer_than_type, - bool cascade, int32 log_level); + bool cascade, int32 log_level, + bool user_supplied_table_name); PGFunction continuous_agg_trigfn; void (*continuous_agg_update_options)(ContinuousAgg *cagg, WithClauseResult *with_clause_options); diff --git a/src/hypertable.c b/src/hypertable.c index d685de5b7c4..6bd5786ca0f 100644 --- a/src/hypertable.c +++ b/src/hypertable.c @@ -1987,7 +1987,14 @@ hypertable_tuple_match_name(TupleInfo *ti, void *data) if (!OidIsValid(schema_oid)) return SCAN_CONTINUE; - relid = get_relname_relid(NameStr(fd.table_name), schema_oid); + if (accum->schema_name == NULL) + { + /* only user visible tables will be returned */ + relid = RelnameGetRelid(NameStr(fd.table_name)); + } + else + relid = get_relname_relid(NameStr(fd.table_name), schema_oid); + if (!OidIsValid(relid)) return SCAN_CONTINUE; @@ -2000,13 +2007,12 @@ hypertable_tuple_match_name(TupleInfo *ti, void *data) NameGetDatum(accum->table_name), NameGetDatum(&fd.table_name))))) accum->ht_oids = lappend_oid(accum->ht_oids, relid); - return SCAN_CONTINUE; } /* * Used for drop_chunks. Either name can be NULL, which indicates matching on - * all possible names. 
+ * all possible names visible in search path. */ List * ts_hypertable_get_all_by_name(Name schema_name, Name table_name, MemoryContext mctx) diff --git a/src/interval.c b/src/interval.c index 57a3503122c..b1677663514 100644 --- a/src/interval.c +++ b/src/interval.c @@ -95,7 +95,7 @@ ts_interval_from_sql_input(Oid relid, Datum interval, Oid interval_type, const c { Hypertable *hypertable; Cache *hcache; - FormData_ts_interval *invl = palloc0(sizeof(FormData_ts_interval)); + FormData_ts_interval *invl; Oid partitioning_type; Dimension *open_dim; @@ -109,8 +109,33 @@ ts_interval_from_sql_input(Oid relid, Datum interval, Oid interval_type, const c elog(ERROR, "internal error: no open dimension found while parsing interval"); partitioning_type = ts_dimension_get_partition_type(open_dim); + if (IS_INTEGER_TYPE(partitioning_type)) + { + if (strlen(NameStr(open_dim->fd.integer_now_func)) == 0 || + strlen(NameStr(open_dim->fd.integer_now_func_schema)) == 0) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("integer_now_func not set on hypertable \"%s\"", get_rel_name(relid)))); + } + invl = ts_interval_from_sql_input_internal(open_dim, + interval, + interval_type, + parameter_name, + caller_name); ts_cache_release(hcache); + return invl; +} +/* use this variant only if the open_dim needs to be + * inferred for the hypertable. 
This is the case for continuous aggr + * related materialization hypertables + */ +TSDLLEXPORT FormData_ts_interval * +ts_interval_from_sql_input_internal(Dimension *open_dim, Datum interval, Oid interval_type, + const char *parameter_name, const char *caller_name) +{ + FormData_ts_interval *invl = palloc0(sizeof(FormData_ts_interval)); + Oid partitioning_type = ts_dimension_get_partition_type(open_dim); switch (interval_type) { case INTERVALOID: @@ -134,12 +159,6 @@ ts_interval_from_sql_input(Oid relid, Datum interval, Oid interval_type, const c errhint("integer-based time duration cannot be used with hypertables with " "a timestamp-based time dimensions"))); - if (strlen(NameStr(open_dim->fd.integer_now_func)) == 0 || - strlen(NameStr(open_dim->fd.integer_now_func_schema)) == 0) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("integer_now_func not set on hypertable %s", get_rel_name(relid)))); - invl->is_time_interval = false; invl->integer_interval = ts_time_value_to_internal(interval, interval_type); @@ -277,11 +296,15 @@ ts_interval_from_now_func_get_datum(int64 interval, Oid time_dim_type, Oid now_f (errcode(ERRCODE_INTERVAL_FIELD_OVERFLOW), errmsg("ts_interval overflow"))); return Int32GetDatum(res); case INT8OID: - res = DatumGetInt64(now) - interval; - if (res > DatumGetInt64(now)) + { + bool overflow = pg_sub_s64_overflow(DatumGetInt64(now), interval, &res); + if (overflow) + { ereport(ERROR, (errcode(ERRCODE_INTERVAL_FIELD_OVERFLOW), errmsg("ts_interval overflow"))); + } return Int64GetDatum(res); + } default: pg_unreachable(); } diff --git a/src/interval.h b/src/interval.h index 0b63f28eb8d..b102cdd682d 100644 --- a/src/interval.h +++ b/src/interval.h @@ -21,5 +21,7 @@ TSDLLEXPORT bool ts_interval_equal(FormData_ts_interval *invl1, FormData_ts_inte TSDLLEXPORT void ts_interval_now_func_validate(Oid now_func_oid, Oid open_dim_type); TSDLLEXPORT Datum ts_interval_subtract_from_now(FormData_ts_interval *invl, Dimension *open_dim); 
TSDLLEXPORT int64 ts_get_now_internal(Dimension *open_dim); - +TSDLLEXPORT FormData_ts_interval * +ts_interval_from_sql_input_internal(Dimension *open_dim, Datum interval, Oid interval_type, + const char *parameter_name, const char *caller_name); #endif /* TIMESCALEDB_INTERVAL */ diff --git a/test/expected/chunk_utils.out b/test/expected/chunk_utils.out index f0da183bb49..f1460964d49 100644 --- a/test/expected/chunk_utils.out +++ b/test/expected/chunk_utils.out @@ -681,7 +681,7 @@ ORDER BY c.id; \set ON_ERROR_STOP 0 -- should error because no hypertable SELECT drop_chunks(5, 'drop_chunk_test4'); -ERROR: hypertable "drop_chunk_test4" does not exist +ERROR: "drop_chunk_test4" is not a hypertable or a continuous aggregate view SELECT show_chunks('drop_chunk_test4'); ERROR: relation "drop_chunk_test4" does not exist at character 20 SELECT show_chunks('drop_chunk_test4', 5); @@ -1391,3 +1391,65 @@ psql:include/query_result_test_equal.sql:14: NOTICE: drop cascades to view depe (1 row) \set ON_ERROR_STOP 1 +--drop chunks from hypertable with same name in different schema +-- order of schema in search_path matters -- +\c :TEST_DBNAME :ROLE_SUPERUSER +drop table chunk_id_from_relid_test; +drop table drop_chunk_test1; +drop table drop_chunk_test2; +drop table drop_chunk_test3; +CREATE SCHEMA try_schema; +GRANT CREATE ON SCHEMA try_schema TO :ROLE_DEFAULT_PERM_USER; +GRANT USAGE ON SCHEMA try_schema TO :ROLE_DEFAULT_PERM_USER; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE try_schema.drop_chunk_test_date(time date, temp float8, device_id text); +SELECT create_hypertable('try_schema.drop_chunk_test_date', 'time', chunk_time_interval => interval '1 day', create_default_indexes=>false); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------------------- + (11,try_schema,drop_chunk_test_date,t) +(1 row) + +INSERT INTO public.drop_chunk_test_date VALUES( '2020-01-10', 100, 'hello'); +INSERT INTO try_schema.drop_chunk_test_date 
VALUES( '2020-01-10', 100, 'hello'); +set search_path to try_schema, public; +SELECT show_chunks(hypertable=>'public.drop_chunk_test_date', older_than=>'1 day'::interval); + show_chunks +----------------------------------------- + _timescaledb_internal._hyper_6_35_chunk +(1 row) + +SELECT show_chunks(hypertable=>'try_schema.drop_chunk_test_date', older_than=>'1 day'::interval); + show_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_36_chunk +(1 row) + +SELECT drop_chunks(table_name=>'drop_chunk_test_date', older_than=> '1 day'::interval); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_36_chunk +(1 row) + +--drop_chunks without schema_name and table_name +INSERT INTO public.drop_chunk_test_date VALUES( '2020-02-11', 100, 'hello'); +INSERT INTO try_schema.drop_chunk_test_date VALUES( '2020-02-10', 100, 'hello'); +SELECT show_chunks(hypertable=>'public.drop_chunk_test_date', older_than=>'1 day'::interval); + show_chunks +----------------------------------------- + _timescaledb_internal._hyper_6_35_chunk + _timescaledb_internal._hyper_6_37_chunk +(2 rows) + +SELECT show_chunks(hypertable=>'try_schema.drop_chunk_test_date', older_than=>'1 day'::interval); + show_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_38_chunk +(1 row) + +SELECT drop_chunks( older_than=> '1 day'::interval); + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_11_38_chunk +(1 row) + diff --git a/test/sql/chunk_utils.sql b/test/sql/chunk_utils.sql index af8148177de..a3deb184b87 100644 --- a/test/sql/chunk_utils.sql +++ b/test/sql/chunk_utils.sql @@ -24,7 +24,6 @@ $BODY$; \set ECHO errors \ir :QUERY_RESULT_TEST_EQUAL_RELPATH \set ECHO all - CREATE TABLE PUBLIC.drop_chunk_test1(time bigint, temp float8, device_id text); CREATE TABLE PUBLIC.drop_chunk_test2(time bigint, temp float8, device_id text); CREATE TABLE PUBLIC.drop_chunk_test3(time bigint, 
temp float8, device_id text); @@ -574,3 +573,31 @@ SELECT drop_chunks(table_name=>'drop_chunk_test3', older_than=>100); \set ECHO all \set ON_ERROR_STOP 1 + +--drop chunks from hypertable with same name in different schema +-- order of schema in search_path matters -- +\c :TEST_DBNAME :ROLE_SUPERUSER +drop table chunk_id_from_relid_test; +drop table drop_chunk_test1; +drop table drop_chunk_test2; +drop table drop_chunk_test3; +CREATE SCHEMA try_schema; +GRANT CREATE ON SCHEMA try_schema TO :ROLE_DEFAULT_PERM_USER; +GRANT USAGE ON SCHEMA try_schema TO :ROLE_DEFAULT_PERM_USER; + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE try_schema.drop_chunk_test_date(time date, temp float8, device_id text); +SELECT create_hypertable('try_schema.drop_chunk_test_date', 'time', chunk_time_interval => interval '1 day', create_default_indexes=>false); +INSERT INTO public.drop_chunk_test_date VALUES( '2020-01-10', 100, 'hello'); +INSERT INTO try_schema.drop_chunk_test_date VALUES( '2020-01-10', 100, 'hello'); +set search_path to try_schema, public; +SELECT show_chunks(hypertable=>'public.drop_chunk_test_date', older_than=>'1 day'::interval); +SELECT show_chunks(hypertable=>'try_schema.drop_chunk_test_date', older_than=>'1 day'::interval); +SELECT drop_chunks(table_name=>'drop_chunk_test_date', older_than=> '1 day'::interval); + +--drop_chunks without schema_name and table_name +INSERT INTO public.drop_chunk_test_date VALUES( '2020-02-11', 100, 'hello'); +INSERT INTO try_schema.drop_chunk_test_date VALUES( '2020-02-10', 100, 'hello'); +SELECT show_chunks(hypertable=>'public.drop_chunk_test_date', older_than=>'1 day'::interval); +SELECT show_chunks(hypertable=>'try_schema.drop_chunk_test_date', older_than=>'1 day'::interval); +SELECT drop_chunks( older_than=> '1 day'::interval); diff --git a/tsl/src/bgw_policy/drop_chunks_api.c b/tsl/src/bgw_policy/drop_chunks_api.c index 2f0ba15cba3..83b8e37f8f7 100644 --- a/tsl/src/bgw_policy/drop_chunks_api.c +++ 
b/tsl/src/bgw_policy/drop_chunks_api.c @@ -16,6 +16,8 @@ #include "bgw/job.h" #include "bgw_policy/drop_chunks.h" +#include "continuous_agg.h" +#include "chunk.h" #include "drop_chunks_api.h" #include "errors.h" #include "hypertable.h" @@ -23,7 +25,6 @@ #include "license.h" #include "utils.h" #include "interval.h" -#include "chunk.h" /* Default scheduled interval for drop_chunks jobs is currently 1 day (24 hours) */ #define DEFAULT_SCHEDULE_INTERVAL \ @@ -37,6 +38,85 @@ #define DEFAULT_RETRY_PERIOD \ DatumGetIntervalP(DirectFunctionCall3(interval_in, CStringGetDatum("5 min"), InvalidOid, -1)) +typedef struct DropChunksMeta +{ + Hypertable *ht; + Oid ht_oid; + FormData_ts_interval *older_than; +} DropChunksMeta; + +static void +validate_drop_chunks_hypertable(Cache *hcache, Oid user_htoid, Oid older_than_type, + Datum older_than_datum, DropChunksMeta *meta) +{ + FormData_ts_interval *older_than; + ContinuousAggHypertableStatus status; + + meta->ht = NULL; + meta->ht_oid = user_htoid; + meta->ht = ts_hypertable_cache_get_entry(hcache, meta->ht_oid, true /* missing_ok */); + if (meta->ht != NULL) + { + if (meta->ht->fd.compressed) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot add drop chunks policy to compresed hypertable \"%s\"", + get_rel_name(user_htoid)), + errhint("Please add the policy to the corresponding uncompressed hypertable " + "instead."))); + } + status = ts_continuous_agg_hypertable_status(meta->ht->fd.id); + if ((status == HypertableIsMaterialization || status == HypertableIsMaterializationAndRaw)) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot add drop chunks policy to materialized hypertable \"%s\" ", + get_rel_name(user_htoid)), + errhint("Please add the policy to the corresponding continuous aggregate " + "instead."))); + } + older_than = ts_interval_from_sql_input(meta->ht_oid, + older_than_datum, + older_than_type, + "older_than", + "add_drop_chunks_policy"); + } + else + { + 
/*check if this is a cont aggregate view */ + int32 mat_id; + Dimension *open_dim; + Oid partitioning_type; + char *schema = get_namespace_name(get_rel_namespace(user_htoid)); + char *view_name = get_rel_name(user_htoid); + ContinuousAgg *ca = NULL; + ca = ts_continuous_agg_find_by_view_name(schema, view_name); + if (ca == NULL) + ereport(ERROR, + (errcode(ERRCODE_TS_HYPERTABLE_NOT_EXIST), + errmsg("\"%s\" is not a hypertable or a continuous aggregate view", + view_name))); + mat_id = ca->data.mat_hypertable_id; + meta->ht = ts_hypertable_get_by_id(mat_id); + Assert(meta->ht != NULL); + open_dim = hyperspace_get_open_dimension(meta->ht->space, 0); + partitioning_type = ts_dimension_get_partition_type(open_dim); + if (IS_INTEGER_TYPE(partitioning_type)) + { + open_dim = ts_continous_agg_find_integer_now_func_by_materialization_id(mat_id); + } + older_than = ts_interval_from_sql_input_internal(open_dim, + older_than_datum, + older_than_type, + "older_than", + "add_drop_chunks_policy"); + } + Assert(meta->ht != NULL); + meta->older_than = older_than; + return; +} + Datum drop_chunks_add_policy(PG_FUNCTION_ARGS) { @@ -57,26 +137,16 @@ drop_chunks_add_policy(PG_FUNCTION_ARGS) Hypertable *hypertable; Cache *hcache; FormData_ts_interval *older_than; - + DropChunksMeta meta; + Oid mapped_oid; ts_hypertable_permissions_check(ht_oid, GetUserId()); /* Make sure that an existing policy doesn't exist on this hypertable */ - hypertable = ts_hypertable_cache_get_cache_and_entry(ht_oid, false, &hcache); - - if (hypertable->fd.compressed) - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot add drop chunks policy to hypertable \"%s\" which contains " - "compressed data", - get_rel_name(ht_oid)), - errhint("Please add the policy to the corresponding uncompressed hypertable " - "instead."))); - - older_than = ts_interval_from_sql_input(ht_oid, - older_than_datum, - older_than_type, - "older_than", - "add_drop_chunks_policy"); + hcache = 
ts_hypertable_cache_pin(); + validate_drop_chunks_hypertable(hcache, ht_oid, older_than_type, older_than_datum, &meta); + older_than = meta.older_than; + hypertable = meta.ht; + mapped_oid = meta.ht->main_table_relid; existing = ts_bgw_policy_drop_chunks_find_by_hypertable(hypertable->fd.id); @@ -125,7 +195,7 @@ drop_chunks_add_policy(PG_FUNCTION_ARGS) policy = (BgwPolicyDropChunks){ .job_id = job_id, - .hypertable_id = ts_hypertable_relid_to_id(ht_oid), + .hypertable_id = ts_hypertable_relid_to_id(mapped_oid), .older_than = *older_than, .cascade = cascade, .cascade_to_materializations = cascade_to_materializations, diff --git a/tsl/src/bgw_policy/job.c b/tsl/src/bgw_policy/job.c index c614d5edb27..c586ee93637 100644 --- a/tsl/src/bgw_policy/job.c +++ b/tsl/src/bgw_policy/job.c @@ -169,6 +169,30 @@ execute_reorder_policy(BgwJob *job, reorder_func reorder, bool fast_continue) return true; } +static Dimension * +get_open_dimension_for_hypertable(Hypertable *ht) +{ + int32 mat_id = ht->fd.id; + Dimension *open_dim = hyperspace_get_open_dimension(ht->space, 0); + Oid partitioning_type = ts_dimension_get_partition_type(open_dim); + if (IS_INTEGER_TYPE(partitioning_type)) + { + /* if this a materialization hypertable related to cont agg + * then need to get the right dimension which has + * integer_now function + */ + + open_dim = ts_continous_agg_find_integer_now_func_by_materialization_id(mat_id); + if (open_dim == NULL) + { + elog(ERROR, + "missing integer_now function for hypertable \"%s\" ", + get_rel_name(ht->main_table_relid)); + } + } + return open_dim; +} + bool execute_drop_chunks_policy(int32 job_id) { @@ -197,8 +221,7 @@ execute_drop_chunks_policy(int32 job_id) table_relid = ts_hypertable_id_to_relid(args->hypertable_id); hypertable = ts_hypertable_cache_get_cache_and_entry(table_relid, false, &hcache); - - open_dim = hyperspace_get_open_dimension(hypertable->space, 0); + open_dim = get_open_dimension_for_hypertable(hypertable); 
ts_chunk_do_drop_chunks(table_relid, ts_interval_subtract_from_now(&args->older_than, open_dim), (Datum) 0, @@ -206,7 +229,9 @@ execute_drop_chunks_policy(int32 job_id) InvalidOid, args->cascade, args->cascade_to_materializations, - LOG); + LOG, + true /*user_supplied_table_name */ + ); ts_cache_release(hcache); elog(LOG, "completed dropping chunks"); diff --git a/tsl/src/continuous_aggs/drop.c b/tsl/src/continuous_aggs/drop.c index 3a85acf4ea4..0fe7f5f6e36 100644 --- a/tsl/src/continuous_aggs/drop.c +++ b/tsl/src/continuous_aggs/drop.c @@ -18,27 +18,6 @@ /* drop chunks from the materialization hypertable that fall within the time * range. */ -static void -cagg_drop_mat_chunks(Oid mattable_relid, Datum older_than_datum, Datum newer_than_datum, - Oid older_than_type, Oid newer_than_type, bool cascade, int32 log_level) -{ - uint64 i = 0, num_chunks = 0; - Chunk *matchunks; - - matchunks = ts_chunk_get_chunks_in_time_range(mattable_relid, - older_than_datum, - newer_than_datum, - older_than_type, - newer_than_type, - "drop chunks for materialized hypertable", - CurrentMemoryContext, - &num_chunks, - false); - for (; i < num_chunks; i++) - { - ts_chunk_drop(&matchunks[i], cascade, log_level); - } -} void ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks_ptr, @@ -46,7 +25,7 @@ ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunk Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, Oid newer_than_type, bool cascade, - int32 log_level) + int32 log_level, bool user_supplied_table_name) { ListCell *lc; Oid arg_type = INT4OID; @@ -66,13 +45,15 @@ ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunk SPIPlanPtr delete_plan; ContinuousAgg *agg = lfirst(lc); Hypertable *mat_table = ts_hypertable_get_by_id(agg->data.mat_hypertable_id); - cagg_drop_mat_chunks(mat_table->main_table_relid, - older_than_datum, - newer_than_datum, - older_than_type, - newer_than_type, - cascade, - 
log_level); + ts_chunk_do_drop_chunks(mat_table->main_table_relid, + older_than_datum, + newer_than_datum, + older_than_type, + newer_than_type, + cascade, + CASCADE_TO_MATERIALIZATION_FALSE, + log_level, + user_supplied_table_name); /* we might still have materialization chunks that have data that refer * to the dropped chunks from the hypertable. This is because the * chunk interval on the mat. hypertable is NOT the same as the diff --git a/tsl/src/continuous_aggs/drop.h b/tsl/src/continuous_aggs/drop.h index c838b3efed5..05f8997190e 100644 --- a/tsl/src/continuous_aggs/drop.h +++ b/tsl/src/continuous_aggs/drop.h @@ -10,12 +10,10 @@ #include -extern void ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks_ptr, - Size num_chunks, +extern void ts_continuous_agg_drop_chunks_by_chunk_id( + int32 raw_hypertable_id, Chunk **chunks_ptr, Size num_chunks, - Datum older_than_datum, - Datum newer_than_datum, Oid older_than_type, - Oid newer_than_type, bool cascade, - int32 log_level); + Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, Oid newer_than_type, + bool cascade, int32 log_level, bool user_supplied_table_name); #endif /* TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H */ diff --git a/tsl/test/expected/bgw_policy.out b/tsl/test/expected/bgw_policy.out index e42e357383c..9338f59b9d8 100644 --- a/tsl/test/expected/bgw_policy.out +++ b/tsl/test/expected/bgw_policy.out @@ -449,7 +449,7 @@ WARNING: unexpected interval: smaller than one second \set ON_ERROR_STOP 0 -- we cannot add a drop_chunks policy on a table whose open dimension is not time and no now_func is set select add_drop_chunks_policy('test_table_int', INTERVAL '4 months', true); -ERROR: invalid parameter value for older_than +ERROR: integer_now_func not set on hypertable "test_table_int" \set ON_ERROR_STOP 1 INSERT INTO test_table_int VALUES (-2, -2), (-1, -1), (0,0), (1, 1), (2, 2), (3, 3); \c :TEST_DBNAME :ROLE_SUPERUSER; diff --git 
a/tsl/test/expected/compression_errors.out b/tsl/test/expected/compression_errors.out index 6aff98834f2..f8f8ed53a23 100644 --- a/tsl/test/expected/compression_errors.out +++ b/tsl/test/expected/compression_errors.out @@ -260,7 +260,7 @@ FROM _timescaledb_catalog.hypertable comp_hyper INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id) WHERE uncomp_hyper.table_name like 'foo' ORDER BY comp_hyper.id LIMIT 1 \gset select add_drop_chunks_policy(:'COMPRESSED_HYPER_NAME', INTERVAL '4 months', true); -ERROR: cannot add drop chunks policy to hypertable "_compressed_hypertable_15" which contains compressed data +ERROR: cannot add drop chunks policy to compresed hypertable "_compressed_hypertable_15" --Constraint checking for compression create table fortable(col integer primary key); create table table_constr( device_id integer, diff --git a/tsl/test/expected/continuous_aggs_bgw_drop_chunks.out b/tsl/test/expected/continuous_aggs_bgw_drop_chunks.out new file mode 100644 index 00000000000..e968f1e27d9 --- /dev/null +++ b/tsl/test/expected/continuous_aggs_bgw_drop_chunks.out @@ -0,0 +1,122 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- +-- Setup for testing bgw jobs --- +-- +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 +-- Remove any default jobs, e.g., telemetry +SELECT _timescaledb_internal.stop_background_workers(); + stop_background_workers +------------------------- + t +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT * FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + ts_bgw_params_create +---------------------- + +(1 row) + +----------------------------------- +-- test drop chunks policy runs for materialized hypertables created for +-- cont. 
aggregates +----------------------------------- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 1) \gset +NOTICE: adding not-null constraint to column "time" +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT 40::bigint $$; +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE VIEW drop_chunks_view1 WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours', timescaledb.refresh_lag = '-5', timescaledb.max_interval_per_job=100) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1; +--raw hypertable will have 40 chunks and the mat. hypertable will have 2 and 4 +-- chunks respectively +SELECT set_chunk_time_interval('_timescaledb_internal._materialized_hypertable_2', 10); + set_chunk_time_interval +------------------------- + +(1 row) + +\set ON_ERROR_STOP 0 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(1, 39) AS i; +\set ON_ERROR_STOP 1 +REFRESH MATERIALIZED VIEW drop_chunks_view1; +INFO: materializing continuous aggregate public.drop_chunks_view1: nothing to invalidate, new range up to 45 +--TEST1 specify drop chunks policy on mat. hypertable by +-- directly does not work +\set ON_ERROR_STOP 0 +SELECT add_drop_chunks_policy( '_timescaledb_internal._materialized_hypertable_2', older_than=> -50, cascade_to_materializations=>false ) as drop_chunks_job_id1 \gset +ERROR: cannot add drop chunks policy to materialized hypertable "_materialized_hypertable_2" +\set ON_ERROR_STOP 1 +--TEST2 specify drop chunks policy on cont. 
aggregate +-- integer_now func on raw hypertable is used by the drop +-- chunks policy +SELECT hypertable_id, table_name, integer_now_func +FROM _timescaledb_catalog.dimension d, _timescaledb_catalog.hypertable ht +WHERE ht.id = d.hypertable_id; + hypertable_id | table_name | integer_now_func +---------------+----------------------------+------------------- + 1 | drop_chunks_table | integer_now_test2 + 2 | _materialized_hypertable_2 | +(2 rows) + +SELECT chunk_table, ranges FROM chunk_relation_size('_timescaledb_internal._materialized_hypertable_2') +ORDER BY ranges; + chunk_table | ranges +-----------------------------------------+------------- + _timescaledb_internal._hyper_2_40_chunk | {"[0,10)"} + _timescaledb_internal._hyper_2_41_chunk | {"[10,20)"} + _timescaledb_internal._hyper_2_42_chunk | {"[20,30)"} + _timescaledb_internal._hyper_2_43_chunk | {"[30,40)"} +(4 rows) + +SELECT add_drop_chunks_policy( 'drop_chunks_view1', older_than=> 10, cascade_to_materializations=>false ) as drop_chunks_job_id1 \gset +SELECT alter_job_schedule(:drop_chunks_job_id1, schedule_interval => INTERVAL '1 second'); + alter_job_schedule +----------------------------------------------------- + (1001,"@ 1 sec","@ 5 mins",-1,"@ 5 mins",-infinity) +(1 row) + +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(2000000); + ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish +------------------------------------------------------------ + +(1 row) + +SELECT count(c) from show_chunks('_timescaledb_internal._materialized_hypertable_2') as c ; + count +------- + 1 +(1 row) + diff --git a/tsl/test/expected/continuous_aggs_ddl-10.out b/tsl/test/expected/continuous_aggs_ddl-10.out index 220543619f6..000d6f4be69 100644 --- a/tsl/test/expected/continuous_aggs_ddl-10.out +++ b/tsl/test/expected/continuous_aggs_ddl-10.out @@ -258,13 +258,9 @@ SELECT * FROM drop_chunks_view ORDER BY 1; 10 | 5 (3 rows) --- cannot drop directly from the materialization table +-- cannot drop 
directly from the materialization table without specifying +-- cont. aggregate view name explicitly \set ON_ERROR_STOP 0 -SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', - table_name => :'drop_chunks_mat_table_name', - newer_than => -20, - verbose => true); -ERROR: cannot drop_chunks on a continuous aggregate materialization table SELECT drop_chunks( newer_than => -20, verbose => true, @@ -272,8 +268,7 @@ SELECT drop_chunks( INFO: dropping chunk _timescaledb_internal._hyper_5_1_chunk INFO: dropping chunk _timescaledb_internal._hyper_5_2_chunk INFO: dropping chunk _timescaledb_internal._hyper_5_3_chunk -INFO: dropping chunk _timescaledb_internal._hyper_6_4_chunk -ERROR: cannot drop_chunks on a continuous aggregate materialization table +ERROR: cannot drop chunks on a continuous aggregate materialization table \set ON_ERROR_STOP 1 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; count @@ -939,10 +934,12 @@ SELECT chunk_table, ranges FROM chunk_relation_size('drop_chunks_table'); _timescaledb_internal._hyper_12_19_chunk | {"[50,60)"} (2 rows) ---test drop_chunks with cascade_to_materialization set to true (github 1644) +-- TEST drop_chunks with cascade_to_materialization set to true (github 1644) -- This checks if chunks from mat. hypertable are actually dropped -- and deletes data from chunks that cannot be dropped from that mat. hypertable. 
-SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_tablen +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset @@ -1048,3 +1045,43 @@ ORDER BY ranges; _timescaledb_internal._hyper_13_22_chunk | {"[20,40)"} (1 row) +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks( + table_name => 'drop_chunks_view', + newer_than => -20, + verbose => true); +INFO: dropping chunk _timescaledb_internal._hyper_13_22_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_13_22_chunk +(1 row) + +--can also drop chunks by specifying materialized hypertable name +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range not found for public.drop_chunks_table (time column time): not enough new data past completion threshold of 60 as of 55 +INFO: materializing continuous aggregate public.drop_chunks_view: processing invalidations, no new range +SELECT chunk_table, ranges FROM chunk_relation_size(:'drop_chunks_mat_tablen'); + chunk_table | ranges +------------------------------------------+------------- + _timescaledb_internal._hyper_13_24_chunk | {"[40,60)"} +(1 row) + +\set ON_ERROR_STOP 0 +SELECT drop_chunks( + table_name => :'drop_chunks_mat_table_name', + older_than => 60, + verbose => true); +ERROR: "_materialized_hypertable_13" is not a hypertable or a continuous aggregate view +\set ON_ERROR_STOP 1 +SELECT drop_chunks( + schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + older_than => 60, + verbose => 
true); +INFO: dropping chunk _timescaledb_internal._hyper_13_24_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_13_24_chunk +(1 row) + diff --git a/tsl/test/expected/continuous_aggs_ddl-11.out b/tsl/test/expected/continuous_aggs_ddl-11.out index f6b59f032c2..13a9e06a221 100644 --- a/tsl/test/expected/continuous_aggs_ddl-11.out +++ b/tsl/test/expected/continuous_aggs_ddl-11.out @@ -258,13 +258,9 @@ SELECT * FROM drop_chunks_view ORDER BY 1; 10 | 5 (3 rows) --- cannot drop directly from the materialization table +-- cannot drop directly from the materialization table without specifying +-- cont. aggregate view name explicitly \set ON_ERROR_STOP 0 -SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', - table_name => :'drop_chunks_mat_table_name', - newer_than => -20, - verbose => true); -ERROR: cannot drop_chunks on a continuous aggregate materialization table SELECT drop_chunks( newer_than => -20, verbose => true, @@ -272,8 +268,7 @@ SELECT drop_chunks( INFO: dropping chunk _timescaledb_internal._hyper_5_1_chunk INFO: dropping chunk _timescaledb_internal._hyper_5_2_chunk INFO: dropping chunk _timescaledb_internal._hyper_5_3_chunk -INFO: dropping chunk _timescaledb_internal._hyper_6_4_chunk -ERROR: cannot drop_chunks on a continuous aggregate materialization table +ERROR: cannot drop chunks on a continuous aggregate materialization table \set ON_ERROR_STOP 1 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; count @@ -939,10 +934,12 @@ SELECT chunk_table, ranges FROM chunk_relation_size('drop_chunks_table'); _timescaledb_internal._hyper_12_19_chunk | {"[50,60)"} (2 rows) ---test drop_chunks with cascade_to_materialization set to true (github 1644) +-- TEST drop_chunks with cascade_to_materialization set to true (github 1644) -- This checks if chunks from mat. hypertable are actually dropped -- and deletes data from chunks that cannot be dropped from that mat. hypertable. 
-SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_tablen +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset @@ -1048,3 +1045,43 @@ ORDER BY ranges; _timescaledb_internal._hyper_13_22_chunk | {"[20,40)"} (1 row) +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks( + table_name => 'drop_chunks_view', + newer_than => -20, + verbose => true); +INFO: dropping chunk _timescaledb_internal._hyper_13_22_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_13_22_chunk +(1 row) + +--can also drop chunks by specifying materialized hypertable name +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range not found for public.drop_chunks_table (time column time): not enough new data past completion threshold of 60 as of 55 +INFO: materializing continuous aggregate public.drop_chunks_view: processing invalidations, no new range +SELECT chunk_table, ranges FROM chunk_relation_size(:'drop_chunks_mat_tablen'); + chunk_table | ranges +------------------------------------------+------------- + _timescaledb_internal._hyper_13_24_chunk | {"[40,60)"} +(1 row) + +\set ON_ERROR_STOP 0 +SELECT drop_chunks( + table_name => :'drop_chunks_mat_table_name', + older_than => 60, + verbose => true); +ERROR: "_materialized_hypertable_13" is not a hypertable or a continuous aggregate view +\set ON_ERROR_STOP 1 +SELECT drop_chunks( + schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + older_than => 60, + verbose => 
true); +INFO: dropping chunk _timescaledb_internal._hyper_13_24_chunk + drop_chunks +------------------------------------------ + _timescaledb_internal._hyper_13_24_chunk +(1 row) + diff --git a/tsl/test/expected/continuous_aggs_ddl-9.6.out b/tsl/test/expected/continuous_aggs_ddl-9.6.out index c5edcd50412..93922de6285 100644 --- a/tsl/test/expected/continuous_aggs_ddl-9.6.out +++ b/tsl/test/expected/continuous_aggs_ddl-9.6.out @@ -258,13 +258,9 @@ SELECT * FROM drop_chunks_view ORDER BY 1; 10 | 5 (3 rows) --- cannot drop directly from the materialization table +-- cannot drop directly from the materialization table without specifying +-- cont. aggregate view name explicitly \set ON_ERROR_STOP 0 -SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', - table_name => :'drop_chunks_mat_table_name', - newer_than => -20, - verbose => true); -ERROR: cannot drop_chunks on a continuous aggregate materialization table SELECT drop_chunks( newer_than => -20, verbose => true, @@ -272,8 +268,7 @@ SELECT drop_chunks( INFO: dropping chunk _timescaledb_internal._hyper_5_1_chunk INFO: dropping chunk _timescaledb_internal._hyper_5_2_chunk INFO: dropping chunk _timescaledb_internal._hyper_5_3_chunk -INFO: dropping chunk _timescaledb_internal._hyper_6_4_chunk -ERROR: cannot drop_chunks on a continuous aggregate materialization table +ERROR: cannot drop chunks on a continuous aggregate materialization table \set ON_ERROR_STOP 1 SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; count diff --git a/tsl/test/expected/tsl_tables.out b/tsl/test/expected/tsl_tables.out index 8557f44df80..0ddabfc7138 100644 --- a/tsl/test/expected/tsl_tables.out +++ b/tsl/test/expected/tsl_tables.out @@ -138,9 +138,9 @@ ERROR: relation "fake_table" does not exist at character 31 select add_drop_chunks_policy('test_table', cascade=>true); ERROR: function add_drop_chunks_policy(unknown, cascade => boolean) does not exist at character 8 select add_drop_chunks_policy('test_table_int', INTERVAL '3 
month', true); -ERROR: invalid parameter value for older_than +ERROR: integer_now_func not set on hypertable "test_table_int" select add_drop_chunks_policy('test_table_int', 42, true); -ERROR: integer_now_func not set on hypertable test_table_int +ERROR: integer_now_func not set on hypertable "test_table_int" \set ON_ERROR_STOP 1 select add_drop_chunks_policy('test_table', INTERVAL '3 month', true); add_drop_chunks_policy @@ -529,7 +529,7 @@ CREATE TABLE non_hypertable(junk int, more_junk int); CREATE INDEX non_ht_index on non_hypertable(junk); \set ON_ERROR_STOP 0 select add_drop_chunks_policy('non_hypertable', INTERVAL '2 month'); -ERROR: table "non_hypertable" is not a hypertable +ERROR: "non_hypertable" is not a hypertable or a continuous aggregate view select add_reorder_policy('non_hypertable', 'non_ht_index'); ERROR: could not add reorder policy because "non_hypertable" is not a hypertable \set ON_ERROR_STOP 1 diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 3234ef85a5b..d89a395b70b 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -13,6 +13,7 @@ set(TEST_FILES_DEBUG bgw_reorder_drop_chunks.sql continuous_aggs.sql continuous_aggs_bgw.sql + continuous_aggs_bgw_drop_chunks.sql continuous_aggs_dump.sql continuous_aggs_materialize.sql continuous_aggs_multi.sql diff --git a/tsl/test/sql/continuous_aggs_bgw_drop_chunks.sql b/tsl/test/sql/continuous_aggs_bgw_drop_chunks.sql new file mode 100644 index 00000000000..fd90292e1da --- /dev/null +++ b/tsl/test/sql/continuous_aggs_bgw_drop_chunks.sql @@ -0,0 +1,86 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +-- +-- Setup for testing bgw jobs --- +-- +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1) RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; + +CREATE OR REPLACE FUNCTION ts_bgw_params_create() RETURNS VOID +AS :MODULE_PATHNAME LANGUAGE C VOLATILE; + +\set WAIT_ON_JOB 0 +\set IMMEDIATELY_SET_UNTIL 1 +\set WAIT_FOR_OTHER_TO_ADVANCE 2 + +-- Remove any default jobs, e.g., telemetry +SELECT _timescaledb_internal.stop_background_workers(); +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +TRUNCATE _timescaledb_internal.bgw_job_stat; + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE public.bgw_log( + msg_no INT, + mock_time BIGINT, + application_name TEXT, + msg TEXT +); +CREATE VIEW sorted_bgw_log AS + SELECT * FROM bgw_log ORDER BY mock_time, application_name COLLATE "C", msg_no; +CREATE TABLE public.bgw_dsm_handle_store( + handle BIGINT +); +INSERT INTO public.bgw_dsm_handle_store VALUES (0); +SELECT ts_bgw_params_create(); + +----------------------------------- +-- test drop chunks policy runs for materialized hypertables created for +-- cont. aggregates +----------------------------------- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER + +CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_nid + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 1) \gset + +CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT 40::bigint $$; + +SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2'); + +CREATE VIEW drop_chunks_view1 WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours', timescaledb.refresh_lag = '-5', timescaledb.max_interval_per_job=100) +AS SELECT time_bucket('5', time), max(data) + FROM drop_chunks_table + GROUP BY 1; + +--raw hypertable will have 40 chunks and the mat. 
hypertable will have 2 and 4 +-- chunks respectively +SELECT set_chunk_time_interval('_timescaledb_internal._materialized_hypertable_2', 10); +\set ON_ERROR_STOP 0 +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(1, 39) AS i; +\set ON_ERROR_STOP 1 +REFRESH MATERIALIZED VIEW drop_chunks_view1; + +--TEST1 specify drop chunks policy on mat. hypertable by +-- directly does not work + +\set ON_ERROR_STOP 0 +SELECT add_drop_chunks_policy( '_timescaledb_internal._materialized_hypertable_2', older_than=> -50, cascade_to_materializations=>false ) as drop_chunks_job_id1 \gset +\set ON_ERROR_STOP 1 + +--TEST2 specify drop chunks policy on cont. aggregate +-- integer_now func on raw hypertable is used by the drop +-- chunks policy +SELECT hypertable_id, table_name, integer_now_func +FROM _timescaledb_catalog.dimension d, _timescaledb_catalog.hypertable ht +WHERE ht.id = d.hypertable_id; + +SELECT chunk_table, ranges FROM chunk_relation_size('_timescaledb_internal._materialized_hypertable_2') +ORDER BY ranges; + +SELECT add_drop_chunks_policy( 'drop_chunks_view1', older_than=> 10, cascade_to_materializations=>false ) as drop_chunks_job_id1 \gset +SELECT alter_job_schedule(:drop_chunks_job_id1, schedule_interval => INTERVAL '1 second'); +SELECT ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(2000000); +SELECT count(c) from show_chunks('_timescaledb_internal._materialized_hypertable_2') as c ; diff --git a/tsl/test/sql/continuous_aggs_ddl.sql.in b/tsl/test/sql/continuous_aggs_ddl.sql.in index cf477df27d0..19e0eb356eb 100644 --- a/tsl/test/sql/continuous_aggs_ddl.sql.in +++ b/tsl/test/sql/continuous_aggs_ddl.sql.in @@ -182,12 +182,9 @@ SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; SELECT * FROM drop_chunks_view ORDER BY 1; --- cannot drop directly from the materialization table +-- cannot drop directly from the materialization table without specifying +-- cont. 
aggregate view name explicitly \set ON_ERROR_STOP 0 -SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', - table_name => :'drop_chunks_mat_table_name', - newer_than => -20, - verbose => true); SELECT drop_chunks( newer_than => -20, @@ -481,10 +478,12 @@ SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY ti --should show chunk with old name and old ranges SELECT chunk_table, ranges FROM chunk_relation_size('drop_chunks_table'); ---test drop_chunks with cascade_to_materialization set to true (github 1644) +-- TEST drop_chunks with cascade_to_materialization set to true (github 1644) -- This checks if chunks from mat. hypertable are actually dropped -- and deletes data from chunks that cannot be dropped from that mat. hypertable. -SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_tablen +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_tablen, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset @@ -514,3 +513,24 @@ SELECT count(c) FROM show_chunks(:'drop_chunks_mat_tablen') AS c; SELECT chunk_table, ranges FROM chunk_relation_size(:'drop_chunks_mat_tablen') ORDER BY ranges; +-- TEST drop chunks from continuous aggregates by specifying view name +SELECT drop_chunks( + table_name => 'drop_chunks_view', + newer_than => -20, + verbose => true); + +--can also drop chunks by specifying materialized hypertable name +INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500; +REFRESH MATERIALIZED VIEW drop_chunks_view; +SELECT chunk_table, ranges FROM chunk_relation_size(:'drop_chunks_mat_tablen'); +\set ON_ERROR_STOP 0 +SELECT drop_chunks( + table_name => :'drop_chunks_mat_table_name', + older_than => 60, + 
verbose => true); +\set ON_ERROR_STOP 1 +SELECT drop_chunks( + schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + older_than => 60, + verbose => true);