diff --git a/appveyor.yml b/appveyor.yml index 524435f9b79..014b9870e02 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -125,6 +125,8 @@ build_script: Add-Content "C:\Program Files\postgresql\12\data\postgresql.conf" "timescaledb.license = 'apache'" + Add-Content "C:\Program Files\postgresql\12\data\postgresql.conf" "wal_level = 'logical'" + # Add-Content "C:\Program Files\postgresql\12\data\postgresql.conf" "log_min_messages='debug5'" # build timescale diff --git a/cmake/ScriptFiles.cmake b/cmake/ScriptFiles.cmake index 3d2adb740ee..30b9f19c8f7 100644 --- a/cmake/ScriptFiles.cmake +++ b/cmake/ScriptFiles.cmake @@ -54,6 +54,7 @@ set(SOURCE_FILES metadata.sql dist_internal.sql views.sql + views_experimental.sql gapfill.sql maintenance_utils.sql partialize_finalize.sql diff --git a/sql/chunk.sql b/sql/chunk.sql index f852640beb9..d17f5aa9d8d 100644 --- a/sql/chunk.sql +++ b/sql/chunk.sql @@ -40,12 +40,18 @@ CREATE OR REPLACE FUNCTION _timescaledb_internal.show_chunk(chunk REGCLASS) RETURNS TABLE(chunk_id INTEGER, hypertable_id INTEGER, schema_name NAME, table_name NAME, relkind "char", slices JSONB) AS '@MODULE_PATHNAME@', 'ts_chunk_show' LANGUAGE C VOLATILE; --- Create a chunk with the given dimensional constraints (slices) as given in the JSONB. +-- Create a chunk with the given dimensional constraints (slices) as +-- given in the JSONB. If chunk_table is a valid relation, it will be +-- attached to the hypertable and used as the data table for the new +-- chunk. Note that schema_name and table_name need not be the same as +-- the existing schema and name for chunk_table. The provided chunk +-- table will be renamed and/or moved as necessary. 
CREATE OR REPLACE FUNCTION _timescaledb_internal.create_chunk( hypertable REGCLASS, - slices JSONB, + slices JSONB, schema_name NAME = NULL, - table_name NAME = NULL) + table_name NAME = NULL, + chunk_table REGCLASS = NULL) RETURNS TABLE(chunk_id INTEGER, hypertable_id INTEGER, schema_name NAME, table_name NAME, relkind "char", slices JSONB, created BOOLEAN) AS '@MODULE_PATHNAME@', 'ts_chunk_create' LANGUAGE C VOLATILE; @@ -63,3 +69,10 @@ RETURNS TABLE(chunk_id INTEGER, hypertable_id INTEGER, att_num INTEGER, nullfrac slot1numbers FLOAT4[], slot2numbers FLOAT4[], slot3numbers FLOAT4[], slot4numbers FLOAT4[], slot5numbers FLOAT4[], slotvaluetypetrings CSTRING[], slot1values CSTRING[], slot2values CSTRING[], slot3values CSTRING[], slot4values CSTRING[], slot5values CSTRING[]) AS '@MODULE_PATHNAME@', 'ts_chunk_get_colstats' LANGUAGE C VOLATILE; + +CREATE OR REPLACE FUNCTION _timescaledb_internal.create_chunk_table( + hypertable REGCLASS, + slices JSONB, + schema_name NAME, + table_name NAME) +RETURNS BOOL AS '@MODULE_PATHNAME@', 'ts_chunk_create_empty_table' LANGUAGE C VOLATILE; diff --git a/sql/ddl_experimental.sql b/sql/ddl_experimental.sql index 5a1fd30fa35..e79280dde34 100644 --- a/sql/ddl_experimental.sql +++ b/sql/ddl_experimental.sql @@ -25,3 +25,24 @@ CREATE OR REPLACE FUNCTION timescaledb_experimental.refresh_continuous_aggregate continuous_aggregate REGCLASS, hypertable_chunk REGCLASS ) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_continuous_agg_refresh_chunk' LANGUAGE C VOLATILE; + +CREATE OR REPLACE PROCEDURE timescaledb_experimental.move_chunk( + chunk REGCLASS, + source_node NAME = NULL, + destination_node NAME = NULL) +AS '@MODULE_PATHNAME@', 'ts_move_chunk_proc' LANGUAGE C; + +CREATE OR REPLACE PROCEDURE timescaledb_experimental.copy_chunk( + chunk REGCLASS, + source_node NAME = NULL, + destination_node NAME = NULL) +AS '@MODULE_PATHNAME@', 'ts_copy_chunk_proc' LANGUAGE C; + +-- A copy_chunk or move_chunk procedure call involves multiple nodes and +-- 
depending on the data size can take a long time. Failures are possible +-- when this long running activity is ongoing. We need to be able to recover +-- and cleanup such failed chunk copy/move activities and it's done via this +-- procedure +CREATE OR REPLACE PROCEDURE timescaledb_experimental.cleanup_copy_chunk_operation( + operation_id NAME) +AS '@MODULE_PATHNAME@', 'ts_copy_chunk_cleanup_proc' LANGUAGE C; diff --git a/sql/ddl_internal.sql b/sql/ddl_internal.sql index 8d8ca0f1332..e2d3dac527e 100644 --- a/sql/ddl_internal.sql +++ b/sql/ddl_internal.sql @@ -8,3 +8,45 @@ AS '@MODULE_PATHNAME@', 'ts_chunk_index_clone' LANGUAGE C VOLATILE STRICT; CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_index_replace(chunk_index_oid_old OID, chunk_index_oid_new OID) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_chunk_index_replace' LANGUAGE C VOLATILE STRICT; + +CREATE OR REPLACE FUNCTION _timescaledb_internal.create_chunk_replica_table( + chunk REGCLASS, + data_node_name NAME +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_chunk_create_replica_table' LANGUAGE C VOLATILE; + +-- Drop the specified chunk replica on the specified data node +CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_drop_replica( + chunk REGCLASS, + node_name NAME +) RETURNS VOID +AS '@MODULE_PATHNAME@', 'ts_chunk_drop_replica' LANGUAGE C VOLATILE; + +CREATE OR REPLACE PROCEDURE _timescaledb_internal.wait_subscription_sync( + schema_name NAME, + table_name NAME, + retry_count INT DEFAULT 18000, + retry_delay_ms NUMERIC DEFAULT 0.200 +) +LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + in_sync BOOLEAN; +BEGIN + FOR i in 1 .. 
retry_count + LOOP + SELECT pgs.srsubstate = 'r' + INTO in_sync + FROM pg_subscription_rel pgs + JOIN pg_class pgc ON relname = table_name + JOIN pg_namespace n ON (n.OID = pgc.relnamespace) + WHERE pgs.srrelid = pgc.oid AND schema_name = n.nspname; + + if (in_sync IS NULL OR NOT in_sync) THEN + PERFORM pg_sleep(retry_delay_ms); + ELSE + RETURN; + END IF; + END LOOP; + RAISE 'subscription sync wait timedout'; +END +$BODY$; diff --git a/sql/maintenance_utils.sql b/sql/maintenance_utils.sql index 3ae03796353..9cd928c370b 100644 --- a/sql/maintenance_utils.sql +++ b/sql/maintenance_utils.sql @@ -33,4 +33,3 @@ CREATE OR REPLACE FUNCTION recompress_chunk( chunk REGCLASS, if_not_compressed BOOLEAN = false ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_recompress_chunk' LANGUAGE C STRICT VOLATILE; - diff --git a/sql/pre_install/tables.sql b/sql/pre_install/tables.sql index d4ebea5be37..bdfd68cd41c 100644 --- a/sql/pre_install/tables.sql +++ b/sql/pre_install/tables.sql @@ -367,6 +367,37 @@ CREATE INDEX IF NOT EXISTS remote_txn_data_node_name_idx ON _timescaledb_catalog SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.remote_txn', ''); +-- This table stores information about the stage that has been completed of a +-- chunk move/copy activity +-- +-- A cleanup activity can query and check if the backend is running. If the +-- backend has exited then we can commence cleanup. The cleanup +-- activity can also do a diff with the "time_start" value to ascertain if +-- the entire end-to-end activity is going on for too long +-- +-- We also track the end time of every stage. A diff with the current time +-- will give us an idea about how long the current stage has been running +-- +-- Entry for a chunk move/copy activity gets deleted on successful completion +-- +-- We don't want to pg_dump this table's contents. 
A node restored using it +-- could be part of a totally different multinode setup and we don't want to +-- carry over chunk copy/move operations from earlier (if it makes sense at all) +-- + +CREATE SEQUENCE IF NOT EXISTS _timescaledb_catalog.chunk_copy_operation_id_seq MINVALUE 1; + +CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk_copy_operation ( + operation_id name PRIMARY KEY, -- the publisher/subscriber identifier used + backend_pid integer NOT NULL, -- the pid of the backend running this activity + completed_stage name NOT NULL, -- the completed stage/step + time_start timestamptz NOT NULL DEFAULT NOW(), -- start time of the activity + chunk_id integer NOT NULL REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + source_node_name name NOT NULL, + dest_node_name name NOT NULL, + delete_on_source_node bool NOT NULL -- is a move or copy activity +); + -- Set table permissions -- We need to grant SELECT to PUBLIC for all tables even those not -- marked as being dumped because pg_dump will try to access all diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index e2fcfab27d6..98596cdf3d2 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -3,3 +3,20 @@ GRANT USAGE ON SCHEMA timescaledb_experimental TO PUBLIC; DROP FUNCTION IF EXISTS _timescaledb_internal.block_new_chunks; DROP FUNCTION IF EXISTS _timescaledb_internal.allow_new_chunks; DROP FUNCTION IF EXISTS _timescaledb_internal.refresh_continuous_aggregate; +DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk; + +CREATE SEQUENCE IF NOT EXISTS _timescaledb_catalog.chunk_copy_operation_id_seq MINVALUE 1; + +CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk_copy_operation ( + operation_id name PRIMARY KEY, -- the publisher/subscriber identifier used + backend_pid integer NOT NULL, -- the pid of the backend running this activity + completed_stage name NOT NULL, -- the completed stage/step + time_start timestamptz NOT NULL DEFAULT NOW(), -- start 
time of the activity + chunk_id integer NOT NULL REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE, + source_node_name name NOT NULL, + dest_node_name name NOT NULL, + delete_on_source_node bool NOT NULL -- is a move or copy activity +); + +GRANT SELECT ON _timescaledb_catalog.chunk_copy_operation_id_seq TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.chunk_copy_operation TO PUBLIC; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 9ab1014836f..6188f6a01d0 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -1,4 +1,43 @@ -DROP SCHEMA IF EXISTS timescaledb_experimental CASCADE; DROP FUNCTION IF EXISTS _timescaledb_internal.block_new_chunks; DROP FUNCTION IF EXISTS _timescaledb_internal.allow_new_chunks; DROP FUNCTION IF EXISTS _timescaledb_internal.refresh_continuous_aggregate; +DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_table; +DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_replica_table; +DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_drop_replica; +DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk; +DROP PROCEDURE IF EXISTS _timescaledb_internal.wait_subscription_sync; +DROP PROCEDURE IF EXISTS timescaledb_experimental.move_chunk; +DROP PROCEDURE IF EXISTS timescaledb_experimental.copy_chunk; +DROP PROCEDURE IF EXISTS timescaledb_experimental.cleanup_copy_chunk_operation; +DROP TABLE IF EXISTS _timescaledb_catalog.chunk_copy_operation; +DROP SEQUENCE IF EXISTS _timescaledb_catalog.chunk_copy_operation_id_seq; +DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; +DROP SCHEMA IF EXISTS timescaledb_experimental CASCADE; + +-- We need to rewrite all continuous aggregates to make sure that the +-- queries do not contain qualification. They will be re-written in +-- the post-update script as well, but the previous version does not +-- process all continuous aggregates, leaving some with qualification +-- for the standard functions. 
To make this work, we need to +-- temporarily set the update stage to the post-update stage, which +-- will allow the ALTER MATERIALIZED VIEW to rewrite the query. If +-- that is not done, the TimescaleDB-specific hooks will not be used +-- and you will get an error message saying that, for example, +-- `conditions_summary` is not a materialized view. +SET timescaledb.update_script_stage TO 'post'; +DO $$ +DECLARE + vname regclass; + materialized_only bool; + altercmd text; + ts_version TEXT; +BEGIN + FOR vname, materialized_only IN select format('%I.%I', cagg.user_view_schema, cagg.user_view_name)::regclass, cagg.materialized_only from _timescaledb_catalog.continuous_agg cagg + LOOP + altercmd := format('ALTER MATERIALIZED VIEW %s SET (timescaledb.materialized_only=%L) ', vname::text, materialized_only); + EXECUTE altercmd; + END LOOP; + EXCEPTION WHEN OTHERS THEN RAISE; +END +$$; +RESET timescaledb.update_script_stage; diff --git a/sql/views_experimental.sql b/sql/views_experimental.sql new file mode 100644 index 00000000000..d24e9168d7b --- /dev/null +++ b/sql/views_experimental.sql @@ -0,0 +1,28 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. 
+ +CREATE VIEW timescaledb_experimental.chunk_replication_status AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name AS chunk_schema, + c.table_name AS chunk_name, + h.replication_factor AS desired_num_replicas, + count(cdn.chunk_id) AS num_replicas, + array_agg(cdn.node_name) AS replica_nodes, + -- compute the set of data nodes that doesn't have the chunk + (SELECT array_agg(node_name) FROM + (SELECT node_name FROM _timescaledb_catalog.hypertable_data_node hdn + WHERE hdn.hypertable_id = h.id + EXCEPT + SELECT node_name FROM _timescaledb_catalog.chunk_data_node cdn + WHERE cdn.chunk_id = c.id + ORDER BY node_name) nodes) AS non_replica_nodes +FROM _timescaledb_catalog.chunk c +INNER JOIN _timescaledb_catalog.chunk_data_node cdn ON (cdn.chunk_id = c.id) +INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = c.hypertable_id) +GROUP BY h.id, c.id, hypertable_schema, hypertable_name, chunk_schema, chunk_name +ORDER BY h.id, c.id, hypertable_schema, hypertable_name, chunk_schema, chunk_name; + +GRANT SELECT ON ALL TABLES IN SCHEMA timescaledb_experimental TO PUBLIC; diff --git a/src/catalog.c b/src/catalog.c index 4d13e3639cb..037b300c126 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -103,6 +103,10 @@ static const TableInfoDef catalog_table_names[_MAX_CATALOG_TABLES + 1] = { .schema_name = CATALOG_SCHEMA_NAME, .table_name = REMOTE_TXN_TABLE_NAME, }, + [CHUNK_COPY_OPERATION] = { + .schema_name = CATALOG_SCHEMA_NAME, + .table_name = CHUNK_COPY_OPERATION_TABLE_NAME, + }, [_MAX_CATALOG_TABLES] = { .schema_name = "invalid schema", .table_name = "invalid table", @@ -245,6 +249,12 @@ static const TableIndexDef catalog_table_index_definitions[_MAX_CATALOG_TABLES] [REMOTE_TXN_PKEY_IDX] = "remote_txn_pkey", [REMOTE_TXN_DATA_NODE_NAME_IDX] = "remote_txn_data_node_name_idx" } + }, + [CHUNK_COPY_OPERATION] = { + .length = _MAX_CHUNK_COPY_OPERATION_INDEX, + .names = (char *[]) { + [CHUNK_COPY_OPERATION_PKEY_IDX] = 
"chunk_copy_operation_pkey", + }, } }; @@ -266,6 +276,7 @@ static const char *catalog_table_serial_id_names[_MAX_CATALOG_TABLES] = { [HYPERTABLE_COMPRESSION] = NULL, [COMPRESSION_CHUNK_SIZE] = NULL, [REMOTE_TXN] = NULL, + [CHUNK_COPY_OPERATION] = CATALOG_SCHEMA_NAME ".chunk_copy_operation_id_seq", }; typedef struct InternalFunctionDef diff --git a/src/catalog.h b/src/catalog.h index b975c26ccb1..1f38bbec718 100644 --- a/src/catalog.h +++ b/src/catalog.h @@ -53,6 +53,7 @@ typedef enum CatalogTable HYPERTABLE_COMPRESSION, COMPRESSION_CHUNK_SIZE, REMOTE_TXN, + CHUNK_COPY_OPERATION, _MAX_CATALOG_TABLES, } CatalogTable; @@ -1182,6 +1183,53 @@ enum Anum_remote_data_node_name_idx _Anum_remote_txn_data_node_name_idx_max, }; +/******************************************** + * + * table to track chunk copy/move operations + * + ********************************************/ + +#define CHUNK_COPY_OPERATION_TABLE_NAME "chunk_copy_operation" + +enum Anum_chunk_copy_operation +{ + Anum_chunk_copy_operation_operation_id = 1, + Anum_chunk_copy_operation_backend_pid, + Anum_chunk_copy_operation_completed_stage, + Anum_chunk_copy_operation_time_start, + Anum_chunk_copy_operation_chunk_id, + Anum_chunk_copy_operation_source_node_name, + Anum_chunk_copy_operation_dest_node_name, + Anum_chunk_copy_operation_delete_on_src_node, + _Anum_chunk_copy_operation_max, +}; + +#define Natts_chunk_copy_operation (_Anum_chunk_copy_operation_max - 1) + +typedef struct FormData_chunk_copy_operation +{ + NameData operation_id; + int32 backend_pid; + NameData completed_stage; + TimestampTz time_start; + int32 chunk_id; + NameData source_node_name; + NameData dest_node_name; + bool delete_on_src_node; +} FormData_chunk_copy_operation; + +enum +{ + CHUNK_COPY_OPERATION_PKEY_IDX = 0, + _MAX_CHUNK_COPY_OPERATION_INDEX, +}; + +enum Anum_chunk_copy_operation_pkey_idx +{ + Anum_chunk_copy_operation_idx_operation_id = 1, + _Anum_chunk_copy_operation_pkey_idx_max, +}; + typedef enum CacheType { 
CACHE_TYPE_HYPERTABLE, diff --git a/src/chunk.c b/src/chunk.c index bcf44bde23e..170f98a6a28 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -593,7 +593,7 @@ chunk_collision_resolve(const Hypertable *ht, Hypercube *cube, const Point *p) } static int -chunk_add_constraints(Chunk *chunk) +chunk_add_constraints(const Chunk *chunk) { int num_added; @@ -875,7 +875,7 @@ ts_chunk_create_table(const Chunk *chunk, const Hypertable *ht, const char *tabl SetUserIdAndSecContext(saved_uid, sec_ctx); /* Create the corresponding chunk replicas on the remote data nodes */ - ts_cm_functions->create_chunk_on_data_nodes(chunk, ht); + ts_cm_functions->create_chunk_on_data_nodes(chunk, ht, NULL, NIL); /* Record the remote data node chunk ID mappings */ ts_chunk_data_node_insert_multi(chunk->data_nodes); @@ -949,6 +949,44 @@ ts_chunk_get_data_node_name_list(const Chunk *chunk) return datanodes; } +bool +ts_chunk_has_data_node(const Chunk *chunk, const char *node_name) +{ + ListCell *lc; + ChunkDataNode *cdn; + bool found = false; + + if (chunk == NULL || node_name == NULL) + return false; + + /* check that the chunk is indeed present on the specified data node */ + foreach (lc, chunk->data_nodes) + { + cdn = lfirst(lc); + if (namestrcmp(&cdn->fd.node_name, node_name) == 0) + { + found = true; + break; + } + } + + return found; +} + +static int32 +get_next_chunk_id() +{ + int32 chunk_id; + CatalogSecurityContext sec_ctx; + const Catalog *catalog = ts_catalog_get(); + + ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); + chunk_id = ts_catalog_table_next_seq_id(catalog, CHUNK); + ts_catalog_restore_user(&sec_ctx); + + return chunk_id; +} + /* * Create a chunk object from the dimensional constraints in the given hypercube. 
* @@ -963,11 +1001,9 @@ ts_chunk_get_data_node_name_list(const Chunk *chunk) */ static Chunk * chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_name, - const char *table_name, const char *prefix) + const char *table_name, const char *prefix, int32 chunk_id) { const Hyperspace *hs = ht->space; - const Catalog *catalog = ts_catalog_get(); - CatalogSecurityContext sec_ctx; Chunk *chunk; const char relkind = hypertable_chunk_relkind(ht); @@ -975,12 +1011,7 @@ chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_na schema_name = NameStr(ht->fd.associated_schema_name); /* Create a new chunk based on the hypercube */ - ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); - chunk = ts_chunk_create_base(ts_catalog_table_next_seq_id(catalog, CHUNK), - hs->num_dimensions, - relkind); - - ts_catalog_restore_user(&sec_ctx); + chunk = ts_chunk_create_base(chunk_id, hs->num_dimensions, relkind); chunk->fd.hypertable_id = hs->hypertable_id; chunk->cube = cube; @@ -1062,24 +1093,206 @@ init_scan_by_chunk_id(ScanIterator *iterator, int32 chunk_id) Int32GetDatum(chunk_id)); } +/* + * Creates only a table for a chunk. + * Either table name or chunk id needs to be provided. 
+ */ +static Chunk * +chunk_create_only_table_after_lock(const Hypertable *ht, Hypercube *cube, const char *schema_name, + const char *table_name, const char *prefix, int32 chunk_id) +{ + Chunk *chunk; + + Assert(table_name != NULL || chunk_id != INVALID_CHUNK_ID); + + chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix, chunk_id); + Assert(chunk != NULL); + + chunk_create_table(chunk, ht); + + return chunk; +} + +static void +chunk_table_drop_inherit(const Chunk *chunk, Hypertable *ht) +{ + AlterTableCmd drop_inh_cmd = { + .type = T_AlterTableCmd, + .subtype = AT_DropInherit, + .def = (Node *) makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), -1), + .missing_ok = false + }; + + AlterTableInternal(chunk->table_id, list_make1(&drop_inh_cmd), false); +} + +/* + * Checks that given hypercube does not collide with existing chunks and + * creates an empty table for a chunk without any metadata modifications. + */ +Chunk * +ts_chunk_create_only_table(Hypertable *ht, Hypercube *cube, const char *schema_name, + const char *table_name) +{ + ChunkStub *stub; + Chunk *chunk; + ScanTupLock tuplock = { + .lockmode = LockTupleKeyShare, + .waitpolicy = LockWaitBlock, + }; + + /* + * Chunk table can be created if no chunk collides with the dimension slices. + */ + stub = chunk_collides(ht, cube); + if (stub != NULL) + ereport(ERROR, + (errcode(ERRCODE_TS_CHUNK_COLLISION), + errmsg("chunk table creation failed due to dimension slice collision"))); + + /* + * Serialize chunk creation around a lock on the "main table" to avoid + * multiple processes trying to create the same chunk. We use a + * ShareUpdateExclusiveLock, which is the weakest lock possible that + * conflicts with itself. The lock needs to be held until transaction end. 
+ */ + LockRelationOid(ht->main_table_relid, ShareUpdateExclusiveLock); + + ts_hypercube_find_existing_slices(cube, &tuplock); + + chunk = chunk_create_only_table_after_lock(ht, + cube, + schema_name, + table_name, + NULL, + INVALID_CHUNK_ID); + chunk_table_drop_inherit(chunk, ht); + + return chunk; +} + static Chunk * chunk_create_from_hypercube_after_lock(const Hypertable *ht, Hypercube *cube, const char *schema_name, const char *table_name, const char *prefix) { + /* Insert any new dimension slices into metadata */ + ts_dimension_slice_insert_multi(cube->slices, cube->num_slices); + + Chunk *chunk = chunk_create_only_table_after_lock(ht, + cube, + schema_name, + table_name, + prefix, + get_next_chunk_id()); + + chunk_add_constraints(chunk); + chunk_insert_into_metadata_after_lock(chunk); + chunk_create_table_constraints(chunk); + + return chunk; +} + +/* + * Make a chunk table inherit a hypertable. + * + * Execution happens via high-level ALTER TABLE statement. This includes + * numerous checks to ensure that the chunk table has all the prerequisites to + * properly inherit the hypertable. 
+ */ +static void +chunk_add_inheritance(Chunk *chunk, const Hypertable *ht) +{ + AlterTableCmd altercmd = { + .type = T_AlterTableCmd, + .subtype = AT_AddInherit, + .def = (Node *) makeRangeVar((char *) NameStr(ht->fd.schema_name), + (char *) NameStr(ht->fd.table_name), + 0), + .missing_ok = false, + }; + AlterTableStmt alterstmt = { + .type = T_AlterTableStmt, + .cmds = list_make1(&altercmd), + .missing_ok = false, +#if PG14_GE + .objtype = OBJECT_TABLE, +#else + .relkind = OBJECT_TABLE, +#endif + .relation = makeRangeVar((char *) NameStr(chunk->fd.schema_name), + (char *) NameStr(chunk->fd.table_name), + 0), + }; + LOCKMODE lockmode = AlterTableGetLockLevel(alterstmt.cmds); +#if PG13_GE + AlterTableUtilityContext atcontext = { + .relid = AlterTableLookupRelation(&alterstmt, lockmode), + }; + + AlterTable(&alterstmt, lockmode, &atcontext); +#else + AlterTable(AlterTableLookupRelation(&alterstmt, lockmode), lockmode, &alterstmt); +#endif +} + +static Chunk * +chunk_create_from_hypercube_and_table_after_lock(const Hypertable *ht, Hypercube *cube, + Oid chunk_table_relid, const char *schema_name, + const char *table_name, const char *prefix) +{ + Oid current_chunk_schemaid = get_rel_namespace(chunk_table_relid); + Oid new_chunk_schemaid = InvalidOid; Chunk *chunk; + Assert(OidIsValid(chunk_table_relid)); + Assert(OidIsValid(current_chunk_schemaid)); + /* Insert any new dimension slices into metadata */ ts_dimension_slice_insert_multi(cube->slices, cube->num_slices); + chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix, get_next_chunk_id()); + chunk->table_id = chunk_table_relid; + chunk->hypertable_relid = ht->main_table_relid; + Assert(OidIsValid(ht->main_table_relid)); - chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix); - Assert(chunk != NULL); + new_chunk_schemaid = get_namespace_oid(NameStr(chunk->fd.schema_name), false); - chunk_create_table(chunk, ht); + if (current_chunk_schemaid != new_chunk_schemaid) + { + Relation 
chunk_rel = table_open(chunk_table_relid, AccessExclusiveLock); + ObjectAddresses *objects; + + CheckSetNamespace(current_chunk_schemaid, new_chunk_schemaid); + objects = new_object_addresses(); + AlterTableNamespaceInternal(chunk_rel, current_chunk_schemaid, new_chunk_schemaid, objects); + free_object_addresses(objects); + table_close(chunk_rel, NoLock); + /* Make changes visible */ + CommandCounterIncrement(); + } + if (namestrcmp(&chunk->fd.table_name, get_rel_name(chunk_table_relid)) != 0) + { + /* Renaming will acquire and keep an AccessExclusivelock on the chunk + * table */ + RenameRelationInternal(chunk_table_relid, NameStr(chunk->fd.table_name), true, false); + /* Make changes visible */ + CommandCounterIncrement(); + } + + /* Note that we do not automatically add constrains and triggers to the + * chunk table when the chunk is created from an existing table. However, + * PostgreSQL currently validates that CHECK constraints exists, but no + * validation is done for other objects, including triggers, UNIQUE, + * PRIMARY KEY, and FOREIGN KEY constraints. We might want to either + * enforce that these constraints exist prior to creating the chunk from a + * table, or we ensure that they are automatically added when the chunk is + * created. However, for the latter case, we risk duplicating constraints + * and triggers if some of them already exist on the chunk table prior to + * creating the chunk from it. 
*/ chunk_add_constraints(chunk); chunk_insert_into_metadata_after_lock(chunk); - + chunk_add_inheritance(chunk, ht); chunk_create_table_constraints(chunk); return chunk; @@ -1120,7 +1333,7 @@ chunk_create_from_point_after_lock(const Hypertable *ht, const Point *p, const c Chunk * ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, const char *schema_name, - const char *table_name, bool *created) + const char *table_name, Oid chunk_table_relid, bool *created) { ChunkStub *stub; Chunk *chunk = NULL; @@ -1148,7 +1361,16 @@ ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, const * commit since we won't create those slices ourselves. */ ts_hypercube_find_existing_slices(hc, &tuplock); - chunk = chunk_create_from_hypercube_after_lock(ht, hc, schema_name, table_name, NULL); + if (OidIsValid(chunk_table_relid)) + chunk = chunk_create_from_hypercube_and_table_after_lock(ht, + hc, + chunk_table_relid, + schema_name, + table_name, + NULL); + else + chunk = + chunk_create_from_hypercube_after_lock(ht, hc, schema_name, table_name, NULL); if (NULL != created) *created = true; @@ -1361,7 +1583,7 @@ chunk_tuple_found(TupleInfo *ti, void *arg) * the data table and related objects. 
*/ chunk->table_id = get_relname_relid(chunk->fd.table_name.data, get_namespace_oid(chunk->fd.schema_name.data, true)); - chunk->hypertable_relid = ts_inheritance_parent_relid(chunk->table_id); + chunk->hypertable_relid = ts_hypertable_id_to_relid(chunk->fd.hypertable_id); chunk->relkind = get_rel_relkind(chunk->table_id); if (chunk->relkind == RELKIND_FOREIGN_TABLE) diff --git a/src/chunk.h b/src/chunk.h index 26f6ad6cd3d..9148e1bf91b 100644 --- a/src/chunk.h +++ b/src/chunk.h @@ -168,10 +168,9 @@ extern TSDLLEXPORT void ts_chunk_drop_preserve_catalog_row(const Chunk *chunk, DropBehavior behavior, int32 log_level); extern TSDLLEXPORT List *ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int32 log_level, List **affected_data_nodes); -extern TSDLLEXPORT Chunk *ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, - const char *schema_name, - const char *table_name, - bool *created); +extern TSDLLEXPORT Chunk * +ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, const char *schema_name, + const char *table_name, Oid chunk_table_relid, bool *created); extern TSDLLEXPORT Chunk *ts_chunk_get_compressed_chunk_parent(const Chunk *chunk); extern TSDLLEXPORT bool ts_chunk_is_unordered(const Chunk *chunk); extern TSDLLEXPORT bool ts_chunk_is_compressed(const Chunk *chunk); @@ -180,7 +179,11 @@ extern TSDLLEXPORT ChunkCompressionStatus ts_chunk_get_compression_status(int32 extern TSDLLEXPORT Datum ts_chunk_id_from_relid(PG_FUNCTION_ARGS); extern TSDLLEXPORT List *ts_chunk_get_chunk_ids_by_hypertable_id(int32 hypertable_id); extern TSDLLEXPORT List *ts_chunk_get_data_node_name_list(const Chunk *chunk); +extern bool TSDLLEXPORT ts_chunk_has_data_node(const Chunk *chunk, const char *node_name); extern List *ts_chunk_data_nodes_copy(const Chunk *chunk); +extern TSDLLEXPORT Chunk *ts_chunk_create_only_table(Hypertable *ht, Hypercube *cube, + const char *schema_name, + const char *table_name); extern TSDLLEXPORT 
int64 ts_chunk_primary_dimension_start(const Chunk *chunk); diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c index 75ca930ada8..f44a915e658 100644 --- a/src/cross_module_fn.c +++ b/src/cross_module_fn.c @@ -42,6 +42,9 @@ CROSSMODULE_WRAPPER(job_alter); CROSSMODULE_WRAPPER(reorder_chunk); CROSSMODULE_WRAPPER(move_chunk); +CROSSMODULE_WRAPPER(move_chunk_proc); +CROSSMODULE_WRAPPER(copy_chunk_proc); +CROSSMODULE_WRAPPER(copy_chunk_cleanup_proc); /* partialize/finalize aggregate */ CROSSMODULE_WRAPPER(partialize_agg); @@ -79,10 +82,13 @@ CROSSMODULE_WRAPPER(data_node_add); CROSSMODULE_WRAPPER(data_node_delete); CROSSMODULE_WRAPPER(data_node_attach); CROSSMODULE_WRAPPER(data_node_detach); +CROSSMODULE_WRAPPER(chunk_drop_replica); CROSSMODULE_WRAPPER(chunk_set_default_data_node); CROSSMODULE_WRAPPER(chunk_get_relstats); CROSSMODULE_WRAPPER(chunk_get_colstats); +CROSSMODULE_WRAPPER(chunk_create_empty_table); +CROSSMODULE_WRAPPER(chunk_create_replica_table); CROSSMODULE_WRAPPER(timescaledb_fdw_handler); CROSSMODULE_WRAPPER(timescaledb_fdw_validator); @@ -224,7 +230,8 @@ empty_fn(PG_FUNCTION_ARGS) } static void -create_chunk_on_data_nodes_default(const Chunk *chunk, const Hypertable *ht) +create_chunk_on_data_nodes_default(const Chunk *chunk, const Hypertable *ht, + const char *remote_chunk_name, List *data_nodes) { error_no_default_fn_community(); } @@ -328,6 +335,9 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .job_execute = job_execute_default_fn, .move_chunk = error_no_default_fn_pg_community, + .move_chunk_proc = error_no_default_fn_pg_community, + .copy_chunk_proc = error_no_default_fn_pg_community, + .copy_chunk_cleanup_proc = error_no_default_fn_pg_community, .reorder_chunk = error_no_default_fn_pg_community, .partialize_agg = error_no_default_fn_pg_community, @@ -376,6 +386,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .show_chunk = error_no_default_fn_pg_community, .create_chunk = error_no_default_fn_pg_community, 
.create_chunk_on_data_nodes = create_chunk_on_data_nodes_default, + .chunk_drop_replica = error_no_default_fn_pg_community, .hypertable_make_distributed = hypertable_make_distributed_default_fn, .get_and_validate_data_node_list = get_and_validate_data_node_list_default_fn, .timescaledb_fdw_handler = error_no_default_fn_pg_community, @@ -399,6 +410,8 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .func_call_on_data_nodes = func_call_on_data_nodes_default, .chunk_get_relstats = error_no_default_fn_pg_community, .chunk_get_colstats = error_no_default_fn_pg_community, + .chunk_create_empty_table = error_no_default_fn_pg_community, + .chunk_create_replica_table = error_no_default_fn_pg_community, .hypertable_distributed_set_replication_factor = error_no_default_fn_pg_community, .update_compressed_chunk_relstats = update_compressed_chunk_relstats_default, }; diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h index 75443ed8c42..d41fdf22aa6 100644 --- a/src/cross_module_fn.h +++ b/src/cross_module_fn.h @@ -79,6 +79,9 @@ typedef struct CrossModuleFunctions PGFunction reorder_chunk; PGFunction move_chunk; + PGFunction move_chunk_proc; + PGFunction copy_chunk_proc; + PGFunction copy_chunk_cleanup_proc; void (*ddl_command_start)(ProcessUtilityArgs *args); void (*ddl_command_end)(EventTriggerData *command); void (*sql_drop)(List *dropped_objects); @@ -132,6 +135,7 @@ typedef struct CrossModuleFunctions PGFunction chunk_set_default_data_node; PGFunction create_chunk; PGFunction show_chunk; + List *(*get_and_validate_data_node_list)(ArrayType *nodearr); void (*hypertable_make_distributed)(Hypertable *ht, List *data_node_names); PGFunction timescaledb_fdw_handler; @@ -141,7 +145,8 @@ typedef struct CrossModuleFunctions PGFunction remote_txn_id_out; PGFunction remote_txn_heal_data_node; PGFunction remote_connection_cache_show; - void (*create_chunk_on_data_nodes)(const Chunk *chunk, const Hypertable *ht); + void (*create_chunk_on_data_nodes)(const Chunk 
*chunk, const Hypertable *ht, + const char *remote_chunk_name, List *data_nodes); Path *(*distributed_insert_path_create)(PlannerInfo *root, ModifyTablePath *mtpath, Index hypertable_rti, int subpath_index); uint64 (*distributed_copy)(const CopyStmt *stmt, CopyChunkState *ccstate, List *attnums); @@ -160,6 +165,9 @@ typedef struct CrossModuleFunctions PGFunction chunk_get_relstats; PGFunction chunk_get_colstats; PGFunction hypertable_distributed_set_replication_factor; + PGFunction chunk_create_empty_table; + PGFunction chunk_create_replica_table; + PGFunction chunk_drop_replica; void (*update_compressed_chunk_relstats)(Oid uncompressed_relid, Oid compressed_relid); CompressSingleRowState *(*compress_row_init)(int srcht_id, Relation in_rel, Relation out_rel); TupleTableSlot *(*compress_row_exec)(CompressSingleRowState *cr, TupleTableSlot *slot); diff --git a/src/dimension.c b/src/dimension.c index e1c5bbaee5e..0e7d9bcd924 100644 --- a/src/dimension.c +++ b/src/dimension.c @@ -394,9 +394,19 @@ ts_dimension_get_open_slice_ordinal(const Dimension *dim, const DimensionSlice * /* Find the index (ordinal) of the chunk's slice in the open dimension */ i = ts_dimension_vec_find_slice_index(vec, slice->fd.id); - Assert(i >= 0); - - return i; + if (i >= 0) + return i; + else + { + /* + * Returns the number of slices if the slice not found, i.e., i = -1. + * Dimension slice might not exist if a chunk table is created without + * modifying metadata. It happens only during copy/move chunk for distributed + * hypertable, thus this code, which is used when no space dimension exists, + * is unlikely to be used. + */ + return vec->num_slices; + } } /* diff --git a/src/error_utils.h b/src/error_utils.h new file mode 100644 index 00000000000..d9087f980a4 --- /dev/null +++ b/src/error_utils.h @@ -0,0 +1,27 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. 
+ * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. + */ +#ifndef TIMESCALEDB_ERROR_UTILS_H +#define TIMESCALEDB_ERROR_UTILS_H + +#define GETARG_NOTNULL_OID(var, arg, name) \ + { \ + var = PG_ARGISNULL(arg) ? InvalidOid : PG_GETARG_OID(arg); \ + if (!OidIsValid(var)) \ + ereport(ERROR, \ + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), \ + errmsg("%s cannot be NULL", name))); \ + } + +#define GETARG_NOTNULL_NULLABLE(var, arg, name, type) \ + { \ + if (PG_ARGISNULL(arg)) \ + ereport(ERROR, \ + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), \ + errmsg("%s cannot be NULL", name))); \ + var = PG_GETARG_##type(arg); \ + } + +#endif /* TIMESCALEDB_ERROR_UTILS_H */ diff --git a/src/utils.c b/src/utils.c index 2c62968c087..4b377eb554c 100644 --- a/src/utils.c +++ b/src/utils.c @@ -525,8 +525,11 @@ ts_create_struct_from_tuple(HeapTuple tuple, MemoryContext mctx, size_t alloc_si { void *struct_ptr = MemoryContextAllocZero(mctx, alloc_size); - /* Make sure the function is not used when the tuple contains NULLs */ - Assert(copy_size == tuple->t_len - tuple->t_data->t_hoff); + /* + * Make sure the function is not used when the tuple contains NULLs. + * Also compare the aligned sizes in the assert. 
+ */ + Assert(copy_size == MAXALIGN(tuple->t_len - tuple->t_data->t_hoff)); memcpy(struct_ptr, GETSTRUCT(tuple), copy_size); return struct_ptr; diff --git a/src/utils.h b/src/utils.h index 7eda9bd30a0..ff873eea81e 100644 --- a/src/utils.h +++ b/src/utils.h @@ -82,8 +82,8 @@ typedef struct Dimension Dimension; extern TSDLLEXPORT Oid ts_get_integer_now_func(const Dimension *open_dim); -extern void *ts_create_struct_from_slot(TupleTableSlot *slot, MemoryContext mctx, size_t alloc_size, - size_t copy_size); +extern TSDLLEXPORT void *ts_create_struct_from_slot(TupleTableSlot *slot, MemoryContext mctx, + size_t alloc_size, size_t copy_size); extern TSDLLEXPORT AppendRelInfo *ts_get_appendrelinfo(PlannerInfo *root, Index rti, bool missing_ok); diff --git a/test/expected/drop_rename_hypertable.out b/test/expected/drop_rename_hypertable.out index ad4494fabb6..2a0df71b7bd 100644 --- a/test/expected/drop_rename_hypertable.out +++ b/test/expected/drop_rename_hypertable.out @@ -196,6 +196,7 @@ SELECT * FROM _timescaledb_catalog.hypertable; ----------------------+--------------------------------------------------+-------+------------ _timescaledb_catalog | chunk | table | super_user _timescaledb_catalog | chunk_constraint | table | super_user + _timescaledb_catalog | chunk_copy_operation | table | super_user _timescaledb_catalog | chunk_data_node | table | super_user _timescaledb_catalog | chunk_index | table | super_user _timescaledb_catalog | compression_algorithm | table | super_user @@ -212,7 +213,7 @@ SELECT * FROM _timescaledb_catalog.hypertable; _timescaledb_catalog | metadata | table | super_user _timescaledb_catalog | remote_txn | table | super_user _timescaledb_catalog | tablespace | table | super_user -(18 rows) +(19 rows) \dt "_timescaledb_internal".* List of relations diff --git a/test/expected/pg_dump.out b/test/expected/pg_dump.out index 31d01644fa8..68e6cc0184b 100644 --- a/test/expected/pg_dump.out +++ b/test/expected/pg_dump.out @@ -545,6 +545,7 @@ WHERE 
refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND ORDER BY objid::text DESC; objid --------------------------------------------------- + timescaledb_experimental.chunk_replication_status timescaledb_information.compression_settings timescaledb_information.dimensions timescaledb_information.chunks @@ -555,11 +556,13 @@ WHERE refclassid = 'pg_catalog.pg_extension'::pg_catalog.regclass AND timescaledb_information.hypertables _timescaledb_internal.compressed_chunk_stats _timescaledb_internal.hypertable_chunk_local_size + _timescaledb_catalog.chunk_copy_operation + _timescaledb_catalog.chunk_copy_operation_id_seq _timescaledb_catalog.compression_algorithm _timescaledb_internal.bgw_policy_chunk_stats _timescaledb_internal.bgw_job_stat _timescaledb_catalog.tablespace_id_seq -(14 rows) +(17 rows) -- Make sure we can't run our restoring functions as a normal perm user as that would disable functionality for the whole db \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER diff --git a/tsl/src/CMakeLists.txt b/tsl/src/CMakeLists.txt index b4aabad4c8b..df7c7c8fbcb 100644 --- a/tsl/src/CMakeLists.txt +++ b/tsl/src/CMakeLists.txt @@ -2,6 +2,7 @@ set(SOURCES async_append.c chunk_api.c chunk.c + chunk_copy.c data_node.c deparse.c dist_util.c diff --git a/tsl/src/chunk.c b/tsl/src/chunk.c index 32b98eaa508..0003571317b 100644 --- a/tsl/src/chunk.c +++ b/tsl/src/chunk.c @@ -13,11 +13,14 @@ #include #include #include +#include +#include #include #include #include #include #include +#include #include #include #include @@ -32,38 +35,49 @@ #include #include #include +#include +#include #include "chunk.h" +#include "chunk_api.h" #include "data_node.h" #include "deparse.h" #include "remote/dist_commands.h" +#include "dist_util.h" static bool -chunk_set_foreign_server(Chunk *chunk, ForeignServer *new_server) +chunk_match_data_node_by_server(const Chunk *chunk, const ForeignServer *server) { - Relation ftrel; - HeapTuple tuple; - HeapTuple copy; - Datum values[Natts_pg_foreign_table]; - 
bool nulls[Natts_pg_foreign_table]; - CatalogSecurityContext sec_ctx; - Oid old_server_id; - long updated; + bool server_found = false; ListCell *lc; - bool new_server_found = false; foreach (lc, chunk->data_nodes) { ChunkDataNode *cdn = lfirst(lc); - if (cdn->foreign_server_oid == new_server->serverid) + if (cdn->foreign_server_oid == server->serverid) { - new_server_found = true; + server_found = true; break; } } - if (!new_server_found) + return server_found; +} + +static bool +chunk_set_foreign_server(Chunk *chunk, ForeignServer *new_server) +{ + Relation ftrel; + HeapTuple tuple; + HeapTuple copy; + Datum values[Natts_pg_foreign_table]; + bool nulls[Natts_pg_foreign_table]; + CatalogSecurityContext sec_ctx; + Oid old_server_id; + long updated; + + if (!chunk_match_data_node_by_server(chunk, new_server)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("chunk \"%s\" does not exist on data node \"%s\"", @@ -259,3 +273,132 @@ chunk_invoke_drop_chunks(Oid relid, Datum older_than, Datum older_than_type) return num_results; } + +static bool +chunk_is_distributed(const Chunk *chunk) +{ + return chunk->relkind == RELKIND_FOREIGN_TABLE; +} + +Datum +chunk_create_replica_table(PG_FUNCTION_ARGS) +{ + Oid chunk_relid; + const char *data_node_name; + const Chunk *chunk; + const Hypertable *ht; + const ForeignServer *server; + Cache *hcache = ts_hypertable_cache_pin(); + + TS_PREVENT_FUNC_IF_READ_ONLY(); + + GETARG_NOTNULL_OID(chunk_relid, 0, "chunk"); + GETARG_NOTNULL_NULLABLE(data_node_name, 1, "data node name", CSTRING); + + chunk = ts_chunk_get_by_relid(chunk_relid, false); + if (chunk == NULL) + { + const char *rel_name = get_rel_name(chunk_relid); + if (rel_name == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("oid \"%u\" is not a chunk", chunk_relid))); + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("relation \"%s\" is not a chunk", rel_name))); + } + if (!chunk_is_distributed(chunk)) + 
ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("chunk \"%s\" doesn't belong to a distributed hypertable", + get_rel_name(chunk_relid)))); + + ht = ts_hypertable_cache_get_entry(hcache, chunk->hypertable_relid, CACHE_FLAG_NONE); + ts_hypertable_permissions_check(ht->main_table_relid, GetUserId()); + + /* Check the given data node exists */ + server = data_node_get_foreign_server(data_node_name, ACL_USAGE, true, false); + /* Find if hypertable is attached to the data node and return an error otherwise */ + data_node_hypertable_get_by_node_name(ht, data_node_name, true); + + if (chunk_match_data_node_by_server(chunk, server)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("chunk \"%s\" already exists on data node \"%s\"", + get_rel_name(chunk_relid), + data_node_name))); + + chunk_api_call_create_empty_chunk_table(ht, chunk, data_node_name); + + ts_cache_release(hcache); + + PG_RETURN_VOID(); +} + +/* + * chunk_drop_replica: + * + * This function drops a chunk on a specified data node. It then + * removes the metadata about the association of the chunk to this + * data node on the access node. + */ +Datum +chunk_drop_replica(PG_FUNCTION_ARGS) +{ + Oid chunk_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); + const char *node_name = PG_ARGISNULL(1) ? 
NULL : NameStr(*PG_GETARG_NAME(1)); + ForeignServer *server; + Chunk *chunk; + + TS_PREVENT_FUNC_IF_READ_ONLY(); + + if (!OidIsValid(chunk_relid)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid chunk relation"))); + + chunk = ts_chunk_get_by_relid(chunk_relid, false); + + if (NULL == chunk) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid chunk relation"), + errdetail("Object with OID %u is not a chunk relation", chunk_relid))); + + /* It has to be a foreign table chunk */ + if (chunk->relkind != RELKIND_FOREIGN_TABLE) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a valid remote chunk", get_rel_name(chunk_relid)))); + + server = data_node_get_foreign_server(node_name, ACL_USAGE, true, false); + Assert(NULL != server); + + /* Early abort on missing permissions */ + ts_hypertable_permissions_check(chunk_relid, GetUserId()); + + if (!ts_chunk_has_data_node(chunk, node_name)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("chunk \"%s\" does not exist on data node \"%s\"", + get_rel_name(chunk_relid), + node_name))); + + /* + * There should be at least one surviving replica after the deletion here. + * + * We could fetch the corresponding hypertable and check its + * replication_factor. 
But the user of this function is using it + * to move chunk from one data node to another and is well aware of + * the replication_factor requirements + */ + if (list_length(chunk->data_nodes) <= 1) + ereport(ERROR, + (errcode(ERRCODE_TS_INSUFFICIENT_NUM_DATA_NODES), + errmsg("cannot drop the last chunk replica"), + errdetail("Dropping the last chunk replica could lead to data loss."))); + + chunk_api_call_chunk_drop_replica(chunk, node_name, server->serverid); + + PG_RETURN_VOID(); +} diff --git a/tsl/src/chunk.h b/tsl/src/chunk.h index 82afcf81012..0ede76be13e 100644 --- a/tsl/src/chunk.h +++ b/tsl/src/chunk.h @@ -8,11 +8,12 @@ #include #include - #include extern void chunk_update_foreign_server_if_needed(int32 chunk_id, Oid existing_server_id); extern Datum chunk_set_default_data_node(PG_FUNCTION_ARGS); +extern Datum chunk_drop_replica(PG_FUNCTION_ARGS); extern int chunk_invoke_drop_chunks(Oid relid, Datum older_than, Datum older_than_type); +extern Datum chunk_create_replica_table(PG_FUNCTION_ARGS); #endif /* TIMESCALEDB_TSL_CHUNK_H */ diff --git a/tsl/src/chunk_api.c b/tsl/src/chunk_api.c index cae0e7aeac9..c9372498dcf 100644 --- a/tsl/src/chunk_api.c +++ b/tsl/src/chunk_api.c @@ -27,11 +27,10 @@ #include #include -#include #include #include +#include #include -#include #include #include "remote/async.h" @@ -39,7 +38,9 @@ #include "remote/stmt_params.h" #include "remote/dist_commands.h" #include "remote/tuplefactory.h" +#include "chunk.h" #include "chunk_api.h" +#include "data_node.h" /* * These values come from the pg_type table. 
@@ -299,6 +300,47 @@ chunk_show(PG_FUNCTION_ARGS) PG_RETURN_DATUM(HeapTupleGetDatum(tuple)); } +static void +check_privileges_for_creating_chunk(Oid hyper_relid) +{ + AclResult acl_result; + + acl_result = pg_class_aclcheck(hyper_relid, GetUserId(), ACL_INSERT); + if (acl_result != ACLCHECK_OK) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied for table \"%s\"", get_rel_name(hyper_relid)), + errdetail("Insert privileges required on \"%s\" to create chunks.", + get_rel_name(hyper_relid)))); +} + +static Hypercube * +get_hypercube_from_slices(Jsonb *slices, const Hypertable *ht) +{ + Hypercube *hc; + const char *parse_err; + + hc = hypercube_from_jsonb(slices, ht->space, &parse_err); + + if (hc == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid hypercube for hypertable \"%s\"", + get_rel_name(ht->main_table_relid)), + errdetail("%s", parse_err))); + + return hc; +} + +/* + * Create a chunk and its metadata. + * + * This function will create a chunk, either from an existing table or by + * creating a new table. If chunk_table_relid is InvalidOid, the chunk table + * will be created, otherwise the table referenced by the relid will be + * used. The chunk will be associated with the hypertable given by + * hypertable_relid. + */ Datum chunk_create(PG_FUNCTION_ARGS) { @@ -306,6 +348,7 @@ chunk_create(PG_FUNCTION_ARGS) Jsonb *slices = PG_ARGISNULL(1) ? NULL : PG_GETARG_JSONB_P(1); const char *schema_name = PG_ARGISNULL(2) ? NULL : PG_GETARG_CSTRING(2); const char *table_name = PG_ARGISNULL(3) ? NULL : PG_GETARG_CSTRING(3); + Oid chunk_table_relid = PG_ARGISNULL(4) ? 
InvalidOid : PG_GETARG_OID(4); Cache *hcache = ts_hypertable_cache_pin(); Hypertable *ht = ts_hypertable_cache_get_entry(hcache, hypertable_relid, CACHE_FLAG_NONE); Hypercube *hc; @@ -313,18 +356,10 @@ chunk_create(PG_FUNCTION_ARGS) TupleDesc tupdesc; HeapTuple tuple; bool created; - const char *parse_err; - AclResult acl_result; Assert(NULL != ht); - - acl_result = pg_class_aclcheck(hypertable_relid, GetUserId(), ACL_INSERT); - if (acl_result != ACLCHECK_OK) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied for table \"%s\"", get_rel_name(hypertable_relid)), - errdetail("Insert privileges required on \"%s\" to create chunks.", - get_rel_name(hypertable_relid)))); + Assert(OidIsValid(ht->main_table_relid)); + check_privileges_for_creating_chunk(hypertable_relid); if (NULL == slices) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid slices"))); @@ -335,16 +370,14 @@ chunk_create(PG_FUNCTION_ARGS) errmsg("function returning record called in context " "that cannot accept type record"))); - hc = hypercube_from_jsonb(slices, ht->space, &parse_err); - - if (NULL == hc) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid hypercube for hypertable \"%s\"", get_rel_name(hypertable_relid)), - errdetail("%s", parse_err))); - - chunk = ts_chunk_find_or_create_without_cuts(ht, hc, schema_name, table_name, &created); - + hc = get_hypercube_from_slices(slices, ht); + Assert(NULL != hc); + chunk = ts_chunk_find_or_create_without_cuts(ht, + hc, + schema_name, + table_name, + chunk_table_relid, + &created); Assert(NULL != chunk); tuple = chunk_form_tuple(chunk, ht, tupdesc, created); @@ -359,12 +392,15 @@ chunk_create(PG_FUNCTION_ARGS) } #define CREATE_CHUNK_FUNCTION_NAME "create_chunk" +#define CREATE_CHUNK_NUM_ARGS 5 #define CHUNK_CREATE_STMT \ - "SELECT * FROM " INTERNAL_SCHEMA_NAME "." CREATE_CHUNK_FUNCTION_NAME "($1, $2, $3, $4)" + "SELECT * FROM " INTERNAL_SCHEMA_NAME "." 
CREATE_CHUNK_FUNCTION_NAME "($1, $2, $3, $4, $5)" #define ESTIMATE_JSON_STR_SIZE(num_dims) (60 * (num_dims)) -static Oid create_chunk_argtypes[4] = { REGCLASSOID, JSONBOID, NAMEOID, NAMEOID }; +static Oid create_chunk_argtypes[CREATE_CHUNK_NUM_ARGS] = { + REGCLASSOID, JSONBOID, NAMEOID, NAMEOID, REGCLASSOID +}; /* * Fill in / get the TupleDesc for the result type of the create_chunk() @@ -375,7 +411,7 @@ get_create_chunk_result_type(TupleDesc *tupdesc) { Oid funcoid = ts_get_function_oid(CREATE_CHUNK_FUNCTION_NAME, INTERNAL_SCHEMA_NAME, - 4, + CREATE_CHUNK_NUM_ARGS, create_chunk_argtypes); if (get_func_result_type(funcoid, NULL, tupdesc) != TYPEFUNC_COMPOSITE) @@ -416,27 +452,33 @@ chunk_api_dimension_slices_json(const Chunk *chunk, const Hypertable *ht) } /* - * Create a replica of a chunk on all its assigned data nodes. + * Create a replica of a chunk on all its assigned or specified list of data nodes. + * + * If "data_nodes" list is explicitly specified use that instead of the list of + * data nodes from the chunk. */ void -chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht) +chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht, + const char *remote_chunk_name, List *data_nodes) { AsyncRequestSet *reqset = async_request_set_create(); - const char *params[4] = { + const char *params[CREATE_CHUNK_NUM_ARGS] = { quote_qualified_identifier(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name)), chunk_api_dimension_slices_json(chunk, ht), NameStr(chunk->fd.schema_name), NameStr(chunk->fd.table_name), + remote_chunk_name ? remote_chunk_name : NULL, }; AsyncResponseResult *res; ListCell *lc; TupleDesc tupdesc; AttInMetadata *attinmeta; + List *target_data_nodes = data_nodes ? 
data_nodes : chunk->data_nodes; get_create_chunk_result_type(&tupdesc); attinmeta = TupleDescGetAttInMetadata(tupdesc); - foreach (lc, chunk->data_nodes) + foreach (lc, target_data_nodes) { ChunkDataNode *cdn = lfirst(lc); TSConnectionId id = remote_connection_id(cdn->foreign_server_oid, GetUserId()); @@ -445,7 +487,8 @@ chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht) req = async_request_send_with_params(conn, CHUNK_CREATE_STMT, - stmt_params_create_from_values(params, 4), + stmt_params_create_from_values(params, + CREATE_CHUNK_NUM_ARGS), FORMAT_TEXT); async_request_attach_user_data(req, cdn); @@ -1619,3 +1662,82 @@ chunk_api_get_chunk_relstats(PG_FUNCTION_ARGS) { return chunk_api_get_chunk_stats(fcinfo, false); } + +Datum +chunk_create_empty_table(PG_FUNCTION_ARGS) +{ + Oid hypertable_relid; + Jsonb *slices; + const char *schema_name; + const char *table_name; + Cache *const hcache = ts_hypertable_cache_pin(); + Hypertable *ht; + Hypercube *hc; + + GETARG_NOTNULL_OID(hypertable_relid, 0, "hypertable"); + GETARG_NOTNULL_NULLABLE(slices, 1, "slices", JSONB_P); + GETARG_NOTNULL_NULLABLE(schema_name, 2, "chunk schema name", CSTRING); + GETARG_NOTNULL_NULLABLE(table_name, 3, "chunk table name", CSTRING); + + ht = ts_hypertable_cache_get_entry(hcache, hypertable_relid, CACHE_FLAG_NONE); + Assert(ht != NULL); + check_privileges_for_creating_chunk(hypertable_relid); + hc = get_hypercube_from_slices(slices, ht); + Assert(NULL != hc); + ts_chunk_create_only_table(ht, hc, schema_name, table_name); + + ts_cache_release(hcache); + + PG_RETURN_BOOL(true); +} + +#define CREATE_CHUNK_TABLE_NAME "create_chunk_table" + +void +chunk_api_call_create_empty_chunk_table(const Hypertable *ht, const Chunk *chunk, + const char *node_name) +{ + const char *create_cmd = + psprintf("SELECT %s.%s($1, $2, $3, $4)", INTERNAL_SCHEMA_NAME, CREATE_CHUNK_TABLE_NAME); + const char *params[4] = { quote_qualified_identifier(NameStr(ht->fd.schema_name), + 
NameStr(ht->fd.table_name)), + chunk_api_dimension_slices_json(chunk, ht), + NameStr(chunk->fd.schema_name), + NameStr(chunk->fd.table_name) }; + + ts_dist_cmd_close_response( + ts_dist_cmd_params_invoke_on_data_nodes(create_cmd, + stmt_params_create_from_values(params, 4), + list_make1((void *) node_name), + true)); +} + +void +chunk_api_call_chunk_drop_replica(const Chunk *chunk, const char *node_name, Oid serverid) +{ + const char *drop_cmd; + List *data_nodes; + + /* + * Drop chunk on the data node using a regular "DROP TABLE". + * Note that CASCADE is not required as it takes care of dropping compressed + * chunk (if any). + * + * If there are any other non-TimescaleDB objects attached to this table due + * to some manual user activity then they should be dropped by the user + * before invoking this function. + */ + + drop_cmd = psprintf("DROP TABLE %s.%s", + quote_identifier(chunk->fd.schema_name.data), + quote_identifier(chunk->fd.table_name.data)); + data_nodes = list_make1((char *) node_name); + ts_dist_cmd_run_on_data_nodes(drop_cmd, data_nodes, true); + + /* + * This chunk might have this data node as primary, change that association + * if so. Then delete the chunk_id and node_name association. 
+ */ + chunk_update_foreign_server_if_needed(chunk->fd.id, serverid); + ts_chunk_data_node_delete_by_chunk_id_and_node_name(chunk->fd.id, node_name); +} diff --git a/tsl/src/chunk_api.h b/tsl/src/chunk_api.h index 63413fd9221..fe2c37f162e 100644 --- a/tsl/src/chunk_api.h +++ b/tsl/src/chunk_api.h @@ -9,12 +9,21 @@ #include #include +#include "chunk.h" + +#include "hypertable_data_node.h" extern Datum chunk_show(PG_FUNCTION_ARGS); extern Datum chunk_create(PG_FUNCTION_ARGS); -extern void chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht); +extern void chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht, + const char *remote_chunk_name, List *data_nodes); extern Datum chunk_api_get_chunk_relstats(PG_FUNCTION_ARGS); extern Datum chunk_api_get_chunk_colstats(PG_FUNCTION_ARGS); extern void chunk_api_update_distributed_hypertable_stats(Oid relid); +extern Datum chunk_create_empty_table(PG_FUNCTION_ARGS); +extern void chunk_api_call_create_empty_chunk_table(const Hypertable *ht, const Chunk *chunk, + const char *node_name); +extern void chunk_api_call_chunk_drop_replica(const Chunk *chunk, const char *node_name, + Oid serverid); #endif /* TIMESCALEDB_TSL_CHUNK_API_H */ diff --git a/tsl/src/chunk_copy.c b/tsl/src/chunk_copy.c new file mode 100644 index 00000000000..7592d135911 --- /dev/null +++ b/tsl/src/chunk_copy.c @@ -0,0 +1,1001 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if USE_ASSERT_CHECKING +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include "chunk.h" +#include "chunk_api.h" +#include "chunk_copy.h" +#include "data_node.h" +#include "debug_point.h" +#include "remote/dist_commands.h" +#include "dist_util.h" + +#define CCS_INIT "init" +#define CCS_CREATE_EMPTY_CHUNK "create_empty_chunk" +#define CCS_CREATE_PUBLICATION "create_publication" +#define CCS_CREATE_REPLICATION_SLOT "create_replication_slot" +#define CCS_CREATE_SUBSCRIPTION "create_subscription" +#define CCS_SYNC_START "sync_start" +#define CCS_SYNC "sync" +#define CCS_DROP_PUBLICATION "drop_publication" +#define CCS_DROP_SUBSCRIPTION "drop_subscription" +#define CCS_ATTACH_CHUNK "attach_chunk" +#define CCS_DELETE_CHUNK "delete_chunk" + +typedef struct ChunkCopyStage ChunkCopyStage; +typedef struct ChunkCopy ChunkCopy; + +typedef void (*chunk_copy_stage_func)(ChunkCopy *); + +struct ChunkCopyStage +{ + const char *name; + chunk_copy_stage_func function; + chunk_copy_stage_func function_cleanup; +}; + +/* To track a chunk move or copy activity */ +struct ChunkCopy +{ + /* catalog data */ + FormData_chunk_copy_operation fd; + /* current stage being executed */ + const ChunkCopyStage *stage; + /* chunk to copy */ + Chunk *chunk; + /* from/to foreign servers */ + ForeignServer *src_server; + ForeignServer *dst_server; + /* temporary memory context */ + MemoryContext mcxt; +}; + +static HeapTuple +chunk_copy_operation_make_tuple(const FormData_chunk_copy_operation *fd, TupleDesc desc) +{ + Datum values[Natts_chunk_copy_operation]; + bool nulls[Natts_chunk_copy_operation] = { false }; + memset(values, 0, sizeof(values)); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_operation_id)] = + 
NameGetDatum(&fd->operation_id); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_backend_pid)] = + Int32GetDatum(fd->backend_pid); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_completed_stage)] = + NameGetDatum(&fd->completed_stage); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_time_start)] = + TimestampTzGetDatum(fd->time_start); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_chunk_id)] = + Int32GetDatum(fd->chunk_id); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_source_node_name)] = + NameGetDatum(&fd->source_node_name); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_dest_node_name)] = + NameGetDatum(&fd->dest_node_name); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_delete_on_src_node)] = + BoolGetDatum(fd->delete_on_src_node); + return heap_form_tuple(desc, values, nulls); +} + +static void +chunk_copy_operation_insert_rel(Relation rel, const FormData_chunk_copy_operation *fd) +{ + CatalogSecurityContext sec_ctx; + HeapTuple new_tuple; + + new_tuple = chunk_copy_operation_make_tuple(fd, RelationGetDescr(rel)); + + ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); + ts_catalog_insert(rel, new_tuple); + ts_catalog_restore_user(&sec_ctx); + heap_freetuple(new_tuple); +} + +static void +chunk_copy_operation_insert(const FormData_chunk_copy_operation *fd) +{ + Catalog *catalog; + Relation rel; + + catalog = ts_catalog_get(); + rel = table_open(catalog_get_table_id(catalog, CHUNK_COPY_OPERATION), RowExclusiveLock); + + chunk_copy_operation_insert_rel(rel, fd); + table_close(rel, RowExclusiveLock); +} + +static ScanTupleResult +chunk_copy_operation_tuple_update(TupleInfo *ti, void *data) +{ + ChunkCopy *cc = data; + Datum values[Natts_chunk_copy_operation]; + bool nulls[Natts_chunk_copy_operation]; + CatalogSecurityContext sec_ctx; + bool should_free; + HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free); + HeapTuple new_tuple; + 
+ heap_deform_tuple(tuple, ts_scanner_get_tupledesc(ti), values, nulls); + + /* We only update the "completed_stage" field */ + Assert(NULL != cc->stage); + values[AttrNumberGetAttrOffset(Anum_chunk_copy_operation_completed_stage)] = + DirectFunctionCall1(namein, CStringGetDatum((cc->stage->name))); + + new_tuple = heap_form_tuple(ts_scanner_get_tupledesc(ti), values, nulls); + ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); + ts_catalog_update_tid(ti->scanrel, ts_scanner_get_tuple_tid(ti), new_tuple); + ts_catalog_restore_user(&sec_ctx); + heap_freetuple(new_tuple); + + if (should_free) + heap_freetuple(tuple); + + return SCAN_DONE; +} + +static int +chunk_copy_operation_scan_update_by_id(const char *operation_id, tuple_found_func tuple_found, + void *data, LOCKMODE lockmode) +{ + Catalog *catalog = ts_catalog_get(); + ScanKeyData scankey[1]; + ScannerCtx scanctx = { + .table = catalog_get_table_id(catalog, CHUNK_COPY_OPERATION), + .index = catalog_get_index(catalog, CHUNK_COPY_OPERATION, CHUNK_COPY_OPERATION_PKEY_IDX), + .nkeys = 1, + .limit = 1, + .scankey = scankey, + .data = data, + .tuple_found = tuple_found, + .lockmode = lockmode, + .scandirection = ForwardScanDirection, + }; + + ScanKeyInit(&scankey[0], + Anum_chunk_copy_operation_idx_operation_id, + BTEqualStrategyNumber, + F_NAMEEQ, + DirectFunctionCall1(namein, CStringGetDatum(operation_id))); + + return ts_scanner_scan(&scanctx); +} + +static void +chunk_copy_operation_update(ChunkCopy *cc) +{ + NameData application_name; + + snprintf(application_name.data, + sizeof(application_name.data), + "%s:%s", + cc->fd.operation_id.data, + cc->stage->name); + + pgstat_report_appname(application_name.data); + + chunk_copy_operation_scan_update_by_id(NameStr(cc->fd.operation_id), + chunk_copy_operation_tuple_update, + cc, + RowExclusiveLock); +} + +static ScanTupleResult +chunk_copy_operation_tuple_delete(TupleInfo *ti, void *data) +{ + CatalogSecurityContext sec_ctx; + + 
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); + ts_catalog_delete_tid(ti->scanrel, ts_scanner_get_tuple_tid(ti)); + ts_catalog_restore_user(&sec_ctx); + + return SCAN_CONTINUE; +} + +static int +chunk_copy_operation_delete_by_id(const char *operation_id) +{ + Catalog *catalog = ts_catalog_get(); + ScanKeyData scankey[1]; + ScannerCtx scanctx = { + .table = catalog_get_table_id(catalog, CHUNK_COPY_OPERATION), + .index = catalog_get_index(catalog, CHUNK_COPY_OPERATION, CHUNK_COPY_OPERATION_PKEY_IDX), + .nkeys = 1, + .limit = 1, + .scankey = scankey, + .data = NULL, + .tuple_found = chunk_copy_operation_tuple_delete, + .lockmode = RowExclusiveLock, + .scandirection = ForwardScanDirection, + }; + + ScanKeyInit(&scankey[0], + Anum_chunk_copy_operation_idx_operation_id, + BTEqualStrategyNumber, + F_NAMEEQ, + DirectFunctionCall1(namein, CStringGetDatum(operation_id))); + + return ts_scanner_scan(&scanctx); +} + +static void +chunk_copy_setup(ChunkCopy *cc, Oid chunk_relid, const char *src_node, const char *dst_node, + bool delete_on_src_node) +{ + Hypertable *ht; + Cache *hcache; + MemoryContext old, mcxt; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to copy/move chunk to data node")))); + + if (dist_util_membership() != DIST_MEMBER_ACCESS_NODE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function must be run on the access node only"))); + + /* + * The chunk and foreign server info needs to be on a memory context + * that will survive moving to a new transaction for each stage + */ + mcxt = AllocSetContextCreate(PortalContext, "chunk move activity", ALLOCSET_DEFAULT_SIZES); + old = MemoryContextSwitchTo(mcxt); + cc->mcxt = mcxt; + cc->chunk = ts_chunk_get_by_relid(chunk_relid, true); + cc->stage = NULL; + + /* It has to be a foreign table chunk */ + if (cc->chunk->relkind != RELKIND_FOREIGN_TABLE) + ereport(ERROR, + 
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is not a valid remote chunk", get_rel_name(chunk_relid)))); + + /* It has to be an uncompressed chunk, we query the status field on the AN for this */ + if (ts_chunk_is_compressed(cc->chunk)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("\"%s\" is a compressed remote chunk. Chunk copy/move not supported" + " currently on compressed chunks", + get_rel_name(chunk_relid)))); + + ht = ts_hypertable_cache_get_cache_and_entry(cc->chunk->hypertable_relid, + CACHE_FLAG_NONE, + &hcache); + + ts_hypertable_permissions_check(ht->main_table_relid, GetUserId()); + + if (!hypertable_is_distributed(ht)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("hypertable \"%s\" is not distributed", + get_rel_name(ht->main_table_relid)))); + + cc->src_server = data_node_get_foreign_server(src_node, ACL_USAGE, true, false); + Assert(NULL != cc->src_server); + + cc->dst_server = data_node_get_foreign_server(dst_node, ACL_USAGE, true, false); + Assert(NULL != cc->dst_server); + + /* Ensure that source and destination data nodes are not the same */ + if (cc->src_server == cc->dst_server) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("source and destination data node match"))); + + /* Check that src_node is a valid DN and that chunk exists on it */ + if (!ts_chunk_has_data_node(cc->chunk, src_node)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("chunk \"%s\" does not exist on source data node \"%s\"", + get_rel_name(chunk_relid), + src_node))); + + /* Check that dst_node is a valid DN and that chunk does not exist on it */ + if (ts_chunk_has_data_node(cc->chunk, dst_node)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("chunk \"%s\" already exists on destination data node \"%s\"", + get_rel_name(chunk_relid), + dst_node))); + + /* + * Populate the FormData_chunk_copy_operation structure for use by various stages + * + * The operation_id 
will be populated in the chunk_copy_stage_init function. + */ + cc->fd.backend_pid = MyProcPid; + namestrcpy(&cc->fd.completed_stage, CCS_INIT); + cc->fd.time_start = GetCurrentTimestamp(); + cc->fd.chunk_id = cc->chunk->fd.id; + namestrcpy(&cc->fd.source_node_name, src_node); + namestrcpy(&cc->fd.dest_node_name, dst_node); + cc->fd.delete_on_src_node = delete_on_src_node; + + ts_cache_release(hcache); + MemoryContextSwitchTo(old); + + /* Commit to get out of starting transaction */ + PopActiveSnapshot(); + CommitTransactionCommand(); +} + +static void +chunk_copy_finish(ChunkCopy *cc) +{ + /* Done using this long lived memory context */ + MemoryContextDelete(cc->mcxt); + + /* Start a transaction for the final outer transaction */ + StartTransactionCommand(); +} + +static void +chunk_copy_stage_init(ChunkCopy *cc) +{ + int32 id; + + /* + * Get the operation id for this chunk move/copy activity. The naming + * convention is "ts_copy_seq-id_chunk-id". + */ + id = ts_catalog_table_next_seq_id(ts_catalog_get(), CHUNK_COPY_OPERATION); + snprintf(cc->fd.operation_id.data, + sizeof(cc->fd.operation_id.data), + "ts_copy_%d_%d", + id, + cc->chunk->fd.id); + + /* Persist the Formdata entry in the catalog */ + chunk_copy_operation_insert(&cc->fd); +} + +static void +chunk_copy_stage_init_cleanup(ChunkCopy *cc) +{ + /* Failure in initial stages, delete this entry from the catalog */ + chunk_copy_operation_delete_by_id(NameStr(cc->fd.operation_id)); +} + +static void +chunk_copy_stage_create_empty_chunk(ChunkCopy *cc) +{ + /* Create an empty chunk table on the dst_node */ + Cache *hcache; + Hypertable *ht; + + ht = ts_hypertable_cache_get_cache_and_entry(cc->chunk->hypertable_relid, + CACHE_FLAG_NONE, + &hcache); + + chunk_api_call_create_empty_chunk_table(ht, cc->chunk, NameStr(cc->fd.dest_node_name)); + + ts_cache_release(hcache); +} + +static void +chunk_copy_stage_create_empty_chunk_cleanup(ChunkCopy *cc) +{ + /* + * Drop the chunk table on the dst_node. 
We use the API instead of just + * "DROP TABLE" because some metadata cleanup might also be needed + */ + chunk_api_call_chunk_drop_replica(cc->chunk, + NameStr(cc->fd.dest_node_name), + cc->dst_server->serverid); +} + +static void +chunk_copy_stage_create_publication(ChunkCopy *cc) +{ + const char *cmd; + + /* Create publication on the source data node */ + cmd = psprintf("CREATE PUBLICATION %s FOR TABLE %s", + NameStr(cc->fd.operation_id), + quote_qualified_identifier(NameStr(cc->chunk->fd.schema_name), + NameStr(cc->chunk->fd.table_name))); + + /* Create the publication */ + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); +} + +static void +chunk_copy_stage_create_replication_slot(ChunkCopy *cc) +{ + const char *cmd; + + /* + * CREATE SUBSCRIPTION from a database within the same database cluster will hang, + * create the replication slot separately before creating the subscription + */ + cmd = psprintf("SELECT pg_create_logical_replication_slot('%s', 'pgoutput')", + NameStr(cc->fd.operation_id)); + + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); +} + +static void +chunk_copy_stage_create_replication_slot_cleanup(ChunkCopy *cc) +{ + char *cmd; + DistCmdResult *dist_res; + PGresult *res; + + /* Check if the slot exists on the source data node */ + cmd = psprintf("SELECT 1 FROM pg_catalog.pg_replication_slots WHERE slot_name = '%s'", + NameStr(cc->fd.operation_id)); + dist_res = + ts_dist_cmd_invoke_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); + res = ts_dist_cmd_get_result_by_node_name(dist_res, NameStr(cc->fd.source_node_name)); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + ereport(ERROR, + (errcode(ERRCODE_CONNECTION_EXCEPTION), errmsg("%s", PQresultErrorMessage(res)))); + + /* Drop replication slot on the source data node only if it exists */ + if (PQntuples(res) != 0) + { + cmd = psprintf("SELECT pg_drop_replication_slot('%s')", 
NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); + } + + ts_dist_cmd_close_response(dist_res); +} + +static void +chunk_copy_stage_create_publication_cleanup(ChunkCopy *cc) +{ + char *cmd; + DistCmdResult *dist_res; + PGresult *res; + + /* + * Check if the replication slot exists and clean it up if so. This might + * happen if there's a failure in the create_replication_slot stage but + * PG might end up creating the slot even though we issued a ROLLBACK + */ + chunk_copy_stage_create_replication_slot_cleanup(cc); + + /* Check if the publication exists on the source data node */ + cmd = psprintf("SELECT 1 FROM pg_catalog.pg_publication WHERE pubname = '%s'", + NameStr(cc->fd.operation_id)); + dist_res = + ts_dist_cmd_invoke_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); + res = ts_dist_cmd_get_result_by_node_name(dist_res, NameStr(cc->fd.source_node_name)); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + ereport(ERROR, + (errcode(ERRCODE_CONNECTION_EXCEPTION), errmsg("%s", PQresultErrorMessage(res)))); + + /* Drop publication on the source node only if it exists */ + if (PQntuples(res) != 0) + { + cmd = psprintf("DROP PUBLICATION %s", NameStr(cc->fd.operation_id)); + + /* Drop the publication */ + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); + } + + ts_dist_cmd_close_response(dist_res); +} + +static void +chunk_copy_stage_create_subscription(ChunkCopy *cc) +{ + const char *cmd; + const char *connection_string; + + /* Prepare connection string to the source node */ + connection_string = remote_connection_get_connstr(NameStr(cc->fd.source_node_name)); + + cmd = psprintf("CREATE SUBSCRIPTION %s CONNECTION '%s' PUBLICATION %s" + " WITH (create_slot = false, enabled = false)", + NameStr(cc->fd.operation_id), + connection_string, + NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, 
list_make1(NameStr(cc->fd.dest_node_name)), true); +} + +static void +chunk_copy_stage_create_subscription_cleanup(ChunkCopy *cc) +{ + char *cmd; + DistCmdResult *dist_res; + PGresult *res; + + /* Check if the subscription exists on the destination data node */ + cmd = psprintf("SELECT 1 FROM pg_catalog.pg_subscription WHERE subname = '%s'", + NameStr(cc->fd.operation_id)); + dist_res = + ts_dist_cmd_invoke_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + res = ts_dist_cmd_get_result_by_node_name(dist_res, NameStr(cc->fd.dest_node_name)); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + ereport(ERROR, + (errcode(ERRCODE_CONNECTION_EXCEPTION), errmsg("%s", PQresultErrorMessage(res)))); + + /* Cleanup only if the subscription exists */ + if (PQntuples(res) != 0) + { + List *nodes = list_make1(NameStr(cc->fd.dest_node_name)); + + /* Disassociate the subscription from the replication slot first */ + cmd = + psprintf("ALTER SUBSCRIPTION %s SET (slot_name = NONE)", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, nodes, true); + + /* Drop the subscription now */ + pfree(cmd); + cmd = psprintf("DROP SUBSCRIPTION %s", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, nodes, true); + } + + ts_dist_cmd_close_response(dist_res); +} + +static void +chunk_copy_stage_sync_start(ChunkCopy *cc) +{ + const char *cmd; + + /* Start data transfer on the destination node */ + cmd = psprintf("ALTER SUBSCRIPTION %s ENABLE", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); +} + +static void +chunk_copy_stage_sync_start_cleanup(ChunkCopy *cc) +{ + char *cmd; + DistCmdResult *dist_res; + PGresult *res; + + /* Check if the subscription exists on the destination data node */ + cmd = psprintf("SELECT 1 FROM pg_catalog.pg_subscription WHERE subname = '%s'", + NameStr(cc->fd.operation_id)); + dist_res = + ts_dist_cmd_invoke_on_data_nodes(cmd, 
list_make1(NameStr(cc->fd.dest_node_name)), true); + res = ts_dist_cmd_get_result_by_node_name(dist_res, NameStr(cc->fd.dest_node_name)); + + if (PQresultStatus(res) != PGRES_TUPLES_OK) + ereport(ERROR, + (errcode(ERRCODE_CONNECTION_EXCEPTION), errmsg("%s", PQresultErrorMessage(res)))); + + /* Alter subscription only if it exists */ + if (PQntuples(res) != 0) + { + /* Stop data transfer on the destination node */ + cmd = psprintf("ALTER SUBSCRIPTION %s DISABLE", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + } + + ts_dist_cmd_close_response(dist_res); +} + +static void +chunk_copy_stage_sync(ChunkCopy *cc) +{ + char *cmd; + + /* + * Transaction blocks run in REPEATABLE READ mode in the connection pool. + * However this wait_subscription_sync procedure needs to refresh the subcription + * sync status data and hence needs a READ COMMITTED transaction isolation + * level for that. + */ + cmd = psprintf("SET transaction_isolation TO 'READ COMMITTED'"); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + pfree(cmd); + + /* Wait until data transfer finishes in its own transaction */ + cmd = psprintf("CALL _timescaledb_internal.wait_subscription_sync(%s, %s)", + quote_literal_cstr(NameStr(cc->chunk->fd.schema_name)), + quote_literal_cstr(NameStr(cc->chunk->fd.table_name))); + + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + pfree(cmd); +} + +static void +chunk_copy_stage_drop_subscription(ChunkCopy *cc) +{ + char *cmd; + + /* Stop data transfer on the destination node */ + cmd = psprintf("ALTER SUBSCRIPTION %s DISABLE", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + pfree(cmd); + + /* Disassociate the subscription from the replication slot first */ + cmd = psprintf("ALTER SUBSCRIPTION %s SET (slot_name = NONE)", NameStr(cc->fd.operation_id)); + 
ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + pfree(cmd); + + /* Drop the subscription now */ + cmd = psprintf("DROP SUBSCRIPTION %s", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true); + pfree(cmd); +} + +static void +chunk_copy_stage_drop_publication(ChunkCopy *cc) +{ + char *cmd; + + cmd = psprintf("SELECT pg_drop_replication_slot('%s')", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); + + cmd = psprintf("DROP PUBLICATION %s", NameStr(cc->fd.operation_id)); + ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.source_node_name)), true); +} + +static void +chunk_copy_stage_attach_chunk(ChunkCopy *cc) +{ + Cache *hcache; + Hypertable *ht; + ChunkDataNode *chunk_data_node; + const char *remote_chunk_name; + Chunk *chunk = cc->chunk; + + ht = ts_hypertable_cache_get_cache_and_entry(chunk->hypertable_relid, CACHE_FLAG_NONE, &hcache); + + /* Check that the hypertable is already attached to this data node */ + data_node_hypertable_get_by_node_name(ht, cc->dst_server->servername, true); + + chunk_data_node = palloc0(sizeof(ChunkDataNode)); + + chunk_data_node->fd.chunk_id = chunk->fd.id; + chunk_data_node->fd.node_chunk_id = -1; /* below API will fill it up */ + namestrcpy(&chunk_data_node->fd.node_name, cc->dst_server->servername); + chunk_data_node->foreign_server_oid = cc->dst_server->serverid; + + remote_chunk_name = psprintf("%s.%s", + quote_identifier(chunk->fd.schema_name.data), + quote_identifier(chunk->fd.table_name.data)); + + chunk_api_create_on_data_nodes(chunk, ht, remote_chunk_name, list_make1(chunk_data_node)); + + /* All ok, update the AN chunk metadata to add this data node to it */ + chunk->data_nodes = lappend(chunk->data_nodes, chunk_data_node); + + /* persist this association in the metadata */ + ts_chunk_data_node_insert(chunk_data_node); + + 
ts_cache_release(hcache); +} + +static void +chunk_copy_stage_delete_chunk(ChunkCopy *cc) +{ + if (!cc->fd.delete_on_src_node) + return; + + chunk_api_call_chunk_drop_replica(cc->chunk, + NameStr(cc->fd.source_node_name), + cc->src_server->serverid); +} + +static const ChunkCopyStage chunk_copy_stages[] = { + /* Initial Marker */ + { CCS_INIT, chunk_copy_stage_init, chunk_copy_stage_init_cleanup }, + + /* + * Create empty chunk table on the dst node. + * The corresponding cleanup function should just delete this empty chunk. + */ + { CCS_CREATE_EMPTY_CHUNK, + chunk_copy_stage_create_empty_chunk, + chunk_copy_stage_create_empty_chunk_cleanup }, + + /* + * Setup logical replication between nodes. + * The corresponding cleanup functions should drop the subscription and + * remove the replication slot followed by dropping of the publication on + * the source data node. + */ + { CCS_CREATE_PUBLICATION, + chunk_copy_stage_create_publication, + chunk_copy_stage_create_publication_cleanup }, + { CCS_CREATE_REPLICATION_SLOT, + chunk_copy_stage_create_replication_slot, + chunk_copy_stage_create_replication_slot_cleanup }, + { CCS_CREATE_SUBSCRIPTION, + chunk_copy_stage_create_subscription, + chunk_copy_stage_create_subscription_cleanup }, + + /* + * Begin data transfer and wait for completion. + * The corresponding cleanup function should just disable the subscription so + * that earlier steps above can drop the subcription/publication cleanly. + */ + { CCS_SYNC_START, chunk_copy_stage_sync_start, chunk_copy_stage_sync_start_cleanup }, + { CCS_SYNC, chunk_copy_stage_sync, NULL }, + + /* + * Cleanup. Nothing else required via the cleanup functions. + */ + { CCS_DROP_SUBSCRIPTION, chunk_copy_stage_drop_subscription, NULL }, + { CCS_DROP_PUBLICATION, chunk_copy_stage_drop_publication, NULL }, + + /* + * Attach chunk to the hypertable on the dst_node. + * The operation has succeeded from the destination data node perspective. + * No cleanup required here. 
+ */ + { CCS_ATTACH_CHUNK, chunk_copy_stage_attach_chunk, NULL }, + + /* + * Maybe delete chunk from the src_node (move operation). + * Again, everything ok, so no cleanup required, we probably shouldn't be + * seeing this entry in the catalog table because the operation has succeeded. + */ + { CCS_DELETE_CHUNK, chunk_copy_stage_delete_chunk, NULL }, + + /* Done Marker */ + { NULL, NULL, NULL } +}; + +static void +chunk_copy_execute(ChunkCopy *cc) +{ + const ChunkCopyStage *stage; + + /* + * Execute each copy stage in a separate transaction. The below will employ + * 2PC by default. This can be later optimized to use 1PC since only one + * datanode is involved in most of the stages. + */ + for (stage = &chunk_copy_stages[0]; stage->name != NULL; stage++) + { + StartTransactionCommand(); + + cc->stage = stage; + cc->stage->function(cc); + + /* Mark current stage as completed and update the catalog */ + chunk_copy_operation_update(cc); + + DEBUG_ERROR_INJECTION(stage->name); + + CommitTransactionCommand(); + } +} + +void +chunk_copy(Oid chunk_relid, const char *src_node, const char *dst_node, bool delete_on_src_node) +{ + ChunkCopy cc; + const MemoryContext oldcontext = CurrentMemoryContext; + + /* Populate copy structure */ + chunk_copy_setup(&cc, chunk_relid, src_node, dst_node, delete_on_src_node); + + /* Execute chunk copy in separate stages */ + PG_TRY(); + { + chunk_copy_execute(&cc); + } + PG_CATCH(); + { + /* Include chunk copy id to the error message */ + ErrorData *edata; + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + edata->detail = psprintf("Chunk copy operation id: %s.", NameStr(cc.fd.operation_id)); + FlushErrorState(); + ReThrowError(edata); + } + PG_END_TRY(); + + /* Finish up and delete the catalog entry */ + chunk_copy_finish(&cc); +} + +static ScanTupleResult +chunk_copy_operation_tuple_found(TupleInfo *ti, void *const data) +{ + ChunkCopy **cc = data; + + *cc = STRUCT_FROM_SLOT(ti->slot, ti->mctx, ChunkCopy, 
FormData_chunk_copy_operation); + return SCAN_CONTINUE; +} + +static ChunkCopy * +chunk_copy_operation_get(const char *operation_id) +{ + ScanKeyData scankeys[1]; + ChunkCopy *cc = NULL; + int indexid; + MemoryContext old, mcxt; + + /* Objects need to be in long lived context */ + mcxt = + AllocSetContextCreate(PortalContext, "chunk copy cleanup activity", ALLOCSET_DEFAULT_SIZES); + old = MemoryContextSwitchTo(mcxt); + + if (operation_id != NULL) + { + ScanKeyInit(&scankeys[0], + Anum_chunk_copy_operation_idx_operation_id, + BTEqualStrategyNumber, + F_NAMEEQ, + DirectFunctionCall1(namein, CStringGetDatum(operation_id))); + indexid = CHUNK_COPY_OPERATION_PKEY_IDX; + } + else + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid chunk copy operation identifier"))); + + ts_catalog_scan_one(CHUNK_COPY_OPERATION, + indexid, + scankeys, + 1, + chunk_copy_operation_tuple_found, + AccessShareLock, + CHUNK_COPY_OPERATION_TABLE_NAME, + &cc); + + /* + * If a valid entry is returned then fill up the rest of the fields in the + * ChunkCopy structure + */ + if (cc) + { + cc->mcxt = mcxt; + cc->chunk = ts_chunk_get_by_id(cc->fd.chunk_id, true); + cc->stage = NULL; + + /* No other sanity checks need to be performed since they were done earlier */ + + /* Setup the src_node */ + cc->src_server = + data_node_get_foreign_server(NameStr(cc->fd.source_node_name), ACL_USAGE, true, false); + Assert(NULL != cc->src_server); + + /* Setup the dst_node */ + cc->dst_server = + data_node_get_foreign_server(NameStr(cc->fd.dest_node_name), ACL_USAGE, true, false); + Assert(NULL != cc->dst_server); + } + + MemoryContextSwitchTo(old); + + if (cc == NULL) + /* No entry found, long lived context not required */ + MemoryContextDelete(mcxt); + + return cc; +} + +static void +chunk_copy_cleanup_internal(ChunkCopy *cc, int stage_idx) +{ + bool first = true; + + /* Cleanup each copy stage in a separate transaction */ + do + { + StartTransactionCommand(); + + cc->stage = 
&chunk_copy_stages[stage_idx]; + if (cc->stage->function_cleanup) + cc->stage->function_cleanup(cc); + + /* Mark stage as cleaned up and update the catalog */ + if (!first && stage_idx != 0) + chunk_copy_operation_update(cc); + else + first = false; + + CommitTransactionCommand(); + } while (--stage_idx >= 0); +} + +void +chunk_copy_cleanup(const char *operation_id) +{ + ChunkCopy *cc; + const MemoryContext oldcontext = CurrentMemoryContext; + const ChunkCopyStage *stage; + bool found = false; + int stage_idx; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to cleanup a chunk copy operation")))); + + if (dist_util_membership() != DIST_MEMBER_ACCESS_NODE) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("function must be run on the access node only"))); + + cc = chunk_copy_operation_get(operation_id); + + if (cc == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid chunk copy operation identifier. Entry not found"))); + + /* Identify the last completed stage for this activity. */ + stage_idx = 0; + for (stage = &chunk_copy_stages[stage_idx]; stage->name != NULL; + stage = &chunk_copy_stages[++stage_idx]) + { + if (namestrcmp(&cc->fd.completed_stage, stage->name) == 0) + { + found = true; + break; + } + } + + /* should always find an entry, add ereport to quell compiler warning */ + Assert(found == true); + if (!found) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("stage '%s' not found for copy chunk cleanup", + NameStr(cc->fd.completed_stage)))); + + /* Commit to get out of starting transaction */ + PopActiveSnapshot(); + CommitTransactionCommand(); + + /* Run the corresponding cleanup steps to roll back the activity. 
*/ + PG_TRY(); + { + chunk_copy_cleanup_internal(cc, stage_idx); + } + PG_CATCH(); + { + /* Include chunk copy id to the error message */ + ErrorData *edata; + MemoryContextSwitchTo(oldcontext); + edata = CopyErrorData(); + edata->detail = psprintf("While cleaning up chunk copy operation id: %s.", + NameStr(cc->fd.operation_id)); + FlushErrorState(); + ReThrowError(edata); + } + PG_END_TRY(); + + /* Finish up and delete the catalog entry */ + chunk_copy_finish(cc); +} diff --git a/tsl/src/chunk_copy.h b/tsl/src/chunk_copy.h new file mode 100644 index 00000000000..2ff3acd752c --- /dev/null +++ b/tsl/src/chunk_copy.h @@ -0,0 +1,13 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ +#ifndef TIMESCALEDB_TSL_CHUNK_COPY_H +#define TIMESCALEDB_TSL_CHUNK_COPY_H + +extern void chunk_copy(Oid chunk_relid, const char *src_node, const char *dst_node, + bool delete_on_src_node); +extern void chunk_copy_cleanup(const char *operation_id); + +#endif /* TIMESCALEDB_TSL_CHUNK_COPY_H */ diff --git a/tsl/src/data_node.c b/tsl/src/data_node.c index a431e411d40..be88ba8d344 100644 --- a/tsl/src/data_node.c +++ b/tsl/src/data_node.c @@ -40,7 +40,6 @@ #include "remote/connection_cache.h" #include "data_node.h" #include "remote/utils.h" -#include "hypertable.h" #include "hypertable_cache.h" #include "errors.h" #include "dist_util.h" @@ -1158,16 +1157,18 @@ data_node_detach_hypertable_data_nodes(const char *node_name, List *hypertable_d repartition); } -static HypertableDataNode * -get_hypertable_data_node(Oid table_id, const char *node_name, bool owner_check, bool attach_check) +HypertableDataNode * +data_node_hypertable_get_by_node_name(const Hypertable *ht, const char *node_name, + bool attach_check) { HypertableDataNode *hdn = NULL; - Cache *hcache = ts_hypertable_cache_pin(); - Hypertable *ht = ts_hypertable_cache_get_entry(hcache, 
table_id, CACHE_FLAG_NONE); ListCell *lc; - if (owner_check) - ts_hypertable_permissions_check(table_id, GetUserId()); + if (!hypertable_is_distributed(ht)) + ereport(ERROR, + (errcode(ERRCODE_TS_HYPERTABLE_NOT_DISTRIBUTED), + errmsg("hypertable \"%s\" is not distributed", + get_rel_name(ht->main_table_relid)))); foreach (lc, ht->data_nodes) { @@ -1185,16 +1186,31 @@ get_hypertable_data_node(Oid table_id, const char *node_name, bool owner_check, (errcode(ERRCODE_TS_DATA_NODE_NOT_ATTACHED), errmsg("data node \"%s\" is not attached to hypertable \"%s\"", node_name, - get_rel_name(table_id)))); + get_rel_name(ht->main_table_relid)))); else ereport(NOTICE, (errcode(ERRCODE_TS_DATA_NODE_NOT_ATTACHED), errmsg("data node \"%s\" is not attached to hypertable \"%s\", " "skipping", node_name, - get_rel_name(table_id)))); + get_rel_name(ht->main_table_relid)))); } + return hdn; +} + +static HypertableDataNode * +get_hypertable_data_node(Oid table_id, const char *node_name, bool owner_check, bool attach_check) +{ + HypertableDataNode *hdn = NULL; + Cache *hcache = ts_hypertable_cache_pin(); + const Hypertable *ht = ts_hypertable_cache_get_entry(hcache, table_id, CACHE_FLAG_NONE); + + if (owner_check) + ts_hypertable_permissions_check(table_id, GetUserId()); + + hdn = data_node_hypertable_get_by_node_name(ht, node_name, attach_check); + ts_cache_release(hcache); return hdn; diff --git a/tsl/src/data_node.h b/tsl/src/data_node.h index e9c44c28fe1..22cca26d478 100644 --- a/tsl/src/data_node.h +++ b/tsl/src/data_node.h @@ -8,7 +8,10 @@ #include +#include + #include "catalog.h" +#include "hypertable.h" #include "remote/dist_txn.h" /* Used to skip ACL checks */ @@ -39,6 +42,10 @@ extern List *data_node_oids_to_node_name_list(List *data_node_oids, AclMode mode extern void data_node_name_list_check_acl(List *data_node_names, AclMode mode); extern Datum data_node_ping(PG_FUNCTION_ARGS); +extern HypertableDataNode *data_node_hypertable_get_by_node_name(const Hypertable *ht, + const char 
*node_name, + bool attach_check); + /* This should only be used for testing */ extern Datum data_node_add_without_dist_id(PG_FUNCTION_ARGS); diff --git a/tsl/src/hypertable.c b/tsl/src/hypertable.c index 207f99dca04..e2c26e4bfdc 100644 --- a/tsl/src/hypertable.c +++ b/tsl/src/hypertable.c @@ -65,7 +65,7 @@ hypertable_create_backend_tables(int32 hypertable_id, List *data_nodes) DeparsedHypertableCommands *commands = deparse_get_distributed_hypertable_create_command(ht); foreach (cell, deparse_get_tabledef_commands(ht->main_table_relid)) - ts_dist_cmd_run_on_data_nodes(lfirst(cell), data_nodes); + ts_dist_cmd_run_on_data_nodes(lfirst(cell), data_nodes, true); dist_res = ts_dist_cmd_invoke_on_data_nodes(commands->table_create_command, data_nodes, true); foreach (cell, data_nodes) @@ -82,10 +82,10 @@ hypertable_create_backend_tables(int32 hypertable_id, List *data_nodes) ts_dist_cmd_close_response(dist_res); foreach (cell, commands->dimension_add_commands) - ts_dist_cmd_run_on_data_nodes(lfirst(cell), data_nodes); + ts_dist_cmd_run_on_data_nodes(lfirst(cell), data_nodes, true); foreach (cell, commands->grant_commands) - ts_dist_cmd_run_on_data_nodes(lfirst(cell), data_nodes); + ts_dist_cmd_run_on_data_nodes(lfirst(cell), data_nodes, true); return remote_ids; } diff --git a/tsl/src/init.c b/tsl/src/init.c index 564a3927c08..841002c4221 100644 --- a/tsl/src/init.c +++ b/tsl/src/init.c @@ -12,8 +12,8 @@ #include "bgw_policy/job.h" #include "bgw_policy/job_api.h" #include "bgw_policy/reorder_api.h" -#include "chunk_api.h" #include "chunk.h" +#include "chunk_api.h" #include "compression/array.h" #include "compression/compression.h" #include "compression/compress_utils.h" @@ -119,6 +119,9 @@ CrossModuleFunctions tsl_cm_functions = { .reorder_chunk = tsl_reorder_chunk, .move_chunk = tsl_move_chunk, + .move_chunk_proc = tsl_move_chunk_proc, + .copy_chunk_proc = tsl_copy_chunk_proc, + .copy_chunk_cleanup_proc = tsl_copy_chunk_cleanup_proc, .partialize_agg = tsl_partialize_agg, 
.finalize_agg_sfunc = tsl_finalize_agg_sfunc, .finalize_agg_ffunc = tsl_finalize_agg_ffunc, @@ -163,6 +166,7 @@ CrossModuleFunctions tsl_cm_functions = { .show_chunk = chunk_show, .create_chunk = chunk_create, .create_chunk_on_data_nodes = chunk_api_create_on_data_nodes, + .chunk_drop_replica = chunk_drop_replica, .hypertable_make_distributed = hypertable_make_distributed, .get_and_validate_data_node_list = hypertable_get_and_validate_data_nodes, .timescaledb_fdw_handler = timescaledb_fdw_handler, @@ -191,6 +195,8 @@ CrossModuleFunctions tsl_cm_functions = { .func_call_on_data_nodes = ts_dist_cmd_func_call_on_data_nodes, .chunk_get_relstats = chunk_api_get_chunk_relstats, .chunk_get_colstats = chunk_api_get_chunk_colstats, + .chunk_create_empty_table = chunk_create_empty_table, + .chunk_create_replica_table = chunk_create_replica_table, .hypertable_distributed_set_replication_factor = hypertable_set_replication_factor, .cache_syscache_invalidate = cache_syscache_invalidate, .update_compressed_chunk_relstats = update_compressed_chunk_relstats, diff --git a/tsl/src/remote/connection.c b/tsl/src/remote/connection.c index ce309a113fe..281ff175cfa 100644 --- a/tsl/src/remote/connection.c +++ b/tsl/src/remote/connection.c @@ -38,6 +38,7 @@ #include #include #include "connection.h" +#include "data_node.h" #include "debug_point.h" #include "utils.h" @@ -1233,27 +1234,19 @@ finish_connection(PGconn *conn, char **errmsg) } /* - * This will only open a connection to a specific node, but not do anything - * else. In particular, it will not perform any validation nor configure the - * connection since it cannot know that it connects to a data node database or - * not. For that, please use the `remote_connection_open_with_options` - * function. 
+ * Take options belonging to a foreign server and add additional default and + * other user/ssl related options as appropriate */ -TSConnection * -remote_connection_open_with_options_nothrow(const char *node_name, List *connection_options, - char **errmsg) +static void +setup_full_connection_options(List *connection_options, const char ***all_keywords, + const char ***all_values) { - PGconn *volatile pg_conn = NULL; const char *user_name = NULL; - TSConnection *ts_conn; const char **keywords; const char **values; int option_count; int option_pos; - if (NULL != errmsg) - *errmsg = NULL; - /* * Construct connection params from generic options of ForeignServer * and user. (Some of them might not be libpq options, in @@ -1290,6 +1283,31 @@ remote_connection_open_with_options_nothrow(const char *node_name, List *connect keywords[option_pos] = values[option_pos] = NULL; Assert(option_pos <= option_count); + *all_keywords = keywords; + *all_values = values; +} + +/* + * This will only open a connection to a specific node, but not do anything + * else. In particular, it will not perform any validation nor configure the + * connection since it cannot know that it connects to a data node database or + * not. For that, please use the `remote_connection_open_with_options` + * function. 
+ */ +TSConnection * +remote_connection_open_with_options_nothrow(const char *node_name, List *connection_options, + char **errmsg) +{ + PGconn *volatile pg_conn = NULL; + TSConnection *ts_conn; + const char **keywords; + const char **values; + + if (NULL != errmsg) + *errmsg = NULL; + + setup_full_connection_options(connection_options, &keywords, &values); + pg_conn = PQconnectdbParams(keywords, values, 0 /* Do not expand dbname param */); /* Cast to (char **) to silence warning with MSVC compiler */ @@ -1468,6 +1486,90 @@ add_userinfo_to_server_options(ForeignServer *server, Oid user_id) return options; } +/* + * Append the given string to the buffer, with suitable quoting for passing + * the string as a value in a keyword/value pair in a libpq connection string. + * + * The implementation is based on libpq appendConnStrVal(). + */ +static void +remote_connection_append_connstr_value(StringInfo buf, const char *str) +{ + const char *s; + bool needquotes; + + /* + * If the string is one or more plain ASCII characters, no need to quote + * it. This is quite conservative, but better safe than sorry. 
+ */ + needquotes = true; + for (s = str; *s; s++) + { + if (!((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') || (*s >= '0' && *s <= '9') || + *s == '_' || *s == '.')) + { + needquotes = true; + break; + } + needquotes = false; + } + + if (needquotes) + { + appendStringInfoChar(buf, '\''); + while (*str) + { + /* ' and \ must be escaped by to \' and \\ */ + if (*str == '\'' || *str == '\\') + appendStringInfoChar(buf, '\\'); + + appendStringInfoChar(buf, *str); + str++; + } + appendStringInfoChar(buf, '\''); + } + else + appendStringInfoString(buf, str); +} + +char * +remote_connection_get_connstr(const char *node_name) +{ + ForeignServer *server; + List *connection_options; + const char **keywords; + const char **values; + StringInfoData connstr; + StringInfoData connstr_escape; + int i; + + server = data_node_get_foreign_server(node_name, ACL_NO_CHECK, false, false); + connection_options = add_userinfo_to_server_options(server, GetUserId()); + setup_full_connection_options(connection_options, &keywords, &values); + + /* Cycle through the options and create the connection string */ + initStringInfo(&connstr); + i = 0; + while (keywords[i] != NULL) + { + appendStringInfo(&connstr, " %s=", keywords[i]); + remote_connection_append_connstr_value(&connstr, values[i]); + i++; + } + Assert(keywords[i] == NULL && values[i] == NULL); + + initStringInfo(&connstr_escape); + enlargeStringInfo(&connstr_escape, connstr.len * 2 + 1); + connstr_escape.len += PQescapeString(connstr_escape.data, connstr.data, connstr.len); + + /* Cast to (char **) to silence warning with MSVC compiler */ + pfree((char **) keywords); + pfree((char **) values); + pfree(connstr.data); + + return connstr_escape.data; +} + TSConnection * remote_connection_open_by_id(TSConnectionId id) { diff --git a/tsl/src/remote/connection.h b/tsl/src/remote/connection.h index 7cb9d1f9731..cb89311e36d 100644 --- a/tsl/src/remote/connection.h +++ b/tsl/src/remote/connection.h @@ -93,6 +93,7 @@ extern unsigned int 
remote_connection_get_prep_stmt_number(void); extern bool remote_connection_configure(TSConnection *conn); extern bool remote_connection_check_extension(TSConnection *conn); extern void remote_validate_extension_version(TSConnection *conn, const char *data_node_version); +extern char *remote_connection_get_connstr(const char *node_name); typedef enum TSConnectionResult { diff --git a/tsl/src/remote/dist_commands.c b/tsl/src/remote/dist_commands.c index 8d3d2e0dd8d..75374c616e5 100644 --- a/tsl/src/remote/dist_commands.c +++ b/tsl/src/remote/dist_commands.c @@ -15,7 +15,6 @@ #include "dist_commands.h" #include "dist_txn.h" #include "connection_cache.h" -#include "async.h" #include "data_node.h" #include "dist_util.h" #include "miscadmin.h" @@ -79,9 +78,14 @@ ts_dist_cmd_collect_responses(List *requests) * * The list of data nodes can either be a list of data node names, or foreign * server OIDs. + * + * If "transactional" is false then it means that the SQL should be executed + * in autocommit (implicit statement level commit) mode without the need for + * an explicit 2PC from the access node */ DistCmdResult * -ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transactional) +ts_dist_cmd_params_invoke_on_data_nodes(const char *sql, StmtParams *params, List *data_nodes, + bool transactional) { ListCell *lc; List *requests = NIL; @@ -113,7 +117,11 @@ ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transac ereport(DEBUG2, (errmsg_internal("sending \"%s\" to data node \"%s\"", sql, node_name))); - req = async_request_send(connection, sql); + if (params == NULL) + req = async_request_send(connection, sql); + else + req = async_request_send_with_params(connection, sql, params, FORMAT_TEXT); + async_request_attach_user_data(req, (char *) node_name); requests = lappend(requests, req); } @@ -125,6 +133,12 @@ ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transac return results; } +DistCmdResult * 
+ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transactional) +{ + return ts_dist_cmd_params_invoke_on_data_nodes(sql, NULL, data_nodes, transactional); +} + DistCmdResult * ts_dist_cmd_invoke_on_data_nodes_using_search_path(const char *sql, const char *search_path, List *node_names, bool transactional) diff --git a/tsl/src/remote/dist_commands.h b/tsl/src/remote/dist_commands.h index b8e70a89f29..9f88504144e 100644 --- a/tsl/src/remote/dist_commands.h +++ b/tsl/src/remote/dist_commands.h @@ -7,13 +7,16 @@ #define TIMESCALEDB_TSL_REMOTE_DIST_COMMANDS_H #include -#include + +#include "async.h" typedef struct DistCmdResult DistCmdResult; typedef struct List PreparedDistCmd; extern DistCmdResult *ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *node_names, bool transactional); +extern DistCmdResult *ts_dist_cmd_params_invoke_on_data_nodes(const char *sql, StmtParams *params, + List *data_nodes, bool transactional); extern DistCmdResult *ts_dist_cmd_invoke_on_data_nodes_using_search_path(const char *sql, const char *search_path, List *node_names, @@ -34,8 +37,8 @@ extern Size ts_dist_cmd_response_count(DistCmdResult *result); extern long ts_dist_cmd_total_row_count(DistCmdResult *result); extern void ts_dist_cmd_close_response(DistCmdResult *response); -#define ts_dist_cmd_run_on_data_nodes(command, nodes) \ - ts_dist_cmd_close_response(ts_dist_cmd_invoke_on_data_nodes(command, nodes, true)); +#define ts_dist_cmd_run_on_data_nodes(command, nodes, transactional) \ + ts_dist_cmd_close_response(ts_dist_cmd_invoke_on_data_nodes(command, nodes, transactional)); extern PreparedDistCmd *ts_dist_cmd_prepare_command(const char *sql, size_t n_params, List *node_names); diff --git a/tsl/src/reorder.c b/tsl/src/reorder.c index f0020c93b43..bb8c3f2f24f 100644 --- a/tsl/src/reorder.c +++ b/tsl/src/reorder.c @@ -61,6 +61,7 @@ #include "annotations.h" #include "chunk.h" +#include "chunk_copy.h" #include "chunk_index.h" #include "hypertable_cache.h" 
#include "indexing.h" @@ -195,6 +196,75 @@ tsl_move_chunk(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } +/* + * Implement a distributed chunk copy/move operation. + * + * We use a procedure because multiple steps need to be performed via multiple + * transactions across the access node and the two datanodes that are involved. + * The progress of the various stages/steps are tracked in the + * CHUNK_COPY_OPERATION catalog table + */ +static void +tsl_copy_or_move_chunk_proc(FunctionCallInfo fcinfo, bool delete_on_src_node) +{ + Oid chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0); + const char *src_node_name = PG_ARGISNULL(1) ? NULL : NameStr(*PG_GETARG_NAME(1)); + const char *dst_node_name = PG_ARGISNULL(2) ? NULL : NameStr(*PG_GETARG_NAME(2)); + + TS_PREVENT_FUNC_IF_READ_ONLY(); + + PreventInTransactionBlock(true, get_func_name(FC_FN_OID(fcinfo))); + + /* src_node and dst_node both have to be non-NULL */ + if (src_node_name == NULL || dst_node_name == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid source or destination node"))); + + if (!OidIsValid(chunk_id)) + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid chunk"))); + + /* perform the actual distributed chunk move after a few sanity checks */ + chunk_copy(chunk_id, src_node_name, dst_node_name, delete_on_src_node); +} + +Datum +tsl_move_chunk_proc(PG_FUNCTION_ARGS) +{ + tsl_copy_or_move_chunk_proc(fcinfo, true); + + PG_RETURN_VOID(); +} + +Datum +tsl_copy_chunk_proc(PG_FUNCTION_ARGS) +{ + tsl_copy_or_move_chunk_proc(fcinfo, false); + + PG_RETURN_VOID(); +} + +Datum +tsl_copy_chunk_cleanup_proc(PG_FUNCTION_ARGS) +{ + const char *operation_id = PG_ARGISNULL(0) ? 
NULL : NameStr(*PG_GETARG_NAME(0)); + + TS_PREVENT_FUNC_IF_READ_ONLY(); + + PreventInTransactionBlock(true, get_func_name(FC_FN_OID(fcinfo))); + + /* valid input has to be provided */ + if (operation_id == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid chunk copy operation id"))); + + /* perform the cleanup/repair depending on the stage */ + chunk_copy_cleanup(operation_id); + + PG_RETURN_VOID(); +} + void reorder_chunk(Oid chunk_id, Oid index_id, bool verbose, Oid wait_id, Oid destination_tablespace, Oid index_tablespace) diff --git a/tsl/src/reorder.h b/tsl/src/reorder.h index 6618f0c02c4..09c4a3bf379 100644 --- a/tsl/src/reorder.h +++ b/tsl/src/reorder.h @@ -11,6 +11,9 @@ extern Datum tsl_reorder_chunk(PG_FUNCTION_ARGS); extern Datum tsl_move_chunk(PG_FUNCTION_ARGS); +extern Datum tsl_move_chunk_proc(PG_FUNCTION_ARGS); +extern Datum tsl_copy_chunk_proc(PG_FUNCTION_ARGS); +extern Datum tsl_copy_chunk_cleanup_proc(PG_FUNCTION_ARGS); extern void reorder_chunk(Oid chunk_id, Oid index_id, bool verbose, Oid wait_id, Oid destination_tablespace, Oid index_tablespace); diff --git a/tsl/test/expected/chunk_api.out b/tsl/test/expected/chunk_api.out index 207ffd38d46..38937a4cb39 100644 --- a/tsl/test/expected/chunk_api.out +++ b/tsl/test/expected/chunk_api.out @@ -85,6 +85,58 @@ SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time": [15150240 ERROR: permission denied for table "chunkapi" DETAIL: Insert privileges required on "chunkapi" to create chunks. 
\set ON_ERROR_STOP 1 +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- Test create_chunk_table for errors +\set ON_ERROR_STOP 0 +-- Test create_chunk_table for NULL input +SELECT * FROM _timescaledb_internal.create_chunk_table(NULL,' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: hypertable cannot be NULL +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi', NULL, '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: slices cannot be NULL +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', NULL,'_hyper_1_1_chunk'); +ERROR: chunk schema name cannot be NULL +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal',NULL); +ERROR: chunk table name cannot be NULL +-- Modified time constraint should fail with collision +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: chunk table creation failed due to dimension slice collision +-- Missing dimension +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid hypercube for hypertable "chunkapi" +DETAIL: invalid number of hypercube dimensions +-- Extra dimension +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "device": [-9223372036854775808, 1073741823], "time2": [1514419600000000, 1515024000000000]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid hypercube for hypertable "chunkapi" +DETAIL: invalid number of hypercube 
dimensions +-- Bad dimension name +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "dev": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid hypercube for hypertable "chunkapi" +DETAIL: dimension "dev" does not exist in hypertable +-- Same dimension twice +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "time": [1514419600000000, 1515024000000000]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid hypercube for hypertable "chunkapi" +DETAIL: invalid number of hypercube dimensions +-- Bad bounds format +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": ["1514419200000000", 1515024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid hypercube for hypertable "chunkapi" +DETAIL: constraint for dimension "time" is not numeric +-- Bad slices format +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid hypercube for hypertable "chunkapi" +DETAIL: unexpected number of dimensional bounds for dimension "time" +-- Bad slices json +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time: [1515024000000000] "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: invalid input syntax for type json +LINE 1: ...imescaledb_internal.create_chunk_table('chunkapi',' {"time: ... + ^ +DETAIL: Token "device" is invalid. +CONTEXT: JSON data, line 1: {"time: [1515024000000000] "device... 
+-- Valid chunk, but no permissions +SET ROLE :ROLE_DEFAULT_PERM_USER_2; +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +ERROR: permission denied for table "chunkapi" +DETAIL: Insert privileges required on "chunkapi" to create chunks. +\set ON_ERROR_STOP 1 -- Test that granting insert on tables allow create_chunk to be -- called. This will also create a chunk that does not collide and has -- a custom schema and name. @@ -518,3 +570,427 @@ WARNING: insufficient number of data nodes for distributed hypertable "disttabl DROP DATABASE :DN_DBNAME_1; DROP DATABASE :DN_DBNAME_2; +-- Test create_chunk_table to recreate the chunk table and show dimension slices +SET ROLE :ROLE_DEFAULT_PERM_USER; +SELECT * FROM chunkapi ORDER BY time; + time | device | temp +------------------------------+--------+------ + Mon Jan 01 05:00:00 2018 PST | 1 | 23.4 +(1 row) + +SELECT chunk_schema AS "CHUNK_SCHEMA", chunk_name AS "CHUNK_NAME" +FROM timescaledb_information.chunks c +ORDER BY chunk_name DESC +LIMIT 1 \gset +SELECT slices AS "SLICES" +FROM _timescaledb_internal.show_chunk(:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME') \gset +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + relname +---------- + chunkapi +(1 row) + +SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id; + id | dimension_id | range_start | range_end +----+--------------+----------------------+------------------ + 1 | 1 | 1514419200000000 | 1515024000000000 + 2 | 2 | -9223372036854775808 | 1073741823 + 3 | 1 | 1515024000000000 | 1519024000000000 +(3 rows) + +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id; + id | dimension_id | range_start | range_end +----+--------------+----------------------+------------------ + 
2 | 2 | -9223372036854775808 | 1073741823 + 3 | 1 | 1515024000000000 | 1519024000000000 +(2 rows) + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + count +------- + 1 +(1 row) + +SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id; + id | dimension_id | range_start | range_end +----+--------------+----------------------+------------------ + 2 | 2 | -9223372036854775808 | 1073741823 + 3 | 1 | 1515024000000000 | 1519024000000000 +(2 rows) + +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + relname +--------- +(0 rows) + +-- Test that creat_chunk fails since chunk table already exists +\set ON_ERROR_STOP 0 +SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); +ERROR: relation "_hyper_1_1_chunk" already exists +\set ON_ERROR_STOP 1 +-- Test create_chunk_table on a hypertable where the chunk didn't exist before +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 3 | public | chunkapi | t +(1 row) + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + count +------- + 1 +(1 row) + +-- Demonstrate that current settings for dimensions don't affect create_chunk_table +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2, '3d'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- 
+ 4 | public | chunkapi | t +(1 row) + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + count +------- + 1 +(1 row) + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 5 | public | chunkapi | t +(1 row) + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + count +------- + 1 +(1 row) + +-- Test create_chunk_table if a colliding chunk exists +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 6 | public | chunkapi | t +(1 row) + +INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4); +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); +ERROR: chunk table creation failed due to dimension slice collision +\set ON_ERROR_STOP 1 +-- Test create_chunk_table when a chunk exists in different space partition and thus doesn't collide +DROP TABLE chunkapi; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 7 | public | chunkapi | t +(1 row) + +INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 2, 23.4); +SELECT _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + create_chunk_table 
+-------------------- + t +(1 row) + +-- Test create_chunk_table when a chunk exists in different time partition and thus doesn't collide +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 8 | public | chunkapi | t +(1 row) + +INSERT INTO chunkapi VALUES ('2018-02-01 05:00:00-8', 1, 23.4); +SELECT _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + create_chunk_table +-------------------- + t +(1 row) + +-- Test create_chunk_table with tablespaces +\c :TEST_DBNAME :ROLE_SUPERUSER +SET client_min_messages = ERROR; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +SET client_min_messages = NOTICE; +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +-- Use the space partition to calculate the tablespace id to use +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 9 | public | chunkapi | t +(1 row) + +SELECT attach_tablespace('tablespace1', 'chunkapi'); + attach_tablespace +------------------- + +(1 row) + +SELECT attach_tablespace('tablespace2', 'chunkapi'); + attach_tablespace +------------------- + +(1 row) + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + count +------- + 1 +(1 row) + +SELECT tablespace FROM pg_tables WHERE 
tablename = :'CHUNK_NAME'; + tablespace +------------- + tablespace1 +(1 row) + +-- Use the time partition to calculate the tablespace id to use +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE devices (id int PRIMARY KEY); +INSERT INTO devices VALUES (1); +CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK (temp > 0)); +SELECT * FROM create_hypertable('chunkapi', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 10 | public | chunkapi | t +(1 row) + +INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4); +SELECT chunk_schema AS "CHUNK_SCHEMA", chunk_name AS "CHUNK_NAME" +FROM timescaledb_information.chunks c +ORDER BY chunk_name DESC +LIMIT 1 \gset +SELECT slices AS "SLICES" +FROM _timescaledb_internal.show_chunk(:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME') \gset +-- Save the constraints info in a table for later comparison +CREATE TABLE original_chunk_constraints AS +SELECT "Constraint", "Type", "Columns", "Index"::text, "Expr", "Deferrable", "Deferred", "Validated" +FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); +-- Save contraints metadata +CREATE TABLE original_chunk_constraints_metadata AS +SELECT + chunk_id, + dimension_slice_id, + constraint_name, + hypertable_constraint_name +FROM _timescaledb_catalog.chunk_constraint con +INNER JOIN _timescaledb_catalog.chunk ch ON (con.chunk_id = ch.id) +WHERE ch.schema_name = :'CHUNK_SCHEMA' AND ch.table_name = :'CHUNK_NAME'; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +SELECT attach_tablespace('tablespace1', 'chunkapi'); + attach_tablespace +------------------- + +(1 row) + +SELECT attach_tablespace('tablespace2', 'chunkapi'); + attach_tablespace +------------------- + +(1 row) + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + count +------- + 1 +(1 row) + 
+SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME'; + tablespace +------------- + tablespace1 +(1 row) + +-- Now create the complete chunk from the chunk table +SELECT _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', + format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); + create_chunk +--------------------------------------------------------------------------------------------------------- + (11,10,_timescaledb_internal,_hyper_10_10_chunk,r,"{""time"": [1514419200000000, 1515024000000000]}",t) +(1 row) + +-- Compare original and new constraints +SELECT * FROM original_chunk_constraints; + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +---------------------------+------+----------+--------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- + 10_1_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t + 10_2_chunkapi_pkey | p | {time} | _timescaledb_internal."10_2_chunkapi_pkey" | | f | f | t + chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t + constraint_15 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t +(4 rows) + +SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +---------------------------+------+----------+--------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- + 11_3_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t + 
11_4_chunkapi_pkey | p | {time} | _timescaledb_internal."11_4_chunkapi_pkey" | | f | f | t + chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t + constraint_16 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t +(4 rows) + +-- Compare original and new chunk constraints metadata +SELECT * FROM original_chunk_constraints_metadata; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+---------------------------+---------------------------- + 10 | 15 | constraint_15 | + 10 | | 10_1_chunkapi_device_fkey | chunkapi_device_fkey + 10 | | 10_2_chunkapi_pkey | chunkapi_pkey +(3 rows) + +SELECT + chunk_id, + dimension_slice_id, + constraint_name, + hypertable_constraint_name +FROM _timescaledb_catalog.chunk_constraint con +INNER JOIN _timescaledb_catalog.chunk ch ON (con.chunk_id = ch.id) +WHERE ch.schema_name = :'CHUNK_SCHEMA' AND ch.table_name = :'CHUNK_NAME'; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+---------------------------+---------------------------- + 11 | 16 | constraint_16 | + 11 | | 11_3_chunkapi_device_fkey | chunkapi_device_fkey + 11 | | 11_4_chunkapi_pkey | chunkapi_pkey +(3 rows) + +DROP TABLE original_chunk_constraints; +DROP TABLE original_chunk_constraints_metadata; +-- The chunk should inherit the hypertable +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + relname +---------- + chunkapi +(1 row) + +-- Show chunk's attached to the table +SELECT + :'CHUNK_SCHEMA' AS expected_schema, + :'CHUNK_NAME' AS expected_table_name, + (_timescaledb_internal.show_chunk(ch)).* +FROM show_chunks('chunkapi') ch; + expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | 
table_name | relkind | slices +-----------------------+---------------------+----------+---------------+-----------------------+--------------------+---------+------------------------------------------------ + _timescaledb_internal | _hyper_10_10_chunk | 11 | 10 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} +(1 row) + +DROP TABLE chunkapi; +DROP TABLE devices; +-- Test creating a chunk from an existing chunk table which was not +-- created via create_chunk_table and having a different name. +CREATE TABLE devices (id int PRIMARY KEY); +INSERT INTO devices VALUES (1); +CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK(temp > 0)); +SELECT * FROM create_hypertable('chunkapi', 'time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 11 | public | chunkapi | t +(1 row) + +CREATE TABLE newchunk (time timestamptz NOT NULL, device int, temp float); +SELECT * FROM test.show_constraints('newchunk'); + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +------------+------+---------+-------+------+------------+----------+----------- +(0 rows) + +INSERT INTO newchunk VALUES ('2018-01-01 05:00:00-8', 1, 23.4); +\set ON_ERROR_STOP 0 +-- Creating the chunk without required CHECK constraints on a table +-- should fail. Currently, PostgreSQL only enforces presence of CHECK +-- constraints, but not foreign key, unique, or primary key +-- constraints. We should probably add checks to enforce the latter +-- too or auto-create all constraints. +SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk'); +ERROR: child table is missing constraint "chunkapi_temp_check" +\set ON_ERROR_STOP 1 +-- Add the missing CHECK constraint. Note that the name must be the +-- same as on the parent table. 
+ALTER TABLE newchunk ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0); +SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk'); + chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created +----------+---------------+-----------------------+--------------------+---------+------------------------------------------------+--------- + 13 | 11 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} | t +(1 row) + +-- Show the chunk and that names are what we'd expect +SELECT + :'CHUNK_SCHEMA' AS expected_schema, + :'CHUNK_NAME' AS expected_table_name, + (_timescaledb_internal.show_chunk(ch)).* +FROM show_chunks('chunkapi') ch; + expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices +-----------------------+---------------------+----------+---------------+-----------------------+--------------------+---------+------------------------------------------------ + _timescaledb_internal | _hyper_10_10_chunk | 13 | 11 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} +(1 row) + +-- The chunk should inherit the hypertable +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + relname +---------- + chunkapi +(1 row) + +-- Test that it is possible to query the data via the hypertable +SELECT * FROM chunkapi ORDER BY 1,2,3; + time | device | temp +------------------------------+--------+------ + Mon Jan 01 05:00:00 2018 PST | 1 | 23.4 +(1 row) + +-- Show that the chunk has all the necessary constraints. These +-- include inheritable constraints and dimensional constraints, which +-- are specific to the chunk. Currently, foreign key, unique, and +-- primary key constraints are not inherited or auto-created. 
+SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); + Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated +---------------------------+------+----------+--------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+----------- + 13_7_chunkapi_device_fkey | f | {device} | devices_pkey | | f | f | t + 13_8_chunkapi_pkey | p | {time} | _timescaledb_internal."13_8_chunkapi_pkey" | | f | f | t + chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t + constraint_18 | c | {time} | - | (("time" >= 'Wed Dec 27 16:00:00 2017 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 03 16:00:00 2018 PST'::timestamp with time zone)) | f | f | t +(4 rows) + +DROP TABLE chunkapi; +\c :TEST_DBNAME :ROLE_SUPERUSER +SET client_min_messages = ERROR; +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +SET client_min_messages = NOTICE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER diff --git a/tsl/test/expected/data_node.out b/tsl/test/expected/data_node.out index e718e367a97..49b5bbff097 100644 --- a/tsl/test/expected/data_node.out +++ b/tsl/test/expected/data_node.out @@ -1327,6 +1327,307 @@ NOTICE: extension "timescaledb" already exists on data node, skipping data_node_6 | localhost | 55432 | db_data_node_6 | t | f | f (1 row) +SELECT * FROM delete_data_node('data_node_6'); + delete_data_node +------------------ + t +(1 row) + +-- +-- Tests for copy/move chunk API +-- +RESET ROLE; +DROP DATABASE :DN_DBNAME_1; +DROP DATABASE :DN_DBNAME_2; +DROP DATABASE :DN_DBNAME_3; +SELECT * FROM add_data_node('data_node_1', host => 'localhost', + database => :'DN_DBNAME_1'); + node_name | host | port | database | node_created | database_created | extension_created 
+-------------+-----------+-------+----------------+--------------+------------------+------------------- + data_node_1 | localhost | 55432 | db_data_node_1 | t | t | t +(1 row) + +SELECT * FROM add_data_node('data_node_2', host => 'localhost', + database => :'DN_DBNAME_2'); + node_name | host | port | database | node_created | database_created | extension_created +-------------+-----------+-------+----------------+--------------+------------------+------------------- + data_node_2 | localhost | 55432 | db_data_node_2 | t | t | t +(1 row) + +SELECT * FROM add_data_node('data_node_3', host => 'localhost', + database => :'DN_DBNAME_3'); + node_name | host | port | database | node_created | database_created | extension_created +-------------+-----------+-------+----------------+--------------+------------------+------------------- + data_node_3 | localhost | 55432 | db_data_node_3 | t | t | t +(1 row) + +GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC; +SET ROLE :ROLE_1; +CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float); +SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3); + create_distributed_hypertable +------------------------------- + (9,public,dist_test,t) +(1 row) + +INSERT INTO dist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t; +SELECT * from show_chunks('dist_test'); + show_chunks +---------------------------------------------- + _timescaledb_internal._dist_hyper_9_12_chunk + _timescaledb_internal._dist_hyper_9_13_chunk + _timescaledb_internal._dist_hyper_9_14_chunk + _timescaledb_internal._dist_hyper_9_15_chunk +(4 rows) + +SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_1]: +show_chunks +-------------------------------------------- 
+_timescaledb_internal._dist_hyper_9_12_chunk +_timescaledb_internal._dist_hyper_9_15_chunk +(2 rows) + + +NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_2]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_13_chunk +(1 row) + + +NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_3]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_14_chunk +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT sum(device) FROM dist_test; + sum +----- + 846 +(1 row) + +SELECT * FROM test.remote_exec(ARRAY['data_node_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$); +NOTICE: [data_node_1]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk +NOTICE: [data_node_1]: +sum +--- +406 +(1 row) + + + remote_exec +------------- + +(1 row) + +-- ensure data node name is provided and has proper type +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> null, destination_node => 'data_node_2'); +ERROR: invalid source or destination node +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => null); +ERROR: invalid source or destination node +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 2); +ERROR: procedure timescaledb_experimental.copy_chunk(chunk => unknown, source_node => unknown, destination_node => integer) does not exist at character 6 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node => 'data_node_1'); +ERROR: invalid source or destination node +\set ON_ERROR_STOP 1 +-- ensure functions can't be run in read only mode +SET default_transaction_read_only 
TO on; +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: cannot execute move_chunk() in a read-only transaction +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: cannot execute copy_chunk() in a read-only transaction +\set ON_ERROR_STOP 1 +SET default_transaction_read_only TO off; +-- ensure functions can't be run in an active multi-statement transaction +\set ON_ERROR_STOP 0 +BEGIN; +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: move_chunk cannot run inside a transaction block +ROLLBACK; +BEGIN; +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: copy_chunk cannot run inside a transaction block +ROLLBACK; +\set ON_ERROR_STOP 1 +-- must be superuser to copy/move chunks +SET ROLE :ROLE_DEFAULT_PERM_USER; +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: must be superuser to copy/move chunk to data node +\set ON_ERROR_STOP 1 +SET ROLE :ROLE_1; +-- can't run copy/move chunk on a data node +\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER; +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: function must be run on the access node only +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: 
function must be run on the access node only +\set ON_ERROR_STOP 1 +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- ensure that hypertable chunks are distributed +CREATE TABLE nondist_test(time timestamp NOT NULL, device int, temp float); +SELECT create_hypertable('nondist_test', 'time', 'device', 3); + create_hypertable +---------------------------- + (10,public,nondist_test,t) +(1 row) + +INSERT INTO nondist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t; +SELECT * from show_chunks('nondist_test'); + show_chunks +------------------------------------------ + _timescaledb_internal._hyper_10_16_chunk + _timescaledb_internal._hyper_10_17_chunk + _timescaledb_internal._hyper_10_18_chunk + _timescaledb_internal._hyper_10_19_chunk +(4 rows) + +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: "_hyper_10_16_chunk" is not a valid remote chunk +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: "_hyper_10_16_chunk" is not a valid remote chunk +\set ON_ERROR_STOP 1 +-- ensure that distributed chunk is not compressed +ALTER TABLE dist_test SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC'); +SELECT compress_chunk('_timescaledb_internal._dist_hyper_9_15_chunk'); + compress_chunk +---------------------------------------------- + _timescaledb_internal._dist_hyper_9_15_chunk +(1 row) + +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: "_dist_hyper_9_15_chunk" is a compressed remote chunk. 
Chunk copy/move not supported currently on compressed chunks +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: "_dist_hyper_9_15_chunk" is a compressed remote chunk. Chunk copy/move not supported currently on compressed chunks +\set ON_ERROR_STOP 1 +-- ensure that chunk exists on a source data node +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_13_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: chunk "_dist_hyper_9_13_chunk" does not exist on source data node "data_node_1" +\set ON_ERROR_STOP 1 +-- do actualy copy +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_1]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_12_chunk +_timescaledb_internal._dist_hyper_9_15_chunk +(2 rows) + + +NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_2]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_13_chunk +_timescaledb_internal._dist_hyper_9_12_chunk +(2 rows) + + +NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_3]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_14_chunk +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM test.remote_exec(ARRAY['data_node_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$); +NOTICE: [data_node_2]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk +NOTICE: [data_node_2]: +sum +--- +406 
+(1 row) + + + remote_exec +------------- + +(1 row) + +-- ensure that chunk exists on a destination data node +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ERROR: chunk "_dist_hyper_9_12_chunk" already exists on destination data node "data_node_2" +\set ON_ERROR_STOP 1 +-- now try to move the same chunk from data node 2 to 3 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_2', destination_node => 'data_node_3'); +SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_1]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_12_chunk +_timescaledb_internal._dist_hyper_9_15_chunk +(2 rows) + + +NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_2]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_13_chunk +(1 row) + + +NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test') +NOTICE: [data_node_3]: +show_chunks +-------------------------------------------- +_timescaledb_internal._dist_hyper_9_14_chunk +_timescaledb_internal._dist_hyper_9_12_chunk +(2 rows) + + + remote_exec +------------- + +(1 row) + +SELECT * FROM test.remote_exec(ARRAY['data_node_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$); +NOTICE: [data_node_3]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk +NOTICE: [data_node_3]: +sum +--- +406 +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT sum(device) FROM dist_test; + sum +----- + 846 +(1 row) + RESET ROLE; DROP DATABASE :DN_DBNAME_1; DROP DATABASE :DN_DBNAME_2; diff --git a/tsl/test/expected/dist_backup.out 
b/tsl/test/expected/dist_backup.out index 9ac76c03fe7..3ef1209d261 100644 --- a/tsl/test/expected/dist_backup.out +++ b/tsl/test/expected/dist_backup.out @@ -65,7 +65,7 @@ SHOW timescaledb.enable_2pc; SHOW wal_level; wal_level ----------- - replica + logical (1 row) -- make sure not in recovery mode diff --git a/tsl/test/expected/dist_views.out b/tsl/test/expected/dist_views.out index 7ef8147aa6f..32591cf0f41 100644 --- a/tsl/test/expected/dist_views.out +++ b/tsl/test/expected/dist_views.out @@ -164,3 +164,49 @@ SELECT * FROM hypertable_index_size( 'dist_table_time_idx') ; 114688 (1 row) +-- Test chunk_replication_status view +SELECT * FROM timescaledb_experimental.chunk_replication_status +ORDER BY chunk_schema, chunk_name +LIMIT 4; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | desired_num_replicas | num_replicas | replica_nodes | non_replica_nodes +-------------------+-----------------+-----------------------+-----------------------+----------------------+--------------+---------------------------+------------------- + public | dist_table | _timescaledb_internal | _dist_hyper_1_1_chunk | 2 | 2 | {view_node_1,view_node_2} | {view_node_3} + public | dist_table | _timescaledb_internal | _dist_hyper_1_2_chunk | 2 | 2 | {view_node_2,view_node_3} | {view_node_1} + public | dist_table | _timescaledb_internal | _dist_hyper_1_3_chunk | 2 | 2 | {view_node_3,view_node_1} | {view_node_2} + public | quote'tab | _timescaledb_internal | _dist_hyper_2_4_chunk | 2 | 2 | {view_node_1,view_node_2} | {view_node_3} +(4 rows) + +-- drop one chunk replica +SELECT _timescaledb_internal.chunk_drop_replica(format('%I.%I', chunk_schema, chunk_name)::regclass, replica_nodes[1]) +FROM timescaledb_experimental.chunk_replication_status +ORDER BY chunk_schema, chunk_name +LIMIT 1; + chunk_drop_replica +-------------------- + +(1 row) + +SELECT * FROM timescaledb_experimental.chunk_replication_status +WHERE num_replicas < desired_num_replicas +ORDER BY chunk_schema, 
chunk_name; + hypertable_schema | hypertable_name | chunk_schema | chunk_name | desired_num_replicas | num_replicas | replica_nodes | non_replica_nodes +-------------------+-----------------+-----------------------+-----------------------+----------------------+--------------+---------------+--------------------------- + public | dist_table | _timescaledb_internal | _dist_hyper_1_1_chunk | 2 | 1 | {view_node_2} | {view_node_1,view_node_3} +(1 row) + +-- Example usage of finding data nodes to copy/move chunks between +SELECT + format('%I.%I', chunk_schema, chunk_name)::regclass AS chunk, + replica_nodes[1] AS copy_from_node, + non_replica_nodes[1] AS copy_to_node +FROM + timescaledb_experimental.chunk_replication_status +WHERE + num_replicas < desired_num_replicas +ORDER BY + chunk_schema, chunk_name; + chunk | copy_from_node | copy_to_node +---------------------------------------------+----------------+-------------- + _timescaledb_internal._dist_hyper_1_1_chunk | view_node_2 | view_node_1 +(1 row) + diff --git a/tsl/test/postgresql.conf.in b/tsl/test/postgresql.conf.in index ebbceed89af..067391cdb0b 100644 --- a/tsl/test/postgresql.conf.in +++ b/tsl/test/postgresql.conf.in @@ -26,3 +26,4 @@ ssl_cert_file='@TEST_OUTPUT_DIR@/ts_data_node.crt' ssl_key_file='@TEST_OUTPUT_DIR@/ts_data_node.key' timescaledb.ssl_dir='@TEST_OUTPUT_DIR@/timescaledb/certs' timescaledb.passfile='@TEST_PASSFILE@' +wal_level='logical' diff --git a/tsl/test/shared/expected/dist_chunk.out b/tsl/test/shared/expected/dist_chunk.out new file mode 100644 index 00000000000..ef5a886c0f1 --- /dev/null +++ b/tsl/test/shared/expected/dist_chunk.out @@ -0,0 +1,215 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- This file contains tests for all features that will be used as part +-- of the chunk move/copy multi-node functionality +-- A table for the first chunk will be created on the data node, where it is not present. +SELECT chunk_name, data_nodes +FROM timescaledb_information.chunks +WHERE hypertable_name = 'dist_chunk_copy' +ORDER BY 1, 2; + chunk_name | data_nodes +-------------------------+--------------------------- + _dist_hyper_X_X_chunk | {data_node_2,data_node_3} + _dist_hyper_X_X_chunk | {data_node_2,data_node_3} + _dist_hyper_X_X_chunk | {data_node_1,data_node_3} + _dist_hyper_X_X_chunk | {data_node_1,data_node_3} + _dist_hyper_X_X_chunk | {data_node_2,data_node_3} + _dist_hyper_X_X_chunk | {data_node_1,data_node_2} + _dist_hyper_X_X_chunk | {data_node_1,data_node_3} + _dist_hyper_X_X_chunk | {data_node_2,data_node_3} + _dist_hyper_X_X_chunk | {data_node_1,data_node_2} + _dist_hyper_X_X_chunk | {data_node_2,data_node_3} + _dist_hyper_X_X_chunk | {data_node_1,data_node_3} +(11 rows) + +-- Non-distributed chunk will be used to test an error +SELECT chunk_name +FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' +ORDER BY 1; + chunk_name +-------------------- + _hyper_X_X_chunk + _hyper_X_X_chunk +(2 rows) + +-- Test function _timescaledb_internal.create_chunk_replica_table +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.create_chunk_replica_table(NULL, 'data_node_1'); +ERROR: chunk cannot be NULL +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', NULL); +ERROR: data node name cannot be NULL +SELECT _timescaledb_internal.create_chunk_replica_table(1234, 'data_node_1'); +ERROR: oid "1234" is not a chunk +SELECT _timescaledb_internal.create_chunk_replica_table('metrics_int', 'data_node_1'); +ERROR: relation "metrics_int" is not a chunk +SELECT _timescaledb_internal.create_chunk_replica_table('conditions', 'data_node_1'); +ERROR: relation "conditions" is not a chunk +SELECT 
_timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._hyper_X_X_chunk', 'data_node_1'); +ERROR: chunk "_hyper_X_X_chunk" doesn't belong to a distributed hypertable +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_2'); +ERROR: chunk "_dist_hyper_X_X_chunk" already exists on data node "data_node_2" +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_2'); +ERROR: relation "_timescaledb_internal._dist_hyper_X_X_chunk" does not exist at character 57 +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_4'); +ERROR: server "data_node_4" does not exist +BEGIN READ ONLY; +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_3'); +ERROR: cannot execute create_chunk_replica_table() in a read-only transaction +COMMIT; +\set ON_ERROR_STOP 1 +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_1'); + create_chunk_replica_table +---------------------------- + +(1 row) + +-- Test that the table cannot be created since it was already created on the data node +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_3'); +ERROR: chunk "_dist_hyper_X_X_chunk" already exists on data node "data_node_3" +\set ON_ERROR_STOP 1 +-- Creating chunk replica table ignores compression now: +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_2'); + create_chunk_replica_table +---------------------------- + +(1 row) + +CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_X_X_chunk $$, '{"data_node_1"}'); +CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_X_X_chunk $$, '{"data_node_2"}'); +-- 
Test function _timescaledb_internal.chunk_drop_replica +-- Sanity checking of the chunk_drop_replica API +\set ON_ERROR_STOP 0 +-- Check that it doesn't work in a read only transaction +SET default_transaction_read_only TO on; +SELECT _timescaledb_internal.chunk_drop_replica(NULL, NULL); +ERROR: cannot execute chunk_drop_replica() in a read-only transaction +RESET default_transaction_read_only; +-- NULL input for chunk id errors out +SELECT _timescaledb_internal.chunk_drop_replica(NULL, NULL); +ERROR: invalid chunk relation +-- Specifying any regular hypertable instead of chunk errors out +SELECT _timescaledb_internal.chunk_drop_replica('public.metrics', NULL); +ERROR: invalid chunk relation +-- Specifying regular hypertable chunk on a proper data node errors out +SELECT _timescaledb_internal.chunk_drop_replica('_timescaledb_internal._hyper_X_X_chunk', 'data_node_1'); +ERROR: "_hyper_X_X_chunk" is not a valid remote chunk +-- Specifying non-existent chunk on a proper data node errors out +SELECT _timescaledb_internal.chunk_drop_replica('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_1'); +ERROR: relation "_timescaledb_internal._dist_hyper_X_X_chunk" does not exist at character 49 +-- Get the last chunk for this hypertable +SELECT ch1.schema_name|| '.' 
|| ch1.table_name as "CHUNK_NAME", ch1.id "CHUNK_ID" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id +AND ht.table_name = 'mvcp_hyper' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- Specifying wrong node name errors out +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', 'bad_node'); +ERROR: server "bad_node" does not exist +-- This chunk contains only one entry as of now +SELECT * FROM :CHUNK_NAME; + time | value +------+------- + 1000 | 1000 +(1 row) + +-- Specifying NULL node name along with proper chunk errors out +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', NULL); +ERROR: data node name cannot be NULL +\set ON_ERROR_STOP 1 +-- Check the current primary foreign server for this chunk, that will change +-- post the chunk_drop_replica call +SELECT foreign_server_name AS "PRIMARY_CHUNK_NODE" + FROM information_schema.foreign_tables WHERE + foreign_table_name = split_part(:'CHUNK_NAME', '.', 2) + ORDER BY 1 \gset +-- Show the node that was primary for the chunk +\echo :PRIMARY_CHUNK_NODE +data_node_1 +-- Drop the chunk replica on the primary chunk node. 
Should succeed +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', :'PRIMARY_CHUNK_NODE'); + chunk_drop_replica +-------------------- + +(1 row) + +-- The primary foreign server for the chunk should be updated now +SELECT foreign_server_name + FROM information_schema.foreign_tables WHERE + foreign_table_name = split_part(:'CHUNK_NAME', '.', 2) + ORDER BY 1; + foreign_server_name +--------------------- + data_node_2 +(1 row) + +-- Number of replicas should have been reduced by 1 +SELECT count(*) FROM _timescaledb_catalog.chunk_data_node WHERE chunk_id = :'CHUNK_ID'; + count +------- + 2 +(1 row) + +-- Ensure that INSERTs still work on this mvcp_hyper table into this chunk +-- Rollback to not modify the shared test state +BEGIN; +INSERT INTO mvcp_hyper VALUES (1001, 1001); +-- Ensure that SELECTs are able to query data from the above chunk +SELECT count(*) FROM mvcp_hyper WHERE time >= 1000; + count +------- + 2 +(1 row) + +ROLLBACK; +-- Check that chunk_drop_replica works with compressed chunk +SELECT substr(compress_chunk(:'CHUNK_NAME')::TEXT, 1, 29); + substr +------------------------------- + _timescaledb_internal._dist_h +(1 row) + +-- Drop one replica of a valid chunk. 
Should succeed on another datanode +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', 'data_node_2'); + chunk_drop_replica +-------------------- + +(1 row) + +-- Number of replicas should have been reduced by 1 +SELECT count(*) FROM _timescaledb_catalog.chunk_data_node WHERE chunk_id = :'CHUNK_ID'; + count +------- + 1 +(1 row) + +-- Decompress before checking INSERTs +SELECT substr(decompress_chunk(:'CHUNK_NAME')::TEXT, 1, 29); + substr +------------------------------- + _timescaledb_internal._dist_h +(1 row) + +-- Ensure that INSERTs still work on this mvcp_hyper table into this chunk +-- Rollback to not modify the shared test state +BEGIN; +INSERT INTO mvcp_hyper VALUES (1002, 1002); +-- Ensure that SELECTs are able to query data from the above chunk +SELECT count(*) FROM mvcp_hyper WHERE time >= 1000; + count +------- + 2 +(1 row) + +ROLLBACK; +-- Drop one replica of a valid chunk. Should not succeed on last datanode +SELECT foreign_server_name AS "PRIMARY_CHUNK_NODE" + FROM information_schema.foreign_tables WHERE + foreign_table_name = split_part(:'CHUNK_NAME', '.', 2) + ORDER BY 1 \gset +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', :'PRIMARY_CHUNK_NODE'); +ERROR: cannot drop the last chunk replica +\set ON_ERROR_STOP 1 diff --git a/tsl/test/shared/sql/CMakeLists.txt b/tsl/test/shared/sql/CMakeLists.txt index 02e37e4d96a..6bef4507940 100644 --- a/tsl/test/shared/sql/CMakeLists.txt +++ b/tsl/test/shared/sql/CMakeLists.txt @@ -1,6 +1,10 @@ set(TEST_FILES_SHARED - constify_timestamptz_op_interval.sql constraint_exclusion_prepared.sql - decompress_placeholdervar.sql dist_gapfill.sql dist_insert.sql + constify_timestamptz_op_interval.sql + constraint_exclusion_prepared.sql + decompress_placeholdervar.sql + dist_chunk.sql + dist_gapfill.sql + dist_insert.sql dist_distinct.sql) if(CMAKE_BUILD_TYPE MATCHES Debug) diff --git a/tsl/test/shared/sql/dist_chunk.sql b/tsl/test/shared/sql/dist_chunk.sql new file mode 
100644 index 00000000000..7c825fde5b6 --- /dev/null +++ b/tsl/test/shared/sql/dist_chunk.sql @@ -0,0 +1,147 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- This file contains tests for all features that will be used as part +-- of the chunk move/copy multi-node functionality + + +-- A table for the first chunk will be created on the data node, where it is not present. +SELECT chunk_name, data_nodes +FROM timescaledb_information.chunks +WHERE hypertable_name = 'dist_chunk_copy' +ORDER BY 1, 2; + +-- Non-distributed chunk will be used to test an error +SELECT chunk_name +FROM timescaledb_information.chunks +WHERE hypertable_name = 'conditions' +ORDER BY 1; + +-- Test function _timescaledb_internal.create_chunk_replica_table +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.create_chunk_replica_table(NULL, 'data_node_1'); +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', NULL); +SELECT _timescaledb_internal.create_chunk_replica_table(1234, 'data_node_1'); +SELECT _timescaledb_internal.create_chunk_replica_table('metrics_int', 'data_node_1'); +SELECT _timescaledb_internal.create_chunk_replica_table('conditions', 'data_node_1'); +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._hyper_10_51_chunk', 'data_node_1'); +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_2'); +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_27_chunk', 'data_node_2'); +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_4'); +BEGIN READ ONLY; +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_3'); +COMMIT; +\set ON_ERROR_STOP 
1 + +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_1'); + +-- Test that the table cannot be created since it was already created on the data node +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_3'); +\set ON_ERROR_STOP 1 + +-- Creating chunk replica table ignores compression now: +SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_70_chunk', 'data_node_2'); + +CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_15_67_chunk $$, '{"data_node_1"}'); +CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_15_70_chunk $$, '{"data_node_2"}'); + +-- Test function _timescaledb_internal.chunk_drop_replica +-- Sanity checking of the chunk_drop_replica API + +\set ON_ERROR_STOP 0 +-- Check that it doesn't work in a read only transaction +SET default_transaction_read_only TO on; +SELECT _timescaledb_internal.chunk_drop_replica(NULL, NULL); +RESET default_transaction_read_only; + +-- NULL input for chunk id errors out +SELECT _timescaledb_internal.chunk_drop_replica(NULL, NULL); + +-- Specifying any regular hypertable instead of chunk errors out +SELECT _timescaledb_internal.chunk_drop_replica('public.metrics', NULL); + +-- Specifying regular hypertable chunk on a proper data node errors out +SELECT _timescaledb_internal.chunk_drop_replica('_timescaledb_internal._hyper_1_1_chunk', 'data_node_1'); + +-- Specifying non-existent chunk on a proper data node errors out +SELECT _timescaledb_internal.chunk_drop_replica('_timescaledb_internal._dist_hyper_700_38_chunk', 'data_node_1'); + +-- Get the last chunk for this hypertable +SELECT ch1.schema_name|| '.' 
|| ch1.table_name as "CHUNK_NAME", ch1.id "CHUNK_ID" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id +AND ht.table_name = 'mvcp_hyper' +ORDER BY ch1.id DESC LIMIT 1 \gset + +-- Specifying wrong node name errors out +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', 'bad_node'); + +-- This chunk contains only one entry as of now +SELECT * FROM :CHUNK_NAME; + +-- Specifying NULL node name along with proper chunk errors out +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', NULL); + +\set ON_ERROR_STOP 1 +-- Check the current primary foreign server for this chunk, that will change +-- post the chunk_drop_replica call +SELECT foreign_server_name AS "PRIMARY_CHUNK_NODE" + FROM information_schema.foreign_tables WHERE + foreign_table_name = split_part(:'CHUNK_NAME', '.', 2) + ORDER BY 1 \gset + +-- Show the node that was primary for the chunk +\echo :PRIMARY_CHUNK_NODE + +-- Drop the chunk replica on the primary chunk node. Should succeed +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', :'PRIMARY_CHUNK_NODE'); + +-- The primary foreign server for the chunk should be updated now +SELECT foreign_server_name + FROM information_schema.foreign_tables WHERE + foreign_table_name = split_part(:'CHUNK_NAME', '.', 2) + ORDER BY 1; + +-- Number of replicas should have been reduced by 1 +SELECT count(*) FROM _timescaledb_catalog.chunk_data_node WHERE chunk_id = :'CHUNK_ID'; + +-- Ensure that INSERTs still work on this mvcp_hyper table into this chunk +-- Rollback to not modify the shared test state +BEGIN; +INSERT INTO mvcp_hyper VALUES (1001, 1001); +-- Ensure that SELECTs are able to query data from the above chunk +SELECT count(*) FROM mvcp_hyper WHERE time >= 1000; +ROLLBACK; + + +-- Check that chunk_drop_replica works with compressed chunk +SELECT substr(compress_chunk(:'CHUNK_NAME')::TEXT, 1, 29); + +-- Drop one replica of a valid chunk. 
Should succeed on another datanode +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', 'data_node_2'); + +-- Number of replicas should have been reduced by 1 +SELECT count(*) FROM _timescaledb_catalog.chunk_data_node WHERE chunk_id = :'CHUNK_ID'; + +-- Decompress before checking INSERTs +SELECT substr(decompress_chunk(:'CHUNK_NAME')::TEXT, 1, 29); + +-- Ensure that INSERTs still work on this mvcp_hyper table into this chunk +-- Rollback to not modify the shared test state +BEGIN; +INSERT INTO mvcp_hyper VALUES (1002, 1002); +-- Ensure that SELECTs are able to query data from the above chunk +SELECT count(*) FROM mvcp_hyper WHERE time >= 1000; +ROLLBACK; + +-- Drop one replica of a valid chunk. Should not succeed on last datanode +SELECT foreign_server_name AS "PRIMARY_CHUNK_NODE" + FROM information_schema.foreign_tables WHERE + foreign_table_name = split_part(:'CHUNK_NAME', '.', 2) + ORDER BY 1 \gset + +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.chunk_drop_replica(:'CHUNK_NAME', :'PRIMARY_CHUNK_NODE'); +\set ON_ERROR_STOP 1 diff --git a/tsl/test/shared/sql/include/shared_setup.sql b/tsl/test/shared/sql/include/shared_setup.sql index 76d1d0fc08f..b98e87f32b7 100644 --- a/tsl/test/shared/sql/include/shared_setup.sql +++ b/tsl/test/shared/sql/include/shared_setup.sql @@ -213,3 +213,31 @@ INSERT INTO metrics_int_dist1 VALUES (5,1,2,10.0), (100,1,1,0.0), (100,1,2,-100.0); + +-- Create distributed hypertable for copy chunk test. Need to have +-- a space-dimension to have more predictable chunk placement. 
+CREATE TABLE dist_chunk_copy ( + time timestamptz NOT NULL, + device integer, + value integer); + +SELECT create_distributed_hypertable('dist_chunk_copy', 'time', 'device', replication_factor => 2); +ALTER TABLE dist_chunk_copy SET (timescaledb.compress); + +SELECT setseed(0); +INSERT INTO dist_chunk_copy +SELECT t, ceil(_timescaledb_internal.get_partition_hash(t)::int % 5), random() * 20 +FROM generate_series('2020-01-01'::timestamp, '2020-01-25'::timestamp, '1d') t; + +-- Compress a few chunks of this dist_chunk_copy hypertable +SELECT compress_chunk('_timescaledb_internal._dist_hyper_15_68_chunk'); +SELECT compress_chunk('_timescaledb_internal._dist_hyper_15_70_chunk'); + +CREATE TABLE mvcp_hyper (time bigint NOT NULL, value integer); +SELECT table_name FROM create_distributed_hypertable('mvcp_hyper', 'time', + chunk_time_interval => 200, replication_factor => 3); + +-- Enable compression so that we can test dropping of compressed chunks +ALTER TABLE mvcp_hyper SET (timescaledb.compress, timescaledb.compress_orderby='time DESC'); + +INSERT INTO mvcp_hyper SELECT g, g FROM generate_series(0,1000) g; diff --git a/tsl/test/sql/chunk_api.sql b/tsl/test/sql/chunk_api.sql index d1b0cb19adf..51334420a2a 100644 --- a/tsl/test/sql/chunk_api.sql +++ b/tsl/test/sql/chunk_api.sql @@ -48,6 +48,35 @@ SET ROLE :ROLE_DEFAULT_PERM_USER_2; SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', 'ChunkSchema', 'My_chunk_Table_name'); \set ON_ERROR_STOP 1 +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- Test create_chunk_table for errors +\set ON_ERROR_STOP 0 +-- Test create_chunk_table for NULL input +SELECT * FROM _timescaledb_internal.create_chunk_table(NULL,' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi', NULL, 
'_timescaledb_internal','_hyper_1_1_chunk'); +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', NULL,'_hyper_1_1_chunk'); +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal',NULL); +-- Modified time constraint should fail with collision +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Missing dimension +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Extra dimension +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "device": [-9223372036854775808, 1073741823], "time2": [1514419600000000, 1515024000000000]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Bad dimension name +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "dev": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Same dimension twice +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1514419600000000, 1515024000000000], "time": [1514419600000000, 1515024000000000]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Bad bounds format +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": ["1514419200000000", 1515024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Bad slices format +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000], "device": 
[-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Bad slices json +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time: [1515024000000000] "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +-- Valid chunk, but no permissions +SET ROLE :ROLE_DEFAULT_PERM_USER_2; +SELECT * FROM _timescaledb_internal.create_chunk_table('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', '_timescaledb_internal','_hyper_1_1_chunk'); +\set ON_ERROR_STOP 1 + -- Test that granting insert on tables allow create_chunk to be -- called. This will also create a chunk that does not collide and has -- a custom schema and name. @@ -230,3 +259,265 @@ SELECT * FROM delete_data_node('data_node_1', force => true); SELECT * FROM delete_data_node('data_node_2', force => true); DROP DATABASE :DN_DBNAME_1; DROP DATABASE :DN_DBNAME_2; + +-- Test create_chunk_table to recreate the chunk table and show dimension slices +SET ROLE :ROLE_DEFAULT_PERM_USER; + +SELECT * FROM chunkapi ORDER BY time; + +SELECT chunk_schema AS "CHUNK_SCHEMA", chunk_name AS "CHUNK_NAME" +FROM timescaledb_information.chunks c +ORDER BY chunk_name DESC +LIMIT 1 \gset + +SELECT slices AS "SLICES" +FROM _timescaledb_internal.show_chunk(:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME') \gset + +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + +SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id; + +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; + +SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id; + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id; + + +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = 
(:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + +-- Test that create_chunk fails since chunk table already exists +\set ON_ERROR_STOP 0 +SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); +\set ON_ERROR_STOP 1 + +-- Test create_chunk_table on a hypertable where the chunk didn't exist before + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +-- Demonstrate that current settings for dimensions don't affect create_chunk_table + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2, '3d'); + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +-- Test create_chunk_table if a colliding chunk exists + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); + +INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4); + +\set ON_ERROR_STOP 0 +SELECT _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); +\set ON_ERROR_STOP 1 + +-- Test create_chunk_table when a chunk exists in different space partition 
and thus doesn't collide + +DROP TABLE chunkapi; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); + +INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 2, 23.4); + +SELECT _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +-- Test create_chunk_table when a chunk exists in different time partition and thus doesn't collide + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2); + +INSERT INTO chunkapi VALUES ('2018-02-01 05:00:00-8', 1, 23.4); + +SELECT _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +-- Test create_chunk_table with tablespaces + +\c :TEST_DBNAME :ROLE_SUPERUSER +SET client_min_messages = ERROR; +DROP TABLESPACE IF EXISTS tablespace1; +DROP TABLESPACE IF EXISTS tablespace2; +SET client_min_messages = NOTICE; +CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH; +CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER + +-- Use the space partition to calculate the tablespace id to use + +DROP TABLE chunkapi; +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE chunkapi (time timestamptz not null, device int, temp float); +SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 3); + +SELECT attach_tablespace('tablespace1', 'chunkapi'); +SELECT attach_tablespace('tablespace2', 'chunkapi'); + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME'; + +-- Use the time partition to calculate the tablespace id to use + +DROP TABLE chunkapi; +DROP TABLE 
:CHUNK_SCHEMA.:CHUNK_NAME; +CREATE TABLE devices (id int PRIMARY KEY); +INSERT INTO devices VALUES (1); +CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK (temp > 0)); +SELECT * FROM create_hypertable('chunkapi', 'time'); +INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4); + +SELECT chunk_schema AS "CHUNK_SCHEMA", chunk_name AS "CHUNK_NAME" +FROM timescaledb_information.chunks c +ORDER BY chunk_name DESC +LIMIT 1 \gset + +SELECT slices AS "SLICES" +FROM _timescaledb_internal.show_chunk(:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME') \gset + +-- Save the constraints info in a table for later comparison +CREATE TABLE original_chunk_constraints AS +SELECT "Constraint", "Type", "Columns", "Index"::text, "Expr", "Deferrable", "Deferred", "Validated" +FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); + +-- Save constraints metadata +CREATE TABLE original_chunk_constraints_metadata AS +SELECT + chunk_id, + dimension_slice_id, + constraint_name, + hypertable_constraint_name +FROM _timescaledb_catalog.chunk_constraint con +INNER JOIN _timescaledb_catalog.chunk ch ON (con.chunk_id = ch.id) +WHERE ch.schema_name = :'CHUNK_SCHEMA' AND ch.table_name = :'CHUNK_NAME'; + + +DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME; + +SELECT attach_tablespace('tablespace1', 'chunkapi'); +SELECT attach_tablespace('tablespace2', 'chunkapi'); + +SELECT count(*) FROM + _timescaledb_internal.create_chunk_table('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME'); + +SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME'; + +-- Now create the complete chunk from the chunk table +SELECT _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', + format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); + +-- Compare original and new constraints +SELECT * FROM original_chunk_constraints; +SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', 
:'CHUNK_NAME')::regclass); + +-- Compare original and new chunk constraints metadata +SELECT * FROM original_chunk_constraints_metadata; +SELECT + chunk_id, + dimension_slice_id, + constraint_name, + hypertable_constraint_name +FROM _timescaledb_catalog.chunk_constraint con +INNER JOIN _timescaledb_catalog.chunk ch ON (con.chunk_id = ch.id) +WHERE ch.schema_name = :'CHUNK_SCHEMA' AND ch.table_name = :'CHUNK_NAME'; + +DROP TABLE original_chunk_constraints; +DROP TABLE original_chunk_constraints_metadata; + +-- The chunk should inherit the hypertable +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + +-- Show chunks attached to the table +SELECT + :'CHUNK_SCHEMA' AS expected_schema, + :'CHUNK_NAME' AS expected_table_name, + (_timescaledb_internal.show_chunk(ch)).* +FROM show_chunks('chunkapi') ch; + +DROP TABLE chunkapi; +DROP TABLE devices; + +-- Test creating a chunk from an existing chunk table which was not +-- created via create_chunk_table and having a different name. +CREATE TABLE devices (id int PRIMARY KEY); +INSERT INTO devices VALUES (1); +CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK(temp > 0)); +SELECT * FROM create_hypertable('chunkapi', 'time'); + +CREATE TABLE newchunk (time timestamptz NOT NULL, device int, temp float); +SELECT * FROM test.show_constraints('newchunk'); + +INSERT INTO newchunk VALUES ('2018-01-01 05:00:00-8', 1, 23.4); +\set ON_ERROR_STOP 0 +-- Creating the chunk without required CHECK constraints on a table +-- should fail. Currently, PostgreSQL only enforces presence of CHECK +-- constraints, but not foreign key, unique, or primary key +-- constraints. We should probably add checks to enforce the latter +-- too or auto-create all constraints. 
+SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk'); +\set ON_ERROR_STOP 1 +-- Add the missing CHECK constraint. Note that the name must be the +-- same as on the parent table. +ALTER TABLE newchunk ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0); +SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk'); + +-- Show the chunk and that names are what we'd expect +SELECT + :'CHUNK_SCHEMA' AS expected_schema, + :'CHUNK_NAME' AS expected_table_name, + (_timescaledb_internal.show_chunk(ch)).* +FROM show_chunks('chunkapi') ch; + +-- The chunk should inherit the hypertable +SELECT relname +FROM pg_catalog.pg_inherits, pg_class +WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid; + +-- Test that it is possible to query the data via the hypertable +SELECT * FROM chunkapi ORDER BY 1,2,3; + +-- Show that the chunk has all the necessary constraints. These +-- include inheritable constraints and dimensional constraints, which +-- are specific to the chunk. Currently, foreign key, unique, and +-- primary key constraints are not inherited or auto-created. 
+SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass); + +DROP TABLE chunkapi; + +\c :TEST_DBNAME :ROLE_SUPERUSER +SET client_min_messages = ERROR; +DROP TABLESPACE tablespace1; +DROP TABLESPACE tablespace2; +SET client_min_messages = NOTICE; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER diff --git a/tsl/test/sql/data_node.sql b/tsl/test/sql/data_node.sql index b714e275c3c..a27642e8474 100644 --- a/tsl/test/sql/data_node.sql +++ b/tsl/test/sql/data_node.sql @@ -681,6 +681,114 @@ SELECT * FROM add_data_node('data_node_6', host => 'localhost', database => :'DN -- Providing the password on the command line should work SELECT * FROM add_data_node('data_node_6', host => 'localhost', database => :'DN_DBNAME_6', password => :'ROLE_3_PASS'); +SELECT * FROM delete_data_node('data_node_6'); + +-- +-- Tests for copy/move chunk API +-- +RESET ROLE; +DROP DATABASE :DN_DBNAME_1; +DROP DATABASE :DN_DBNAME_2; +DROP DATABASE :DN_DBNAME_3; + +SELECT * FROM add_data_node('data_node_1', host => 'localhost', + database => :'DN_DBNAME_1'); +SELECT * FROM add_data_node('data_node_2', host => 'localhost', + database => :'DN_DBNAME_2'); +SELECT * FROM add_data_node('data_node_3', host => 'localhost', + database => :'DN_DBNAME_3'); +GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC; + +SET ROLE :ROLE_1; + +CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float); +SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3); +INSERT INTO dist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t; +SELECT * from show_chunks('dist_test'); +SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); + +SELECT sum(device) FROM dist_test; +SELECT * FROM test.remote_exec(ARRAY['data_node_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$); + +-- ensure data 
node name is provided and has proper type +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> null, destination_node => 'data_node_2'); +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => null); +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 2); +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node => 'data_node_1'); +\set ON_ERROR_STOP 1 + +-- ensure functions can't be run in read only mode +SET default_transaction_read_only TO on; +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 +SET default_transaction_read_only TO off; + +-- ensure functions can't be run in an active multi-statement transaction +\set ON_ERROR_STOP 0 +BEGIN; +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ROLLBACK; +BEGIN; +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +ROLLBACK; +\set ON_ERROR_STOP 1 + +-- must be superuser to copy/move chunks +SET ROLE :ROLE_DEFAULT_PERM_USER; +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 +SET ROLE :ROLE_1; + +-- can't run copy/move chunk on 
a data node +\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER; +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; + +-- ensure that hypertable chunks are distributed +CREATE TABLE nondist_test(time timestamp NOT NULL, device int, temp float); +SELECT create_hypertable('nondist_test', 'time', 'device', 3); +INSERT INTO nondist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t; +SELECT * from show_chunks('nondist_test'); +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 + +-- ensure that distributed chunk is not compressed +ALTER TABLE dist_test SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC'); +SELECT compress_chunk('_timescaledb_internal._dist_hyper_9_15_chunk'); +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 + +-- ensure that chunk exists on a source data node +\set ON_ERROR_STOP 0 +CALL 
timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_13_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 + +-- do actually copy +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +SELECT * FROM test.remote_exec(ARRAY['data_node_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$); + +-- ensure that chunk exists on a destination data node +\set ON_ERROR_STOP 0 +CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2'); +\set ON_ERROR_STOP 1 + +-- now try to move the same chunk from data node 2 to 3 +CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_2', destination_node => 'data_node_3'); +SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$); +SELECT * FROM test.remote_exec(ARRAY['data_node_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$); +SELECT sum(device) FROM dist_test; RESET ROLE; DROP DATABASE :DN_DBNAME_1; diff --git a/tsl/test/sql/dist_views.sql b/tsl/test/sql/dist_views.sql index 4ad9fbdc68b..6c29bcd4920 100644 --- a/tsl/test/sql/dist_views.sql +++ b/tsl/test/sql/dist_views.sql @@ -60,3 +60,30 @@ SELECT create_hypertable( 'special#tab', 'a', 'b', replication_factor=>2, chunk_ INSERT into "special#tab" select generate_series( '2020-02-02 10:00', '2020-02-05 10:00' , '1 day'::interval), 10; SELECT * FROM chunks_detailed_size( '"special#tab"') ORDER BY chunk_name, node_name; SELECT * FROM hypertable_index_size( 'dist_table_time_idx') ; + +-- Test chunk_replication_status view +SELECT * FROM 
timescaledb_experimental.chunk_replication_status +ORDER BY chunk_schema, chunk_name +LIMIT 4; + +-- drop one chunk replica +SELECT _timescaledb_internal.chunk_drop_replica(format('%I.%I', chunk_schema, chunk_name)::regclass, replica_nodes[1]) +FROM timescaledb_experimental.chunk_replication_status +ORDER BY chunk_schema, chunk_name +LIMIT 1; + +SELECT * FROM timescaledb_experimental.chunk_replication_status +WHERE num_replicas < desired_num_replicas +ORDER BY chunk_schema, chunk_name; + +-- Example usage of finding data nodes to copy/move chunks between +SELECT + format('%I.%I', chunk_schema, chunk_name)::regclass AS chunk, + replica_nodes[1] AS copy_from_node, + non_replica_nodes[1] AS copy_to_node +FROM + timescaledb_experimental.chunk_replication_status +WHERE + num_replicas < desired_num_replicas +ORDER BY + chunk_schema, chunk_name; diff --git a/tsl/test/t/002_chunk_copy_move.pl b/tsl/test/t/002_chunk_copy_move.pl new file mode 100644 index 00000000000..3a85edd1bcb --- /dev/null +++ b/tsl/test/t/002_chunk_copy_move.pl @@ -0,0 +1,203 @@ +# This file and its contents are licensed under the Timescale License. +# Please see the included NOTICE for copyright information and +# LICENSE-TIMESCALE for a copy of the license. 
+ +# test the multi node chunk copy/move operation end-to-end +use strict; +use warnings; +use AccessNode; +use DataNode; +use TestLib; +use Test::More tests => 272; + +#Initialize all the multi-node instances +my $an = AccessNode->create('an'); +my $dn1 = DataNode->create('dn1', allows_streaming => 'logical'); +my $dn2 = DataNode->create('dn2', allows_streaming => 'logical'); + +$an->add_data_node($dn1); +$an->add_data_node($dn2); + +#Create a distributed hypertable and insert a few rows +$an->safe_psql( + 'postgres', + qq[ + CREATE TABLE test(time timestamp NOT NULL, device int, temp float); + SELECT create_distributed_hypertable('test', 'time', 'device', 3); + INSERT INTO test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t; + ]); + +#Check that chunks are shown appropriately on all nodes of the multi-node setup +my $query = q[SELECT * from show_chunks('test');]; +my $operation_id = "ts_copy_1_1"; + +#Check chunk states before the move +check_pre_move_chunk_states(); + +#Setup the error injection function on the AN +my $extversion = $an->safe_psql('postgres', + "SELECT extversion from pg_catalog.pg_extension WHERE extname = 'timescaledb'" +); +$an->safe_psql( + 'postgres', + qq[ + CREATE OR REPLACE FUNCTION error_injection_on(TEXT) RETURNS VOID LANGUAGE C VOLATILE STRICT + AS 'timescaledb-$extversion', 'ts_debug_point_enable'; + ]); + +#Induce errors in various stages in the chunk move activity and ensure that the +#cleanup function restores things to the previous sane state + +my @stages = + qw(init create_empty_chunk create_publication create_replication_slot create_subscription sync_start sync drop_publication drop_subscription attach_chunk delete_chunk); + +my ($stdout, $stderr, $ret); +my $curr_index = 1; +my $arrSize = @stages; + +while ($curr_index < $arrSize) +{ + #Enable the error at each stage + #Call the move_chunk procedure which should error out now + 
($ret, $stdout, $stderr) = $an->psql('postgres', + "SELECT error_injection_on('$stages[$curr_index]'); CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'dn1', destination_node => 'dn2');" + ); + is($ret, 3, + "move_chunk fails as expected in stage '$stages[$curr_index]'"); + like( + $stderr, + qr/ERROR: error injected at debug point '$stages[$curr_index]'/, + 'failure in expected stage'); + + #The earlier debug error point gets released automatically since it's a session lock + #Call the cleanup procedure to make things right + $operation_id = "ts_copy_" . $curr_index . "_1"; + $an->safe_psql('postgres', + "CALL timescaledb_experimental.cleanup_copy_chunk_operation(operation_id=>'$operation_id');" + ); + + #Check chunk state is as before the move + check_pre_move_chunk_states(); + + $curr_index++; +} + +#Move chunk _timescaledb_internal._dist_hyper_1_1_chunk to DN2 from AN +$an->safe_psql('postgres', + "CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'dn1', destination_node => 'dn2')" +); + +#Query datanode1 after the above move +$dn1->psql_is( + 'postgres', + $query, + "_timescaledb_internal._dist_hyper_1_3_chunk\n_timescaledb_internal._dist_hyper_1_4_chunk", + 'DN1 shows correct set of chunks'); + +#Check contents on the chunk on DN2, after the move +$dn2->psql_is( + 'postgres', + "SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk", + qq[406], + "DN2 has correct contents after the move in the chunk"); + +#Query datanode2 +$dn2->psql_is( + 'postgres', + $query, + "_timescaledb_internal._dist_hyper_1_2_chunk\n_timescaledb_internal._dist_hyper_1_1_chunk", + 'DN2 shows correct set of chunks'); + +#Copy chunk _timescaledb_internal._dist_hyper_1_1_chunk to DN1 from DN2 +$an->safe_psql('postgres', + "CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'dn2', destination_node => 
'dn1')" +); + +#Query datanode1 after the above copy +$dn1->psql_is( + 'postgres', + $query, + "_timescaledb_internal._dist_hyper_1_3_chunk\n_timescaledb_internal._dist_hyper_1_4_chunk\n_timescaledb_internal._dist_hyper_1_1_chunk", + 'DN1 shows correct set of chunks after the copy'); + +#Check contents on the chunk on DN2, after the copy +$dn1->psql_is( + 'postgres', + "SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk", + qq[406], + "DN1 has correct contents after the copy in the chunk"); + +#Check contents on the chunk on DN2, after the copy +$dn2->psql_is( + 'postgres', + "SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk", + qq[406], + "DN2 has correct contents after the copy in the chunk"); + +#Query datanode2 +$dn2->psql_is( + 'postgres', + $query, + "_timescaledb_internal._dist_hyper_1_2_chunk\n_timescaledb_internal._dist_hyper_1_1_chunk", + 'DN2 shows correct set of chunks after the copy'); + +done_testing(); + +#Check the following +#1) chunk is still on "dn1", +#2) there's no entry on "dn2", +#3) there are no left over replication slots and publications on "dn1", +#4) there is no subscription on "dn2" +sub check_pre_move_chunk_states +{ + #Query Access node + $an->psql_is( + 'postgres', $query, q[_timescaledb_internal._dist_hyper_1_1_chunk +_timescaledb_internal._dist_hyper_1_2_chunk +_timescaledb_internal._dist_hyper_1_3_chunk +_timescaledb_internal._dist_hyper_1_4_chunk], 'AN shows correct set of chunks' + ); + + #Query datanode1 + $dn1->psql_is( + 'postgres', + $query, + "_timescaledb_internal._dist_hyper_1_1_chunk\n_timescaledb_internal._dist_hyper_1_3_chunk\n_timescaledb_internal._dist_hyper_1_4_chunk", + 'DN1 shows correct set of chunks'); + + #Check contents on the chunk on DN1 + $dn1->psql_is( + 'postgres', + "SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk", + qq[406], + "DN1 has correct contents in the chunk"); + + #Query datanode2 + $dn2->psql_is( + 'postgres', $query, + 
"_timescaledb_internal._dist_hyper_1_2_chunk", + 'DN2 shows correct set of chunks'); + + #Check that there is no replication slot on datanode1 + $dn1->psql_is( + 'postgres', + "SELECT 1 FROM pg_catalog.pg_replication_slots WHERE slot_name = '$operation_id'", + "", + 'DN1 doesn\'t have left over replication slots'); + + #Check that there is no publication on datanode1 + $dn1->psql_is( + 'postgres', + "SELECT 1 FROM pg_catalog.pg_publication WHERE pubname = '$operation_id'", + "", + 'DN1 doesn\'t have left over publication'); + + #Check that there is no subscription on datanode2 + $dn2->psql_is( + 'postgres', + "SELECT 1 FROM pg_catalog.pg_subscription WHERE subname = '$operation_id'", + "", + 'DN2 doesn\'t have left over subscription'); +} + +1; diff --git a/tsl/test/t/CMakeLists.txt b/tsl/test/t/CMakeLists.txt index cc54c4e04bb..d763a2bb985 100644 --- a/tsl/test/t/CMakeLists.txt +++ b/tsl/test/t/CMakeLists.txt @@ -1,4 +1,9 @@ set(PROVE_TEST_FILES 001_simple_multinode.pl) +set(PROVE_DEBUG_TEST_FILES 002_chunk_copy_move.pl) + +if(CMAKE_BUILD_TYPE MATCHES Debug) + list(APPEND PROVE_TEST_FILES ${PROVE_DEBUG_TEST_FILES}) +endif(CMAKE_BUILD_TYPE MATCHES Debug) foreach(P_FILE ${PROVE_TEST_FILES}) configure_file(${P_FILE} ${CMAKE_CURRENT_BINARY_DIR}/${P_FILE} COPYONLY)