From 7c8fc7a70e3e6087184ed5e5ba52a7ebe6d91d37 Mon Sep 17 00:00:00 2001
From: Ante Kresic
Date: Wed, 7 Feb 2024 11:58:06 +0100
Subject: [PATCH] Release 2.14.0

This release contains performance improvements and bug fixes since
the 2.13.1 release. We recommend that you upgrade at the next
available opportunity.

In addition, it includes these noteworthy features (see the illustrative
SQL sketches further below):

* Ability to change compression settings on existing compressed hypertables at any time.
  New compression settings take effect on any new chunks that are compressed after the change.
* Reduced locking requirements during chunk recompression
* A configurable limit on tuple decompression during DML operations (100k tuples by default),
  which prevents accidentally decompressing large amounts of data and running into storage issues
* Helper functions for determining compression settings

**For this release only**, you will need to restart the database before running `ALTER EXTENSION`.

**Multi-node support removal announcement**
Following the deprecation announcement for Multi-node in TimescaleDB 2.13,
Multi-node is no longer supported starting with TimescaleDB 2.14.

TimescaleDB 2.13 is the last version that includes multi-node support. Learn more about it [here](docs/MultiNodeDeprecation.md).

If you want to migrate from multi-node TimescaleDB to single-node TimescaleDB, read the
[migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/).

**Deprecation notice: recompress_chunk procedure**
TimescaleDB 2.14 is the last version that will include the recompress_chunk procedure. Its
functionality will be replaced by the compress_chunk function, which, starting with TimescaleDB 2.14,
works on both uncompressed and partially compressed chunks.
Going forward, use the compress_chunk function to fully compress all types of chunks, or to recompress
old fully compressed chunks with new compression settings (through the newly introduced optional recompress parameter).

**Features**
* #6325 Add plan-time chunk exclusion for real-time CAggs
* #6360 Remove support for creating Continuous Aggregates with old format
* #6386 Add functions for determining compression defaults
* #6410 Remove multinode public API
* #6440 Allow SQLValueFunction pushdown into compressed scan
* #6463 Support approximate hypertable size
* #6513 Make compression settings per chunk
* #6529 Remove reindex_relation from recompression
* #6531 Fix if_not_exists behavior for CAgg policy with NULL offsets
* #6545 Remove restrictions for changing compression settings
* #6566 Limit tuple decompression during DML operations
* #6579 Change compress_chunk and decompress_chunk to idempotent version by default
* #6608 Add LWLock for OSM usage in loader
* #6609 Deprecate recompress_chunk
* #6609 Add optional recompress argument to compress_chunk

**Bugfixes**
* #6541 Inefficient join plans on compressed hypertables.
* #6491 Enable now() plantime constification with BETWEEN
* #6494 Fix create_hypertable referenced by fk succeeds
* #6498 Suboptimal query plans when using time_bucket with query parameters
* #6507 time_bucket_gapfill with timezones doesn't handle daylight savings
* #6509 Make extension state available through function
* #6512 Log extension state changes
* #6522 Disallow triggers on CAggs
* #6523 Reduce locking level on compressed chunk index during segmentwise recompression
* #6531 Fix if_not_exists behavior for CAgg policy with NULL offsets
* #6571 Fix pathtarget adjustment for MergeAppend paths in aggregation pushdown code
* #6575 Fix compressed chunk not found during upserts
* #6592 Fix recompression policy ignoring partially compressed chunks
* #6610 Ensure qsort comparison function is transitive

**Thanks**
* @coney21 and @GStechschulte for reporting the problem with inefficient join plans on compressed hypertables.
* @HollowMan6 for reporting triggers not working on materialized views of CAggs
* @jbx1 for reporting suboptimal query plans when using time_bucket with query parameters
* @JerkoNikolic for reporting the issue with gapfill and DST
* @pdipesh02 for working on removing the old Continuous Aggregate format
* @raymalt and @martinhale for reporting very slow query plans on realtime CAggs queries
---
 .unreleased/PR_6491_now_between           |   1 -
 .unreleased/bugfix_5688                   |   3 -
 .unreleased/enhancement_6325              |   2 -
 .unreleased/fix-parameterized-time-bucket |   2 -
 .unreleased/fix-segmentby-em              |   2 -
 .unreleased/fix_6494                      |   1 -
 .unreleased/fix_6507                      |   2 -
 .unreleased/fix_6509                      |   1 -
 .unreleased/fix_6512                      |   1 -
 .unreleased/fix_6522                      |   2 -
 .unreleased/fix_6571                      |   1 -
 .unreleased/fix_6575                      |   1 -
 .unreleased/fix_6610                      |   1 -
 .unreleased/pr_6360                       |   3 -
 .unreleased/pr_6386                       |   1 -
 .unreleased/pr_6410                       |   1 -
 .unreleased/pr_6440                       |   1 -
 .unreleased/pr_6463                       |   1 -
 .unreleased/pr_6513                       |   1 -
 .unreleased/pr_6529                       |   1 -
 .unreleased/pr_6545                       |   1 -
 .unreleased/pr_6558                       |   1 -
 .unreleased/pr_6566                       |   1 -
 .unreleased/pr_6579                       |   1 -
 .unreleased/pr_6592                       |   1 -
 .unreleased/pr_6608                       |   1 -
 .unreleased/pr_6609                       |   2 -
 CHANGELOG.md                              |  74 ++
 sql/CMakeLists.txt                        |   8 +-
 sql/updates/2.13.1--2.14.0.sql            | 451 ++++++++++++
 sql/updates/2.14.0--2.13.1.sql            | 797 ++++++++++++++++++++++
 sql/updates/latest-dev.sql                | 451 ------------
 sql/updates/reverse-dev.sql               | 797 ----------------------
 version.config                            |   2 +-
 34 files changed, 1328 insertions(+), 1289 deletions(-)
 delete mode 100644 .unreleased/PR_6491_now_between
 delete mode 100644 .unreleased/bugfix_5688
 delete mode 100644 .unreleased/enhancement_6325
 delete mode 100644 .unreleased/fix-parameterized-time-bucket
 delete mode 100644 .unreleased/fix-segmentby-em
 delete mode 100644 .unreleased/fix_6494
 delete mode 100644 .unreleased/fix_6507
 delete mode 100644 .unreleased/fix_6509
 delete mode 100644 .unreleased/fix_6512
 delete mode 100644 .unreleased/fix_6522
 delete mode 100644 .unreleased/fix_6571
 delete mode 100644 .unreleased/fix_6575
 delete mode 100644 .unreleased/fix_6610
 delete mode 100644 .unreleased/pr_6360
 delete mode 100644 .unreleased/pr_6386
 delete mode 100644 .unreleased/pr_6410
 delete mode 100644 .unreleased/pr_6440
 delete mode 100644 .unreleased/pr_6463
 delete mode 100644 .unreleased/pr_6513
 delete mode 100644 .unreleased/pr_6529
 delete mode 100644 .unreleased/pr_6545
 delete mode 100644 .unreleased/pr_6558
 delete mode 100644 .unreleased/pr_6566
 delete mode 100644 .unreleased/pr_6579
 delete mode 100644 .unreleased/pr_6592
 delete mode 100644 .unreleased/pr_6608
 delete mode 100644 .unreleased/pr_6609
create mode 100644 sql/updates/2.13.1--2.14.0.sql create mode 100644 sql/updates/2.14.0--2.13.1.sql diff --git a/.unreleased/PR_6491_now_between b/.unreleased/PR_6491_now_between deleted file mode 100644 index 34ccac5061f..00000000000 --- a/.unreleased/PR_6491_now_between +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6491 Enable now() plantime constification with BETWEEN diff --git a/.unreleased/bugfix_5688 b/.unreleased/bugfix_5688 deleted file mode 100644 index 537345f579a..00000000000 --- a/.unreleased/bugfix_5688 +++ /dev/null @@ -1,3 +0,0 @@ -Implements: #6531 Fix if_not_exists behavior for CAgg policy with NULL offsets - -Fixes: #5688 diff --git a/.unreleased/enhancement_6325 b/.unreleased/enhancement_6325 deleted file mode 100644 index bf3b7fea4d8..00000000000 --- a/.unreleased/enhancement_6325 +++ /dev/null @@ -1,2 +0,0 @@ -Implements: #6325 Add plan-time chunk exclusion for real-time CAggs -Thanks: @raymalt and @martinhale for reporting very slow query plans on realtime CAggs queries diff --git a/.unreleased/fix-parameterized-time-bucket b/.unreleased/fix-parameterized-time-bucket deleted file mode 100644 index 68ee92e7c44..00000000000 --- a/.unreleased/fix-parameterized-time-bucket +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6498 Suboptimal query plans when using time_bucket with query parameters -Thanks: @jbx1 for reporting suboptimal query plans when using time_bucket with query parameters diff --git a/.unreleased/fix-segmentby-em b/.unreleased/fix-segmentby-em deleted file mode 100644 index 087b080211a..00000000000 --- a/.unreleased/fix-segmentby-em +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6424, #6536 Inefficient join plans on compressed hypertables. -Thanks: @coney21 and @GStechschulte for reporting the problem with inefficient join plans on compressed hypertables. 
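
The first noteworthy feature above (changing compression settings on existing compressed hypertables, #6545 and #6513) can be pictured with a short sketch. This is an illustration only: the hypertable `metrics` and its columns are hypothetical and not part of this patch, and per the release notes the new settings only affect chunks compressed after the change.

```sql
-- Illustrative sketch: "metrics", "device_id" and "time" are hypothetical.
-- With #6545 this ALTER is allowed even while compressed chunks exist;
-- already-compressed chunks keep their per-chunk settings (#6513) until
-- they are recompressed.
ALTER TABLE metrics SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'device_id',
    timescaledb.compress_orderby   = 'time DESC'
);
```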
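
For the recompress_chunk deprecation notice, the compress_chunk signature added later in this patch (`compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN = true, recompress BOOLEAN = false)`) suggests usage along the following lines; the chunk name is a hypothetical placeholder.

```sql
-- Fully compress an uncompressed or partially compressed chunk
-- (idempotent by default, #6579); the chunk name is a placeholder.
SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk');

-- Recompress an already fully compressed chunk, e.g. to apply changed
-- compression settings, via the new optional parameter (#6609).
SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk', recompress => true);
```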
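
The DML decompression limit (#6566) is described above as a configurable 100k-tuple cap. A minimal sketch follows; the GUC name `timescaledb.max_tuples_decompressed_per_dml_transaction` is an assumption based on the release notes and should be checked against the shipped documentation.

```sql
-- Assumed GUC name; the 100000-tuple default comes from the release notes.
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 500000;
-- Presumably a value of 0 removes the limit (assumption).
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0;
```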
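
Approximate hypertable size support (#6463) is backed by the functions created in sql/updates/2.13.1--2.14.0.sql further below (`hypertable_approximate_size` and `hypertable_approximate_detailed_size`). A usage sketch, with `metrics` again a hypothetical hypertable:

```sql
-- Approximate total size in bytes (table + indexes), per the function
-- definitions added by this patch; 'metrics' is a placeholder.
SELECT hypertable_approximate_size('metrics');

-- Breakdown into table, index and toast bytes.
SELECT * FROM hypertable_approximate_detailed_size('metrics');
```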
diff --git a/.unreleased/fix_6494 b/.unreleased/fix_6494 deleted file mode 100644 index 2ec5c54bed3..00000000000 --- a/.unreleased/fix_6494 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6494 Fix create_hypertable referenced by fk succeeds diff --git a/.unreleased/fix_6507 b/.unreleased/fix_6507 deleted file mode 100644 index f2c8a88ce38..00000000000 --- a/.unreleased/fix_6507 +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6507 time_bucket_gapfill with timezones doesn't handle daylight savings -Thanks: @JerkoNikolic Thanks for reporting the issue with gapfill and DST diff --git a/.unreleased/fix_6509 b/.unreleased/fix_6509 deleted file mode 100644 index 25784c2c8d9..00000000000 --- a/.unreleased/fix_6509 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6509 Make extension state available through function diff --git a/.unreleased/fix_6512 b/.unreleased/fix_6512 deleted file mode 100644 index 23c4f42cb55..00000000000 --- a/.unreleased/fix_6512 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6512 Log extension state changes diff --git a/.unreleased/fix_6522 b/.unreleased/fix_6522 deleted file mode 100644 index 46b8420ea1c..00000000000 --- a/.unreleased/fix_6522 +++ /dev/null @@ -1,2 +0,0 @@ -Fixes: #6522 Disallow triggers on CAggs -Thanks: @HollowMan6 Thanks for reporting this issue diff --git a/.unreleased/fix_6571 b/.unreleased/fix_6571 deleted file mode 100644 index 91211fc3dc5..00000000000 --- a/.unreleased/fix_6571 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6571 Fix pathtarget adjustment for MergeAppend paths in aggregation pushdown code diff --git a/.unreleased/fix_6575 b/.unreleased/fix_6575 deleted file mode 100644 index c2d08fcf026..00000000000 --- a/.unreleased/fix_6575 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6575 Fix compressed chunk not found during upserts diff --git a/.unreleased/fix_6610 b/.unreleased/fix_6610 deleted file mode 100644 index 7c02a9d2fce..00000000000 --- a/.unreleased/fix_6610 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6610 Ensure qsort comparison function is transitive diff --git a/.unreleased/pr_6360 b/.unreleased/pr_6360 deleted file mode 100644 index 5ea6a104de4..00000000000 --- a/.unreleased/pr_6360 +++ /dev/null @@ -1,3 +0,0 @@ -Implements: #6360 Remove support for creating Continuous Aggregates with old format - -Thanks: @pdipesh02 for working on removing the old Continuous Aggregate format diff --git a/.unreleased/pr_6386 b/.unreleased/pr_6386 deleted file mode 100644 index a7e16d0e92e..00000000000 --- a/.unreleased/pr_6386 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6386 Add functions for determining compression defaults \ No newline at end of file diff --git a/.unreleased/pr_6410 b/.unreleased/pr_6410 deleted file mode 100644 index abbb9325f01..00000000000 --- a/.unreleased/pr_6410 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6410 Remove multinode public API diff --git a/.unreleased/pr_6440 b/.unreleased/pr_6440 deleted file mode 100644 index f84c257669d..00000000000 --- a/.unreleased/pr_6440 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6440 Allow SQLValueFunction pushdown into compressed scan diff --git a/.unreleased/pr_6463 b/.unreleased/pr_6463 deleted file mode 100644 index 4b9d4c70a00..00000000000 --- a/.unreleased/pr_6463 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6463 Support approximate hypertable size diff --git a/.unreleased/pr_6513 b/.unreleased/pr_6513 deleted file mode 100644 index 9f1ef76ac3c..00000000000 --- a/.unreleased/pr_6513 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6513 Make compression settings per chunk diff --git a/.unreleased/pr_6529 b/.unreleased/pr_6529 deleted file mode 100644 index 
de85b18bc97..00000000000 --- a/.unreleased/pr_6529 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6529 Remove reindex_relation from recompression diff --git a/.unreleased/pr_6545 b/.unreleased/pr_6545 deleted file mode 100644 index a00d2b225d4..00000000000 --- a/.unreleased/pr_6545 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6545 Remove restrictions for changing compression settings diff --git a/.unreleased/pr_6558 b/.unreleased/pr_6558 deleted file mode 100644 index 1bf0e6ff9be..00000000000 --- a/.unreleased/pr_6558 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6523 Reduce locking level on compressed chunk index during segmentwise recompression diff --git a/.unreleased/pr_6566 b/.unreleased/pr_6566 deleted file mode 100644 index 11e94f4becd..00000000000 --- a/.unreleased/pr_6566 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6566 Limit tuple decompression during DML operations diff --git a/.unreleased/pr_6579 b/.unreleased/pr_6579 deleted file mode 100644 index 8dc9745bf91..00000000000 --- a/.unreleased/pr_6579 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6579 Change compress_chunk and decompress_chunk to idempotent version by default diff --git a/.unreleased/pr_6592 b/.unreleased/pr_6592 deleted file mode 100644 index d6b43bad485..00000000000 --- a/.unreleased/pr_6592 +++ /dev/null @@ -1 +0,0 @@ -Fixes: #6592 Fix recompression policy ignoring partially compressed chunks diff --git a/.unreleased/pr_6608 b/.unreleased/pr_6608 deleted file mode 100644 index 1f400a3e8d4..00000000000 --- a/.unreleased/pr_6608 +++ /dev/null @@ -1 +0,0 @@ -Implements: #6608 Add LWLock for OSM usage in loader diff --git a/.unreleased/pr_6609 b/.unreleased/pr_6609 deleted file mode 100644 index 9d7c9e70ac8..00000000000 --- a/.unreleased/pr_6609 +++ /dev/null @@ -1,2 +0,0 @@ -Implements: #6609 Deprecate recompress_chunk -Implements: #6609 Add optional recompress argument to compress_chunk diff --git a/CHANGELOG.md b/CHANGELOG.md index 01dfd48395b..7af10b40ad1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,80 @@ `psql` with the `-X` flag to prevent any `.psqlrc` commands from accidentally triggering the load of a previous DB version.** +## 2.14.0 (2024-02-08) + +This release contains performance improvements and bug fixes since +the 2.13.1 release. We recommend that you upgrade at the next +available opportunity. + +In addition, it includes these noteworthy features: + +* Ability to change compression settings on existing compressed hypertables at any time. +New compression settings take effect on any new chunks that are compressed after the change. +* Reduced locking requirements during chunk recompression +* Limiting tuple decompression during DML operations to avoid decompressing a lot of tuples and causing storage issues (100k limit, configurable) +* Helper functions for determining compression settings + +**For this release only**, you will need to restart the database before running `ALTER EXTENSION` + +**Multi-node support removal announcement** +Following the deprecation announcement for Multi-node in TimescaleDB 2.13, +Multi-node is no longer supported starting with TimescaleDB 2.14. + +TimescaleDB 2.13 is the last version that includes multi-node support. Learn more about it [here](docs/MultiNodeDeprecation.md). + +If you want to migrate from multi-node TimescaleDB to single-node TimescaleDB, read the +[migration documentation](https://docs.timescale.com/migrate/latest/multi-node-to-timescale-service/). 
+ +**Deprecation notice: recompress_chunk procedure** +TimescaleDB 2.14 is the last version that will include the recompress_chunk procedure. Its +functionality will be replaced by the compress_chunk function, which, starting on TimescaleDB 2.14, +works on both uncompressed and partially compressed chunks. +The compress_chunk function should be used going forward to fully compress all types of chunks or even recompress +old fully compressed chunks using new compression settings (through the newly introduced recompress optional parameter). + +**Features** +* #6325 Add plan-time chunk exclusion for real-time CAggs +* #6360 Remove support for creating Continuous Aggregates with old format +* #6386 Add functions for determining compression defaults +* #6410 Remove multinode public API +* #6440 Allow SQLValueFunction pushdown into compressed scan +* #6463 Support approximate hypertable size +* #6513 Make compression settings per chunk +* #6529 Remove reindex_relation from recompression +* #6531 Fix if_not_exists behavior for CAgg policy with NULL offsets +* #6545 Remove restrictions for changing compression settings +* #6566 Limit tuple decompression during DML operations +* #6579 Change compress_chunk and decompress_chunk to idempotent version by default +* #6608 Add LWLock for OSM usage in loader +* #6609 Deprecate recompress_chunk +* #6609 Add optional recompress argument to compress_chunk + +**Bugfixes** +* #6541 Inefficient join plans on compressed hypertables. +* #6491 Enable now() plantime constification with BETWEEN +* #6494 Fix create_hypertable referenced by fk succeeds +* #6498 Suboptimal query plans when using time_bucket with query parameters +* #6507 time_bucket_gapfill with timezones doesn't handle daylight savings +* #6509 Make extension state available through function +* #6512 Log extension state changes +* #6522 Disallow triggers on CAggs +* #6523 Reduce locking level on compressed chunk index during segmentwise recompression +* #6531 Fix if_not_exists behavior for CAgg policy with NULL offsets +* #6571 Fix pathtarget adjustment for MergeAppend paths in aggregation pushdown code +* #6575 Fix compressed chunk not found during upserts +* #6592 Fix recompression policy ignoring partially compressed chunks +* #6610 Ensure qsort comparison function is transitive + +**Thanks** +* @coney21 and @GStechschulte for reporting the problem with inefficient join plans on compressed hypertables. +* @HollowMan6 for reporting triggers not working on materialized views of +CAggs +* @jbx1 for reporting suboptimal query plans when using time_bucket with query parameters +* @JerkoNikolic for reporting the issue with gapfill and DST +* @pdipesh02 for working on removing the old Continuous Aggregate format +* @raymalt and @martinhale for reporting very slow query plans on realtime CAggs queries + ## 2.13.1 (2024-01-09) This release contains bug fixes since the 2.13.0 release. diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 9951c26961f..3e51058aa52 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -42,11 +42,12 @@ set(MOD_FILES updates/2.12.0--2.12.1.sql updates/2.12.1--2.12.2.sql updates/2.12.2--2.13.0.sql - updates/2.13.0--2.13.1.sql) + updates/2.13.0--2.13.1.sql + updates/2.13.1--2.14.0.sql) # The downgrade file to generate a downgrade script for the current version, as # specified in version.config -set(CURRENT_REV_FILE reverse-dev.sql) +set(CURRENT_REV_FILE 2.14.0--2.13.1.sql) # Files for generating old downgrade scripts. 
This should only include files for # downgrade from one version to its previous version since we do not support # skipping versions when downgrading. @@ -80,7 +81,8 @@ set(OLD_REV_FILES 2.12.1--2.12.0.sql 2.12.2--2.12.1.sql 2.13.0--2.12.2.sql - 2.13.1--2.13.0.sql) + 2.13.1--2.13.0.sql + 2.14.0--2.13.1.sql) set(MODULE_PATHNAME "$libdir/timescaledb-${PROJECT_VERSION_MOD}") set(LOADER_PATHNAME "$libdir/timescaledb") diff --git a/sql/updates/2.13.1--2.14.0.sql b/sql/updates/2.13.1--2.14.0.sql new file mode 100644 index 00000000000..5f4c8c4fb79 --- /dev/null +++ b/sql/updates/2.13.1--2.14.0.sql @@ -0,0 +1,451 @@ + +-- ERROR if trying to update the extension while multinode is present +DO $$ +DECLARE + data_nodes TEXT; + dist_hypertables TEXT; +BEGIN + SELECT string_agg(format('%I.%I', schema_name, table_name), ', ') + INTO dist_hypertables + FROM _timescaledb_catalog.hypertable + WHERE replication_factor > 0; + + IF dist_hypertables IS NOT NULL THEN + RAISE USING + ERRCODE = 'feature_not_supported', + MESSAGE = 'cannot upgrade because multi-node has been removed in 2.14.0', + DETAIL = 'The following distributed hypertables should be migrated to regular: '||dist_hypertables; + END IF; + + SELECT string_agg(format('%I', srv.srvname), ', ') + INTO data_nodes + FROM pg_foreign_server srv + JOIN pg_foreign_data_wrapper fdw ON srv.srvfdw = fdw.oid AND fdw.fdwname = 'timescaledb_fdw'; + + IF data_nodes IS NOT NULL THEN + RAISE USING + ERRCODE = 'feature_not_supported', + MESSAGE = 'cannot upgrade because multi-node has been removed in 2.14.0', + DETAIL = 'The following data nodes should be removed: '||data_nodes; + END IF; + + IF EXISTS(SELECT FROM _timescaledb_catalog.metadata WHERE key = 'dist_uuid') THEN + RAISE USING + ERRCODE = 'feature_not_supported', + MESSAGE = 'cannot upgrade because multi-node has been removed in 2.14.0', + DETAIL = 'This node appears to be part of a multi-node installation'; + END IF; +END $$; + +DROP FUNCTION IF EXISTS _timescaledb_functions.ping_data_node; +DROP FUNCTION IF EXISTS _timescaledb_internal.ping_data_node; +DROP FUNCTION IF EXISTS _timescaledb_functions.remote_txn_heal_data_node; +DROP FUNCTION IF EXISTS _timescaledb_internal.remote_txn_heal_data_node; + +DROP FUNCTION IF EXISTS _timescaledb_functions.set_dist_id; +DROP FUNCTION IF EXISTS _timescaledb_internal.set_dist_id; +DROP FUNCTION IF EXISTS _timescaledb_functions.set_peer_dist_id; +DROP FUNCTION IF EXISTS _timescaledb_internal.set_peer_dist_id; +DROP FUNCTION IF EXISTS _timescaledb_functions.validate_as_data_node; +DROP FUNCTION IF EXISTS _timescaledb_internal.validate_as_data_node; +DROP FUNCTION IF EXISTS _timescaledb_functions.show_connection_cache; +DROP FUNCTION IF EXISTS _timescaledb_internal.show_connection_cache; + +DROP FUNCTION IF EXISTS @extschema@.create_hypertable(relation REGCLASS, time_column_name NAME, partitioning_column NAME, number_partitions INTEGER, associated_schema_name NAME, associated_table_prefix NAME, chunk_time_interval ANYELEMENT, create_default_indexes BOOLEAN, if_not_exists BOOLEAN, partitioning_func REGPROC, migrate_data BOOLEAN, chunk_target_size TEXT, chunk_sizing_func REGPROC, time_partitioning_func REGPROC, replication_factor INTEGER, data_nodes NAME[], distributed BOOLEAN); + +CREATE FUNCTION @extschema@.create_hypertable( + relation REGCLASS, + time_column_name NAME, + partitioning_column NAME = NULL, + number_partitions INTEGER = NULL, + associated_schema_name NAME = NULL, + associated_table_prefix NAME = NULL, + chunk_time_interval ANYELEMENT = NULL::bigint, + 
create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + partitioning_func REGPROC = NULL, + migrate_data BOOLEAN = FALSE, + chunk_target_size TEXT = NULL, + chunk_sizing_func REGPROC = '_timescaledb_functions.calculate_chunk_interval'::regproc, + time_partitioning_func REGPROC = NULL +) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create' LANGUAGE C VOLATILE; + +DROP FUNCTION IF EXISTS @extschema@.create_distributed_hypertable; + +DROP FUNCTION IF EXISTS @extschema@.add_data_node; +DROP FUNCTION IF EXISTS @extschema@.delete_data_node; +DROP FUNCTION IF EXISTS @extschema@.attach_data_node; +DROP FUNCTION IF EXISTS @extschema@.detach_data_node; +DROP FUNCTION IF EXISTS @extschema@.alter_data_node; + +DROP PROCEDURE IF EXISTS @extschema@.distributed_exec; +DROP FUNCTION IF EXISTS @extschema@.create_distributed_restore_point; +DROP FUNCTION IF EXISTS @extschema@.set_replication_factor; + +CREATE TABLE _timescaledb_catalog.compression_settings ( + relid regclass NOT NULL, + segmentby text[], + orderby text[], + orderby_desc bool[], + orderby_nullsfirst bool[], + CONSTRAINT compression_settings_pkey PRIMARY KEY (relid), + CONSTRAINT compression_settings_check_segmentby CHECK (array_ndims(segmentby) = 1), + CONSTRAINT compression_settings_check_orderby_null CHECK ( (orderby IS NULL AND orderby_desc IS NULL AND orderby_nullsfirst IS NULL) OR (orderby IS NOT NULL AND orderby_desc IS NOT NULL AND orderby_nullsfirst IS NOT NULL) ), + CONSTRAINT compression_settings_check_orderby_cardinality CHECK (array_ndims(orderby) = 1 AND array_ndims(orderby_desc) = 1 AND array_ndims(orderby_nullsfirst) = 1 AND cardinality(orderby) = cardinality(orderby_desc) AND cardinality(orderby) = cardinality(orderby_nullsfirst)) +); + +INSERT INTO _timescaledb_catalog.compression_settings(relid, segmentby, orderby, orderby_desc, orderby_nullsfirst) + SELECT + format('%I.%I', ht.schema_name, ht.table_name)::regclass, + array_agg(attname ORDER BY segmentby_column_index) FILTER(WHERE segmentby_column_index >= 1) AS compress_segmentby, + array_agg(attname ORDER BY orderby_column_index) FILTER(WHERE orderby_column_index >= 1) AS compress_orderby, + array_agg(NOT orderby_asc ORDER BY orderby_column_index) FILTER(WHERE orderby_column_index >= 1) AS compress_orderby_desc, + array_agg(orderby_nullsfirst ORDER BY orderby_column_index) FILTER(WHERE orderby_column_index >= 1) AS compress_orderby_nullsfirst + FROM _timescaledb_catalog.hypertable_compression hc + INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = hc.hypertable_id + GROUP BY hypertable_id, ht.schema_name, ht.table_name; + +GRANT SELECT ON _timescaledb_catalog.compression_settings TO PUBLIC; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_settings', ''); + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable_compression; +DROP VIEW IF EXISTS timescaledb_information.compression_settings; +DROP TABLE _timescaledb_catalog.hypertable_compression; + +DROP FOREIGN DATA WRAPPER IF EXISTS timescaledb_fdw; +DROP FUNCTION IF EXISTS @extschema@.timescaledb_fdw_handler(); +DROP FUNCTION IF EXISTS @extschema@.timescaledb_fdw_validator(text[], oid); + + +DROP FUNCTION IF EXISTS _timescaledb_functions.create_chunk_replica_table; +DROP FUNCTION IF EXISTS _timescaledb_functions.chunk_drop_replica; +DROP PROCEDURE IF EXISTS _timescaledb_functions.wait_subscription_sync; +DROP FUNCTION IF EXISTS _timescaledb_functions.health; +DROP FUNCTION IF 
EXISTS _timescaledb_functions.drop_stale_chunks; + +DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_replica_table; +DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_drop_replica; +DROP PROCEDURE IF EXISTS _timescaledb_internal.wait_subscription_sync; +DROP FUNCTION IF EXISTS _timescaledb_internal.health; +DROP FUNCTION IF EXISTS _timescaledb_internal.drop_stale_chunks; + +ALTER TABLE _timescaledb_catalog.remote_txn DROP CONSTRAINT remote_txn_remote_transaction_id_check; + +DROP TYPE IF EXISTS @extschema@.rxid CASCADE; +DROP FUNCTION IF EXISTS _timescaledb_functions.rxid_in; +DROP FUNCTION IF EXISTS _timescaledb_functions.rxid_out; + +DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_hypertable_info; +DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_chunk_info; +DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_compressed_chunk_stats; +DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_index_size; +DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_hypertable_info; +DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_chunk_info; +DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_compressed_chunk_stats; +DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_index_size; + +DROP FUNCTION IF EXISTS timescaledb_experimental.block_new_chunks; +DROP FUNCTION IF EXISTS timescaledb_experimental.allow_new_chunks; +DROP FUNCTION IF EXISTS timescaledb_experimental.subscription_exec; +DROP PROCEDURE IF EXISTS timescaledb_experimental.move_chunk; +DROP PROCEDURE IF EXISTS timescaledb_experimental.copy_chunk; +DROP PROCEDURE IF EXISTS timescaledb_experimental.cleanup_copy_chunk_operation; + +DROP FUNCTION IF EXISTS _timescaledb_functions.set_chunk_default_data_node; +DROP FUNCTION IF EXISTS _timescaledb_internal.set_chunk_default_data_node; + +DROP FUNCTION IF EXISTS _timescaledb_functions.drop_dist_ht_invalidation_trigger; +DROP FUNCTION IF EXISTS _timescaledb_internal.drop_dist_ht_invalidation_trigger; + +-- remove multinode catalog tables +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS timescaledb_information.data_nodes; +DROP VIEW IF EXISTS timescaledb_information.hypertables; +DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.remote_txn; +DROP TABLE _timescaledb_catalog.remote_txn; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable_data_node; +DROP TABLE _timescaledb_catalog.hypertable_data_node; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk_data_node; +DROP TABLE _timescaledb_catalog.chunk_data_node; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk_copy_operation; +DROP TABLE _timescaledb_catalog.chunk_copy_operation; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq; +DROP SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.dimension_partition; +DROP TABLE _timescaledb_catalog.dimension_partition; + +DROP FUNCTION IF EXISTS _timescaledb_functions.hypertable_remote_size; +DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_remote_size; +DROP FUNCTION IF EXISTS _timescaledb_functions.chunks_remote_size; +DROP FUNCTION IF EXISTS _timescaledb_internal.chunks_remote_size; +DROP FUNCTION IF EXISTS _timescaledb_functions.indexes_remote_size; +DROP FUNCTION IF EXISTS _timescaledb_internal.indexes_remote_size; +DROP FUNCTION IF EXISTS 
_timescaledb_functions.compressed_chunk_remote_stats; +DROP FUNCTION IF EXISTS _timescaledb_internal.compressed_chunk_remote_stats; + +-- rebuild _timescaledb_catalog.hypertable +ALTER TABLE _timescaledb_config.bgw_job + DROP CONSTRAINT bgw_job_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk + DROP CONSTRAINT chunk_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index + DROP CONSTRAINT chunk_index_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_agg + DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey, + DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.dimension + DROP CONSTRAINT dimension_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable + DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.tablespace + DROP CONSTRAINT tablespace_hypertable_id_fkey; + +DROP VIEW IF EXISTS timescaledb_information.hypertables; +DROP VIEW IF EXISTS timescaledb_information.job_stats; +DROP VIEW IF EXISTS timescaledb_information.jobs; +DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates; +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS timescaledb_information.dimensions; +DROP VIEW IF EXISTS timescaledb_information.compression_settings; +DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; +DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; +DROP VIEW IF EXISTS timescaledb_experimental.policies; + +-- recreate table +CREATE TABLE _timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable; +CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq; + +SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable + +DROP TABLE _timescaledb_catalog.hypertable; + +CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1; +SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value; +DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value; + +CREATE TABLE _timescaledb_catalog.hypertable ( + id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'), + schema_name name NOT NULL, + table_name name NOT NULL, + associated_schema_name name NOT NULL, + associated_table_prefix name NOT NULL, + num_dimensions smallint NOT NULL, + chunk_sizing_func_schema name NOT NULL, + chunk_sizing_func_name name NOT NULL, + chunk_target_size bigint NOT NULL, -- size in bytes + compression_state smallint NOT NULL DEFAULT 0, + compressed_hypertable_id integer, + status integer NOT NULL DEFAULT 0 +); + +SET timescaledb.restoring = off; + +INSERT INTO _timescaledb_catalog.hypertable ( + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + 
chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id +) +SELECT + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id +FROM + _timescaledb_catalog.hypertable_tmp +ORDER BY id; + +UPDATE _timescaledb_catalog.hypertable h +SET status = 3 +WHERE EXISTS ( + SELECT FROM _timescaledb_catalog.chunk c WHERE c.osm_chunk AND c.hypertable_id = h.id +); + +ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', ''); + +GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC; + +DROP TABLE _timescaledb_catalog.hypertable_tmp; +-- now add any constraints +ALTER TABLE _timescaledb_catalog.hypertable + ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), + ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name), + ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'), + ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2), + ADD CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0), + ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)), + ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC; + +-- 3. 
reestablish constraints on other tables +ALTER TABLE _timescaledb_config.bgw_job + ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.chunk_index + ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_agg + ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE, + ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.dimension + ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.tablespace + ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; + +CREATE SCHEMA _timescaledb_debug; + +-- Migrate existing compressed hypertables to new internal format +DO $$ +DECLARE + chunk regclass; + hypertable regclass; + ht_id integer; + index regclass; + column_name name; + cmd text; +BEGIN + SET timescaledb.restoring TO ON; + + -- Detach compressed chunks from their parent hypertables + FOR chunk, hypertable, ht_id IN + SELECT + format('%I.%I',ch.schema_name,ch.table_name)::regclass chunk, + format('%I.%I',ht.schema_name,ht.table_name)::regclass hypertable, + ht.id + FROM _timescaledb_catalog.chunk ch + INNER JOIN _timescaledb_catalog.hypertable ht_uncomp + ON ch.hypertable_id = ht_uncomp.compressed_hypertable_id + INNER JOIN _timescaledb_catalog.hypertable ht + ON ht.id = ht_uncomp.compressed_hypertable_id + LOOP + + cmd := format('ALTER TABLE %s NO INHERIT %s', chunk, hypertable); + EXECUTE cmd; + -- remove references to indexes from the compressed hypertable + DELETE FROM _timescaledb_catalog.chunk_index WHERE hypertable_id = ht_id; + + END LOOP; + + + FOR hypertable IN + SELECT + format('%I.%I',ht.schema_name,ht.table_name)::regclass hypertable + FROM _timescaledb_catalog.hypertable ht_uncomp + INNER JOIN _timescaledb_catalog.hypertable ht + ON ht.id = ht_uncomp.compressed_hypertable_id + LOOP + + -- remove indexes from the compressed hypertable (but not chunks) + FOR index IN + SELECT indexrelid::regclass FROM pg_index WHERE indrelid = hypertable + LOOP + cmd := format('DROP INDEX %s', index); + EXECUTE cmd; + END LOOP; + + -- remove columns from the compressed hypertable (but not chunks) + FOR column_name IN + SELECT attname FROM pg_attribute WHERE attrelid = hypertable AND attnum > 0 + LOOP + cmd := format('ALTER TABLE %s DROP COLUMN %I', hypertable, column_name); + EXECUTE cmd; + END LOOP; + + END 
LOOP; + + SET timescaledb.restoring TO OFF; +END $$; + +DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_constraint_add_table_fk_constraint; +DROP FUNCTION IF EXISTS _timescaledb_functions.hypertable_constraint_add_table_fk_constraint; + +-- only define stub here, actual code will be filled in at end of update script +CREATE FUNCTION _timescaledb_functions.constraint_clone(constraint_oid OID,target_oid REGCLASS) RETURNS VOID LANGUAGE PLPGSQL AS $$BEGIN END$$ SET search_path TO pg_catalog, pg_temp; + +DROP FUNCTION IF EXISTS _timescaledb_functions.chunks_in; +DROP FUNCTION IF EXISTS _timescaledb_internal.chunks_in; + +CREATE FUNCTION _timescaledb_functions.metadata_insert_trigger() RETURNS TRIGGER LANGUAGE PLPGSQL +AS $$ +BEGIN + IF EXISTS (SELECT FROM _timescaledb_catalog.metadata WHERE key = NEW.key) THEN + UPDATE _timescaledb_catalog.metadata SET value = NEW.value WHERE key = NEW.key; + RETURN NULL; + END IF; + RETURN NEW; +END +$$ SET search_path TO pg_catalog, pg_temp; + +CREATE TRIGGER metadata_insert_trigger BEFORE INSERT ON _timescaledb_catalog.metadata FOR EACH ROW EXECUTE PROCEDURE _timescaledb_functions.metadata_insert_trigger(); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$ WHERE key <> 'uuid' $$); + +-- Remove unwanted entries from extconfig and extcondition in pg_extension +-- We use ALTER EXTENSION DROP TABLE to remove these entries. +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_cache.cache_inval_hypertable; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_cache.cache_inval_extension; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_cache.cache_inval_bgw_job; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_internal.job_errors; + +-- Associate the above tables back to keep the dependencies safe +ALTER EXTENSION timescaledb ADD TABLE _timescaledb_cache.cache_inval_hypertable; +ALTER EXTENSION timescaledb ADD TABLE _timescaledb_cache.cache_inval_extension; +ALTER EXTENSION timescaledb ADD TABLE _timescaledb_cache.cache_inval_bgw_job; +ALTER EXTENSION timescaledb ADD TABLE _timescaledb_internal.job_errors; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; +ALTER EXTENSION timescaledb ADD TABLE _timescaledb_catalog.hypertable; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', 'WHERE id >= 1'); + +CREATE FUNCTION _timescaledb_functions.relation_approximate_size(relation REGCLASS) +RETURNS TABLE (total_size BIGINT, heap_size BIGINT, index_size BIGINT, toast_size BIGINT) +AS '@MODULE_PATHNAME@', 'ts_relation_approximate_size' LANGUAGE C STRICT VOLATILE; + +CREATE FUNCTION @extschema@.hypertable_approximate_detailed_size(relation REGCLASS) +RETURNS TABLE (table_bytes BIGINT, index_bytes BIGINT, toast_bytes BIGINT, total_bytes BIGINT) +AS '@MODULE_PATHNAME@', 'ts_hypertable_approximate_size' LANGUAGE C VOLATILE; + +--- returns approximate total-bytes for a hypertable (includes table + index) +CREATE FUNCTION @extschema@.hypertable_approximate_size( + hypertable REGCLASS) +RETURNS BIGINT +LANGUAGE SQL VOLATILE STRICT AS +$BODY$ + SELECT sum(total_bytes)::bigint + FROM @extschema@.hypertable_approximate_detailed_size(hypertable); +$BODY$ SET search_path TO pg_catalog, pg_temp; + +DROP FUNCTION IF EXISTS @extschema@.compress_chunk; +CREATE FUNCTION @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN = true, recompress BOOLEAN = false) RETURNS REGCLASS AS '' LANGUAGE SQL SET search_path TO pg_catalog, pg_temp; + diff --git 
a/sql/updates/2.14.0--2.13.1.sql b/sql/updates/2.14.0--2.13.1.sql new file mode 100644 index 00000000000..a3a6600ede3 --- /dev/null +++ b/sql/updates/2.14.0--2.13.1.sql @@ -0,0 +1,797 @@ +-- check whether we can safely downgrade compression setup +DO $$ +DECLARE + hypertable regclass; + ht_uncomp regclass; + chunk_relids oid[]; + ht_id integer; +BEGIN + + FOR hypertable, ht_uncomp, ht_id IN + SELECT + format('%I.%I',ht.schema_name,ht.table_name)::regclass, + format('%I.%I',ht_uncomp.schema_name,ht_uncomp.table_name)::regclass, + ht.id + FROM _timescaledb_catalog.hypertable ht_uncomp + INNER JOIN _timescaledb_catalog.hypertable ht + ON ht.id = ht_uncomp.compressed_hypertable_id + LOOP + + -- hypertables need to at least have 1 compressed chunk so we can restore the columns + IF NOT EXISTS(SELECT FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id) THEN + RAISE USING + ERRCODE = 'feature_not_supported', + MESSAGE = 'Cannot downgrade compressed hypertables with no compressed chunks. Disable compression on the affected hypertable before downgrading.', + DETAIL = 'The following hypertable is affected: '|| ht_uncomp::text; + END IF; + + chunk_relids := array(SELECT format('%I.%I',schema_name,table_name)::regclass FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id); + + -- any hypertable with distinct compression settings cannot be downgraded + IF EXISTS ( + SELECT FROM ( + SELECT DISTINCT segmentby, orderby, orderby_desc, orderby_nullsfirst + FROM _timescaledb_catalog.compression_settings + WHERE relid = hypertable OR relid = ANY(chunk_relids) + ) dist_settings HAVING count(*) > 1 + ) THEN + RAISE USING + ERRCODE = 'feature_not_supported', + MESSAGE = 'Cannot downgrade hypertables with distinct compression settings. Decompress the affected hypertable before downgrading.', + DETAIL = 'The following hypertable is affected: '|| ht_uncomp::text; + END IF; + + END LOOP; +END +$$; + +CREATE FUNCTION _timescaledb_functions.tmp_resolve_indkeys(oid,int2[]) RETURNS text[] LANGUAGE SQL AS $$ + SELECT array_agg(attname) + FROM ( + SELECT attname + FROM (SELECT unnest($2) attnum) indkeys + JOIN LATERAL ( + SELECT attname FROM pg_attribute att WHERE att.attnum=indkeys.attnum AND att.attrelid=$1 + ) r ON true + ) resolve; +$$ SET search_path TO pg_catalog, pg_temp; + +DO $$ +DECLARE + chunk regclass; + hypertable regclass; + ht_id integer; + chunk_id integer; + _index regclass; + ht_index regclass; + chunk_index regclass; + index_name name; + chunk_index_name name; + _indkey text[]; + column_name name; + column_type regtype; + cmd text; +BEGIN + SET timescaledb.restoring TO ON; + + FOR hypertable, ht_id IN + SELECT + format('%I.%I',ht.schema_name,ht.table_name)::regclass, + ht.id + FROM _timescaledb_catalog.hypertable ht_uncomp + INNER JOIN _timescaledb_catalog.hypertable ht + ON ht.id = ht_uncomp.compressed_hypertable_id + LOOP + + -- get first chunk which we use as template for restoring columns and indexes + SELECT format('%I.%I',schema_name,table_name)::regclass INTO STRICT chunk FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id ORDER by id LIMIT 1; + + -- restore columns from the compressed hypertable + FOR column_name, column_type IN + SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid = chunk AND attnum > 0 + LOOP + cmd := format('ALTER TABLE %s ADD COLUMN %I %s', hypertable, column_name, column_type); + EXECUTE cmd; + END LOOP; + + -- restore indexes on the compressed hypertable + FOR _index, _indkey IN + SELECT indexrelid::regclass, 
_timescaledb_functions.tmp_resolve_indkeys(indrelid, indkey) FROM pg_index WHERE indrelid = chunk + LOOP + SELECT relname INTO STRICT index_name FROM pg_class WHERE oid = _index; + cmd := pg_get_indexdef(_index); + cmd := replace(cmd, format(' INDEX %s ON ', index_name), ' INDEX ON '); + cmd := replace(cmd, chunk::text, hypertable::text); + EXECUTE cmd; + + -- get indexrelid of index we just created on hypertable + SELECT indexrelid INTO STRICT ht_index FROM pg_index WHERE indrelid = hypertable AND _timescaledb_functions.tmp_resolve_indkeys(hypertable, indkey) = _indkey; + SELECT relname INTO STRICT index_name FROM pg_class WHERE oid = ht_index; + + -- restore indexes in our catalog + FOR chunk, chunk_id IN + SELECT format('%I.%I',schema_name,table_name)::regclass, id FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id + LOOP + SELECT indexrelid INTO STRICT chunk_index FROM pg_index WHERE indrelid = chunk AND _timescaledb_functions.tmp_resolve_indkeys(chunk, indkey) = _indkey; + SELECT relname INTO STRICT chunk_index_name FROM pg_class WHERE oid = chunk_index; + INSERT INTO _timescaledb_catalog.chunk_index (chunk_id, index_name, hypertable_id, hypertable_index_name) + VALUES (chunk_id, chunk_index_name, ht_id, index_name); + END LOOP; + + END LOOP; + + -- restore inheritance + cmd := format('ALTER TABLE %s INHERIT %s', chunk, hypertable); + EXECUTE cmd; + + END LOOP; + + SET timescaledb.restoring TO OFF; +END $$; + +DROP FUNCTION _timescaledb_functions.tmp_resolve_indkeys; + +CREATE FUNCTION _timescaledb_functions.ping_data_node(node_name NAME, timeout INTERVAL = NULL) RETURNS BOOLEAN +AS '@MODULE_PATHNAME@', 'ts_data_node_ping' LANGUAGE C VOLATILE; + +CREATE FUNCTION _timescaledb_functions.remote_txn_heal_data_node(foreign_server_oid oid) +RETURNS INT +AS '@MODULE_PATHNAME@', 'ts_remote_txn_heal_data_node' +LANGUAGE C STRICT; + +CREATE FUNCTION _timescaledb_functions.set_dist_id(dist_id UUID) RETURNS BOOL +AS '@MODULE_PATHNAME@', 'ts_dist_set_id' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION _timescaledb_functions.set_peer_dist_id(dist_id UUID) RETURNS BOOL +AS '@MODULE_PATHNAME@', 'ts_dist_set_peer_id' LANGUAGE C VOLATILE STRICT; + +-- Function to validate that a node has local settings to function as +-- a data node. Throws error if validation fails. 
+CREATE FUNCTION _timescaledb_functions.validate_as_data_node() RETURNS void +AS '@MODULE_PATHNAME@', 'ts_dist_validate_as_data_node' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION _timescaledb_functions.show_connection_cache() +RETURNS TABLE ( + node_name name, + user_name name, + host text, + port int, + database name, + backend_pid int, + connection_status text, + transaction_status text, + transaction_depth int, + processing boolean, + invalidated boolean) +AS '@MODULE_PATHNAME@', 'ts_remote_connection_cache_show' LANGUAGE C VOLATILE STRICT; + +DROP FUNCTION IF EXISTS @extschema@.create_hypertable(relation REGCLASS, time_column_name NAME, partitioning_column NAME, number_partitions INTEGER, associated_schema_name NAME, associated_table_prefix NAME, chunk_time_interval ANYELEMENT, create_default_indexes BOOLEAN, if_not_exists BOOLEAN, partitioning_func REGPROC, migrate_data BOOLEAN, chunk_target_size TEXT, chunk_sizing_func REGPROC, time_partitioning_func REGPROC); + +CREATE FUNCTION @extschema@.create_hypertable( + relation REGCLASS, + time_column_name NAME, + partitioning_column NAME = NULL, + number_partitions INTEGER = NULL, + associated_schema_name NAME = NULL, + associated_table_prefix NAME = NULL, + chunk_time_interval ANYELEMENT = NULL::bigint, + create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + partitioning_func REGPROC = NULL, + migrate_data BOOLEAN = FALSE, + chunk_target_size TEXT = NULL, + chunk_sizing_func REGPROC = '_timescaledb_functions.calculate_chunk_interval'::regproc, + time_partitioning_func REGPROC = NULL, + replication_factor INTEGER = NULL, + data_nodes NAME[] = NULL, + distributed BOOLEAN = NULL +) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.create_distributed_hypertable( + relation REGCLASS, + time_column_name NAME, + partitioning_column NAME = NULL, + number_partitions INTEGER = NULL, + associated_schema_name NAME = NULL, + associated_table_prefix NAME = NULL, + chunk_time_interval ANYELEMENT = NULL::bigint, + create_default_indexes BOOLEAN = TRUE, + if_not_exists BOOLEAN = FALSE, + partitioning_func REGPROC = NULL, + migrate_data BOOLEAN = FALSE, + chunk_target_size TEXT = NULL, + chunk_sizing_func REGPROC = '_timescaledb_functions.calculate_chunk_interval'::regproc, + time_partitioning_func REGPROC = NULL, + replication_factor INTEGER = NULL, + data_nodes NAME[] = NULL +) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_distributed_create' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.add_data_node( + node_name NAME, + host TEXT, + database NAME = NULL, + port INTEGER = NULL, + if_not_exists BOOLEAN = FALSE, + bootstrap BOOLEAN = TRUE, + password TEXT = NULL +) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME, + node_created BOOL, database_created BOOL, extension_created BOOL) +AS '@MODULE_PATHNAME@', 'ts_data_node_add' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.delete_data_node( + node_name NAME, + if_exists BOOLEAN = FALSE, + force BOOLEAN = FALSE, + repartition BOOLEAN = TRUE, + drop_database BOOLEAN = FALSE +) RETURNS BOOLEAN AS '@MODULE_PATHNAME@', 'ts_data_node_delete' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.attach_data_node( + node_name NAME, + hypertable REGCLASS, + if_not_attached BOOLEAN = FALSE, + repartition BOOLEAN = TRUE +) RETURNS TABLE(hypertable_id INTEGER, 
node_hypertable_id INTEGER, node_name NAME) +AS '@MODULE_PATHNAME@', 'ts_data_node_attach' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.detach_data_node( + node_name NAME, + hypertable REGCLASS = NULL, + if_attached BOOLEAN = FALSE, + force BOOLEAN = FALSE, + repartition BOOLEAN = TRUE, + drop_remote_data BOOLEAN = FALSE +) RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_data_node_detach' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.alter_data_node( + node_name NAME, + host TEXT = NULL, + database NAME = NULL, + port INTEGER = NULL, + available BOOLEAN = NULL +) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME, available BOOLEAN) + +AS '@MODULE_PATHNAME@', 'ts_data_node_alter' LANGUAGE C VOLATILE; +CREATE PROCEDURE @extschema@.distributed_exec( + query TEXT, + node_list name[] = NULL, + transactional BOOLEAN = TRUE) +AS '@MODULE_PATHNAME@', 'ts_distributed_exec' LANGUAGE C; + +CREATE FUNCTION @extschema@.create_distributed_restore_point( + name TEXT +) RETURNS TABLE(node_name NAME, node_type TEXT, restore_point pg_lsn) +AS '@MODULE_PATHNAME@', 'ts_create_distributed_restore_point' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION @extschema@.set_replication_factor( + hypertable REGCLASS, + replication_factor INTEGER +) RETURNS VOID +AS '@MODULE_PATHNAME@', 'ts_hypertable_distributed_set_replication_factor' LANGUAGE C VOLATILE; + +CREATE TABLE _timescaledb_catalog.hypertable_compression ( + hypertable_id integer NOT NULL, + attname name NOT NULL, + compression_algorithm_id smallint, + segmentby_column_index smallint, + orderby_column_index smallint, + orderby_asc boolean, + orderby_nullsfirst boolean, + -- table constraints + CONSTRAINT hypertable_compression_pkey PRIMARY KEY (hypertable_id, attname), + CONSTRAINT hypertable_compression_hypertable_id_orderby_column_index_key UNIQUE (hypertable_id, orderby_column_index), + CONSTRAINT hypertable_compression_hypertable_id_segmentby_column_index_key UNIQUE (hypertable_id, segmentby_column_index), + CONSTRAINT hypertable_compression_compression_algorithm_id_fkey FOREIGN KEY (compression_algorithm_id) REFERENCES _timescaledb_catalog.compression_algorithm (id) +); + +INSERT INTO _timescaledb_catalog.hypertable_compression( + hypertable_id, + attname, + compression_algorithm_id, + segmentby_column_index, + orderby_column_index, + orderby_asc, + orderby_nullsfirst +) SELECT + ht.id, + att.attname, + CASE + WHEN att.attname = ANY(cs.segmentby) THEN 0 + WHEN att.atttypid IN ('numeric'::regtype) THEN 1 + WHEN att.atttypid IN ('float4'::regtype,'float8'::regtype) THEN 3 + WHEN att.atttypid IN ('int2'::regtype,'int4'::regtype,'int8'::regtype,'date'::regtype,'timestamp'::regtype,'timestamptz'::regtype) THEN 4 + WHEN EXISTS(SELECT FROM pg_operator op WHERE op.oprname = '=' AND op.oprkind = 'b' AND op.oprcanhash = true AND op.oprleft = att.atttypid AND op.oprright = att.atttypid) THEN 2 + ELSE 1 + END AS compression_algorithm_id, + CASE WHEN att.attname = ANY(cs.segmentby) THEN array_position(cs.segmentby, att.attname::text) ELSE NULL END AS segmentby_column_index, + CASE WHEN att.attname = ANY(cs.orderby) THEN array_position(cs.orderby, att.attname::text) ELSE NULL END AS orderby_column_index, + CASE WHEN att.attname = ANY(cs.orderby) THEN NOT cs.orderby_desc[array_position(cs.orderby, att.attname::text)] ELSE false END AS orderby_asc, + CASE WHEN att.attname = ANY(cs.orderby) THEN cs.orderby_nullsfirst[array_position(cs.orderby, att.attname::text)] ELSE false END AS orderby_nullsfirst +FROM _timescaledb_catalog.hypertable ht 
+INNER JOIN _timescaledb_catalog.compression_settings cs ON cs.relid = format('%I.%I',ht.schema_name,ht.table_name)::regclass +LEFT JOIN pg_attribute att ON att.attrelid = format('%I.%I',ht.schema_name,ht.table_name)::regclass AND attnum > 0 +WHERE compressed_hypertable_id IS NOT NULL; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_compression', ''); +GRANT SELECT ON _timescaledb_catalog.hypertable_compression TO PUBLIC; + +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.compression_settings; +DROP VIEW timescaledb_information.compression_settings; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_settings; +DROP TABLE _timescaledb_catalog.compression_settings; + +CREATE FUNCTION @extschema@.timescaledb_fdw_handler() RETURNS fdw_handler AS '@MODULE_PATHNAME@', 'ts_timescaledb_fdw_handler' LANGUAGE C STRICT; +CREATE FUNCTION @extschema@.timescaledb_fdw_validator(text[], oid) RETURNS void AS '@MODULE_PATHNAME@', 'ts_timescaledb_fdw_validator' LANGUAGE C STRICT; + +CREATE FOREIGN DATA WRAPPER timescaledb_fdw HANDLER @extschema@.timescaledb_fdw_handler VALIDATOR @extschema@.timescaledb_fdw_validator; + +CREATE FUNCTION _timescaledb_functions.create_chunk_replica_table( + chunk REGCLASS, + data_node_name NAME +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_chunk_create_replica_table' LANGUAGE C VOLATILE; + +CREATE FUNCTION _timescaledb_functions.chunk_drop_replica( + chunk REGCLASS, + node_name NAME +) RETURNS VOID +AS '@MODULE_PATHNAME@', 'ts_chunk_drop_replica' LANGUAGE C VOLATILE; + +CREATE PROCEDURE _timescaledb_functions.wait_subscription_sync( + schema_name NAME, + table_name NAME, + retry_count INT DEFAULT 18000, + retry_delay_ms NUMERIC DEFAULT 0.200 +) +LANGUAGE PLPGSQL AS +$BODY$ +DECLARE + in_sync BOOLEAN; +BEGIN + FOR i in 1 .. 
retry_count + LOOP + SELECT pgs.srsubstate = 'r' + INTO in_sync + FROM pg_subscription_rel pgs + JOIN pg_class pgc ON relname = table_name + JOIN pg_namespace n ON (n.OID = pgc.relnamespace) + WHERE pgs.srrelid = pgc.oid AND schema_name = n.nspname; + + if (in_sync IS NULL OR NOT in_sync) THEN + PERFORM pg_sleep(retry_delay_ms); + ELSE + RETURN; + END IF; + END LOOP; + RAISE 'subscription sync wait timedout'; +END +$BODY$ SET search_path TO pg_catalog, pg_temp; + +CREATE FUNCTION _timescaledb_functions.health() RETURNS +TABLE (node_name NAME, healthy BOOL, in_recovery BOOL, error TEXT) +AS '@MODULE_PATHNAME@', 'ts_health_check' LANGUAGE C VOLATILE; + +CREATE FUNCTION _timescaledb_functions.drop_stale_chunks( + node_name NAME, + chunks integer[] = NULL +) RETURNS VOID +AS '@MODULE_PATHNAME@', 'ts_chunks_drop_stale' LANGUAGE C VOLATILE; + +CREATE FUNCTION _timescaledb_functions.rxid_in(cstring) RETURNS @extschema@.rxid + AS '@MODULE_PATHNAME@', 'ts_remote_txn_id_in' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE FUNCTION _timescaledb_functions.rxid_out(@extschema@.rxid) RETURNS cstring + AS '@MODULE_PATHNAME@', 'ts_remote_txn_id_out' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE; + +CREATE TYPE @extschema@.rxid ( + internallength = 16, + input = _timescaledb_functions.rxid_in, + output = _timescaledb_functions.rxid_out +); + +CREATE FUNCTION _timescaledb_functions.data_node_hypertable_info( + node_name NAME, + schema_name_in name, + table_name_in name +) +RETURNS TABLE ( + table_bytes bigint, + index_bytes bigint, + toast_bytes bigint, + total_bytes bigint) +AS '@MODULE_PATHNAME@', 'ts_dist_remote_hypertable_info' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION _timescaledb_functions.data_node_chunk_info( + node_name NAME, + schema_name_in name, + table_name_in name +) +RETURNS TABLE ( + chunk_id integer, + chunk_schema name, + chunk_name name, + table_bytes bigint, + index_bytes bigint, + toast_bytes bigint, + total_bytes bigint) +AS '@MODULE_PATHNAME@', 'ts_dist_remote_chunk_info' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION _timescaledb_functions.data_node_compressed_chunk_stats(node_name name, schema_name_in name, table_name_in name) + RETURNS TABLE ( + chunk_schema name, + chunk_name name, + compression_status text, + before_compression_table_bytes bigint, + before_compression_index_bytes bigint, + before_compression_toast_bytes bigint, + before_compression_total_bytes bigint, + after_compression_table_bytes bigint, + after_compression_index_bytes bigint, + after_compression_toast_bytes bigint, + after_compression_total_bytes bigint + ) +AS '@MODULE_PATHNAME@' , 'ts_dist_remote_compressed_chunk_info' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION _timescaledb_functions.data_node_index_size(node_name name, schema_name_in name, index_name_in name) +RETURNS TABLE ( hypertable_id INTEGER, total_bytes BIGINT) +AS '@MODULE_PATHNAME@' , 'ts_dist_remote_hypertable_index_info' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION timescaledb_experimental.block_new_chunks(data_node_name NAME, hypertable REGCLASS = NULL, force BOOLEAN = FALSE) RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_data_node_block_new_chunks' LANGUAGE C VOLATILE; + +CREATE FUNCTION timescaledb_experimental.allow_new_chunks(data_node_name NAME, hypertable REGCLASS = NULL) RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_data_node_allow_new_chunks' LANGUAGE C VOLATILE; + +CREATE PROCEDURE timescaledb_experimental.move_chunk( + chunk REGCLASS, + source_node NAME = NULL, + destination_node NAME = NULL, + operation_id NAME = NULL) +AS 
'@MODULE_PATHNAME@', 'ts_move_chunk_proc' LANGUAGE C; + +CREATE PROCEDURE timescaledb_experimental.copy_chunk( + chunk REGCLASS, + source_node NAME = NULL, + destination_node NAME = NULL, + operation_id NAME = NULL) +AS '@MODULE_PATHNAME@', 'ts_copy_chunk_proc' LANGUAGE C; + +CREATE FUNCTION timescaledb_experimental.subscription_exec( + subscription_command TEXT +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_subscription_exec' LANGUAGE C VOLATILE; + +CREATE PROCEDURE timescaledb_experimental.cleanup_copy_chunk_operation( + operation_id NAME) +AS '@MODULE_PATHNAME@', 'ts_copy_chunk_cleanup_proc' LANGUAGE C; + +CREATE FUNCTION _timescaledb_functions.set_chunk_default_data_node(chunk REGCLASS, node_name NAME) RETURNS BOOLEAN +AS '@MODULE_PATHNAME@', 'ts_chunk_set_default_data_node' LANGUAGE C VOLATILE; + +CREATE FUNCTION _timescaledb_functions.drop_dist_ht_invalidation_trigger( + raw_hypertable_id INTEGER +) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_drop_dist_ht_invalidation_trigger' LANGUAGE C STRICT VOLATILE; + +-- restore multinode catalog tables +CREATE TABLE _timescaledb_catalog.remote_txn ( + data_node_name name, --this is really only to allow us to cleanup stuff on a per-node basis. + remote_transaction_id text NOT NULL, + -- table constraints + CONSTRAINT remote_txn_pkey PRIMARY KEY (remote_transaction_id) +); + +ALTER TABLE _timescaledb_catalog.remote_txn ADD CONSTRAINT remote_txn_remote_transaction_id_check CHECK (remote_transaction_id::@extschema@.rxid IS NOT NULL); + +CREATE INDEX remote_txn_data_node_name_idx ON _timescaledb_catalog.remote_txn (data_node_name); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.remote_txn', ''); +GRANT SELECT ON TABLE _timescaledb_catalog.remote_txn TO PUBLIC; + +CREATE TABLE _timescaledb_catalog.hypertable_data_node ( + hypertable_id integer NOT NULL, + node_hypertable_id integer NULL, + node_name name NOT NULL, + block_chunks boolean NOT NULL, + -- table constraints + CONSTRAINT hypertable_data_node_hypertable_id_node_name_key UNIQUE (hypertable_id, node_name), + CONSTRAINT hypertable_data_node_node_hypertable_id_node_name_key UNIQUE (node_hypertable_id, node_name), + CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) +); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_data_node', ''); +GRANT SELECT ON TABLE _timescaledb_catalog.hypertable_data_node TO PUBLIC; + +CREATE TABLE _timescaledb_catalog.chunk_data_node ( + chunk_id integer NOT NULL, + node_chunk_id integer NOT NULL, + node_name name NOT NULL, + -- table constraints + CONSTRAINT chunk_data_node_chunk_id_node_name_key UNIQUE (chunk_id, node_name), + CONSTRAINT chunk_data_node_node_chunk_id_node_name_key UNIQUE (node_chunk_id, node_name), + CONSTRAINT chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) +); + +CREATE INDEX chunk_data_node_node_name_idx ON _timescaledb_catalog.chunk_data_node (node_name); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_data_node', ''); +GRANT SELECT ON TABLE _timescaledb_catalog.chunk_data_node TO PUBLIC; + +CREATE SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq MINVALUE 1; +GRANT SELECT ON SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq TO PUBLIC; + +CREATE TABLE _timescaledb_catalog.chunk_copy_operation ( + operation_id name NOT NULL, -- the publisher/subscriber identifier used + backend_pid integer NOT NULL, -- the pid of the backend running this 
activity + completed_stage name NOT NULL, -- the completed stage/step + time_start timestamptz NOT NULL DEFAULT NOW(), -- start time of the activity + chunk_id integer NOT NULL, + compress_chunk_name name NOT NULL, + source_node_name name NOT NULL, + dest_node_name name NOT NULL, + delete_on_source_node bool NOT NULL, -- is a move or copy activity + -- table constraints + CONSTRAINT chunk_copy_operation_pkey PRIMARY KEY (operation_id), + CONSTRAINT chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE +); + +GRANT SELECT ON TABLE _timescaledb_catalog.chunk_copy_operation TO PUBLIC; + +CREATE TABLE _timescaledb_catalog.dimension_partition ( + dimension_id integer NOT NULL REFERENCES _timescaledb_catalog.dimension (id) ON DELETE CASCADE, + range_start bigint NOT NULL, + data_nodes name[] NULL, + UNIQUE (dimension_id, range_start) +); + +GRANT SELECT ON TABLE _timescaledb_catalog.dimension_partition TO PUBLIC; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension_partition', ''); +CREATE FUNCTION _timescaledb_functions.hypertable_remote_size( + schema_name_in name, + table_name_in name) +RETURNS TABLE ( + table_bytes bigint, + index_bytes bigint, + toast_bytes bigint, + total_bytes bigint, + node_name NAME) +LANGUAGE SQL VOLATILE STRICT AS +$BODY$ +$BODY$ SET search_path TO pg_catalog, pg_temp; + +CREATE FUNCTION _timescaledb_functions.chunks_remote_size( + schema_name_in name, + table_name_in name) +RETURNS TABLE ( + chunk_id integer, + chunk_schema NAME, + chunk_name NAME, + table_bytes bigint, + index_bytes bigint, + toast_bytes bigint, + total_bytes bigint, + node_name NAME) +LANGUAGE SQL VOLATILE STRICT AS +$BODY$ +$BODY$ SET search_path TO pg_catalog, pg_temp; + +CREATE FUNCTION _timescaledb_functions.indexes_remote_size( + schema_name_in NAME, + table_name_in NAME, + index_name_in NAME +) +RETURNS BIGINT +LANGUAGE SQL VOLATILE STRICT AS +$BODY$ +$BODY$ SET search_path TO pg_catalog, pg_temp; + +CREATE FUNCTION _timescaledb_functions.compressed_chunk_remote_stats(schema_name_in name, table_name_in name) + RETURNS TABLE ( + chunk_schema name, + chunk_name name, + compression_status text, + before_compression_table_bytes bigint, + before_compression_index_bytes bigint, + before_compression_toast_bytes bigint, + before_compression_total_bytes bigint, + after_compression_table_bytes bigint, + after_compression_index_bytes bigint, + after_compression_toast_bytes bigint, + after_compression_total_bytes bigint, + node_name name) + LANGUAGE SQL + STABLE STRICT + AS +$BODY$ +$BODY$ SET search_path TO pg_catalog, pg_temp; + +-- recreate the _timescaledb_catalog.hypertable table as new field was added +-- 1. 
drop CONSTRAINTS from other tables referencing the existing one +ALTER TABLE _timescaledb_config.bgw_job + DROP CONSTRAINT bgw_job_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk + DROP CONSTRAINT chunk_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.chunk_index + DROP CONSTRAINT chunk_index_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_agg + DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey, + DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.dimension + DROP CONSTRAINT dimension_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable + DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + DROP CONSTRAINT hypertable_data_node_hypertable_id_fkey; +ALTER TABLE _timescaledb_catalog.tablespace + DROP CONSTRAINT tablespace_hypertable_id_fkey; + +-- drop dependent views +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.hypertables; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.job_stats; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.jobs; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.continuous_aggregates; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunks; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.dimensions; +ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.compressed_chunk_stats; +ALTER EXTENSION timescaledb DROP VIEW timescaledb_experimental.policies; + +DROP VIEW timescaledb_information.hypertables; +DROP VIEW timescaledb_information.job_stats; +DROP VIEW timescaledb_information.jobs; +DROP VIEW timescaledb_information.continuous_aggregates; +DROP VIEW timescaledb_information.chunks; +DROP VIEW timescaledb_information.dimensions; +DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +DROP VIEW _timescaledb_internal.compressed_chunk_stats; +DROP VIEW timescaledb_experimental.policies; + +-- recreate table +CREATE TABLE _timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable; +CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq; + +SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable + +DROP TABLE _timescaledb_catalog.hypertable; + +CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1; +SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value; +DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value; + +CREATE TABLE _timescaledb_catalog.hypertable ( + id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'), + schema_name name NOT NULL, + table_name name NOT NULL, + associated_schema_name name NOT NULL, + associated_table_prefix name NOT NULL, + num_dimensions smallint NOT NULL, + 
chunk_sizing_func_schema name NOT NULL, + chunk_sizing_func_name name NOT NULL, + chunk_target_size bigint NOT NULL, -- size in bytes + compression_state smallint NOT NULL DEFAULT 0, + compressed_hypertable_id integer, + replication_factor smallint NULL, + status int NOT NULL DEFAULT 0 +); + +SET timescaledb.restoring = off; + +INSERT INTO _timescaledb_catalog.hypertable ( + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id +) +SELECT + id, + schema_name, + table_name, + associated_schema_name, + associated_table_prefix, + num_dimensions, + chunk_sizing_func_schema, + chunk_sizing_func_name, + chunk_target_size, + compression_state, + compressed_hypertable_id +FROM + _timescaledb_catalog.hypertable_tmp +ORDER BY id; + +ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', ''); + +GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC; +GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC; + +DROP TABLE _timescaledb_catalog.hypertable_tmp; +-- now add any constraints +ALTER TABLE _timescaledb_catalog.hypertable + -- ADD CONSTRAINT hypertable_pkey PRIMARY KEY (id), + ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), + ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name), + ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'), + -- internal compressed hypertables have compression state = 2 + ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2), + ADD CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0), + ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)), + ADD CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1), + ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); + +GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC; + +-- 3. 
reestablish constraints on other tables +ALTER TABLE _timescaledb_config.bgw_job + ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.chunk + ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.chunk_index + ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_agg + ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE, + ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function + ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold + ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.dimension + ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_compression + ADD CONSTRAINT hypertable_compression_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_catalog.hypertable_data_node + ADD CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); +ALTER TABLE _timescaledb_catalog.tablespace + ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; + +DROP FUNCTION IF EXISTS _timescaledb_debug.extension_state; +DROP SCHEMA IF EXISTS _timescaledb_debug; + +DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_constraint_add_table_fk_constraint; + +DROP FUNCTION _timescaledb_functions.constraint_clone; + +CREATE FUNCTION _timescaledb_functions.hypertable_constraint_add_table_fk_constraint(user_ht_constraint_name name,user_ht_schema_name name,user_ht_table_name name,compress_ht_id integer) RETURNS void LANGUAGE PLPGSQL AS $$BEGIN END$$ SET search_path TO pg_catalog,pg_temp; + +CREATE FUNCTION _timescaledb_functions.chunks_in(record RECORD, chunks INTEGER[]) RETURNS BOOL +AS 'BEGIN END' LANGUAGE PLPGSQL SET search_path TO pg_catalog,pg_temp; + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$ + WHERE KEY = 'exported_uuid' $$); + +DROP TRIGGER metadata_insert_trigger ON _timescaledb_catalog.metadata; +DROP FUNCTION _timescaledb_functions.metadata_insert_trigger(); + +DROP FUNCTION IF EXISTS _timescaledb_functions.get_orderby_defaults(regclass,text[]); +DROP FUNCTION IF EXISTS _timescaledb_functions.get_segmentby_defaults(regclass); + +--- re-include in the pg_dump config +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_cache.cache_inval_hypertable', ''); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_cache.cache_inval_extension', ''); +SELECT 
pg_catalog.pg_extension_config_dump('_timescaledb_cache.cache_inval_bgw_job', ''); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_internal.job_errors', ''); + +-- Remove unwanted entry from extconfig and extcondition in pg_extension +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; +-- Associate the above table back to keep the dependencies safe +ALTER EXTENSION timescaledb ADD TABLE _timescaledb_catalog.hypertable; +-- include this now in the config +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', ''); +DROP FUNCTION IF EXISTS _timescaledb_functions.relation_approximate_size(relation REGCLASS); +DROP FUNCTION IF EXISTS @extschema@.hypertable_approximate_detailed_size(relation REGCLASS); +DROP FUNCTION IF EXISTS @extschema@.hypertable_approximate_size(hypertable REGCLASS); + +DROP FUNCTION IF EXISTS @extschema@.compress_chunk; +CREATE FUNCTION @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN = true) RETURNS REGCLASS AS '' LANGUAGE SQL SET search_path TO pg_catalog,pg_temp; + diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index 5f4c8c4fb79..e69de29bb2d 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -1,451 +0,0 @@ - --- ERROR if trying to update the extension while multinode is present -DO $$ -DECLARE - data_nodes TEXT; - dist_hypertables TEXT; -BEGIN - SELECT string_agg(format('%I.%I', schema_name, table_name), ', ') - INTO dist_hypertables - FROM _timescaledb_catalog.hypertable - WHERE replication_factor > 0; - - IF dist_hypertables IS NOT NULL THEN - RAISE USING - ERRCODE = 'feature_not_supported', - MESSAGE = 'cannot upgrade because multi-node has been removed in 2.14.0', - DETAIL = 'The following distributed hypertables should be migrated to regular: '||dist_hypertables; - END IF; - - SELECT string_agg(format('%I', srv.srvname), ', ') - INTO data_nodes - FROM pg_foreign_server srv - JOIN pg_foreign_data_wrapper fdw ON srv.srvfdw = fdw.oid AND fdw.fdwname = 'timescaledb_fdw'; - - IF data_nodes IS NOT NULL THEN - RAISE USING - ERRCODE = 'feature_not_supported', - MESSAGE = 'cannot upgrade because multi-node has been removed in 2.14.0', - DETAIL = 'The following data nodes should be removed: '||data_nodes; - END IF; - - IF EXISTS(SELECT FROM _timescaledb_catalog.metadata WHERE key = 'dist_uuid') THEN - RAISE USING - ERRCODE = 'feature_not_supported', - MESSAGE = 'cannot upgrade because multi-node has been removed in 2.14.0', - DETAIL = 'This node appears to be part of a multi-node installation'; - END IF; -END $$; - -DROP FUNCTION IF EXISTS _timescaledb_functions.ping_data_node; -DROP FUNCTION IF EXISTS _timescaledb_internal.ping_data_node; -DROP FUNCTION IF EXISTS _timescaledb_functions.remote_txn_heal_data_node; -DROP FUNCTION IF EXISTS _timescaledb_internal.remote_txn_heal_data_node; - -DROP FUNCTION IF EXISTS _timescaledb_functions.set_dist_id; -DROP FUNCTION IF EXISTS _timescaledb_internal.set_dist_id; -DROP FUNCTION IF EXISTS _timescaledb_functions.set_peer_dist_id; -DROP FUNCTION IF EXISTS _timescaledb_internal.set_peer_dist_id; -DROP FUNCTION IF EXISTS _timescaledb_functions.validate_as_data_node; -DROP FUNCTION IF EXISTS _timescaledb_internal.validate_as_data_node; -DROP FUNCTION IF EXISTS _timescaledb_functions.show_connection_cache; -DROP FUNCTION IF EXISTS _timescaledb_internal.show_connection_cache; - -DROP FUNCTION IF EXISTS @extschema@.create_hypertable(relation REGCLASS, time_column_name NAME, partitioning_column NAME, 
number_partitions INTEGER, associated_schema_name NAME, associated_table_prefix NAME, chunk_time_interval ANYELEMENT, create_default_indexes BOOLEAN, if_not_exists BOOLEAN, partitioning_func REGPROC, migrate_data BOOLEAN, chunk_target_size TEXT, chunk_sizing_func REGPROC, time_partitioning_func REGPROC, replication_factor INTEGER, data_nodes NAME[], distributed BOOLEAN); - -CREATE FUNCTION @extschema@.create_hypertable( - relation REGCLASS, - time_column_name NAME, - partitioning_column NAME = NULL, - number_partitions INTEGER = NULL, - associated_schema_name NAME = NULL, - associated_table_prefix NAME = NULL, - chunk_time_interval ANYELEMENT = NULL::bigint, - create_default_indexes BOOLEAN = TRUE, - if_not_exists BOOLEAN = FALSE, - partitioning_func REGPROC = NULL, - migrate_data BOOLEAN = FALSE, - chunk_target_size TEXT = NULL, - chunk_sizing_func REGPROC = '_timescaledb_functions.calculate_chunk_interval'::regproc, - time_partitioning_func REGPROC = NULL -) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create' LANGUAGE C VOLATILE; - -DROP FUNCTION IF EXISTS @extschema@.create_distributed_hypertable; - -DROP FUNCTION IF EXISTS @extschema@.add_data_node; -DROP FUNCTION IF EXISTS @extschema@.delete_data_node; -DROP FUNCTION IF EXISTS @extschema@.attach_data_node; -DROP FUNCTION IF EXISTS @extschema@.detach_data_node; -DROP FUNCTION IF EXISTS @extschema@.alter_data_node; - -DROP PROCEDURE IF EXISTS @extschema@.distributed_exec; -DROP FUNCTION IF EXISTS @extschema@.create_distributed_restore_point; -DROP FUNCTION IF EXISTS @extschema@.set_replication_factor; - -CREATE TABLE _timescaledb_catalog.compression_settings ( - relid regclass NOT NULL, - segmentby text[], - orderby text[], - orderby_desc bool[], - orderby_nullsfirst bool[], - CONSTRAINT compression_settings_pkey PRIMARY KEY (relid), - CONSTRAINT compression_settings_check_segmentby CHECK (array_ndims(segmentby) = 1), - CONSTRAINT compression_settings_check_orderby_null CHECK ( (orderby IS NULL AND orderby_desc IS NULL AND orderby_nullsfirst IS NULL) OR (orderby IS NOT NULL AND orderby_desc IS NOT NULL AND orderby_nullsfirst IS NOT NULL) ), - CONSTRAINT compression_settings_check_orderby_cardinality CHECK (array_ndims(orderby) = 1 AND array_ndims(orderby_desc) = 1 AND array_ndims(orderby_nullsfirst) = 1 AND cardinality(orderby) = cardinality(orderby_desc) AND cardinality(orderby) = cardinality(orderby_nullsfirst)) -); - -INSERT INTO _timescaledb_catalog.compression_settings(relid, segmentby, orderby, orderby_desc, orderby_nullsfirst) - SELECT - format('%I.%I', ht.schema_name, ht.table_name)::regclass, - array_agg(attname ORDER BY segmentby_column_index) FILTER(WHERE segmentby_column_index >= 1) AS compress_segmentby, - array_agg(attname ORDER BY orderby_column_index) FILTER(WHERE orderby_column_index >= 1) AS compress_orderby, - array_agg(NOT orderby_asc ORDER BY orderby_column_index) FILTER(WHERE orderby_column_index >= 1) AS compress_orderby_desc, - array_agg(orderby_nullsfirst ORDER BY orderby_column_index) FILTER(WHERE orderby_column_index >= 1) AS compress_orderby_nullsfirst - FROM _timescaledb_catalog.hypertable_compression hc - INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = hc.hypertable_id - GROUP BY hypertable_id, ht.schema_name, ht.table_name; - -GRANT SELECT ON _timescaledb_catalog.compression_settings TO PUBLIC; -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_settings', ''); - -ALTER EXTENSION timescaledb DROP 
TABLE _timescaledb_catalog.hypertable_compression; -DROP VIEW IF EXISTS timescaledb_information.compression_settings; -DROP TABLE _timescaledb_catalog.hypertable_compression; - -DROP FOREIGN DATA WRAPPER IF EXISTS timescaledb_fdw; -DROP FUNCTION IF EXISTS @extschema@.timescaledb_fdw_handler(); -DROP FUNCTION IF EXISTS @extschema@.timescaledb_fdw_validator(text[], oid); - - -DROP FUNCTION IF EXISTS _timescaledb_functions.create_chunk_replica_table; -DROP FUNCTION IF EXISTS _timescaledb_functions.chunk_drop_replica; -DROP PROCEDURE IF EXISTS _timescaledb_functions.wait_subscription_sync; -DROP FUNCTION IF EXISTS _timescaledb_functions.health; -DROP FUNCTION IF EXISTS _timescaledb_functions.drop_stale_chunks; - -DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_replica_table; -DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_drop_replica; -DROP PROCEDURE IF EXISTS _timescaledb_internal.wait_subscription_sync; -DROP FUNCTION IF EXISTS _timescaledb_internal.health; -DROP FUNCTION IF EXISTS _timescaledb_internal.drop_stale_chunks; - -ALTER TABLE _timescaledb_catalog.remote_txn DROP CONSTRAINT remote_txn_remote_transaction_id_check; - -DROP TYPE IF EXISTS @extschema@.rxid CASCADE; -DROP FUNCTION IF EXISTS _timescaledb_functions.rxid_in; -DROP FUNCTION IF EXISTS _timescaledb_functions.rxid_out; - -DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_hypertable_info; -DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_chunk_info; -DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_compressed_chunk_stats; -DROP FUNCTION IF EXISTS _timescaledb_functions.data_node_index_size; -DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_hypertable_info; -DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_chunk_info; -DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_compressed_chunk_stats; -DROP FUNCTION IF EXISTS _timescaledb_internal.data_node_index_size; - -DROP FUNCTION IF EXISTS timescaledb_experimental.block_new_chunks; -DROP FUNCTION IF EXISTS timescaledb_experimental.allow_new_chunks; -DROP FUNCTION IF EXISTS timescaledb_experimental.subscription_exec; -DROP PROCEDURE IF EXISTS timescaledb_experimental.move_chunk; -DROP PROCEDURE IF EXISTS timescaledb_experimental.copy_chunk; -DROP PROCEDURE IF EXISTS timescaledb_experimental.cleanup_copy_chunk_operation; - -DROP FUNCTION IF EXISTS _timescaledb_functions.set_chunk_default_data_node; -DROP FUNCTION IF EXISTS _timescaledb_internal.set_chunk_default_data_node; - -DROP FUNCTION IF EXISTS _timescaledb_functions.drop_dist_ht_invalidation_trigger; -DROP FUNCTION IF EXISTS _timescaledb_internal.drop_dist_ht_invalidation_trigger; - --- remove multinode catalog tables -DROP VIEW IF EXISTS timescaledb_information.chunks; -DROP VIEW IF EXISTS timescaledb_information.data_nodes; -DROP VIEW IF EXISTS timescaledb_information.hypertables; -DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; - -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.remote_txn; -DROP TABLE _timescaledb_catalog.remote_txn; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable_data_node; -DROP TABLE _timescaledb_catalog.hypertable_data_node; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk_data_node; -DROP TABLE _timescaledb_catalog.chunk_data_node; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.chunk_copy_operation; -DROP TABLE _timescaledb_catalog.chunk_copy_operation; -ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq; -DROP 
SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.dimension_partition; -DROP TABLE _timescaledb_catalog.dimension_partition; - -DROP FUNCTION IF EXISTS _timescaledb_functions.hypertable_remote_size; -DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_remote_size; -DROP FUNCTION IF EXISTS _timescaledb_functions.chunks_remote_size; -DROP FUNCTION IF EXISTS _timescaledb_internal.chunks_remote_size; -DROP FUNCTION IF EXISTS _timescaledb_functions.indexes_remote_size; -DROP FUNCTION IF EXISTS _timescaledb_internal.indexes_remote_size; -DROP FUNCTION IF EXISTS _timescaledb_functions.compressed_chunk_remote_stats; -DROP FUNCTION IF EXISTS _timescaledb_internal.compressed_chunk_remote_stats; - --- rebuild _timescaledb_catalog.hypertable -ALTER TABLE _timescaledb_config.bgw_job - DROP CONSTRAINT bgw_job_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk - DROP CONSTRAINT chunk_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.chunk_index - DROP CONSTRAINT chunk_index_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.continuous_agg - DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey, - DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function - DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold - DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.dimension - DROP CONSTRAINT dimension_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.hypertable - DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey; -ALTER TABLE _timescaledb_catalog.tablespace - DROP CONSTRAINT tablespace_hypertable_id_fkey; - -DROP VIEW IF EXISTS timescaledb_information.hypertables; -DROP VIEW IF EXISTS timescaledb_information.job_stats; -DROP VIEW IF EXISTS timescaledb_information.jobs; -DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates; -DROP VIEW IF EXISTS timescaledb_information.chunks; -DROP VIEW IF EXISTS timescaledb_information.dimensions; -DROP VIEW IF EXISTS timescaledb_information.compression_settings; -DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size; -DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats; -DROP VIEW IF EXISTS timescaledb_experimental.chunk_replication_status; -DROP VIEW IF EXISTS timescaledb_experimental.policies; - --- recreate table -CREATE TABLE _timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable; -CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq; - -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; -ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq; - -SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable - -DROP TABLE _timescaledb_catalog.hypertable; - -CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1; -SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value; -DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value; - -CREATE TABLE _timescaledb_catalog.hypertable ( - id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'), - schema_name name NOT NULL, - 
table_name name NOT NULL, - associated_schema_name name NOT NULL, - associated_table_prefix name NOT NULL, - num_dimensions smallint NOT NULL, - chunk_sizing_func_schema name NOT NULL, - chunk_sizing_func_name name NOT NULL, - chunk_target_size bigint NOT NULL, -- size in bytes - compression_state smallint NOT NULL DEFAULT 0, - compressed_hypertable_id integer, - status integer NOT NULL DEFAULT 0 -); - -SET timescaledb.restoring = off; - -INSERT INTO _timescaledb_catalog.hypertable ( - id, - schema_name, - table_name, - associated_schema_name, - associated_table_prefix, - num_dimensions, - chunk_sizing_func_schema, - chunk_sizing_func_name, - chunk_target_size, - compression_state, - compressed_hypertable_id -) -SELECT - id, - schema_name, - table_name, - associated_schema_name, - associated_table_prefix, - num_dimensions, - chunk_sizing_func_schema, - chunk_sizing_func_name, - chunk_target_size, - compression_state, - compressed_hypertable_id -FROM - _timescaledb_catalog.hypertable_tmp -ORDER BY id; - -UPDATE _timescaledb_catalog.hypertable h -SET status = 3 -WHERE EXISTS ( - SELECT FROM _timescaledb_catalog.chunk c WHERE c.osm_chunk AND c.hypertable_id = h.id -); - -ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id; -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', ''); - -GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC; -GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC; - -DROP TABLE _timescaledb_catalog.hypertable_tmp; --- now add any constraints -ALTER TABLE _timescaledb_catalog.hypertable - ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix), - ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name), - ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'), - ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2), - ADD CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0), - ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)), - ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id); - -GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC; - --- 3. 
reestablish constraints on other tables -ALTER TABLE _timescaledb_config.bgw_job - ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.chunk - ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id); -ALTER TABLE _timescaledb_catalog.chunk_index - ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.continuous_agg - ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE, - ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function - ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold - ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.dimension - ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.tablespace - ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE; - -CREATE SCHEMA _timescaledb_debug; - --- Migrate existing compressed hypertables to new internal format -DO $$ -DECLARE - chunk regclass; - hypertable regclass; - ht_id integer; - index regclass; - column_name name; - cmd text; -BEGIN - SET timescaledb.restoring TO ON; - - -- Detach compressed chunks from their parent hypertables - FOR chunk, hypertable, ht_id IN - SELECT - format('%I.%I',ch.schema_name,ch.table_name)::regclass chunk, - format('%I.%I',ht.schema_name,ht.table_name)::regclass hypertable, - ht.id - FROM _timescaledb_catalog.chunk ch - INNER JOIN _timescaledb_catalog.hypertable ht_uncomp - ON ch.hypertable_id = ht_uncomp.compressed_hypertable_id - INNER JOIN _timescaledb_catalog.hypertable ht - ON ht.id = ht_uncomp.compressed_hypertable_id - LOOP - - cmd := format('ALTER TABLE %s NO INHERIT %s', chunk, hypertable); - EXECUTE cmd; - -- remove references to indexes from the compressed hypertable - DELETE FROM _timescaledb_catalog.chunk_index WHERE hypertable_id = ht_id; - - END LOOP; - - - FOR hypertable IN - SELECT - format('%I.%I',ht.schema_name,ht.table_name)::regclass hypertable - FROM _timescaledb_catalog.hypertable ht_uncomp - INNER JOIN _timescaledb_catalog.hypertable ht - ON ht.id = ht_uncomp.compressed_hypertable_id - LOOP - - -- remove indexes from the compressed hypertable (but not chunks) - FOR index IN - SELECT indexrelid::regclass FROM pg_index WHERE indrelid = hypertable - LOOP - cmd := format('DROP INDEX %s', index); - EXECUTE cmd; - END LOOP; - - -- remove columns from the compressed hypertable (but not chunks) - FOR column_name IN - SELECT attname FROM pg_attribute WHERE attrelid = hypertable AND attnum > 0 - LOOP - cmd := format('ALTER TABLE %s DROP COLUMN %I', hypertable, column_name); - EXECUTE cmd; - END LOOP; - - END 
LOOP; - - SET timescaledb.restoring TO OFF; -END $$; - -DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_constraint_add_table_fk_constraint; -DROP FUNCTION IF EXISTS _timescaledb_functions.hypertable_constraint_add_table_fk_constraint; - --- only define stub here, actual code will be filled in at end of update script -CREATE FUNCTION _timescaledb_functions.constraint_clone(constraint_oid OID,target_oid REGCLASS) RETURNS VOID LANGUAGE PLPGSQL AS $$BEGIN END$$ SET search_path TO pg_catalog, pg_temp; - -DROP FUNCTION IF EXISTS _timescaledb_functions.chunks_in; -DROP FUNCTION IF EXISTS _timescaledb_internal.chunks_in; - -CREATE FUNCTION _timescaledb_functions.metadata_insert_trigger() RETURNS TRIGGER LANGUAGE PLPGSQL -AS $$ -BEGIN - IF EXISTS (SELECT FROM _timescaledb_catalog.metadata WHERE key = NEW.key) THEN - UPDATE _timescaledb_catalog.metadata SET value = NEW.value WHERE key = NEW.key; - RETURN NULL; - END IF; - RETURN NEW; -END -$$ SET search_path TO pg_catalog, pg_temp; - -CREATE TRIGGER metadata_insert_trigger BEFORE INSERT ON _timescaledb_catalog.metadata FOR EACH ROW EXECUTE PROCEDURE _timescaledb_functions.metadata_insert_trigger(); - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$ WHERE key <> 'uuid' $$); - --- Remove unwanted entries from extconfig and extcondition in pg_extension --- We use ALTER EXTENSION DROP TABLE to remove these entries. -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_cache.cache_inval_hypertable; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_cache.cache_inval_extension; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_cache.cache_inval_bgw_job; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_internal.job_errors; - --- Associate the above tables back to keep the dependencies safe -ALTER EXTENSION timescaledb ADD TABLE _timescaledb_cache.cache_inval_hypertable; -ALTER EXTENSION timescaledb ADD TABLE _timescaledb_cache.cache_inval_extension; -ALTER EXTENSION timescaledb ADD TABLE _timescaledb_cache.cache_inval_bgw_job; -ALTER EXTENSION timescaledb ADD TABLE _timescaledb_internal.job_errors; - -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable; -ALTER EXTENSION timescaledb ADD TABLE _timescaledb_catalog.hypertable; -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', 'WHERE id >= 1'); - -CREATE FUNCTION _timescaledb_functions.relation_approximate_size(relation REGCLASS) -RETURNS TABLE (total_size BIGINT, heap_size BIGINT, index_size BIGINT, toast_size BIGINT) -AS '@MODULE_PATHNAME@', 'ts_relation_approximate_size' LANGUAGE C STRICT VOLATILE; - -CREATE FUNCTION @extschema@.hypertable_approximate_detailed_size(relation REGCLASS) -RETURNS TABLE (table_bytes BIGINT, index_bytes BIGINT, toast_bytes BIGINT, total_bytes BIGINT) -AS '@MODULE_PATHNAME@', 'ts_hypertable_approximate_size' LANGUAGE C VOLATILE; - ---- returns approximate total-bytes for a hypertable (includes table + index) -CREATE FUNCTION @extschema@.hypertable_approximate_size( - hypertable REGCLASS) -RETURNS BIGINT -LANGUAGE SQL VOLATILE STRICT AS -$BODY$ - SELECT sum(total_bytes)::bigint - FROM @extschema@.hypertable_approximate_detailed_size(hypertable); -$BODY$ SET search_path TO pg_catalog, pg_temp; - -DROP FUNCTION IF EXISTS @extschema@.compress_chunk; -CREATE FUNCTION @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN = true, recompress BOOLEAN = false) RETURNS REGCLASS AS '' LANGUAGE SQL SET search_path TO pg_catalog, pg_temp; - diff --git 
a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index a3a6600ede3..e69de29bb2d 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -1,797 +0,0 @@ --- check whether we can safely downgrade compression setup -DO $$ -DECLARE - hypertable regclass; - ht_uncomp regclass; - chunk_relids oid[]; - ht_id integer; -BEGIN - - FOR hypertable, ht_uncomp, ht_id IN - SELECT - format('%I.%I',ht.schema_name,ht.table_name)::regclass, - format('%I.%I',ht_uncomp.schema_name,ht_uncomp.table_name)::regclass, - ht.id - FROM _timescaledb_catalog.hypertable ht_uncomp - INNER JOIN _timescaledb_catalog.hypertable ht - ON ht.id = ht_uncomp.compressed_hypertable_id - LOOP - - -- hypertables need to at least have 1 compressed chunk so we can restore the columns - IF NOT EXISTS(SELECT FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id) THEN - RAISE USING - ERRCODE = 'feature_not_supported', - MESSAGE = 'Cannot downgrade compressed hypertables with no compressed chunks. Disable compression on the affected hypertable before downgrading.', - DETAIL = 'The following hypertable is affected: '|| ht_uncomp::text; - END IF; - - chunk_relids := array(SELECT format('%I.%I',schema_name,table_name)::regclass FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id); - - -- any hypertable with distinct compression settings cannot be downgraded - IF EXISTS ( - SELECT FROM ( - SELECT DISTINCT segmentby, orderby, orderby_desc, orderby_nullsfirst - FROM _timescaledb_catalog.compression_settings - WHERE relid = hypertable OR relid = ANY(chunk_relids) - ) dist_settings HAVING count(*) > 1 - ) THEN - RAISE USING - ERRCODE = 'feature_not_supported', - MESSAGE = 'Cannot downgrade hypertables with distinct compression settings. Decompress the affected hypertable before downgrading.', - DETAIL = 'The following hypertable is affected: '|| ht_uncomp::text; - END IF; - - END LOOP; -END -$$; - -CREATE FUNCTION _timescaledb_functions.tmp_resolve_indkeys(oid,int2[]) RETURNS text[] LANGUAGE SQL AS $$ - SELECT array_agg(attname) - FROM ( - SELECT attname - FROM (SELECT unnest($2) attnum) indkeys - JOIN LATERAL ( - SELECT attname FROM pg_attribute att WHERE att.attnum=indkeys.attnum AND att.attrelid=$1 - ) r ON true - ) resolve; -$$ SET search_path TO pg_catalog, pg_temp; - -DO $$ -DECLARE - chunk regclass; - hypertable regclass; - ht_id integer; - chunk_id integer; - _index regclass; - ht_index regclass; - chunk_index regclass; - index_name name; - chunk_index_name name; - _indkey text[]; - column_name name; - column_type regtype; - cmd text; -BEGIN - SET timescaledb.restoring TO ON; - - FOR hypertable, ht_id IN - SELECT - format('%I.%I',ht.schema_name,ht.table_name)::regclass, - ht.id - FROM _timescaledb_catalog.hypertable ht_uncomp - INNER JOIN _timescaledb_catalog.hypertable ht - ON ht.id = ht_uncomp.compressed_hypertable_id - LOOP - - -- get first chunk which we use as template for restoring columns and indexes - SELECT format('%I.%I',schema_name,table_name)::regclass INTO STRICT chunk FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id ORDER by id LIMIT 1; - - -- restore columns from the compressed hypertable - FOR column_name, column_type IN - SELECT attname, atttypid::regtype FROM pg_attribute WHERE attrelid = chunk AND attnum > 0 - LOOP - cmd := format('ALTER TABLE %s ADD COLUMN %I %s', hypertable, column_name, column_type); - EXECUTE cmd; - END LOOP; - - -- restore indexes on the compressed hypertable - FOR _index, _indkey IN - SELECT indexrelid::regclass, 
_timescaledb_functions.tmp_resolve_indkeys(indrelid, indkey) FROM pg_index WHERE indrelid = chunk - LOOP - SELECT relname INTO STRICT index_name FROM pg_class WHERE oid = _index; - cmd := pg_get_indexdef(_index); - cmd := replace(cmd, format(' INDEX %s ON ', index_name), ' INDEX ON '); - cmd := replace(cmd, chunk::text, hypertable::text); - EXECUTE cmd; - - -- get indexrelid of index we just created on hypertable - SELECT indexrelid INTO STRICT ht_index FROM pg_index WHERE indrelid = hypertable AND _timescaledb_functions.tmp_resolve_indkeys(hypertable, indkey) = _indkey; - SELECT relname INTO STRICT index_name FROM pg_class WHERE oid = ht_index; - - -- restore indexes in our catalog - FOR chunk, chunk_id IN - SELECT format('%I.%I',schema_name,table_name)::regclass, id FROM _timescaledb_catalog.chunk WHERE hypertable_id = ht_id - LOOP - SELECT indexrelid INTO STRICT chunk_index FROM pg_index WHERE indrelid = chunk AND _timescaledb_functions.tmp_resolve_indkeys(chunk, indkey) = _indkey; - SELECT relname INTO STRICT chunk_index_name FROM pg_class WHERE oid = chunk_index; - INSERT INTO _timescaledb_catalog.chunk_index (chunk_id, index_name, hypertable_id, hypertable_index_name) - VALUES (chunk_id, chunk_index_name, ht_id, index_name); - END LOOP; - - END LOOP; - - -- restore inheritance - cmd := format('ALTER TABLE %s INHERIT %s', chunk, hypertable); - EXECUTE cmd; - - END LOOP; - - SET timescaledb.restoring TO OFF; -END $$; - -DROP FUNCTION _timescaledb_functions.tmp_resolve_indkeys; - -CREATE FUNCTION _timescaledb_functions.ping_data_node(node_name NAME, timeout INTERVAL = NULL) RETURNS BOOLEAN -AS '@MODULE_PATHNAME@', 'ts_data_node_ping' LANGUAGE C VOLATILE; - -CREATE FUNCTION _timescaledb_functions.remote_txn_heal_data_node(foreign_server_oid oid) -RETURNS INT -AS '@MODULE_PATHNAME@', 'ts_remote_txn_heal_data_node' -LANGUAGE C STRICT; - -CREATE FUNCTION _timescaledb_functions.set_dist_id(dist_id UUID) RETURNS BOOL -AS '@MODULE_PATHNAME@', 'ts_dist_set_id' LANGUAGE C VOLATILE STRICT; - -CREATE FUNCTION _timescaledb_functions.set_peer_dist_id(dist_id UUID) RETURNS BOOL -AS '@MODULE_PATHNAME@', 'ts_dist_set_peer_id' LANGUAGE C VOLATILE STRICT; - --- Function to validate that a node has local settings to function as --- a data node. Throws error if validation fails. 
-CREATE FUNCTION _timescaledb_functions.validate_as_data_node() RETURNS void -AS '@MODULE_PATHNAME@', 'ts_dist_validate_as_data_node' LANGUAGE C VOLATILE STRICT; - -CREATE FUNCTION _timescaledb_functions.show_connection_cache() -RETURNS TABLE ( - node_name name, - user_name name, - host text, - port int, - database name, - backend_pid int, - connection_status text, - transaction_status text, - transaction_depth int, - processing boolean, - invalidated boolean) -AS '@MODULE_PATHNAME@', 'ts_remote_connection_cache_show' LANGUAGE C VOLATILE STRICT; - -DROP FUNCTION IF EXISTS @extschema@.create_hypertable(relation REGCLASS, time_column_name NAME, partitioning_column NAME, number_partitions INTEGER, associated_schema_name NAME, associated_table_prefix NAME, chunk_time_interval ANYELEMENT, create_default_indexes BOOLEAN, if_not_exists BOOLEAN, partitioning_func REGPROC, migrate_data BOOLEAN, chunk_target_size TEXT, chunk_sizing_func REGPROC, time_partitioning_func REGPROC); - -CREATE FUNCTION @extschema@.create_hypertable( - relation REGCLASS, - time_column_name NAME, - partitioning_column NAME = NULL, - number_partitions INTEGER = NULL, - associated_schema_name NAME = NULL, - associated_table_prefix NAME = NULL, - chunk_time_interval ANYELEMENT = NULL::bigint, - create_default_indexes BOOLEAN = TRUE, - if_not_exists BOOLEAN = FALSE, - partitioning_func REGPROC = NULL, - migrate_data BOOLEAN = FALSE, - chunk_target_size TEXT = NULL, - chunk_sizing_func REGPROC = '_timescaledb_functions.calculate_chunk_interval'::regproc, - time_partitioning_func REGPROC = NULL, - replication_factor INTEGER = NULL, - data_nodes NAME[] = NULL, - distributed BOOLEAN = NULL -) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_create' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.create_distributed_hypertable( - relation REGCLASS, - time_column_name NAME, - partitioning_column NAME = NULL, - number_partitions INTEGER = NULL, - associated_schema_name NAME = NULL, - associated_table_prefix NAME = NULL, - chunk_time_interval ANYELEMENT = NULL::bigint, - create_default_indexes BOOLEAN = TRUE, - if_not_exists BOOLEAN = FALSE, - partitioning_func REGPROC = NULL, - migrate_data BOOLEAN = FALSE, - chunk_target_size TEXT = NULL, - chunk_sizing_func REGPROC = '_timescaledb_functions.calculate_chunk_interval'::regproc, - time_partitioning_func REGPROC = NULL, - replication_factor INTEGER = NULL, - data_nodes NAME[] = NULL -) RETURNS TABLE(hypertable_id INT, schema_name NAME, table_name NAME, created BOOL) AS '@MODULE_PATHNAME@', 'ts_hypertable_distributed_create' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.add_data_node( - node_name NAME, - host TEXT, - database NAME = NULL, - port INTEGER = NULL, - if_not_exists BOOLEAN = FALSE, - bootstrap BOOLEAN = TRUE, - password TEXT = NULL -) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME, - node_created BOOL, database_created BOOL, extension_created BOOL) -AS '@MODULE_PATHNAME@', 'ts_data_node_add' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.delete_data_node( - node_name NAME, - if_exists BOOLEAN = FALSE, - force BOOLEAN = FALSE, - repartition BOOLEAN = TRUE, - drop_database BOOLEAN = FALSE -) RETURNS BOOLEAN AS '@MODULE_PATHNAME@', 'ts_data_node_delete' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.attach_data_node( - node_name NAME, - hypertable REGCLASS, - if_not_attached BOOLEAN = FALSE, - repartition BOOLEAN = TRUE -) RETURNS TABLE(hypertable_id INTEGER, 
node_hypertable_id INTEGER, node_name NAME) -AS '@MODULE_PATHNAME@', 'ts_data_node_attach' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.detach_data_node( - node_name NAME, - hypertable REGCLASS = NULL, - if_attached BOOLEAN = FALSE, - force BOOLEAN = FALSE, - repartition BOOLEAN = TRUE, - drop_remote_data BOOLEAN = FALSE -) RETURNS INTEGER -AS '@MODULE_PATHNAME@', 'ts_data_node_detach' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.alter_data_node( - node_name NAME, - host TEXT = NULL, - database NAME = NULL, - port INTEGER = NULL, - available BOOLEAN = NULL -) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME, available BOOLEAN) - -AS '@MODULE_PATHNAME@', 'ts_data_node_alter' LANGUAGE C VOLATILE; -CREATE PROCEDURE @extschema@.distributed_exec( - query TEXT, - node_list name[] = NULL, - transactional BOOLEAN = TRUE) -AS '@MODULE_PATHNAME@', 'ts_distributed_exec' LANGUAGE C; - -CREATE FUNCTION @extschema@.create_distributed_restore_point( - name TEXT -) RETURNS TABLE(node_name NAME, node_type TEXT, restore_point pg_lsn) -AS '@MODULE_PATHNAME@', 'ts_create_distributed_restore_point' LANGUAGE C VOLATILE STRICT; - -CREATE FUNCTION @extschema@.set_replication_factor( - hypertable REGCLASS, - replication_factor INTEGER -) RETURNS VOID -AS '@MODULE_PATHNAME@', 'ts_hypertable_distributed_set_replication_factor' LANGUAGE C VOLATILE; - -CREATE TABLE _timescaledb_catalog.hypertable_compression ( - hypertable_id integer NOT NULL, - attname name NOT NULL, - compression_algorithm_id smallint, - segmentby_column_index smallint, - orderby_column_index smallint, - orderby_asc boolean, - orderby_nullsfirst boolean, - -- table constraints - CONSTRAINT hypertable_compression_pkey PRIMARY KEY (hypertable_id, attname), - CONSTRAINT hypertable_compression_hypertable_id_orderby_column_index_key UNIQUE (hypertable_id, orderby_column_index), - CONSTRAINT hypertable_compression_hypertable_id_segmentby_column_index_key UNIQUE (hypertable_id, segmentby_column_index), - CONSTRAINT hypertable_compression_compression_algorithm_id_fkey FOREIGN KEY (compression_algorithm_id) REFERENCES _timescaledb_catalog.compression_algorithm (id) -); - -INSERT INTO _timescaledb_catalog.hypertable_compression( - hypertable_id, - attname, - compression_algorithm_id, - segmentby_column_index, - orderby_column_index, - orderby_asc, - orderby_nullsfirst -) SELECT - ht.id, - att.attname, - CASE - WHEN att.attname = ANY(cs.segmentby) THEN 0 - WHEN att.atttypid IN ('numeric'::regtype) THEN 1 - WHEN att.atttypid IN ('float4'::regtype,'float8'::regtype) THEN 3 - WHEN att.atttypid IN ('int2'::regtype,'int4'::regtype,'int8'::regtype,'date'::regtype,'timestamp'::regtype,'timestamptz'::regtype) THEN 4 - WHEN EXISTS(SELECT FROM pg_operator op WHERE op.oprname = '=' AND op.oprkind = 'b' AND op.oprcanhash = true AND op.oprleft = att.atttypid AND op.oprright = att.atttypid) THEN 2 - ELSE 1 - END AS compression_algorithm_id, - CASE WHEN att.attname = ANY(cs.segmentby) THEN array_position(cs.segmentby, att.attname::text) ELSE NULL END AS segmentby_column_index, - CASE WHEN att.attname = ANY(cs.orderby) THEN array_position(cs.orderby, att.attname::text) ELSE NULL END AS orderby_column_index, - CASE WHEN att.attname = ANY(cs.orderby) THEN NOT cs.orderby_desc[array_position(cs.orderby, att.attname::text)] ELSE false END AS orderby_asc, - CASE WHEN att.attname = ANY(cs.orderby) THEN cs.orderby_nullsfirst[array_position(cs.orderby, att.attname::text)] ELSE false END AS orderby_nullsfirst -FROM _timescaledb_catalog.hypertable ht 
-INNER JOIN _timescaledb_catalog.compression_settings cs ON cs.relid = format('%I.%I',ht.schema_name,ht.table_name)::regclass
-LEFT JOIN pg_attribute att ON att.attrelid = format('%I.%I',ht.schema_name,ht.table_name)::regclass AND attnum > 0
-WHERE compressed_hypertable_id IS NOT NULL;
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_compression', '');
-GRANT SELECT ON _timescaledb_catalog.hypertable_compression TO PUBLIC;
-
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.compression_settings;
-DROP VIEW timescaledb_information.compression_settings;
-ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_settings;
-DROP TABLE _timescaledb_catalog.compression_settings;
-
-CREATE FUNCTION @extschema@.timescaledb_fdw_handler() RETURNS fdw_handler AS '@MODULE_PATHNAME@', 'ts_timescaledb_fdw_handler' LANGUAGE C STRICT;
-CREATE FUNCTION @extschema@.timescaledb_fdw_validator(text[], oid) RETURNS void AS '@MODULE_PATHNAME@', 'ts_timescaledb_fdw_validator' LANGUAGE C STRICT;
-
-CREATE FOREIGN DATA WRAPPER timescaledb_fdw HANDLER @extschema@.timescaledb_fdw_handler VALIDATOR @extschema@.timescaledb_fdw_validator;
-
-CREATE FUNCTION _timescaledb_functions.create_chunk_replica_table(
- chunk REGCLASS,
- data_node_name NAME
-) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_chunk_create_replica_table' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION _timescaledb_functions.chunk_drop_replica(
- chunk REGCLASS,
- node_name NAME
-) RETURNS VOID
-AS '@MODULE_PATHNAME@', 'ts_chunk_drop_replica' LANGUAGE C VOLATILE;
-
-CREATE PROCEDURE _timescaledb_functions.wait_subscription_sync(
- schema_name NAME,
- table_name NAME,
- retry_count INT DEFAULT 18000,
- retry_delay_ms NUMERIC DEFAULT 0.200
-)
-LANGUAGE PLPGSQL AS
-$BODY$
-DECLARE
- in_sync BOOLEAN;
-BEGIN
- FOR i in 1 .. retry_count
- LOOP
- SELECT pgs.srsubstate = 'r'
- INTO in_sync
- FROM pg_subscription_rel pgs
- JOIN pg_class pgc ON relname = table_name
- JOIN pg_namespace n ON (n.OID = pgc.relnamespace)
- WHERE pgs.srrelid = pgc.oid AND schema_name = n.nspname;
-
- if (in_sync IS NULL OR NOT in_sync) THEN
- PERFORM pg_sleep(retry_delay_ms);
- ELSE
- RETURN;
- END IF;
- END LOOP;
- RAISE 'subscription sync wait timedout';
-END
-$BODY$ SET search_path TO pg_catalog, pg_temp;
-
-CREATE FUNCTION _timescaledb_functions.health() RETURNS
-TABLE (node_name NAME, healthy BOOL, in_recovery BOOL, error TEXT)
-AS '@MODULE_PATHNAME@', 'ts_health_check' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION _timescaledb_functions.drop_stale_chunks(
- node_name NAME,
- chunks integer[] = NULL
-) RETURNS VOID
-AS '@MODULE_PATHNAME@', 'ts_chunks_drop_stale' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION _timescaledb_functions.rxid_in(cstring) RETURNS @extschema@.rxid
- AS '@MODULE_PATHNAME@', 'ts_remote_txn_id_in' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
-
-CREATE FUNCTION _timescaledb_functions.rxid_out(@extschema@.rxid) RETURNS cstring
- AS '@MODULE_PATHNAME@', 'ts_remote_txn_id_out' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
-
-CREATE TYPE @extschema@.rxid (
- internallength = 16,
- input = _timescaledb_functions.rxid_in,
- output = _timescaledb_functions.rxid_out
-);
-
-CREATE FUNCTION _timescaledb_functions.data_node_hypertable_info(
- node_name NAME,
- schema_name_in name,
- table_name_in name
-)
-RETURNS TABLE (
- table_bytes bigint,
- index_bytes bigint,
- toast_bytes bigint,
- total_bytes bigint)
-AS '@MODULE_PATHNAME@', 'ts_dist_remote_hypertable_info' LANGUAGE C VOLATILE STRICT;
-
-CREATE FUNCTION _timescaledb_functions.data_node_chunk_info(
- node_name NAME,
- schema_name_in name,
- table_name_in name
-)
-RETURNS TABLE (
- chunk_id integer,
- chunk_schema name,
- chunk_name name,
- table_bytes bigint,
- index_bytes bigint,
- toast_bytes bigint,
- total_bytes bigint)
-AS '@MODULE_PATHNAME@', 'ts_dist_remote_chunk_info' LANGUAGE C VOLATILE STRICT;
-
-CREATE FUNCTION _timescaledb_functions.data_node_compressed_chunk_stats(node_name name, schema_name_in name, table_name_in name)
- RETURNS TABLE (
- chunk_schema name,
- chunk_name name,
- compression_status text,
- before_compression_table_bytes bigint,
- before_compression_index_bytes bigint,
- before_compression_toast_bytes bigint,
- before_compression_total_bytes bigint,
- after_compression_table_bytes bigint,
- after_compression_index_bytes bigint,
- after_compression_toast_bytes bigint,
- after_compression_total_bytes bigint
- )
-AS '@MODULE_PATHNAME@' , 'ts_dist_remote_compressed_chunk_info' LANGUAGE C VOLATILE STRICT;
-
-CREATE FUNCTION _timescaledb_functions.data_node_index_size(node_name name, schema_name_in name, index_name_in name)
-RETURNS TABLE ( hypertable_id INTEGER, total_bytes BIGINT)
-AS '@MODULE_PATHNAME@' , 'ts_dist_remote_hypertable_index_info' LANGUAGE C VOLATILE STRICT;
-
-CREATE FUNCTION timescaledb_experimental.block_new_chunks(data_node_name NAME, hypertable REGCLASS = NULL, force BOOLEAN = FALSE) RETURNS INTEGER
-AS '@MODULE_PATHNAME@', 'ts_data_node_block_new_chunks' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION timescaledb_experimental.allow_new_chunks(data_node_name NAME, hypertable REGCLASS = NULL) RETURNS INTEGER
-AS '@MODULE_PATHNAME@', 'ts_data_node_allow_new_chunks' LANGUAGE C VOLATILE;
-
-CREATE PROCEDURE timescaledb_experimental.move_chunk(
- chunk REGCLASS,
- source_node NAME = NULL,
- destination_node NAME = NULL,
- operation_id NAME = NULL)
-AS '@MODULE_PATHNAME@', 'ts_move_chunk_proc' LANGUAGE C;
-
-CREATE PROCEDURE timescaledb_experimental.copy_chunk(
- chunk REGCLASS,
- source_node NAME = NULL,
- destination_node NAME = NULL,
- operation_id NAME = NULL)
-AS '@MODULE_PATHNAME@', 'ts_copy_chunk_proc' LANGUAGE C;
-
-CREATE FUNCTION timescaledb_experimental.subscription_exec(
- subscription_command TEXT
-) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_subscription_exec' LANGUAGE C VOLATILE;
-
-CREATE PROCEDURE timescaledb_experimental.cleanup_copy_chunk_operation(
- operation_id NAME)
-AS '@MODULE_PATHNAME@', 'ts_copy_chunk_cleanup_proc' LANGUAGE C;
-
-CREATE FUNCTION _timescaledb_functions.set_chunk_default_data_node(chunk REGCLASS, node_name NAME) RETURNS BOOLEAN
-AS '@MODULE_PATHNAME@', 'ts_chunk_set_default_data_node' LANGUAGE C VOLATILE;
-
-CREATE FUNCTION _timescaledb_functions.drop_dist_ht_invalidation_trigger(
- raw_hypertable_id INTEGER
-) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_drop_dist_ht_invalidation_trigger' LANGUAGE C STRICT VOLATILE;
-
--- restore multinode catalog tables
-CREATE TABLE _timescaledb_catalog.remote_txn (
- data_node_name name, --this is really only to allow us to cleanup stuff on a per-node basis.
- remote_transaction_id text NOT NULL,
- -- table constraints
- CONSTRAINT remote_txn_pkey PRIMARY KEY (remote_transaction_id)
-);
-
-ALTER TABLE _timescaledb_catalog.remote_txn ADD CONSTRAINT remote_txn_remote_transaction_id_check CHECK (remote_transaction_id::@extschema@.rxid IS NOT NULL);
-
-CREATE INDEX remote_txn_data_node_name_idx ON _timescaledb_catalog.remote_txn (data_node_name);
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.remote_txn', '');
-GRANT SELECT ON TABLE _timescaledb_catalog.remote_txn TO PUBLIC;
-
-CREATE TABLE _timescaledb_catalog.hypertable_data_node (
- hypertable_id integer NOT NULL,
- node_hypertable_id integer NULL,
- node_name name NOT NULL,
- block_chunks boolean NOT NULL,
- -- table constraints
- CONSTRAINT hypertable_data_node_hypertable_id_node_name_key UNIQUE (hypertable_id, node_name),
- CONSTRAINT hypertable_data_node_node_hypertable_id_node_name_key UNIQUE (node_hypertable_id, node_name),
- CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id)
-);
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_data_node', '');
-GRANT SELECT ON TABLE _timescaledb_catalog.hypertable_data_node TO PUBLIC;
-
-CREATE TABLE _timescaledb_catalog.chunk_data_node (
- chunk_id integer NOT NULL,
- node_chunk_id integer NOT NULL,
- node_name name NOT NULL,
- -- table constraints
- CONSTRAINT chunk_data_node_chunk_id_node_name_key UNIQUE (chunk_id, node_name),
- CONSTRAINT chunk_data_node_node_chunk_id_node_name_key UNIQUE (node_chunk_id, node_name),
- CONSTRAINT chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id)
-);
-
-CREATE INDEX chunk_data_node_node_name_idx ON _timescaledb_catalog.chunk_data_node (node_name);
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_data_node', '');
-GRANT SELECT ON TABLE _timescaledb_catalog.chunk_data_node TO PUBLIC;
-
-CREATE SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq MINVALUE 1;
-GRANT SELECT ON SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq TO PUBLIC;
-
-CREATE TABLE _timescaledb_catalog.chunk_copy_operation (
- operation_id name NOT NULL, -- the publisher/subscriber identifier used
- backend_pid integer NOT NULL, -- the pid of the backend running this activity
- completed_stage name NOT NULL, -- the completed stage/step
- time_start timestamptz NOT NULL DEFAULT NOW(), -- start time of the activity
- chunk_id integer NOT NULL,
- compress_chunk_name name NOT NULL,
- source_node_name name NOT NULL,
- dest_node_name name NOT NULL,
- delete_on_source_node bool NOT NULL, -- is a move or copy activity
- -- table constraints
- CONSTRAINT chunk_copy_operation_pkey PRIMARY KEY (operation_id),
- CONSTRAINT chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
-);
-
-GRANT SELECT ON TABLE _timescaledb_catalog.chunk_copy_operation TO PUBLIC;
-
-CREATE TABLE _timescaledb_catalog.dimension_partition (
- dimension_id integer NOT NULL REFERENCES _timescaledb_catalog.dimension (id) ON DELETE CASCADE,
- range_start bigint NOT NULL,
- data_nodes name[] NULL,
- UNIQUE (dimension_id, range_start)
-);
-
-GRANT SELECT ON TABLE _timescaledb_catalog.dimension_partition TO PUBLIC;
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension_partition', '');
-CREATE FUNCTION _timescaledb_functions.hypertable_remote_size(
- schema_name_in name,
- table_name_in name)
-RETURNS TABLE (
- table_bytes bigint,
- index_bytes bigint,
- toast_bytes bigint,
- total_bytes bigint,
- node_name NAME)
-LANGUAGE SQL VOLATILE STRICT AS
-$BODY$
-$BODY$ SET search_path TO pg_catalog, pg_temp;
-
-CREATE FUNCTION _timescaledb_functions.chunks_remote_size(
- schema_name_in name,
- table_name_in name)
-RETURNS TABLE (
- chunk_id integer,
- chunk_schema NAME,
- chunk_name NAME,
- table_bytes bigint,
- index_bytes bigint,
- toast_bytes bigint,
- total_bytes bigint,
- node_name NAME)
-LANGUAGE SQL VOLATILE STRICT AS
-$BODY$
-$BODY$ SET search_path TO pg_catalog, pg_temp;
-
-CREATE FUNCTION _timescaledb_functions.indexes_remote_size(
- schema_name_in NAME,
- table_name_in NAME,
- index_name_in NAME
-)
-RETURNS BIGINT
-LANGUAGE SQL VOLATILE STRICT AS
-$BODY$
-$BODY$ SET search_path TO pg_catalog, pg_temp;
-
-CREATE FUNCTION _timescaledb_functions.compressed_chunk_remote_stats(schema_name_in name, table_name_in name)
- RETURNS TABLE (
- chunk_schema name,
- chunk_name name,
- compression_status text,
- before_compression_table_bytes bigint,
- before_compression_index_bytes bigint,
- before_compression_toast_bytes bigint,
- before_compression_total_bytes bigint,
- after_compression_table_bytes bigint,
- after_compression_index_bytes bigint,
- after_compression_toast_bytes bigint,
- after_compression_total_bytes bigint,
- node_name name)
- LANGUAGE SQL
- STABLE STRICT
- AS
-$BODY$
-$BODY$ SET search_path TO pg_catalog, pg_temp;
-
--- recreate the _timescaledb_catalog.hypertable table as new field was added
--- 1. drop CONSTRAINTS from other tables referencing the existing one
-ALTER TABLE _timescaledb_config.bgw_job
- DROP CONSTRAINT bgw_job_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.chunk
- DROP CONSTRAINT chunk_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.chunk_index
- DROP CONSTRAINT chunk_index_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.continuous_agg
- DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey,
- DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function
- DROP CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold
- DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.dimension
- DROP CONSTRAINT dimension_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.hypertable
- DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.hypertable_data_node
- DROP CONSTRAINT hypertable_data_node_hypertable_id_fkey;
-ALTER TABLE _timescaledb_catalog.tablespace
- DROP CONSTRAINT tablespace_hypertable_id_fkey;
-
--- drop dependent views
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.hypertables;
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.job_stats;
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.jobs;
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.continuous_aggregates;
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunks;
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.dimensions;
-ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.hypertable_chunk_local_size;
-ALTER EXTENSION timescaledb DROP VIEW _timescaledb_internal.compressed_chunk_stats;
-ALTER EXTENSION timescaledb DROP VIEW timescaledb_experimental.policies;
-
-DROP VIEW timescaledb_information.hypertables;
-DROP VIEW timescaledb_information.job_stats;
-DROP VIEW timescaledb_information.jobs;
-DROP VIEW timescaledb_information.continuous_aggregates;
-DROP VIEW timescaledb_information.chunks;
-DROP VIEW timescaledb_information.dimensions;
-DROP VIEW _timescaledb_internal.hypertable_chunk_local_size;
-DROP VIEW _timescaledb_internal.compressed_chunk_stats;
-DROP VIEW timescaledb_experimental.policies;
-
--- recreate table
-CREATE TABLE _timescaledb_catalog.hypertable_tmp AS SELECT * FROM _timescaledb_catalog.hypertable;
-CREATE TABLE _timescaledb_catalog.tmp_hypertable_seq_value AS SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq;
-
-ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable;
-ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq;
-
-SET timescaledb.restoring = on; -- must disable the hooks otherwise we can't do anything without the table _timescaledb_catalog.hypertable
-
-DROP TABLE _timescaledb_catalog.hypertable;
-
-CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1;
-SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_catalog.tmp_hypertable_seq_value;
-DROP TABLE _timescaledb_catalog.tmp_hypertable_seq_value;
-
-CREATE TABLE _timescaledb_catalog.hypertable (
- id INTEGER PRIMARY KEY NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'),
- schema_name name NOT NULL,
- table_name name NOT NULL,
- associated_schema_name name NOT NULL,
- associated_table_prefix name NOT NULL,
- num_dimensions smallint NOT NULL,
- chunk_sizing_func_schema name NOT NULL,
- chunk_sizing_func_name name NOT NULL,
- chunk_target_size bigint NOT NULL, -- size in bytes
- compression_state smallint NOT NULL DEFAULT 0,
- compressed_hypertable_id integer,
- replication_factor smallint NULL,
- status int NOT NULL DEFAULT 0
-);
-
-SET timescaledb.restoring = off;
-
-INSERT INTO _timescaledb_catalog.hypertable (
- id,
- schema_name,
- table_name,
- associated_schema_name,
- associated_table_prefix,
- num_dimensions,
- chunk_sizing_func_schema,
- chunk_sizing_func_name,
- chunk_target_size,
- compression_state,
- compressed_hypertable_id
-)
-SELECT
- id,
- schema_name,
- table_name,
- associated_schema_name,
- associated_table_prefix,
- num_dimensions,
- chunk_sizing_func_schema,
- chunk_sizing_func_name,
- chunk_target_size,
- compression_state,
- compressed_hypertable_id
-FROM
- _timescaledb_catalog.hypertable_tmp
-ORDER BY id;
-
-ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id;
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', '');
-
-GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC;
-GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC;
-
-DROP TABLE _timescaledb_catalog.hypertable_tmp;
--- now add any constraints
-ALTER TABLE _timescaledb_catalog.hypertable
- -- ADD CONSTRAINT hypertable_pkey PRIMARY KEY (id),
- ADD CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix),
- ADD CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name),
- ADD CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'),
- -- internal compressed hypertables have compression state = 2
- ADD CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2),
- ADD CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0),
- ADD CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)),
- ADD CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1),
- ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id);
-
-GRANT SELECT ON TABLE _timescaledb_catalog.hypertable TO PUBLIC;
-
--- 3. reestablish constraints on other tables
-ALTER TABLE _timescaledb_config.bgw_job
- ADD CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.chunk
- ADD CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id);
-ALTER TABLE _timescaledb_catalog.chunk_index
- ADD CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.continuous_agg
- ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
- ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.continuous_aggs_bucket_function
- ADD CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold
- ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.dimension
- ADD CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.hypertable_compression
- ADD CONSTRAINT hypertable_compression_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-ALTER TABLE _timescaledb_catalog.hypertable_data_node
- ADD CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id);
-ALTER TABLE _timescaledb_catalog.tablespace
- ADD CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
-
-DROP FUNCTION IF EXISTS _timescaledb_debug.extension_state;
-DROP SCHEMA IF EXISTS _timescaledb_debug;
-
-DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_constraint_add_table_fk_constraint;
-
-DROP FUNCTION _timescaledb_functions.constraint_clone;
-
-CREATE FUNCTION _timescaledb_functions.hypertable_constraint_add_table_fk_constraint(user_ht_constraint_name name,user_ht_schema_name name,user_ht_table_name name,compress_ht_id integer) RETURNS void LANGUAGE PLPGSQL AS $$BEGIN END$$ SET search_path TO pg_catalog,pg_temp;
-
-CREATE FUNCTION _timescaledb_functions.chunks_in(record RECORD, chunks INTEGER[]) RETURNS BOOL
-AS 'BEGIN END' LANGUAGE PLPGSQL SET search_path TO pg_catalog,pg_temp;
-
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$
- WHERE KEY = 'exported_uuid' $$);
-
-DROP TRIGGER metadata_insert_trigger ON _timescaledb_catalog.metadata;
-DROP FUNCTION _timescaledb_functions.metadata_insert_trigger();
-
-DROP FUNCTION IF EXISTS _timescaledb_functions.get_orderby_defaults(regclass,text[]);
-DROP FUNCTION IF EXISTS _timescaledb_functions.get_segmentby_defaults(regclass);
-
---- re-include in the pg_dump config
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_cache.cache_inval_hypertable', '');
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_cache.cache_inval_extension', '');
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_cache.cache_inval_bgw_job', '');
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_internal.job_errors', '');
-
--- Remove unwanted entry from extconfig and extcondition in pg_extension
-ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable;
--- Associate the above table back to keep the dependencies safe
-ALTER EXTENSION timescaledb ADD TABLE _timescaledb_catalog.hypertable;
--- include this now in the config
-SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', '');
-DROP FUNCTION IF EXISTS _timescaledb_functions.relation_approximate_size(relation REGCLASS);
-DROP FUNCTION IF EXISTS @extschema@.hypertable_approximate_detailed_size(relation REGCLASS);
-DROP FUNCTION IF EXISTS @extschema@.hypertable_approximate_size(hypertable REGCLASS);
-
-DROP FUNCTION IF EXISTS @extschema@.compress_chunk;
-CREATE FUNCTION @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN = true) RETURNS REGCLASS AS '' LANGUAGE SQL SET search_path TO pg_catalog,pg_temp;
-
diff --git a/version.config b/version.config
index a538688d3bf..9122968bf5d 100644
--- a/version.config
+++ b/version.config
@@ -1,3 +1,3 @@
-version = 2.14.0-dev
+version = 2.14.0
 update_from_version = 2.13.1
 downgrade_to_version = 2.13.1