Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix issue with deleting data node and dropping database #5114

Merged
merged 1 commit on Jan 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Expand Up @@ -8,6 +8,7 @@ accidentally triggering the load of a previous DB version.**

**Bugfixes**
* #4926 Fix corruption when inserting into compressed chunks
* #5114 Fix issue with deleting data node and dropping database
* #5130 Fix CAgg on CAgg variable bucket size validation
* #5133 Fix CAgg on CAgg using different column order on the original hypertable
* #5152 Fix adding column with NULL constraint to compressed hypertable
Expand Down
4 changes: 3 additions & 1 deletion tsl/src/data_node.c
Expand Up @@ -1841,7 +1841,6 @@ data_node_delete(PG_FUNCTION_ARGS)
if (drop_database)
{
TS_PREVENT_IN_TRANSACTION_BLOCK(true);
drop_data_node_database(server);
}

/* close any pending connections */
Expand Down Expand Up @@ -1873,6 +1872,9 @@ data_node_delete(PG_FUNCTION_ARGS)

parsetree = (Node *) &stmt;

if (drop_database)
drop_data_node_database(server);

/* Make sure event triggers are invoked so that all dropped objects
* are collected during a cascading drop. This ensures all dependent
* objects get cleaned up. */
Expand Down
8 changes: 4 additions & 4 deletions tsl/test/expected/data_node.out
Expand Up @@ -2117,6 +2117,10 @@ SELECT * FROM alter_data_node('data_node_1');
data_node_1 | foo.bar | 8989 | new_db | t
(1 row)

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;
\set ON_ERROR_STOP 0
-- test some error cases
SELECT * FROM alter_data_node(NULL);
Expand Down Expand Up @@ -2144,10 +2148,6 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_
data_node_3 | {host=localhost,port=55432,dbname=db_data_node_3,available=true}
(3 rows)

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;
DROP VIEW chunk_query_data_node;
-- create new session to clear out connection cache
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
Expand Down
72 changes: 67 additions & 5 deletions tsl/test/expected/data_node_bootstrap.out
Expand Up @@ -124,11 +124,6 @@ SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
ERROR: delete_data_node() cannot run inside a transaction block
ROLLBACK;
\set ON_ERROR_STOP 1
-- Using the drop_database option when there are active connections to
-- the data node should fail. But any connections in the current
-- session should be cleared when dropping the database. To test that
-- the connection is cleared, first create a connection in the
-- connection cache by inserting some data
CREATE TABLE conditions (time timestamptz, device int, temp float);
SELECT create_distributed_hypertable('conditions', 'time', 'device');
WARNING: only one data node was assigned to the hypertable
Expand All @@ -138,6 +133,73 @@ NOTICE: adding not-null constraint to column "time"
(1,public,conditions,t)
(1 row)

\set ON_ERROR_STOP 0
-- Should fail because the data node is the last one
SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
ERROR: insufficient number of data nodes for distributed hypertable "conditions"
\set ON_ERROR_STOP 1
-- Add another data node
SELECT node_name, database, node_created, database_created, extension_created
FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true);
node_name | database | node_created | database_created | extension_created
------------------+------------------+--------------+------------------+-------------------
bootstrap_test_2 | bootstrap_test_2 | t | t | t
(1 row)

SELECT attach_data_node('bootstrap_test_2', 'conditions');
NOTICE: the number of partitions in dimension "device" was increased to 2
attach_data_node
------------------------
(1,1,bootstrap_test_2)
(1 row)

-- Insert some data into the node
INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3);
\set ON_ERROR_STOP 0
-- Should fail because the data node still holds data
SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
ERROR: insufficient number of data nodes
\set ON_ERROR_STOP 1
-- Data node's database still exists after failure to delete
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
count
-------
1
(1 row)

-- Delete the chunks so that we can delete the data node
SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz);
drop_chunks
---------------------------------------------
_timescaledb_internal._dist_hyper_1_1_chunk
(1 row)

SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
NOTICE: the number of partitions in dimension "device" of hypertable "conditions" was decreased to 1
delete_data_node
------------------
t
(1 row)

-- The data node's database is dropped
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
count
-------
0
(1 row)

SELECT data_nodes FROM timescaledb_information.hypertables
WHERE hypertable_name = 'conditions';
data_nodes
------------------
{bootstrap_test}
(1 row)

-- Using the drop_database option when there are active connections to
-- the data node should fail. But any connections in the current
-- session should be cleared when dropping the database. To test that
-- the connection is cleared, first create a connection in the
-- connection cache by inserting some data
INSERT INTO conditions VALUES ('2021-12-01 10:30', 1, 20.3);
DROP TABLE conditions;
-- Now drop the data node and it should clear the connection from the
Expand Down
9 changes: 5 additions & 4 deletions tsl/test/sql/data_node.sql
Expand Up @@ -978,6 +978,11 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_
-- just show current options:
SELECT * FROM alter_data_node('data_node_1');

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;

\set ON_ERROR_STOP 0
-- test some error cases
SELECT * FROM alter_data_node(NULL);
Expand All @@ -991,10 +996,6 @@ SELECT delete_data_node('data_node_1', drop_database=>true);
SELECT * FROM alter_data_node('data_node_1', host=>'localhost', port=>:old_port, database=>:'DN_DBNAME_1');
SELECT node_name, options FROM timescaledb_information.data_nodes order by node_name;

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;
DROP VIEW chunk_query_data_node;

-- create new session to clear out connection cache
Expand Down
37 changes: 35 additions & 2 deletions tsl/test/sql/data_node_bootstrap.sql
Expand Up @@ -82,13 +82,46 @@ SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
ROLLBACK;
\set ON_ERROR_STOP 1

CREATE TABLE conditions (time timestamptz, device int, temp float);
SELECT create_distributed_hypertable('conditions', 'time', 'device');

\set ON_ERROR_STOP 0
-- Should fail because the data node is the last one
SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
\set ON_ERROR_STOP 1

-- Add another data node
SELECT node_name, database, node_created, database_created, extension_created
FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true);
SELECT attach_data_node('bootstrap_test_2', 'conditions');

-- Insert some data into the node
INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3);

\set ON_ERROR_STOP 0
-- Should fail because the data node still holds data
SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
\set ON_ERROR_STOP 1

-- Data node's database still exists after failure to delete
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';

-- Delete the chunks so that we can delete the data node
SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz);

SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);

-- The data node's database is dropped
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';

SELECT data_nodes FROM timescaledb_information.hypertables
WHERE hypertable_name = 'conditions';

-- Using the drop_database option when there are active connections to
-- the data node should fail. But any connections in the current
-- session should be cleared when dropping the database. To test that
-- the connection is cleared, first create a connection in the
-- connection cache by inserting some data
CREATE TABLE conditions (time timestamptz, device int, temp float);
SELECT create_distributed_hypertable('conditions', 'time', 'device');
INSERT INTO conditions VALUES ('2021-12-01 10:30', 1, 20.3);
DROP TABLE conditions;

Expand Down