Limit tuple decompression during DML operations #6566

Merged
merged 1 commit · Jan 29, 2024
1 change: 1 addition & 0 deletions .unreleased/pr_6566
@@ -0,0 +1 @@
Implements: #6566 Limit tuple decompression during DML operations
18 changes: 18 additions & 0 deletions src/guc.c
@@ -63,6 +63,7 @@ bool ts_guc_enable_cagg_reorder_groupby = true;
bool ts_guc_enable_now_constify = true;
bool ts_guc_enable_osm_reads = true;
TSDLLEXPORT bool ts_guc_enable_dml_decompression = true;
TSDLLEXPORT int ts_guc_max_tuples_decompressed_per_dml = 100000;
TSDLLEXPORT bool ts_guc_enable_transparent_decompression = true;
TSDLLEXPORT bool ts_guc_enable_decompression_logrep_markers = false;
TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge = true;
@@ -339,6 +340,23 @@ _guc_init(void)
NULL,
NULL);

	DefineCustomIntVariable("timescaledb.max_tuples_decompressed_per_dml_transaction",
							"The maximum number of tuples that can be decompressed during an "
							"INSERT, UPDATE, or DELETE.",
							"If the number of tuples exceeds this value, an error will be "
							"thrown and the transaction rolled back. "
							"Setting this to 0 removes the limit (an unlimited number of "
							"tuples may be decompressed).",
							&ts_guc_max_tuples_decompressed_per_dml,
							100000,
							0,
							2147483647,
							PGC_USERSET,
							0,
							NULL,
							NULL,
							NULL);

DefineCustomBoolVariable("timescaledb.enable_transparent_decompression",
"Enable transparent decompression",
"Enable transparent decompression when querying hypertable",
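Note: because the GUC is registered as PGC_USERSET, it can be adjusted per session or per transaction without special privileges. A minimal usage sketch (the table name and values below are illustrative, not part of this PR):

-- session-wide override
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 500000;

-- scoped to a single transaction, e.g. for a one-off backfill
BEGIN;
SET LOCAL timescaledb.max_tuples_decompressed_per_dml_transaction = 0; -- 0 = unlimited
UPDATE metrics SET value = value * 2 WHERE ts < '2024-01-01';          -- hypothetical table
COMMIT;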
1 change: 1 addition & 0 deletions src/guc.h
@@ -27,6 +27,7 @@ extern bool ts_guc_enable_cagg_reorder_groupby;
extern bool ts_guc_enable_now_constify;
extern bool ts_guc_enable_osm_reads;
extern TSDLLEXPORT bool ts_guc_enable_dml_decompression;
extern TSDLLEXPORT int ts_guc_max_tuples_decompressed_per_dml;
extern TSDLLEXPORT bool ts_guc_enable_transparent_decompression;
extern TSDLLEXPORT bool ts_guc_enable_decompression_logrep_markers;
extern TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge;
16 changes: 16 additions & 0 deletions src/nodes/chunk_dispatch/chunk_dispatch.c
@@ -433,6 +433,22 @@ chunk_dispatch_exec(CustomScanState *node)
on_chunk_insert_state_changed,
state);

	if (ts_guc_max_tuples_decompressed_per_dml > 0)
	{
		if (cis->cds->tuples_decompressed > ts_guc_max_tuples_decompressed_per_dml)
		{
			ereport(ERROR,
					(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
					 errmsg("tuple decompression limit exceeded by operation"),
					 errdetail("current limit: %d, tuples decompressed: %lld",
							   ts_guc_max_tuples_decompressed_per_dml,
							   (long long int) cis->cds->tuples_decompressed),
					 errhint("Consider increasing "
							 "timescaledb.max_tuples_decompressed_per_dml_transaction or set "
							 "to 0 (unlimited).")));
		}
	}

/*
* Set the result relation in the executor state to the target chunk.
* This makes sure that the tuple gets inserted into the correct
16 changes: 16 additions & 0 deletions src/nodes/hypertable_modify.c
@@ -761,6 +761,22 @@ ExecModifyTable(CustomScanState *cs_node, PlanState *pstate)
CommandCounterIncrement();
/* mark rows visible */
estate->es_output_cid = GetCurrentCommandId(true);

		if (ts_guc_max_tuples_decompressed_per_dml > 0)
		{
			if (ht_state->tuples_decompressed > ts_guc_max_tuples_decompressed_per_dml)
			{
				ereport(ERROR,
						(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
						 errmsg("tuple decompression limit exceeded by operation"),
						 errdetail("current limit: %d, tuples decompressed: %lld",
								   ts_guc_max_tuples_decompressed_per_dml,
								   (long long int) ht_state->tuples_decompressed),
						 errhint("Consider increasing "
								 "timescaledb.max_tuples_decompressed_per_dml_transaction or "
								 "set to 0 (unlimited).")));
			}
		}
}
}
/*
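Both the INSERT path above (chunk_dispatch.c) and this UPDATE/DELETE path read the same GUC. Its current and default values can be inspected like any other setting; a quick check, assuming the timescaledb extension is loaded:

SELECT name, setting, boot_val, context
FROM pg_settings
WHERE name = 'timescaledb.max_tuples_decompressed_per_dml_transaction';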
37 changes: 37 additions & 0 deletions tsl/test/expected/compression_insert.out
@@ -1042,3 +1042,40 @@ SELECT count(compress_chunk(ch)) FROM show_chunks('test_copy') ch;

\copy test_copy FROM data/copy_data.csv WITH CSV HEADER;
DROP TABLE test_copy;
-- Test limiting decompressed tuples during an INSERT
CREATE TABLE test_limit (
timestamp int not null,
id bigint
);
SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>1000);
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
24 | public | test_limit | t
(1 row)

INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i;
CREATE UNIQUE INDEX timestamp_id_idx ON test_limit(timestamp, id);
ALTER TABLE test_limit SET (
timescaledb.compress,
timescaledb.compress_orderby = 'timestamp'
);
WARNING: column "id" should be used for segmenting or ordering
SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch;
count
-------
11
(1 row)

SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000;
\set VERBOSITY default
\set ON_ERROR_STOP 0
-- Inserting into the same period should decompress tuples
INSERT INTO test_limit SELECT t, 11 FROM generate_series(1,6000,1000) t;
ERROR: tuple decompression limit exceeded by operation
DETAIL: current limit: 5000, tuples decompressed: 6000
HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited).
-- Setting to 0 should remove the limit.
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0;
INSERT INTO test_limit SELECT t, 11 FROM generate_series(1,6000,1000) t;
\set ON_ERROR_STOP 1
DROP TABLE test_limit;
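For completeness, the setting behaves like any other user-settable GUC, so it can be inspected and restored after such a session (a small sketch, not part of the test file):

SHOW timescaledb.max_tuples_decompressed_per_dml_transaction;
RESET timescaledb.max_tuples_decompressed_per_dml_transaction; -- back to the default of 100000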
40 changes: 40 additions & 0 deletions tsl/test/expected/compression_update_delete.out
@@ -2660,3 +2660,43 @@ RESET timescaledb.debug_compression_path_info;
DROP TABLE t6367;
\c :TEST_DBNAME :ROLE_SUPERUSER
DROP DATABASE test6367;
-- Test limiting decompressed tuples during an UPDATE or DELETE
CREATE TABLE test_limit (
timestamp int not null,
id bigint
);
SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>10000);
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
33 | public | test_limit | t
(1 row)

INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i;
ALTER TABLE test_limit SET (
timescaledb.compress,
timescaledb.compress_orderby = 'timestamp'
);
SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch;
count
-------
2
(1 row)

SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000;
\set VERBOSITY default
\set ON_ERROR_STOP 0
-- Updating or deleting everything will exceed the configured limit.
UPDATE test_limit SET id = 0;
ERROR: tuple decompression limit exceeded by operation
DETAIL: current limit: 5000, tuples decompressed: 30000
HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited).
DELETE FROM test_limit WHERE id > 0;
ERROR: tuple decompression limit exceeded by operation
DETAIL: current limit: 5000, tuples decompressed: 30000
HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited).
-- Setting to 0 should remove the limit.
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0;
UPDATE test_limit SET id = 0;
DELETE FROM test_limit WHERE id > 0;
\set ON_ERROR_STOP 1
DROP TABLE test_limit;
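If this error shows up on a real workload, an alternative to raising the limit is to split the DML into several smaller transactions so each one decompresses fewer tuples. A hedged sketch against the test table above (the batch boundaries are illustrative and depend on how the data is compressed):

BEGIN;
UPDATE test_limit SET id = 0 WHERE timestamp <= 1000;
COMMIT;

BEGIN;
UPDATE test_limit SET id = 0 WHERE timestamp > 1000 AND timestamp <= 2000;
COMMIT;
-- ... and so on, keeping each transaction's decompression under the limit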
27 changes: 27 additions & 0 deletions tsl/test/sql/compression_insert.sql
@@ -688,3 +688,30 @@ SELECT count(compress_chunk(ch)) FROM show_chunks('test_copy') ch;
\copy test_copy FROM data/copy_data.csv WITH CSV HEADER;

DROP TABLE test_copy;

-- Test limiting decompressed tuples during an INSERT
CREATE TABLE test_limit (
timestamp int not null,
id bigint
);
SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>1000);
INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i;
CREATE UNIQUE INDEX timestamp_id_idx ON test_limit(timestamp, id);

ALTER TABLE test_limit SET (
timescaledb.compress,
timescaledb.compress_orderby = 'timestamp'
);
SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch;

SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000;
\set VERBOSITY default
\set ON_ERROR_STOP 0
-- Inserting into the same period should decompress tuples
INSERT INTO test_limit SELECT t, 11 FROM generate_series(1,6000,1000) t;
-- Setting to 0 should remove the limit.
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0;
INSERT INTO test_limit SELECT t, 11 FROM generate_series(1,6000,1000) t;
\set ON_ERROR_STOP 1

DROP TABLE test_limit;
28 changes: 28 additions & 0 deletions tsl/test/sql/compression_update_delete.sql
@@ -1418,3 +1418,31 @@ RESET timescaledb.debug_compression_path_info;
DROP TABLE t6367;
\c :TEST_DBNAME :ROLE_SUPERUSER
DROP DATABASE test6367;

-- Test limiting decompressed tuples during an UPDATE or DELETE
CREATE TABLE test_limit (
timestamp int not null,
id bigint
);
SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>10000);
INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i;

ALTER TABLE test_limit SET (
timescaledb.compress,
timescaledb.compress_orderby = 'timestamp'
);
SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch;

SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000;
\set VERBOSITY default
\set ON_ERROR_STOP 0
-- Updating or deleting everything will exceed the configured limit.
UPDATE test_limit SET id = 0;
DELETE FROM test_limit WHERE id > 0;
-- Setting to 0 should remove the limit.
SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0;
UPDATE test_limit SET id = 0;
DELETE FROM test_limit WHERE id > 0;
\set ON_ERROR_STOP 1

DROP TABLE test_limit;