Prevent locking compressed tuples
Error out when we detect that a scan is trying to lock a compressed
tuple. This does not block tuple locking on compressed hypertables
in general; it only errors for queries where actual compressed
tuples would be returned. Queries on compressed hypertables where
the compressed tuples are filtered out by other means, e.g. chunk
exclusion or query constraints, are still allowed.
Without this check, such queries fail with a very misleading `Could not
read block 0` error.
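
A minimal SQL sketch of the intended behavior (it mirrors the regression
test added below; the "metrics" hypertable, its "time" column, and the
assumption that only older chunks are compressed are illustrative):

-- Errors: the scan would return compressed tuples to lock.
SELECT * FROM metrics FOR UPDATE;
-- ERROR:  locking compressed tuples is not supported

-- Still allowed: chunk exclusion / query constraints filter out all
-- compressed tuples, so none are ever returned to the lock node.
SELECT * FROM metrics WHERE time > now() - interval '1 day' FOR UPDATE;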
svenklemm committed Jun 19, 2024
1 parent 5836445 commit 1982f29
Showing 7 changed files with 75 additions and 10 deletions.
1 change: 1 addition & 0 deletions .unreleased/pr_7046
@@ -0,0 +1 @@
Fixes: #7046 Prevent locking compressed tuples
8 changes: 8 additions & 0 deletions tsl/src/nodes/decompress_chunk/exec.c
@@ -82,6 +82,7 @@ decompress_chunk_state_create(CustomScan *cscan)
		list_nth_int(settings, DCS_BatchSortedMerge);
	chunk_state->decompress_context.enable_bulk_decompression =
		list_nth_int(settings, DCS_EnableBulkDecompression);
	chunk_state->has_row_marks = list_nth_int(settings, DCS_HasRowMarks);

	Assert(IsA(cscan->custom_exprs, List));
	Assert(list_length(cscan->custom_exprs) == 1);
@@ -429,6 +430,13 @@ decompress_chunk_exec_impl(DecompressChunkState *chunk_state, const BatchQueueFu
		return NULL;
	}

	if (chunk_state->has_row_marks)
	{
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("locking compressed tuples is not supported")));
	}

	if (chunk_state->csstate.ss.ps.ps_ProjInfo)
	{
		ExprContext *econtext = chunk_state->csstate.ss.ps.ps_ExprContext;
1 change: 1 addition & 0 deletions tsl/src/nodes/decompress_chunk/exec.h
@@ -21,6 +21,7 @@ typedef struct DecompressChunkState
	List *is_segmentby_column;
	List *bulk_decompression_column;
	List *custom_scan_tlist;
	bool has_row_marks;

	DecompressContext decompress_context;

1 change: 1 addition & 0 deletions tsl/src/nodes/decompress_chunk/planner.c
@@ -1304,6 +1304,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
	lfirst_int(list_nth_cell(settings, DCS_Reverse)) = dcpath->reverse;
	lfirst_int(list_nth_cell(settings, DCS_BatchSortedMerge)) = dcpath->batch_sorted_merge;
	lfirst_int(list_nth_cell(settings, DCS_EnableBulkDecompression)) = enable_bulk_decompression;
	lfirst_int(list_nth_cell(settings, DCS_HasRowMarks)) = root->parse->rowMarks != NIL;

	/*
	 * Vectorized quals must go into custom_exprs, because Postgres has to see
1 change: 1 addition & 0 deletions tsl/src/nodes/decompress_chunk/planner.h
@@ -14,6 +14,7 @@ typedef enum
	DCS_Reverse = 2,
	DCS_BatchSortedMerge = 3,
	DCS_EnableBulkDecompression = 4,
	DCS_HasRowMarks = 5,
	DCS_Count
} DecompressChunkSettingsIndex;

48 changes: 40 additions & 8 deletions tsl/test/shared/expected/compression_dml.out
@@ -88,9 +88,9 @@ SELECT chunk_schema || '.' || chunk_name as "chunk_table"
FROM timescaledb_information.chunks
WHERE hypertable_name = 'mytab' ORDER BY range_start limit 1 \gset
-- compress only the first chunk
SELECT compress_chunk(:'chunk_table');
compress_chunk
_timescaledb_internal._hyper_X_X_chunk
SELECT count(compress_chunk(:'chunk_table'));
count
1
(1 row)

-- insert a row into first compressed chunk
@@ -127,11 +127,10 @@ FROM
generate_series('1990-01-01'::timestamptz, '1990-01-10'::timestamptz, INTERVAL '1 day') AS g1(time),
generate_series(1, 3, 1 ) AS g2(source_id),
generate_series(1, 3, 1 ) AS g3(label);
SELECT compress_chunk(c) FROM show_chunks('comp_seg_varchar') c;
compress_chunk
_timescaledb_internal._hyper_X_X_chunk
_timescaledb_internal._hyper_X_X_chunk
(2 rows)
SELECT count(compress_chunk(c)) FROM show_chunks('comp_seg_varchar') c;
count
2
(1 row)

-- all tuples should come from compressed chunks
EXPLAIN (analyze,costs off, timing off, summary off) SELECT * FROM comp_seg_varchar;
@@ -170,3 +169,36 @@ QUERY PLAN
(6 rows)

DROP TABLE comp_seg_varchar;
-- test row locks for compressed tuples are blocked
CREATE TABLE row_locks(time timestamptz NOT NULL);
SELECT table_name FROM create_hypertable('row_locks', 'time');
table_name
row_locks
(1 row)

ALTER TABLE row_locks SET (timescaledb.compress);
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "row_locks" is set to ""
NOTICE: default order by for hypertable "row_locks" is set to ""time" DESC"
INSERT INTO row_locks VALUES('2021-01-01 00:00:00');
SELECT count(compress_chunk(c)) FROM show_chunks('row_locks') c;
count
1
(1 row)

-- should succeed because no compressed tuples are returned
SELECT FROM row_locks WHERE time < '2021-01-01 00:00:00' FOR UPDATE;
(0 rows)

-- should be blocked
\set ON_ERROR_STOP 0
SELECT FROM row_locks FOR UPDATE;
ERROR: locking compressed tuples is not supported
SELECT FROM row_locks FOR NO KEY UPDATE;
ERROR: locking compressed tuples is not supported
SELECT FROM row_locks FOR SHARE;
ERROR: locking compressed tuples is not supported
SELECT FROM row_locks FOR KEY SHARE;
ERROR: locking compressed tuples is not supported
\set ON_ERROR_STOP 1
DROP TABLE row_locks;
25 changes: 23 additions & 2 deletions tsl/test/shared/sql/compression_dml.sql
@@ -76,7 +76,7 @@ SELECT chunk_schema || '.' || chunk_name as "chunk_table"
WHERE hypertable_name = 'mytab' ORDER BY range_start limit 1 \gset

-- compress only the first chunk
SELECT compress_chunk(:'chunk_table');
SELECT count(compress_chunk(:'chunk_table'));

-- insert a row into first compressed chunk
INSERT INTO mytab SELECT '2022-10-07 05:30:10+05:30'::timestamp with time zone, 3, 3;
@@ -105,7 +105,7 @@ generate_series('1990-01-01'::timestamptz, '1990-01-10'::timestamptz, INTERVAL '
generate_series(1, 3, 1 ) AS g2(source_id),
generate_series(1, 3, 1 ) AS g3(label);

SELECT compress_chunk(c) FROM show_chunks('comp_seg_varchar') c;
SELECT count(compress_chunk(c)) FROM show_chunks('comp_seg_varchar') c;


-- all tuples should come from compressed chunks
@@ -124,3 +124,24 @@ ON CONFLICT (source_id, label, time) DO UPDATE SET data = '{"update": true}';
EXPLAIN (analyze,costs off, timing off, summary off) SELECT * FROM comp_seg_varchar;

DROP TABLE comp_seg_varchar;

-- test row locks for compressed tuples are blocked
CREATE TABLE row_locks(time timestamptz NOT NULL);
SELECT table_name FROM create_hypertable('row_locks', 'time');
ALTER TABLE row_locks SET (timescaledb.compress);
INSERT INTO row_locks VALUES('2021-01-01 00:00:00');
SELECT count(compress_chunk(c)) FROM show_chunks('row_locks') c;

-- should succeed because no compressed tuples are returned
SELECT FROM row_locks WHERE time < '2021-01-01 00:00:00' FOR UPDATE;
-- should be blocked
\set ON_ERROR_STOP 0
SELECT FROM row_locks FOR UPDATE;
SELECT FROM row_locks FOR NO KEY UPDATE;
SELECT FROM row_locks FOR SHARE;
SELECT FROM row_locks FOR KEY SHARE;
\set ON_ERROR_STOP 1

DROP TABLE row_locks;

