Fix segfault after column drop on compressed table
Decompression produces records with all the decompressed data set, but it
also retains fields that are only used internally during decompression.
On their own these stale fields caused no problems, but when an operation
is performed on the whole row, every field that ended up non-null becomes
a potential segfault source.

Fixes #5458 #5411
kgyrtkirk committed Apr 5, 2023
1 parent c6b9f50 commit 742a48c
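For context, a minimal sketch of the failure mode (illustrative only, not the actual TimescaleDB code; `build_row_datum` and the call pattern are hypothetical):

```c
#include "postgres.h"
#include "access/htup_details.h"
#include "executor/tuptable.h"

/*
 * Illustrative sketch: a whole-row expression such as
 * "SELECT readings FROM readings" materializes every attribute of the
 * slot. If decompression filled only some columns while the rest kept
 * stale tts_values entries with tts_isnull[i] == false, heap_form_tuple()
 * dereferences those stale Datums for by-reference types (e.g. a dropped
 * TEXT column), which is the reported segfault.
 */
static HeapTuple
build_row_datum(TupleTableSlot *slot)
{
	/* Fetch every attribute, including ones decompression never set. */
	slot_getallattrs(slot);

	return heap_form_tuple(slot->tts_tupleDescriptor,
						   slot->tts_values,
						   slot->tts_isnull);
}
```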
Showing 4 changed files with 50 additions and 1 deletion.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -29,6 +29,7 @@ accidentally triggering the load of a previous DB version.**
 * #5499 Do not segfault on large histogram() parameters
 * #5497 Allow named time_bucket arguments in Cagg definition
 * #5500 Fix when no FROM clause in continuous aggregate definition
+* #5462 Fix segfault after column drop on compressed table
 
 **Thanks**
 * @nikolaps for reporting an issue with the COPY fetcher
2 changes: 1 addition & 1 deletion tsl/src/nodes/decompress_chunk/exec.c
@@ -473,7 +473,7 @@ decompress_chunk_create_tuple(DecompressChunkState *state)
 
 	if (!state->initialized)
 	{
-		ExecClearTuple(decompressed_slot);
+		ExecStoreAllNullTuple(decompressed_slot);
 
 		/*
		 * Reset expression memory context to clean out any cruft from
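Both helpers are stock PostgreSQL executor functions: `ExecClearTuple()` only marks the slot empty and leaves the `tts_values`/`tts_isnull` arrays untouched, while `ExecStoreAllNullTuple()` stores a virtual tuple with every attribute set to NULL. A hedged sketch of why that matters here; the column-filling loop is illustrative, not the actual decompression loop:

```c
#include "postgres.h"
#include "executor/tuptable.h"

static void
fill_decompressed_columns(TupleTableSlot *slot, int nfilled)
{
	/*
	 * Start from an all-NULL virtual tuple, so any column the
	 * decompressor never overwrites (a dropped column, or a field used
	 * only internally during decompression) reads back as NULL instead
	 * of whatever Datum a previous row left behind.
	 */
	ExecStoreAllNullTuple(slot);

	/* Hypothetical per-column decompression results. */
	for (int i = 0; i < nfilled; i++)
	{
		slot->tts_values[i] = Int32GetDatum(i);
		slot->tts_isnull[i] = false;
	}
}
```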
30 changes: 30 additions & 0 deletions tsl/test/expected/compression_errors.out
@@ -630,3 +630,33 @@ INSERT INTO ts_table SELECT * FROM data_table;
 --cleanup tables
 DROP TABLE data_table cascade;
 DROP TABLE ts_table cascade;
+--invalid reads for row expressions after column dropped on compressed tables #5458
+CREATE TABLE readings(
+    "time" TIMESTAMPTZ NOT NULL,
+    battery_status TEXT,
+    battery_temperature DOUBLE PRECISION
+);
+INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00');
+SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true);
+NOTICE:  migrating data to chunks
+   create_hypertable
+------------------------
+ (35,public,readings,t)
+(1 row)
+
+ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature');
+SELECT compress_chunk(show_chunks('readings'));
+              compress_chunk
+------------------------------------------
+ _timescaledb_internal._hyper_35_22_chunk
+(1 row)
+
+ALTER TABLE readings DROP COLUMN battery_status;
+INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2);
+SELECT readings FROM readings;
+               readings
+--------------------------------------
+ ("Fri Nov 11 03:11:11 2022 PST",)
+ ("Fri Nov 11 11:11:11 2022 PST",0.2)
+(2 rows)
+
18 changes: 18 additions & 0 deletions tsl/test/sql/compression_errors.sql
@@ -364,3 +364,21 @@ INSERT INTO ts_table SELECT * FROM data_table;
 --cleanup tables
 DROP TABLE data_table cascade;
 DROP TABLE ts_table cascade;
+
+--invalid reads for row expressions after column dropped on compressed tables #5458
+CREATE TABLE readings(
+    "time" TIMESTAMPTZ NOT NULL,
+    battery_status TEXT,
+    battery_temperature DOUBLE PRECISION
+);
+
+INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00');
+
+SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true);
+
+ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature');
+SELECT compress_chunk(show_chunks('readings'));
+
+ALTER TABLE readings DROP COLUMN battery_status;
+INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2);
+SELECT readings FROM readings;
