diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3e5c044ed27..78264de3a01 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ argument or resolve the type ambiguity by casting to the intended type.
 * #4696 Report warning when enabling compression on hypertable
 * #4745 Fix FK constraint violation error while insert into hypertable which references partitioned table
 * #4756 Improve compression job IO performance
+* #4807 Fix segmentation fault during INSERT into compressed hypertable.
 
 **Thanks**
 * @jvanns for reporting hypertable FK reference to vanilla PostgreSQL partitioned table doesn't seem to work
diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c
index 45dc6b475b4..bff0cb28611 100644
--- a/tsl/src/compression/compression.c
+++ b/tsl/src/compression/compression.c
@@ -707,10 +707,12 @@ static void
 row_compressor_update_group(RowCompressor *row_compressor, TupleTableSlot *row)
 {
 	int col;
-
+	/* save original memory context */
+	const MemoryContext oldcontext = CurrentMemoryContext;
 	Assert(row_compressor->rows_compressed_into_current_value == 0);
 	Assert(row_compressor->n_input_columns <= row->tts_nvalid);
 
+	MemoryContextSwitchTo(row_compressor->per_row_ctx->parent);
 	for (col = 0; col < row_compressor->n_input_columns; col++)
 	{
 		PerColumn *column = &row_compressor->per_column[col];
@@ -722,13 +724,13 @@ row_compressor_update_group(RowCompressor *row_compressor, TupleTableSlot *row)
 
 		Assert(column->compressor == NULL);
 
-		MemoryContextSwitchTo(row_compressor->per_row_ctx->parent);
 		/* Performance Improvment: We should just use array access here; everything is guaranteed to
 		 * be fetched */
 		val = slot_getattr(row, AttrOffsetGetAttrNumber(col), &is_null);
 		segment_info_update(column->segment_info, val, is_null);
-		MemoryContextSwitchTo(row_compressor->per_row_ctx);
 	}
+	/* switch to original memory context */
+	MemoryContextSwitchTo(oldcontext);
 }
 
 static bool
diff --git a/tsl/test/shared/expected/compression_dml.out b/tsl/test/shared/expected/compression_dml.out
index 0fa6e397117..f5f92ce6158 100644
--- a/tsl/test/shared/expected/compression_dml.out
+++ b/tsl/test/shared/expected/compression_dml.out
@@ -24,3 +24,42 @@ EXECUTE p2('2021-02-22T08:00:00+00');
 DEALLOCATE p1;
 DEALLOCATE p2;
 DROP TABLE i3719;
+-- github issue 4778
+CREATE TABLE metric_5m (
+    time TIMESTAMPTZ NOT NULL,
+    value DOUBLE PRECISION NOT NULL,
+    series_id BIGINT NOT NULL
+);
+SELECT table_name FROM create_hypertable(
+    'metric_5m'::regclass,
+    'time'::name, chunk_time_interval=>interval '5m',
+    create_default_indexes=> false);
+ table_name 
+ metric_5m
+(1 row)
+
+-- enable compression
+ALTER TABLE metric_5m SET (
+    timescaledb.compress,
+    timescaledb.compress_segmentby = 'series_id',
+    timescaledb.compress_orderby = 'time, value'
+);
+SET work_mem TO '64kB';
+SELECT '2022-10-10 14:33:44.1234+05:30' as start_date \gset
+-- populate hypertable
+INSERT INTO metric_5m (time, series_id, value)
+    SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
+-- manually compress all chunks
+SELECT count(compress_chunk(c.schema_name|| '.' || c.table_name))
+    FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht where
+    c.hypertable_id = ht.id and ht.table_name = 'metric_5m' and c.compressed_chunk_id IS NULL;
+ count 
+   289
+(1 row)
+
+-- populate into compressed hypertable, this should not crash
+INSERT INTO metric_5m (time, series_id, value)
+    SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
+-- clean up
+RESET work_mem;
+DROP TABLE metric_5m;
diff --git a/tsl/test/shared/sql/compression_dml.sql b/tsl/test/shared/sql/compression_dml.sql
index af157af9656..03098b3c430 100644
--- a/tsl/test/shared/sql/compression_dml.sql
+++ b/tsl/test/shared/sql/compression_dml.sql
@@ -21,3 +21,36 @@
 DEALLOCATE p1;
 DEALLOCATE p2;
 DROP TABLE i3719;
+
+-- github issue 4778
+CREATE TABLE metric_5m (
+    time TIMESTAMPTZ NOT NULL,
+    value DOUBLE PRECISION NOT NULL,
+    series_id BIGINT NOT NULL
+);
+SELECT table_name FROM create_hypertable(
+    'metric_5m'::regclass,
+    'time'::name, chunk_time_interval=>interval '5m',
+    create_default_indexes=> false);
+-- enable compression
+ALTER TABLE metric_5m SET (
+    timescaledb.compress,
+    timescaledb.compress_segmentby = 'series_id',
+    timescaledb.compress_orderby = 'time, value'
+);
+SET work_mem TO '64kB';
+SELECT '2022-10-10 14:33:44.1234+05:30' as start_date \gset
+-- populate hypertable
+INSERT INTO metric_5m (time, series_id, value)
+    SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
+-- manually compress all chunks
+SELECT count(compress_chunk(c.schema_name|| '.' || c.table_name))
+    FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht where
+    c.hypertable_id = ht.id and ht.table_name = 'metric_5m' and c.compressed_chunk_id IS NULL;
+
+-- populate into compressed hypertable, this should not crash
+INSERT INTO metric_5m (time, series_id, value)
+    SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
+-- clean up
+RESET work_mem;
+DROP TABLE metric_5m;
\ No newline at end of file
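
Note on the compression.c hunks above: the old code switched to row_compressor->per_row_ctx->parent and then back to per_row_ctx around every column, implicitly assuming the function had been entered in per_row_ctx. The fix instead remembers whichever memory context was current on entry, runs the whole loop in the parent (longer-lived) context, and restores the remembered context once at the end. Below is a minimal sketch of that save/restore idiom, assuming only the standard PostgreSQL memory-context API (CurrentMemoryContext, MemoryContextSwitchTo, pstrdup); update_group_sketch and long_lived_ctx are illustrative names, not part of the patch.

#include "postgres.h"
#include "utils/memutils.h"

/* Illustrative only: "long_lived_ctx" stands in for per_row_ctx->parent. */
static void
update_group_sketch(MemoryContext long_lived_ctx)
{
	/* Remember the caller's context instead of assuming a specific one. */
	MemoryContext oldcontext = CurrentMemoryContext;

	/* Allocate group-level state in memory that outlives per-row resets. */
	MemoryContextSwitchTo(long_lived_ctx);
	char *group_key = pstrdup("example segment-by value");
	(void) group_key;

	/* Restore exactly the context the caller was in. */
	MemoryContextSwitchTo(oldcontext);
}

Restoring the saved context, rather than hard-coding a switch back to per_row_ctx, keeps the function correct no matter which context the executor happens to call it from.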