Fix segmentation fault during INSERT into compressed hypertable.
An INSERT into a compressed hypertable whose number of open chunks
exceeds ts_guc_max_open_chunks_per_insert causes a segmentation fault.
A new row destined for a compressed chunk has to be compressed first,
and the memory required for compressing the row is allocated from the
RowCompressor::per_row_ctx memory context. Once the row is compressed,
ExecInsert() is called, and it allocates and frees memory from that
same context instead of the executor state's context. This corrupts
memory.

Fixes: timescale#4778
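
The fix follows the standard PostgreSQL memory-context discipline: save the
caller's current context, switch to the longer-lived context only for the
allocations that must outlive the per-row work, and restore the saved context
before returning. Below is a minimal sketch of that pattern, assuming a
PostgreSQL extension build; update_group_sketch() and longer_lived_ctx are
illustrative names standing in for row_compressor_update_group() and
per_row_ctx->parent, not code from this commit.

#include "postgres.h"

/*
 * Sketch only: save the caller's memory context, switch to the
 * longer-lived context for the allocations, then restore the saved
 * context rather than switching to some other fixed context.
 */
static void
update_group_sketch(MemoryContext longer_lived_ctx)
{
	MemoryContext oldcontext = CurrentMemoryContext;	/* save caller's context */
	char	   *group_state;

	MemoryContextSwitchTo(longer_lived_ctx);
	group_state = palloc(128);	/* stands in for segment_info_update() allocations */
	(void) group_state;

	/* restore, so the caller (and later ExecInsert()) keeps running in the context it expects */
	MemoryContextSwitchTo(oldcontext);
}

Restoring the saved context, instead of unconditionally switching to
per_row_ctx at the end, is what keeps ExecInsert() from allocating into and
freeing from the compressor's per-row context.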
sb230132 committed Oct 13, 2022
1 parent 8f5698f commit 38878be
Showing 4 changed files with 78 additions and 3 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -24,6 +24,7 @@ argument or resolve the type ambiguity by casting to the intended type.
* #4696 Report warning when enabling compression on hypertable
* #4745 Fix FK constraint violation error while insert into hypertable which references partitioned table
* #4756 Improve compression job IO performance
* #4807 Fix segmentation fault during INSERT into compressed hypertable.

**Thanks**
* @jvanns for reporting hypertable FK reference to vanilla PostgreSQL partitioned table doesn't seem to work
8 changes: 5 additions & 3 deletions tsl/src/compression/compression.c
@@ -707,10 +707,12 @@ static void
row_compressor_update_group(RowCompressor *row_compressor, TupleTableSlot *row)
{
int col;

/* save original memory context */
const MemoryContext oldcontext = CurrentMemoryContext;
Assert(row_compressor->rows_compressed_into_current_value == 0);
Assert(row_compressor->n_input_columns <= row->tts_nvalid);

MemoryContextSwitchTo(row_compressor->per_row_ctx->parent);
for (col = 0; col < row_compressor->n_input_columns; col++)
{
PerColumn *column = &row_compressor->per_column[col];
@@ -722,13 +724,13 @@ row_compressor_update_group(RowCompressor *row_compressor, TupleTableSlot *row)

Assert(column->compressor == NULL);

MemoryContextSwitchTo(row_compressor->per_row_ctx->parent);
/* Performance Improvement: We should just use array access here; everything is guaranteed to
be fetched */
val = slot_getattr(row, AttrOffsetGetAttrNumber(col), &is_null);
segment_info_update(column->segment_info, val, is_null);
MemoryContextSwitchTo(row_compressor->per_row_ctx);
}
/* switch to original memory context */
MemoryContextSwitchTo(oldcontext);
}

static bool
39 changes: 39 additions & 0 deletions tsl/test/shared/expected/compression_dml.out
@@ -24,3 +24,42 @@ EXECUTE p2('2021-02-22T08:00:00+00');
DEALLOCATE p1;
DEALLOCATE p2;
DROP TABLE i3719;
-- github issue 4778
CREATE TABLE metric_5m (
time TIMESTAMPTZ NOT NULL,
value DOUBLE PRECISION NOT NULL,
series_id BIGINT NOT NULL
);
SELECT table_name FROM create_hypertable(
'metric_5m'::regclass,
'time'::name, chunk_time_interval=>interval '5m',
create_default_indexes=> false);
table_name
metric_5m
(1 row)

-- enable compression
ALTER TABLE metric_5m SET (
timescaledb.compress,
timescaledb.compress_segmentby = 'series_id',
timescaledb.compress_orderby = 'time, value'
);
SET work_mem TO '64kB';
SELECT '2022-10-10 14:33:44.1234+05:30' as start_date \gset
-- populate hypertable
INSERT INTO metric_5m (time, series_id, value)
SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
-- manually compress all chunks
SELECT count(compress_chunk(c.schema_name|| '.' || c.table_name))
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht where
c.hypertable_id = ht.id and ht.table_name = 'metric_5m' and c.compressed_chunk_id IS NULL;
count
289
(1 row)

-- populate into compressed hypertable, this should not crash
INSERT INTO metric_5m (time, series_id, value)
SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
-- clean up
RESET work_mem;
DROP TABLE metric_5m;
33 changes: 33 additions & 0 deletions tsl/test/shared/sql/compression_dml.sql
@@ -21,3 +21,36 @@ DEALLOCATE p1;
DEALLOCATE p2;

DROP TABLE i3719;

-- github issue 4778
CREATE TABLE metric_5m (
time TIMESTAMPTZ NOT NULL,
value DOUBLE PRECISION NOT NULL,
series_id BIGINT NOT NULL
);
SELECT table_name FROM create_hypertable(
'metric_5m'::regclass,
'time'::name, chunk_time_interval=>interval '5m',
create_default_indexes=> false);
-- enable compression
ALTER TABLE metric_5m SET (
timescaledb.compress,
timescaledb.compress_segmentby = 'series_id',
timescaledb.compress_orderby = 'time, value'
);
SET work_mem TO '64kB';
SELECT '2022-10-10 14:33:44.1234+05:30' as start_date \gset
-- populate hypertable
INSERT INTO metric_5m (time, series_id, value)
SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
-- manually compress all chunks
SELECT count(compress_chunk(c.schema_name|| '.' || c.table_name))
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht where
c.hypertable_id = ht.id and ht.table_name = 'metric_5m' and c.compressed_chunk_id IS NULL;

-- populate into compressed hypertable, this should not crash
INSERT INTO metric_5m (time, series_id, value)
SELECT t, s,1 from generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t cross join generate_series(1,10, 1) s;
-- clean up
RESET work_mem;
DROP TABLE metric_5m;
