Skip to content

Commit

Permalink
Server crash when using duplicate segmentby column
Browse files Browse the repository at this point in the history
The segmentby column info array is populated by using the column
attribute number as an array index. This is done as part of validating
and creating segment by column info in function `compresscolinfo_init`.

Since the column is duplicated, the attribute number for both
segmentby columns is the same. When this attribute number is used as an
index, only one of the array elements is populated correctly with the
detailed column info, whereas the other element of the array remains
NULL. This segmentby column info is updated in the catalog as part of
processing compression options (ALTER TABLE ...).

When the chunk is being compressed this segmentby column information is
being retrieved from the catalog to create the scan key in order to
identify any existing index on the table that matches the segmentby
column. Out of the two keys, one key gets updated correctly whereas the
second key contains NULL values. This results in a crash during the index
scan performed to identify any existing index on the table.

The proposed change avoids this crash by raising an error if the user has
specified duplicate columns as part of the compress_segmentby or
compress_orderby options.
  • Loading branch information
pdipesh02 committed Sep 6, 2023
1 parent 44e41c1 commit 34e018c
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 1 deletion.
1 change: 1 addition & 0 deletions .unreleased/bugfix_6044
@@ -0,0 +1 @@
Fixes: #6044 Server crash when using duplicate segmentby column
23 changes: 22 additions & 1 deletion tsl/src/compression/create.c
Expand Up @@ -222,14 +222,16 @@ compresscolinfo_init(CompressColInfo *cc, Oid srctbl_relid, List *segmentby_cols
Relation rel;
TupleDesc tupdesc;
int i, colno, attno;
int16 *segorder_colindex;
int16 *segorder_colindex, *colindex;
int seg_attnolen = 0;
ListCell *lc;
Oid compresseddata_oid = ts_custom_type_cache_get(CUSTOM_TYPE_COMPRESSED_DATA)->type_oid;

seg_attnolen = list_length(segmentby_cols);
rel = table_open(srctbl_relid, AccessShareLock);
segorder_colindex = palloc0(sizeof(int32) * (rel->rd_att->natts));
/* To check duplicates in segmentby/orderby column list. */
colindex = palloc0(sizeof(int16) * (rel->rd_att->natts));
tupdesc = rel->rd_att;
i = 1;

Expand All @@ -245,11 +247,21 @@ compresscolinfo_init(CompressColInfo *cc, Oid srctbl_relid, List *segmentby_cols
errhint("The timescaledb.compress_segmentby option must reference a valid "
"column.")));
}

/* check if segmentby columns are distinct. */
if (colindex[col_attno - 1] != 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("duplicate column name \"%s\"", NameStr(col->colname)),
errhint("The timescaledb.compress_segmentby option must reference distinct "
"column.")));
colindex[col_attno - 1] = 1;
segorder_colindex[col_attno - 1] = i++;
}
/* the column indexes are numbered as seg_attnolen + <orderby_index>
*/
Assert(seg_attnolen == (i - 1));
memset(colindex, 0, sizeof(int16) * (rel->rd_att->natts));
foreach (lc, orderby_cols)
{
CompressedParsedCol *col = (CompressedParsedCol *) lfirst(lc);
Expand All @@ -262,6 +274,14 @@ compresscolinfo_init(CompressColInfo *cc, Oid srctbl_relid, List *segmentby_cols
errhint("The timescaledb.compress_orderby option must reference a valid "
"column.")));

/* check if orderby columns are distinct. */
if (colindex[col_attno - 1] != 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("duplicate column name \"%s\"", NameStr(col->colname)),
errhint("The timescaledb.compress_orderby option must reference distinct "
"column.")));

/* check if orderby_cols and segmentby_cols are distinct */
if (segorder_colindex[col_attno - 1] != 0)
ereport(ERROR,
Expand All @@ -271,6 +291,7 @@ compresscolinfo_init(CompressColInfo *cc, Oid srctbl_relid, List *segmentby_cols
errhint("Use separate columns for the timescaledb.compress_orderby and"
" timescaledb.compress_segmentby options.")));

colindex[col_attno - 1] = 1;
segorder_colindex[col_attno - 1] = i++;
}

Expand Down
6 changes: 6 additions & 0 deletions tsl/test/expected/compression_errors.out
Expand Up @@ -195,6 +195,12 @@ HINT: The option timescaledb.compress_segmentby must be a set of columns separa
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p');
ERROR: invalid ordering column type point
DETAIL: Could not identify a less-than operator for the type.
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b');
ERROR: duplicate column name "b"
HINT: The timescaledb.compress_segmentby option must reference distinct column.
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b');
ERROR: duplicate column name "b"
HINT: The timescaledb.compress_orderby option must reference distinct column.
--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b');
--ddl on ht with compression
Expand Down
2 changes: 2 additions & 0 deletions tsl/test/sql/compression_errors.sql
Expand Up @@ -99,6 +99,8 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'ran
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c LIMIT 1');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + b');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b');

--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b');
Expand Down

0 comments on commit 34e018c

Please sign in to comment.