Skip to content

Commit

Permalink
Check unique indexes when enabling compression
Browse files Browse the repository at this point in the history
When enabling compression on a table, there is a check of unique
and primary key *constraints*, but no check if there is just a unique
index. This means that it is possible to create a compressed table
without getting a warning that you should include the columns in the
index into the segmentby field, which can lead to suboptimal query
times.

This commit adds a check for unique indexes as well and ensures that a
similar warning is printed as for a unique constraint.

Fixes #5892
  • Loading branch information
mkindahl committed Jul 26, 2023
1 parent 61c288e commit 00282a0
Show file tree
Hide file tree
Showing 5 changed files with 120 additions and 9 deletions.
2 changes: 2 additions & 0 deletions .unreleased/bugfix_5892
@@ -0,0 +1,2 @@
Implements: #5894 Check unique indexes when enabling compression
Fixes: #5892
87 changes: 78 additions & 9 deletions tsl/src/compression/create.c
Expand Up @@ -779,7 +779,7 @@ get_col_info_for_attnum(Hypertable *ht, CompressColInfo *colinfo, AttrNumber att
* This is limited to foreign key constraints now
*/
static List *
validate_existing_constraints(Hypertable *ht, CompressColInfo *colinfo)
validate_existing_constraints(Hypertable *ht, CompressColInfo *colinfo, Bitmapset **indexes)
{
Oid relid = ht->main_table_relid;
Relation pg_constr;
Expand All @@ -802,9 +802,18 @@ validate_existing_constraints(Hypertable *ht, CompressColInfo *colinfo)
{
Form_pg_constraint form = (Form_pg_constraint) GETSTRUCT(tuple);

/* we check primary ,unique and exclusion constraints.
* move foreign key constarints over to compression table
* ignore triggers
/* Save away the supporting index, if any, that we check so that we
* can ignore it in the index checking and not get duplicate messages.
*
* We are saving all checked indexes here even though only the unique
* index is a problem at this point. It potentially avoids a second
* check of an index that we have already checked. */
if (form->conindid)
*indexes = bms_add_member(*indexes, form->conindid);

/*
* We check primary, unique, and exclusion constraints. Move foreign
* key constraints over to the compression table; ignore triggers.
*/
if (form->contype == CONSTRAINT_CHECK || form->contype == CONSTRAINT_TRIGGER)
continue;
Expand All @@ -822,6 +831,7 @@ validate_existing_constraints(Hypertable *ht, CompressColInfo *colinfo)
int j, numkeys;
int16 *attnums;
bool is_null;

/* Extract the conkey array, ie, attnums of PK's columns */
Datum adatum = heap_getattr(tuple,
Anum_pg_constraint_conkey,
Expand All @@ -844,11 +854,10 @@ validate_existing_constraints(Hypertable *ht, CompressColInfo *colinfo)
attnums = (int16 *) ARR_DATA_PTR(arr);
for (j = 0; j < numkeys; j++)
{
FormData_hypertable_compression *col_def =
Form_hypertable_compression col_def =
get_col_info_for_attnum(ht, colinfo, attnums[j]);

if (col_def == NULL)
elog(ERROR, "missing column definition for constraint");
Ensure(col_def, "missing column definition for constraint");

if (form->contype == CONSTRAINT_FOREIGN)
{
Expand Down Expand Up @@ -883,6 +892,59 @@ validate_existing_constraints(Hypertable *ht, CompressColInfo *colinfo)
return conlist;
}

/*
 * Validate the existing indexes on the hypertable.
 *
 * Unlike the constraint validation above, this catches unique indexes that
 * have no corresponding constraint entry in pg_constraint. Any supporting
 * index already examined during constraint validation is passed in via
 * "ignore" so that we do not emit duplicate warnings for it.
 */
static void
validate_existing_indexes(Hypertable *ht, CompressColInfo *colinfo, Bitmapset *ignore)
{
	Relation rel;
	HeapTuple tup;
	ScanKeyData scankey;
	SysScanDesc scan;

	/* Scan pg_index for every index defined on the hypertable's main table. */
	ScanKeyInit(&scankey,
				Anum_pg_index_indrelid,
				BTEqualStrategyNumber,
				F_OIDEQ,
				ObjectIdGetDatum(ht->main_table_relid));

	rel = table_open(IndexRelationId, AccessShareLock);
	scan = systable_beginscan(rel, IndexIndrelidIndexId, true, NULL, 1, &scankey);

	while (HeapTupleIsValid(tup = systable_getnext(scan)))
	{
		Form_pg_index idxform = (Form_pg_index) GETSTRUCT(tup);

		/* Skip indexes that are being dropped or are invalid. */
		if (!idxform->indislive || !idxform->indisvalid)
			continue;

		/* Only plain unique indexes matter here: exclusion and primary key
		 * indexes are covered by the constraint validation, and non-unique
		 * indexes pose no problem for compression. */
		if (idxform->indisexclusion || idxform->indisprimary || !idxform->indisunique)
			continue;

		/* Skip indexes already examined during constraint validation. */
		if (bms_is_member(idxform->indexrelid, ignore))
			continue;

		/* Warn for every key column of the unique index that is used
		 * neither as a segmentby nor as an orderby column. */
		for (int keyno = 0; keyno < idxform->indnkeyatts; keyno++)
		{
			int colno = idxform->indkey.values[keyno];
			Form_hypertable_compression col_def = get_col_info_for_attnum(ht, colinfo, colno);

			Ensure(col_def, "missing column definition for unique index");

			if (col_def->segmentby_column_index < 1 && col_def->orderby_column_index < 1)
				ereport(WARNING,
						(errmsg("column \"%s\" should be used for segmenting or ordering",
								NameStr(col_def->attname))));
		}
	}

	systable_endscan(scan);
	table_close(rel, AccessShareLock);
}

static void
check_modify_compression_options(Hypertable *ht, WithClauseResult *with_clause_options,
List *parsed_orderby_cols)
Expand Down Expand Up @@ -1132,8 +1194,15 @@ tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
check_modify_compression_options(ht, with_clause_options, orderby_cols);

compresscolinfo_init(&compress_cols, ht->main_table_relid, segmentby_cols, orderby_cols);
/* check if we can create a compressed hypertable with existing constraints */
constraint_list = validate_existing_constraints(ht, &compress_cols);

/* Check if we can create a compressed hypertable with existing
* constraints and indexes. */
{
Bitmapset *indexes = NULL;
constraint_list = validate_existing_constraints(ht, &compress_cols, &indexes);
validate_existing_indexes(ht, &compress_cols, indexes);
bms_free(indexes);
}

/* take explicit locks on catalog tables and keep them till end of txn */
LockRelationOid(catalog_get_table_id(ts_catalog_get(), HYPERTABLE), RowExclusiveLock);
Expand Down
1 change: 1 addition & 0 deletions tsl/test/expected/compression.out
Expand Up @@ -1904,6 +1904,7 @@ NOTICE: adding not-null constraint to column "time"

INSERT INTO ts_device_table SELECT generate_series(0,999,1), 1, 100, 20;
ALTER TABLE ts_device_table set(timescaledb.compress, timescaledb.compress_segmentby='location', timescaledb.compress_orderby='time');
WARNING: column "device" should be used for segmenting or ordering
SELECT compress_chunk(i) AS chunk_name FROM show_chunks('ts_device_table') i \gset
SELECT count(*) FROM ts_device_table;
count
Expand Down
22 changes: 22 additions & 0 deletions tsl/test/expected/compression_errors.out
Expand Up @@ -782,3 +782,25 @@ SELECT compress_chunk(show_chunks('readings'), TRUE);
INSERT INTO readings("time", candy, battery_temperature)
VALUES ('2022-11-11 11:11:11', 33, 0.3)
;
-- Segmentby checks should be done for unique indexes without
-- constraints, so create a table without constraints and add a unique
-- index and try to create a table without using the right segmentby
-- column.
CREATE TABLE table_unique_index(
location smallint not null,
device_id smallint not null,
time timestamptz not null,
value float8 not null
);
CREATE UNIQUE index ON table_unique_index(location, device_id, time);
SELECT table_name FROM create_hypertable('table_unique_index', 'time');
table_name
--------------------
table_unique_index
(1 row)

\set ON_ERROR_STOP 0
ALTER TABLE table_unique_index SET (timescaledb.compress);
WARNING: column "location" should be used for segmenting or ordering
WARNING: column "device_id" should be used for segmenting or ordering
\set ON_ERROR_STOP 1
17 changes: 17 additions & 0 deletions tsl/test/sql/compression_errors.sql
Expand Up @@ -469,3 +469,20 @@ SELECT compress_chunk(show_chunks('readings'), TRUE);
INSERT INTO readings("time", candy, battery_temperature)
VALUES ('2022-11-11 11:11:11', 33, 0.3)
;

-- Segmentby checks should be done for unique indexes without
-- constraints, so create a table without constraints and add a unique
-- index and try to create a table without using the right segmentby
-- column.
CREATE TABLE table_unique_index(
location smallint not null,
device_id smallint not null,
time timestamptz not null,
value float8 not null
);
CREATE UNIQUE index ON table_unique_index(location, device_id, time);
SELECT table_name FROM create_hypertable('table_unique_index', 'time');
\set ON_ERROR_STOP 0
ALTER TABLE table_unique_index SET (timescaledb.compress);
\set ON_ERROR_STOP 1

0 comments on commit 00282a0

Please sign in to comment.