Stats improvement for Uncompressed Chunks
During compression, autovacuum used to be disabled for the uncompressed
chunk and re-enabled after decompression. This led to PostgreSQL
maintenance issues. Don't disable autovacuum for the uncompressed chunk
anymore; let PostgreSQL take care of the stats in its natural way.

Fixes #309
shhnwz committed Feb 2, 2023
1 parent 9133319 commit db715f9
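
For context on what is being removed: before this commit, compressing a chunk set the autovacuum_enabled storage parameter to false on the (now empty) uncompressed chunk, and decompression set it back. The sketch below is a minimal illustration of that reloption toggle, modeled on the deleted disable_autovacuum_on_chunk and restore_autovacuum_on_decompress helpers in tsl/src/compression/api.c shown further down in this diff; the set_chunk_autovacuum name and the exact includes are illustrative assumptions, not code from the repository.

/* Illustrative sketch only: the reloption change that compression/decompression
 * used to make before this commit, collapsed into one helper. After this commit
 * no such change is made, and autovacuum/ANALYZE treat the uncompressed chunk
 * like any other table.
 */
#include "postgres.h"
#include "nodes/makefuncs.h"
#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"

static void
set_chunk_autovacuum(Oid chunk_relid, bool enable)
{
	/* Equivalent to: ALTER TABLE <chunk> SET (autovacuum_enabled = 'true'|'false') */
	AlterTableCmd at_cmd = {
		.type = T_AlterTableCmd,
		.subtype = AT_SetRelOptions,
		.def = (Node *) list_make1(
			makeDefElem("autovacuum_enabled",
						(Node *) makeString(enable ? "true" : "false"), -1)),
	};

	/* ts_alter_table_with_event_trigger() is the TimescaleDB wrapper the removed
	 * helpers used to apply the command to the chunk relation. */
	ts_alter_table_with_event_trigger(chunk_relid, NULL, list_make1(&at_cmd), false);
}
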
Showing 29 changed files with 966 additions and 1,357 deletions.
7 changes: 0 additions & 7 deletions src/cross_module_fn.c
@@ -381,12 +381,6 @@ func_call_on_data_nodes_default(FunctionCallInfo finfo, List *data_node_oids)
pg_unreachable();
}

static void
update_compressed_chunk_relstats_default(Oid uncompressed_relid, Oid compressed_relid)
{
error_no_default_fn_community();
}

static void
dist_update_stale_chunk_metadata_default(Chunk *new_chunk, List *chunk_data_nodes)
{
@@ -553,7 +547,6 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.chunk_create_empty_table = error_no_default_fn_pg_community,
.chunk_create_replica_table = error_no_default_fn_pg_community,
.hypertable_distributed_set_replication_factor = error_no_default_fn_pg_community,
.update_compressed_chunk_relstats = update_compressed_chunk_relstats_default,
.health_check = error_no_default_fn_pg_community,
};

1 change: 0 additions & 1 deletion src/cross_module_fn.h
@@ -201,7 +201,6 @@ typedef struct CrossModuleFunctions
PGFunction chunk_freeze_chunk;
PGFunction chunk_unfreeze_chunk;
PGFunction chunks_drop_stale;
void (*update_compressed_chunk_relstats)(Oid uncompressed_relid, Oid compressed_relid);
PGFunction health_check;
} CrossModuleFunctions;

14 changes: 0 additions & 14 deletions src/planner/planner.c
@@ -1308,20 +1308,6 @@ timescaledb_get_relation_info_hook(PlannerInfo *root, Oid relation_objectid, boo
* IndexPaths at all
*/
rel->indexlist = NIL;

/* Relation size estimates are messed up on compressed chunks due to there
* being no actual pages for the table in the storage manager.
*/
rel->pages = (BlockNumber) uncompressed_chunk->rd_rel->relpages;
rel->tuples = (double) uncompressed_chunk->rd_rel->reltuples;
if (rel->pages == 0)
rel->allvisfrac = 0.0;
else if (uncompressed_chunk->rd_rel->relallvisible >= (int32) rel->pages)
rel->allvisfrac = 1.0;
else
rel->allvisfrac =
(double) uncompressed_chunk->rd_rel->relallvisible / rel->pages;

table_close(uncompressed_chunk, NoLock);
}
}
81 changes: 7 additions & 74 deletions src/process_utility.c
@@ -768,30 +768,6 @@ typedef struct ChunkPair
Oid compressed_relid;
} ChunkPair;

static void
add_compressed_chunk_to_vacuum(Hypertable *ht, Oid comp_chunk_relid, void *arg)
{
VacuumCtx *ctx = (VacuumCtx *) arg;
Chunk *compressed_chunk = ts_chunk_get_by_relid(comp_chunk_relid, true);
VacuumRelation *chunk_vacuum_rel;

Chunk *chunk_parent;
/* chunk is from a compressed hypertable */
Assert(TS_HYPERTABLE_IS_INTERNAL_COMPRESSION_TABLE(ht));

/*chunks for internal compression table have a parent */
chunk_parent = ts_chunk_get_compressed_chunk_parent(compressed_chunk);
Assert(chunk_parent != NULL);

ChunkPair *cp = palloc(sizeof(ChunkPair));
cp->uncompressed_relid = chunk_parent->table_id;
cp->compressed_relid = comp_chunk_relid;
ctx->chunk_pairs = lappend(ctx->chunk_pairs, cp);
/* analyze/vacuum the compressed rel instead */
chunk_vacuum_rel = makeVacuumRelation(NULL, comp_chunk_relid, NIL);
ctx->chunk_rels = lappend(ctx->chunk_rels, chunk_vacuum_rel);
}

/* Adds a chunk to the list of tables to be vacuumed */
static void
add_chunk_to_vacuum(Hypertable *ht, Oid chunk_relid, void *arg)
@@ -801,30 +777,11 @@ add_chunk_to_vacuum(Hypertable *ht, Oid chunk_relid, void *arg)
VacuumRelation *chunk_vacuum_rel;
RangeVar *chunk_range_var;

/* If the chunk has an associated compressed chunk, analyze that instead
* When we compress a chunk, we save stats for the raw chunk, do
* not modify that. Data now lives in the compressed chunk, so
* analyze it.
*/
if (chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
{
Chunk *comp_chunk = ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, true);
ChunkPair *cp = palloc(sizeof(ChunkPair));
cp->uncompressed_relid = chunk_relid;
cp->compressed_relid = comp_chunk->table_id;
ctx->chunk_pairs = lappend(ctx->chunk_pairs, cp);
/* analyze/vacuum the compressed rel instead */
chunk_vacuum_rel = makeVacuumRelation(NULL, comp_chunk->table_id, NIL);
ctx->chunk_rels = lappend(ctx->chunk_rels, chunk_vacuum_rel);
}
else
{
chunk_range_var = copyObject(ctx->ht_vacuum_rel->relation);
chunk_range_var->relname = NameStr(chunk->fd.table_name);
chunk_range_var->schemaname = NameStr(chunk->fd.schema_name);
chunk_vacuum_rel =
makeVacuumRelation(chunk_range_var, chunk_relid, ctx->ht_vacuum_rel->va_cols);
}
chunk_range_var = copyObject(ctx->ht_vacuum_rel->relation);
chunk_range_var->relname = NameStr(chunk->fd.table_name);
chunk_range_var->schemaname = NameStr(chunk->fd.schema_name);
chunk_vacuum_rel =
makeVacuumRelation(chunk_range_var, chunk_relid, ctx->ht_vacuum_rel->va_cols);
ctx->chunk_rels = lappend(ctx->chunk_rels, chunk_vacuum_rel);
}

@@ -851,7 +808,6 @@ ts_get_all_vacuum_rels(bool is_vacuumcmd)
{
Form_pg_class classform = (Form_pg_class) GETSTRUCT(tuple);
Hypertable *ht;
Chunk *chunk;
Oid relid;

relid = classform->oid;
@@ -873,16 +829,8 @@

ht = ts_hypertable_cache_get_entry(hcache, relid, CACHE_FLAG_MISSING_OK);
if (ht)
{
if (hypertable_is_distributed(ht))
continue;
}
else
{
chunk = ts_chunk_get_by_relid(relid, false);
if (chunk && chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
continue;
}

/*
* Build VacuumRelation(s) specifying the table OIDs to be processed.
@@ -950,17 +898,8 @@ process_vacuum(ProcessUtilityArgs *args)
*/
if (hypertable_is_distributed(ht))
continue;

if (TS_HYPERTABLE_IS_INTERNAL_COMPRESSION_TABLE(ht))
{
ctx.ht_vacuum_rel = vacuum_rel;
foreach_chunk(ht, add_compressed_chunk_to_vacuum, &ctx);
}
else
{
ctx.ht_vacuum_rel = vacuum_rel;
foreach_chunk(ht, add_chunk_to_vacuum, &ctx);
}
ctx.ht_vacuum_rel = vacuum_rel;
foreach_chunk(ht, add_chunk_to_vacuum, &ctx);
}
}
vacuum_rels = lappend(vacuum_rels, vacuum_rel);
@@ -978,12 +917,6 @@

/* ACL permission checks inside vacuum_rel and analyze_rel called by this ExecVacuum */
ExecVacuum(args->parse_state, stmt, is_toplevel);
foreach (lc, ctx.chunk_pairs)
{
ChunkPair *cp = (ChunkPair *) lfirst(lc);
ts_cm_functions->update_compressed_chunk_relstats(cp->uncompressed_relid,
cp->compressed_relid);
}
}
/*
Restore original list. stmt->rels which has references to
51 changes: 1 addition & 50 deletions tsl/src/compression/api.c
@@ -240,47 +240,6 @@ compresschunkcxt_init(CompressChunkCxt *cxt, Cache *hcache, Oid hypertable_relid
cxt->srcht_chunk = srcchunk;
}

static void
disable_autovacuum_on_chunk(Oid chunk_relid)
{
AlterTableCmd at_cmd = {
.type = T_AlterTableCmd,
.subtype = AT_SetRelOptions,
.def = (Node *) list_make1(
makeDefElem("autovacuum_enabled", (Node *) makeString("false"), -1)),
};
ts_alter_table_with_event_trigger(chunk_relid, NULL, list_make1(&at_cmd), false);
}

/* This function is intended to undo the disabling of autovacuum done when we compressed a chunk.
* Note that we do not cache the previous value for this (as we don't expect users to toggle this
* for individual chunks), so we use the hypertable's setting to determine whether to enable this on
* the decompressed chunk.
*/
static void
restore_autovacuum_on_decompress(Oid uncompressed_hypertable_relid, Oid uncompressed_chunk_relid)
{
Relation tablerel = table_open(uncompressed_hypertable_relid, AccessShareLock);
bool ht_autovac_enabled =
tablerel->rd_options ? ((StdRdOptions *) (tablerel)->rd_options)->autovacuum.enabled : true;

table_close(tablerel, AccessShareLock);
if (ht_autovac_enabled)
{
AlterTableCmd at_cmd = {
.type = T_AlterTableCmd,
.subtype = AT_SetRelOptions,
.def = (Node *) list_make1(
makeDefElem("autovacuum_enabled", (Node *) makeString("true"), -1)),
};

ts_alter_table_with_event_trigger(uncompressed_chunk_relid,
NULL,
list_make1(&at_cmd),
false);
}
}

static Chunk *
find_chunk_to_merge_into(Hypertable *ht, Chunk *current_chunk)
{
@@ -415,10 +374,7 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
/* acquire locks on src and compress hypertable and src chunk */
LockRelationOid(cxt.srcht->main_table_relid, AccessShareLock);
LockRelationOid(cxt.compress_ht->main_table_relid, AccessShareLock);
LockRelationOid(cxt.srcht_chunk->table_id, ShareLock);

/* Disabling autovacuum on chunk which should be empty while in compressed state */
disable_autovacuum_on_chunk(chunk_relid);
LockRelationOid(cxt.srcht_chunk->table_id, ExclusiveLock);

/* acquire locks on catalog tables to keep till end of txn */
LockRelationOid(catalog_get_table_id(ts_catalog_get(), HYPERTABLE_COMPRESSION),
@@ -516,7 +472,6 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
colinfo_array,
htcols_listlen);

merge_chunk_relstats(mergable_chunk->table_id, cxt.srcht_chunk->table_id);
ts_chunk_merge_on_dimension(mergable_chunk, cxt.srcht_chunk, time_dim->fd.id);

if (chunk_unordered)
@@ -637,10 +592,6 @@ decompress_chunk_impl(Oid uncompressed_hypertable_relid, Oid uncompressed_chunk_
*/
LockRelationOid(compressed_chunk->table_id, AccessExclusiveLock);
ts_chunk_drop(compressed_chunk, DROP_RESTRICT, -1);

/* reenable autovacuum if necessary */
restore_autovacuum_on_decompress(uncompressed_hypertable_relid, uncompressed_chunk_relid);

ts_cache_release(hcache);
return true;
}
110 changes: 0 additions & 110 deletions tsl/src/compression/compression.c
@@ -190,70 +190,6 @@ get_compressed_data_header(Datum data)
return header;
}

static void
capture_pgclass_stats(Oid table_oid, int *out_pages, int *out_visible, float *out_tuples)
{
Relation pg_class = table_open(RelationRelationId, RowExclusiveLock);
HeapTuple tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(table_oid));
Form_pg_class classform;

if (!HeapTupleIsValid(tuple))
elog(ERROR, "could not find tuple for relation %u", table_oid);

classform = (Form_pg_class) GETSTRUCT(tuple);

*out_pages = classform->relpages;
*out_visible = classform->relallvisible;
*out_tuples = classform->reltuples;

heap_freetuple(tuple);
table_close(pg_class, RowExclusiveLock);
}

static void
restore_pgclass_stats(Oid table_oid, int pages, int visible, float tuples)
{
Relation pg_class;
HeapTuple tuple;
Form_pg_class classform;

pg_class = table_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(table_oid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "could not find tuple for relation %u", table_oid);
classform = (Form_pg_class) GETSTRUCT(tuple);

classform->relpages = pages;
classform->relallvisible = visible;
classform->reltuples = tuples;

CatalogTupleUpdate(pg_class, &tuple->t_self, tuple);

heap_freetuple(tuple);
table_close(pg_class, RowExclusiveLock);
}

/* Merge the relstats when merging chunks while compressing them.
* We need to do this in order to update the relstats of the chunk
* that is merged into since the compressed one will be dropped by
* the merge.
*/
extern void
merge_chunk_relstats(Oid merged_relid, Oid compressed_relid)
{
int comp_pages, merged_pages, comp_visible, merged_visible;
float comp_tuples, merged_tuples;

capture_pgclass_stats(compressed_relid, &comp_pages, &comp_visible, &comp_tuples);
capture_pgclass_stats(merged_relid, &merged_pages, &merged_visible, &merged_tuples);

merged_pages += comp_pages;
merged_visible += comp_visible;
merged_tuples += comp_tuples;

restore_pgclass_stats(merged_relid, merged_pages, merged_visible, merged_tuples);
}

/* Truncate the relation WITHOUT applying triggers. This is the
* main difference with ExecuteTruncate. Triggers aren't applied
* because the data remains, just in compressed form. Also don't
@@ -267,16 +203,13 @@
* be a lock upgrade. */
Relation rel = table_open(table_oid, AccessExclusiveLock);
Oid toast_relid;
int pages, visible;
float tuples;

/* Chunks should never have fks into them, but double check */
if (fks != NIL)
elog(ERROR, "found a FK into a chunk while truncating");

CheckTableForSerializableConflictIn(rel);

capture_pgclass_stats(table_oid, &pages, &visible, &tuples);
RelationSetNewRelfilenode(rel, rel->rd_rel->relpersistence);

toast_relid = rel->rd_rel->reltoastrelid;
Expand All @@ -299,7 +232,6 @@ truncate_relation(Oid table_oid)
#endif
reindex_relation(table_oid, REINDEX_REL_PROCESS_TOAST, options);
rel = table_open(table_oid, AccessExclusiveLock);
restore_pgclass_stats(table_oid, pages, visible, tuples);
CommandCounterIncrement();
table_close(rel, NoLock);
}
@@ -1996,45 +1928,3 @@ compression_get_toast_storage(CompressionAlgorithms algorithm)
elog(ERROR, "invalid compression algorithm %d", algorithm);
return definitions[algorithm].compressed_data_storage;
}

/* Get relstats from compressed chunk and insert into relstats for the
* corresponding chunk (that held the uncompressed data) from raw hypertable
*/
extern void
update_compressed_chunk_relstats(Oid uncompressed_relid, Oid compressed_relid)
{
double rowcnt;
int comp_pages, uncomp_pages, comp_visible, uncomp_visible;
float comp_tuples, uncomp_tuples, out_tuples;
Chunk *uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_relid, true);
Chunk *compressed_chunk = ts_chunk_get_by_relid(compressed_relid, true);

if (uncompressed_chunk->table_id != uncompressed_relid ||
uncompressed_chunk->fd.compressed_chunk_id != compressed_chunk->fd.id ||
compressed_chunk->table_id != compressed_relid)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("mismatched chunks for relstats update on compressed chunk \"%s\"",
get_rel_name(uncompressed_relid))));
}

capture_pgclass_stats(uncompressed_relid, &uncomp_pages, &uncomp_visible, &uncomp_tuples);

/* Before compressing a chunk in 2.0, we save its stats. Prior
* releases do not support this. So the stats on uncompressed relid
* could be invalid. In this case, do the best that we can.
*/
if (uncomp_tuples == 0)
{
/* we need page info from compressed relid */
capture_pgclass_stats(compressed_relid, &comp_pages, &comp_visible, &comp_tuples);
rowcnt = (double) ts_compression_chunk_size_row_count(uncompressed_chunk->fd.id);
if (rowcnt > 0)
out_tuples = (float4) rowcnt;
else
out_tuples = (float4) comp_tuples;
restore_pgclass_stats(uncompressed_relid, comp_pages, comp_visible, out_tuples);
CommandCounterIncrement();
}
}
