Remove no longer used compression code #5183

Merged
merged 1 commit on Jan 16, 2023
5 changes: 1 addition & 4 deletions src/cross_module_fn.c
@@ -507,10 +507,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.dictionary_compressor_finish = error_no_default_fn_pg_community,
.array_compressor_append = error_no_default_fn_pg_community,
.array_compressor_finish = error_no_default_fn_pg_community,
.compress_row_init = NULL,
.compress_row_exec = NULL,
.compress_row_end = NULL,
.compress_row_destroy = NULL,

.data_node_add = error_no_default_fn_pg_community,
.data_node_delete = error_no_default_fn_pg_community,
.data_node_attach = error_no_default_fn_pg_community,
5 changes: 0 additions & 5 deletions src/cross_module_fn.h
@@ -34,7 +34,6 @@ typedef struct JsonbParseState JsonbParseState;
typedef struct Hypertable Hypertable;
typedef struct Chunk Chunk;
typedef struct CopyChunkState CopyChunkState;
typedef struct CompressSingleRowState CompressSingleRowState;

typedef struct CrossModuleFunctions
{
@@ -203,10 +202,6 @@ typedef struct CrossModuleFunctions
PGFunction chunk_unfreeze_chunk;
PGFunction chunks_drop_stale;
void (*update_compressed_chunk_relstats)(Oid uncompressed_relid, Oid compressed_relid);
CompressSingleRowState *(*compress_row_init)(int srcht_id, Relation in_rel, Relation out_rel);
TupleTableSlot *(*compress_row_exec)(CompressSingleRowState *cr, TupleTableSlot *slot);
void (*compress_row_end)(CompressSingleRowState *cr);
void (*compress_row_destroy)(CompressSingleRowState *cr);
PGFunction health_check;
} CrossModuleFunctions;

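For context: `CrossModuleFunctions` is TimescaleDB's cross-module dispatch table. The community build fills `ts_cm_functions_default` mostly with `error_no_default_fn_pg_community` stubs (the removed `compress_row_*` slots were simply `NULL`), and the loadable TSL library supplies the real implementations through `tsl_cm_functions`, so retiring a feature means deleting its slot from both tables and from this header. Below is a minimal sketch of that pattern, not TimescaleDB source; the names `module_api`, `community_stub`, and `demo_compress` are invented for illustration.

/*
 * Minimal sketch of the cross-module dispatch pattern, NOT TimescaleDB
 * source: the community table routes a slot to an error stub, and loading
 * the TSL module swaps in a real implementation. The names module_api,
 * community_stub and demo_compress are invented.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct module_api
{
	int (*compress_row)(int value); /* stand-in for the compress_row_* slots */
} module_api;

static int
community_stub(int value)
{
	(void) value;
	fprintf(stderr, "this feature requires the TSL module\n");
	exit(1);
}

static int
demo_compress(int value)
{
	return value / 2; /* placeholder "compression" */
}

/* community defaults, in the spirit of ts_cm_functions_default */
static module_api current_api = { .compress_row = community_stub };

/* what loading the TSL library effectively does via tsl_cm_functions */
static void
load_tsl(void)
{
	current_api.compress_row = demo_compress;
}

int
main(void)
{
	load_tsl();
	printf("%d\n", current_api.compress_row(84)); /* prints 42 */
	return 0;
}

The payoff of this layout is that callers always go through one table; community and TSL builds differ only in what the slots point to, which is why this PR has to touch the default table, the struct definition, and the TSL table in lockstep.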
2 changes: 1 addition & 1 deletion tsl/src/compression/README.md
@@ -33,7 +33,7 @@ The `simple8b rle` algorithm is a building block for many of the compression alg
It compresses a series of `uint64` values. It compresses the data by packing the values into the least
amount of bits necessary for the magnitude of the int values, using run-length-encoding for large numbers of repeated values,
A complete description is in the header file. Note that this is a header-only implementation as performance
-is paramount here as it is used a primitive in all the other compression algorithms.
+is paramount here as it is used as a primitive in all the other compression algorithms.

## Compression Algorithms

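The README paragraph above describes the two ingredients of `simple8b rle`: pack a block of `uint64` values using the fewest bits that still hold the largest value in the block, and fall back to run-length encoding for long runs of an identical value. The sketch below is a toy illustration of those two ideas in plain C, not the header-only implementation; the helpers `min_bit_width` and `run_length` are invented names, and real simple8b variants also spend selector bits per 64-bit word, so they fit fewer values than the naive `64 / bits` printed here.

/*
 * Toy sketch of the two ideas behind simple8b rle: minimal bit width for a
 * block, and run detection for repeated values. Not the real algorithm.
 */
#include <stdint.h>
#include <stdio.h>

/* Smallest number of bits that can represent every value in the block. */
static unsigned
min_bit_width(const uint64_t *vals, int n)
{
	uint64_t max = 0;
	unsigned bits = 1;

	for (int i = 0; i < n; i++)
		if (vals[i] > max)
			max = vals[i];

	while (bits < 64 && (max >> bits) != 0)
		bits++;
	return bits;
}

/* Length of the run of identical values starting at vals[0]. */
static int
run_length(const uint64_t *vals, int n)
{
	int len = 1;

	while (len < n && vals[len] == vals[0])
		len++;
	return len;
}

int
main(void)
{
	uint64_t block[] = { 3, 7, 7, 7, 7, 7, 5 };
	int n = sizeof(block) / sizeof(block[0]);
	unsigned bits = min_bit_width(block, n);

	printf("bit width: %u, values per 64-bit word: %u\n", bits, 64 / bits);
	printf("run of %d copies of %llu starting at index 1\n",
		   run_length(block + 1, n - 1),
		   (unsigned long long) block[1]);
	return 0;
}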
172 changes: 0 additions & 172 deletions tsl/src/compression/compression.c
@@ -2029,175 +2029,3 @@ update_compressed_chunk_relstats(Oid uncompressed_relid, Oid compressed_relid)
CommandCounterIncrement();
}
}

typedef struct CompressSingleRowState
{
Relation in_rel;
Relation out_rel;
RowCompressor row_compressor;
TupleTableSlot *out_slot;
} CompressSingleRowState;

static TupleTableSlot *compress_singlerow(CompressSingleRowState *cr, TupleTableSlot *in_slot);

CompressSingleRowState *
compress_row_init(int srcht_id, Relation in_rel, Relation out_rel)
{
ListCell *lc;
List *htcols_list = NIL;
int i = 0, cclen;
const ColumnCompressionInfo **ccinfo;
TupleDesc in_desc = RelationGetDescr(in_rel);
TupleDesc out_desc = RelationGetDescr(out_rel);
int16 *in_column_offsets;
int n_keys;
const ColumnCompressionInfo **keys;

CompressSingleRowState *cr = palloc(sizeof(CompressSingleRowState));
cr->out_slot =
MakeSingleTupleTableSlot(RelationGetDescr(out_rel), table_slot_callbacks(out_rel));
cr->in_rel = in_rel;
cr->out_rel = out_rel;

/* get compression properties for hypertable */
htcols_list = ts_hypertable_compression_get(srcht_id);
cclen = list_length(htcols_list);
ccinfo = palloc(sizeof(ColumnCompressionInfo *) * cclen);
foreach (lc, htcols_list)
{
FormData_hypertable_compression *fd = (FormData_hypertable_compression *) lfirst(lc);
ccinfo[i++] = fd;
}
in_column_offsets =
compress_chunk_populate_keys(RelationGetRelid(in_rel), ccinfo, cclen, &n_keys, &keys);
row_compressor_init(&cr->row_compressor,
in_desc,
out_rel,
cclen,
ccinfo,
in_column_offsets,
out_desc->natts,
false /*need_bistate*/);
return cr;
}

/* create a single row compressed tuple from data in slot */
TupleTableSlot *
compress_row_exec(CompressSingleRowState *cr, TupleTableSlot *slot)
{
TupleTableSlot *compress_slot;
slot_getallattrs(slot);

cr->row_compressor.rows_compressed_into_current_value = 0;
row_compressor_update_group(&cr->row_compressor, slot);
row_compressor_append_row(&cr->row_compressor, slot);
compress_slot = compress_singlerow(cr, slot);
return compress_slot;
}

static TupleTableSlot *
compress_singlerow(CompressSingleRowState *cr, TupleTableSlot *in_slot)
{
Datum *invalues, *out_values;
bool *out_isnull;
TupleTableSlot *out_slot = cr->out_slot;
RowCompressor *row_compressor = &cr->row_compressor;

ExecClearTuple(out_slot);

/* ExecClearTuple above will leave dropped columns as non-null, which will
* cause a segmentation fault in `heap_compute_data_size` since that
* function expects dropped columns to have the null bit set. Since the
* null bits are set below for all columns except */
memset(out_slot->tts_isnull,
true,
sizeof(*out_slot->tts_isnull) * out_slot->tts_tupleDescriptor->natts);

invalues = in_slot->tts_values;
out_values = out_slot->tts_values;
out_isnull = out_slot->tts_isnull;

/* Possible optimization: Can we do a pass through compression without a
* full copy? full copy needed for multiple values. But we are dealing
* only with a single value, so just need the result of transformation
* after passing it through the compressor function This probably needs a
* bit of rewrite of the compression algorithm code
*/
Assert(row_compressor->n_input_columns == in_slot->tts_tupleDescriptor->natts);
for (int col = 0; col < row_compressor->n_input_columns; col++)
{
PerColumn *column = &row_compressor->per_column[col];
Compressor *compressor = row_compressor->per_column[col].compressor;
int in_colno = col;

int16 out_colno = row_compressor->uncompressed_col_to_compressed_col[col];
/* if there is no compressor, this must be a segmenter */
if (compressor != NULL)
{
void *compressed_data;
compressed_data = compressor->finish(compressor);
out_isnull[out_colno] = (compressed_data == NULL);
if (compressed_data)
out_values[out_colno] = PointerGetDatum(compressed_data);
if (column->min_max_metadata_builder != NULL)
{
if (compressed_data)
{
/* we can copy directly since we have only 1 row. */
out_isnull[column->min_metadata_attr_offset] = false;
out_isnull[column->max_metadata_attr_offset] = false;
out_values[column->min_metadata_attr_offset] = invalues[in_colno];
out_values[column->max_metadata_attr_offset] = invalues[in_colno];
}
else
{
out_isnull[column->min_metadata_attr_offset] = true;
out_isnull[column->max_metadata_attr_offset] = true;
}

segment_meta_min_max_builder_reset(column->min_max_metadata_builder);
}
}
/* if there is no compressor, this must be a segmenter */
else if (column->segment_info != NULL)
{
out_isnull[out_colno] = column->segment_info->is_null;
if (column->segment_info->is_null)
out_values[out_colno] = 0;
else
out_values[out_colno] = invalues[in_colno];
}
else
{
/* we have a 1-1 column mapping from uncompressed -> compressed chunk.
* However, some columns could have been dropped from the uncompressed
*chunk before the compressed one is created.
*/
Assert(out_colno == 0);
}
}

/* fill in additional meta data info */
out_values[row_compressor->count_metadata_column_offset] =
Int32GetDatum(1); /*we have only 1 row */
out_isnull[row_compressor->count_metadata_column_offset] = false;
/* Add an invalid sequence number */
out_values[row_compressor->sequence_num_metadata_column_offset] = Int32GetDatum(0);
out_isnull[row_compressor->sequence_num_metadata_column_offset] = false;

Assert(row_compressor->rows_compressed_into_current_value == 1);
ExecStoreVirtualTuple(out_slot);
return out_slot;
}

void
compress_row_end(CompressSingleRowState *cr)
{
row_compressor_finish(&cr->row_compressor);
}

void
compress_row_destroy(CompressSingleRowState *cr)
{
ExecDropSingleTupleTableSlot(cr->out_slot);
}
9 changes: 0 additions & 9 deletions tsl/src/compression/compression.h
@@ -153,13 +153,4 @@ extern DecompressionIterator *(*tsl_get_decompression_iterator_init(
extern void update_compressed_chunk_relstats(Oid uncompressed_relid, Oid compressed_relid);
extern void merge_chunk_relstats(Oid merged_relid, Oid compressed_relid);

/* CompressSingleRowState methods */
struct CompressSingleRowState;
typedef struct CompressSingleRowState CompressSingleRowState;

extern CompressSingleRowState *compress_row_init(int srcht_id, Relation in_rel, Relation out_rel);
extern TupleTableSlot *compress_row_exec(CompressSingleRowState *cr, TupleTableSlot *slot);
extern void compress_row_end(CompressSingleRowState *cr);
extern void compress_row_destroy(CompressSingleRowState *cr);

#endif
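The four declarations deleted above formed a per-row compression entry point: initialize once against a source hypertable and an input/output relation pair, feed one tuple at a time, then finish and tear down. The sketch below is a hypothetical caller written against those removed signatures, compilable only in a source tree that still has them; the wrapper name `compress_one_slot`, the lock levels, and the include paths are assumptions for illustration, not code from the repository.

/*
 * Hypothetical caller of the removed per-row API; names, lock levels and
 * include paths are assumptions made for this sketch.
 */
#include "postgres.h"
#include "access/relation.h"
#include "storage/lockdefs.h"
#include "executor/tuptable.h"
#include "compression/compression.h"

static void
compress_one_slot(int hypertable_id, Oid in_relid, Oid out_relid, TupleTableSlot *slot)
{
	Relation in_rel;
	Relation out_rel;
	CompressSingleRowState *cr;
	TupleTableSlot *compressed;

	in_rel = relation_open(in_relid, AccessShareLock);
	out_rel = relation_open(out_relid, RowExclusiveLock);

	/* init once: looks up the hypertable's compression settings */
	cr = compress_row_init(hypertable_id, in_rel, out_rel);

	/* per tuple: returns a virtual slot holding the single compressed row */
	compressed = compress_row_exec(cr, slot);

	/* a real caller would write `compressed` to out_rel while the slot is live */
	(void) compressed;

	compress_row_end(cr);	  /* finishes the row compressor */
	compress_row_destroy(cr); /* drops the output slot */

	relation_close(out_rel, NoLock);
	relation_close(in_rel, NoLock);
}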
6 changes: 2 additions & 4 deletions tsl/src/init.c
@@ -162,6 +162,7 @@ CrossModuleFunctions tsl_cm_functions = {
.invalidation_process_cagg_log = tsl_invalidation_process_cagg_log,
.cagg_try_repair = tsl_cagg_try_repair,

/* Compression */
.compressed_data_decompress_forward = tsl_compressed_data_decompress_forward,
.compressed_data_decompress_reverse = tsl_compressed_data_decompress_reverse,
.compressed_data_send = tsl_compressed_data_send,
@@ -181,10 +182,7 @@ CrossModuleFunctions tsl_cm_functions = {
.process_rename_cmd = tsl_process_rename_cmd,
.compress_chunk = tsl_compress_chunk,
.decompress_chunk = tsl_decompress_chunk,
.compress_row_init = compress_row_init,
.compress_row_exec = compress_row_exec,
.compress_row_end = compress_row_end,
.compress_row_destroy = compress_row_destroy,

.data_node_add = data_node_add,
.data_node_delete = data_node_delete,
.data_node_attach = data_node_attach,