Merge branch 'main' into group-by-PR
sb230132 committed Aug 17, 2022
2 parents 7cb22e9 + 5c96d25 commit 9c24007
Showing 51 changed files with 3,178 additions and 145 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -15,11 +15,13 @@ accidentally triggering the load of a previous DB version.**
* #4486 Adding boolean column with default value doesn't work on compressed table
* #4555 Handle properly default privileges on Continuous Aggregates
* #4575 Fix use of `get_partition_hash` and `get_partition_for_key` inside an IMMUTABLE function
* #4416 Handle TRUNCATE TABLE on chunks

**Thanks**
@janko for reporting
@AlmiS for reporting error on `get_partition_hash` executed inside an IMMUTABLE function
@michaelkitson for reporting permission errors using default privileges on Continuous Aggregates
@jayadevanm for reporting error of TRUNCATE TABLE on compressed chunk

## 2.7.2 (2022-07-26)

25 changes: 13 additions & 12 deletions coccinelle/hash_create.cocci
@@ -5,34 +5,35 @@
// to be explicit about the memory context our hash tables live in so we enforce
// usage of the flag.
@ hash_create @
expression res;
position p;
@@

res@p = hash_create(...);
hash_create@p(...)

@safelist@
expression res;
expression arg1, arg2, arg3;
expression w1, w2;
expression flags;
position hash_create.p;
@@
(
res@p = hash_create(arg1,arg2,arg3, w1 | HASH_CONTEXT | w2);
hash_create@p(arg1,arg2,arg3, w1 | HASH_CONTEXT | w2)
|
res@p = hash_create(arg1,arg2,arg3, w1 | HASH_CONTEXT);
hash_create@p(arg1,arg2,arg3, w1 | HASH_CONTEXT)
|
res@p = hash_create(arg1,arg2,arg3, HASH_CONTEXT | w2 );
|
Assert(flags & HASH_CONTEXT);
res@p = hash_create(arg1,arg2,arg3, flags);
hash_create@p(arg1,arg2,arg3, HASH_CONTEXT | w2 )
)
@ depends on !safelist @
@safelist2@
expression res;
expression arg1, arg2, arg3;
expression flags;
position hash_create.p;
@@
Assert(flags & HASH_CONTEXT);
res = hash_create@p(arg1,arg2,arg3, flags);
@ depends on !safelist && !safelist2 @
position hash_create.p;
@@

+ /* hash_create without HASH_CONTEXT flag */
res@p = hash_create(...);
hash_create@p(...)
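
For reference, a call that this rule treats as safe looks roughly like the sketch below; the struct and function names are illustrative only, not part of this commit. The point is that the table's memory context is set explicitly via .hcxt and HASH_CONTEXT is included in the flags, which mirrors the bgw_launcher.c change further down.

#include <postgres.h>
#include <utils/hsearch.h>
#include <utils/memutils.h>

typedef struct ExampleEntry
{
	Oid key;
	int value;
} ExampleEntry;

static HTAB *
create_example_htab(void)
{
	/* Pin the table to an explicit memory context and pass HASH_CONTEXT,
	 * which is exactly what the coccinelle rule above checks for. */
	HASHCTL ctl = { .keysize = sizeof(Oid),
					.entrysize = sizeof(ExampleEntry),
					.hcxt = TopMemoryContext };

	return hash_create("example_htab", 16, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}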

24 changes: 22 additions & 2 deletions src/compat/compat.h
@@ -535,8 +535,6 @@ get_reindex_options(ReindexStmt *stmt)
make_new_heap(tableOid, tableSpace, relpersistence, ExclusiveLock)
#endif

#endif /* TIMESCALEDB_COMPAT_H */

/*
* PostgreSQL < 14 does not have F_TIMESTAMPTZ_GT macro but instead has
* the oid of that function as F_TIMESTAMP_GT even though the signature
@@ -547,3 +545,25 @@ get_reindex_options(ReindexStmt *stmt)
#define F_TIMESTAMPTZ_GE F_TIMESTAMP_GE
#define F_TIMESTAMPTZ_GT F_TIMESTAMP_GT
#endif

/*
* PostgreSQL 15 removed the "utils/int8.h" header and changed the "scanint8"
* function to "pg_strtoint64" in "utils/builtins.h".
*
* https://github.com/postgres/postgres/commit/cfc7191dfea330dd7a71e940d59de78129bb6175
*/
#if PG15_LT
#include <utils/int8.h>
static inline int64
pg_strtoint64(const char *str)
{
int64 result;
scanint8(str, false, &result);

return result;
}
#else
#include <utils/builtins.h>
#endif

#endif /* TIMESCALEDB_COMPAT_H */
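
With this shim in place, callers can use pg_strtoint64() unconditionally on every supported PostgreSQL version. A minimal usage sketch follows; the helper name is hypothetical and not something this commit adds.

#include <postgres.h>
#include "compat/compat.h"

/* Parse a bigint literal; on malformed input both the PG >= 15 builtin and
 * the scanint8()-based fallback above report an error via ereport(). */
static int64
parse_int64_literal(const char *str)
{
	return pg_strtoint64(str);
}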
6 changes: 4 additions & 2 deletions src/loader/bgw_launcher.c
@@ -293,12 +293,14 @@ register_entrypoint_for_db(Oid db_id, VirtualTransactionId vxid, BackgroundWorke
static HTAB *
init_database_htab(void)
{
HASHCTL info = { .keysize = sizeof(Oid), .entrysize = sizeof(DbHashEntry) };
HASHCTL info = { .keysize = sizeof(Oid),
.entrysize = sizeof(DbHashEntry),
.hcxt = TopMemoryContext };

return hash_create("launcher_db_htab",
ts_guc_max_background_workers,
&info,
HASH_BLOBS | HASH_ELEM);
HASH_BLOBS | HASH_CONTEXT | HASH_ELEM);
}

/* Insert a scheduler entry into the hash table. Correctly set entry values. */
51 changes: 38 additions & 13 deletions src/process_utility.c
@@ -1001,7 +1001,7 @@ process_truncate(ProcessUtilityArgs *args)
List *hypertables = NIL;
List *relations = NIL;
bool list_changed = false;
MemoryContext parsetreectx = GetMemoryChunkContext(args->parsetree);
MemoryContext oldctx, parsetreectx = GetMemoryChunkContext(args->parsetree);

/* For all hypertables, we drop the now empty chunks. We also propagate the
* TRUNCATE call to the compressed version of the hypertable, if it exists.
@@ -1037,7 +1037,6 @@ process_truncate(ProcessUtilityArgs *args)
if (cagg)
{
Hypertable *mat_ht, *raw_ht;
MemoryContext oldctx;

if (!relation_should_recurse(rv))
ereport(ERROR,
@@ -1071,13 +1070,14 @@
break;
}
case RELKIND_RELATION:
/* TRUNCATE for foreign tables not implemented yet. This will raise an error. */
case RELKIND_FOREIGN_TABLE:
{
Hypertable *ht =
ts_hypertable_cache_get_entry(hcache, relid, CACHE_FLAG_MISSING_OK);
Chunk *chunk;

if (!ht)
list_append = true;
else
if (ht)
{
ContinuousAggHypertableStatus agg_status;

@@ -1114,6 +1114,38 @@
*/
list_changed = true;
}
else if ((chunk = ts_chunk_get_by_relid(relid, false)) != NULL)
{ /* this is a chunk */
ht = ts_hypertable_cache_get_entry(hcache,
chunk->hypertable_relid,
CACHE_FLAG_NONE);

Assert(ht != NULL);

/* If the hypertable has continuous aggregates, then invalidate
* the truncated region. */
if (ts_continuous_agg_hypertable_status(ht->fd.id) == HypertableIsRawTable)
ts_continuous_agg_invalidate_chunk(ht, chunk);
/* Truncate the compressed chunk too. */
if (chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
{
Chunk *compressed_chunk =
ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, false);
if (compressed_chunk != NULL)
{
/* Create the list item in the same memory context as the list. */
oldctx = MemoryContextSwitchTo(parsetreectx);
rv = makeRangeVar(NameStr(compressed_chunk->fd.schema_name),
NameStr(compressed_chunk->fd.table_name),
-1);
MemoryContextSwitchTo(oldctx);
list_changed = true;
}
}
list_append = true;
}
else
list_append = true;
break;
}
}
@@ -1234,14 +1266,7 @@ process_drop_chunk(ProcessUtilityArgs *args, DropStmt *stmt)
/* If the hypertable has continuous aggregates, then invalidate
* the dropped region. */
if (ts_continuous_agg_hypertable_status(ht->fd.id) == HypertableIsRawTable)
{
int64 start = ts_chunk_primary_dimension_start(chunk);
int64 end = ts_chunk_primary_dimension_end(chunk);

Assert(hyperspace_get_open_dimension(ht->space, 0)->fd.id ==
chunk->cube->slices[0]->fd.dimension_id);
ts_cm_functions->continuous_agg_invalidate_raw_ht(ht, start, end);
}
ts_continuous_agg_invalidate_chunk(ht, chunk);
}
}

12 changes: 12 additions & 0 deletions src/ts_catalog/continuous_agg.c
@@ -31,6 +31,7 @@
#include "bgw/job.h"
#include "ts_catalog/continuous_agg.h"
#include "cross_module_fn.h"
#include "hypercube.h"
#include "hypertable.h"
#include "hypertable_cache.h"
#include "scan_iterator.h"
@@ -1351,6 +1352,17 @@ ts_continuous_agg_find_integer_now_func_by_materialization_id(int32 mat_htid)
return par_dim;
}

TSDLLEXPORT void
ts_continuous_agg_invalidate_chunk(Hypertable *ht, Chunk *chunk)
{
int64 start = ts_chunk_primary_dimension_start(chunk);
int64 end = ts_chunk_primary_dimension_end(chunk);

Assert(hyperspace_get_open_dimension(ht->space, 0)->fd.id ==
chunk->cube->slices[0]->fd.dimension_id);
ts_cm_functions->continuous_agg_invalidate_raw_ht(ht, start, end);
}

typedef struct Watermark
{
int32 hyper_id;
2 changes: 2 additions & 0 deletions src/ts_catalog/continuous_agg.h
@@ -174,6 +174,8 @@ extern TSDLLEXPORT const Dimension *
ts_continuous_agg_find_integer_now_func_by_materialization_id(int32 mat_htid);
extern ContinuousAgg *ts_continuous_agg_find_userview_name(const char *schema, const char *name);

extern TSDLLEXPORT void ts_continuous_agg_invalidate_chunk(Hypertable *ht, Chunk *chunk);

extern TSDLLEXPORT bool ts_continuous_agg_bucket_width_variable(const ContinuousAgg *agg);
extern TSDLLEXPORT int64 ts_continuous_agg_bucket_width(const ContinuousAgg *agg);

16 changes: 8 additions & 8 deletions src/ts_catalog/dimension_partition.c
@@ -260,22 +260,22 @@ create_dimension_partition_tuple(Relation rel, const DimensionPartition *dp)
}
else
{
Datum *dn_datums = palloc(sizeof(Datum) * list_length(dp->data_nodes));
int data_nodes_len = list_length(dp->data_nodes);
Datum *dn_datums = palloc(sizeof(Datum) * data_nodes_len);
NameData *dn_names = palloc(NAMEDATALEN * data_nodes_len);
ArrayType *dn_arr;
ListCell *lc;

foreach (lc, dp->data_nodes)
{
const char *dn = lfirst(lc);
dn_datums[i++] = CStringGetDatum(dn);
namestrcpy(&dn_names[i], dn);
dn_datums[i] = NameGetDatum(&dn_names[i]);
++i;
}

dn_arr = construct_array(dn_datums,
list_length(dp->data_nodes),
NAMEOID,
NAMEDATALEN,
false,
TYPALIGN_CHAR);
dn_arr =
construct_array(dn_datums, data_nodes_len, NAMEOID, NAMEDATALEN, false, TYPALIGN_CHAR);
values[AttrNumberGetAttrOffset(Anum_dimension_partition_data_nodes)] =
PointerGetDatum(dn_arr);
}
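
The change above exists because "name" is a fixed-width type: construct_array() copies NAMEDATALEN bytes per element, so each datum must point at a full NameData buffer rather than at a short C string. A minimal standalone sketch of the corrected pattern; the helper below is illustrative only, not part of this commit.

#include <postgres.h>
#include <catalog/pg_type.h>
#include <utils/array.h>
#include <utils/builtins.h>

/* Build a name[] array Datum from n C strings. */
static Datum
make_name_array(const char **names, int n)
{
	Datum *datums = palloc(sizeof(Datum) * n);
	NameData *bufs = palloc(NAMEDATALEN * n);

	for (int i = 0; i < n; i++)
	{
		/* Copy into a NAMEDATALEN-sized buffer; construct_array() reads the
		 * full NAMEDATALEN bytes for each element. */
		namestrcpy(&bufs[i], names[i]);
		datums[i] = NameGetDatum(&bufs[i]);
	}

	return PointerGetDatum(
		construct_array(datums, n, NAMEOID, NAMEDATALEN, false, TYPALIGN_CHAR));
}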
14 changes: 14 additions & 0 deletions test/expected/chunk_utils.out
@@ -283,6 +283,20 @@ SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id;
24 | 3 | 6 | 7
(24 rows)

-- Test that truncating chunks works
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
count
-------
1
(1 row)

TRUNCATE TABLE _timescaledb_internal._hyper_2_7_chunk;
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
count
-------
0
(1 row)

-- Drop one chunk "manually" and verify that dimension slices and
-- constraints are cleaned up. Each chunk has two constraints and two
-- dimension slices. Both constraints should be deleted, but only one
1 change: 1 addition & 0 deletions test/perl/TimescaleNode.pm
@@ -34,6 +34,7 @@ sub init
# template config file
$self->append_conf('postgresql.conf',
TestLib::slurp_file("$ENV{'CONFDIR'}/postgresql.conf"));
$self->append_conf('postgresql.conf', 'datestyle=ISO');
}

# helper function to check output from PSQL for a query
4 changes: 2 additions & 2 deletions test/pg_hba.conf.in
@@ -1,8 +1,8 @@
# TYPE DATABASE USER ADDRESS METHOD

# "local" is for Unix domain socket connections only
local all all trust
local replication all trust
@TEST_HBA_LOCAL@ all all trust
@TEST_HBA_LOCAL@ replication all trust
# IPv4 local connections:
hostssl all @TEST_ROLE_CLUSTER_SUPERUSER@ 127.0.0.1/32 cert clientcert=verify-full
hostssl all @TEST_ROLE_1@ 127.0.0.1/32 cert clientcert=verify-full
2 changes: 2 additions & 0 deletions test/postgresql.conf.in
@@ -6,6 +6,8 @@ shared_preload_libraries=timescaledb
max_worker_processes=24
autovacuum=false
random_page_cost=1.0
timezone='US/Pacific'
datestyle='Postgres, MDY'
timescaledb.license='apache'
@TELEMETRY_DEFAULT_SETTING@
timescaledb.last_tuned='1971-02-03 04:05:06.789012 -0300'
5 changes: 5 additions & 0 deletions test/sql/chunk_utils.sql
@@ -117,6 +117,11 @@ FULL OUTER JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension
ORDER BY c.id;
SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id;

-- Test that truncating chunks works
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
TRUNCATE TABLE _timescaledb_internal._hyper_2_7_chunk;
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;

-- Drop one chunk "manually" and verify that dimension slices and
-- constraints are cleaned up. Each chunk has two constraints and two
-- dimension slices. Both constraints should be deleted, but only one
7 changes: 7 additions & 0 deletions test/test-defs.cmake
@@ -42,6 +42,13 @@ set(TEST_SCHEDULE_SHARED
set(ISOLATION_TEST_SCHEDULE ${CMAKE_CURRENT_BINARY_DIR}/isolation_test_schedule)
set(TEST_PASSFILE ${TEST_OUTPUT_DIR}/pgpass.conf)

# Windows does not support local connections (unix domain sockets)
if(WIN32)
set(TEST_HBA_LOCAL "#local")
else()
set(TEST_HBA_LOCAL "local")
endif()

configure_file(${PRIMARY_TEST_DIR}/pg_hba.conf.in pg_hba.conf)
set(TEST_PG_HBA_FILE ${TEST_OUTPUT_DIR}/pg_hba.conf)

27 changes: 3 additions & 24 deletions tsl/src/bgw_policy/policies_v2.c
@@ -77,7 +77,6 @@ bool
validate_and_create_policies(policies_info all_policies, bool if_exists)
{
int refresh_job_id = 0, compression_job_id = 0, retention_job_id = 0;
bool error = false;
int64 refresh_interval = 0, compress_after = 0, drop_after = 0, drop_after_HT = 0;
int64 start_offset = 0, end_offset = 0, refresh_window_size = 0, refresh_total_interval = 0;
List *jobs = NIL;
@@ -167,33 +166,13 @@ validate_and_create_policies(policies_info all_policies, bool if_exists)
if (all_policies.refresh && all_policies.compress)
{
/* Check if refresh policy does not overlap with compression */
if (IS_INTEGER_TYPE(all_policies.partition_type))
{
if (refresh_total_interval > compress_after)
error = true;
}
else
{
if (refresh_total_interval > compress_after)
error = true;
}
if (error)
if (refresh_total_interval > compress_after)
emit_error(err_refresh_compress_overlap);
}
if (all_policies.refresh && all_policies.retention)
{
/* Check if refresh policy does not overlap with compression */
if (IS_INTEGER_TYPE(all_policies.partition_type))
{
if (refresh_total_interval > drop_after)
error = true;
}
else
{
if (refresh_total_interval > drop_after)
error = true;
}
if (error)
/* Check if refresh policy does not overlap with retention */
if (refresh_total_interval > drop_after)
emit_error(err_refresh_reten_overlap);
}
if (all_policies.retention && all_policies.compress)
1 change: 0 additions & 1 deletion tsl/src/continuous_aggs/create.c
@@ -51,7 +51,6 @@
#include <utils/rel.h>
#include <utils/builtins.h>
#include <utils/catcache.h>
#include <utils/int8.h>
#include <utils/regproc.h>
#include <utils/ruleutils.h>
#include <utils/syscache.h>