Skip to content

Commit

Permalink
Change return value of add_dimension to TABLE
Browse files Browse the repository at this point in the history
Change the return value of add_dimension to a record consisting
of dimension_id, schema_name, table_name, and column_name. This not only
improves user feedback about the success of the operation but also gives
the function an API that returns useful information for programmatic consumption.
  • Loading branch information
svenklemm committed Oct 15, 2018
1 parent 19299cf commit d9b2dfe
Show file tree
Hide file tree
Showing 8 changed files with 97 additions and 44 deletions.
2 changes: 1 addition & 1 deletion sql/ddl_api.sql
Expand Up @@ -138,7 +138,7 @@ CREATE OR REPLACE FUNCTION add_dimension(
chunk_time_interval ANYELEMENT = NULL::BIGINT,
partitioning_func REGPROC = NULL,
if_not_exists BOOLEAN = FALSE
) RETURNS VOID
) RETURNS TABLE(dimension_id INT, schema_name NAME, table_name NAME, column_name NAME)
AS '@MODULE_PATHNAME@', 'ts_dimension_add' LANGUAGE C VOLATILE;

CREATE OR REPLACE FUNCTION attach_tablespace(
Expand Down
1 change: 1 addition & 0 deletions sql/updates/1.0.0-rc2--1.0.0-dev.sql
Expand Up @@ -6,4 +6,5 @@ GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_catalog TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_config TO PUBLIC;

DROP FUNCTION IF EXISTS create_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc);
DROP FUNCTION IF EXISTS add_dimension(regclass,name,integer,anyelement,regproc,boolean);

52 changes: 45 additions & 7 deletions src/dimension.c
Expand Up @@ -506,7 +506,7 @@ dimension_tuple_update(TupleInfo *ti, void *data)
return false;
}

static void
static int32
dimension_insert_relation(Relation rel, int32 hypertable_id,
Name colname, Oid coltype, int16 num_slices,
regproc partitioning_func, int64 interval_length)
Expand All @@ -515,6 +515,7 @@ dimension_insert_relation(Relation rel, int32 hypertable_id,
Datum values[Natts_dimension];
bool nulls[Natts_dimension] = {false};
CatalogSecurityContext sec_ctx;
int32 dimension_id;

values[AttrNumberGetAttrOffset(Anum_dimension_hypertable_id)] = Int32GetDatum(hypertable_id);
values[AttrNumberGetAttrOffset(Anum_dimension_column_name)] = NameGetDatum(colname);
Expand Down Expand Up @@ -542,12 +543,15 @@ dimension_insert_relation(Relation rel, int32 hypertable_id,
}

catalog_become_owner(catalog_get(), &sec_ctx);
values[AttrNumberGetAttrOffset(Anum_dimension_id)] = Int32GetDatum(catalog_table_next_seq_id(catalog_get(), DIMENSION));
dimension_id = Int32GetDatum(catalog_table_next_seq_id(catalog_get(), DIMENSION));
values[AttrNumberGetAttrOffset(Anum_dimension_id)] = dimension_id;
catalog_insert_values(rel, desc, values, nulls);
catalog_restore_user(&sec_ctx);

return dimension_id;
}

static void
static int32
dimension_insert(int32 hypertable_id,
Name colname,
Oid coltype,
Expand All @@ -557,10 +561,12 @@ dimension_insert(int32 hypertable_id,
{
Catalog *catalog = catalog_get();
Relation rel;
int32 dimension_id;

rel = heap_open(catalog->tables[DIMENSION].id, RowExclusiveLock);
dimension_insert_relation(rel, hypertable_id, colname, coltype, num_slices, partitioning_func, interval_length);
dimension_id = dimension_insert_relation(rel, hypertable_id, colname, coltype, num_slices, partitioning_func, interval_length);
heap_close(rel, RowExclusiveLock);
return dimension_id;
}

int
Expand Down Expand Up @@ -995,8 +1001,35 @@ dimension_add_from_info(DimensionInfo *info)

Assert(info->ht != NULL);

dimension_insert(info->ht->fd.id, info->colname, info->coltype,
info->num_slices, info->partitioning_func, info->interval);
info->dimension_id = dimension_insert(info->ht->fd.id, info->colname, info->coltype,
info->num_slices, info->partitioning_func, info->interval);
}

/*
 * Create a datum to be returned by the add_dimension DDL function.
 *
 * Builds a composite (record) datum of the form
 * (dimension_id, schema_name, table_name, column_name), matching the
 * TABLE(...) return type declared for add_dimension in sql/ddl_api.sql.
 * Errors out if the calling context cannot accept a composite result.
 */
static Datum
dimension_create_datum(FunctionCallInfo fcinfo, DimensionInfo *info)
{
	TupleDesc	tupdesc;
	HeapTuple	tuple;
	Datum		values[Natts_add_dimension];
	bool		nulls[Natts_add_dimension] = {false};

	/* Resolve the composite result type from the function's declared signature */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("function returning record called in "
						"context that cannot accept type record")));

	tupdesc = BlessTupleDesc(tupdesc);
	/*
	 * Wrap the int32 ID with Int32GetDatum(): storing the raw int32 in a
	 * Datum slot relies on an implicit conversion that is not portable
	 * across Datum representations, and is inconsistent with the
	 * NameGetDatum() conversions below.
	 */
	values[AttrNumberGetAttrOffset(Anum_add_dimension_id)] = Int32GetDatum(info->dimension_id);
	values[AttrNumberGetAttrOffset(Anum_add_dimension_schema_name)] = NameGetDatum(&info->ht->fd.schema_name);
	values[AttrNumberGetAttrOffset(Anum_add_dimension_table_name)] = NameGetDatum(&info->ht->fd.table_name);
	values[AttrNumberGetAttrOffset(Anum_add_dimension_column_name)] = NameGetDatum(info->colname);
	tuple = heap_form_tuple(tupdesc, values, nulls);

	return HeapTupleGetDatum(tuple);
}

TS_FUNCTION_INFO_V1(ts_dimension_add);
Expand Down Expand Up @@ -1026,6 +1059,7 @@ ts_dimension_add(PG_FUNCTION_ARGS)
.partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4),
.if_not_exists = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5),
};
Datum retval = 0;

hypertable_permissions_check(info.table_relid, GetUserId());

Expand Down Expand Up @@ -1083,11 +1117,15 @@ ts_dimension_add(PG_FUNCTION_ARGS)
*/
info.ht = hypertable_get_by_id(info.ht->fd.id);
indexing_verify_indexes(info.ht);
retval = dimension_create_datum(fcinfo, &info);
}

cache_release(hcache);

PG_RETURN_VOID();
if (retval)
PG_RETURN_DATUM(retval);
else
PG_RETURN_NULL();
}

/* Used as a tuple found function */
Expand Down
14 changes: 14 additions & 0 deletions src/dimension.h
Expand Up @@ -86,6 +86,7 @@ typedef struct Hypertable Hypertable;
typedef struct DimensionInfo
{
Oid table_relid;
int32 dimension_id;
Name colname;
Oid coltype;
DimensionType type;
Expand All @@ -107,6 +108,19 @@ typedef struct DimensionInfo
(di)->colname != NULL && \
((di)->num_slices_is_set || OidIsValid((di)->interval_datum)))

/*
 * Attribute numbers for the record returned by the add_dimension DDL
 * function. Values are spelled out explicitly so the 1-based SQL attribute
 * numbering is visible at a glance; keep in sync with the RETURNS TABLE
 * declaration in sql/ddl_api.sql.
 */
enum Anum_add_dimension
{
	Anum_add_dimension_id = 1,
	Anum_add_dimension_schema_name = 2,
	Anum_add_dimension_table_name = 3,
	Anum_add_dimension_column_name = 4,
	_Anum_add_dimension_max = 5,
};

/* Number of attributes in the add_dimension result record */
#define Natts_add_dimension \
	(_Anum_add_dimension_max - 1)

extern Hyperspace *dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimension, MemoryContext mctx);
extern DimensionSlice *dimension_calculate_default_slice(Dimension *dim, int64 value);
extern Point *hyperspace_calculate_point(Hyperspace *h, HeapTuple tuple, TupleDesc tupdesc);
Expand Down
36 changes: 18 additions & 18 deletions test/expected/create_hypertable.out
Expand Up @@ -77,9 +77,9 @@ SELECT * FROM _timescaledb_internal.get_create_command('test_table');

--test adding one more closed dimension
select add_dimension('test_schema.test_table', 'location', 4);
add_dimension
---------------

add_dimension
-------------------------------------
(5,test_schema,test_table,location)
(1 row)

select * from _timescaledb_catalog.hypertable where table_name = 'test_table';
Expand Down Expand Up @@ -140,9 +140,9 @@ ERROR: get_create_command only supports hypertables with up to 2 dimensions
--test adding one more open dimension
select add_dimension('test_schema.test_table', 'id', chunk_time_interval => 1000);
NOTICE: adding not-null constraint to column "id"
add_dimension
---------------

add_dimension
-------------------------------
(6,test_schema,test_table,id)
(1 row)

select * from _timescaledb_catalog.hypertable where table_name = 'test_table';
Expand Down Expand Up @@ -173,9 +173,9 @@ NOTICE: adding not-null constraint to column "time"

SELECT add_dimension('dim_test_time', 'time2', chunk_time_interval => INTERVAL '1 day');
NOTICE: adding not-null constraint to column "time2"
add_dimension
---------------

add_dimension
--------------------------------
(8,public,dim_test_time,time2)
(1 row)

-- Test add_dimension: only integral should work on BIGINT columns
Expand All @@ -188,9 +188,9 @@ ERROR: invalid interval: must be an interval or integer type
\set ON_ERROR_STOP 1
SELECT add_dimension('dim_test_time', 'time3', chunk_time_interval => 500);
NOTICE: adding not-null constraint to column "time3"
add_dimension
---------------

add_dimension
--------------------------------
(9,public,dim_test_time,time3)
(1 row)

-- Test add_dimension: integrals should work on TIMESTAMPTZ columns
Expand All @@ -205,9 +205,9 @@ NOTICE: adding not-null constraint to column "time"
SELECT add_dimension('dim_test_time2', 'time2', chunk_time_interval => 500);
WARNING: unexpected interval: smaller than one second
NOTICE: adding not-null constraint to column "time2"
add_dimension
---------------

add_dimension
----------------------------------
(11,public,dim_test_time2,time2)
(1 row)

--adding a dimension twice should not fail with 'if_not_exists'
Expand Down Expand Up @@ -487,8 +487,8 @@ ERROR: invalid partitioning function
\set ON_ERROR_STOP 1
-- A valid function should work:
select add_dimension('test_schema.test_partfunc', 'device', 2, partitioning_func => 'partfunc_valid');
add_dimension
---------------

add_dimension
---------------------------------------
(18,test_schema,test_partfunc,device)
(1 row)

18 changes: 9 additions & 9 deletions test/expected/drop_chunks.out
Expand Up @@ -25,21 +25,21 @@ NOTICE: adding not-null constraint to column "time"

-- Add space dimensions to ensure chunks share dimension slices
SELECT add_dimension('public.drop_chunk_test1', 'device_id', 2);
add_dimension
---------------

add_dimension
---------------------------------------
(4,public,drop_chunk_test1,device_id)
(1 row)

SELECT add_dimension('public.drop_chunk_test2', 'device_id', 2);
add_dimension
---------------

add_dimension
---------------------------------------
(5,public,drop_chunk_test2,device_id)
(1 row)

SELECT add_dimension('public.drop_chunk_test3', 'device_id', 2);
add_dimension
---------------

add_dimension
---------------------------------------
(6,public,drop_chunk_test3,device_id)
(1 row)

SELECT c.id AS chunk_id, c.hypertable_id, c.schema_name AS chunk_schema, c.table_name AS chunk_table, ds.range_start, ds.range_end
Expand Down
6 changes: 3 additions & 3 deletions test/expected/insert_single.out
Expand Up @@ -283,9 +283,9 @@ NOTICE: adding not-null constraint to column "time"
(1 row)

SELECT add_dimension('"3dim"', 'location', 2);
add_dimension
---------------

add_dimension
--------------------------
(9,public,3dim,location)
(1 row)

INSERT INTO "3dim" VALUES('2017-01-20T09:00:01', 22.5, 'blue', 'nyc');
Expand Down
12 changes: 6 additions & 6 deletions test/expected/partitioning.out
Expand Up @@ -122,9 +122,9 @@ SELECT add_dimension('part_add_dim', 'location', 2, partitioning_func => 'bad_fu
ERROR: function "bad_func" does not exist at character 74
\set ON_ERROR_STOP 1
SELECT add_dimension('part_add_dim', 'location', 2, partitioning_func => '_timescaledb_internal.get_partition_for_key');
add_dimension
---------------

add_dimension
----------------------------------
(9,public,part_add_dim,location)
(1 row)

SELECT * FROM _timescaledb_catalog.dimension;
Expand Down Expand Up @@ -287,9 +287,9 @@ ERROR: cannot create a unique index without the column "device" (used in partit
DROP INDEX time_index;
CREATE UNIQUE INDEX time_space_index ON hyper_with_index(time, device);
SELECT add_dimension('hyper_with_index', 'device', 4);
add_dimension
---------------

add_dimension
-------------------------------------
(23,public,hyper_with_index,device)
(1 row)

CREATE TABLE hyper_with_primary(time TIMESTAMPTZ PRIMARY KEY, temp float, device int);
Expand Down

0 comments on commit d9b2dfe

Please sign in to comment.