diff --git a/.travis.yml b/.travis.yml
index 14942b2..f83f720 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,5 @@
 sudo: required
-dist: trusty
+dist: bionic
 language: c
 cache:
   apt: true
@@ -16,13 +16,16 @@ env:
   - PGVERSION=9.6
   - PGVERSION=10
   - PGVERSION=11
+  - PGVERSION=12
+
 before_install:
-  - git clone -b v0.7.9 --depth 1 https://github.com/citusdata/tools.git
+  - git clone -b v0.7.13 --depth 1 https://github.com/citusdata/tools.git
   - sudo make -C tools install
   - setup_apt
   - nuke_pg
 install:
   - sudo apt-get install protobuf-c-compiler
+  - sudo apt-get install libprotobuf-c0-dev
   - sudo locale-gen da_DK
   - sudo locale-gen da_DK.utf8
   - sudo pip install cpp-coveralls
diff --git a/Makefile b/Makefile
index d5dfd29..86471fa 100644
--- a/Makefile
+++ b/Makefile
@@ -46,8 +46,8 @@ ifndef MAJORVERSION
 	MAJORVERSION := $(basename $(VERSION))
 endif
 
-ifeq (,$(findstring $(MAJORVERSION), 9.3 9.4 9.5 9.6 10 11))
-	$(error PostgreSQL 9.3 or 9.4 or 9.5 or 9.6 or 10 or 11 is required to compile this extension)
+ifeq (,$(findstring $(MAJORVERSION), 9.3 9.4 9.5 9.6 10 11 12))
+	$(error PostgreSQL 9.3 to 12 is required to compile this extension)
 endif
 
 cstore.pb-c.c: cstore.proto
diff --git a/cstore_compression.c b/cstore_compression.c
index 9c5c55d..3b37fd4 100644
--- a/cstore_compression.c
+++ b/cstore_compression.c
@@ -141,9 +141,16 @@ DecompressBuffer(StringInfo buffer, CompressionType compressionType)
 		decompressedData = palloc0(decompressedDataSize);
 
 #if PG_VERSION_NUM >= 90500
+
+#if PG_VERSION_NUM >= 120000
+		decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
+												compressedDataSize, decompressedData,
+												decompressedDataSize, true);
+#else
 		decompressedByteCount = pglz_decompress(CSTORE_COMPRESS_RAWDATA(buffer->data),
-												compressedDataSize,
-												decompressedData, decompressedDataSize);
+												compressedDataSize, decompressedData,
+												decompressedDataSize);
+#endif
 
 		if (decompressedByteCount < 0)
 		{
diff --git a/cstore_fdw.c b/cstore_fdw.c
index 63ac172..94c7018 100644
--- a/cstore_fdw.c
+++ b/cstore_fdw.c
@@ -43,7 +43,14 @@
 #include "optimizer/pathnode.h"
 #include "optimizer/planmain.h"
 #include "optimizer/restrictinfo.h"
+#if PG_VERSION_NUM >= 120000
+#include "access/heapam.h"
+#include "access/tableam.h"
+#include "executor/tuptable.h"
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
 #include "parser/parser.h"
 #include "parser/parsetree.h"
 #include "parser/parse_coerce.h"
@@ -55,7 +62,11 @@
 #include "utils/memutils.h"
 #include "utils/lsyscache.h"
 #include "utils/rel.h"
+#if PG_VERSION_NUM >= 120000
+#include "utils/snapmgr.h"
+#else
 #include "utils/tqual.h"
+#endif
 
 
 /* local functions forward declarations */
@@ -94,6 +105,7 @@ static bool DirectoryExists(StringInfo directoryName);
 static void CreateDirectory(StringInfo directoryName);
 static void RemoveCStoreDatabaseDirectory(Oid databaseOid);
 static StringInfo OptionNamesString(Oid currentContextId);
+static HeapTuple GetSlotHeapTuple(TupleTableSlot *tts);
 static CStoreFdwOptions * CStoreGetOptions(Oid foreignTableId);
 static char * CStoreGetOptionValue(Oid foreignTableId, const char *optionName);
 static void ValidateForeignTableOptions(char *filename, char *compressionTypeString,
@@ -148,7 +160,6 @@ static bool CStoreIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
 											RangeTblEntry *rte);
 #endif
 
-
 /* declarations for dynamic loading */
 PG_MODULE_MAGIC;
 
@@ -579,7 +590,11 @@ CopyIntoCStoreTable(const CopyStmt *copyStatement, const char *queryString)
 		{
 			/* read the next row in tupleContext */
 			MemoryContext oldContext = MemoryContextSwitchTo(tupleContext);
+#if PG_VERSION_NUM >= 120000
+			nextRowFound = NextCopyFrom(copyState, NULL, columnValues, columnNulls);
+#else
 			nextRowFound = NextCopyFrom(copyState, NULL, columnValues, columnNulls, NULL);
+#endif
 			MemoryContextSwitchTo(oldContext);
 
 			/* write the row to the cstore file */
@@ -793,7 +808,7 @@ FindCStoreTables(List *tableList)
 }
 
 
-/* 
+/*
  * OpenRelationsForTruncate opens and locks relations for tables to be truncated.
  *
  * It also performs a permission checks to see if the user has truncate privilege
@@ -971,9 +986,9 @@ DistributedTable(Oid relationId)
 	bool distributedTable = false;
 	Oid partitionOid = InvalidOid;
 	Relation heapRelation = NULL;
-	HeapScanDesc scanDesc = NULL;
+	TableScanDesc scanDesc = NULL;
 	const int scanKeyCount = 1;
-	ScanKeyData scanKey[scanKeyCount];
+	ScanKeyData scanKey[1];
 	HeapTuple heapTuple = NULL;
 	bool missingOK = true;
 
@@ -996,13 +1011,13 @@ DistributedTable(Oid relationId)
 	ScanKeyInit(&scanKey[0], ATTR_NUM_PARTITION_RELATION_ID,
 				InvalidStrategy, F_OIDEQ, ObjectIdGetDatum(relationId));
 
-	scanDesc = heap_beginscan(heapRelation, SnapshotSelf, scanKeyCount, scanKey);
+	scanDesc = table_beginscan(heapRelation, SnapshotSelf, scanKeyCount, scanKey);
 
 	heapTuple = heap_getnext(scanDesc, ForwardScanDirection);
 
 	distributedTable = HeapTupleIsValid(heapTuple);
 
-	heap_endscan(scanDesc);
+	table_endscan(scanDesc);
 	relation_close(heapRelation, AccessShareLock);
 
 	return distributedTable;
@@ -1115,7 +1130,7 @@ CreateDirectory(StringInfo directoryName)
 
 /*
  * RemoveCStoreDatabaseDirectory removes CStore directory previously
- * created for this database. 
+ * created for this database.
  * However it does not remove 'cstore_fdw' directory even if there
  * are no other databases left.
 */
@@ -1367,6 +1382,20 @@ OptionNamesString(Oid currentContextId)
 }
 
 
+/*
+ * GetSlotHeapTuple abstracts getting HeapTuple from TupleTableSlot between versions
+ */
+static HeapTuple
+GetSlotHeapTuple(TupleTableSlot *tts)
+{
+#if PG_VERSION_NUM >= 120000
+	return tts->tts_ops->copy_heap_tuple(tts);
+#else
+	return tts->tts_tuple;
+#endif
+}
+
+
 /*
  * CStoreGetOptions returns the option values to be used when reading and writing
  * the cstore file. To resolve these values, the function checks options for the
@@ -1526,17 +1555,24 @@ static char *
 CStoreDefaultFilePath(Oid foreignTableId)
 {
 	Relation relation = relation_open(foreignTableId, AccessShareLock);
-	RelFileNode relationFileNode = relation->rd_node;
-
+	RelFileNode relationFileNode = relation->rd_node;
 	Oid databaseOid = relationFileNode.dbNode;
 	Oid relationFileOid = relationFileNode.relNode;
 
+	relation_close(relation, AccessShareLock);
+
+	/* PG12 onward does not create relfilenode for foreign tables */
+	if (databaseOid == InvalidOid)
+	{
+		databaseOid = MyDatabaseId;
+		relationFileOid = foreignTableId;
+
+	}
+
 	StringInfo cstoreFilePath = makeStringInfo();
 
 	appendStringInfo(cstoreFilePath, "%s/%s/%u/%u", DataDir, CSTORE_FDW_NAME,
 					 databaseOid, relationFileOid);
 
-	relation_close(relation, AccessShareLock);
-
 	return cstoreFilePath->data;
 }
@@ -2078,7 +2114,9 @@ CStoreAcquireSampleRows(Relation relation, int logLevel,
 	/* set up tuple slot */
 	columnValues = palloc0(columnCount * sizeof(Datum));
 	columnNulls = palloc0(columnCount * sizeof(bool));
-#if PG_VERSION_NUM >= 110000
+#if PG_VERSION_NUM >= 120000
+	scanTupleSlot = MakeTupleTableSlot(NULL, &TTSOpsVirtual);
+#elif PG_VERSION_NUM >= 110000
 	scanTupleSlot = MakeTupleTableSlot(NULL);
 #else
 	scanTupleSlot = MakeTupleTableSlot();
@@ -2123,7 +2161,7 @@ CStoreAcquireSampleRows(Relation relation, int logLevel,
 		MemoryContextSwitchTo(oldContext);
 
 		/* if there are no more records to read, break */
-		if (scanTupleSlot->tts_isempty)
+		if (TTS_EMPTY(scanTupleSlot))
 		{
 			break;
 		}
@@ -2298,14 +2336,19 @@ CStoreExecForeignInsert(EState *executorState, ResultRelInfo *relationInfo,
 						TupleTableSlot *tupleSlot, TupleTableSlot *planSlot)
 {
 	TableWriteState *writeState = (TableWriteState*) relationInfo->ri_FdwState;
+	HeapTuple heapTuple;
 
 	Assert(writeState != NULL);
 
-	if(HeapTupleHasExternal(tupleSlot->tts_tuple))
+	heapTuple = GetSlotHeapTuple(tupleSlot);
+
+	if (HeapTupleHasExternal(heapTuple))
 	{
 		/* detoast any toasted attributes */
-		tupleSlot->tts_tuple = toast_flatten_tuple(tupleSlot->tts_tuple,
-												   tupleSlot->tts_tupleDescriptor);
+		HeapTuple newTuple = toast_flatten_tuple(heapTuple,
+												 tupleSlot->tts_tupleDescriptor);
+
+		ExecForceStoreHeapTuple(newTuple, tupleSlot, true);
 	}
 
 	slot_getallattrs(tupleSlot);
diff --git a/cstore_reader.c b/cstore_reader.c
index 1e93178..7e9c6bc 100644
--- a/cstore_reader.c
+++ b/cstore_reader.c
@@ -23,10 +23,16 @@
 #include "access/skey.h"
 #include "commands/defrem.h"
 #include "nodes/makefuncs.h"
+#if PG_VERSION_NUM >= 120000
+#include "nodes/pathnodes.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/clauses.h"
 #include "optimizer/predtest.h"
-#include "optimizer/restrictinfo.h"
 #include "optimizer/var.h"
+#endif
+#include "optimizer/restrictinfo.h"
 #include "port.h"
 #include "storage/fd.h"
 #include "utils/memutils.h"
@@ -80,7 +86,7 @@ static void DeserializeBlockData(StripeBuffers *stripeBuffers, uint64 blockIndex
 								 TupleDesc tupleDescriptor);
 static Datum ColumnDefaultValue(TupleConstr *tupleConstraints,
								Form_pg_attribute attributeForm);
-static int64 FileSize(FILE *file);
+static int64 FILESize(FILE *file);
 static StringInfo ReadFromFile(FILE *file, uint64 offset, uint32 size);
 static void ResetUncompressedBlockData(ColumnBlockData **blockDataArray,
									   uint32 columnCount);
@@ -181,7 +187,7 @@ CStoreReadFooter(StringInfo tableFooterFilename)
						errhint("Try copying in data to the table.")));
 	}
 
-	footerFileSize = FileSize(tableFooterFile);
+	footerFileSize = FILESize(tableFooterFile);
 	if (footerFileSize < CSTORE_POSTSCRIPT_SIZE_LENGTH)
 	{
 		ereport(ERROR, (errmsg("invalid cstore file")));
 	}
@@ -362,7 +368,7 @@ CreateEmptyBlockDataArray(uint32 columnCount, bool *columnMask, uint32 blockRowC
 			blockDataArray[columnIndex] = blockData;
 		}
 	}
-	
+
 	return blockDataArray;
 }
 
@@ -387,7 +393,7 @@ FreeColumnBlockDataArray(ColumnBlockData **blockDataArray, uint32 columnCount)
 			pfree(blockData);
 		}
 	}
-	
+
 	pfree(blockDataArray);
 }
 
@@ -947,7 +953,7 @@ GetOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber)
 }
 
 
-/* 
+/*
  * UpdateConstraint updates the base constraint with the given min/max values.
 * The function is copied from CitusDB's shard pruning logic.
 */
@@ -1289,7 +1295,7 @@ ColumnDefaultValue(TupleConstr *tupleConstraints, Form_pg_attribute attributeFor
 
 /* Returns the size of the given file handle. */
 static int64
-FileSize(FILE *file)
+FILESize(FILE *file)
 {
 	int64 fileSize = 0;
 	int fseekResult = 0;
diff --git a/cstore_version_compat.h b/cstore_version_compat.h
index f1079c2..a7f961f 100644
--- a/cstore_version_compat.h
+++ b/cstore_version_compat.h
@@ -45,6 +45,14 @@ completionTag)
 #endif
 
+#if PG_VERSION_NUM < 120000
+#define TTS_EMPTY(slot) ((slot)->tts_isempty)
+#define ExecForceStoreHeapTuple(tuple, slot, shouldFree) \
+	ExecStoreTuple(tuple, slot, InvalidBuffer, shouldFree);
+#define TableScanDesc HeapScanDesc
+#define table_beginscan heap_beginscan
+#define table_endscan heap_endscan
+#endif
 
 #endif   /* CSTORE_COMPAT_H */
diff --git a/cstore_writer.c b/cstore_writer.c
index 1148dbd..b690642 100644
--- a/cstore_writer.c
+++ b/cstore_writer.c
@@ -23,7 +23,11 @@
 #include "access/nbtree.h"
 #include "catalog/pg_collation.h"
 #include "commands/defrem.h"
+#if PG_VERSION_NUM >= 120000
+#include "optimizer/optimizer.h"
+#else
 #include "optimizer/var.h"
+#endif
 #include "port.h"
 #include "storage/fd.h"
 #include "utils/memutils.h"
@@ -482,7 +486,7 @@ CreateEmptyStripeSkipList(uint32 stripeMaxRowCount, uint32 blockRowCount,
 
 /*
  * FlushStripe flushes current stripe data into the file. The function first ensures
- * the last data block for each column is properly serialized and compressed. Then, 
+ * the last data block for each column is properly serialized and compressed. Then,
  * the function creates the skip list and footer buffers. Finally, the function
  * flushes the skip list, data, and footer buffers to the file.
 */
@@ -751,7 +755,7 @@ SerializeSingleDatum(StringInfo datumBuffer, Datum datum, bool datumTypeByValue,
 		Assert(!datumTypeByValue);
 		memcpy(currentDatumDataPointer, DatumGetPointer(datum), datumLength);
 	}
-	
+
 	datumBuffer->len += datumLengthAligned;
 }
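
Notes on the compatibility shims above (illustrative sketches, not part of the patch):

The PostgreSQL 12 branch in DecompressBuffer exists because pglz_decompress() takes an extra check_complete flag from 12 onward; passing true makes the call fail unless exactly rawsize bytes of output are produced. A version-gated wrapper such as the sketch below keeps that #if out of the call site. The name cstore_pglz_decompress is hypothetical, and the sketch assumes PostgreSQL 9.5 or later server headers, matching the surrounding PG_VERSION_NUM >= 90500 guard in cstore_compression.c.

#include "postgres.h"
#include "common/pg_lzcompress.h"

/*
 * Hypothetical helper: decompress slen bytes from source into dest,
 * expecting exactly rawsize bytes of output.  Returns the number of bytes
 * produced, or -1 on corrupt input (and, on 12+, on incomplete output,
 * because check_complete is passed as true).
 */
static int32
cstore_pglz_decompress(const char *source, int32 slen, char *dest, int32 rawsize)
{
#if PG_VERSION_NUM >= 120000
	return pglz_decompress(source, slen, dest, rawsize, true);
#else
	return pglz_decompress(source, slen, dest, rawsize);
#endif
}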
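GetSlotHeapTuple papers over the PostgreSQL 12 slot rework: tts_tuple is gone, so a heap tuple has to be materialized through the slot's callbacks. The sketch below is an equivalent formulation that uses the in-core ExecCopySlotHeapTuple() wrapper instead of reaching into tts_ops directly; the helper name slot_copy_heap_tuple is hypothetical. Either way, on 12+ the caller receives a copy it owns, whereas tts_tuple on older versions is the tuple held by the slot itself.

#include "postgres.h"
#include "executor/tuptable.h"

/*
 * Hypothetical helper: return a HeapTuple for the slot's current row.
 * On 12+ this is a copy owned by the caller; before 12 it is the tuple
 * stored in the slot.
 */
static HeapTuple
slot_copy_heap_tuple(TupleTableSlot *slot)
{
#if PG_VERSION_NUM >= 120000
	return ExecCopySlotHeapTuple(slot);
#else
	return slot->tts_tuple;
#endif
}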
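The DistributedTable changes and the new macros in cstore_version_compat.h follow one pattern: call sites are written against the PostgreSQL 12 table-AM names (TableScanDesc, table_beginscan, table_endscan), and on 9.3 through 11 the compatibility header maps those names back to the heap scan primitives, which is why the pre-12 #defines introduce the new spellings in terms of the old ones. A minimal caller written that way looks like the sketch below; RelationHasMatchingRow is a hypothetical helper for a plain heap relation, and the pre-12 path assumes cstore_version_compat.h is on the include path.

#include "postgres.h"
#include "access/heapam.h"
#include "access/skey.h"
#if PG_VERSION_NUM >= 120000
#include "access/tableam.h"
#include "utils/snapmgr.h"
#else
#include "utils/tqual.h"
#include "cstore_version_compat.h"
#endif

/* Hypothetical helper: true if any row in the heap relation matches the key. */
static bool
RelationHasMatchingRow(Relation relation, ScanKeyData *scanKey, int scanKeyCount)
{
	TableScanDesc scanDesc = table_beginscan(relation, SnapshotSelf,
											 scanKeyCount, scanKey);
	HeapTuple heapTuple = heap_getnext(scanDesc, ForwardScanDirection);
	bool found = HeapTupleIsValid(heapTuple);

	table_endscan(scanDesc);
	return found;
}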