Skip to content

Commit

Permalink
Improve memory handling for remote COPY
Browse files Browse the repository at this point in the history
This change improves memory usage in the `COPY` code used for
distributed hypertables. The following issues have been addressed:

* `PGresult` objects were not cleared, leading to memory leaks.
* The caching of chunk connections didn't work since the lookup
  compared ephemeral chunk pointers instead of chunk IDs. The effect
  was that cached chunk connection state was reallocated every time
  instead of being reused. This likely also caused worse performance.

To address these issues, the following changes are made:

* All `PGresult` objects are now cleared with `PQclear`.
* Lookup for chunk connections now compares chunk IDs instead of chunk
  pointers.
* The per-tuple memory context is moved to the outer processing
  loop to ensure that everything in the loop is allocated on the
  per-tuple memory context, which is also reset at every iteration of
  the loop.
* The use of memory contexts is also simplified to have only one
  memory context for state that should survive across resets of the
  per-tuple memory context.

Fixes #2677
  • Loading branch information
erimatnor committed Dec 2, 2020
1 parent 7c76fd4 commit b4cd8be
Show file tree
Hide file tree
Showing 6 changed files with 182 additions and 115 deletions.
6 changes: 3 additions & 3 deletions src/copy.c
Original file line number Diff line number Diff line change
Expand Up @@ -441,8 +441,6 @@ copyfrom(CopyChunkState *ccstate, List *range_table, Hypertable *ht, void (*call
/* Close any trigger target relations */
ExecCleanUpTriggerState(estate);

copy_chunk_state_destroy(ccstate);

/*
* If we skipped writing WAL, then we need to sync the heap (but not
* indexes since those use WAL anyway)
Expand Down Expand Up @@ -663,10 +661,11 @@ timescaledb_DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *proces
ccstate->where_clause = where_clause;

if (hypertable_is_distributed(ht))
ts_cm_functions->distributed_copy(stmt, processed, ccstate, attnums);
*processed = ts_cm_functions->distributed_copy(stmt, ccstate, attnums);
else
*processed = copyfrom(ccstate, pstate->p_rtable, ht, CopyFromErrorCallback, cstate);

copy_chunk_state_destroy(ccstate);
EndCopyFrom(cstate);
free_parsestate(pstate);
table_close(rel, NoLock);
Expand Down Expand Up @@ -732,6 +731,7 @@ timescaledb_move_from_table_to_chunks(Hypertable *ht, LOCKMODE lockmode)
scandesc = table_beginscan(rel, snapshot, 0, NULL);
ccstate = copy_chunk_state_create(ht, rel, next_copy_from_table_to_chunks, NULL, scandesc);
copyfrom(ccstate, pstate->p_rtable, ht, copy_table_to_chunk_error_callback, scandesc);
copy_chunk_state_destroy(ccstate);
heap_endscan(scandesc);
UnregisterSnapshot(snapshot);
table_close(rel, lockmode);
Expand Down
7 changes: 4 additions & 3 deletions src/cross_module_fn.c
Original file line number Diff line number Diff line change
Expand Up @@ -236,11 +236,12 @@ data_node_dispatch_path_create_default(PlannerInfo *root, ModifyTablePath *mtpat
return NULL;
}

static void
distributed_copy_default(const CopyStmt *stmt, uint64 *processed, CopyChunkState *ccstate,
List *attnums)
static uint64
distributed_copy_default(const CopyStmt *stmt, CopyChunkState *ccstate, List *attnums)
{
error_no_default_fn_community();

return 0;
}

static bool
Expand Down
3 changes: 1 addition & 2 deletions src/cross_module_fn.h
Original file line number Diff line number Diff line change
Expand Up @@ -139,8 +139,7 @@ typedef struct CrossModuleFunctions
void (*create_chunk_on_data_nodes)(Chunk *chunk, Hypertable *ht);
Path *(*data_node_dispatch_path_create)(PlannerInfo *root, ModifyTablePath *mtpath,
Index hypertable_rti, int subpath_index);
void (*distributed_copy)(const CopyStmt *stmt, uint64 *processed, CopyChunkState *ccstate,
List *attnums);
uint64 (*distributed_copy)(const CopyStmt *stmt, CopyChunkState *ccstate, List *attnums);
bool (*set_distributed_id)(Datum id);
void (*set_distributed_peer_id)(Datum id);
bool (*is_frontend_session)(void);
Expand Down

0 comments on commit b4cd8be

Please sign in to comment.