Skip to content

Commit

Permalink
Merge branch 'adb-7.1.0' into ADBDEV-4345
Browse files Browse the repository at this point in the history
  • Loading branch information
bandetto committed May 24, 2024
2 parents 3961c82 + dd3c67d commit 1100a41
Show file tree
Hide file tree
Showing 32 changed files with 891 additions and 510 deletions.
13 changes: 1 addition & 12 deletions arenadata/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -18,23 +18,12 @@ RUN dnf makecache && \
dnf -y install libicu perl-ExtUtils-Embed perl-Env perl-JSON && \
dnf -y install perl-IPC-Run perl-Test-Base libxslt-devel openldap-devel && \
dnf -y install python39-psycopg2 python39-psutil python39-pyyaml python39-lxml && \
dnf -y install llvm-devel clang && \
dnf clean all && \
#we install pytest from pypi since the version from the repository does not contain an executable file
#the rest of the packages are not available in the repository.
pip3 install pytest gsutil behave~=1.2.6 coverage~=4.5 'mock<=5.0.0' allure-behave

RUN dnf -y install \
--repofrompath=devel_external,https://dl.rockylinux.org/vault/rocky/8.8/devel/x86_64/os \
llvm-libs-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
llvm-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
llvm-test-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
llvm-static-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
llvm-devel-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
clang-resource-filesystem-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
clang-libs-15.0.7-1.module+el8.8.0+1144+0a4e73bd \
clang-15.0.7-1.module+el8.8.0+1144+0a4e73bd && \
dnf clean all

# setup ssh configuration
RUN ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa && \
ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa && \
Expand Down
23 changes: 23 additions & 0 deletions arenadata/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,27 @@ We need to execute [../concourse/scripts/ic_gpdb.bash](../concourse/scripts/ic_g
/home/gpadmin/gpdb_src/concourse/scripts/ic_gpdb.bash
```

## Jit regression tests suite

* jit tests are basically no different from regular regression tests except they are executed with jit enabled
* jit tests need to be executed with optimizer both on and off. Notice that make flags differ a bit for each scenario

* optimizer=on
```bash
docker run --name gpdb7_opt_on --rm -it -e TEST_OS=centos \
-e MAKE_TEST_COMMAND="-k PGOPTIONS='-c optimizer=on -c jit=on -c jit_above_cost=0 -c optimizer_jit_above_cost=0 -c gp_explain_jit=off' installcheck" \
--sysctl "kernel.sem=500 1024000 200 4096" gpdb7_regress:latest \
/home/gpadmin/gpdb_src/concourse/scripts/ic_gpdb.bash
```

* optimizer=off
```bash
docker run --name gpdb7_opt_off --rm -it -e TEST_OS=centos \
-e MAKE_TEST_COMMAND="make -k PGOPTIONS='-c optimizer=off -c jit=on -c jit_above_cost=0 -c gp_explain_jit=off' installcheck" \
--sysctl "kernel.sem=500 1024000 200 4096" gpdb7_regress:latest \
/home/gpadmin/gpdb_src/concourse/scripts/ic_gpdb.bash
```

* we need to modify `MAKE_TEST_COMMAND` environment variable to run a different suite. e.g. we may run tests against the Postgres optimizer or ORCA by altering the `PGOPTIONS` environment variable;
* we need to increase semaphore amount to be able to run demo cluster

Expand Down Expand Up @@ -89,6 +110,8 @@ bash arenadata/scripts/run_behave_tests.bash gpstart gpstop


Tests use `allure-behave` package and store allure output files in `allure-results` folder.
Also, the allure report for each failed test has the gpdb log files attached. See `gpMgmt/test/behave_utils/arenadata/formatter.py`.
This requires adding the `gpMgmt/test` directory to `PYTHONPATH`.

Greenplum cluster in Docker containers has its own peculiarities in preparing a cluster for tests.
All tests are run in one way or another on the demo cluster, wherever possible.
Expand Down
2 changes: 2 additions & 0 deletions arenadata/scripts/run_behave_tests.bash
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,8 @@ run_feature() {

docker-compose -p $project -f arenadata/docker-compose.yaml exec -T \
-e FEATURE="$feature" -e BEHAVE_FLAGS="--tags $feature --tags=$cluster \
-f behave_utils.arenadata.formatter:CustomFormatter \
-o non-existed-output \
-f allure_behave.formatter:AllureFormatter \
-o /tmp/allure-results" \
cdw gpdb_src/arenadata/scripts/behave_gpdb.bash
Expand Down
5 changes: 3 additions & 2 deletions gpMgmt/Makefile.behave
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
PEXPECT_LIB=$(GPHOME)/bin/lib
TEST_DIR=$(CURDIR)/test

# To pass in custom flags as behave args(such as multiple flags),
# use flags=--tags=foo, --tags=-bar
behave:
@which behave || (echo "behave not found. Run pip install -r gpMgmt/requirements-dev.txt" && exit 1)
@echo "Running behave on management scripts..."
@if [ -n """$(flags)""" ]; then \
PYTHONPATH=$$PYTHONPATH:$(PEXPECT_LIB) behave $(CURDIR)/test/behave/* -s -k $(flags) 2>&1 ; \
PYTHONPATH=$$PYTHONPATH:$(PEXPECT_LIB):$(TEST_DIR) behave $(CURDIR)/test/behave/* -s -k $(flags) 2>&1 ; \
elif [ -n """$(tags)""" ]; then \
PYTHONPATH=$$PYTHONPATH:$(PEXPECT_LIB) behave $(CURDIR)/test/behave/* -s -k --tags=$(tags) 2>&1 ; \
PYTHONPATH=$$PYTHONPATH:$(PEXPECT_LIB):$(TEST_DIR) behave $(CURDIR)/test/behave/* -s -k --tags=$(tags) 2>&1 ; \
else \
echo "Please specify tags=tagname or flags=[behave flags]"; \
exit 1; \
Expand Down
6 changes: 6 additions & 0 deletions gpMgmt/test/behave/mgmt_utils/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import shutil

import behave
from behave import use_fixture

from test.behave_utils.utils import drop_database_if_exists, start_database_if_not_started,\
create_database, \
Expand Down Expand Up @@ -98,6 +99,11 @@ def before_scenario(context, scenario):
scenario.skip("skipping scenario tagged with @skip")
return

if "concourse_cluster" in scenario.effective_tags and not hasattr(context, "concourse_cluster_created"):
from test.behave_utils.arenadata.fixtures import init_cluster
context.concourse_cluster_created = True
return use_fixture(init_cluster, context)

if 'gpmovemirrors' in context.feature.tags:
context.mirror_context = MirrorMgmtContext()

Expand Down
2 changes: 1 addition & 1 deletion gpMgmt/test/behave/mgmt_utils/minirepro.feature
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
@minirepro
@minirepro @skip
Feature: Dump minimum database objects that is related to the query

@minirepro_UI
Expand Down
Empty file.
11 changes: 11 additions & 0 deletions gpMgmt/test/behave_utils/arenadata/fixtures.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
from behave import fixture


@fixture
def init_cluster(context):
    # Behave fixture: (re)create the multi-host demo cluster used by tests
    # tagged @concourse_cluster (see environment.py's before_scenario, which
    # invokes this once per run via use_fixture).
    #
    # Steps: stop any running database, point the test working directory at
    # /tmp/concourse_cluster, clear stale gpinitsystem logs, then build a
    # mirrored cluster with cdw as coordinator and sdw1..sdw3 as segments.
    context.execute_steps(u"""
        Given the database is not running
        And a working directory of the test as '/tmp/concourse_cluster'
        And the user runs command "rm -rf ~/gpAdminLogs/gpinitsystem*"
        And a cluster is created with mirrors on "cdw" and "sdw1, sdw2, sdw3"
        """)
49 changes: 49 additions & 0 deletions gpMgmt/test/behave_utils/arenadata/formatter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import os

from behave.formatter.base import Formatter
from behave.model import ScenarioOutline
from behave.model_core import Status


def before_scenario(scenario):
    # Pre-scenario hook invoked by wrap_scenario before each scenario runs.
    # Intentionally a no-op; kept as an extension point so wrapped_func can
    # call both hooks symmetrically.
    pass


def after_scenario(scenario):
    """Post-scenario hook: on failure, attach diagnostics to the Allure report.

    Attaches the scenario's captured stdout/stderr plus every regular file
    found in ~/gpAdminLogs.  Silently does nothing when the ``allure``
    package is not installed.
    """
    if scenario.status != Status.failed:
        return
    try:
        import allure
    except ImportError:
        return
    if scenario.captured.output:
        allure.attach(scenario.captured.output, name="stdout/stderr")
    logs_dir = os.path.join(os.path.expanduser("~"), "gpAdminLogs")
    for entry in os.listdir(logs_dir):
        entry_path = os.path.join(logs_dir, entry)
        if os.path.isfile(entry_path):
            allure.attach.file(entry_path, name=entry)


def wrap_scenario(scenario):
    """Build a decorator that brackets a callable with the scenario hooks.

    Used by CustomFormatter to wrap ``scenario.run`` so that
    ``before_scenario`` fires before and ``after_scenario`` fires after
    every scenario execution.

    Fix: run ``after_scenario`` in a ``finally`` block so the failure-log
    attachment still happens even if the wrapped callable raises instead of
    returning a failure status.
    """
    def inner(func):
        def wrapped_func(*args, **kwargs):
            before_scenario(scenario)
            try:
                return func(*args, **kwargs)
            finally:
                # Always runs — on normal return and on exception alike.
                after_scenario(scenario)
        return wrapped_func
    return inner


class CustomFormatter(Formatter):
    """Behave formatter that hooks scenario runs with pre/post callbacks.

    Instead of emitting output, this formatter monkey-patches each
    scenario's ``run`` method via ``wrap_scenario`` when a feature is
    announced.
    """

    def _wrap_scenario(self, scenarios):
        # Recurse into scenario outlines; wrap the run() of every
        # concrete scenario.
        for item in scenarios:
            if isinstance(item, ScenarioOutline):
                self._wrap_scenario(item)
            else:
                item.run = wrap_scenario(item)(item.run)

    def feature(self, feature):
        # Formatter callback: called once per feature before it runs.
        self._wrap_scenario(feature.scenarios)
4 changes: 4 additions & 0 deletions src/backend/catalog/heap.c
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@
#include "utils/snapmgr.h"
#include "utils/syscache.h"

#include "catalog/aocatalog.h"
#include "catalog/oid_dispatch.h"
#include "catalog/pg_appendonly.h"
#include "catalog/pg_stat_last_operation.h"
Expand Down Expand Up @@ -1778,9 +1779,12 @@ heap_create_with_catalog(const char *relname,
*
* Also, skip this in bootstrap mode, since we don't make dependencies
* while bootstrapping.
*
* GPDB: The section about TOAST is still relevant for AO aux tables.
*/
if (relkind != RELKIND_COMPOSITE_TYPE &&
relkind != RELKIND_TOASTVALUE &&
!IsAppendonlyMetadataRelkind(relkind) &&
!IsBootstrapProcessingMode())
{
ObjectAddress myself,
Expand Down
36 changes: 28 additions & 8 deletions src/backend/commands/copy.c
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ static GpDistributionData *InitDistributionData(CopyState cstate, EState *estate
static void FreeDistributionData(GpDistributionData *distData);
static void InitCopyFromDispatchSplit(CopyState cstate, GpDistributionData *distData, EState *estate);
static unsigned int GetTargetSeg(GpDistributionData *distData, TupleTableSlot *slot);
static ProgramPipes *open_program_pipes(char *command, bool forwrite);
static ProgramPipes *open_program_pipes(CopyState cstate, bool forwrite);
static void close_program_pipes(CopyState cstate, bool ifThrow);
CopyIntoClause*
MakeCopyIntoClause(CopyStmt *stmt);
Expand Down Expand Up @@ -2406,7 +2406,7 @@ BeginCopyToOnSegment(QueryDesc *queryDesc)

if (cstate->is_program)
{
cstate->program_pipes = open_program_pipes(cstate->filename, true);
cstate->program_pipes = open_program_pipes(cstate, true);
cstate->copy_file = fdopen(cstate->program_pipes->pipes[0], PG_BINARY_W);

if (cstate->copy_file == NULL)
Expand Down Expand Up @@ -2625,7 +2625,7 @@ BeginCopyTo(ParseState *pstate,
if (is_program)
{
progress_vals[1] = PROGRESS_COPY_TYPE_PROGRAM;
cstate->program_pipes = open_program_pipes(cstate->filename, true);
cstate->program_pipes = open_program_pipes(cstate, true);
cstate->copy_file = fdopen(cstate->program_pipes->pipes[0], PG_BINARY_W);

if (cstate->copy_file == NULL)
Expand Down Expand Up @@ -5101,7 +5101,7 @@ BeginCopyFrom(ParseState *pstate,
if (cstate->is_program)
{
progress_vals[1] = PROGRESS_COPY_TYPE_PROGRAM;
cstate->program_pipes = open_program_pipes(cstate->filename, false);
cstate->program_pipes = open_program_pipes(cstate, false);
cstate->copy_file = fdopen(cstate->program_pipes->pipes[0], PG_BINARY_R);
if (cstate->copy_file == NULL)
ereport(ERROR,
Expand Down Expand Up @@ -8014,9 +8014,21 @@ GetTargetSeg(GpDistributionData *distData, TupleTableSlot *slot)
return target_seg;
}

/*
 * Memory-context reset callback registered on cstate->copycontext (see
 * open_program_pipes).  When the context is reset as part of transaction
 * abort, close the COPY PROGRAM's pipes (without throwing) so the child
 * process is not leaked.  On a normal, non-abort reset the pipes are
 * closed by the regular COPY shutdown path, so do nothing here.
 */
static void
close_program_pipes_on_reset(void *arg)
{
	if (!IsAbortInProgress())
		return;

	/* arg is the CopyState this callback was registered for */
	CopyState cstate = arg;

	close_program_pipes(cstate, false);
}

static ProgramPipes*
open_program_pipes(char *command, bool forwrite)
open_program_pipes(CopyState cstate, bool forwrite)
{
char *command = cstate->filename;
int save_errno;
pqsigfunc save_SIGPIPE;
/* set up extvar */
Expand Down Expand Up @@ -8054,6 +8066,12 @@ open_program_pipes(char *command, bool forwrite)
errmsg("can not start command: %s", command)));
}

MemoryContextCallback *callback = MemoryContextAlloc(cstate->copycontext, sizeof(MemoryContextCallback));

callback->arg = cstate;
callback->func = close_program_pipes_on_reset;
MemoryContextRegisterResetCallback(cstate->copycontext, callback);

return program_pipes;
}

Expand All @@ -8063,8 +8081,7 @@ close_program_pipes(CopyState cstate, bool ifThrow)
Assert(cstate->is_program);

int ret = 0;
StringInfoData sinfo;
initStringInfo(&sinfo);
StringInfoData sinfo = {0};

if (cstate->copy_file)
{
Expand All @@ -8077,8 +8094,11 @@ close_program_pipes(CopyState cstate, bool ifThrow)
{
return;
}


if (ifThrow)
initStringInfo(&sinfo);
ret = pclose_with_stderr(cstate->program_pipes->pid, cstate->program_pipes->pipes, &sinfo);
cstate->program_pipes = NULL;

if (ret == 0 || !ifThrow)
{
Expand Down
66 changes: 0 additions & 66 deletions src/backend/commands/explain_gp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1020,20 +1020,6 @@ cdbexplain_depositStatsToNode(PlanState *planstate, CdbExplain_RecvStatCtx *ctx)
CdbExplain_DepStatAcc vmem_reserved;
CdbExplain_DepStatAcc totalPartTableScanned;

/* Buffer usage counters */
CdbExplain_DepStatAcc shared_blks_hit;
CdbExplain_DepStatAcc shared_blks_read;
CdbExplain_DepStatAcc shared_blks_written;
CdbExplain_DepStatAcc shared_blks_dirtied;
CdbExplain_DepStatAcc local_blks_hit;
CdbExplain_DepStatAcc local_blks_read;
CdbExplain_DepStatAcc local_blks_written;
CdbExplain_DepStatAcc local_blks_dirtied;
CdbExplain_DepStatAcc temp_blks_read;
CdbExplain_DepStatAcc temp_blks_written;
CdbExplain_DepStatAcc blk_read_time;
CdbExplain_DepStatAcc blk_write_time;

int imsgptr;
int nInst;

Expand All @@ -1059,18 +1045,6 @@ cdbexplain_depositStatsToNode(PlanState *planstate, CdbExplain_RecvStatCtx *ctx)
cdbexplain_depStatAcc_init0(&workmemwanted);
cdbexplain_depStatAcc_init0(&totalWorkfileCreated);
cdbexplain_depStatAcc_init0(&totalPartTableScanned);
cdbexplain_depStatAcc_init0(&shared_blks_hit);
cdbexplain_depStatAcc_init0(&shared_blks_read);
cdbexplain_depStatAcc_init0(&shared_blks_written);
cdbexplain_depStatAcc_init0(&shared_blks_dirtied);
cdbexplain_depStatAcc_init0(&local_blks_hit);
cdbexplain_depStatAcc_init0(&local_blks_read);
cdbexplain_depStatAcc_init0(&local_blks_written);
cdbexplain_depStatAcc_init0(&local_blks_dirtied);
cdbexplain_depStatAcc_init0(&temp_blks_read);
cdbexplain_depStatAcc_init0(&temp_blks_written);
cdbexplain_depStatAcc_init0(&blk_read_time);
cdbexplain_depStatAcc_init0(&blk_write_time);

/* Initialize per-slice accumulators. */
cdbexplain_depStatAcc_init0(&peakmemused);
Expand Down Expand Up @@ -1110,18 +1084,6 @@ cdbexplain_depositStatsToNode(PlanState *planstate, CdbExplain_RecvStatCtx *ctx)
cdbexplain_depStatAcc_upd(&workmemwanted, rsi->workmemwanted, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&totalWorkfileCreated, (rsi->workfileCreated ? 1 : 0), rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&totalPartTableScanned, rsi->numPartScanned, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&shared_blks_hit, rsi->bufusage.shared_blks_hit, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&shared_blks_read, rsi->bufusage.shared_blks_read, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&shared_blks_written, rsi->bufusage.shared_blks_written, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&shared_blks_dirtied, rsi->bufusage.shared_blks_dirtied, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&local_blks_hit, rsi->bufusage.local_blks_hit, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&local_blks_read, rsi->bufusage.local_blks_read, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&local_blks_written, rsi->bufusage.local_blks_written, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&local_blks_dirtied, rsi->bufusage.local_blks_dirtied, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&temp_blks_read, rsi->bufusage.temp_blks_read, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&temp_blks_written, rsi->bufusage.temp_blks_written, rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&blk_read_time, INSTR_TIME_GET_DOUBLE(rsi->bufusage.blk_read_time), rsh, rsi, nsi);
cdbexplain_depStatAcc_upd(&blk_write_time, INSTR_TIME_GET_DOUBLE(rsi->bufusage.blk_write_time), rsh, rsi, nsi);

/* Update per-slice accumulators. */
cdbexplain_depStatAcc_upd(&peakmemused, rsh->worker.peakmemused, rsh, rsi, nsi);
Expand Down Expand Up @@ -1203,34 +1165,6 @@ cdbexplain_depositStatsToNode(PlanState *planstate, CdbExplain_RecvStatCtx *ctx)
if (peakmemused.agg.vmax > 1.05 * cdbexplain_agg_avg(&peakmemused.agg))
cdbexplain_depStatAcc_saveText(&peakmemused, ctx->extratextbuf, &saved);

/*
* For positive buffer counters, save extra message text
*/
if (shared_blks_hit.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&shared_blks_hit, ctx->extratextbuf, &saved);
if (shared_blks_read.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&shared_blks_read, ctx->extratextbuf, &saved);
if (shared_blks_written.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&shared_blks_written, ctx->extratextbuf, &saved);
if (shared_blks_dirtied.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&shared_blks_dirtied, ctx->extratextbuf, &saved);
if (local_blks_hit.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&local_blks_hit, ctx->extratextbuf, &saved);
if (local_blks_read.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&local_blks_read, ctx->extratextbuf, &saved);
if (local_blks_written.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&local_blks_written, ctx->extratextbuf, &saved);
if (local_blks_dirtied.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&local_blks_dirtied, ctx->extratextbuf, &saved);
if (temp_blks_read.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&temp_blks_read, ctx->extratextbuf, &saved);
if (temp_blks_written.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&temp_blks_written, ctx->extratextbuf, &saved);
if (blk_read_time.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&blk_read_time, ctx->extratextbuf, &saved);
if (blk_write_time.agg.vsum > 0)
cdbexplain_depStatAcc_saveText(&blk_write_time, ctx->extratextbuf, &saved);

/*
* One worker which produced the greatest number of output rows.
* (Always give at least one node a chance to have its extra message
Expand Down
Loading

0 comments on commit 1100a41

Please sign in to comment.