Add tests and update scripts
ildus committed Dec 17, 2018
1 parent 79f2a4b commit 41d96bf
Showing 10 changed files with 304 additions and 5 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -6,9 +6,9 @@ OBJS = sr_plan.o $(WIN32RES)
PGFILEDESC = "sr_plan - save and read plan"

EXTENSION = sr_plan
-EXTVERSION = 1.1
+EXTVERSION = 1.2
DATA_built = sr_plan--$(EXTVERSION).sql
-DATA = sr_plan--1.0--1.1.sql
+DATA = sr_plan--1.0--1.1.sql sr_plan--1.1--1.2.sql

EXTRA_CLEAN = sr_plan--$(EXTVERSION).sql
REGRESS = sr_plan sr_plan_schema joins explain
1 change: 1 addition & 0 deletions conf.add
@@ -0,0 +1 @@
shared_preload_libraries='sr_plan'
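
For context, shared_preload_libraries only loads the shared library at server start; the extension objects themselves are created per database, as the new test suite does:

    CREATE EXTENSION sr_plan;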
3 changes: 2 additions & 1 deletion init.sql
@@ -5,10 +5,11 @@

CREATE TABLE sr_plans (
    query_hash int NOT NULL,
+   query_id int8 NOT NULL,
    plan_hash int NOT NULL,
+   enable boolean NOT NULL,
    query varchar NOT NULL,
    plan text NOT NULL,
-   enable boolean NOT NULL,

    reloids oid[],
    index_reloids oid[]
23 changes: 23 additions & 0 deletions sr_plan--1.0--1.1.sql
@@ -16,3 +16,26 @@ CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids);

DROP FUNCTION explain_jsonb_plan(jsonb) CASCADE;
DROP FUNCTION sr_plan_invalid_table() CASCADE;

CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger
LANGUAGE plpgsql AS $$
DECLARE
    obj record;
    indobj record;
BEGIN
    FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
        WHERE object_type = 'table' OR object_type = 'index'
    LOOP
        IF obj.object_type = 'table' THEN
            DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid];
        ELSE
            IF obj.object_type = 'index' THEN
                DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid];
            END IF;
        END IF;
    END LOOP;
END
$$;

CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop
    EXECUTE PROCEDURE sr_plan_invalid_table();
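
As an illustrative sketch of what this event trigger does (test_table is borrowed from the tests added in this commit, not part of the script), dropping a relation that a saved plan references also removes the corresponding rows:

    DROP TABLE test_table;          -- fires the sql_drop event trigger above
    SELECT count(*) FROM sr_plans;  -- plans whose reloids included test_table are now gone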
55 changes: 55 additions & 0 deletions sr_plan--1.1--1.2.sql
@@ -0,0 +1,55 @@
SET sr_plan.enabled = false;

DROP FUNCTION sr_plan_invalid_table() CASCADE;
DROP TABLE sr_plans CASCADE;
CREATE TABLE sr_plans (
    query_hash int NOT NULL,
    query_id int8 NOT NULL,
    plan_hash int NOT NULL,
    enable boolean NOT NULL,
    query varchar NOT NULL,
    plan text NOT NULL,

    reloids oid[],
    index_reloids oid[]
);
CREATE INDEX sr_plans_query_hash_idx ON sr_plans (query_hash);
CREATE INDEX sr_plans_query_oids ON sr_plans USING gin(reloids);
CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids);

CREATE OR REPLACE FUNCTION _p(anyelement)
RETURNS anyelement
AS 'MODULE_PATHNAME', 'do_nothing'
LANGUAGE C STRICT VOLATILE;

CREATE FUNCTION show_plan(query_hash int4,
                          index int4 default null,
                          format cstring default null)
RETURNS SETOF RECORD
AS 'MODULE_PATHNAME', 'show_plan'
LANGUAGE C VOLATILE;

CREATE FUNCTION sr_plan_invalid_table() RETURNS event_trigger
LANGUAGE plpgsql AS $$
DECLARE
    obj record;
    indobj record;
BEGIN
    FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
        WHERE object_type = 'table' OR object_type = 'index'
    LOOP
        IF obj.object_type = 'table' THEN
            DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid];
        ELSE
            IF obj.object_type = 'index' THEN
                DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid];
            END IF;
        END IF;
    END LOOP;
END
$$;

CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop
    EXECUTE PROCEDURE sr_plan_invalid_table();

SET sr_plan.enabled = true;
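
For reference, a minimal sketch of how this migration would be applied after installing the new files (the verification query is illustrative, not part of the commit):

    ALTER EXTENSION sr_plan UPDATE TO '1.2';
    SELECT extversion FROM pg_extension WHERE extname = 'sr_plan';  -- expect '1.2'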
55 changes: 55 additions & 0 deletions sr_plan--1.2.sql
@@ -0,0 +1,55 @@
/* contrib/sr_plan/init.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION sr_plan" to load this file. \quit

CREATE TABLE sr_plans (
    query_hash int NOT NULL,
    query_id int8 NOT NULL,
    plan_hash int NOT NULL,
    enable boolean NOT NULL,
    query varchar NOT NULL,
    plan text NOT NULL,

    reloids oid[],
    index_reloids oid[]
);

CREATE INDEX sr_plans_query_hash_idx ON sr_plans (query_hash);
CREATE INDEX sr_plans_query_oids ON sr_plans USING gin(reloids);
CREATE INDEX sr_plans_query_index_oids ON sr_plans USING gin(index_reloids);

CREATE FUNCTION _p(anyelement)
RETURNS anyelement
AS 'MODULE_PATHNAME', 'do_nothing'
LANGUAGE C STRICT VOLATILE;

CREATE FUNCTION show_plan(query_hash int4,
                          index int4 default null,
                          format cstring default null)
RETURNS SETOF RECORD
AS 'MODULE_PATHNAME', 'show_plan'
LANGUAGE C VOLATILE;

CREATE OR REPLACE FUNCTION sr_plan_invalid_table() RETURNS event_trigger
LANGUAGE plpgsql AS $$
DECLARE
    obj record;
    indobj record;
BEGIN
    FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects()
        WHERE object_type = 'table' OR object_type = 'index'
    LOOP
        IF obj.object_type = 'table' THEN
            DELETE FROM @extschema@.sr_plans WHERE reloids @> ARRAY[obj.objid];
        ELSE
            IF obj.object_type = 'index' THEN
                DELETE FROM @extschema@.sr_plans WHERE index_reloids @> ARRAY[obj.objid];
            END IF;
        END IF;
    END LOOP;
END
$$;

CREATE EVENT TRIGGER sr_plan_invalid_table ON sql_drop
    EXECUTE PROCEDURE sr_plan_invalid_table();
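
A hedged usage sketch of the objects defined above, pieced together from the queries in the new test suite (test_table and the GUC settings come from the tests; the actual replay behavior lives in the C module, so treat this as illustrative):

    SET sr_plan.write_mode = on;                          -- record plans for the queries below
    SELECT * FROM test_table WHERE test_attr1 = _p(10);   -- _p() marks a value as a parameter
    SET sr_plan.write_mode = off;
    UPDATE sr_plans SET enable = true;                    -- mark the saved plan as usable
    SELECT * FROM test_table WHERE test_attr1 = _p(15);   -- intended to reuse the stored plan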
1 change: 1 addition & 0 deletions sr_plan.c
@@ -581,6 +581,7 @@ sr_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
    MemSet(nulls, 0, sizeof(nulls));

    values[Anum_sr_query_hash - 1] = query_hash;
+   values[Anum_sr_query_id - 1] = Int64GetDatum(parse->queryId);
    values[Anum_sr_plan_hash - 1] = plan_hash;
    values[Anum_sr_query - 1] = CStringGetTextDatum(cachedInfo.query_text);
    values[Anum_sr_plan - 1] = CStringGetTextDatum(plan_text);
2 changes: 1 addition & 1 deletion sr_plan.control
@@ -1,4 +1,4 @@
# sr_plan extension
comment = 'functions for save and read plan'
-default_version = '1.1'
+default_version = '1.2'
module_pathname = '$libdir/sr_plan'
3 changes: 2 additions & 1 deletion sr_plan.h
@@ -74,10 +74,11 @@ void common_walker(const void *obj, void (*callback) (void *));
enum
{
    Anum_sr_query_hash = 1,
+   Anum_sr_query_id,
    Anum_sr_plan_hash,
+   Anum_sr_enable,
    Anum_sr_query,
    Anum_sr_plan,
-   Anum_sr_enable,
    Anum_sr_reloids,
    Anum_sr_index_reloids,
    Anum_sr_attcount
162 changes: 162 additions & 0 deletions tests/test_sr_plan.py
@@ -0,0 +1,162 @@
#!/usr/bin/env python3

import sys
import os
import tempfile
import contextlib
import shutil
import unittest
import subprocess

from testgres import get_new_node

sql_init = '''
CREATE TABLE test_table(test_attr1 int, test_attr2 int);
INSERT INTO test_table SELECT i, i + 1 FROM generate_series(1, 20) i;
'''

queries = [
    "SELECT * FROM test_table WHERE test_attr1 = _p(10);",
    "SELECT * FROM test_table WHERE test_attr1 = 10;",
    "SELECT * FROM test_table WHERE test_attr1 = 15;"
]

my_dir = os.path.dirname(os.path.abspath(__file__))
repo_dir = os.path.abspath(os.path.join(my_dir, '../'))
temp_dir = tempfile.mkdtemp()

upgrade_to = '1.2'
check_upgrade_from = ['1.1.0']

compilation = '''
make USE_PGXS=1 clean
make USE_PGXS=1 install
'''

dump_sql = '''
SELECT * FROM pg_extension WHERE extname = 'sr_plan';
SELECT pg_get_functiondef(objid)
FROM pg_catalog.pg_depend JOIN pg_proc ON pg_proc.oid = pg_depend.objid
WHERE refclassid = 'pg_catalog.pg_extension'::REGCLASS AND
refobjid = (SELECT oid
FROM pg_catalog.pg_extension
WHERE extname = 'sr_plan') AND
deptype = 'e'
ORDER BY objid::regprocedure::TEXT ASC;
\\d+ sr_plans
\\dy sr_plan_invalid_table
'''

@contextlib.contextmanager
def cwd(path):
    curdir = os.getcwd()
    os.chdir(path)

    try:
        yield
    finally:
        os.chdir(curdir)


def shell(cmd):
    subprocess.check_output(cmd, shell=True)


def copytree(src, dst):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d)
        else:
            shutil.copy2(s, d)


class Tests(unittest.TestCase):
    def start_node(self):
        node = get_new_node()
        node.init()
        node.append_conf("shared_preload_libraries='sr_plan'\n")
        node.start()
        node.psql('create extension sr_plan')
        node.psql(sql_init)

        return node

    def test_hash_consistency(self):
        ''' Test query hash consistency '''

        with self.start_node() as node:
            node.psql("set sr_plan.write_mode=on")
            node.psql("set sr_plan.log_usage=NOTICE")
            for q in queries:
                node.psql(q)

            node.psql("set sr_plan.write_mode=off")
            queries1 = node.psql('select query_hash from sr_plans')
            self.assertEqual(len(queries), 3)
            node.psql("delete from sr_plans")
            node.stop()

            node.start()
            node.psql("set sr_plan.write_mode=on")
            node.psql("set sr_plan.log_usage=NOTICE")
            for q in queries:
                node.psql(q)

            node.psql("set sr_plan.write_mode=off")
            queries2 = node.psql('select query_hash from sr_plans')
            node.stop()

            self.assertEqual(queries1, queries2)

    def test_update(self):
        copytree(repo_dir, temp_dir)
        dumps = []

        with cwd(temp_dir):
            for ver in check_upgrade_from:
                shell("git clean -fdx")
                shell("git reset --hard")
                shell("git checkout -q %s" % ver)
                shell(compilation)

                with self.start_node() as node:
                    node.stop()

                    shell("git clean -fdx")
                    shell("git checkout -q 55f4704c7258527bd9ccd54cb35790a8e438caaa")
                    shell(compilation)

                    node.start()
                    node.safe_psql("alter extension sr_plan update to '%s'" % upgrade_to)

                    p = subprocess.Popen(["psql", "postgres", "-p", str(node.port)],
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE)
                    dumps.append([ver, p.communicate(input=dump_sql.encode())[0].decode()])
                    node.stop()

            # now make clean install
            with self.start_node() as node:
                p = subprocess.Popen(["psql", "postgres", "-p", str(node.port)],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
                dumped_objects_new = p.communicate(input=dump_sql.encode())[0].decode()

        self.assertEqual(len(dumps), len(check_upgrade_from))
        for ver, dump in dumps:
            self.assertEqual(dump, dumped_objects_new)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        suite = unittest.TestLoader().loadTestsFromName(sys.argv[1],
                                                        module=sys.modules[__name__])
    else:
        suite = unittest.TestLoader().loadTestsFromTestCase(Tests)

    unittest.TextTestRunner(verbosity=2, failfast=True).run(suite)
