Make VACUUM tests more resilient
PG13 changes index vacuum behaviour in versions after 13.2, so
index vacuum is no longer triggered by INSERT-only workloads.
This led to test failures in the ABI test, which runs against a
PG13 snapshot, and would also lead to test differences between
13.2 and 13.3 once the latter is released.
This patch removes the indexes from the vacuum and vacuum_multi
tests, and shrinks the indexes in the vacuum_parallel test to
keep their size stable.

postgres/postgres@9663d12
svenklemm committed Apr 3, 2021
1 parent eace5ea commit dbbbce6
Showing 6 changed files with 90 additions and 71 deletions.
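
The fix follows one pattern throughout: create the hypertable without its default time index, so that VACUUM VERBOSE emits no version-dependent "index ... now contains" lines. A condensed sketch of that pattern (illustrative only, not an excerpt of any single file below):

CREATE TABLE vacuum_test(time timestamp, temp float);
-- with create_default_indexes => false no time index exists, so
-- VACUUM VERBOSE prints no index lines on any PG13 minor version
SELECT create_hypertable('vacuum_test', 'time',
    chunk_time_interval => 2628000000000,
    create_default_indexes => false);
VACUUM (VERBOSE, ANALYZE) vacuum_test;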
8 changes: 2 additions & 6 deletions test/expected/vacuum.out
@@ -3,7 +3,7 @@
 -- LICENSE-APACHE for a copy of the license.
 CREATE TABLE vacuum_test(time timestamp, temp float);
 -- create hypertable with three chunks
-SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 NOTICE: adding not-null constraint to column "time"
 create_hypertable
 --------------------------
@@ -33,22 +33,18 @@ ORDER BY tablename, attname, array_to_string(histogram_bounds, ',');
 
 VACUUM (VERBOSE, ANALYZE) vacuum_test;
 INFO: vacuuming "_timescaledb_internal._hyper_1_1_chunk"
-INFO: index "_hyper_1_1_chunk_vacuum_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_1_1_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_1_1_chunk"
 INFO: "_hyper_1_1_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_1_2_chunk"
-INFO: index "_hyper_1_2_chunk_vacuum_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_1_2_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_1_2_chunk"
 INFO: "_hyper_1_2_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_1_3_chunk"
-INFO: index "_hyper_1_3_chunk_vacuum_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_1_3_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_1_3_chunk"
 INFO: "_hyper_1_3_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "public.vacuum_test"
-INFO: index "vacuum_test_time_idx" now contains 0 row versions in 1 pages
 INFO: "vacuum_test": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages
 INFO: analyzing "public.vacuum_test"
 INFO: "vacuum_test": scanned 0 of 0 pages, containing 0 live rows and 0 dead rows; 0 rows in sample, 0 estimated total rows
@@ -83,7 +79,7 @@ ORDER BY tablename, attname, array_to_string(histogram_bounds, ',');
 DROP TABLE vacuum_test;
 --test plain analyze (no_vacuum)
 CREATE TABLE analyze_test(time timestamp, temp float);
-SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 NOTICE: adding not-null constraint to column "time"
 create_hypertable
 ---------------------------
12 changes: 2 additions & 10 deletions test/expected/vacuum_multi.out
@@ -4,7 +4,7 @@
 --Similar to normal vacuum tests, but PG11 introduced ability to vacuum multiple tables at once, we make sure that works for hypertables as well.
 CREATE TABLE vacuum_test(time timestamp, temp float);
 -- create hypertable with three chunks
-SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 NOTICE: adding not-null constraint to column "time"
 create_hypertable
 --------------------------
@@ -18,7 +18,7 @@ INSERT INTO vacuum_test VALUES ('2017-01-20T16:00:01', 17.5),
 ('2017-06-20T16:00:01', 18.5),
 ('2017-06-21T16:00:01', 11.0);
 CREATE TABLE analyze_test(time timestamp, temp float);
-SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 NOTICE: adding not-null constraint to column "time"
 create_hypertable
 ---------------------------
@@ -55,32 +55,26 @@ ORDER BY tablename, attname, array_to_string(histogram_bounds, ',');
 
 VACUUM (VERBOSE, ANALYZE) vacuum_norm, vacuum_test, analyze_test;
 INFO: vacuuming "_timescaledb_internal._hyper_1_1_chunk"
-INFO: index "_hyper_1_1_chunk_vacuum_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_1_1_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_1_1_chunk"
 INFO: "_hyper_1_1_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_1_2_chunk"
-INFO: index "_hyper_1_2_chunk_vacuum_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_1_2_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_1_2_chunk"
 INFO: "_hyper_1_2_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_1_3_chunk"
-INFO: index "_hyper_1_3_chunk_vacuum_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_1_3_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_1_3_chunk"
 INFO: "_hyper_1_3_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_2_4_chunk"
-INFO: index "_hyper_2_4_chunk_analyze_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_2_4_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_2_4_chunk"
 INFO: "_hyper_2_4_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_2_5_chunk"
-INFO: index "_hyper_2_5_chunk_analyze_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_2_5_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_2_5_chunk"
 INFO: "_hyper_2_5_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "_timescaledb_internal._hyper_2_6_chunk"
-INFO: index "_hyper_2_6_chunk_analyze_test_time_idx" now contains 2 row versions in 2 pages
 INFO: "_hyper_2_6_chunk": found 0 removable, 2 nonremovable row versions in 1 out of 1 pages
 INFO: analyzing "_timescaledb_internal._hyper_2_6_chunk"
 INFO: "_hyper_2_6_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
@@ -89,7 +83,6 @@ INFO: "vacuum_norm": found 0 removable, 6 nonremovable row versions in 1 out of
 INFO: analyzing "public.vacuum_norm"
 INFO: "vacuum_norm": scanned 1 of 1 pages, containing 6 live rows and 0 dead rows; 6 rows in sample, 6 estimated total rows
 INFO: vacuuming "public.vacuum_test"
-INFO: index "vacuum_test_time_idx" now contains 0 row versions in 1 pages
 INFO: "vacuum_test": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages
 INFO: analyzing "public.vacuum_test"
 INFO: "vacuum_test": scanned 0 of 0 pages, containing 0 live rows and 0 dead rows; 0 rows in sample, 0 estimated total rows
@@ -98,7 +91,6 @@ INFO: "_hyper_1_1_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 de
 INFO: "_hyper_1_2_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: "_hyper_1_3_chunk": scanned 1 of 1 pages, containing 2 live rows and 0 dead rows; 2 rows in sample, 2 estimated total rows
 INFO: vacuuming "public.analyze_test"
-INFO: index "analyze_test_time_idx" now contains 0 row versions in 1 pages
 INFO: "analyze_test": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages
 INFO: analyzing "public.analyze_test"
 INFO: "analyze_test": scanned 0 of 0 pages, containing 0 live rows and 0 dead rows; 0 rows in sample, 0 estimated total rows
90 changes: 54 additions & 36 deletions test/expected/vacuum_parallel.out
@@ -4,49 +4,67 @@
 -- PG13 introduced parallel VACUUM functionality. It gets invoked when a table
 -- has two or more indexes on it. Read up more at
 -- https://www.postgresql.org/docs/13/sql-vacuum.html#PARALLEL
-CREATE TABLE vacuum_test(time timestamp, temp1 float, temp2 int);
--- create hypertable with chunks
-SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000);
-NOTICE: adding not-null constraint to column "time"
+CREATE TABLE vacuum_test(time timestamp NOT NULL, temp1 float, temp2 int);
+-- create hypertable
+-- we create chunks in public schema cause otherwise we would need
+-- elevated privileges to create indexes directly
+SELECT create_hypertable('vacuum_test', 'time', create_default_indexes => false, associated_schema_name => 'public');
 create_hypertable
 --------------------------
 (1,public,vacuum_test,t)
 (1 row)
 
--- create additional indexes on the temp columns
-CREATE INDEX vt_temp1 on vacuum_test (temp1);
-CREATE INDEX vt_temp2 on vacuum_test (temp2);
--- parallel vacuum needs the index size to be larger than 512KB to kick in
-INSERT INTO vacuum_test SELECT TIMESTAMP 'epoch' + (i * INTERVAL '100 second'),
-i, i+1 FROM generate_series(1, 100000) as T(i);
+-- parallel vacuum needs the index size to be larger than min_parallel_index_scan_size to kick in
+SET min_parallel_index_scan_size TO 0;
+INSERT INTO vacuum_test SELECT TIMESTAMP 'epoch' + (i * INTERVAL '4h'),
+i, i+1 FROM generate_series(1, 100) as T(i);
+-- create indexes on the temp columns
+-- we create indexes manually because otherwise vacuum verbose output
+-- would be different between 13.2 and 13.3+
+-- 13.2 would try to vacuum the parent table index too while 13.3+ wouldn't
+CREATE INDEX ON _hyper_1_1_chunk(time);
+CREATE INDEX ON _hyper_1_1_chunk(temp1);
+CREATE INDEX ON _hyper_1_1_chunk(temp2);
+CREATE INDEX ON _hyper_1_2_chunk(time);
+CREATE INDEX ON _hyper_1_2_chunk(temp1);
+CREATE INDEX ON _hyper_1_2_chunk(temp2);
+CREATE INDEX ON _hyper_1_3_chunk(time);
+CREATE INDEX ON _hyper_1_3_chunk(temp1);
+CREATE INDEX ON _hyper_1_3_chunk(temp2);
+-- INSERT only will not trigger vacuum on indexes for PG13.3+
+UPDATE vacuum_test SET time = time + '1s'::interval, temp1 = random(), temp2 = random();
 -- we should see two parallel workers for each chunk
 VACUUM (PARALLEL 3, VERBOSE) vacuum_test;
-INFO: vacuuming "_timescaledb_internal._hyper_1_1_chunk"
-INFO: launched 2 parallel vacuum workers for index cleanup (planned: 2)
-INFO: index "_hyper_1_1_chunk_vacuum_test_time_idx" now contains 26279 row versions in 131 pages
-INFO: index "_hyper_1_1_chunk_vt_temp1" now contains 26279 row versions in 74 pages
-INFO: index "_hyper_1_1_chunk_vt_temp2" now contains 26279 row versions in 74 pages
-INFO: "_hyper_1_1_chunk": found 0 removable, 26279 nonremovable row versions in 168 out of 168 pages
-INFO: vacuuming "_timescaledb_internal._hyper_1_2_chunk"
-INFO: launched 2 parallel vacuum workers for index cleanup (planned: 2)
-INFO: index "_hyper_1_2_chunk_vacuum_test_time_idx" now contains 26280 row versions in 131 pages
-INFO: index "_hyper_1_2_chunk_vt_temp1" now contains 26280 row versions in 74 pages
-INFO: index "_hyper_1_2_chunk_vt_temp2" now contains 26280 row versions in 74 pages
-INFO: "_hyper_1_2_chunk": found 0 removable, 26280 nonremovable row versions in 168 out of 168 pages
-INFO: vacuuming "_timescaledb_internal._hyper_1_3_chunk"
-INFO: launched 2 parallel vacuum workers for index cleanup (planned: 2)
-INFO: index "_hyper_1_3_chunk_vacuum_test_time_idx" now contains 26280 row versions in 131 pages
-INFO: index "_hyper_1_3_chunk_vt_temp1" now contains 26280 row versions in 74 pages
-INFO: index "_hyper_1_3_chunk_vt_temp2" now contains 26280 row versions in 74 pages
-INFO: "_hyper_1_3_chunk": found 0 removable, 26280 nonremovable row versions in 168 out of 168 pages
-INFO: vacuuming "_timescaledb_internal._hyper_1_4_chunk"
-INFO: index "_hyper_1_4_chunk_vacuum_test_time_idx" now contains 21161 row versions in 106 pages
-INFO: index "_hyper_1_4_chunk_vt_temp1" now contains 21161 row versions in 60 pages
-INFO: index "_hyper_1_4_chunk_vt_temp2" now contains 21161 row versions in 60 pages
-INFO: "_hyper_1_4_chunk": found 0 removable, 21161 nonremovable row versions in 135 out of 135 pages
+INFO: vacuuming "public._hyper_1_1_chunk"
+INFO: launched 2 parallel vacuum workers for index vacuuming (planned: 2)
+INFO: scanned index "_hyper_1_1_chunk_time_idx" to remove 41 row versions
+INFO: scanned index "_hyper_1_1_chunk_temp1_idx" to remove 41 row versions
+INFO: scanned index "_hyper_1_1_chunk_temp2_idx" to remove 41 row versions
+INFO: "_hyper_1_1_chunk": removed 41 row versions in 1 pages
+INFO: index "_hyper_1_1_chunk_time_idx" now contains 41 row versions in 2 pages
+INFO: index "_hyper_1_1_chunk_temp1_idx" now contains 41 row versions in 2 pages
+INFO: index "_hyper_1_1_chunk_temp2_idx" now contains 41 row versions in 2 pages
+INFO: "_hyper_1_1_chunk": found 41 removable, 41 nonremovable row versions in 1 out of 1 pages
+INFO: vacuuming "public._hyper_1_2_chunk"
+INFO: launched 2 parallel vacuum workers for index vacuuming (planned: 2)
+INFO: scanned index "_hyper_1_2_chunk_time_idx" to remove 42 row versions
+INFO: scanned index "_hyper_1_2_chunk_temp1_idx" to remove 42 row versions
+INFO: scanned index "_hyper_1_2_chunk_temp2_idx" to remove 42 row versions
+INFO: "_hyper_1_2_chunk": removed 42 row versions in 1 pages
+INFO: index "_hyper_1_2_chunk_time_idx" now contains 42 row versions in 2 pages
+INFO: index "_hyper_1_2_chunk_temp1_idx" now contains 42 row versions in 2 pages
+INFO: index "_hyper_1_2_chunk_temp2_idx" now contains 42 row versions in 2 pages
+INFO: "_hyper_1_2_chunk": found 42 removable, 42 nonremovable row versions in 1 out of 1 pages
+INFO: vacuuming "public._hyper_1_3_chunk"
+INFO: launched 2 parallel vacuum workers for index vacuuming (planned: 2)
+INFO: scanned index "_hyper_1_3_chunk_time_idx" to remove 17 row versions
+INFO: scanned index "_hyper_1_3_chunk_temp1_idx" to remove 17 row versions
+INFO: scanned index "_hyper_1_3_chunk_temp2_idx" to remove 17 row versions
+INFO: "_hyper_1_3_chunk": removed 17 row versions in 1 pages
+INFO: index "_hyper_1_3_chunk_time_idx" now contains 17 row versions in 2 pages
+INFO: index "_hyper_1_3_chunk_temp1_idx" now contains 17 row versions in 2 pages
+INFO: index "_hyper_1_3_chunk_temp2_idx" now contains 17 row versions in 2 pages
+INFO: "_hyper_1_3_chunk": found 17 removable, 17 nonremovable row versions in 1 out of 1 pages
 INFO: vacuuming "public.vacuum_test"
-INFO: index "vacuum_test_time_idx" now contains 0 row versions in 1 pages
-INFO: index "vt_temp1" now contains 0 row versions in 1 pages
-INFO: index "vt_temp2" now contains 0 row versions in 1 pages
 INFO: "vacuum_test": found 0 removable, 0 nonremovable row versions in 0 out of 0 pages
 DROP TABLE vacuum_test;
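
The vacuum_parallel expected output above encodes two workarounds: PG 13.3+ skips index vacuuming when there are no dead row versions, so the test UPDATEs every row first, and the shrunken indexes would otherwise fall under min_parallel_index_scan_size, so the test zeroes that GUC. A minimal sketch of the same setup against a plain PG13+ table (hypothetical table name t, not part of the committed files):

CREATE TABLE t(time timestamp NOT NULL, temp1 float, temp2 int);
CREATE INDEX ON t(time);
CREATE INDEX ON t(temp1);
CREATE INDEX ON t(temp2);
SET min_parallel_index_scan_size TO 0;  -- tiny indexes stay eligible for workers
INSERT INTO t SELECT now() + i * interval '4h', i, i + 1
FROM generate_series(1, 100) AS s(i);
UPDATE t SET temp1 = random();          -- dead rows force the index-vacuum phase
-- with three indexes this plans two workers, matching the "(planned: 2)" lines above
VACUUM (PARALLEL 3, VERBOSE) t;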
4 changes: 2 additions & 2 deletions test/sql/vacuum.sql
@@ -5,7 +5,7 @@
 CREATE TABLE vacuum_test(time timestamp, temp float);
 
 -- create hypertable with three chunks
-SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 
 INSERT INTO vacuum_test VALUES ('2017-01-20T16:00:01', 17.5),
 ('2017-01-21T16:00:01', 19.1),
@@ -40,7 +40,7 @@ DROP TABLE vacuum_test;
 --test plain analyze (no_vacuum)
 CREATE TABLE analyze_test(time timestamp, temp float);
 
-SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 
 INSERT INTO analyze_test VALUES ('2017-01-20T16:00:01', 17.5),
 ('2017-01-21T16:00:01', 19.1),
6 changes: 2 additions & 4 deletions test/sql/vacuum_multi.sql
@@ -6,7 +6,7 @@
 CREATE TABLE vacuum_test(time timestamp, temp float);
 
 -- create hypertable with three chunks
-SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 
 INSERT INTO vacuum_test VALUES ('2017-01-20T16:00:01', 17.5),
 ('2017-01-21T16:00:01', 19.1),
@@ -16,7 +16,7 @@ INSERT INTO vacuum_test VALUES ('2017-01-20T16:00:01', 17.5),
 ('2017-06-21T16:00:01', 11.0);
 CREATE TABLE analyze_test(time timestamp, temp float);
 
-SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000);
+SELECT create_hypertable('analyze_test', 'time', chunk_time_interval => 2628000000000, create_default_indexes => false);
 
 INSERT INTO analyze_test VALUES ('2017-01-20T16:00:01', 17.5),
 ('2017-01-21T16:00:01', 19.1),
@@ -54,5 +54,3 @@ SELECT tablename, attname, histogram_bounds, n_distinct FROM pg_stats
 WHERE schemaname = 'public'
 ORDER BY tablename, attname, array_to_string(histogram_bounds, ',');
 
-
-
41 changes: 28 additions & 13 deletions test/sql/vacuum_parallel.sql
@@ -2,23 +2,38 @@
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-APACHE for a copy of the license.
 
-
 -- PG13 introduced parallel VACUUM functionality. It gets invoked when a table
 -- has two or more indexes on it. Read up more at
 -- https://www.postgresql.org/docs/13/sql-vacuum.html#PARALLEL
 
-CREATE TABLE vacuum_test(time timestamp, temp1 float, temp2 int);
-
--- create hypertable with chunks
-SELECT create_hypertable('vacuum_test', 'time', chunk_time_interval => 2628000000000);
-
--- create additional indexes on the temp columns
-CREATE INDEX vt_temp1 on vacuum_test (temp1);
-CREATE INDEX vt_temp2 on vacuum_test (temp2);
-
--- parallel vacuum needs the index size to be larger than 512KB to kick in
-INSERT INTO vacuum_test SELECT TIMESTAMP 'epoch' + (i * INTERVAL '100 second'),
-i, i+1 FROM generate_series(1, 100000) as T(i);
+CREATE TABLE vacuum_test(time timestamp NOT NULL, temp1 float, temp2 int);
+
+-- create hypertable
+-- we create chunks in public schema cause otherwise we would need
+-- elevated privileges to create indexes directly
+SELECT create_hypertable('vacuum_test', 'time', create_default_indexes => false, associated_schema_name => 'public');
+
+-- parallel vacuum needs the index size to be larger than min_parallel_index_scan_size to kick in
+SET min_parallel_index_scan_size TO 0;
+INSERT INTO vacuum_test SELECT TIMESTAMP 'epoch' + (i * INTERVAL '4h'),
+i, i+1 FROM generate_series(1, 100) as T(i);
+
+-- create indexes on the temp columns
+-- we create indexes manually because otherwise vacuum verbose output
+-- would be different between 13.2 and 13.3+
+-- 13.2 would try to vacuum the parent table index too while 13.3+ wouldn't
+CREATE INDEX ON _hyper_1_1_chunk(time);
+CREATE INDEX ON _hyper_1_1_chunk(temp1);
+CREATE INDEX ON _hyper_1_1_chunk(temp2);
+CREATE INDEX ON _hyper_1_2_chunk(time);
+CREATE INDEX ON _hyper_1_2_chunk(temp1);
+CREATE INDEX ON _hyper_1_2_chunk(temp2);
+CREATE INDEX ON _hyper_1_3_chunk(time);
+CREATE INDEX ON _hyper_1_3_chunk(temp1);
+CREATE INDEX ON _hyper_1_3_chunk(temp2);
+
+-- INSERT only will not trigger vacuum on indexes for PG13.3+
+UPDATE vacuum_test SET time = time + '1s'::interval, temp1 = random(), temp2 = random();
 
 -- we should see two parallel workers for each chunk
 VACUUM (PARALLEL 3, VERBOSE) vacuum_test;
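
For reference, the upstream behaviour change this commit dodges can be reproduced on its own (a hypothetical check, not part of the committed files; ins_only is a made-up table): per the commit message, 13.3+ no longer triggers index vacuum for INSERT-only tables.

CREATE TABLE ins_only(x int);
CREATE INDEX ON ins_only(x);
INSERT INTO ins_only SELECT generate_series(1, 1000);
VACUUM (VERBOSE) ins_only;             -- 13.3+: no index vacuum lines expected
DELETE FROM ins_only WHERE x % 2 = 0;  -- create dead row versions
VACUUM (VERBOSE) ins_only;             -- index vacuum should run again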
