Add infrastructure for psql-based golden tests
This adds the infrastructure for simple golden file tests.
It also adds a simple golden file test for views.
cevian committed Apr 30, 2020
1 parent 3e5e64e commit 39b76b5
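
To run these tests locally, the flags added in migrate_test.go below suggest an invocation along these lines (a sketch: it assumes docker is available and that the command is run from the repository root):

    # run the golden file tests against a dockerized postgres
    go test ./pkg/pgmodel -run TestSQLGoldenFiles

    # rewrite testdata/expected/*.out from the actual psql output
    go test ./pkg/pgmodel -run TestSQLGoldenFiles -update
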
Showing 7 changed files with 269 additions and 8 deletions.
24 changes: 23 additions & 1 deletion pkg/internal/testhelpers/containers.go
@@ -7,8 +7,10 @@ package testhelpers
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"

"github.com/docker/go-connections/nat"
@@ -91,7 +93,7 @@ func dbSetup(DBName string) (*pgxpool.Pool, error) {
}

// StartPGContainer starts a PostgreSQL container for use in testing
func StartPGContainer(ctx context.Context, withExtension bool) (testcontainers.Container, error) {
func StartPGContainer(ctx context.Context, withExtension bool, testDataDir string) (testcontainers.Container, error) {
containerPort := nat.Port("5432/tcp")
var image string
if withExtension {
@@ -106,7 +108,15 @@ func StartPGContainer(ctx context.Context, withExtension bool) (testcontainers.C
Env: map[string]string{
"POSTGRES_PASSWORD": "password",
},
SkipReaper: false, /* switch to true to avoid killing the docker container */
}

if testDataDir != "" {
req.BindMounts = map[string]string{
testDataDir: "/testdata",
}
}

container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
@@ -169,3 +179,15 @@ func StartPromContainer(storagePath string, ctx context.Context) (testcontainers

return container, nil
}

// TempDir returns a temp directory for tests
func TempDir(name string) (string, error) {
tmpDir := ""

if runtime.GOOS == "darwin" {
// Docker on Mac lacks access to default os tmp dir - "/var/folders/random_number"
// so switch to cross-user tmp dir
tmpDir = "/tmp"
}
return ioutil.TempDir(tmpDir, name)
}
2 changes: 1 addition & 1 deletion pkg/internal/testhelpers/containers_test.go
@@ -62,7 +62,7 @@ func TestMain(m *testing.M) {
flag.Parse()
ctx := context.Background()
if !testing.Short() && *useDocker {
pgContainer, err := StartPGContainer(ctx, true)
pgContainer, err := StartPGContainer(ctx, true, "")
if err != nil {
fmt.Println("Error setting up container", err)
os.Exit(1)
7 changes: 7 additions & 0 deletions pkg/log/log.go
@@ -7,6 +7,7 @@ package log

import (
"fmt"
"os"

"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
@@ -55,6 +56,12 @@ func Error(keyvals ...interface{}) {
_ = level.Error(logger).Log(keyvals...)
}

// Fatal logs an ERROR level message and exits
func Fatal(keyvals ...interface{}) {
_ = level.Error(logger).Log(keyvals...)
os.Exit(1)
}

// CustomCacheLogger is a custom logger used for transforming cache logs
// so that they conform to our logging setup. It also implements the
// bigcache.Logger interface.
131 changes: 126 additions & 5 deletions pkg/pgmodel/migrate_test.go
@@ -9,14 +9,18 @@ import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"testing"
"time"

"github.com/jackc/pgx/v4/pgxpool"
"github.com/testcontainers/testcontainers-go"
"github.com/timescale/timescale-prometheus/pkg/internal/testhelpers"
"github.com/timescale/timescale-prometheus/pkg/log"

@@ -26,9 +30,12 @@
)

var (
database = flag.String("database", "tmp_db_timescale_migrate_test", "database to run integration tests on")
useDocker = flag.Bool("use-docker", true, "start database using a docker container")
useExtension = flag.Bool("use-extension", true, "use the timescale_prometheus_extra extension")
database = flag.String("database", "tmp_db_timescale_migrate_test", "database to run integration tests on")
useDocker = flag.Bool("use-docker", true, "start database using a docker container")
useExtension = flag.Bool("use-extension", true, "use the timescale_prometheus_extra extension")
updateGoldenFiles = flag.Bool("update", false, "update the golden files of this test")
pgContainer testcontainers.Container
pgContainerTestDataDir string
)

const (
@@ -993,6 +1000,86 @@ func TestSQLDropMetricChunk(t *testing.T) {
})
}

func copyFile(src string, dest string) error {
sourceFile, err := os.Open(src)
if err != nil {
return err
}
defer sourceFile.Close()

newFile, err := os.Create(dest)
if err != nil {
return err
}
defer newFile.Close()

_, err = io.Copy(newFile, sourceFile)
if err != nil {
return err
}
return nil
}

func TestSQLGoldenFiles(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
withDB(t, *database, func(db *pgxpool.Pool, t testing.TB) {
files, err := filepath.Glob("testdata/sql/*")
if err != nil {
t.Fatal(err)
}

for _, file := range files {
base := filepath.Base(file)
base = strings.TrimSuffix(base, filepath.Ext(base))
i, err := pgContainer.Exec(context.Background(), []string{"bash", "-c", "psql -U postgres -d " + *database + " -f /testdata/sql/" + base + ".sql &> /testdata/out/" + base + ".out"})
if err != nil {
t.Fatal(err)
}

if i != 0 {
/* on psql failure print the logs */
rc, err := pgContainer.Logs(context.Background())
if err != nil {
t.Fatal(err)
}
defer rc.Close()

msg, err := ioutil.ReadAll(rc)
if err != nil {
t.Fatal(err)
}
t.Log(string(msg))
}

expectedFile := filepath.Join("testdata/expected/", base+".out")
actualFile := filepath.Join(pgContainerTestDataDir, "out", base+".out")

if *updateGoldenFiles {
err = copyFile(actualFile, expectedFile)
if err != nil {
t.Fatal(err)
}
}

expected, err := ioutil.ReadFile(expectedFile)
if err != nil {
t.Fatal(err)
}

actual, err := ioutil.ReadFile(actualFile)
if err != nil {
t.Fatal(err)
}

if string(expected) != string(actual) {
t.Fatalf("Golden file does not match result: diff %s %s", expectedFile, actualFile)
}
}
})
}
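
For orientation: each testdata/sql/<name>.sql is executed by psql inside the container against the bind-mounted /testdata copy, stdout and stderr are captured to /testdata/out/<name>.out, and that output must match testdata/expected/<name>.out byte for byte. Adding a golden test is therefore a new SQL file plus one -update run; a sketch follows, where the file name metrics.sql and the metric mem_usage are illustrative only. A new file pkg/pgmodel/testdata/sql/metrics.sql:

    \set ECHO all
    \set ON_ERROR_STOP 1
    SELECT _prom_catalog.get_or_create_metric_table_name('mem_usage');

then one run to generate pkg/pgmodel/testdata/expected/metrics.out from the actual output:

    go test ./pkg/pgmodel -run TestSQLGoldenFiles -update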

func TestSQLDropChunk(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
@@ -1110,12 +1197,45 @@ func TestExtensionFunctions(t *testing.T) {
})
}

func generatePGTestDirFiles() string {
tmpDir, err := testhelpers.TempDir("testdata")
if err != nil {
log.Fatal(err)
}

err = os.Mkdir(filepath.Join(tmpDir, "sql"), 0777)
if err != nil {
log.Fatal(err)
}
err = os.Mkdir(filepath.Join(tmpDir, "out"), 0777)
if err != nil {
log.Fatal(err)
}

files, err := filepath.Glob("testdata/sql/*")
if err != nil {
log.Fatal(err)
}

for _, file := range files {
err = copyFile(file, filepath.Join(tmpDir, "sql", filepath.Base(file)))
if err != nil {
log.Fatal(err)
}
}
return tmpDir
}

func TestMain(m *testing.M) {
flag.Parse()
ctx := context.Background()
_ = log.Init("debug")
if !testing.Short() && *useDocker {
pgCont, err := testhelpers.StartPGContainer(ctx, *useExtension)
var err error

pgContainerTestDataDir = generatePGTestDirFiles()

pgContainer, err = testhelpers.StartPGContainer(ctx, *useExtension, pgContainerTestDataDir)
if err != nil {
fmt.Println("Error setting up container", err)
os.Exit(1)
@@ -1133,10 +1253,11 @@
os.Exit(1)
}
defer func() {
err := pgCont.Terminate(ctx)
if err != nil {
panic(err)
}
pgContainer = nil

err = promCont.Terminate(ctx)
if err != nil {
panic(err)
85 changes: 85 additions & 0 deletions pkg/pgmodel/testdata/expected/views.out
@@ -0,0 +1,85 @@
\set ON_ERROR_STOP 1
SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage');
psql:/testdata/sql/views.sql:4: NOTICE: adding not-null constraint to column "time"
DETAIL: Time dimensions cannot have NULL values
psql:/testdata/sql/views.sql:4: NOTICE: adding index _compressed_hypertable_2_series_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(series_id, _ts_meta_sequence_num)
get_or_create_metric_table_name
---------------------------------
(1,cpu_usage)
(1 row)

SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total');
psql:/testdata/sql/views.sql:5: NOTICE: adding not-null constraint to column "time"
DETAIL: Time dimensions cannot have NULL values
psql:/testdata/sql/views.sql:5: NOTICE: adding index _compressed_hypertable_4_series_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_4 USING BTREE(series_id, _ts_meta_sequence_num)
get_or_create_metric_table_name
---------------------------------
(2,cpu_total)
(1 row)

INSERT INTO prom_data.cpu_usage
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}')
FROM generate_series(1,10) g;
INSERT 0 10
INSERT INTO prom_data.cpu_usage
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}')
FROM generate_series(1,10) g;
INSERT 0 10
INSERT INTO prom_data.cpu_total
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}')
FROM generate_series(1,10) g;
INSERT 0 10
INSERT INTO prom_data.cpu_total
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}')
FROM generate_series(1,10) g;
INSERT 0 10
SELECT * FROM prom_info.label ORDER BY key;
key | value_column_name | id_column_name | values
-----------+-------------------+----------------+-----------------------
__name__ | __name__ | __name___id | {cpu_total,cpu_usage}
namespace | namespace | namespace_id | {dev,production}
new_tag | new_tag | new_tag_id | {foo}
new_tag_2 | new_tag_2 | new_tag_2_id | {bar}
node | node | node_id | {brain,pinky}
(5 rows)

SELECT count(compress_chunk(i)) from show_chunks('prom_data.cpu_usage') i;
count
-------
1
(1 row)

SELECT * FROM prom_info.metric ORDER BY id;
id | metric_name | table_name | retention_period | chunk_interval | label_keys | size | compression_ratio | total_chunks | compressed_chunks
----+-------------+------------+------------------+----------------+-------------------------------------+-------+-------------------------+--------------+-------------------
1 | cpu_usage | cpu_usage | 90 days | 08:00:00 | {__name__,namespace,new_tag,node} | 48 kB | 20.00000000000000000000 | 1 | 1
2 | cpu_total | cpu_total | 90 days | 08:00:00 | {__name__,namespace,new_tag_2,node} | 40 kB | | |
(2 rows)

SELECT * FROM cpu_usage ORDER BY time, series_id LIMIT 5;
time | value | series_id | labels | node_id | namespace_id | new_tag_id
------------------------+-------+-----------+-----------+---------+--------------+------------
2000-01-01 02:03:05+00 | 101.1 | 1 | {2,1,3} | 1 | 3 |
2000-01-01 02:03:05+00 | 101.1 | 2 | {2,4,6,5} | 4 | 6 | 5
2000-01-01 02:03:06+00 | 102.1 | 1 | {2,1,3} | 1 | 3 |
2000-01-01 02:03:06+00 | 102.1 | 2 | {2,4,6,5} | 4 | 6 | 5
2000-01-01 02:03:07+00 | 103.1 | 1 | {2,1,3} | 1 | 3 |
(5 rows)

SELECT time, value, jsonb(labels), val(namespace_id) FROM cpu_usage ORDER BY time, series_id LIMIT 5;
time | value | jsonb | val
------------------------+-------+-----------------------------------------------------------------------------------------+------------
2000-01-01 02:03:05+00 | 101.1 | {"node": "brain", "__name__": "cpu_usage", "namespace": "dev"} | dev
2000-01-01 02:03:05+00 | 101.1 | {"node": "pinky", "new_tag": "foo", "__name__": "cpu_usage", "namespace": "production"} | production
2000-01-01 02:03:06+00 | 102.1 | {"node": "brain", "__name__": "cpu_usage", "namespace": "dev"} | dev
2000-01-01 02:03:06+00 | 102.1 | {"node": "pinky", "new_tag": "foo", "__name__": "cpu_usage", "namespace": "production"} | production
2000-01-01 02:03:07+00 | 103.1 | {"node": "brain", "__name__": "cpu_usage", "namespace": "dev"} | dev
(5 rows)

SELECT * FROM prom_series.cpu_usage ORDER BY series_id;
series_id | labels | node | namespace | new_tag
-----------+-----------+-------+------------+---------
1 | {2,1,3} | brain | dev |
2 | {2,4,6,5} | pinky | production | foo
(2 rows)

26 changes: 26 additions & 0 deletions pkg/pgmodel/testdata/sql/views.sql
@@ -0,0 +1,26 @@
\set ECHO all
\set ON_ERROR_STOP 1

SELECT _prom_catalog.get_or_create_metric_table_name('cpu_usage');
SELECT _prom_catalog.get_or_create_metric_table_name('cpu_total');
INSERT INTO prom_data.cpu_usage
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, series_id('{"__name__": "cpu_usage", "namespace":"dev", "node": "brain"}')
FROM generate_series(1,10) g;
INSERT INTO prom_data.cpu_usage
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.1 + g, series_id('{"__name__": "cpu_usage", "namespace":"production", "node": "pinky", "new_tag":"foo"}')
FROM generate_series(1,10) g;
INSERT INTO prom_data.cpu_total
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, series_id('{"__name__": "cpu_total", "namespace":"dev", "node": "brain"}')
FROM generate_series(1,10) g;
INSERT INTO prom_data.cpu_total
SELECT timestamptz '2000-01-01 02:03:04'+(interval '1s' * g), 100.0, series_id('{"__name__": "cpu_total", "namespace":"production", "node": "pinky", "new_tag_2":"bar"}')
FROM generate_series(1,10) g;

SELECT * FROM prom_info.label ORDER BY key;

SELECT count(compress_chunk(i)) from show_chunks('prom_data.cpu_usage') i;
SELECT * FROM prom_info.metric ORDER BY id;

SELECT * FROM cpu_usage ORDER BY time, series_id LIMIT 5;
SELECT time, value, jsonb(labels), val(namespace_id) FROM cpu_usage ORDER BY time, series_id LIMIT 5;
SELECT * FROM prom_series.cpu_usage ORDER BY series_id;
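
This script is copied into the container's /testdata/sql directory at startup by generatePGTestDirFiles; its psql output, captured by TestSQLGoldenFiles, is exactly what views.out above records.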
2 changes: 1 addition & 1 deletion pkg/util/election_test.go
@@ -211,7 +211,7 @@ func TestMain(m *testing.M) {
}
ctx := context.Background()
if !testing.Short() && *useDocker {
container, err := testhelpers.StartPGContainer(ctx, false)
container, err := testhelpers.StartPGContainer(ctx, false, "")
if err != nil {
fmt.Println("Error setting up container", err)
os.Exit(1)
