Skip to content

Commit

Permalink
test: implement test for upgrade
Browse files Browse the repository at this point in the history
This patch introduces new type of test - "upgrade".
The main idea is to try to bootstrap a server with an old schema
version; then these changes should be applied to all replicas in the replicaset.

A significant change from the existing integration tests is that we
do this using prepared snaps and wals.

Need for #640
  • Loading branch information
olegrok committed Mar 24, 2020
1 parent 887843a commit 84b3c8b
Show file tree
Hide file tree
Showing 30 changed files with 329 additions and 0 deletions.
61 changes: 61 additions & 0 deletions test/upgrade/README.md
@@ -0,0 +1,61 @@
### Notes about upgrade tests

This part of the test suite checks that we can perform an upgrade
from a lower Tarantool schema version to a higher one.

This readme demonstrates how this works.
First of all, we should prepare the data with an
initial schema version.

As an example, let's look at `test_master_replica`.
The main idea of the test: first bootstrap an
instance using Tarantool 1.10 and then upgrade it to
2.2+.

Our algorithm:
* Bootstrap an instance using Tarantool 1.10
* Upgrade Tarantool version to 2.2+
* Start a cluster with upgrade procedure
* Perform some checks.


We have a test script that could be used as a
snippet for bootstrap.

Let's apply the following diff and run test:
```diff
--- a/test/upgrade/test_master_replica/upgrade_master_replica_1_10_test.lua
+++ b/test/upgrade/test_master_replica/upgrade_master_replica_1_10_test.lua
@@ -6,10 +6,7 @@ local helpers = require('test.helper')

g.before_all = function()
local cwd = fio.cwd()
- local test_data_dir = fio.pathjoin(cwd, 'test/upgrade/test_master_replica/data')
- local datadir = fio.tempdir()
- local ok, err = fio.copytree(test_data_dir, datadir)
- assert(ok, err)
+ local datadir = fio.pathjoin(cwd, 'test/upgrade/test_master_replica/data')

local cookie = 'upgrade-1.10-2.2'

@@ -58,13 +55,12 @@ g.before_all = function()
})
-- We start cluster from existing 1.10 snapshots
-- with schema version {'1', '10', '2'}
- g.cluster.bootstrapped = true
+ --g.cluster.bootstrapped = true
g.cluster:start()
end

g.after_all = function()
g.cluster:stop()
- fio.rmtree(g.cluster.datadir)
end

function g.test_upgrade()
```

A result will be written in `'test/upgrade/test_master_replica/data'`.
Then we could uncomment `g.cluster.bootstrapped = true` back and run
a test again to be closer to real situation.
After that, the wals/snaps/configs are ready to be tested.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,8 @@
XLOG
0.13
Version: 1.10.5-31-g9cb39b862
Instance: aaaaaaaa-aaaa-0000-0000-000000000001
VClock: {1: 3}
PrevVClock: {1: 2}

���
@@ -0,0 +1,5 @@
---
cookie_max_age: 2592000
enabled: false
cookie_renew_age: 86400
...
@@ -0,0 +1,35 @@
---
replicasets:
bbbbbbbb-0000-0000-0000-000000000000:
weight: 1
master:
- bbbbbbbb-bbbb-0000-0000-000000000001
- bbbbbbbb-bbbb-0000-0000-000000000002
alias: unnamed
roles:
myrole-permanent: true
vshard-storage: true
vshard_group: default
aaaaaaaa-0000-0000-0000-000000000000:
master:
- aaaaaaaa-aaaa-0000-0000-000000000001
weight: 0
roles:
myrole-permanent: true
vshard-router: true
alias: unnamed
servers:
bbbbbbbb-bbbb-0000-0000-000000000002:
disabled: false
replicaset_uuid: bbbbbbbb-0000-0000-0000-000000000000
uri: localhost:13305
bbbbbbbb-bbbb-0000-0000-000000000001:
disabled: false
replicaset_uuid: bbbbbbbb-0000-0000-0000-000000000000
uri: localhost:13303
aaaaaaaa-aaaa-0000-0000-000000000001:
disabled: false
replicaset_uuid: aaaaaaaa-0000-0000-0000-000000000000
uri: localhost:13301
failover: false
...
@@ -0,0 +1,10 @@
---
default:
rebalancer_max_receiving: 100
bootstrapped: true
collect_bucket_garbage_interval: 0.5
collect_lua_garbage: false
sync_timeout: 1
bucket_count: 3000
rebalancer_disbalance_threshold: 1
...
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,8 @@
XLOG
0.13
Version: 1.10.5-31-g9cb39b862
Instance: bbbbbbbb-bbbb-0000-0000-000000000001
VClock: {1: 3036, 2: 2}
PrevVClock: {1: 3036, 2: 1}

���
@@ -0,0 +1,5 @@
---
cookie_max_age: 2592000
enabled: false
cookie_renew_age: 86400
...
@@ -0,0 +1,35 @@
---
replicasets:
bbbbbbbb-0000-0000-0000-000000000000:
weight: 1
master:
- bbbbbbbb-bbbb-0000-0000-000000000001
- bbbbbbbb-bbbb-0000-0000-000000000002
alias: unnamed
roles:
myrole-permanent: true
vshard-storage: true
vshard_group: default
aaaaaaaa-0000-0000-0000-000000000000:
master:
- aaaaaaaa-aaaa-0000-0000-000000000001
weight: 0
roles:
myrole-permanent: true
vshard-router: true
alias: unnamed
servers:
bbbbbbbb-bbbb-0000-0000-000000000002:
disabled: false
replicaset_uuid: bbbbbbbb-0000-0000-0000-000000000000
uri: localhost:13305
bbbbbbbb-bbbb-0000-0000-000000000001:
disabled: false
replicaset_uuid: bbbbbbbb-0000-0000-0000-000000000000
uri: localhost:13303
aaaaaaaa-aaaa-0000-0000-000000000001:
disabled: false
replicaset_uuid: aaaaaaaa-0000-0000-0000-000000000000
uri: localhost:13301
failover: false
...
@@ -0,0 +1,10 @@
---
default:
rebalancer_max_receiving: 100
bootstrapped: true
collect_bucket_garbage_interval: 0.5
collect_lua_garbage: false
sync_timeout: 1
bucket_count: 3000
rebalancer_disbalance_threshold: 1
...
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,8 @@
XLOG
0.13
Version: 1.10.5-31-g9cb39b862
Instance: bbbbbbbb-bbbb-0000-0000-000000000002
VClock: {1: 3036, 2: 2}
PrevVClock: {1: 3035, 2: 1}

���
@@ -0,0 +1,5 @@
---
cookie_max_age: 2592000
enabled: false
cookie_renew_age: 86400
...
@@ -0,0 +1,35 @@
---
replicasets:
bbbbbbbb-0000-0000-0000-000000000000:
weight: 1
master:
- bbbbbbbb-bbbb-0000-0000-000000000001
- bbbbbbbb-bbbb-0000-0000-000000000002
alias: unnamed
roles:
myrole-permanent: true
vshard-storage: true
vshard_group: default
aaaaaaaa-0000-0000-0000-000000000000:
master:
- aaaaaaaa-aaaa-0000-0000-000000000001
weight: 0
roles:
myrole-permanent: true
vshard-router: true
alias: unnamed
servers:
bbbbbbbb-bbbb-0000-0000-000000000002:
disabled: false
replicaset_uuid: bbbbbbbb-0000-0000-0000-000000000000
uri: localhost:13305
bbbbbbbb-bbbb-0000-0000-000000000001:
disabled: false
replicaset_uuid: bbbbbbbb-0000-0000-0000-000000000000
uri: localhost:13303
aaaaaaaa-aaaa-0000-0000-000000000001:
disabled: false
replicaset_uuid: aaaaaaaa-0000-0000-0000-000000000000
uri: localhost:13301
failover: false
...
@@ -0,0 +1,10 @@
---
default:
rebalancer_max_receiving: 100
bootstrapped: true
collect_bucket_garbage_interval: 0.5
collect_lua_garbage: false
sync_timeout: 1
bucket_count: 3000
rebalancer_disbalance_threshold: 1
...
@@ -0,0 +1,94 @@
local fio = require('fio')
local t = require('luatest')
local g = t.group()

local helpers = require('test.helper')

g.before_all = function()
    -- Build one server entry for the cluster config. All servers share the
    -- same env: TARANTOOL_UPGRADE_SCHEMA forces the schema upgrade on start.
    local function make_server(alias, letter, instance_no, port)
        return {
            alias = alias,
            instance_uuid = helpers.uuid(letter, letter, instance_no),
            advertise_port = port,
            env = {
                TARANTOOL_UPGRADE_SCHEMA = 'true',
            },
        }
    end

    -- Work on a throwaway copy of the prepared 1.10 snaps/wals so the
    -- committed fixtures in test/upgrade/.../data stay pristine.
    local source_dir = fio.pathjoin(fio.cwd(), 'test/upgrade/test_master_replica/data')
    local workdir = fio.tempdir()
    local ok, err = fio.copytree(source_dir, workdir)
    assert(ok, err)

    g.cluster = helpers.Cluster:new({
        datadir = workdir,
        server_command = helpers.entrypoint('srv_basic'),
        use_vshard = true,
        cookie = 'upgrade-1.10-2.2',
        replicasets = {
            {
                uuid = helpers.uuid('a'),
                roles = {'vshard-router'},
                servers = {
                    make_server('router', 'a', 1, 13301),
                },
            },
            {
                uuid = helpers.uuid('b'),
                roles = {'vshard-storage'},
                servers = {
                    make_server('storage-1', 'b', 1, 13303),
                    make_server('storage-2', 'b', 2, 13305),
                },
            },
        },
    })
    -- We start the cluster from existing 1.10 snapshots
    -- with schema version {'1', '10', '2'}, so tell luatest
    -- not to run its own bootstrap.
    g.cluster.bootstrapped = true
    g.cluster:start()
end

g.after_all = function()
    -- Tear the cluster down, then remove the temporary working
    -- directory created in before_all.
    local cluster = g.cluster
    cluster:stop()
    fio.rmtree(cluster.datadir)
end

function g.test_upgrade()
    -- Only makes sense on a 2.x binary: the point is upgrading 1.10 data.
    -- NOTE(review): this is a lexicographic string compare — fine for
    -- 1.x vs 2.x, would misorder a hypothetical 10.x; confirm acceptable.
    t.skip_if(_G._TARANTOOL < '2.0', 'Tarantool version should be greater 2.0')

    -- Both storages must have had their _schema version bumped past 1.x.
    for _, alias in ipairs({'storage-1', 'storage-2'}) do
        local version_tuple = g.cluster:server(alias).net_box.space._schema:get({'version'})
        t.assert(version_tuple[1] > '1', 'Schema version is upgraded to 2+')
    end

    -- Replication must survive the upgrade: write on one storage and
    -- read the very same tuple back from the other.
    local writer = g.cluster:server('storage-1').net_box
    local reader = g.cluster:server('storage-2').net_box

    writer:eval([[
        box.schema.space.create('test')
        box.space.test:create_index('pk')
    ]])

    local tuple = {1, 0.1, 'str', {a = 'a'}, {1, 2}}
    writer.space.test:insert(tuple)

    local replicated = reader:eval('return box.space.test:get({1})')
    t.assert_equals(replicated, tuple)
end

0 comments on commit 84b3c8b

Please sign in to comment.