Gh 587 describe tarantool migration #640

Closed · wants to merge 1 commit
5 changes: 4 additions & 1 deletion .gitlab-ci.yml
@@ -118,10 +118,13 @@ compatibility:
variables:
IMAGE_NAME: *IMAGE_NAME_EE
CARTRIDGE_OLDER_PATH: /tmp/cartridge-1.2.0
TARANTOOL_OLDER_PATH: ${PWD}/tarantool-enterprise/tarantool
TARANTOOL_NEWER_PATH: /usr/bin/tarantool
script:
- mkdir -p $CARTRIDGE_OLDER_PATH
- (cd $CARTRIDGE_OLDER_PATH; tarantoolctl rocks install cartridge 1.2.0-1)
- luatest -v -p compatibility.upgrade
- curl -L https://tarantool.io/installer.sh | VER=2.2 bash
- luatest -v -p compatibility

misc:
<<: *test-template
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -47,6 +47,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.

- Add option for clusterwide env in test helpers.

- New option in `cartridge.cfg({auto_upgrade_schema = ...}, ...)`
to automatically upgrade the schema to the version of the running
Tarantool instance (leader only). It has also been added to `argparse`.

### Changed

- Network error shows with fixed splash panel instead of notification.
9 changes: 9 additions & 0 deletions cartridge.lua
@@ -186,6 +186,13 @@ end
-- List of pages to be hidden in WebUI.
-- (**Added** in v2.0.1-54, default: `{}`)
--
-- @tparam ?boolean opts.auto_upgrade_schema
-- Automatically upgrade the database schema
-- to the running Tarantool version (leader only).
-- (**Added** in v2.0.1-54,
-- default: `false`, overridden by
-- env `TARANTOOL_AUTO_UPGRADE_SCHEMA`,
-- args `--auto-upgrade-schema`)
--
-- @tparam ?table box_opts
-- tarantool extra box.cfg options (e.g. memtx_memory),
-- that may require additional tuning
@@ -208,6 +215,7 @@ local function cfg(opts, box_opts)
vshard_groups = '?table',
console_sock = '?string',
webui_blacklist = '?table',
auto_upgrade_schema = '?boolean',
}, '?table')

if opts.webui_blacklist ~= nil then
@@ -509,6 +517,7 @@ local function cfg(opts, box_opts)
box_opts = box_opts,
binary_port = advertise.service,
advertise_uri = advertise_uri,
auto_upgrade_schema = opts.auto_upgrade_schema,
})
if not ok then
return nil, err
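For context, a minimal sketch of how an entrypoint might enable the new option; only the `auto_upgrade_schema` name comes from this patch, the rest (paths, roles, URI) are placeholders:

```lua
-- init.lua (hypothetical): enable automatic schema upgrade on bootstrap.
local cartridge = require('cartridge')

local ok, err = cartridge.cfg({
    workdir = 'tmp/db',                         -- placeholder
    advertise_uri = 'localhost:3301',           -- placeholder
    roles = {'cartridge.roles.vshard-router'},  -- placeholder
    -- New option from this PR: the leader runs box.schema.upgrade()
    -- after bootstrap when this is true.
    auto_upgrade_schema = true,
})
assert(ok, tostring(err))
```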
1 change: 1 addition & 0 deletions cartridge/argparse.lua
@@ -91,6 +91,7 @@ local cluster_opts = {
console_sock = 'string', -- **string**
auth_enabled = 'boolean', -- **boolean**
bucket_count = 'number', -- **number**
auto_upgrade_schema = 'boolean', -- **boolean**
}

--- Common [box.cfg](https://www.tarantool.io/en/doc/latest/reference/configuration/) tuning options.
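Because the option is registered here as a boolean cluster option, it should also be reachable through the environment variable and CLI argument named in the doc comment above. A sketch, assuming the usual `cartridge.argparse` accessor (`get_cluster_opts()`); the exact CLI syntax may differ:

```lua
-- Hypothetical check that the flag is picked up from either
--   TARANTOOL_AUTO_UPGRADE_SCHEMA=true tarantool init.lua
-- or a --auto-upgrade-schema command-line argument.
local argparse = require('cartridge.argparse')

local opts, err = argparse.get_cluster_opts()
assert(opts, err)
print(opts.auto_upgrade_schema)  -- true when either form is supplied
```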
43 changes: 38 additions & 5 deletions cartridge/confapplier.lua
@@ -255,10 +255,12 @@ local function apply_config(clusterwide_config)
return true
end


local function boot_instance(clusterwide_config)
checks('ClusterwideConfig')
local function boot_instance(clusterwide_config, auto_upgrade_schema)
checks('ClusterwideConfig', '?boolean')
assert(clusterwide_config.locked)
if auto_upgrade_schema == nil then
auto_upgrade_schema = false
end
assert(
vars.state == 'Unconfigured' -- bootstraping from scratch
or vars.state == 'ConfigLoaded', -- bootstraping from snapshot
@@ -289,6 +291,7 @@ local function boot_instance(clusterwide_config)
local snapshots = fio.glob(fio.pathjoin(vars.workdir, '*.snap'))
local instance_uuid
local replicaset_uuid
local is_leader
if next(snapshots) == nil then
-- When snapshots are absent the only way to do it
-- is to find myself by uri.
@@ -348,7 +351,6 @@ local function boot_instance(clusterwide_config)
if #box_opts.replication == 0 then
box_opts.read_only = false
end

elseif vars.state == 'Unconfigured' then
-- Instance is being bootstrapped (neither snapshot nor config
-- don't exist yet)
@@ -365,6 +367,7 @@

-- Set up 'star' replication for the bootstrap
if instance_uuid == leader_uuid then
is_leader = true
box_opts.replication = nil
box_opts.read_only = false
-- leader should be bootstrapped with quorum = 0, otherwise
@@ -374,10 +377,22 @@
-- readonly.
box_opts.replication_connect_quorum = 0
else
is_leader = false
box_opts.replication = {pool.format_uri(leader.uri)}
end
end

-- If the config has been loaded before this call, is_leader is not set yet
if is_leader == nil and auto_upgrade_schema then
local instance_uuid = topology.find_server_by_uri(
topology_cfg, vars.advertise_uri
)
local server = topology_cfg.servers[instance_uuid]
is_leader = topology.get_leaders_order(
topology_cfg, server.replicaset_uuid
)[1] == instance_uuid
end

log.warn('Calling box.cfg()...')
-- This operation may be long
-- It recovers snapshot
@@ -421,6 +436,18 @@
username, password
)

if is_leader and auto_upgrade_schema then
local schema_version = box.space._schema:get{'version'}
log.info(string.format('Schema version before upgrade %d.%d.%d',
schema_version[2], schema_version[3], schema_version[4]
))
box.schema.upgrade()
local schema_version = box.space._schema:get{'version'}
log.info(string.format('Schema version after upgrade %d.%d.%d',
schema_version[2], schema_version[3], schema_version[4]
))
end

box.cfg({read_only = read_only})
end

@@ -475,6 +502,7 @@ local function init(opts)
box_opts = 'table',
binary_port = 'number',
advertise_uri = 'string',
auto_upgrade_schema = '?boolean'
})

assert(vars.state == '', 'Unexpected state ' .. vars.state)
@@ -532,8 +560,13 @@ local function init(opts)
return true
end

local auto_upgrade_schema = opts.auto_upgrade_schema
if auto_upgrade_schema == nil then
auto_upgrade_schema = false
end

set_state('ConfigLoaded')
fiber.new(boot_instance, clusterwide_config)
fiber.new(boot_instance, clusterwide_config, auto_upgrade_schema)
end

return true
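The upgrade step itself uses plain Tarantool API: the `_schema` system space stores the schema version as a tuple `{'version', major, minor, patch}`, and `box.schema.upgrade()` brings it up to the running Tarantool version. A standalone console illustration (not Cartridge-specific), mirroring the logging added above:

```lua
-- Run on a writable (leader) instance after box.cfg().
local v = box.space._schema:get{'version'}
print(('schema version before upgrade: %d.%d.%d'):format(v[2], v[3], v[4]))

box.schema.upgrade()  -- no-op if the schema is already up to date

v = box.space._schema:get{'version'}
print(('schema version after upgrade: %d.%d.%d'):format(v[2], v[3], v[4]))
```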
88 changes: 88 additions & 0 deletions test/compatibility/upgrade_schema_test.lua
@@ -0,0 +1,88 @@
local fio = require('fio')

local helpers = require('test.helper')
local t = require('luatest')
local g = t.group()

function g.before_all()
g.tempdir = fio.tempdir()
g.srv_basic = helpers.entrypoint('srv_basic')
g.cluster = helpers.Cluster:new({
datadir = g.tempdir,
use_vshard = true,
cookie = 'test-cluster-cookie',
server_command = g.srv_basic,
args = nil,
replicasets = {{
uuid = helpers.uuid('a'),
roles = {'vshard-router', 'vshard-storage'},
servers = {{
alias = 'leader',
instance_uuid = helpers.uuid('a', 'a', 1),
advertise_port = 13301,
http_port = 8081,
env = {
TARANTOOL_AUTO_UPGRADE_SCHEMA = 'true',
},
}, {
alias = 'replica',
instance_uuid = helpers.uuid('a', 'a', 2),
advertise_port = 13302,
http_port = 8082,
}},
}},
})
end

function g.after_all()
fio.rmtree(g.tempdir)
end

local function start_cartridge(server_command, ...)
g.cluster.server_command = server_command
g.cluster.args = {...}
for _, server in ipairs(g.cluster.servers) do
server.command = server_command
server.args = {...}
end
g.cluster:start()
end

function g.test_upgrade()
local tarantool_older_path = os.getenv('TARANTOOL_OLDER_PATH')
local tarantool_newer_path = os.getenv('TARANTOOL_NEWER_PATH')
t.skip_if(
tarantool_older_path == nil or tarantool_newer_path == nil,
'No older or newer version provided. Skipping'
)

start_cartridge(tarantool_older_path, g.srv_basic)
g.cluster.main_server.net_box:eval([[
box.schema.create_space('test', {
format = {
{name = 'id', type = 'unsigned'},
{name = 'name', type = 'string'},
}
})
]])
local old_tarantool_version = string.split(
g.cluster.main_server.net_box:eval('return _TARANTOOL'), '.'
)
local old_schema_version = g.cluster.main_server.net_box.space._schema:get{'version'}
t.assert_equals(old_schema_version[2], tonumber(old_tarantool_version[1]))
t.assert_equals(old_schema_version[3], tonumber(old_tarantool_version[2]))
local old_space = g.cluster.main_server.net_box:eval('return box.space')
g.cluster:stop()

start_cartridge(tarantool_newer_path, g.srv_basic)
local new_tarantool_version = string.split(
g.cluster.main_server.net_box:eval('return _TARANTOOL'), '.'
)
local new_schema_version = g.cluster.main_server.net_box.space._schema:get{'version'}
t.assert_equals(new_schema_version[2], tonumber(new_tarantool_version[1]))
t.assert_equals(new_schema_version[3], tonumber(new_tarantool_version[2]))

local new_space = g.cluster.main_server.net_box:eval('return box.space')
t.assert_equals(old_space, new_space)
g.cluster:stop()
end