From e83415440941a24ccd4821f82a4f5ca0079eb025 Mon Sep 17 00:00:00 2001
From: Pavel Shipilov
Date: Thu, 27 Feb 2020 13:30:13 +0300
Subject: [PATCH] Test cartridge upgrade

Fix bugs
Add upgrade schema test for luatest
Add test
Fix test
Fix bugs and style fix
Fix remarks
Delete unnecessary file
Fix bugs
Add compatibility.upgrade_schema for CI
Fix test
Add changelog
---
 .gitlab-ci.yml                             |  5 +-
 CHANGELOG.md                               |  4 +
 cartridge.lua                              |  9 +++
 cartridge/argparse.lua                     |  1 +
 cartridge/confapplier.lua                  | 43 +++++++++--
 test/compatibility/upgrade_schema_test.lua | 88 ++++++++++++++++++++++
 6 files changed, 144 insertions(+), 6 deletions(-)
 create mode 100644 test/compatibility/upgrade_schema_test.lua

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 87d6fc5e0..d3cbeb53e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -118,10 +118,13 @@ compatibility:
   variables:
     IMAGE_NAME: *IMAGE_NAME_EE
     CARTRIDGE_OLDER_PATH: /tmp/cartridge-1.2.0
+    TARANTOOL_OLDER_PATH: ${PWD}/tarantool-enterprise/tarantool
+    TARANTOOL_NEWER_PATH: /usr/bin/tarantool
   script:
     - mkdir -p $CARTRIDGE_OLDER_PATH
     - (cd $CARTRIDGE_OLDER_PATH; tarantoolctl rocks install cartridge 1.2.0-1)
-    - luatest -v -p compatibility.upgrade
+    - curl -L https://tarantool.io/installer.sh | VER=2.2 bash
+    - luatest -v -p compatibility
 
 misc:
   <<: *test-template
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b5c697b0a..bb901a0dd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 
 - Add option for clusterwide env in test helpers.
 
+- New option in `cartridge.cfg({auto_upgrade_schema=...}, ...)`
+  to perform automatic schema upgrade to the actual Tarantool version
+  (applied on the leader only). It has also been added to `argparse`.
+
 ### Changed
 
 - Network error shows with fixed splash panel instead of notification.
diff --git a/cartridge.lua b/cartridge.lua
index e97e781e1..e3d7794c2 100644
--- a/cartridge.lua
+++ b/cartridge.lua
@@ -186,6 +186,13 @@ end
 -- List of pages to be hidden in WebUI.
 -- (**Added** in v2.0.1-54, default: `{}`)
 --
+-- @tparam ?boolean opts.auto_upgrade_schema
+-- Automatic schema upgrade.
+-- (**Added** in v2.0.1-54,
+-- default: `false`, overridden by
+-- env `TARANTOOL_AUTO_UPGRADE_SCHEMA`
+-- and args `--auto-upgrade-schema`)
+--
 -- @tparam ?table box_opts
 -- tarantool extra box.cfg options (e.g. memtx_memory),
 -- that may require additional tuning
@@ -208,6 +215,7 @@ local function cfg(opts, box_opts)
         vshard_groups = '?table',
         console_sock = '?string',
         webui_blacklist = '?table',
+        auto_upgrade_schema = '?boolean',
     }, '?table')
 
     if opts.webui_blacklist ~= nil then
@@ -509,6 +517,7 @@ local function cfg(opts, box_opts)
         box_opts = box_opts,
         binary_port = advertise.service,
         advertise_uri = advertise_uri,
+        auto_upgrade_schema = opts.auto_upgrade_schema,
     })
     if not ok then
         return nil, err
diff --git a/cartridge/argparse.lua b/cartridge/argparse.lua
index e72fc0c07..6dc2eafad 100644
--- a/cartridge/argparse.lua
+++ b/cartridge/argparse.lua
@@ -91,6 +91,7 @@ local cluster_opts = {
     console_sock = 'string', -- **string**
     auth_enabled = 'boolean', -- **boolean**
     bucket_count = 'number', -- **number**
+    auto_upgrade_schema = 'boolean', -- **boolean**
 }
 
 --- Common [box.cfg](https://www.tarantool.io/en/doc/latest/reference/configuration/) tuning options.
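A minimal sketch of how an application could opt in to the option declared in the hunks above. The option name, environment variable, and CLI argument come from this patch; the workdir and role list are hypothetical placeholders:

    -- init.lua (hypothetical entry point of a cartridge application)
    local cartridge = require('cartridge')

    local ok, err = cartridge.cfg({
        workdir = 'tmp/db',                         -- hypothetical
        roles = {'cartridge.roles.vshard-router'},  -- hypothetical role set
        auto_upgrade_schema = true,                 -- option added by this patch
    })
    assert(ok, tostring(err))

The same behaviour should also be reachable without code changes, by exporting TARANTOOL_AUTO_UPGRADE_SCHEMA=true or by passing --auto-upgrade-schema on the command line, as documented in the cartridge.lua hunk above.
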
diff --git a/cartridge/confapplier.lua b/cartridge/confapplier.lua
index 962607057..c57d43032 100644
--- a/cartridge/confapplier.lua
+++ b/cartridge/confapplier.lua
@@ -255,10 +255,12 @@ local function apply_config(clusterwide_config)
     return true
 end
 
-
-local function boot_instance(clusterwide_config)
-    checks('ClusterwideConfig')
+local function boot_instance(clusterwide_config, auto_upgrade_schema)
+    checks('ClusterwideConfig', '?boolean')
     assert(clusterwide_config.locked)
+    if auto_upgrade_schema == nil then
+        auto_upgrade_schema = false
+    end
     assert(
         vars.state == 'Unconfigured' -- bootstraping from scratch
         or vars.state == 'ConfigLoaded', -- bootstraping from snapshot
@@ -289,6 +291,7 @@ local function boot_instance(clusterwide_config)
     local snapshots = fio.glob(fio.pathjoin(vars.workdir, '*.snap'))
     local instance_uuid
     local replicaset_uuid
+    local is_leader
     if next(snapshots) == nil then
         -- When snapshots are absent the only way to do it
         -- is to find myself by uri.
@@ -348,7 +351,6 @@ local function boot_instance(clusterwide_config)
         if #box_opts.replication == 0 then
             box_opts.read_only = false
         end
-
     elseif vars.state == 'Unconfigured' then
         -- Instance is being bootstrapped (neither snapshot nor config
         -- don't exist yet)
@@ -365,6 +367,7 @@ local function boot_instance(clusterwide_config)
 
         -- Set up 'star' replication for the bootstrap
         if instance_uuid == leader_uuid then
+            is_leader = true
             box_opts.replication = nil
             box_opts.read_only = false
             -- leader should be bootstrapped with quorum = 0, otherwise
@@ -374,10 +377,22 @@ local function boot_instance(clusterwide_config)
             -- readonly.
             box_opts.replication_connect_quorum = 0
         else
+            is_leader = false
             box_opts.replication = {pool.format_uri(leader.uri)}
         end
     end
 
+    -- If the config has been loaded before this call, derive leadership from the topology
+    if is_leader == nil and auto_upgrade_schema then
+        local instance_uuid = topology.find_server_by_uri(
+            topology_cfg, vars.advertise_uri
+        )
+        local server = topology_cfg.servers[instance_uuid]
+        is_leader = topology.get_leaders_order(
+            topology_cfg, server.replicaset_uuid
+        )[1] == instance_uuid
+    end
+
     log.warn('Calling box.cfg()...')
     -- This operation may be long
     -- It recovers snapshot
@@ -421,6 +436,18 @@ local function boot_instance(clusterwide_config)
         username, password
     )
 
+    if is_leader and auto_upgrade_schema then
+        local schema_version = box.space._schema:get{'version'}
+        log.info(string.format('Schema version before upgrade %d.%d.%d',
+            schema_version[2], schema_version[3], schema_version[4]
+        ))
+        box.schema.upgrade()
+        local schema_version = box.space._schema:get{'version'}
+        log.info(string.format('Schema version after upgrade %d.%d.%d',
+            schema_version[2], schema_version[3], schema_version[4]
+        ))
+    end
+
     box.cfg({read_only = read_only})
 end
 
@@ -475,6 +502,7 @@ local function init(opts)
         box_opts = 'table',
         binary_port = 'number',
         advertise_uri = 'string',
+        auto_upgrade_schema = '?boolean'
     })
     assert(vars.state == '', 'Unexpected state ' ..
         vars.state)
@@ -532,8 +560,13 @@ local function init(opts)
             return true
         end
 
+        local auto_upgrade_schema = opts.auto_upgrade_schema
+        if auto_upgrade_schema == nil then
+            auto_upgrade_schema = false
+        end
+
         set_state('ConfigLoaded')
-        fiber.new(boot_instance, clusterwide_config)
+        fiber.new(boot_instance, clusterwide_config, auto_upgrade_schema)
     end
 
     return true
diff --git a/test/compatibility/upgrade_schema_test.lua b/test/compatibility/upgrade_schema_test.lua
new file mode 100644
index 000000000..c2fc653dd
--- /dev/null
+++ b/test/compatibility/upgrade_schema_test.lua
@@ -0,0 +1,88 @@
+local fio = require('fio')
+
+local helpers = require('test.helper')
+local t = require('luatest')
+local g = t.group()
+
+function g.before_all()
+    g.tempdir = fio.tempdir()
+    g.srv_basic = helpers.entrypoint('srv_basic')
+    g.cluster = helpers.Cluster:new({
+        datadir = g.tempdir,
+        use_vshard = true,
+        cookie = 'test-cluster-cookie',
+        server_command = g.srv_basic,
+        args = nil,
+        replicasets = {{
+            uuid = helpers.uuid('a'),
+            roles = {'vshard-router', 'vshard-storage'},
+            servers = {{
+                alias = 'leader',
+                instance_uuid = helpers.uuid('a', 'a', 1),
+                advertise_port = 13301,
+                http_port = 8081,
+                env = {
+                    TARANTOOL_AUTO_UPGRADE_SCHEMA = 'true',
+                },
+            }, {
+                alias = 'replica',
+                instance_uuid = helpers.uuid('a', 'a', 2),
+                advertise_port = 13302,
+                http_port = 8082,
+            }},
+        }},
+    })
+end
+
+function g.after_all()
+    fio.rmtree(g.tempdir)
+end
+
+local function start_cartridge(server_command, ...)
+    g.cluster.server_command = server_command
+    g.cluster.args = {...}
+    for _, server in ipairs(g.cluster.servers) do
+        server.command = server_command
+        server.args = {...}
+    end
+    g.cluster:start()
+end
+
+function g.test_upgrade()
+    local tarantool_older_path = os.getenv('TARANTOOL_OLDER_PATH')
+    local tarantool_newer_path = os.getenv('TARANTOOL_NEWER_PATH')
+    t.skip_if(
+        tarantool_older_path == nil or tarantool_newer_path == nil,
+        'No older or newer version provided. Skipping'
+    )
+
+    start_cartridge(tarantool_older_path, g.srv_basic)
+    g.cluster.main_server.net_box:eval([[
+box.schema.create_space('test', {
+    format = {
+        {name = 'id', type = 'unsigned'},
+        {name = 'name', type = 'string'},
+    }
+})
+]])
+    local old_tarantool_version = string.split(
+        g.cluster.main_server.net_box:eval('return _TARANTOOL'), '.'
+    )
+    local old_schema_version = g.cluster.main_server.net_box.space._schema:get{'version'}
+    t.assert_equals(old_schema_version[2], tonumber(old_tarantool_version[1]))
+    t.assert_equals(old_schema_version[3], tonumber(old_tarantool_version[2]))
+    local old_space = g.cluster.main_server.net_box:eval('return box.space')
+    g.cluster:stop()
+
+    start_cartridge(tarantool_newer_path, g.srv_basic)
+    local new_tarantool_version = string.split(
+        g.cluster.main_server.net_box:eval('return _TARANTOOL'), '.'
+    )
+    local new_schema_version = g.cluster.main_server.net_box.space._schema:get{'version'}
+    t.assert_equals(new_schema_version[2], tonumber(new_tarantool_version[1]))
+    t.assert_equals(new_schema_version[3], tonumber(new_tarantool_version[2]))
+
+    local new_space = g.cluster.main_server.net_box:eval('return box.space')
+    t.assert_equals(old_space, new_space)
+    g.cluster:stop()
+end
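
For reference, a rough sketch of the manual procedure that the boot_instance() change automates and that the test above asserts on. It assumes a console session on the leader after it has been restarted on the newer Tarantool binary; only stock box APIs are used, and the tuple layout mirrors what the patch itself relies on:

    -- The _schema system space stores the schema version as
    -- {'version', <major>, <minor>, <patch>}.
    local schema_version = box.space._schema:get{'version'}
    print(string.format('schema version: %d.%d.%d',
        schema_version[2], schema_version[3], schema_version[4]))

    -- Upgrade system spaces to the schema of the running Tarantool version.
    -- Must run on a writable instance (read_only = false).
    box.schema.upgrade()

This is also why the patch performs the upgrade only when is_leader is true: replicas are read-only and receive the upgraded schema through replication instead.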