Skip to content

Commit

Permalink
ddl: fix record delete
Browse files Browse the repository at this point in the history
Before this patch and after introducing sharding info hash cache [1]
calling delete on "_ddl_sharding_info" and "_ddl_sharding_func" spaces
resulted in "attempt to index local 'tuple' (a nil value)" trigger
error. This patch fixes the trigger. If sharding info record is deleted,
corresponding cache section will be deleted as well.

1. 428744d

Closes #310
  • Loading branch information
DifferentialOrange committed Jul 21, 2022
1 parent 4409775 commit 50c2307
Show file tree
Hide file tree
Showing 3 changed files with 90 additions and 11 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
### Fixed
* Fetching an invalid ddl configuration (sharding key for a non-existing space)
no longer breaks CRUD requests (#308, PR #309).
* Deleting a ddl space record no longer throws an error when crud is used (#310, PR #311).
* crud sharding metainfo is now updated on ddl record delete (#310, PR #311).

## [0.12.0] - 28-06-22

Expand Down
32 changes: 21 additions & 11 deletions crud/common/sharding/storage_metadata_cache.lua
Original file line number Diff line number Diff line change
Expand Up @@ -18,16 +18,26 @@ local ddl_space = {

local trigger_stash = stash.get(stash.name.ddl_triggers)

local function update_sharding_func_hash(tuple)
local space_name = tuple[utils.SPACE_NAME_FIELDNO]
local sharding_func_def = utils.extract_sharding_func_def(tuple)
cache_data[FUNC][space_name] = utils.compute_hash(sharding_func_def)
--- on_replace trigger body for the "_ddl_sharding_func" space.
-- Keeps the sharding function hash cache in sync with the record:
-- recomputes the hash on insert/replace, drops the cache entry on delete
-- (new == nil means the tuple was removed, see gh-310).
-- @param old tuple state before the operation (nil on insert)
-- @param new tuple state after the operation (nil on delete)
local function update_sharding_func_hash(old, new)
    if new == nil then
        -- Record deleted: invalidate the cached hash for this space.
        cache_data[FUNC][old[utils.SPACE_NAME_FIELDNO]] = nil
        return
    end

    local func_def = utils.extract_sharding_func_def(new)
    cache_data[FUNC][new[utils.SPACE_NAME_FIELDNO]] = utils.compute_hash(func_def)
end

local function update_sharding_key_hash(tuple)
local space_name = tuple[utils.SPACE_NAME_FIELDNO]
local sharding_key_def = tuple[utils.SPACE_SHARDING_KEY_FIELDNO]
cache_data[KEY][space_name] = utils.compute_hash(sharding_key_def)
--- on_replace trigger body for the "_ddl_sharding_key" space.
-- Keeps the sharding key hash cache in sync with the record:
-- recomputes the hash on insert/replace, drops the cache entry on delete
-- (new == nil means the tuple was removed, see gh-310).
-- @param old tuple state before the operation (nil on insert)
-- @param new tuple state after the operation (nil on delete)
local function update_sharding_key_hash(old, new)
    if new == nil then
        -- Record deleted: invalidate the cached hash for this space.
        cache_data[KEY][old[utils.SPACE_NAME_FIELDNO]] = nil
        return
    end

    local key_def = new[utils.SPACE_SHARDING_KEY_FIELDNO]
    cache_data[KEY][new[utils.SPACE_NAME_FIELDNO]] = utils.compute_hash(key_def)
end

local update_hash = {
Expand All @@ -49,8 +59,8 @@ local function init_cache(section)
pcall(space.on_replace, space, nil, trigger_stash[section])

trigger_stash[section] = space:on_replace(
function(_, new)
return update_hash_func(new)
function(old, new)
return update_hash_func(old, new)
end
)

Expand All @@ -61,7 +71,7 @@ local function init_cache(section)
-- It is more like an overcautiousness since the cycle
-- isn't expected to yield, but let it be here.
if cache_data[section][space_name] == nil then
update_hash_func(tuple)
update_hash_func(nil, tuple)
end
end
end
Expand Down
67 changes: 67 additions & 0 deletions test/integration/ddl_sharding_info_reload_test.lua
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,73 @@ pgroup_storage.test_sharding_func_hash_is_updated_when_ddl_is_updated = function
t.assert_equals(hash, sharding_utils.compute_hash({body = sharding_func_body}))
end

-- gh-310: deleting a "_ddl_sharding_key" record must not raise a trigger
-- error and must evict the corresponding sharding key hash cache section.
pgroup_storage.test_gh_310_ddl_key_record_delete_removes_cache_entry = function(g)
    local server = g.cluster:server('s1-master')
    local space_name = sharding_cases.sharding_key_hash.test_space

    -- Warm up the cache: the first fetch populates the hash section.
    local _, err = get_hash(server, 'get_sharding_key_hash', space_name)
    t.assert_equals(err, nil)

    -- Drop the space and its sharding key record; before the fix this
    -- delete fired a broken on_replace trigger.
    local _, err = server:eval([[
    local space_name = ...
    local current_schema, err = ddl.get_schema()
    if err ~= nil then
    error(err)
    end
    box.space[space_name]:drop()
    box.space['_ddl_sharding_key']:delete(space_name)
    current_schema.spaces[space_name] = nil
    local _, err = ddl.set_schema(current_schema)
    if err ~= nil then
    error(err)
    end
    ]], {space_name})
    t.assert_equals(err, nil)

    -- The cache entry must be gone, not stale.
    local hash, err = get_hash(server, 'get_sharding_key_hash', space_name)
    t.assert_equals(err, nil)
    t.assert_equals(hash, nil)
end

-- gh-310: deleting a "_ddl_sharding_func" record must not raise a trigger
-- error and must evict the corresponding sharding function hash cache section.
pgroup_storage.test_gh_310_ddl_func_record_delete_removes_cache_entry = function(g)
    local server = g.cluster:server('s1-master')
    local space_name = sharding_cases.sharding_func_hash.test_space

    -- Warm up the cache: the first fetch populates the hash section.
    local _, err = get_hash(server, 'get_sharding_func_hash', space_name)
    t.assert_equals(err, nil)

    -- Drop the space and its sharding func record; before the fix this
    -- delete fired a broken on_replace trigger.
    local _, err = server:eval([[
    local space_name = ...
    local current_schema, err = ddl.get_schema()
    if err ~= nil then
    error(err)
    end
    box.space[space_name]:drop()
    box.space['_ddl_sharding_func']:delete(space_name)
    current_schema.spaces[space_name] = nil
    local _, err = ddl.set_schema(current_schema)
    if err ~= nil then
    error(err)
    end
    ]], {space_name})
    t.assert_equals(err, nil)

    -- The cache entry must be gone, not stale.
    local hash, err = get_hash(server, 'get_sharding_func_hash', space_name)
    t.assert_equals(err, nil)
    t.assert_equals(hash, nil)
end

-- Test storage hash metadata mechanisms are ok after code reload.

Expand Down

0 comments on commit 50c2307

Please sign in to comment.