Skip to content

Commit 226ece6

Browse files
ddl: fix record delete
Before this patch (and after the introduction of the sharding info hash cache [1]), calling delete on the "_ddl_sharding_info" and "_ddl_sharding_func" spaces resulted in an "attempt to index local 'tuple' (a nil value)" trigger error. This patch fixes the trigger: if a sharding info record is deleted, the corresponding cache section is deleted as well. 1. 428744d Closes #310
1 parent 4409775 commit 226ece6

File tree

3 files changed

+90
-11
lines changed

3 files changed

+90
-11
lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
1414
### Fixed
1515
* Fetching invalid ddl configuration (sharding key for non-existing space)
1616
no longer breaks CRUD requests (#308, PR #309).
17+
* ddl space record delete no longer throws an error if crud is used (#310, PR #311).
18+
* crud sharding metainfo is now updated on ddl record delete (#310, PR #311).
1719

1820
## [0.12.0] - 28-06-22
1921

crud/common/sharding/storage_metadata_cache.lua

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -18,16 +18,26 @@ local ddl_space = {
1818

1919
local trigger_stash = stash.get(stash.name.ddl_triggers)
2020

21-
local function update_sharding_func_hash(tuple)
22-
local space_name = tuple[utils.SPACE_NAME_FIELDNO]
23-
local sharding_func_def = utils.extract_sharding_func_def(tuple)
24-
cache_data[FUNC][space_name] = utils.compute_hash(sharding_func_def)
21+
local function update_sharding_func_hash(old, new)
22+
if new ~= nil then
23+
local space_name = new[utils.SPACE_NAME_FIELDNO]
24+
local sharding_func_def = utils.extract_sharding_func_def(new)
25+
cache_data[FUNC][space_name] = utils.compute_hash(sharding_func_def)
26+
else
27+
local space_name = old[utils.SPACE_NAME_FIELDNO]
28+
cache_data[FUNC][space_name] = nil
29+
end
2530
end
2631

27-
local function update_sharding_key_hash(tuple)
28-
local space_name = tuple[utils.SPACE_NAME_FIELDNO]
29-
local sharding_key_def = tuple[utils.SPACE_SHARDING_KEY_FIELDNO]
30-
cache_data[KEY][space_name] = utils.compute_hash(sharding_key_def)
32+
local function update_sharding_key_hash(old, new)
33+
if new ~= nil then
34+
local space_name = new[utils.SPACE_NAME_FIELDNO]
35+
local sharding_key_def = new[utils.SPACE_SHARDING_KEY_FIELDNO]
36+
cache_data[KEY][space_name] = utils.compute_hash(sharding_key_def)
37+
else
38+
local space_name = old[utils.SPACE_NAME_FIELDNO]
39+
cache_data[KEY][space_name] = nil
40+
end
3141
end
3242

3343
local update_hash = {
@@ -49,8 +59,8 @@ local function init_cache(section)
4959
pcall(space.on_replace, space, nil, trigger_stash[section])
5060

5161
trigger_stash[section] = space:on_replace(
52-
function(_, new)
53-
return update_hash_func(new)
62+
function(old, new)
63+
return update_hash_func(old, new)
5464
end
5565
)
5666

@@ -61,7 +71,7 @@ local function init_cache(section)
6171
-- It is more like an overcautiousness since the cycle
6272
-- isn't expected to yield, but let it be here.
6373
if cache_data[section][space_name] == nil then
64-
update_hash_func(tuple)
74+
update_hash_func(nil, tuple)
6575
end
6676
end
6777
end

test/integration/ddl_sharding_info_reload_test.lua

Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -219,6 +219,73 @@ pgroup_storage.test_sharding_func_hash_is_updated_when_ddl_is_updated = function
219219
t.assert_equals(hash, sharding_utils.compute_hash({body = sharding_func_body}))
220220
end
221221

222+
pgroup_storage.test_gh_310_ddl_key_record_delete_removes_cache_entry = function(g)
223+
local storage = g.cluster:server('s1-master')
224+
local space_name = sharding_cases.sharding_key_hash.test_space
225+
226+
-- Init cache by fetching sharding info.
227+
local _, err = get_hash(storage, 'get_sharding_key_hash', space_name)
228+
t.assert_equals(err, nil)
229+
230+
-- Drop space together with sharding info.
231+
local _, err = storage:eval([[
232+
local space_name = ...
233+
234+
local current_schema, err = ddl.get_schema()
235+
if err ~= nil then
236+
error(err)
237+
end
238+
239+
box.space[space_name]:drop()
240+
box.space['_ddl_sharding_key']:delete(space_name)
241+
242+
current_schema.spaces[space_name] = nil
243+
244+
local _, err = ddl.set_schema(current_schema)
245+
if err ~= nil then
246+
error(err)
247+
end
248+
]], {space_name})
249+
t.assert_equals(err, nil)
250+
251+
local hash, err = get_hash(storage, 'get_sharding_key_hash', space_name)
252+
t.assert_equals(err, nil)
253+
t.assert_equals(hash, nil)
254+
end
255+
256+
pgroup_storage.test_gh_310_ddl_func_record_delete_removes_cache_entry = function(g)
257+
local storage = g.cluster:server('s1-master')
258+
local space_name = sharding_cases.sharding_func_hash.test_space
259+
260+
-- Init cache by fetching sharding info.
261+
local _, err = get_hash(storage, 'get_sharding_func_hash', space_name)
262+
t.assert_equals(err, nil)
263+
264+
-- Drop space together with sharding info.
265+
local _, err = storage:eval([[
266+
local space_name = ...
267+
268+
local current_schema, err = ddl.get_schema()
269+
if err ~= nil then
270+
error(err)
271+
end
272+
273+
box.space[space_name]:drop()
274+
box.space['_ddl_sharding_func']:delete(space_name)
275+
276+
current_schema.spaces[space_name] = nil
277+
278+
local _, err = ddl.set_schema(current_schema)
279+
if err ~= nil then
280+
error(err)
281+
end
282+
]], {space_name})
283+
t.assert_equals(err, nil)
284+
285+
local hash, err = get_hash(storage, 'get_sharding_func_hash', space_name)
286+
t.assert_equals(err, nil)
287+
t.assert_equals(hash, nil)
288+
end
222289

223290
-- Test storage hash metadata mechanisms are ok after code reload.
224291

0 commit comments

Comments
 (0)