@@ -76,6 +76,16 @@ def get_bucket_num_shards(bucket_name, bucket_id):
     num_shards = json_op['data']['bucket_info']['num_shards']
     return num_shards
 
+def get_bucket_reshard_status(bucket_name):
+    """
+    function to get bucket reshard status
+    """
+    cmd = exec_cmd("radosgw-admin bucket stats --bucket {}".format(bucket_name))
+    json_op = json.loads(cmd)
+    #print(json.dumps(json_op, indent = 4, sort_keys=True))
+    reshard_status = json_op['reshard_status']
+    return reshard_status
+
 def run_bucket_reshard_cmd(bucket_name, num_shards, **kwargs):
     cmd = 'radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(bucket_name, num_shards)
     cmd += ' --rgw-reshard-bucket-lock-duration 30' # reduce to minimum
@@ -139,6 +149,11 @@ def test_bucket_reshard(conn, name, **fault):
     bucket.delete_objects(Delete={'Objects':[{'Key':o.key} for o in objs]})
     bucket.delete()
 
+def calc_reshardlog_count(json_op):
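+    """Sum the number of reshardlog entries across all shards of a 'reshardlog list' result."""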
+    cnt = 0
+    for shard in json_op:
+        cnt += len(shard['shard_entries'])
+    return cnt
 
 def main():
     """
@@ -235,6 +250,68 @@ def main():
     log.debug('TEST: reshard bucket with abort at change_reshard_state\n')
     test_bucket_reshard(connection, 'abort-at-change-reshard-state', abort_at='change_reshard_state')
 
+    # TESTCASE 'logrecord can be stopped after a failed reshard'
+    log.debug(' test: logrecord can be stopped after a failed reshard')
+    num_shards = get_bucket_stats(BUCKET_NAME).num_shards
+    assert "None" == get_bucket_reshard_status(BUCKET_NAME)
+    _, ret = run_bucket_reshard_cmd(BUCKET_NAME, num_shards + 1, check_retcode=False, abort_at='change_reshard_state')
+    assert(ret != 0 and ret != errno.EBUSY)
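+    # the aborted reshard should leave the bucket in the InLogrecord state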
+    assert "InLogrecord" == get_bucket_reshard_status(BUCKET_NAME)
+
+    bucket.put_object(Key='put_during_logrecord', Body=b"some_data")
+    cmd = exec_cmd('radosgw-admin reshardlog list --bucket %s' % BUCKET_NAME)
+    json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore bytes (e.g. 0x80) that utf-8 can't decode
+    assert calc_reshardlog_count(json_op) == 1
+
+    # the bucket ends up stuck in logrecord status; the stale logrecord entries should be purged by the next write
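+    # the reshard bucket lock duration was reduced to 30s above, so sleeping 30s lets the aborted reshard's lock expire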
+    time.sleep(30)
+    assert "InLogrecord" == get_bucket_reshard_status(BUCKET_NAME)
+    bucket.put_object(Key='put_during_logrecord1', Body=b"some_data1")
+    cmd = exec_cmd('radosgw-admin reshardlog list --bucket %s' % BUCKET_NAME)
+    json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore bytes (e.g. 0x80) that utf-8 can't decode
+    assert calc_reshardlog_count(json_op) == 0
+    assert "None" == get_bucket_reshard_status(BUCKET_NAME)
+
+    # TESTCASE 'duplicated entries should be purged before reshard'
+    log.debug(' test: duplicated entries should be purged before reshard')
+    num_shards = get_bucket_stats(BUCKET_NAME).num_shards
+    _, ret = run_bucket_reshard_cmd(BUCKET_NAME, num_shards + 1, check_retcode=False, abort_at='do_reshard')
+    assert(ret != 0 and ret != errno.EBUSY)
+    assert "InLogrecord" == get_bucket_reshard_status(BUCKET_NAME)
+
+    bucket.put_object(Key='put_during_logrecord2', Body=b"some_data2")
+    cmd = exec_cmd('radosgw-admin reshardlog list --bucket %s' % BUCKET_NAME)
+    json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore bytes (e.g. 0x80) that utf-8 can't decode
+    assert calc_reshardlog_count(json_op) == 1
+
+    # when resharding starts again, the duplicated entries will be purged
+    time.sleep(30)
+    _, ret = run_bucket_reshard_cmd(BUCKET_NAME, num_shards + 1, check_retcode=False, abort_at='logrecord_writes')
+    assert(ret != 0 and ret != errno.EBUSY)
+    cmd = exec_cmd('radosgw-admin reshardlog list --bucket %s' % BUCKET_NAME)
+    json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore bytes (e.g. 0x80) that utf-8 can't decode
+    assert calc_reshardlog_count(json_op) == 0
+
+    # TESTCASE 'duplicated entries can be purged manually'
+    log.debug(' test: duplicated entries can be purged manually')
+    time.sleep(30)
+    num_shards = get_bucket_stats(BUCKET_NAME).num_shards
+    _, ret = run_bucket_reshard_cmd(BUCKET_NAME, num_shards + 1, check_retcode=False, abort_at='do_reshard')
+    assert(ret != 0 and ret != errno.EBUSY)
+    assert "InLogrecord" == get_bucket_reshard_status(BUCKET_NAME)
+
+    bucket.put_object(Key='put_during_logrecord3', Body=b"some_data3")
+    cmd = exec_cmd('radosgw-admin reshardlog list --bucket %s' % BUCKET_NAME)
+    json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore bytes (e.g. 0x80) that utf-8 can't decode
+    assert calc_reshardlog_count(json_op) == 1
+
+    time.sleep(30)
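+    # after the lock expires, the leftover reshardlog entries can be removed explicitly with 'reshardlog purge'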
+    exec_cmd('radosgw-admin reshardlog purge --bucket %s' % BUCKET_NAME)
+    cmd = exec_cmd('radosgw-admin reshardlog list --bucket %s' % BUCKET_NAME)
+    json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore bytes (e.g. 0x80) that utf-8 can't decode
+    assert calc_reshardlog_count(json_op) == 0
+    log.debug('reshard logrecord checked successfully')
+
     # TESTCASE 'versioning reshard-','bucket', reshard','versioning reshard','succeeds'
     log.debug(' test: reshard versioned bucket')
     num_shards_expected = get_bucket_stats(VER_BUCKET_NAME).num_shards + 1
@@ -288,6 +365,8 @@ def main():
     time.sleep(1)
     ver_bucket.put_object(Key='put_during_reshard', Body=b"some_data")
     log.debug('put object successful')
+    # wait for the delayed reshard to finish
+    time.sleep(5)
 
     # TESTCASE 'check that bucket stats are correct after reshard with unlinked entries'
     log.debug('TEST: check that bucket stats are correct after reshard with unlinked entries\n')