Skip to content

Commit 5f16ece

Browse files
authored
Merge pull request ceph#62323 from clwluvw/bucket-replication-mismatch
rgw: reject PutBucketReplication policies with mismatched statuses, skip sync on update Reviewed-by: Casey Bodley <[email protected]>
2 parents 8bc1235 + 826d493 commit 5f16ece

File tree

3 files changed

+321
-0
lines changed

3 files changed

+321
-0
lines changed

src/rgw/driver/rados/rgw_data_sync.cc

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4520,6 +4520,20 @@ class RGWBucketSyncSingleEntryCR : public RGWCoroutine {
45204520
tn->log(0, "entry with empty obj name, skipping");
45214521
goto done;
45224522
}
4523+
4524+
// make sure versioned object only lands on versioned bucket and non-versioned object only lands on non-versioned bucket
4525+
if (key.instance.empty() == sync_pipe.dest_bucket_info.versioned()) {
4526+
set_status("skipping entry due to versioning mismatch");
4527+
tn->log(0, SSTR("skipping entry due to versioning mismatch: " << key));
4528+
goto done;
4529+
}
4530+
// if object lock is enabled on either, the other should follow as well
4531+
if (sync_pipe.source_bucket_info.obj_lock_enabled() != sync_pipe.dest_bucket_info.obj_lock_enabled()) {
4532+
set_status("skipping entry due to object lock mismatch");
4533+
tn->log(0, SSTR("skipping entry due to object lock mismatch: " << key));
4534+
goto done;
4535+
}
4536+
45234537
if (error_injection &&
45244538
rand() % 10000 < cct->_conf->rgw_sync_data_inject_err_probability * 10000.0) {
45254539
tn->log(0, SSTR(": injecting data sync error on key=" << key.name));

src/rgw/rgw_rest_s3.cc

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1339,6 +1339,28 @@ struct ReplicationConfiguration {
13391339
}
13401340
pipe->dest.bucket.emplace(dest_bk);
13411341

1342+
std::unique_ptr<rgw::sal::Bucket> dest_bucket;
1343+
if (int r = driver->load_bucket(s, *pipe->dest.bucket, &dest_bucket, s->yield); r < 0) {
1344+
if (r == -ENOENT) {
1345+
s->err.message = "Destination bucket must exist.";
1346+
return -EINVAL;
1347+
}
1348+
1349+
ldpp_dout(s, 0) << "ERROR: failed to load bucket info for bucket=" << *pipe->dest.bucket << " r=" << r << dendl;
1350+
return r;
1351+
}
1352+
1353+
// reject the policy if source and destination versioning states differ
1354+
if (dest_bucket->get_info().versioned() != s->bucket->get_info().versioned()) {
1355+
s->err.message = "Versioning must be identical in source and destination buckets.";
1356+
return -EINVAL;
1357+
}
1358+
// reject the policy if object lock enablement differs between source and destination
1359+
if (dest_bucket->get_info().obj_lock_enabled() != s->bucket->get_info().obj_lock_enabled()) {
1360+
s->err.message = "Object lock must be identical in source and destination buckets.";
1361+
return -EINVAL;
1362+
}
1363+
13421364
if (filter) {
13431365
int r = filter->to_sync_pipe_filter(s->cct, &pipe->params.source.filter);
13441366
if (r < 0) {

src/test/rgw/rgw_multi/tests.py

Lines changed: 285 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3885,3 +3885,288 @@ def test_bucket_replication_alt_user():
38853885
# check that object exists in destination bucket
38863886
k = get_key(dest, dest_bucket, objname)
38873887
assert_equal(k.get_contents_as_string().decode('utf-8'), 'foo')
3888+
3889+
@allow_bucket_replication
def test_bucket_replication_reject_versioning_identical():
    """PutBucketReplication must return 400 when only the source bucket is versioned."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)

    src = conns.non_account_rw_zones[0]
    dst = conns.non_account_rw_zones[1]

    src_bucket = src.create_bucket(gen_bucket_name())
    dst_bucket = dst.create_bucket(gen_bucket_name())
    # enable versioning on the source only -> mismatched with the destination
    src.s3_client.put_bucket_versioning(
        Bucket=src_bucket.name,
        VersioningConfiguration={'Status': 'Enabled'})
    zonegroup_meta_checkpoint(zonegroup)

    # the replication policy must be rejected due to the versioning mismatch
    replication = {
        'Role': '',
        'Rules': [{
            'ID': 'rule1',
            'Status': 'Enabled',
            'Destination': {
                'Bucket': f'arn:aws:s3:::{dst_bucket.name}',
            }
        }]
    }
    e = assert_raises(ClientError,
                      src.s3_client.put_bucket_replication,
                      Bucket=src_bucket.name,
                      ReplicationConfiguration=replication)
    assert e.response['ResponseMetadata']['HTTPStatusCode'] == 400
3920+
3921+
@allow_bucket_replication
def test_bucket_replicaion_reject_objectlock_identical():
    """PutBucketReplication must return 400 when only the destination has object lock."""
    # NOTE(review): "replicaion" typo in the function name is kept so any
    # external suite references keep resolving; consider renaming separately.
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)

    src = conns.non_account_rw_zones[0]
    dst = conns.non_account_rw_zones[1]

    src_bucket = src.create_bucket(gen_bucket_name())
    # destination is created with object lock enabled, unlike the source
    dst_bucket_name = gen_bucket_name()
    dst.s3_client.create_bucket(Bucket=dst_bucket_name, ObjectLockEnabledForBucket=True)
    zonegroup_meta_checkpoint(zonegroup)

    # the replication policy must be rejected due to the object lock mismatch
    replication = {
        'Role': '',
        'Rules': [{
            'ID': 'rule1',
            'Status': 'Enabled',
            'Destination': {
                'Bucket': f'arn:aws:s3:::{dst_bucket_name}',
            }
        }]
    }
    e = assert_raises(ClientError,
                      src.s3_client.put_bucket_replication,
                      Bucket=src_bucket.name,
                      ReplicationConfiguration=replication)
    assert e.response['ResponseMetadata']['HTTPStatusCode'] == 400
3949+
3950+
@allow_bucket_replication
def test_bucket_replication_non_versioned_to_versioned():
    """Sync must skip objects once the destination becomes versioned while the source is not."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)

    src = conns.non_account_rw_zones[0]
    dst = conns.non_account_rw_zones[1]

    src_bucket = src.create_bucket(gen_bucket_name())
    dst_bucket = dst.create_bucket(gen_bucket_name())
    zonegroup_meta_checkpoint(zonegroup)

    # the replication policy is accepted while both buckets are non-versioned
    replication = {
        'Role': '',
        'Rules': [{
            'ID': 'rule1',
            'Status': 'Enabled',
            'Destination': {
                'Bucket': f'arn:aws:s3:::{dst_bucket.name}',
            }
        }]
    }
    response = src.s3_client.put_bucket_replication(
        Bucket=src_bucket.name,
        ReplicationConfiguration=replication)
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
    zonegroup_meta_checkpoint(zonegroup)

    # introduce the mismatch: version the destination bucket only
    dst.s3_client.put_bucket_versioning(
        Bucket=dst_bucket.name,
        VersioningConfiguration={'Status': 'Enabled'})
    zonegroup_meta_checkpoint(zonegroup)

    # upload an object and wait for sync
    objname = 'dummy'
    k = new_key(src, src_bucket, objname)
    k.set_contents_from_string('foo')
    zone_data_checkpoint(dst.zone, src.zone)

    # the entry must have been skipped: object absent from the destination
    e = assert_raises(ClientError, dst.s3_client.get_object,
                      Bucket=dst_bucket.name, Key=objname)
    assert e.response['Error']['Code'] == 'NoSuchKey'
3997+
3998+
@allow_bucket_replication
def test_bucket_replication_versioned_to_non_versioned():
    """Sync must skip objects once the source becomes versioned while the destination is not."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)

    src = conns.non_account_rw_zones[0]
    dst = conns.non_account_rw_zones[1]

    src_bucket = src.create_bucket(gen_bucket_name())
    dst_bucket = dst.create_bucket(gen_bucket_name())
    zonegroup_meta_checkpoint(zonegroup)

    # the replication policy is accepted while both buckets are non-versioned
    replication = {
        'Role': '',
        'Rules': [{
            'ID': 'rule1',
            'Status': 'Enabled',
            'Destination': {
                'Bucket': f'arn:aws:s3:::{dst_bucket.name}',
            }
        }]
    }
    response = src.s3_client.put_bucket_replication(
        Bucket=src_bucket.name,
        ReplicationConfiguration=replication)
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
    zonegroup_meta_checkpoint(zonegroup)

    # introduce the mismatch: version the source bucket only
    src.s3_client.put_bucket_versioning(
        Bucket=src_bucket.name,
        VersioningConfiguration={'Status': 'Enabled'})
    zonegroup_meta_checkpoint(zonegroup)

    # upload an object and wait for sync
    objname = 'dummy'
    k = new_key(src, src_bucket, objname)
    k.set_contents_from_string('foo')
    zone_data_checkpoint(dst.zone, src.zone)

    # the entry must have been skipped: object absent from the destination
    e = assert_raises(ClientError, dst.s3_client.get_object,
                      Bucket=dst_bucket.name, Key=objname)
    assert e.response['Error']['Code'] == 'NoSuchKey'
4045+
4046+
@allow_bucket_replication
def test_bucket_replication_lock_enabled_to_lock_disabled():
    """Sync must skip objects once object lock is enabled on the source only."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)

    src = conns.non_account_rw_zones[0]
    dst = conns.non_account_rw_zones[1]

    src_bucket_name = gen_bucket_name()
    src.create_bucket(src_bucket_name)
    # versioning must be enabled before object lock can be configured
    src.s3_client.put_bucket_versioning(
        Bucket=src_bucket_name,
        VersioningConfiguration={'Status': 'Enabled'})
    dst_bucket = dst.create_bucket(gen_bucket_name())
    dst.s3_client.put_bucket_versioning(
        Bucket=dst_bucket.name,
        VersioningConfiguration={'Status': 'Enabled'})
    zonegroup_meta_checkpoint(zonegroup)

    # the replication policy is accepted while neither bucket has object lock
    src.s3_client.put_bucket_replication(
        Bucket=src_bucket_name,
        ReplicationConfiguration={
            'Role': '',
            'Rules': [{
                'ID': 'rule1',
                'Status': 'Enabled',
                'Destination': {
                    'Bucket': f'arn:aws:s3:::{dst_bucket.name}',
                }
            }]
        })
    zonegroup_meta_checkpoint(zonegroup)

    # introduce the mismatch: object lock on the source bucket only
    src.s3_client.put_object_lock_configuration(
        Bucket=src_bucket_name,
        ObjectLockConfiguration={
            'ObjectLockEnabled': 'Enabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'GOVERNANCE',
                    'Days': 1
                }
            }
        })
    zonegroup_meta_checkpoint(zonegroup)

    # upload an object and wait for sync
    objname = 'dummy'
    k = new_key(src, src_bucket_name, objname)
    k.set_contents_from_string('foo')
    zone_data_checkpoint(dst.zone, src.zone)

    # the entry must have been skipped: object absent from the destination
    e = assert_raises(ClientError, dst.s3_client.get_object,
                      Bucket=dst_bucket.name, Key=objname)
    assert e.response['Error']['Code'] == 'NoSuchKey'
4109+
4110+
@allow_bucket_replication
def test_bucket_replication_lock_disabled_to_lock_enabled():
    """Sync must skip objects once object lock is enabled on the destination only."""
    zonegroup = realm.master_zonegroup()
    conns = ZonegroupConns(zonegroup)

    src = conns.non_account_rw_zones[0]
    dst = conns.non_account_rw_zones[1]

    src_bucket = src.create_bucket(gen_bucket_name())
    # versioning must be enabled before object lock can be configured
    src.s3_client.put_bucket_versioning(
        Bucket=src_bucket.name,
        VersioningConfiguration={'Status': 'Enabled'})
    dst_bucket_name = gen_bucket_name()
    dst.create_bucket(dst_bucket_name)
    dst.s3_client.put_bucket_versioning(
        Bucket=dst_bucket_name,
        VersioningConfiguration={'Status': 'Enabled'})
    zonegroup_meta_checkpoint(zonegroup)

    # the replication policy is accepted while neither bucket has object lock
    src.s3_client.put_bucket_replication(
        Bucket=src_bucket.name,
        ReplicationConfiguration={
            'Role': '',
            'Rules': [{
                'ID': 'rule1',
                'Status': 'Enabled',
                'Destination': {
                    'Bucket': f'arn:aws:s3:::{dst_bucket_name}',
                }
            }]
        })
    zonegroup_meta_checkpoint(zonegroup)

    # introduce the mismatch: object lock on the destination bucket only
    dst.s3_client.put_object_lock_configuration(
        Bucket=dst_bucket_name,
        ObjectLockConfiguration={
            'ObjectLockEnabled': 'Enabled',
            'Rule': {
                'DefaultRetention': {
                    'Mode': 'GOVERNANCE',
                    'Days': 1
                }
            }
        })
    zonegroup_meta_checkpoint(zonegroup)

    # upload an object and wait for sync
    objname = 'dummy'
    k = new_key(src, src_bucket.name, objname)
    k.set_contents_from_string('foo')
    zone_data_checkpoint(dst.zone, src.zone)

    # the entry must have been skipped: object absent from the destination
    e = assert_raises(ClientError, dst.s3_client.get_object,
                      Bucket=dst_bucket_name, Key=objname)
    assert e.response['Error']['Code'] == 'NoSuchKey'

0 commit comments

Comments
 (0)