Skip to content

Commit e93de5f

Browse files
Rex
authored and committed
Add integration test for cluster_resource_bucket
* Add creation/deletion test for cluster_resource_bucket parameter * Modified update test to use cluster_resource_bucket for test_update_hit Signed-off-by: Rex <[email protected]>
1 parent 8b4efdd commit e93de5f

File tree

9 files changed

+125
-4
lines changed

9 files changed

+125
-4
lines changed

tests/integration-tests/configs/develop.yaml

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -543,4 +543,11 @@ test-suites:
543543
- regions: ["us-east-1"]
544544
instances: ["p4d.24xlarge"]
545545
oss: ["alinux", "ubuntu1804", "centos7"]
546-
schedulers: ["slurm"]
546+
schedulers: ["slurm"]
547+
resource_bucket:
548+
test_resource_bucket.py::test_resource_bucket:
549+
dimensions:
550+
- regions: ["ap-southeast-1"]
551+
instances: {{ common.INSTANCES_DEFAULT_X86 }}
552+
oss: ["alinux2"]
553+
schedulers: ["slurm", "awsbatch"]
Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
import logging
2+
3+
import boto3
4+
import pytest
5+
from assertpy import assert_that
6+
7+
8+
@pytest.mark.regions(["us-east-2"])
@pytest.mark.schedulers(["slurm", "awsbatch"])
@pytest.mark.oss(["alinux"])
@pytest.mark.usefixtures("os", "instance")
def test_resource_bucket(region, scheduler, pcluster_config_reader, clusters_factory, s3_bucket_factory, test_datadir):
    """Verify create/update/delete behavior of a cluster using cluster_resource_bucket.

    The bucket hosting cluster artifacts must have versioning enabled
    (s3_bucket_factory is expected to create it that way).
    """
    logging.info("Testing cluster creation/deletion behavior when specifying cluster_resource_bucket")
    initial_bucket = s3_bucket_factory()
    # Seed the bucket with a file up front; deleting the cluster artifacts must leave it untouched.
    boto3.resource("s3").Bucket(initial_bucket).upload_file(str(test_datadir / "s3_test_file"), "s3_test_file")

    cluster = clusters_factory(
        pcluster_config_reader(
            config_file="pcluster.config_{0}.ini".format(scheduler), resource_bucket=initial_bucket
        )
    )
    assert_that(cluster.cfn_outputs.get("ResourcesS3Bucket")).is_equal_to(initial_bucket)
    artifact_dir = cluster.cfn_outputs.get("ArtifactS3RootDirectory")
    assert_that(artifact_dir).is_not_none()

    # Point the config at a different bucket and update: the bucket recorded in
    # CloudFormation must NEVER change after cluster creation.
    replacement_bucket = s3_bucket_factory()
    cluster.config_file = str(
        pcluster_config_reader(
            config_file="pcluster.config_{0}.ini".format(scheduler), resource_bucket=replacement_bucket
        )
    )
    cluster.update()
    assert_that(cluster.cfn_outputs.get("ResourcesS3Bucket")).is_equal_to(initial_bucket)
    assert_that(cluster.cfn_outputs.get("ArtifactS3RootDirectory")).is_equal_to(artifact_dir)

    cluster.delete()
    _check_delete_behavior(region, initial_bucket, artifact_dir)
41+
def _check_delete_behavior(region, bucket_name, artifact_directory):
    """Check cluster deletion cleaned up its artifacts but preserved pre-existing objects.

    :param region: AWS region the bucket lives in.
    :param bucket_name: name of the bucket passed as cluster_resource_bucket.
    :param artifact_directory: S3 prefix (ArtifactS3RootDirectory CFN output) that
        held the cluster artifacts.
    :raises Exception: if artifacts were left behind under the prefix, or if the
        pre-existing ``s3_test_file`` object was removed along with them.
    """
    s3_client = boto3.client("s3", region_name=region)
    # Everything under the artifact prefix must have been removed on cluster delete.
    response = s3_client.list_objects_v2(Bucket=bucket_name, Delimiter="/", Prefix=artifact_directory)
    if response.get("Contents") or response.get("CommonPrefixes"):
        logging.error(
            "Objects under %s/%s not cleaned up properly!\nContents: %s\nCommonPrefixes: %s",
            bucket_name,
            artifact_directory,
            response.get("Contents"),
            response.get("CommonPrefixes"),
        )
        # Previously a bare `raise Exception` with no message; include the offending
        # location so failures are diagnosable from the traceback alone.
        raise Exception(
            "Cluster artifacts under {0}/{1} were not cleaned up on deletion".format(
                bucket_name, artifact_directory
            )
        )
    # The file uploaded before cluster creation must still exist
    # (head_object raises if the key is gone).
    try:
        s3_client.head_object(Bucket=bucket_name, Key="s3_test_file")
    except Exception as e:
        logging.error("Unable to verify pre-existing files in bucket are preserved, with error: %s", e)
        raise
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
[global]
2+
cluster_template = default
3+
4+
[aws]
5+
aws_region_name = {{ region }}
6+
7+
[vpc parallelcluster-vpc]
8+
vpc_id = {{ vpc_id }}
9+
master_subnet_id = {{ public_subnet_id }}
10+
compute_subnet_id = {{ private_subnet_id }}
11+
use_public_ips = false
12+
13+
[cluster default]
14+
base_os = {{ os }}
15+
key_name = {{ key_name }}
16+
vpc_settings = parallelcluster-vpc
17+
scheduler = {{ scheduler }}
18+
master_instance_type = {{ instance }}
19+
compute_instance_type = {{ instance }}
20+
min_vcpus=1
21+
cluster_resource_bucket = {{ resource_bucket }}
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
[global]
2+
cluster_template = default
3+
4+
[aws]
5+
aws_region_name = {{ region }}
6+
7+
[vpc parallelcluster-vpc]
8+
vpc_id = {{ vpc_id }}
9+
master_subnet_id = {{ public_subnet_id }}
10+
compute_subnet_id = {{ private_subnet_id }}
11+
use_public_ips = false
12+
13+
[cluster default]
14+
base_os = {{ os }}
15+
key_name = {{ key_name }}
16+
vpc_settings = parallelcluster-vpc
17+
scheduler = {{ scheduler }}
18+
master_instance_type = {{ instance }}
19+
cluster_resource_bucket = {{ resource_bucket }}
20+
queue_settings = queue1
21+
22+
[queue queue1]
23+
compute_resource_settings = queue1_i1
24+
compute_type = ondemand
25+
26+
[compute_resource queue1_i1]
27+
instance_type = {{ instance }}
28+
min_count = 1
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# Sample test file to make sure existing files in S3 bucket are not removed when cleaning up cluster resources

tests/integration-tests/tests/update/test_update.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ def test_update_sit(
4242
bucket.upload_file(str(test_datadir / "postinstall.sh"), "scripts/postinstall.sh")
4343

4444
# Create cluster with initial configuration
45-
init_config_file = pcluster_config_reader()
45+
init_config_file = pcluster_config_reader(resource_bucket=bucket_name)
4646
cluster = clusters_factory(init_config_file)
4747

4848
# Update cluster with the same configuration, command should not result any error even if not using force update
@@ -148,7 +148,7 @@ def test_update_hit(region, scheduler, pcluster_config_reader, clusters_factory,
148148
bucket.upload_file(str(test_datadir / "postinstall.sh"), "scripts/postinstall.sh")
149149

150150
# Create cluster with initial configuration
151-
init_config_file = pcluster_config_reader()
151+
init_config_file = pcluster_config_reader(resource_bucket=bucket_name)
152152
cluster = clusters_factory(init_config_file)
153153

154154
# Update cluster with the same configuration, command should not result any error even if not using force update
@@ -204,7 +204,9 @@ def test_update_hit(region, scheduler, pcluster_config_reader, clusters_factory,
204204
job_id = slurm_commands.assert_job_submitted(result.stdout)
205205

206206
# Update cluster with new configuration
207-
updated_config_file = pcluster_config_reader(config_file="pcluster.config.update.ini", bucket=bucket_name)
207+
updated_config_file = pcluster_config_reader(
208+
config_file="pcluster.config.update.ini", bucket=bucket_name, resource_bucket=bucket_name
209+
)
208210
cluster.config_file = str(updated_config_file)
209211
cluster.update()
210212

tests/integration-tests/tests/update/test_update/test_update_hit/pcluster.config.ini

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ raid_settings = custom
3030
fsx_settings = custom
3131
cw_log_settings = custom
3232
queue_settings = queue1,queue2
33+
cluster_resource_bucket = {{ resource_bucket }}
3334

3435
[queue queue1]
3536
compute_resource_settings = queue1_i1,queue1_i2

tests/integration-tests/tests/update/test_update/test_update_hit/pcluster.config.update.ini

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ raid_settings = custom
2929
fsx_settings = custom
3030
cw_log_settings = custom
3131
queue_settings = queue1,queue2,queue3
32+
cluster_resource_bucket = {{ resource_bucket }}
3233

3334
[queue queue1]
3435
compute_resource_settings = queue1_i1,queue1_i2,queue1_i3

tests/integration-tests/utils.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,8 @@ def create_s3_bucket(bucket_name, region):
195195
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={"LocationConstraint": region})
196196
else:
197197
s3_client.create_bucket(Bucket=bucket_name)
198+
# Enable versioning on bucket
199+
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"})
198200

199201

200202
@retry(wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_attempt_number=3)
@@ -208,6 +210,7 @@ def delete_s3_bucket(bucket_name, region):
208210
try:
209211
bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
210212
bucket.objects.all().delete()
213+
bucket.object_versions.all().delete()
211214
bucket.delete()
212215
except boto3.client("s3").exceptions.NoSuchBucket:
213216
pass

0 commit comments

Comments
 (0)