Commit 0d92599

Be able to only deploy capella cluster with terraform.py
Adds two flags that allow one to use the terraform.py script to deploy only the Capella cluster (serverless dataplane or provisioned cluster), without deploying client/utility nodes or serverless DBs. This is useful for debugging, for doing manual work on the cluster or dataplane, or for splitting work across two Jenkins jobs: one that deploys a cluster or dataplane and another that runs a test on it (and then keeps it around to run more tests).

Change-Id: Ie517905a67e7f0d5c89e2ff6a12070b181f7ac30
Reviewed-on: https://review.couchbase.org/c/perfrunner/+/190191
Tested-by: Build Bot <[email protected]>
Reviewed-by: Laura Silaja <[email protected]>
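As a rough illustration of how the new flags would be used (the invocation style below is an assumption, and the cluster spec and any other required options are omitted, not taken from this change):

# Hypothetical invocations; spec file and remaining options depend on the local perfrunner setup
python perfrunner/utils/terraform.py ... --capella-only        # deploy only the Capella cluster / serverless dataplane
python perfrunner/utils/terraform.py ... --no-serverless-dbs   # deploy the serverless dataplane but skip creating serverless DBs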
1 parent 6b8f98c commit 0d92599

File tree

2 files changed: +62 -42 lines changed

perfrunner/settings.py
perfrunner/utils/terraform.py

perfrunner/settings.py

Lines changed: 14 additions & 16 deletions

@@ -7,10 +7,13 @@
 from typing import Dict, Iterable, Iterator, List, Tuple
 
 from decorator import decorator
-from fabric.api import local
 
 from logger import logger
-from perfrunner.helpers.misc import maybe_atoi, target_hash
+from perfrunner.helpers.misc import (
+    maybe_atoi,
+    run_local_shell_command,
+    target_hash,
+)
 
 CBMONITOR_HOST = 'cbmonitor.sc.couchbase.com'
 SHOWFAST_HOST = 'showfast.sc.couchbase.com'  # 'localhost:8000'
@@ -511,14 +514,13 @@ def server_group_map(self) -> dict:
         return server_grp_map
 
     def get_aws_iid(self, hostname: str, region: str) -> str:
-        iid = local(
+        iid, _, _ = run_local_shell_command(
            (
                "env/bin/aws ec2 describe-instances --region {} "
                "--filter \"Name=ip-address,Values=$(dig +short {})\" "
                "--query \"Reservations[].Instances[].InstanceId\" "
                "--output text"
-            ).format(region, hostname),
-            capture=True
+            ).format(region, hostname)
        )
        return iid.strip()
 
@@ -557,17 +559,13 @@ def set_nebula_instance_ids(self) -> None:
            "--output text"
        ).format(region, self.infrastructure_settings['cbc_dataplane'])
 
-        dn_iids = local(
-            query.format('couchbase-cloud-nebula'),
-            capture=True
-        ).strip().split()
+        stdout, _, _ = run_local_shell_command(query.format('couchbase-cloud-nebula'))
+        dn_iids = stdout.strip().split()
        logger.info('Found DN instance IDs: {}'.format(', '.join(dn_iids)))
        self.config.set('nebula_instance_ids', 'direct_nebula', '\n' + '\n'.join(dn_iids))
 
-        dapi_iids = local(
-            query.format('couchbase-cloud-data-api'),
-            capture=True
-        ).strip().split()
+        stdout, _, _ = run_local_shell_command(query.format('couchbase-cloud-data-api'))
+        dapi_iids = stdout.strip().split()
        logger.info('Found DAPI instance IDs: {}'.format(', '.join(dapi_iids)))
        self.config.set('nebula_instance_ids', 'dapi', '\n' + '\n'.join(dapi_iids))
 
@@ -579,16 +577,16 @@ def set_sgw_instance_ids(self) -> None:
 
        if self.capella_backend == 'aws':
            region = os.environ.get('AWS_REGION', 'us-east-1')
-            sgids = local(
+            stdout, _, _ = run_local_shell_command(
                (
                    "env/bin/aws ec2 describe-instances --region {} "
                    "--filters \"Name=tag-key,Values=couchbase-cloud-syncgateway-id\" "
                    "\"Name=tag:couchbase-app-services,Values={}\" "
                    "--query \"Reservations[].Instances[].InstanceId\" "
                    "--output text"
                ).format(region, self.infrastructure_settings['cbc_cluster']),
-                capture=True
-            ).strip().split()
+            )
+            sgids = stdout.strip().split()
            logger.info("Found Instance IDs for sgw: {}".format(', '.join(sgids)))
            self.config.set('sgw_instance_ids', 'sync_gateways', '\n' + '\n'.join(sgids))
            self.update_spec_file()
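For reference, once formatted the query that get_aws_iid() now hands to run_local_shell_command() is an AWS CLI call of roughly this shape (the region and hostname below are placeholder values, not part of the change):

# Placeholder region/hostname; settings.py fills these in via str.format()
env/bin/aws ec2 describe-instances --region us-east-1 \
    --filter "Name=ip-address,Values=$(dig +short <hostname>)" \
    --query "Reservations[].Instances[].InstanceId" \
    --output text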

perfrunner/utils/terraform.py

Lines changed: 48 additions & 26 deletions

@@ -219,6 +219,7 @@ def create_tfvar_nodes(self) -> Dict[str, Dict]:
        return tfvar_nodes
 
    def populate_tfvars(self):
+        logger.info('Setting tfvars')
        cloud_provider = self.backend if self.provider == 'capella' else self.provider
        global_tag = self.options.tag if self.options.tag else ''
        if cloud_provider.lower() == 'gcp':
@@ -254,10 +255,13 @@ def populate_tfvars(self):
 
    # Initializes terraform environment.
    def terraform_init(self, provider):
+        logger.info('Initializing Terraform (terraform init)')
        local('cd terraform/{} && terraform init >> terraform.log'.format(provider))
 
    # Apply and output terraform deployment.
    def terraform_apply(self, provider):
+        logger.info('Building and executing Terraform deployment plan '
+                    '(terraform plan, terraform apply)')
        local('cd terraform/{} && '
              'terraform plan -out tfplan.out >> terraform.log && '
              'terraform apply -auto-approve tfplan.out'
@@ -270,6 +274,8 @@ def terraform_output(self, provider):
        return output
 
    def terraform_destroy(self, provider):
+        logger.info('Building and executing Terraform destruction plan '
+                    '(terraform plan -destroy, terraform apply)')
        local('cd terraform/{} && '
              'terraform plan -destroy -out tfplan_destroy.out >> terraform.log && '
              'terraform apply -auto-approve tfplan_destroy.out'
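For context, the terraform_init, terraform_apply and terraform_destroy wrappers shell out to the Terraform CLI; per the context lines above, the underlying command sequence is roughly the following (the provider directory is a placeholder):

cd terraform/<provider>                                             # e.g. aws or gcp
terraform init >> terraform.log                                     # terraform_init
terraform plan -out tfplan.out >> terraform.log                     # terraform_apply
terraform apply -auto-approve tfplan.out
terraform plan -destroy -out tfplan_destroy.out >> terraform.log    # terraform_destroy
terraform apply -auto-approve tfplan_destroy.out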
@@ -461,14 +467,15 @@ def deploy(self):
        if not self.project_id:
            self.create_project()
 
-        # Configure terraform
-        self.populate_tfvars()
-        self.terraform_init(self.backend)
+        if not self.options.capella_only:
+            # Configure terraform
+            self.populate_tfvars()
+            self.terraform_init(self.backend)
 
-        # Deploy non-capella resources
-        self.terraform_apply(self.backend)
-        non_capella_output = self.terraform_output(self.backend)
-        Terraform.update_spec(self, non_capella_output)
+            # Deploy non-capella resources
+            self.terraform_apply(self.backend)
+            non_capella_output = self.terraform_output(self.backend)
+            Terraform.update_spec(self, non_capella_output)
 
        # Deploy capella cluster
        self.deploy_cluster()
@@ -479,8 +486,8 @@
        # Update cluster spec file
        self.update_spec()
 
-        # Do VPC peering
-        if self.options.vpc_peering:
+        if not self.options.capella_only and self.options.vpc_peering:
+            # Do VPC peering
            network_info = non_capella_output['network']['value']
            self.peer_vpc(network_info, self.cluster_ids[0])
 
@@ -1042,19 +1049,28 @@ def get_tenant_id(self):
        return tenant_id
 
    def deploy(self):
-        # Configure terraform
-        Terraform.populate_tfvars(self)
-        self.terraform_init(self.backend)
-
-        # Deploy non-capella resources
-        self.terraform_apply(self.backend)
-        non_capella_output = self.terraform_output(self.backend)
-        Terraform.update_spec(self, non_capella_output)
+        if not self.options.capella_only:
+            logger.info('Deploying non-Capella resources')
+            # Configure terraform
+            Terraform.populate_tfvars(self)
+            self.terraform_init(self.backend)
+
+            # Deploy non-capella resources
+            self.terraform_apply(self.backend)
+            non_capella_output = self.terraform_output(self.backend)
+            Terraform.update_spec(self, non_capella_output)
+        else:
+            logger.info('Skipping deploying non-Capella resources as --capella-only flag is set')
 
        # Deploy serverless dataplane + databases
        self.deploy_serverless_dataplane()
-        self.create_project()
-        self.create_serverless_dbs()
+
+        if not self.options.no_serverless_dbs:
+            self.create_project()
+            self.create_serverless_dbs()
+        else:
+            logger.info('Skipping deploying serverless dbs as --no-serverless-dbs flag is set')
+
        self.update_spec()
 
    def destroy(self):
@@ -1078,6 +1094,7 @@ def destroy(self):
 
    def deploy_serverless_dataplane(self):
        if not self.dp_id:
+            logger.info('Deploying serverless dataplane')
            # If no dataplane ID given (which is normal) then we deploy a new one
            nebula_config = self.infra_spec.direct_nebula
            dapi_config = self.infra_spec.data_api
@@ -1134,7 +1151,8 @@ def deploy_serverless_dataplane(self):
            self.infra_spec.config.set('infrastructure', 'cbc_dataplane', self.dp_id)
            self.infra_spec.update_spec_file()
        else:
-            logger.info('Existing dataplane specified: {}'.format(self.dp_id))
+            logger.info('Skipping serverless dataplane deployment as existing dataplane specified: '
+                        '{}'.format(self.dp_id))
            logger.info('Verifying existing dataplane deployment')
 
        # Whether we have just deployed a dataplane or not, we will confirm the deployment
@@ -1273,18 +1291,15 @@ def create_serverless_dbs(self):
        logger.info('All serverless DBs deployed')
 
    def update_spec(self):
-        db_id = self.test_config.buckets[0]
-        resp = self.serverless_client.get_database_debug_info(db_id)
+        resp = self.serverless_client.get_serverless_dataplane_info(self.dp_id)
        raise_for_status(resp)
        dp_info = resp.json()
 
-        logger.info('Dataplane config: {}'.format(pretty_dict(dp_info['dataplane'])))
-
        resp = self.serverless_client.get_access_to_serverless_dataplane_nodes(self.dp_id)
        raise_for_status(resp)
        dp_creds = resp.json()
 
-        hostname = dp_info['dataplane']['couchbase']['nodes'][0]['hostname']
+        hostname = dp_info['couchbase']['nodes'][0]['hostname']
        auth = (
            dp_creds['couchbaseCreds']['username'],
            dp_creds['couchbaseCreds']['password']
@@ -1737,7 +1752,7 @@ def get_args():
                        choices=[
                            'us-central1-a',
                            'us-central1-b',
-                            'us-central1-c'
+                            'us-central1-c',
                            'us-central1-f',
                            'us-west1-a',
                            'us-west1-b',
@@ -1811,6 +1826,13 @@ def get_args():
                        action='store_true',
                        help='Don\'t destroy cluster or serverless dataplane, only the clients '
                             'and utilities')
+    parser.add_argument('--no-serverless-dbs',
+                        action='store_true',
+                        help='Don\'t deploy serverless databases, only deploy serverless dataplane')
+    parser.add_argument('--capella-only',
+                        action='store_true',
+                        help='Only deploy Capella resources (provisioned cluster or serverless '
+                             'dataplane). Will not deploy perf client or utility nodes.')
    parser.add_argument('-t', '--tag',
                        help='Global tag for launched instances.')
    parser.add_argument('--enable-disk-autoscaling',
