Skip to content

Commit f630f14

Browse files
committed
first draft
1 parent ef6f013 commit f630f14

File tree

3 files changed

+110
-0
lines changed

3 files changed

+110
-0
lines changed
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
import ExistingEksMixedConstruct from '../lib/existing-eks-mixed-observability-construct';
2+
import { configureApp, errorHandler } from '../lib/common/construct-utils';
3+
4+
const app = configureApp();
5+
6+
new ExistingEksMixedConstruct().buildAsync(app, 'existing-eks-mixed').catch((error) => {
7+
errorHandler(app, "Existing Cluster Pattern is missing information of existing cluster: " + error);
8+
});
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
# Existing EKS Cluster AWS Mixed Observability Accelerator
2+
3+
## Objective
4+
5+
This pattern depends on the previous deployment of [_Single New EKS Cluster Observability Accelerator_](./single-new-eks-cluster.md).
6+
After deploying this pattern you will have the same setup as in [_Single New EKS Cluster AWS Mixed Observability Accelerator_](./single-new-eks-mixed-observability.md).
7+
8+
## Deploying
9+
10+
1. Follow the instructions in [_Single New EKS Cluster Observability Accelerator_](./single-new-eks-cluster.md).
11+
12+
2. Edit `~/.cdk.json` by setting the kubectl role name, as provided by the output of the above deployment of the _Single New EKS Cluster Observability Accelerator_.
13+
14+
```json
15+
...
16+
"existing.kubectl.rolename":""
17+
...
18+
```
19+
20+
3. Run the following command from the root of this repository to deploy the pipeline stack:
21+
22+
```bash
23+
make build
24+
make pattern existing-eks-mixed-observability deploy
25+
```
26+
27+
## Verify the resources
28+
29+
Please see [_Single New EKS Cluster AWS Mixed Observability Accelerator_](./single-new-eks-mixed-observability.md).
30+
31+
## Teardown
32+
33+
You can teardown the whole CDK stack with the following command:
34+
35+
```bash
36+
make pattern existing-eks-mixed-observability destroy
37+
```
38+
39+
Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
import { ImportClusterProvider, utils } from '@aws-quickstart/eks-blueprints';
2+
import * as blueprints from '@aws-quickstart/eks-blueprints';
3+
import { cloudWatchDeploymentMode } from '@aws-quickstart/eks-blueprints';
4+
import { ObservabilityBuilder } from '../common/observability-builder';
5+
import * as cdk from "aws-cdk-lib";
6+
import * as eks from 'aws-cdk-lib/aws-eks';
7+
8+
export default class ExistingEksMixedobservabilityConstruct {
9+
async buildAsync(scope: cdk.App, id: string) {
10+
// AddOns for the cluster
11+
const stackId = `${id}-observability-accelerator`;
12+
13+
const clusterName = utils.valueFromContext(scope, "existing.cluster.name", undefined);
14+
const kubectlRoleName = utils.valueFromContext(scope, "existing.kubectl.rolename", undefined);
15+
16+
const account = process.env.COA_ACCOUNT_ID! || process.env.CDK_DEFAULT_ACCOUNT!;
17+
const region = process.env.COA_AWS_REGION! || process.env.CDK_DEFAULT_REGION!;
18+
19+
const sdkCluster = await blueprints.describeCluster(clusterName, region); // get cluster information using EKS APIs
20+
const vpcId = sdkCluster.resourcesVpcConfig?.vpcId;
21+
22+
/**
23+
* Assumes the supplied role is registered in the target cluster for kubectl access.
24+
*/
25+
26+
const importClusterProvider = new ImportClusterProvider({
27+
clusterName: sdkCluster.name!,
28+
version: eks.KubernetesVersion.of(sdkCluster.version!),
29+
clusterEndpoint: sdkCluster.endpoint,
30+
openIdConnectProvider: blueprints.getResource(context =>
31+
new blueprints.LookupOpenIdConnectProvider(sdkCluster.identity!.oidc!.issuer!).provide(context)),
32+
clusterCertificateAuthorityData: sdkCluster.certificateAuthority?.data,
33+
kubectlRoleArn: blueprints.getResource(context => new blueprints.LookupRoleProvider(kubectlRoleName).provide(context)).roleArn,
34+
clusterSecurityGroupId: sdkCluster.resourcesVpcConfig?.clusterSecurityGroupId
35+
});
36+
37+
const cloudWatchAdotAddOn = new blueprints.addons.CloudWatchAdotAddOn({
38+
deploymentMode: cloudWatchDeploymentMode.DEPLOYMENT,
39+
namespace: 'default',
40+
name: 'adot-collector-cloudwatch',
41+
metricsNameSelectors: ['apiserver_request_.*', 'container_memory_.*', 'container_threads', 'otelcol_process_.*'],
42+
});
43+
44+
const addOns: Array<blueprints.ClusterAddOn> = [
45+
new blueprints.addons.CloudWatchLogsAddon({
46+
logGroupPrefix: `/aws/eks/${stackId}`,
47+
logRetentionDays: 30
48+
}),
49+
new blueprints.addons.AdotCollectorAddOn(),
50+
cloudWatchAdotAddOn,
51+
new blueprints.addons.XrayAdotAddOn(),
52+
];
53+
54+
ObservabilityBuilder.builder()
55+
.account(account)
56+
.region(region)
57+
.addExistingClusterObservabilityBuilderAddOns()
58+
.clusterProvider(importClusterProvider)
59+
.resourceProvider(blueprints.GlobalResources.Vpc, new blueprints.VpcProvider(vpcId)) // this is required with import cluster provider
60+
.addOns(...addOns)
61+
.build(scope, stackId);
62+
}
63+
}

0 commit comments

Comments
 (0)