Commit 60622a3 (1 parent: a502863)

chore: eks/ecr integration testing preparations continued

- Decoupling cluster creation from making the image accessible to the cluster: EKS clusters, even pre-existing ones, need access to the image under test. This is most easily achieved by pushing the image to ECR, just as we load the image into KinD for KinD-based runs.
- loadImageInCluster may decide on an updated image name for testing: when we push the image to the remote cluster's registry, its name/tag may change to include the registry's prefix.
- Clean an environment instead of deleting it: for clusters we don't want to delete (EKS), just clean up what the test created.
- More generic decoupling from KinD.
- Adding the EKS test to the CircleCI config.
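A condensed sketch of the resulting deployMonitor() flow, taken from the test/setup/index.ts changes below (names as in that file):

    // create a cluster only when asked to (KinD in CI); EKS reuses an existing cluster
    if (createCluster) {
      await platforms[testPlatform].create();
    }
    // loading/pushing may rename the image, e.g. ECR prefixes it with the registry host
    const remoteImageName = await platforms[testPlatform].loadImage(imageNameAndTag);
    await platforms[testPlatform].config();
    await createEnvironment();
    await createSecretForGcrIoAccess();
    // install the monitor under the name the cluster can actually pull
    const integrationId = await installKubernetesMonitor(remoteImageName);
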
File tree (7 files changed: +138 / -37 lines):

- .circleci/config.yml
- test/helpers/kubectl.ts
- test/integration/kubernetes.test.ts
- test/setup/index.ts
- test/setup/platforms/eks.ts
- test/setup/platforms/index.ts
- test/setup/platforms/kind.ts


.circleci/config.yml (30 additions, 0 deletions)

@@ -173,6 +173,32 @@ jobs:
           fi
         when: on_fail

+  eks_integration_tests:
+    <<: *default_machine_config
+    steps:
+      - checkout
+      - run:
+          name: INTEGRATION TESTS EKS
+          command: |
+            export NVM_DIR="/opt/circleci/.nvm" &&
+            [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" &&
+            nvm install v10 &&
+            npm install &&
+            docker login --username ${DOCKERHUB_USER} --password ${DOCKERHUB_PASSWORD} &&
+            export IMAGE_TAG=$([[ "$CIRCLE_BRANCH" == "staging" ]] && echo "staging-candidate" || echo "discardable") &&
+            export KUBERNETES_MONITOR_IMAGE_NAME_AND_TAG=snyk/kubernetes-monitor:${IMAGE_TAG}-${CIRCLE_SHA1} &&
+            docker pull ${KUBERNETES_MONITOR_IMAGE_NAME_AND_TAG} &&
+            npm run test:integration:eks
+      - run:
+          name: Notify Slack on failure
+          command: |
+            if [[ "$CIRCLE_BRANCH" == "staging" ]]; then
+              ./scripts/slack-notify-failure.sh "staging-eks-integration-tests-${CIRCLE_SHA1}"
+            else
+              echo "Current branch is $CIRCLE_BRANCH so skipping notifying Slack"
+            fi
+          when: on_fail
+
 ######################## MERGE TO STAGING ########################
   tag_and_push:
     <<: *default_container_config

@@ -293,6 +319,10 @@ workflows:
          requires:
            - build_image
          <<: *staging_branch_only_filter
+      - eks_integration_tests:
+          requires:
+            - build_image
+          <<: *staging_branch_only_filter
       - package_manager_test_apk:
          requires:
            - build_image

test/helpers/kubectl.ts (6 additions, 0 deletions)

@@ -34,6 +34,12 @@ export async function createNamespace(namespace: string): Promise<void> {
   console.log(`Created namespace ${namespace}`);
 }

+export async function deleteNamespace(namespace: string): Promise<void> {
+  console.log(`Deleting namespace ${namespace}...`);
+  await exec(`./kubectl delete namespace ${namespace}`);
+  console.log(`Deleted namespace ${namespace}`);
+}
+
 export async function createSecret(
   secretName: string,
   namespace: string,

test/integration/kubernetes.test.ts (9 additions, 1 deletion)

@@ -37,7 +37,15 @@ tap.test('deploy snyk-monitor', async (t) => {

 // Next we apply some sample workloads
 tap.test('deploy sample workloads', async (t) => {
-  await setup.createSampleDeployments();
+  const servicesNamespace = 'services';
+  const someImageWithSha = 'alpine@sha256:7746df395af22f04212cd25a92c1d6dbc5a06a0ca9579a229ef43008d4d1302a';
+  await Promise.all([
+    kubectl.applyK8sYaml('./test/fixtures/alpine-pod.yaml'),
+    kubectl.applyK8sYaml('./test/fixtures/nginx-replicationcontroller.yaml'),
+    kubectl.applyK8sYaml('./test/fixtures/redis-deployment.yaml'),
+    kubectl.applyK8sYaml('./test/fixtures/centos-deployment.yaml'),
+    kubectl.createDeploymentFromImage('alpine-from-sha', someImageWithSha, servicesNamespace),
+  ]);
   t.pass('successfully deployed sample workloads');
 });

test/setup/index.ts (18 additions, 23 deletions)

@@ -6,6 +6,9 @@ import platforms from './platforms';
 import * as kubectl from '../helpers/kubectl';
 import * as waiters from './waiters';

+const testPlatform = process.env['TEST_PLATFORM'] || 'kind';
+const createCluster = process.env['CREATE_CLUSTER'] === 'true';
+
 function getIntegrationId(): string {
   const integrationId = uuidv4();
   console.log(`Generated new integration ID ${integrationId}`);

@@ -55,9 +58,13 @@ function createTestYamlDeployment(

 export async function removeMonitor(): Promise<void> {
   try {
-    await platforms.kind.delete();
+    if (createCluster) {
+      await platforms[testPlatform].delete();
+    } else {
+      await platforms[testPlatform].clean();
+    }
   } catch (error) {
-    console.log(`Could not delete kind cluster: ${error.message}`);
+    console.log(`Could not remove the Kubernetes-Monitor: ${error.message}`);
   }

   console.log('Removing KUBECONFIG environment variable...');

@@ -73,20 +80,20 @@ export async function removeMonitor(): Promise<void> {

 async function createEnvironment(): Promise<void> {
   // TODO: we probably want to use k8s-api for that, not kubectl
-  const servicesNamespace = 'services';
-  await kubectl.createNamespace(servicesNamespace);
+  await kubectl.createNamespace('services');
   // Small hack to prevent timing problems in CircleCI...
+  // TODO: should be replaced by actively waiting for the namespace to be created
   await sleep(5000);
+}

-  // Create imagePullSecrets for pulling private images from gcr.io.
-  // This is needed for deploying gcr.io images in KinD (this is _not_ used by snyk-monitor).
+async function createSecretForGcrIoAccess(): Promise<void> {
   const gcrSecretName = 'gcr-io';
   const gcrKubectlSecretsKeyPrefix = '--';
   const gcrSecretType = 'docker-registry';
   const gcrToken = getEnvVariableOrDefault('GCR_IO_SERVICE_ACCOUNT', '{}');
   await kubectl.createSecret(
     gcrSecretName,
-    servicesNamespace,
+    'services',
     {
       'docker-server': 'https://gcr.io',
       'docker-username': '_json_key',

@@ -129,17 +136,17 @@ export async function deployMonitor(): Promise<string> {
     'snyk/kubernetes-monitor:local',
   );

-  const testPlatform = process.env['TEST_PLATFORM'] || 'kind';
-  const createCluster = process.env['CREATE_CLUSTER'] === 'true';
   console.log(`platform chosen is ${testPlatform}, createCluster===${createCluster}`);

   await kubectl.downloadKubectl();
   if (createCluster) {
-    await platforms[testPlatform].create(imageNameAndTag);
+    await platforms[testPlatform].create();
   }
+  const remoteImageName = await platforms[testPlatform].loadImage(imageNameAndTag);
   await platforms[testPlatform].config();
   await createEnvironment();
-  const integrationId = await installKubernetesMonitor(imageNameAndTag);
+  await createSecretForGcrIoAccess();
+  const integrationId = await installKubernetesMonitor(remoteImageName);
   await waiters.waitForMonitorToBeReady();
   console.log(`Deployed the snyk-monitor with integration ID ${integrationId}`);
   return integrationId;

@@ -157,15 +164,3 @@ export async function deployMonitor(): Promise<string> {
     throw err;
   }
 }
-
-export async function createSampleDeployments(): Promise<void> {
-  const servicesNamespace = 'services';
-  const someImageWithSha = 'alpine@sha256:7746df395af22f04212cd25a92c1d6dbc5a06a0ca9579a229ef43008d4d1302a';
-  await Promise.all([
-    kubectl.applyK8sYaml('./test/fixtures/alpine-pod.yaml'),
-    kubectl.applyK8sYaml('./test/fixtures/nginx-replicationcontroller.yaml'),
-    kubectl.applyK8sYaml('./test/fixtures/redis-deployment.yaml'),
-    kubectl.applyK8sYaml('./test/fixtures/centos-deployment.yaml'),
-    kubectl.createDeploymentFromImage('alpine-from-sha', someImageWithSha, servicesNamespace),
-  ]);
-}

test/setup/platforms/eks.ts (53 additions, 4 deletions)

@@ -1,13 +1,62 @@
-export async function createCluster(imageNameAndTag: string): Promise<void> {
-  exportKubeConfig();
+import { exec } from 'child-process-promise';
+import * as kubectl from '../../helpers/kubectl';
+
+export async function createCluster(): Promise<void> {
   throw new Error('Not implemented');
-  // process.env.KUBECONFIG = 'path-to-/kubeconfig-aws';
 }

 export async function deleteCluster(): Promise<void> {
   throw new Error('Not implemented');
 }

 export async function exportKubeConfig(): Promise<void> {
-  throw new Error('Not implemented');
+  await exec('aws eks update-kubeconfig --name runtime-experiments --kubeconfig ./kubeconfig');
+  process.env.KUBECONFIG = './kubeconfig';
+}
+
+export async function loadImageInCluster(imageNameAndTag: string): Promise<string> {
+  console.log(`Loading image ${imageNameAndTag} in ECR...`);
+
+  // update the `aws` CLI; the one in CircleCI's default image is outdated and doesn't support EKS
+  await exec('pip install awscli --ignore-installed six');
+
+  // TODO: assert all the vars are present before starting the setup?
+  // TODO: wipe out the data during teardown?
+  await exec(`aws configure set aws_access_key_id ${process.env['AWS_ACCESS_KEY_ID']}`);
+  await exec(`aws configure set aws_secret_access_key ${process.env['AWS_SECRET_ACCESS_KEY']}`);
+  await exec(`aws configure set region ${process.env['AWS_REGION']}`);
+
+  const ecrLogin = await exec('aws ecr get-login --region us-east-2 --no-include-email');
+
+  // aws ecr get-login returns something that looks like:
+  // docker login -u AWS -p <secret> https://the-address-of-ecr-we-should-use.com
+  // `docker tag` wants just the last part without https://
+  // `docker login` wants everything
+
+  // validate output so we don't execute malicious stuff
+  if (ecrLogin.stdout.indexOf('docker login -u AWS -p') !== 0) {
+    throw new Error('aws ecr get-login returned an unexpected output');
+  }
+
+  const targetImage = targetImageFromLoginDetails(ecrLogin.stdout);
+
+  await exec(`docker tag ${imageNameAndTag} ${targetImage}`);
+  await exec(ecrLogin.stdout);
+  await exec(`docker push ${targetImage}`);
+
+  console.log(`Loaded image ${targetImage} in ECR`);
+  return targetImage;
+}
+
+export async function clean(): Promise<void> {
+  await Promise.all([
+    kubectl.deleteNamespace('services'),
+    kubectl.deleteNamespace('snyk-monitor'),
+  ]);
+}
+
+function targetImageFromLoginDetails(ecrLoginOutput: string): string {
+  const split = ecrLoginOutput.split(' ');
+  const targetImagePrefix = split[split.length - 1].replace('https://', '').trim();
+  return `${targetImagePrefix}/snyk/kubernetes-monitor:local`;
 }
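To illustrate targetImageFromLoginDetails, here is a minimal sketch with a hypothetical account ID and token (the real values come from `aws ecr get-login` at runtime):

    // hypothetical output of `aws ecr get-login --region us-east-2 --no-include-email`
    const ecrLoginOutput =
      'docker login -u AWS -p <token> https://123456789012.dkr.ecr.us-east-2.amazonaws.com';

    // the last whitespace-separated token, minus "https://", becomes the registry prefix:
    // targetImageFromLoginDetails(ecrLoginOutput) ===
    //   '123456789012.dkr.ecr.us-east-2.amazonaws.com/snyk/kubernetes-monitor:local'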

test/setup/platforms/index.ts (9 additions, 1 deletion)

@@ -3,23 +3,31 @@ import * as eks from './eks';

 interface IPlatformSetup {
   // create a Kubernetes cluster
-  create: (imageNameAndTag: string) => Promise<void>;
+  create: () => Promise<void>;
+  // load the image so Kubernetes can run it; returns the image's name in its registry's format
+  loadImage: (imageNameAndTag: string) => Promise<string>;
   // delete a Kubernetes cluster
   delete: () => Promise<void>;
   // set KUBECONFIG to point at the tested cluster
   config: () => Promise<void>;
+  // clean up whatever we littered an existing cluster with
+  clean: () => Promise<void>;
 }

 const kindSetup: IPlatformSetup = {
   create: kind.createCluster,
+  loadImage: kind.loadImageInCluster,
   delete: kind.deleteCluster,
   config: kind.exportKubeConfig,
+  clean: kind.clean,
 };

 const eksSetup: IPlatformSetup = {
   create: eks.createCluster,
+  loadImage: eks.loadImageInCluster,
   delete: eks.deleteCluster,
   config: eks.exportKubeConfig,
+  clean: eks.clean,
 };

 export default {
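The hunk cuts off at the default export. Given that test/setup/index.ts indexes platforms[testPlatform] with 'kind' or 'eks', the export presumably maps those names to the setups above; a sketch (assumed, not shown in this commit):

    // assumed shape of the default export, keyed by TEST_PLATFORM values
    export default {
      kind: kindSetup,
      eks: eksSetup,
    };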

test/setup/platforms/kind.ts (13 additions, 8 deletions)

@@ -6,11 +6,10 @@ import * as needle from 'needle';

 const clusterName = 'kind';

-export async function createCluster(imageNameAndTag: string): Promise<void> {
+export async function createCluster(): Promise<void> {
   const osDistro = platform();
   await download(osDistro);
   await createKindCluster(clusterName);
-  await loadImageInCluster(imageNameAndTag);
 }

 export async function deleteCluster(): Promise<void> {

@@ -27,6 +26,18 @@ export async function exportKubeConfig(): Promise<void> {
   console.log('Exported K8s config!');
 }

+export async function loadImageInCluster(imageNameAndTag: string): Promise<string> {
+  console.log(`Loading image ${imageNameAndTag} in KinD cluster...`);
+  await exec(`./kind load docker-image ${imageNameAndTag}`);
+  console.log(`Loaded image ${imageNameAndTag}`);
+  return imageNameAndTag;
+}
+
+export async function clean(): Promise<void> {
+  // just delete the cluster instead
+  throw new Error('Not implemented');
+}
+
 async function download(osDistro: string): Promise<void> {
   try {
     accessSync(resolve(process.cwd(), 'kind'), constants.R_OK);

@@ -62,9 +73,3 @@ async function createKindCluster(clusterName, kindImageTag = 'latest'): Promise<
   await exec(`./kind create cluster --name="${clusterName}" ${kindImageArgument}`);
   console.log(`Created cluster ${clusterName}!`);
 }
-
-async function loadImageInCluster(imageNameAndTag): Promise<void> {
-  console.log(`Loading image ${imageNameAndTag} in cluster...`);
-  await exec(`./kind load docker-image ${imageNameAndTag}`);
-  console.log(`Loaded image ${imageNameAndTag}`);
-}
