Skip to content

Commit 3f428f9

Browse files
committed
chore(init): initial commit
0 parents  commit 3f428f9

File tree

4,130 files changed

+314346
-0
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

4,130 files changed

+314346
-0
lines changed

.gitignore

Whitespace-only changes.

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
## AWS Lambda Layer Release
2+
Your one-stop GitHub Action for releasing AWS Lambda layer versions.

action.yml

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# GitHub Action metadata for the AWS Lambda Layer Release action.
# Entry point is index.js, run on the node16 runtime (see `runs:` below).
name: 'AWS Lambda Layer Release'
description: 'Action lets you release your latest lambda layer version'
inputs:
  access_key_id: # id of input
    description: 'AWS_ACCESS_KEY_ID. String'
    required: true
  secret_access_key:
    description: 'AWS_SECRET_ACCESS_KEY. String'
    required: true
  layer_name:
    description: "AWS lambda layer name. String"
    required: true
  archive:
    description: "Path to lambda layer zip archive. String"
    required: true
  region:
    description: "The AWS region for this lambda and layer. Defaults to ap-south-1 (Mumbai). String"
    required: true
    default: "ap-south-1"
  runtimes:
    description: 'List of runtimes. "nodejs" || "nodejs4.3" || "nodejs6.10" || "nodejs8.10" || "nodejs10.x" || "nodejs12.x" || "nodejs14.x" || "nodejs16.x" || "java8" || "java8.al2" || "java11" || "python2.7" || "python3.6" || "python3.7" || "python3.8" || "python3.9" || "dotnetcore1.0" || "dotnetcore2.0" || "dotnetcore2.1" || "dotnetcore3.1" || "dotnet6" || "nodejs4.3-edge" || "go1.x" || "ruby2.5" || "ruby2.7" || "provided" || "provided.al2" || "nodejs18.x" || "python3.10" || "java17" || "ruby3.2" || "python3.11". String[]'
    required: false
  architectures:
    description: 'List of architectures. "x86_64" || "arm64". String[]'
    required: false
    default: '["x86_64", "arm64"]'
  s3_bucket:
    # Only consulted when the zipped layer exceeds the direct-upload size
    # threshold (index.js stages the archive through this bucket first).
    description: 's3 bucket is required if layer exceeds 10MB size. String'
    required: false
  functions:
    # JSON array of function names; each is pointed at the new layer version.
    description: "Optional Lambda function name(s) to update the layer to latest. String[]"
    required: false
    default: '[]'
runs:
  using: 'node16'
  main: 'index.js'

helper.js

Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,153 @@
1+
const { readFileSync, statSync } = require('fs');
2+
const { LambdaClient, PublishLayerVersionCommand, UpdateFunctionConfigurationCommand } = require('@aws-sdk/client-lambda');
3+
const { S3Client, PutObjectCommand, DeleteObjectCommand } = require('@aws-sdk/client-s3');
4+
5+
// HTTP status codes the AWS SDK returns for successful requests.
const non_error_response_codes = [200, 201, 204];

/**
 * Reusable factory for a LambdaClient scoped to one region/credential pair.
 * @param {{region: string, accessKeyId: string, secretAccessKey: string}} param0
 * @returns {LambdaClient}
 */
const lambdaClient = ({ region, accessKeyId, secretAccessKey }) => new LambdaClient({
    region,
    credentials: { accessKeyId, secretAccessKey }
});

/**
 * Reusable factory for an S3Client scoped to one region/credential pair.
 * @param {{region: string, accessKeyId: string, secretAccessKey: string}} param0
 * @returns {S3Client}
 */
const s3Client = ({ region, accessKeyId, secretAccessKey }) => new S3Client({
    region,
    credentials: { accessKeyId, secretAccessKey }
});

/**
 * True when an AWS SDK response carries a non-success HTTP status code.
 * (Was `indexOf(...) == -1 ? true : false` — loose equality plus a
 * redundant ternary; `!includes(...)` is the idiomatic equivalent.)
 * @param {*} response - any AWS SDK v3 response (has `$metadata`).
 * @returns {boolean}
 */
const errored = response => !non_error_response_codes.includes(response.$metadata.httpStatusCode);
27+
28+
exports.getArchiveSize = archive => statSync(archive).size
29+
/**
30+
* Resuable function to publish lambda layer. This function dynamically identifies the config
31+
* based on whether to pick archive from file or S3
32+
* @param {*} param0
33+
*/
34+
exports.publishLambdaLayer = async ({
35+
region,
36+
accessKeyId,
37+
secretAccessKey,
38+
layerName,
39+
archive,
40+
architectures,
41+
runtimes,
42+
s3Bucket = null
43+
}) => {
44+
/**
45+
* Initiate the lambda client
46+
*/
47+
const client = lambdaClient({ region, accessKeyId, secretAccessKey });
48+
const payload = {
49+
LayerName: layerName,
50+
Description: "",
51+
/**
52+
* If s3Bucket is defined in the input Parameters then value S3 uploaded layer.
53+
*/
54+
Content: s3Bucket ? {
55+
S3Bucket: s3Bucket,
56+
S3Key: `${layerName}.zip`
57+
} : {
58+
/**
59+
* Direct parse layer only if s3Bucket is null in params
60+
*/
61+
ZipFile: readFileSync(archive)
62+
},
63+
CompatibleArchitectures: architectures,
64+
CompatibleRuntimes: runtimes
65+
}
66+
const command = new PublishLayerVersionCommand(payload);
67+
const response = await client.send(command);
68+
if (errored(response)) {
69+
console.log(JSON.stringify(response));
70+
throw new Error("Error While publishing layer. If you feel this is a bug, raise a ticket on the repo.");
71+
}
72+
console.log("Success Uploading Layer!");
73+
return response;
74+
};
75+
/**
76+
* Reusable function that will publish layer to S3 just in case of layer size is higher than
77+
* the expected size. The code will identify whether this archive can be directly uploaded and if not,
78+
* It should be uploaded to S3 first using this function and then layer parsing is done by dynamoc Content config
79+
* in @function publishLambdaLayer
80+
* @param {*} param0
81+
*/
82+
exports.publishS3LayerArchive = async ({
83+
region,
84+
accessKeyId,
85+
secretAccessKey,
86+
s3Bucket,
87+
layerName,
88+
archive
89+
}) => {
90+
const client = s3Client({ region, accessKeyId, secretAccessKey });
91+
const command = new PutObjectCommand({
92+
Bucket: s3Bucket,
93+
Key: `${layerName}.zip`,
94+
Body: readFileSync(archive)
95+
});
96+
const response = await client.send(command);
97+
if (errored(response)) {
98+
console.log(JSON.stringify(response));
99+
throw new Error("Error While publishing layer to S3. If you feel this is a bug, raise a ticket on the repo.");
100+
}
101+
console.log("Success Uoloading Layer to S3!");
102+
return response;
103+
}
104+
/**
105+
* Reusable function to cleanup the temporary created layer archive on S3 to reduce the accumulating charges over time.
106+
* @param {*} param0
107+
*/
108+
exports.deleteTemporaryArchiveFromS3 = async ({
109+
region,
110+
accessKeyId,
111+
secretAccessKey,
112+
s3Bucket,
113+
s3Key
114+
}) => {
115+
const client = s3Client({ region, accessKeyId, secretAccessKey });
116+
const command = new DeleteObjectCommand({
117+
Bucket: s3Bucket,
118+
Key: s3Key
119+
});
120+
const response = await client.send(command);
121+
if (errored(response)) {
122+
console.log(JSON.stringify(response));
123+
throw new Error("Error While Deleting Layer From S3. If you feel this is a bug, raise a ticket on the repo.");
124+
}
125+
console.log("Success Deleting Layer From S3!");
126+
return response;
127+
}
128+
/**
129+
* Refresh lambda function to use the latest version of layer
130+
*/
131+
exports.refreshLambdaLayerVersion = async ({
132+
region,
133+
accessKeyId,
134+
secretAccessKey,
135+
functionNames,
136+
layerARN,
137+
}) => {
138+
const client = lambdaClient({ region, accessKeyId, secretAccessKey });
139+
const commands = []
140+
for (const functionName of functionNames)
141+
commands.push(client.send(new UpdateFunctionConfigurationCommand({
142+
FunctionName: functionName,
143+
Layers: [layerARN]
144+
})));
145+
146+
const response = await Promise.all(commands);
147+
if (errored(response)) {
148+
console.log(JSON.stringify(response));
149+
throw new Error("Error While Refreshing Lambda For Latest Layer. If you feel this is a bug, raise a ticket on the repo.");
150+
}
151+
console.log("Refreshing Lambda For Latest Layer.");
152+
return response;
153+
}

index.js

Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,81 @@
1+
const { getInput, setFailed } = require('@actions/core');
2+
3+
const {
4+
publishLambdaLayer,
5+
publishS3LayerArchive,
6+
getArchiveSize,
7+
deleteTemporaryArchiveFromS3,
8+
refreshLambdaLayerVersion
9+
} = require('./helper');
10+
11+
12+
/**
 * Action entry point: read inputs, publish the layer (staging through S3
 * when the archive exceeds the direct-upload threshold), then optionally
 * point the listed functions at the new layer version. Any thrown error
 * fails the workflow step via setFailed.
 */
(async () => {
    try {
        const accessKeyId = getInput('access_key_id');
        const secretAccessKey = getInput('secret_access_key');
        const layerName = getInput('layer_name');
        const archive = getInput('archive');
        const region = getInput('region');
        // List inputs arrive as JSON strings; fall back to [] when absent.
        const runtimes = getInput('runtimes') ? JSON.parse(getInput('runtimes')) : [];
        const architectures = getInput('architectures') ? JSON.parse(getInput('architectures')) : [];
        const s3Bucket = getInput('s3_bucket');
        // BUG FIX: was an unguarded JSON.parse, which threw on an empty
        // input; guard it like the other list inputs.
        const functionNames = getInput('functions') ? JSON.parse(getInput('functions')) : [];

        console.log(runtimes, architectures);
        const creds = { region, accessKeyId, secretAccessKey };

        const size = getArchiveSize(archive);
        console.log(`Archive size is ${size}`);

        let layerResponse;
        if (size > 10000000) {
            // Archives over ~10MB must be staged through S3 first.
            if (!s3Bucket) {
                // BUG FIX: the original called setFailed but then fell
                // through and attempted the upload anyway; abort instead.
                setFailed('Param s3_bucket is required if layer size exceeds 10MB.');
                return;
            }
            // upload s3 archive
            await publishS3LayerArchive({
                ...creds,
                s3Bucket,
                archive,
                layerName
            });
            layerResponse = await publishLambdaLayer({
                ...creds,
                archive,
                s3Bucket,
                layerName,
                architectures,
                runtimes,
            });
            // Remove the staged archive so the bucket doesn't accrue charges.
            await deleteTemporaryArchiveFromS3({
                ...creds,
                s3Bucket,
                s3Key: `${layerName}.zip`
            });
        } else {
            layerResponse = await publishLambdaLayer({
                ...creds,
                archive,
                layerName,
                architectures,
                runtimes,
            });
        }

        if (functionNames.length) {
            // Point the listed functions at the freshly published version.
            await refreshLambdaLayerVersion({
                ...creds,
                functionNames,
                layerARN: layerResponse.LayerVersionArn
            });
        }
    } catch (err) {
        setFailed(err.message);
    }
})();

node_modules/.bin/fxparser

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

node_modules/.bin/uuid

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)