# Elastio_stack.yaml — AWS Backup / Elastio integration CloudFormation template.
---
# Quoted so YAML parsers do not type these date-like scalars as timestamps.
AWSTemplateFormatVersion: '2010-09-09'
Description: Deploys the Integration for Elastio with AWS Backup

Metadata:
  AWS::CloudFormation::Interface:
    ParameterGroups:
      - Label:
          default: Elastio Integration Configuration
        Parameters:
          - LogsBucketName
          - ElastioScanTag
          - StackBinaryURL
    ParameterLabels:
      LogsBucketName:
        default: S3 Bucket for Elastio Logs and Data
      ElastioScanTag:
        default: RecoveryPoint Tag to initiate Elastio Scan
      StackBinaryURL:
        default: The URL for the StackBinary Zip File

Parameters:
  LogsBucketName:
    Description: The S3 Bucket Name where the Job Logs and Reports are to be stored.
    Type: String
  ElastioScanTag:
    Description: The Tag in an AWS Backup RecoveryPoint that will initiate an Elastio Scan
    Type: String
    Default: 'ElastioScanTag'
  StackBinaryURL:
    Description: The URL for the StackBinary Zip File
    Type: String
    Default: 'https://elastio-artifacts-us-east-2.s3.us-east-2.amazonaws.com/contrib/aws-backup-elastio-integration.zip'

Resources:
  # Private, SSE-encrypted bucket used as a local cache for the solution zip
  # so Lambda can load code from an in-region/in-account bucket.
  # DeletionPolicy Delete + the CleanupBucket custom resource below let the
  # stack empty and remove the bucket on teardown.
  SolutionLocalCacheBucket:
    Type: AWS::S3::Bucket
    DeletionPolicy: Delete
    UpdateReplacePolicy: Retain
    Properties:
      BucketEncryption:
        ServerSideEncryptionConfiguration:
          - ServerSideEncryptionByDefault:
              SSEAlgorithm: AES256
      PublicAccessBlockConfiguration:
        BlockPublicAcls: true
        BlockPublicPolicy: true
        IgnorePublicAcls: true
        RestrictPublicBuckets: true

  # Custom resource: on stack Delete, empties the cache bucket so S3 allows
  # CloudFormation to remove it.
  CleanupSolutionLocalCacheBucketOnDelete:
    Type: Custom::CleanupBucket
    Properties:
      ServiceToken: !GetAtt GlobalCfnCodeReplicatorLambda.Arn
      S3BucketToCleanup: !Ref SolutionLocalCacheBucket

  # Custom resource: on Create/Update, downloads StackBinaryURL and uploads it
  # to the cache bucket under bin/<original filename>.
  CopySolutionToLocalCacheBucket:
    Type: Custom::ReplicateSolutionBinaries
    Properties:
      ServiceToken: !GetAtt GlobalCfnCodeReplicatorLambda.Arn
      SolutionDestinationBucket: !Ref SolutionLocalCacheBucket
      SolutionURL: !Ref StackBinaryURL

  # Inline Lambda backing both custom resources above. Handles
  # Create/Update (replicate zip) and Delete (empty bucket) requests and
  # always posts a response back to the CloudFormation ResponseURL.
  GlobalCfnCodeReplicatorLambda:
    Type: AWS::Lambda::Function
    Metadata:
      cfn_nag:
        rules_to_suppress:
          - id: W89
            reason: "Custom resource deployed in default VPC"
          - id: W92
            reason: "ReservedConcurrentExecutions not needed since this function runs once when CloudFormation deploys"
    Properties:
      Code:
        ZipFile: |-
          #!/usr/bin/env python
          # -*- coding: utf-8 -*-
          import json
          import boto3
          import urllib3
          import os
          import shutil
          from urllib.parse import urlparse

          physical_resource_id = 'GlobalCfnCodeReplicator'

          def process_bucket_cleanup_request(bucket_name):
              """Delete every object in the bucket so CloudFormation can remove it."""
              print(f"process_bucket_cleanup_request starting for bucket_name : {bucket_name}")
              s3 = boto3.resource('s3')
              bucket_to_delete = s3.Bucket(bucket_name)
              response = bucket_to_delete.objects.all().delete()
              print(f"process_bucket_cleanup_request all object delete done. Response : {response}")

          def download_url(url, save_path):
              """Stream the URL body to save_path without buffering it in memory."""
              c = urllib3.PoolManager()
              with c.request('GET', url, preload_content=False) as resp, open(save_path, 'wb') as out_file:
                  shutil.copyfileobj(resp, out_file)
                  resp.release_conn()

          def lambda_handler(event, context):
              # Fix: initialize before the try block. Previously response_data was
              # first assigned inside the try, so any exception raised before that
              # assignment caused a NameError in the except handler, the FAILED
              # response was never sent, and the stack hung until timeout.
              response_data = {}
              try:
                  print(f'Handling event : {event}')
                  request_type = event.get('RequestType')
                  solution_url = event['ResourceProperties'].get('SolutionURL')
                  solution_bucket = event['ResourceProperties'].get('SolutionDestinationBucket')
                  response_data = {
                      'RequestType': request_type,
                      'SolutionURL' : solution_url,
                      'SolutionDestinationBucket' : solution_bucket
                  }
                  if request_type == 'Create' or request_type == 'Update':
                      if solution_url:
                          print(f'downloading file from : {solution_url}')
                          a = urlparse(solution_url)
                          original_file_name = os.path.basename(a.path)
                          temp_file_name = '/tmp/'+original_file_name
                          download_url(solution_url,temp_file_name)
                          file_size = (os.stat(temp_file_name).st_size / 1024)
                          print(f'Downloaded report to File : {temp_file_name} , Size : {file_size}')
                          #Upload this to the Bucket
                          s3_client = boto3.client('s3')
                          print(f"uploading payload to : {solution_bucket} at {original_file_name}")
                          extraArgsForUpload = {'ACL':'bucket-owner-full-control', 'Tagging':'Source=StackBinaryURL'}
                          s3_client.upload_file(Filename=temp_file_name, Bucket=solution_bucket, Key='bin/' + original_file_name,ExtraArgs=extraArgsForUpload)
                  elif request_type == 'Delete':
                      solution_bucket = event['ResourceProperties'].get('S3BucketToCleanup')
                      if solution_bucket:
                          process_bucket_cleanup_request(solution_bucket)
                  send(event, context, 'SUCCESS', response_data, physical_resource_id)
              except Exception as e:
                  print(f'{e}')
                  send(event, context, 'FAILED', response_data, physical_resource_id)

          def send(event, context, response_status, response_data, physical_resource_id, no_echo=False):
              """PUT the custom-resource result to the CloudFormation ResponseURL."""
              http = urllib3.PoolManager()
              response_url = event['ResponseURL']
              json_response_body = json.dumps({
                  'Status': response_status,
                  'Reason': f'See the details in CloudWatch Log Stream: {context.log_stream_name}',
                  'PhysicalResourceId': physical_resource_id,
                  'StackId': event['StackId'],
                  'RequestId': event['RequestId'],
                  'LogicalResourceId': event['LogicalResourceId'],
                  'NoEcho': no_echo,
                  'Data': response_data
              }).encode('utf-8')
              headers = {
                  'content-type': '',
                  'content-length': str(len(json_response_body))
              }
              try:
                  # Best-effort: a failed PUT is only logged; raising here would
                  # mask the original handler outcome.
                  http.request('PUT', response_url,
                               body=json_response_body, headers=headers)
              except Exception as e: # pylint: disable = W0703
                  print(e)
      Description: Copy Solutions Binary to Local Cache Bucket
      Handler: index.lambda_handler
      Role: !GetAtt ElastioStatusHandlerLambdaRole.Arn
      Runtime: python3.10
      Architectures:
        - arm64
      Timeout: 300

  # Routes AWS Backup "Recovery Point State Change" events (default bus) to the
  # Elastio status handler Lambda.
  ProcessAWSBackupVaultStatusEventRuleForElastio:
    Type: AWS::Events::Rule
    Properties:
      Name: ProcessAWSBackupVaultStatusEventRuleForElastio
      Description: "Rule to direct AWS Backup Events to Elastio Status Handler Lambda"
      State: "ENABLED"
      EventPattern:
        source:
          - 'aws.backup'
        detail-type:
          - 'Recovery Point State Change'
      Targets:
        - Arn: !GetAtt
            - ElastioStatusHandlerLambda
            - Arn
          Id: "ProcessAWSBackupEventsUsingLambda"

  # Allows the AWS Backup rule above to invoke the handler Lambda.
  ProcessAWSBackupVaultStatusEventRuleForElastioInvokePermission:
    Type: 'AWS::Lambda::Permission'
    Properties:
      Action: 'lambda:InvokeFunction'
      FunctionName: !Ref ElastioStatusHandlerLambda
      Principal: events.amazonaws.com
      SourceArn: !Sub ${ProcessAWSBackupVaultStatusEventRuleForElastio.Arn}

  # Main handler: processes AWS Backup recovery-point events and Elastio iscan
  # results. Code is loaded from the local cache bucket populated by
  # CopySolutionToLocalCacheBucket (hence the DependsOn).
  ElastioStatusHandlerLambda:
    Type: AWS::Lambda::Function
    Metadata:
      cfn_nag:
        rules_to_suppress:
          - id: W89
            reason: "NA"
          - id: W92
            reason: "NA"
    DependsOn: CopySolutionToLocalCacheBucket
    Properties:
      Code:
        S3Bucket: !Ref SolutionLocalCacheBucket
        # NOTE(review): this key is coupled to the filename in the default
        # StackBinaryURL; if the parameter points at a differently-named zip,
        # this key will not match the replicated object — confirm before
        # overriding StackBinaryURL.
        S3Key: 'bin/aws-backup-elastio-integration.zip'
      Description: Handle AWS Backup and Elastio Scan results
      Handler: lambda_handler.handler
      Role: !GetAtt ElastioStatusHandlerLambdaRole.Arn
      Runtime: python3.10
      Architectures:
        - arm64
      Timeout: 900
      Environment:
        Variables:
          ElastioStatusEB: !Ref ElastioJobStatusEventBus
          LogsBucketName: !Ref LogsBucketName
          ElastioImportLambdaARN: !Sub "arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:elastio-bg-jobs-service-aws-backup-rp-import"
          ElastioScanTag: !Ref ElastioScanTag

  # Dedicated event bus that receives Elastio job-status events.
  ElastioJobStatusEventBus:
    Type: AWS::Events::EventBus
    Properties:
      Name: !Join ['', ['ElastioJobStatusEventBus-', !Ref 'AWS::AccountId']]

  # Allows the Elastio status rule below to invoke the handler Lambda.
  ElastioStatusEventBridgeInvokePermission:
    Type: 'AWS::Lambda::Permission'
    Properties:
      Action: 'lambda:InvokeFunction'
      FunctionName: !Ref ElastioStatusHandlerLambda
      Principal: events.amazonaws.com
      SourceArn: !Sub ${ElastioStatusEventRule.Arn}

  # Routes elastio.iscan events arriving on the dedicated bus to the handler.
  ElastioStatusEventRule:
    Type: AWS::Events::Rule
    Properties:
      Description: "Send Elastio events to Lambda"
      EventBusName: !Ref ElastioJobStatusEventBus
      State: "ENABLED"
      EventPattern:
        source:
          - 'elastio.iscan'
      Targets:
        - Arn: !GetAtt ElastioStatusHandlerLambda.Arn
          Id: "ElastioStatusEvent"

  # NOTE(review): Principal "*" permits ANY AWS account to PutEvents on this
  # bus. If cross-account publishing should be limited, add a Condition on
  # aws:PrincipalOrgID or restrict Principal to known account IDs.
  ElastioJobStatusEventBusPolicy:
    Type: AWS::Events::EventBusPolicy
    Properties:
      EventBusName: !Ref ElastioJobStatusEventBus
      StatementId: "ElastioStatusEventBridgePolicyStmt"
      Statement:
        Effect: "Allow"
        Principal: "*"
        Action: "events:PutEvents"
        Resource: !GetAtt "ElastioJobStatusEventBus.Arn"

  # Shared execution role for both Lambdas. F3/W11 (broad Action/Resource)
  # are deliberately suppressed via cfn_nag below.
  ElastioStatusHandlerLambdaRole:
    Type: 'AWS::IAM::Role'
    Metadata:
      cfn_nag:
        rules_to_suppress:
          - id: F3
          - id: W11
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service: lambda.amazonaws.com
            Action: 'sts:AssumeRole'
      ManagedPolicyArns:
        - !Sub 'arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole'
      Policies:
        # NOTE(review): invoke on Resource '*' is broad; consider scoping to
        # the elastio-bg-jobs-service-aws-backup-rp-import function ARN.
        - PolicyName: invokeLambda
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - lambda:InvokeFunction
                Resource: '*'
        - PolicyName: s3Permissions
          PolicyDocument:
            # Version added: when omitted, IAM defaults to the 2008-10-17
            # policy language.
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - kms:GenerateDataKey
                  - kms:Decrypt
                  - kms:Encrypt
                  - s3:PutObject*
                  - s3:GetObject*
                  - s3:DeleteObject
                  - s3:*BucketNotification
                  - s3:GetBucketLocation
                  - s3:ListBucket
                  - s3:ListBucketMultipartUploads
                  - s3:ListMultipartUploadParts
                  - s3:AbortMultipartUpload
                Resource:
                  - !Sub 'arn:${AWS::Partition}:s3:::${LogsBucketName}/*'
                  - !Sub 'arn:${AWS::Partition}:s3:::${LogsBucketName}'
                  - !Sub 'arn:${AWS::Partition}:s3:::${SolutionLocalCacheBucket}/*'
                  - !Sub 'arn:${AWS::Partition}:s3:::${SolutionLocalCacheBucket}'
        - PolicyName: logStreamPermissions
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - 'logs:CreateLogGroup'
                  - 'logs:CreateLogStream'
                  - 'logs:PutLogEvents'
                Resource: !Sub 'arn:${AWS::Partition}:logs:*:*:*'
        - PolicyName: backupPermissions
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - backup:ListTags
                  - ec2:DescribeTags
                Resource: '*'
        - PolicyName: secHubPermissions
          PolicyDocument:
            Version: '2012-10-17'
            Statement:
              - Effect: Allow
                Action:
                  - 'securityhub:BatchImportFindings'
                  - 'securityhub:CreateInsight'
                Resource:
                  - !Sub 'arn:${AWS::Partition}:securityhub:*:${AWS::AccountId}:product/*/*'
                  - !Sub 'arn:${AWS::Partition}:securityhub:*:${AWS::AccountId}:hub/default'

Outputs:
  StackName:
    Value: !Ref AWS::StackName