import os

from aws_cdk import (
    aws_batch as batch,
    aws_ec2 as ec2,
    aws_ecr_assets as ecr_assets,
    aws_ecs as ecs,
    aws_iam as iam,
    aws_lambda as _lambda,
    aws_s3 as s3,
    aws_s3_notifications,
    Duration,
    Size,
    Stack,
)
from constructs import Construct
1017
class FiboaSdaStack(Stack):
    """CDK stack wiring an S3 bucket to a Lambda listener and an AWS Batch
    (Fargate Spot) job queue for fiboa data ingestion.

    Resources created:
      * A VPC with private subnets for the Batch compute environment.
      * A Batch job queue (exposed as ``self.batch_queue``) fed by a
        Fargate Spot compute environment.
      * An ECS job definition that runs this repository's container image
        with the ``ingest-one`` command.
      * A container-image Lambda function triggered by ``OBJECT_CREATED``
        events under the ``fiboa/`` prefix of a newly created S3 bucket.
    """

    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # VPC for the Batch Fargate cluster. Jobs run in private subnets
        # with NAT egress so tasks can pull images and reach AWS APIs.
        vpc = ec2.Vpc(self, "VPC")

        # Batch job queue backed by a Fargate Spot compute environment.
        # NOTE(review): SubnetType.PRIVATE_WITH_NAT is deprecated in newer
        # CDK releases in favor of PRIVATE_WITH_EGRESS — confirm the CDK
        # version pinned by this project before switching.
        self.batch_queue = batch.JobQueue(self, "JobQueue")
        fargate_spot_environment = batch.FargateComputeEnvironment(
            self,
            "FargateSpotEnv",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT
            ),
            spot=True,
        )
        # Order 0: the only (and therefore preferred) compute environment.
        self.batch_queue.add_compute_environment(fargate_spot_environment, 0)

        # Task execution role: lets the Fargate agent pull the container
        # image and write logs on behalf of the job.
        task_execution_role = iam.Role(
            self,
            "TaskExecutionRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AmazonECSTaskExecutionRolePolicy"
                )
            ],
        )

        # Job definition submitted to the queue: builds and runs the
        # container image from the parent directory with the "ingest-one"
        # command on 2 vCPU / 16 GiB Fargate tasks.
        batch.EcsJobDefinition(
            self,
            "MyJobDef",
            container=batch.EcsFargateContainerDefinition(
                self,
                "FargateCDKJobDef",
                image=ecs.ContainerImage.from_asset(
                    directory=os.path.join(os.getcwd(), "..")
                ),
                command=["ingest-one"],
                memory=Size.gibibytes(16),
                cpu=2,
                execution_role=task_execution_role,
            ),
        )

        # Container-image Lambda that reacts to S3 object-created events.
        # TODO: create an IAM role with access to Batch (submit_job).
        # TODO: inject environment variables (e.g. queue / job definition).
        function = _lambda.Function(
            self,
            "fiboa-s3-listener",
            runtime=_lambda.Runtime.FROM_IMAGE,
            handler=_lambda.Handler.FROM_IMAGE,
            architecture=_lambda.Architecture.ARM_64,
            timeout=Duration.seconds(10),
            code=_lambda.EcrImageCode.from_asset_image(
                directory=os.path.join(os.getcwd(), "lambda-image"),
            ),
        )

        # Bucket whose uploads trigger the listener Lambda.
        bucket = s3.Bucket(self, "fiboa-sda-testing")

        # Invoke the Lambda on OBJECT_CREATED under the "fiboa/*" prefix.
        # NOTE(review): S3 notification prefixes are literal, not globs —
        # the "*" in "fiboa/*" is matched as a character; confirm whether
        # the intended prefix is simply "fiboa/".
        notification = aws_s3_notifications.LambdaDestination(function)
        bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            notification,
            s3.NotificationKeyFilter(prefix="fiboa/*"),
        )