diff --git a/python/ecs-serviceconnect/.gitignore b/python/ecs-serviceconnect/.gitignore new file mode 100644 index 0000000000..37833f8beb --- /dev/null +++ b/python/ecs-serviceconnect/.gitignore @@ -0,0 +1,10 @@ +*.swp +package-lock.json +__pycache__ +.pytest_cache +.venv +*.egg-info + +# CDK asset staging directory +.cdk.staging +cdk.out diff --git a/python/ecs-serviceconnect/README.md b/python/ecs-serviceconnect/README.md new file mode 100644 index 0000000000..60223f448e --- /dev/null +++ b/python/ecs-serviceconnect/README.md @@ -0,0 +1,55 @@ + +# Welcome to your CDK Python project! + +This is a CDK example showcasing ECS Service Connect. ECS Service Connect was released in 2022 and provides customers with a way to build seamless communication between microservices. This example deploys a simple frontend container that can be accessed via an ALB URL; when its `/get-data` endpoint is hit, the frontend calls the backend container at data.scapp.local:5001 to retrieve data. + + +To manually create a virtualenv on macOS and Linux: + +``` +$ python3 -m venv .venv +``` + +After the init process completes and the virtualenv is created, you can use the following +step to activate your virtualenv. + +``` +$ source .venv/bin/activate +``` + +If you are on a Windows platform, you would activate the virtualenv like this: + +``` +% .venv\Scripts\activate.bat +``` + +Once the virtualenv is activated, you can install the required dependencies. + +``` +$ pip install -r requirements.txt +``` + +At this point you can now synthesize the CloudFormation template for this code. + +``` +$ cdk synth +``` + +To add additional dependencies, for example other CDK libraries, just add +them to your `requirements.txt` file and rerun the `pip install -r requirements.txt` +command. + +## Useful commands + + * `cdk ls` list all stacks in the app + * `cdk synth` emits the synthesized CloudFormation template + * `cdk deploy` deploy this stack to your default AWS account/region + * `cdk diff` compare deployed stack with current state + * `cdk docs` open CDK documentation + + +To deploy this stack, run `cdk deploy --all`. + +You can then see how the two containers communicate by running `curl <Load Balancer URL>/get-data` (the URL is emitted as a stack output), which returns an array from the backend service at its Service Connect domain. + +Enjoy!
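For readers who prefer a scripted check over a one-off `curl`, the sketch below polls the two endpoints the README describes. It is a minimal illustration only, not part of the example: it assumes the stack has already been deployed with `cdk deploy --all`, and the `ALB_URL` placeholder is hypothetical and must be replaced with the `Load Balancer URL` value from the stack outputs (the output name comes from the CfnOutput in `ecs/ecs_stack.py`).

```python
"""Minimal post-deploy smoke test for the ECS Service Connect example.

Assumes the stack is already deployed; replace ALB_URL with the
"Load Balancer URL" value printed in the CloudFormation outputs.
"""
import sys

import requests

ALB_URL = "http://REPLACE-WITH-YOUR-ALB-DNS-NAME"  # hypothetical placeholder


def check(path: str) -> dict:
    """Call one frontend route and return the decoded JSON body."""
    response = requests.get(f"{ALB_URL}{path}", timeout=10)
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    # "/" is served directly by the frontend container.
    print("frontend:", check("/"))
    # "/get-data" makes the frontend call the backend at data.scapp.local:5001
    # through ECS Service Connect, so a valid payload here shows the
    # service-to-service path works end to end.
    try:
        print("backend via frontend:", check("/get-data"))
    except requests.exceptions.RequestException as exc:
        sys.exit(f"Service Connect path failed: {exc}")
```

If `/get-data` returns the sample array defined in `data.py`, Service Connect name resolution and the security-group rules are working as intended.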
diff --git a/python/ecs-serviceconnect/app.py b/python/ecs-serviceconnect/app.py new file mode 100644 index 0000000000..9b947aeac9 --- /dev/null +++ b/python/ecs-serviceconnect/app.py @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 +import aws_cdk as cdk +from cdk_examples_service_connect.cdk_examples_service_connect_stack import CdkExamplesServiceConnectStack + + +app = cdk.App() +CdkExamplesServiceConnectStack(app, "CdkExamplesServiceConnectStack") + +app.synth() diff --git a/python/ecs-serviceconnect/cdk.json b/python/ecs-serviceconnect/cdk.json new file mode 100644 index 0000000000..db9e766761 --- /dev/null +++ b/python/ecs-serviceconnect/cdk.json @@ -0,0 +1,70 @@ +{ + "app": "python3 app.py", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "requirements*.txt", + "source.bat", + "**/__init__.py", + "**/__pycache__", + "tests" + ] + }, + "context": { + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true, + "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, + "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, + "@aws-cdk/aws-efs:denyAnonymousAccess": true, + "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, + "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, + "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true, + "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true, + "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true, + "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true, + "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true, + "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true, + 
"@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true, + "@aws-cdk/aws-codepipeline:defaultPipelineTypeToV2": true, + "@aws-cdk/aws-kms:reduceCrossAccountRegionPolicyScope": true, + "@aws-cdk/aws-eks:nodegroupNameAttribute": true, + "@aws-cdk/aws-ec2:ebsDefaultGp3Volume": true, + "@aws-cdk/aws-ecs:removeDefaultDeploymentAlarm": true, + "@aws-cdk/custom-resources:logApiResponseDataPropertyTrueDefault": false + } +} diff --git a/python/ecs-serviceconnect/cdk_examples_service_connect/__init__.py b/python/ecs-serviceconnect/cdk_examples_service_connect/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/ecs-serviceconnect/cdk_examples_service_connect/cdk_examples_service_connect_stack.py b/python/ecs-serviceconnect/cdk_examples_service_connect/cdk_examples_service_connect_stack.py new file mode 100644 index 0000000000..3d889afd56 --- /dev/null +++ b/python/ecs-serviceconnect/cdk_examples_service_connect/cdk_examples_service_connect_stack.py @@ -0,0 +1,37 @@ +from aws_cdk import ( + Stack, + aws_ec2 as ec2, +) +from constructs import Construct +from ecs.ecs_stack import EcsStack +from ecr.ecr_stack import EcrStack +class CdkExamplesServiceConnectStack(Stack): + + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + # Creating a shared VPC with public subnets and private subnets with NAT Gateways + vpc = ec2.Vpc(self, "ServiceConnectVPC", + ip_addresses=ec2.IpAddresses.cidr("10.0.0.0/16"), + create_internet_gateway=True, + max_azs=2, + nat_gateways=2, + enable_dns_hostnames=True, + enable_dns_support=True, + vpc_name="App-Mesh-VPC", + subnet_configuration=[ + ec2.SubnetConfiguration( + subnet_type=ec2.SubnetType.PUBLIC, + name="Public", + cidr_mask=24 + ), + ec2.SubnetConfiguration( + subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS, + name="Private", + cidr_mask=24 + ) + ] + ) + AWSRegion=Stack.of(self).region + AWSStackId=Stack.of(self).stack_id + ecr_stack = EcrStack(self, "EcrStack") + ecs_stack = EcsStack(self, "EcsStack", vpc=vpc, frontend_repository=ecr_stack.frontend_docker_asset, backend_data_repository=ecr_stack.backend_data_docker_asset) diff --git a/python/ecs-serviceconnect/ecr/__init__.py b/python/ecs-serviceconnect/ecr/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/ecs-serviceconnect/ecr/ecr_stack.py b/python/ecs-serviceconnect/ecr/ecr_stack.py new file mode 100644 index 0000000000..52de9669d5 --- /dev/null +++ b/python/ecs-serviceconnect/ecr/ecr_stack.py @@ -0,0 +1,46 @@ +from constructs import Construct +from aws_cdk.aws_ecr_assets import DockerImageAsset, Platform +import cdk_ecr_deployment as ecrdeploy + +from aws_cdk import ( + NestedStack, + aws_ecr as ecr, + Aws +) +class EcrStack(NestedStack): + + def __init__(self, scope: Construct, id: str, **kwargs, ) -> None: + super().__init__(scope, id, **kwargs ) + # Creates two ecr repositories that will host the docker images for the color teller gateway app and color teller app + FrontendRepository = ecr.Repository(self, "FrontendRepository", repository_name="frontend") + BackendDataRepository = ecr.Repository(self, "BackendDataRepository", repository_name="backend_data") + + # The docker images were built on a M1 Macbook Pro, you may have to rebuild your images + frontendAsset = DockerImageAsset(self, "frontendAsset", + directory="./services/frontend", + build_args={ + "SERVICE_B_URL_BUILD_ARG": "data.scapp.local" # This argument will be passed to the dockerfile and is the URL 
that the frontend app will use to call the backend + }, + + platform=Platform.LINUX_AMD64 + ) + dataAsset = DockerImageAsset(self, "dataAsset", + directory="./services/data", + + ) + # Deploying images to ECR + ecrdeploy.ECRDeployment(self, "DeployFrontendImage", + src=ecrdeploy.DockerImageName(frontendAsset.image_uri), + dest=ecrdeploy.DockerImageName(f"{Aws.ACCOUNT_ID}.dkr.ecr.{Aws.REGION}.amazonaws.com/frontend:latest") + ) + + + + ecrdeploy.ECRDeployment(self, "DeployBackendImage", + src=ecrdeploy.DockerImageName(dataAsset.image_uri), + dest=ecrdeploy.DockerImageName(f"{Aws.ACCOUNT_ID}.dkr.ecr.{Aws.REGION}.amazonaws.com/backend_data:latest") + ) + + # Exporting values to be used in other stacks + self.frontend_docker_asset = frontendAsset + self.backend_data_docker_asset = dataAsset \ No newline at end of file diff --git a/python/ecs-serviceconnect/ecs/__init__.py b/python/ecs-serviceconnect/ecs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/ecs-serviceconnect/ecs/ecs_stack.py b/python/ecs-serviceconnect/ecs/ecs_stack.py new file mode 100644 index 0000000000..e587033c8d --- /dev/null +++ b/python/ecs-serviceconnect/ecs/ecs_stack.py @@ -0,0 +1,162 @@ +from aws_cdk import ( + NestedStack, + aws_ec2 as ec2, + aws_servicediscovery as servicediscovery, + aws_ecs as ecs, + Duration, + aws_logs as logs, + aws_iam as iam, + aws_ecr_assets as ecr_assets, + aws_elasticloadbalancingv2 as elbv2, + RemovalPolicy, + CfnOutput + +) +from constructs import Construct + +class EcsStack(NestedStack): + + def __init__(self, scope: Construct, construct_id: str, vpc: ec2.Vpc, frontend_repository: ecr_assets.DockerImageAsset, backend_data_repository: ecr_assets.DockerImageAsset, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + # Creating the ECS cluster and the Cloud Map namespace + ecs_cluster = ecs.Cluster(self, "ECSCluster", + vpc=vpc, + cluster_name="App-Service-Connect-Cluster", + container_insights=True) + default_cloud_map_namespace=ecs_cluster.add_default_cloud_map_namespace(name="scapp.local", use_for_service_connect=True, type=servicediscovery.NamespaceType.DNS_PRIVATE) + # Creating the CloudWatch log group where ECS logs will be stored + ECSServiceLogGroup = logs.LogGroup(self, "ECSServiceLogGroup", + log_group_name=f"{ecs_cluster.cluster_name}-service", + removal_policy=RemovalPolicy.DESTROY, + retention=logs.RetentionDays.FIVE_DAYS, + ) + # Creating the task and execution IAM roles: the task role lets the containers read and write to CloudWatch, and the task + # execution role pulls images from ECR and writes container logs + ECSTaskIamRole = iam.Role(self, "ECSTaskIamRole", + assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"), + managed_policies=[ + iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchFullAccess"), + ], + ) + TaskExecutionRole = iam.Role(self, "TaskexecutionRole", + assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"), + managed_policies=[ + iam.ManagedPolicy.from_aws_managed_policy_name("AmazonEC2ContainerRegistryReadOnly"), + iam.ManagedPolicy.from_aws_managed_policy_name("CloudWatchLogsFullAccess"), + ], + ) + # ECS security group: this allows access from the load balancer and from within the VPC so that the + # ECS containers can talk to each other on port 5001 (which is the port that the backend uses) + ECSSecurityGroup = ec2.SecurityGroup(self, "ECSSecurityGroup", + vpc=vpc, + description="ECS Security Group", + allow_all_outbound=True, + ) + ECSSecurityGroup.add_ingress_rule(ec2.Peer.ipv4(vpc.vpc_cidr_block),
ec2.Port.tcp(5001), description="Allow TCP 5001 from within the VPC",) + # Task definitions for the frontend and backend + frontend_definition = ecs.FargateTaskDefinition( + self, "FrontendTaskDefinition", + family="frontend", + cpu=256, + memory_limit_mib=512, + task_role=ECSTaskIamRole, + execution_role=TaskExecutionRole + ) + backend_definition = ecs.FargateTaskDefinition( + self, "BackendTaskDefinition", + family="backend", + cpu=256, + memory_limit_mib=512, + task_role=ECSTaskIamRole, + execution_role=TaskExecutionRole + ) + + # Containers for each application; when the frontend is hit on /get-data it makes a call to the backend endpoint /data + frontend_container = frontend_definition.add_container("FrontendContainer", + container_name="frontend-app", + image=ecs.ContainerImage.from_docker_image_asset(frontend_repository), + port_mappings=[ + ecs.PortMapping( + container_port=5000, # Flask app is running on 5000 + host_port=5000, + name="frontend" # Name of the port mapping + ) + ], + logging=ecs.LogDriver.aws_logs(stream_prefix="ecs-logs")) + backend_container = backend_definition.add_container("BackendContainer", + image=ecs.ContainerImage.from_docker_image_asset(backend_data_repository), + port_mappings=[ + ecs.PortMapping( + container_port=5001, # Flask app is running on 5001 + host_port=5001, + name="data" # Name of the port mapping + + ) + ], + container_name="backend", + logging=ecs.LogDriver.aws_logs(stream_prefix="ecs-logs")) + # Creating the service definitions and port mappings + frontend_service = ecs.FargateService(self, "FrontendService", + cluster=ecs_cluster, + task_definition=frontend_definition, + desired_count=1, + max_healthy_percent=200, + min_healthy_percent=100, + vpc_subnets=ec2.SubnetSelection(one_per_az=True, subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), + security_groups=[ECSSecurityGroup], + service_connect_configuration=ecs.ServiceConnectProps( + namespace=default_cloud_map_namespace.namespace_name, + services=[ecs.ServiceConnectService( + port_mapping_name="frontend", # Logical name for the service + port=5000, # Container port + )]), + service_name="frontend-service") + backend_service = ecs.FargateService(self, "BackendService", + cluster=ecs_cluster, + task_definition=backend_definition, + desired_count=1, + max_healthy_percent=200, + min_healthy_percent=100, + vpc_subnets=ec2.SubnetSelection(one_per_az=True, subnet_type=ec2.SubnetType.PRIVATE_WITH_EGRESS), + security_groups=[ECSSecurityGroup], + service_connect_configuration=ecs.ServiceConnectProps( + namespace=default_cloud_map_namespace.namespace_name, + services=[ecs.ServiceConnectService( + port_mapping_name="data", # Logical name for the service + port=5001, # Container port + )]), + service_name="backend-service") + # Creating a public load balancer that listens on port 80 and forwards requests to the frontend ECS container; + # health checks are performed against port 5000 + public_lb_sg = ec2.SecurityGroup(self, "PublicLBSG", vpc=vpc, description="Public LB SG", allow_all_outbound=True) + target_group = elbv2.ApplicationTargetGroup( + self, "TargetGroup", + target_group_name="ecs-target-group", + vpc=vpc, + port=80, + targets=[frontend_service], + target_type=elbv2.TargetType.IP, + protocol=elbv2.ApplicationProtocol.HTTP, + health_check=elbv2.HealthCheck( + path="/", + port="5000", + interval=Duration.seconds(6), + timeout=Duration.seconds(5), + healthy_threshold_count=2, + unhealthy_threshold_count=2, + ), + ) + target_group.set_attribute(key="deregistration_delay.timeout_seconds", + value="120")
public_lb_sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(), connection=ec2.Port.tcp(80), description="Allow HTTP traffic") + public_lb = elbv2.ApplicationLoadBalancer(self, "FrontendLB", vpc=vpc, internet_facing=True, security_group=public_lb_sg, vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)) + public_lb.set_attribute(key="idle_timeout.timeout_seconds", value="30") + listener = public_lb.add_listener("Listener", port=80, default_action=elbv2.ListenerAction.forward(target_groups=[target_group])) + lb_rule = elbv2.ApplicationListenerRule( + self, "ListenerRule", + listener=listener, + priority=1, + action=elbv2.ListenerAction.forward(target_groups=[target_group]), + conditions=[elbv2.ListenerCondition.path_patterns(["*"])], + ) + CfnOutput(self, "Load Balancer URL", value=f"http://{public_lb.load_balancer_dns_name}") diff --git a/python/ecs-serviceconnect/requirements-dev.txt b/python/ecs-serviceconnect/requirements-dev.txt new file mode 100644 index 0000000000..927094516e --- /dev/null +++ b/python/ecs-serviceconnect/requirements-dev.txt @@ -0,0 +1 @@ +pytest==6.2.5 diff --git a/python/ecs-serviceconnect/requirements.txt b/python/ecs-serviceconnect/requirements.txt new file mode 100644 index 0000000000..4a9cf8bb14 --- /dev/null +++ b/python/ecs-serviceconnect/requirements.txt @@ -0,0 +1,3 @@ +aws-cdk-lib==2.147.1 +constructs>=10.0.0,<11.0.0 +cdk_ecr_deployment \ No newline at end of file diff --git a/python/ecs-serviceconnect/services/data/Dockerfile b/python/ecs-serviceconnect/services/data/Dockerfile new file mode 100644 index 0000000000..dd3e8eee67 --- /dev/null +++ b/python/ecs-serviceconnect/services/data/Dockerfile @@ -0,0 +1,20 @@ +# Use the official Python image +FROM --platform=linux/amd64 python:3.12-slim + +# Set the working directory +WORKDIR /app + +# Copy the current directory contents into the container +COPY . . 
+ +# Install all dependencies +RUN pip install -r requirements.txt + +# Set environment variables (optional) +ENV SERVICE_B_PORT=5001 + +# Expose the port your Flask app will run on +EXPOSE 5001 + +# Command to run the application +CMD ["python", "data.py"] diff --git a/python/ecs-serviceconnect/services/data/data.py b/python/ecs-serviceconnect/services/data/data.py new file mode 100644 index 0000000000..8e4f8622d0 --- /dev/null +++ b/python/ecs-serviceconnect/services/data/data.py @@ -0,0 +1,18 @@ +from flask import Flask, jsonify +import os + +app = Flask(__name__) + +@app.route('/data', methods=['GET']) +def get_data(): + """Returns some sample data from the backend.""" + sample_data = { + "message": "Hello from Service B!", + "data": [1, 2, 3, 4, 5] + } + return jsonify(sample_data) +@app.route('/', methods=['GET']) +def main(): + return jsonify({"message": "Hello from backend Flask!"}), 200 +if __name__ == '__main__': + app.run(host='0.0.0.0', debug=True, port=int(os.environ.get("SERVICE_B_PORT", 5001))) # Bind to all interfaces so the container port is reachable; the port comes from SERVICE_B_PORT diff --git a/python/ecs-serviceconnect/services/data/requirements.txt b/python/ecs-serviceconnect/services/data/requirements.txt new file mode 100644 index 0000000000..2077213c37 --- /dev/null +++ b/python/ecs-serviceconnect/services/data/requirements.txt @@ -0,0 +1 @@ +Flask \ No newline at end of file diff --git a/python/ecs-serviceconnect/services/frontend/Dockerfile b/python/ecs-serviceconnect/services/frontend/Dockerfile new file mode 100644 index 0000000000..3ec2fec85d --- /dev/null +++ b/python/ecs-serviceconnect/services/frontend/Dockerfile @@ -0,0 +1,22 @@ +# Use the official Python image +FROM --platform=linux/amd64 python:3.12-slim + +ARG SERVICE_B_URL_BUILD_ARG +ENV SERVICE_B_URL=$SERVICE_B_URL_BUILD_ARG + +# Set the working directory +WORKDIR /app + +# Copy the current directory contents into the container +COPY . .
+ +# Install all dependencies +RUN pip install -r requirements.txt + +# Set environment variables (optional) + +# Expose the port your Flask app will run on +EXPOSE 5000 + +# Command to run the application +CMD ["python", "frontend.py"] diff --git a/python/ecs-serviceconnect/services/frontend/frontend.py b/python/ecs-serviceconnect/services/frontend/frontend.py new file mode 100644 index 0000000000..bc3000f037 --- /dev/null +++ b/python/ecs-serviceconnect/services/frontend/frontend.py @@ -0,0 +1,26 @@ +from flask import Flask, jsonify +import requests +import os + +app = Flask(__name__) + +# Hostname for the backend service (service_b) from the environment variable +# on ECS the service B hostname will be data.scapp.local +SERVICE_B_URL = os.environ.get('SERVICE_B_URL', 'localhost') # Bare hostname; the scheme, port, and path are added below + +@app.route('/get-data', methods=['GET']) +def get_data(): + """Fetch data from the backend service (service_b).""" + try: + response = requests.get(f"http://{SERVICE_B_URL}:5001/data") + response.raise_for_status() # Raise an error for bad responses + data = response.json() # Parse the JSON response + return jsonify(data), 200 # Return data from service_b + except requests.exceptions.RequestException as e: + return jsonify({"error": str(e)}), 500 # Return error if the request fails + +@app.route('/', methods=['GET']) +def main(): + return jsonify({"message": "Hello from frontend Flask!"}), 200 +if __name__ == '__main__': + app.run(host='0.0.0.0', debug=True, port=5000) # Bind to all interfaces so the container port is reachable diff --git a/python/ecs-serviceconnect/services/frontend/requirements.txt b/python/ecs-serviceconnect/services/frontend/requirements.txt new file mode 100644 index 0000000000..0eb56cde55 --- /dev/null +++ b/python/ecs-serviceconnect/services/frontend/requirements.txt @@ -0,0 +1,2 @@ +Flask +requests \ No newline at end of file
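Because the Service Connect name data.scapp.local only resolves inside the cluster, it can be handy to exercise the same request path locally before building images or deploying. The sketch below is illustrative only and is not part of the example: it assumes it is run from the `services/` directory with Flask and requests installed, that ports 5000 and 5001 are free, and it swaps the Service Connect hostname for `localhost` via the same `SERVICE_B_URL` environment variable the frontend already reads.

```python
"""Local smoke test for the two Flask services, run from
python/ecs-serviceconnect/services/ before building any images.

A sketch only: assumes `python` on PATH can import Flask and requests,
and that ports 5000/5001 are free. On ECS, SERVICE_B_URL is the
Service Connect name data.scapp.local instead of localhost.
"""
import os
import subprocess
import time

import requests

if __name__ == "__main__":
    env = {**os.environ, "SERVICE_B_URL": "localhost", "SERVICE_B_PORT": "5001"}
    backend = subprocess.Popen(["python", "data/data.py"], env=env)
    frontend = subprocess.Popen(["python", "frontend/frontend.py"], env=env)
    try:
        time.sleep(2)  # crude wait for both dev servers to start
        resp = requests.get("http://localhost:5000/get-data", timeout=5)
        resp.raise_for_status()
        print(resp.json())  # expect the sample payload defined in data.py
    finally:
        frontend.terminate()
        backend.terminate()
```

The only difference from the deployed setup is the hostname; the frontend code path (`/get-data` building `http://<SERVICE_B_URL>:5001/data`) is identical.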