diff --git a/README.md b/README.md
index 6efd14d7..e393c52c 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,8 @@ The software will only be used for signposting an individual to an appropriate s
- [Setup](#setup)
- [Prerequisites](#prerequisites)
- [Configuration](#configuration)
- - [Environment variables](#environment-variables)
+ - [Environment variables - Local](#environment-variables---local)
+ - [Environment variables - DEV, PROD or PRE-PROD](#environment-variables---dev-prod-or-pre-prod)
- [Usage](#usage)
- [Testing](#testing)
- [Sandbox and Specification](#sandbox-and-specification)
@@ -67,7 +68,7 @@ The following software packages, or their equivalents, are expected to be instal
### Configuration
-#### Environment variables
+#### Environment variables - Local
| Variable | Default | Description |
|--------------------------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
@@ -75,10 +76,24 @@ The following software packages, or their equivalents, are expected to be instal
| `AWS_DEFAULT_REGION` | `eu-west-1` | AWS Region |
| `AWS_SECRET_ACCESS_KEY` | `dummy_secret` | AWS Secret Access Key |
| `DYNAMODB_ENDPOINT` | `http://localhost:4566` | Endpoint for the app to access DynamoDB |
+| `S3_ENDPOINT` | `http://localhost:4566` | Endpoint for the app to access S3 |
| `ELIGIBILITY_TABLE_NAME` | `test_eligibility_datastore` | AWS DynamoDB table for person data. |
-| `LOG_LEVEL` | `WARNING` | Logging level. Must be one of `DEBUG`, `INFO`, `WARNING`, `ERROR` or `CRITICAL` as per [Logging Levels](https://docs.python.org/3/library/logging.html#logging-levels) |
+| `LOG_LEVEL` | `WARNING` | Logging level. Must be one of `DEBUG`, `INFO`, `WARNING`, `ERROR` or `CRITICAL` as per [Logging Levels](https://docs.python.org/3/library/logging.html#logging-levels) |
| `RULES_BUCKET_NAME` | `test-rules-bucket` | AWS S3 bucket from which to read rules. |
+#### Environment variables - DEV, PROD or PRE-PROD
+
+| Variable | Default | Description | Comments |
+|--------------------------|------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|
+| `AWS_DEFAULT_REGION` | `eu-west-1` | AWS Region | |
+| `AWS_ACCESS_KEY_ID`      | None                         | AWS Access Key                                                                                                                                                           | **AWS_ACCESS_KEY_ID** is set to None because it is provided automatically by the AWS environment.                               |
+| `AWS_SECRET_ACCESS_KEY`  | None                         | AWS Secret Access Key                                                                                                                                                    | **AWS_SECRET_ACCESS_KEY** is set to None because it is provided automatically by the AWS environment.                           |
+| `DYNAMODB_ENDPOINT`      | None                         | Endpoint for the app to access DynamoDB                                                                                                                                  | **DYNAMODB_ENDPOINT** is set to None so that the default AWS service endpoint is used.                                          |
+| `S3_ENDPOINT`            | None                         | Endpoint for the app to access S3                                                                                                                                        | **S3_ENDPOINT** is set to None so that the default AWS service endpoint is used.                                                |
+| `ELIGIBILITY_TABLE_NAME` | `test_eligibility_datastore` | AWS DynamoDB table for person data. | |
+| `LOG_LEVEL` | `WARNING` | Logging level. Must be one of `DEBUG`, `INFO`, `WARNING`, `ERROR` or `CRITICAL` as per [Logging Levels](https://docs.python.org/3/library/logging.html#logging-levels) | |
+| `RULES_BUCKET_NAME` | `test-rules-bucket` | AWS S3 bucket from which to read rules. | |
+
## Usage
After a successful installation, provide an informative example of how this project can be used. Additional code snippets, screenshots and demos work well in this space. You may also link to the other documentation resources, e.g. the [User Guide](./docs/user-guide.md) to demonstrate more use cases and to show more features.
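The split into "Local" and "DEV, PROD or PRE-PROD" tables mirrors the two branches of the `config()` helper changed further down in this diff. A minimal sketch of the local defaults, assuming LocalStack is listening on `http://localhost:4566` and `ENV` is unset:

```python
# Minimal sketch of the local defaults (assumes LocalStack on http://localhost:4566
# and that ENV is unset, so config() takes the local branch).
from eligibility_signposting_api.config import config

settings = config()

assert str(settings["dynamodb_endpoint"]) == "http://localhost:4566"
assert str(settings["s3_endpoint"]) == "http://localhost:4566"
assert settings["eligibility_table_name"] == "test_eligibility_datastore"
assert settings["rules_bucket_name"] == "test-rules-bucket"
```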
diff --git a/infrastructure/modules/dynamodb/kms.tf b/infrastructure/modules/dynamodb/kms.tf
index 29898963..381b978b 100644
--- a/infrastructure/modules/dynamodb/kms.tf
+++ b/infrastructure/modules/dynamodb/kms.tf
@@ -3,6 +3,7 @@ resource "aws_kms_key" "dynamodb_cmk" {
deletion_window_in_days = 14
is_enabled = true
enable_key_rotation = true
+ tags = var.tags
}
resource "aws_kms_alias" "dynamodb_cmk" {
diff --git a/infrastructure/modules/dynamodb/outputs.tf b/infrastructure/modules/dynamodb/outputs.tf
index f8bbdb7d..3a1680a0 100644
--- a/infrastructure/modules/dynamodb/outputs.tf
+++ b/infrastructure/modules/dynamodb/outputs.tf
@@ -5,3 +5,12 @@ output "arn" {
output "table_name" {
value = aws_dynamodb_table.dynamodb_table.name
}
+
+output "dynamodb_kms_key_arn" {
+ value = aws_kms_key.dynamodb_cmk.arn
+}
+
+output "dynamodb_kms_key_id" {
+ value = aws_kms_key.dynamodb_cmk.id
+}
+
diff --git a/infrastructure/modules/lambda/default_variables.tf b/infrastructure/modules/lambda/default_variables.tf
new file mode 120000
index 00000000..062daf61
--- /dev/null
+++ b/infrastructure/modules/lambda/default_variables.tf
@@ -0,0 +1 @@
+../_shared/default_variables.tf
\ No newline at end of file
diff --git a/infrastructure/modules/lambda/kms.tf b/infrastructure/modules/lambda/kms.tf
new file mode 100644
index 00000000..c5287618
--- /dev/null
+++ b/infrastructure/modules/lambda/kms.tf
@@ -0,0 +1,12 @@
+resource "aws_kms_key" "lambda_cmk" {
+ description = "${terraform.workspace == "default" ? "" : "${terraform.workspace}-"}${var.lambda_func_name} Master Key"
+ deletion_window_in_days = 14
+ is_enabled = true
+ enable_key_rotation = true
+ tags = var.tags
+}
+
+resource "aws_kms_alias" "lambda_cmk" {
+ name = "alias/${terraform.workspace == "default" ? "" : "${terraform.workspace}-"}${var.lambda_func_name}-cmk"
+ target_key_id = aws_kms_key.lambda_cmk.key_id
+}
diff --git a/infrastructure/modules/lambda/lambda.tf b/infrastructure/modules/lambda/lambda.tf
new file mode 100644
index 00000000..c4fb95ca
--- /dev/null
+++ b/infrastructure/modules/lambda/lambda.tf
@@ -0,0 +1,26 @@
+resource "aws_lambda_function" "eligibility_signposting_lambda" {
+ # If the file is not in the current working directory you will need to include a
+ # path.module in the filename.
+ filename = var.file_name
+ function_name = var.lambda_func_name
+ role = var.eligibility_lambda_role_arn
+ handler = var.handler
+
+ source_code_hash = filebase64sha256(var.file_name)
+
+ runtime = "python3.13"
+ timeout = 30
+ memory_size = 128 # Default
+
+ environment {
+ variables = {
+ ELIGIBILITY_TABLE_NAME = var.eligibility_status_table_name,
+ RULES_BUCKET_NAME = var.eligibility_rules_bucket_name,
+ ENV = var.environment
+ }
+ }
+ vpc_config {
+ subnet_ids = var.vpc_intra_subnets
+ security_group_ids = var.security_group_ids
+ }
+}
diff --git a/infrastructure/modules/lambda/outputs.tf b/infrastructure/modules/lambda/outputs.tf
new file mode 100644
index 00000000..dfb6abc1
--- /dev/null
+++ b/infrastructure/modules/lambda/outputs.tf
@@ -0,0 +1,6 @@
+output "aws_lambda_function_id" {
+ value = aws_lambda_function.eligibility_signposting_lambda.id
+}
+output "aws_lambda_function_arn" {
+ value = aws_lambda_function.eligibility_signposting_lambda.arn
+}
diff --git a/infrastructure/modules/lambda/variables.tf b/infrastructure/modules/lambda/variables.tf
new file mode 100644
index 00000000..23185dd6
--- /dev/null
+++ b/infrastructure/modules/lambda/variables.tf
@@ -0,0 +1,45 @@
+variable "workspace" {
+ description = "Usually the developer short code or the name of the environment."
+ type = string
+}
+
+variable "eligibility_lambda_role_arn" {
+ description = "lambda read role arn for dynamodb"
+ type = string
+}
+
+variable "lambda_func_name" {
+ description = "Name of the Lambda function"
+ type = string
+}
+
+variable "vpc_intra_subnets" {
+ description = "vpc private subnets for lambda"
+ type = list(string)
+}
+
+variable "security_group_ids" {
+ description = "security groups for lambda"
+ type = list(string)
+}
+
+variable "file_name" {
+ description = "path of the the zipped lambda"
+ type = string
+}
+
+variable "handler" {
+ description = "lambda handler name"
+ type = string
+}
+
+variable "eligibility_rules_bucket_name" {
+ description = "campaign config rules bucket name"
+ type = string
+}
+
+variable "eligibility_status_table_name" {
+ description = "eligibility datastore table name"
+ type = string
+}
+
diff --git a/infrastructure/modules/s3/outputs.tf b/infrastructure/modules/s3/outputs.tf
index 4958cedc..d6518eb5 100644
--- a/infrastructure/modules/s3/outputs.tf
+++ b/infrastructure/modules/s3/outputs.tf
@@ -5,3 +5,11 @@ output "storage_bucket_access_logs_arn" {
output "storage_bucket_access_logs_id" {
value = aws_s3_bucket.storage_bucket_access_logs.id
}
+
+output "storage_bucket_arn" {
+ value = aws_s3_bucket.storage_bucket.arn
+}
+
+output "storage_bucket_name" {
+ value = aws_s3_bucket.storage_bucket.bucket
+}
diff --git a/infrastructure/stacks/_shared/locals.tf b/infrastructure/stacks/_shared/locals.tf
index 604e0dcb..38331871 100644
--- a/infrastructure/stacks/_shared/locals.tf
+++ b/infrastructure/stacks/_shared/locals.tf
@@ -18,13 +18,13 @@ locals {
}
sso_role_patterns = {
- dev = "AWSReservedSSO_vdselid_dev_*"
- test = "AWSReservedSSO_vdselid_test_*"
+ dev = "AWSReservedSSO_vdselid_dev_*"
+ test = "AWSReservedSSO_vdselid_test_*"
preprod = "AWSReservedSSO_vdselid_preprod_*"
}
terraform_state_bucket_name = "eligibility-signposting-api-${var.environment}-tfstate"
- terraform_state_bucket_arn = "arn:aws:s3:::eligibility-signposting-api-${var.environment}-tfstate"
+ terraform_state_bucket_arn = "arn:aws:s3:::eligibility-signposting-api-${var.environment}-tfstate"
account_ids = {
dev = "448049830832"
diff --git a/infrastructure/stacks/api-layer/backends/dev.api-layer.tfbackend b/infrastructure/stacks/api-layer/backends/dev.api-layer.tfbackend
index 30deef03..14cab852 100644
--- a/infrastructure/stacks/api-layer/backends/dev.api-layer.tfbackend
+++ b/infrastructure/stacks/api-layer/backends/dev.api-layer.tfbackend
@@ -2,3 +2,4 @@ bucket = "eligibility-signposting-api-dev-tfstate"
key = "tfstate/api-layer.tfstate"
region = "eu-west-2"
encrypt = true
+use_lockfile = true
diff --git a/infrastructure/stacks/api-layer/cloudwatch.tf b/infrastructure/stacks/api-layer/cloudwatch.tf
new file mode 100644
index 00000000..f5081f21
--- /dev/null
+++ b/infrastructure/stacks/api-layer/cloudwatch.tf
@@ -0,0 +1,10 @@
+# CloudWatch Log Group for Lambda execution logs
+resource "aws_cloudwatch_log_group" "lambda_logs" {
+ name = "/aws/lambda/${module.eligibility_signposting_lambda_function.aws_lambda_function_id}"
+ retention_in_days = 14
+
+ tags = {
+ Name = "lambda-execution-logs"
+ Stack = local.stack_name
+ }
+}
diff --git a/infrastructure/stacks/api-layer/eligibility_dynamodb.tf b/infrastructure/stacks/api-layer/dynamodb.tf
similarity index 100%
rename from infrastructure/stacks/api-layer/eligibility_dynamodb.tf
rename to infrastructure/stacks/api-layer/dynamodb.tf
diff --git a/infrastructure/stacks/api-layer/iam_policies.tf b/infrastructure/stacks/api-layer/iam_policies.tf
index a53e798a..bad42048 100644
--- a/infrastructure/stacks/api-layer/iam_policies.tf
+++ b/infrastructure/stacks/api-layer/iam_policies.tf
@@ -1,33 +1,31 @@
# Read-only policy for DynamoDB
-data "aws_iam_policy_document" "dynamodb_read_policy" {
+data "aws_iam_policy_document" "dynamodb_read_policy_doc" {
statement {
actions = ["dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan"]
resources = [module.eligibility_status_table.arn]
}
}
+# Attach DynamoDB read policy to the Lambda role
+resource "aws_iam_role_policy" "lambda_dynamodb_read_policy" {
+ name = "DynamoDBReadAccess"
+ role = aws_iam_role.eligibility_lambda_role.id
+ policy = data.aws_iam_policy_document.dynamodb_read_policy_doc.json
+}
+
# Write-only policy for DynamoDB
-data "aws_iam_policy_document" "dynamodb_write_policy" {
+data "aws_iam_policy_document" "dynamodb_write_policy_doc" {
statement {
- actions = ["dynamodb:PutItem", "dynamodb:UpdateItem", "dynamodb:DeleteItem"]
+ actions = ["dynamodb:PutItem", "dynamodb:UpdateItem", "dynamodb:DeleteItem", "dynamodb:BatchWriteItem"]
resources = [module.eligibility_status_table.arn]
}
}
-# Attach read policy to Lambda role (only in IAM default workspace)
-resource "aws_iam_role_policy" "lambda_read_policy" {
- count = local.is_iam_owner ? 1 : 0
- name = "DynamoDBReadAccess"
- role = local.lambda_read_role
- policy = data.aws_iam_policy_document.dynamodb_read_policy.json
-}
-
-# Attach write policy to external write role (only in IAM default workspace)
-resource "aws_iam_role_policy" "external_write_policy" {
- count = local.is_iam_owner ? 1 : 0
+# Attach DynamoDB write policy to the external write role
+resource "aws_iam_role_policy" "external_dynamodb_write_policy" {
name = "DynamoDBWriteAccess"
- role = local.write_access_role
- policy = data.aws_iam_policy_document.dynamodb_write_policy.json
+ role = aws_iam_role.write_access_role.id
+ policy = data.aws_iam_policy_document.dynamodb_write_policy_doc.json
}
@@ -63,4 +61,104 @@ resource "aws_s3_bucket_policy" "storage_bucket_access_logs_policy" {
policy = data.aws_iam_policy_document.storage_bucket_access_logs_policy.json
}
+# Policy doc for S3 Rules bucket
+data "aws_iam_policy_document" "s3_rules_bucket_policy" {
+ statement {
+ sid = "AllowSSLRequestsOnly"
+ actions = [
+ "s3:GetObject",
+ "s3:ListBucket",
+ ]
+ resources = [
+ module.s3_rules_bucket.storage_bucket_arn,
+ "${module.s3_rules_bucket.storage_bucket_arn}/*",
+ ]
+ condition {
+ test = "Bool"
+ values = ["true"]
+ variable = "aws:SecureTransport"
+ }
+ }
+}
+
+
+# Attach S3 read policy to the Lambda role
+resource "aws_iam_role_policy" "lambda_s3_read_policy" {
+ name = "S3ReadAccess"
+ role = aws_iam_role.eligibility_lambda_role.id
+ policy = data.aws_iam_policy_document.s3_rules_bucket_policy.json
+}
+
+# Attach AWSLambdaVPCAccessExecutionRole to Lambda
+resource "aws_iam_role_policy_attachment" "AWSLambdaVPCAccessExecutionRole" {
+ role = aws_iam_role.eligibility_lambda_role.id
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
+}
+
+# Attach AWSLambdaBasicExecutionRole to Lambda
+resource "aws_iam_role_policy_attachment" "lambda_logs_policy_attachment" {
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
+ role = aws_iam_role.eligibility_lambda_role.name
+}
+
+# Policy doc for S3 Audit bucket
+data "aws_iam_policy_document" "s3_audit_bucket_policy" {
+ statement {
+ sid = "AllowSSLRequestsOnly"
+ actions = ["s3:*"]
+ resources = [
+ module.s3_audit_bucket.storage_bucket_arn,
+ "${module.s3_audit_bucket.storage_bucket_arn}/*",
+ ]
+ condition {
+ test = "Bool"
+ values = ["true"]
+ variable = "aws:SecureTransport"
+ }
+ }
+}
+
+# Attach the S3 audit bucket policy to the Lambda role
+resource "aws_iam_role_policy" "external_s3_write_policy" {
+ name = "S3WriteAccess"
+ role = aws_iam_role.eligibility_lambda_role.id
+ policy = data.aws_iam_policy_document.s3_audit_bucket_policy.json
+}
+
+## KMS
+data "aws_iam_policy_document" "kms_key_policy" {
+ statement {
+ sid = "EnableIamUserPermissions"
+ effect = "Allow"
+ principals {
+ type = "AWS"
+ identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
+ }
+ actions = ["kms:*"]
+ resources = ["*"]
+ }
+ statement {
+ sid = "Allow lambda role"
+ effect = "Allow"
+ principals {
+ type = "AWS"
+ identifiers = [
+ aws_iam_role.eligibility_lambda_role.arn
+ ]
+ }
+ actions = [
+ "kms:Decrypt"
+ ]
+ resources = [
+ module.eligibility_status_table.dynamodb_kms_key_arn
+ ]
+ }
+}
+
+# Attach the key policy (granting the Lambda role decrypt access) to the DynamoDB KMS key
+resource "aws_kms_key_policy" "kms_key" {
+ key_id = module.eligibility_status_table.dynamodb_kms_key_id
+ policy = data.aws_iam_policy_document.kms_key_policy.json
+}
+
diff --git a/infrastructure/stacks/api-layer/iam_roles.tf b/infrastructure/stacks/api-layer/iam_roles.tf
index 581c777c..7cd6d0fe 100644
--- a/infrastructure/stacks/api-layer/iam_roles.tf
+++ b/infrastructure/stacks/api-layer/iam_roles.tf
@@ -1,7 +1,9 @@
-module "iam_permissions_boundary" {
- source = "../iams-developer-roles"
+
+data "aws_iam_policy" "permissions_boundary" {
+ arn = "arn:aws:iam::${local.current_account_id}:policy/${upper(var.project_name)}-PermissionsBoundary"
}
+
# Lambda trust policy
data "aws_iam_policy_document" "lambda_assume_role" {
statement {
@@ -24,34 +26,16 @@ data "aws_iam_policy_document" "dps_assume_role" {
}
}
-# Lambda read role: only created in default workspace
-resource "aws_iam_role" "lambda_read_role" {
- count = local.is_iam_owner ? 1 : 0
- name = "lambda-read-role"
+
+resource "aws_iam_role" "eligibility_lambda_role" {
+ name = "eligibility_lambda-role"
assume_role_policy = data.aws_iam_policy_document.lambda_assume_role.json
- permissions_boundary = module.iam_permissions_boundary.permissions_boundary_arn
+ permissions_boundary = data.aws_iam_policy.permissions_boundary.arn
}
-# External write role: only created in default workspace
+
resource "aws_iam_role" "write_access_role" {
- count = local.is_iam_owner ? 1 : 0
name = "external-write-role"
assume_role_policy = data.aws_iam_policy_document.dps_assume_role.json
- permissions_boundary = module.iam_permissions_boundary.permissions_boundary_arn
-}
-
-# Data sources for referencing existing roles in non-default workspaces
-data "aws_iam_role" "lambda_read_role" {
- count = local.is_iam_owner ? 0 : 1
- name = "lambda-read-role"
-}
-
-data "aws_iam_role" "write_access_role" {
- count = local.is_iam_owner ? 0 : 1
- name = "external-write-role"
-}
-
-locals {
- lambda_read_role = local.is_iam_owner ? aws_iam_role.lambda_read_role[0].id : data.aws_iam_role.lambda_read_role[0].id
- write_access_role = local.is_iam_owner ? aws_iam_role.write_access_role[0].id : data.aws_iam_role.write_access_role[0].id
+ permissions_boundary = data.aws_iam_policy.permissions_boundary.arn
}
diff --git a/infrastructure/stacks/api-layer/lambda.tf b/infrastructure/stacks/api-layer/lambda.tf
new file mode 100644
index 00000000..e8ba85ff
--- /dev/null
+++ b/infrastructure/stacks/api-layer/lambda.tf
@@ -0,0 +1,26 @@
+data "aws_security_group" "main_sg" {
+ name = "main-security-group"
+}
+
+data "aws_subnet" "private_subnets" {
+ for_each = toset(["private-subnet-1", "private-subnet-2", "private-subnet-3"])
+
+ tags = {
+ Name = each.value
+ }
+}
+
+module "eligibility_signposting_lambda_function" {
+ source = "../../modules/lambda"
+ eligibility_lambda_role_arn = aws_iam_role.eligibility_lambda_role.arn
+ workspace = local.workspace
+ environment = var.environment
+ lambda_func_name = "eligibility_signposting_api"
+ security_group_ids = [data.aws_security_group.main_sg.id]
+ vpc_intra_subnets = [for v in data.aws_subnet.private_subnets : v.id]
+ file_name = "../../../dist/lambda.zip"
+ handler = "eligibility_signposting_api.app.lambda_handler"
+ eligibility_rules_bucket_name = module.s3_rules_bucket.storage_bucket_name
+ eligibility_status_table_name = module.eligibility_status_table.table_name
+}
+
diff --git a/infrastructure/stacks/api-layer/locals.tf b/infrastructure/stacks/api-layer/locals.tf
new file mode 100644
index 00000000..7d032685
--- /dev/null
+++ b/infrastructure/stacks/api-layer/locals.tf
@@ -0,0 +1,3 @@
+locals {
+ stack_name = "api-layer"
+}
diff --git a/infrastructure/stacks/api-layer/s3_buckets.tf b/infrastructure/stacks/api-layer/s3_buckets.tf
index a0ba7ea0..87f35ad8 100644
--- a/infrastructure/stacks/api-layer/s3_buckets.tf
+++ b/infrastructure/stacks/api-layer/s3_buckets.tf
@@ -1,13 +1,13 @@
module "s3_rules_bucket" {
source = "../../modules/s3"
- bucket_name = "eli-rules-3"
+ bucket_name = "eli-rules"
environment = var.environment
project_name = var.project_name
}
module "s3_audit_bucket" {
source = "../../modules/s3"
- bucket_name = "eli-audit-1"
+ bucket_name = "eli-audit"
environment = var.environment
project_name = var.project_name
bucket_expiration_days = 180
diff --git a/infrastructure/stacks/api-layer/state.tf b/infrastructure/stacks/api-layer/state.tf
index 4b9d1aa1..1ba86f4e 100644
--- a/infrastructure/stacks/api-layer/state.tf
+++ b/infrastructure/stacks/api-layer/state.tf
@@ -7,4 +7,5 @@ terraform {
version = "~> 5.6, != 5.71.0"
}
}
+ backend "s3" {}
}
diff --git a/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf b/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf
index 20693a47..a7b4e897 100644
--- a/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf
+++ b/infrastructure/stacks/iams-developer-roles/github_actions_policies.tf
@@ -1,7 +1,5 @@
# Terraform State Management Policy
-# Create policies only in the IAM default workspace
resource "aws_iam_policy" "terraform_state" {
- count = local.is_iam_owner ? 1 : 0
name = "terraform-state-management"
description = "Policy granting access to S3 bucket for Terraform state"
path = "/service-policies/"
@@ -35,7 +33,6 @@ resource "aws_iam_policy" "terraform_state" {
# API Infrastructure Management Policy
resource "aws_iam_policy" "api_infrastructure" {
- count = local.is_iam_owner ? 1 : 0
name = "api-infrastructure-management"
description = "Policy granting permissions to manage API infrastructure"
path = "/service-policies/"
@@ -78,17 +75,6 @@ resource "aws_iam_policy" "api_infrastructure" {
)
}
-# Data sources for policies in non-default workspaces
-data "aws_iam_policy" "terraform_state" {
- count = local.is_iam_owner ? 0 : 1
- arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/service-policies/terraform-state-management"
-}
-
-data "aws_iam_policy" "api_infrastructure" {
- count = local.is_iam_owner ? 0 : 1
- arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/service-policies/api-infrastructure-management"
-}
-
# Assume role policy document for GitHub Actions
data "aws_iam_policy_document" "github_actions_assume_role" {
statement {
@@ -99,7 +85,7 @@ data "aws_iam_policy_document" "github_actions_assume_role" {
principals {
type = "Federated"
identifiers = [
- local.aws_iam_openid_connect_provider_arn
+ aws_iam_openid_connect_provider.github.arn
]
}
@@ -117,20 +103,13 @@ data "aws_iam_policy_document" "github_actions_assume_role" {
}
}
-# Attach the policies to the role (only in default workspace)
+# Attach the policies to the role
resource "aws_iam_role_policy_attachment" "terraform_state" {
- count = local.is_iam_owner ? 1 : 0
- role = local.github_actions_iam_role_name
- policy_arn = local.terraform_state_iam_policy_arn
+ role = aws_iam_role.github_actions.name
+ policy_arn = aws_iam_policy.terraform_state.arn
}
resource "aws_iam_role_policy_attachment" "api_infrastructure" {
- count = local.is_iam_owner ? 1 : 0
- role = local.github_actions_iam_role_name
- policy_arn = local.api_infrastructure_iam_policy_arn
-}
-
-locals {
- terraform_state_iam_policy_arn = local.is_iam_owner ? aws_iam_policy.terraform_state[0].arn : data.aws_iam_policy.terraform_state[0].arn
- api_infrastructure_iam_policy_arn = local.is_iam_owner ? aws_iam_policy.api_infrastructure[0].arn : data.aws_iam_policy.api_infrastructure[0].arn
+ role = aws_iam_role.github_actions.name
+ policy_arn = aws_iam_policy.api_infrastructure.arn
}
diff --git a/infrastructure/stacks/iams-developer-roles/github_actions_role.tf b/infrastructure/stacks/iams-developer-roles/github_actions_role.tf
index 447cae5e..34946f16 100644
--- a/infrastructure/stacks/iams-developer-roles/github_actions_role.tf
+++ b/infrastructure/stacks/iams-developer-roles/github_actions_role.tf
@@ -1,6 +1,5 @@
-# GitHub Actions OIDC Provider: create only in default workspace
+# GitHub Actions OIDC Provider
resource "aws_iam_openid_connect_provider" "github" {
- count = local.is_iam_owner ? 1 : 0
url = "https://token.actions.githubusercontent.com"
client_id_list = ["sts.amazonaws.com"]
thumbprint_list = ["6938fd4d98bab03faadb97b34396831e3780aea1"]
@@ -13,18 +12,12 @@ resource "aws_iam_openid_connect_provider" "github" {
)
}
-# Data source to reference existing OIDC provider in non-default workspaces
-data "aws_iam_openid_connect_provider" "github" {
- count = local.is_iam_owner ? 0 : 1
- url = "https://token.actions.githubusercontent.com"
-}
-# GitHub Actions Role: create only in default workspace
+# GitHub Actions Role
resource "aws_iam_role" "github_actions" {
- count = local.is_iam_owner ? 1 : 0
name = "github-actions-api-deployment-role"
description = "Role for GitHub Actions to deploy infrastructure via Terraform"
- permissions_boundary = local.permissions_boundary_arn
+ permissions_boundary = aws_iam_policy.permissions_boundary.arn
path = "/service-roles/"
# Trust policy allowing GitHub Actions to assume the role
@@ -37,14 +30,3 @@ resource "aws_iam_role" "github_actions" {
}
)
}
-
-# Data source to reference existing GitHub Actions role in non-default workspaces
-data "aws_iam_role" "github_actions" {
- count = local.is_iam_owner ? 0 : 1
- name = "github-actions-api-deployment-role"
-}
-
-locals {
- github_actions_iam_role_name = local.is_iam_owner ? aws_iam_role.github_actions[0].name : data.aws_iam_role.github_actions[0].name
- aws_iam_openid_connect_provider_arn=local.is_iam_owner ? aws_iam_openid_connect_provider.github[0].arn : data.aws_iam_openid_connect_provider.github[0].arn
-}
diff --git a/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf b/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf
index b35dbb2a..f6555f1b 100644
--- a/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf
+++ b/infrastructure/stacks/iams-developer-roles/iams_permissions_boundary.tf
@@ -65,9 +65,8 @@ data "aws_iam_policy_document" "permissions_boundary" {
}
}
-# Permissions Boundary policy created only in owner workspace
+# Permissions Boundary policy
resource "aws_iam_policy" "permissions_boundary" {
- count = local.is_iam_owner ? 1 : 0
name = "${upper(var.project_name)}-PermissionsBoundary"
description = "Allows access to AWS services in the regions the client uses only"
policy = data.aws_iam_policy_document.permissions_boundary.json
@@ -79,14 +78,3 @@ resource "aws_iam_policy" "permissions_boundary" {
}
)
}
-
-# Data source for non-owner workspaces (using ARN)
-data "aws_iam_policy" "permissions_boundary" {
- count = local.is_iam_owner ? 0 : 1
- arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/${upper(var.project_name)}-PermissionsBoundary"
-}
-
-# Local to always reference the correct policy ARN
-locals {
- permissions_boundary_arn = local.is_iam_owner ? aws_iam_policy.permissions_boundary[0].arn : data.aws_iam_policy.permissions_boundary[0].arn
-}
diff --git a/infrastructure/stacks/iams-developer-roles/outputs.tf b/infrastructure/stacks/iams-developer-roles/outputs.tf
deleted file mode 100644
index 2d4cb326..00000000
--- a/infrastructure/stacks/iams-developer-roles/outputs.tf
+++ /dev/null
@@ -1,14 +0,0 @@
-output "terraform_developer_role_arn" {
- description = "ARN of the Terraform developer role"
- value = local.terraform_developer_role_arn
-}
-
-output "assume_role_command" {
- description = "Command to assume the Terraform developer role"
- value = "aws sts assume-role --role-arn ${local.terraform_developer_role_arn} --role-session-name TerraformSession"
-}
-
-output "permissions_boundary_arn" {
- description = "ARN of the permissions boundary policy"
- value = local.permissions_boundary_arn
-}
diff --git a/infrastructure/stacks/iams-developer-roles/terraform_developer_policies.tf b/infrastructure/stacks/iams-developer-roles/terraform_developer_policies.tf
index 2389a952..21db5927 100644
--- a/infrastructure/stacks/iams-developer-roles/terraform_developer_policies.tf
+++ b/infrastructure/stacks/iams-developer-roles/terraform_developer_policies.tf
@@ -180,7 +180,6 @@ data "aws_iam_policy_document" "terraform_developer_policy" {
# Create policy from document
resource "aws_iam_policy" "terraform_developer_policy" {
- count = local.is_iam_owner ? 1 : 0
name = "terraform-developer-policy"
description = "Policy for terraform developers to manage resources"
policy = data.aws_iam_policy_document.terraform_developer_policy.json
@@ -188,24 +187,6 @@ resource "aws_iam_policy" "terraform_developer_policy" {
# Attach policy to role
resource "aws_iam_role_policy_attachment" "terraform_developer_attachment" {
- count = local.is_iam_owner ? 1 : 0
- role = local.terraform_developer_role_name
- policy_arn = local.terraform_developer_policy_arn
-}
-
-data "aws_iam_role" "terraform_developer" {
- count = local.is_iam_owner ? 0 : 1
- name = "terraform-developer-role"
-}
-
-
-data "aws_iam_policy" "terraform_developer_policy" {
- count = local.is_iam_owner ? 0 : 1
- arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:policy/terraform-developer-policy"
-}
-
-locals {
- terraform_developer_policy_arn = local.is_iam_owner ? aws_iam_policy.terraform_developer_policy[0].arn : data.aws_iam_policy.terraform_developer_policy[0].arn
-
- terraform_developer_role_arn = local.is_iam_owner ? aws_iam_role.terraform_developer[0].arn : data.aws_iam_role.terraform_developer[0].arn
+ role = aws_iam_role.terraform_developer.name
+ policy_arn = aws_iam_policy.terraform_developer_policy.arn
}
diff --git a/infrastructure/stacks/iams-developer-roles/terraform_developer_role.tf b/infrastructure/stacks/iams-developer-roles/terraform_developer_role.tf
index 83e2a5a3..571da4c4 100644
--- a/infrastructure/stacks/iams-developer-roles/terraform_developer_role.tf
+++ b/infrastructure/stacks/iams-developer-roles/terraform_developer_role.tf
@@ -1,9 +1,8 @@
resource "aws_iam_role" "terraform_developer" {
- count = local.is_iam_owner ? 1 : 0
name = "terraform-developer-role"
description = "Role for developers to plan and apply Terraform changes"
assume_role_policy = data.aws_iam_policy_document.terraform_developer_assume_role.json
- permissions_boundary = local.permissions_boundary_arn # Attach permissions boundary
+ permissions_boundary = aws_iam_policy.permissions_boundary.arn # Attach permissions boundary
max_session_duration = 14400 # 4 hours
tags = merge(
@@ -13,8 +12,3 @@ resource "aws_iam_role" "terraform_developer" {
}
)
}
-
-locals {
- terraform_developer_role_name = local.is_iam_owner ? aws_iam_role.terraform_developer[0].name : data.aws_iam_role.terraform_developer[0].name
-}
-
diff --git a/infrastructure/stacks/networking/internet_gateway.tf b/infrastructure/stacks/networking/internet_gateway.tf
index f3b2bc69..b2445c2d 100644
--- a/infrastructure/stacks/networking/internet_gateway.tf
+++ b/infrastructure/stacks/networking/internet_gateway.tf
@@ -1,7 +1,7 @@
resource "aws_internet_gateway" "vpc_external_access" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "internet-gateway",
+ Name = "internet-gateway",
Stack = local.stack_name
}
}
diff --git a/infrastructure/stacks/networking/network_acls.tf b/infrastructure/stacks/networking/network_acls.tf
index 56620d64..7a9ca84d 100644
--- a/infrastructure/stacks/networking/network_acls.tf
+++ b/infrastructure/stacks/networking/network_acls.tf
@@ -1,6 +1,6 @@
# Network ACL for Private Subnets
resource "aws_network_acl" "private" {
- vpc_id = aws_vpc.main.id
+ vpc_id = aws_vpc.main.id
subnet_ids = [
aws_subnet.private_1.id,
aws_subnet.private_2.id,
@@ -11,7 +11,7 @@ resource "aws_network_acl" "private" {
egress {
rule_no = 100
action = "allow"
- cidr_block = local.vpc_cidr_block
+ cidr_block = "0.0.0.0/0"
protocol = -1
from_port = 0
to_port = 0
@@ -45,7 +45,7 @@ resource "aws_network_acl" "private" {
# Network ACL for Public Subnets
resource "aws_network_acl" "public" {
- vpc_id = aws_vpc.main.id
+ vpc_id = aws_vpc.main.id
subnet_ids = [
aws_subnet.public_1.id,
aws_subnet.public_2.id,
diff --git a/infrastructure/stacks/networking/route_tables.tf b/infrastructure/stacks/networking/route_tables.tf
index 34967398..44fa0619 100644
--- a/infrastructure/stacks/networking/route_tables.tf
+++ b/infrastructure/stacks/networking/route_tables.tf
@@ -2,7 +2,7 @@
resource "aws_route_table" "public_1" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "public-route-1",
+ Name = "public-route-1",
Stack = local.stack_name
}
}
@@ -10,7 +10,7 @@ resource "aws_route_table" "public_1" {
resource "aws_route_table" "public_2" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "public-route-2",
+ Name = "public-route-2",
Stack = local.stack_name
}
}
@@ -18,7 +18,7 @@ resource "aws_route_table" "public_2" {
resource "aws_route_table" "public_3" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "public-route-3",
+ Name = "public-route-3",
Stack = local.stack_name
}
}
@@ -43,7 +43,7 @@ resource "aws_route_table_association" "public_3" {
resource "aws_route_table" "private_1" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "private-route-1",
+ Name = "private-route-1",
Stack = local.stack_name
}
}
@@ -51,7 +51,7 @@ resource "aws_route_table" "private_1" {
resource "aws_route_table" "private_2" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "private-route-2",
+ Name = "private-route-2",
Stack = local.stack_name
}
}
@@ -59,7 +59,7 @@ resource "aws_route_table" "private_2" {
resource "aws_route_table" "private_3" {
vpc_id = aws_vpc.main.id
tags = {
- Name = "private-route-3",
+ Name = "private-route-3",
Stack = local.stack_name
}
}
diff --git a/infrastructure/stacks/networking/state.tf b/infrastructure/stacks/networking/state.tf
index 30b6dd2e..4c95cc5d 100644
--- a/infrastructure/stacks/networking/state.tf
+++ b/infrastructure/stacks/networking/state.tf
@@ -8,9 +8,9 @@ terraform {
}
}
backend "s3" {
- bucket = "eligibility-signposting-api-dev-tfstate"
- key = "tfstate/networking.tfstate"
- region = "eu-west-2"
+ bucket = "eligibility-signposting-api-dev-tfstate"
+ key = "tfstate/networking.tfstate"
+ region = "eu-west-2"
use_lockfile = true
}
}
diff --git a/infrastructure/stacks/networking/subnets.tf b/infrastructure/stacks/networking/subnets.tf
index c0a0b3bb..9f74dae3 100644
--- a/infrastructure/stacks/networking/subnets.tf
+++ b/infrastructure/stacks/networking/subnets.tf
@@ -5,7 +5,7 @@ resource "aws_subnet" "public_1" {
availability_zone = local.availability_zone_1
map_public_ip_on_launch = false
tags = {
- Name = "public-subnet-1",
+ Name = "public-subnet-1",
Stack = local.stack_name
}
}
@@ -16,7 +16,7 @@ resource "aws_subnet" "public_2" {
availability_zone = local.availability_zone_2
map_public_ip_on_launch = false
tags = {
- Name = "public-subnet-2",
+ Name = "public-subnet-2",
Stack = local.stack_name
}
}
@@ -27,7 +27,7 @@ resource "aws_subnet" "public_3" {
availability_zone = local.availability_zone_3
map_public_ip_on_launch = false
tags = {
- Name = "public-subnet-3",
+ Name = "public-subnet-3",
Stack = local.stack_name
}
}
@@ -38,7 +38,7 @@ resource "aws_subnet" "private_1" {
cidr_block = local.private_subnet_1_cidr
availability_zone = local.availability_zone_1
tags = {
- Name = "private-subnet-1",
+ Name = "private-subnet-1",
Stack = local.stack_name
}
}
@@ -48,7 +48,7 @@ resource "aws_subnet" "private_2" {
cidr_block = local.private_subnet_2_cidr
availability_zone = local.availability_zone_2
tags = {
- Name = "private-subnet-2",
+ Name = "private-subnet-2",
Stack = local.stack_name
}
}
@@ -58,7 +58,7 @@ resource "aws_subnet" "private_3" {
cidr_block = local.private_subnet_3_cidr
availability_zone = local.availability_zone_3
tags = {
- Name = "private-subnet-3",
+ Name = "private-subnet-3",
Stack = local.stack_name
}
}
diff --git a/src/eligibility_signposting_api/config.py b/src/eligibility_signposting_api/config.py
index ccd8d6af..8ff9a317 100644
--- a/src/eligibility_signposting_api/config.py
+++ b/src/eligibility_signposting_api/config.py
@@ -19,14 +19,32 @@
@cache
def config() -> dict[str, Any]:
+ eligibility_table_name = TableName(os.getenv("ELIGIBILITY_TABLE_NAME", "test_eligibility_datastore"))
+ rules_bucket_name = BucketName(os.getenv("RULES_BUCKET_NAME", "test-rules-bucket"))
+ aws_default_region = AwsRegion(os.getenv("AWS_DEFAULT_REGION", "eu-west-1"))
+ log_level = LOG_LEVEL
+
+ if os.getenv("ENV"):
+ return {
+ "aws_access_key_id": None,
+ "aws_default_region": aws_default_region,
+ "aws_secret_access_key": None,
+ "dynamodb_endpoint": None,
+ "eligibility_table_name": eligibility_table_name,
+ "s3_endpoint": None,
+ "rules_bucket_name": rules_bucket_name,
+ "log_level": log_level,
+ }
+
return {
"aws_access_key_id": AwsAccessKey(os.getenv("AWS_ACCESS_KEY_ID", "dummy_key")),
- "aws_default_region": AwsRegion(os.getenv("AWS_DEFAULT_REGION", "eu-west-1")),
+ "aws_default_region": aws_default_region,
"aws_secret_access_key": AwsSecretAccessKey(os.getenv("AWS_SECRET_ACCESS_KEY", "dummy_secret")),
"dynamodb_endpoint": URL(os.getenv("DYNAMODB_ENDPOINT", "http://localhost:4566")),
- "eligibility_table_name": TableName(os.getenv("ELIGIBILITY_TABLE_NAME", "test_eligibility_datastore")),
- "log_level": LOG_LEVEL,
- "rules_bucket_name": BucketName(os.getenv("RULES_BUCKET_NAME", "test-rules-bucket")),
+ "eligibility_table_name": eligibility_table_name,
+ "s3_endpoint": URL(os.getenv("S3_ENDPOINT", "http://localhost:4566")),
+ "rules_bucket_name": rules_bucket_name,
+ "log_level": log_level,
}
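A minimal sketch of the new `ENV` switch; this assumes it is exercised locally by setting `ENV` by hand, whereas in a deployed environment the Lambda module above injects `ENV` and the execution role supplies the credentials:

```python
# Minimal sketch (hypothetical local check of the deployed-environment branch).
import os

from eligibility_signposting_api.config import config

os.environ["ENV"] = "dev"  # any non-empty value selects the deployed branch
config.cache_clear()       # config() is cached with functools.cache, so clear it first

settings = config()
assert settings["dynamodb_endpoint"] is None   # boto3 will fall back to the regional endpoint
assert settings["s3_endpoint"] is None
assert settings["aws_access_key_id"] is None   # credentials are resolved by the Lambda runtime
```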
diff --git a/src/eligibility_signposting_api/repos/factory.py b/src/eligibility_signposting_api/repos/factory.py
index c65d8539..5eb53088 100644
--- a/src/eligibility_signposting_api/repos/factory.py
+++ b/src/eligibility_signposting_api/repos/factory.py
@@ -27,11 +27,11 @@ def boto3_session_factory(
def dynamodb_resource_factory(
session: Session, dynamodb_endpoint: Annotated[URL, Inject(param="dynamodb_endpoint")]
) -> ServiceResource:
- return session.resource("dynamodb", endpoint_url=str(dynamodb_endpoint))
+ endpoint_url = str(dynamodb_endpoint) if dynamodb_endpoint is not None else None
+ return session.resource("dynamodb", endpoint_url=endpoint_url)
@service(qualifier="s3")
-def s3_service_factory(
- session: Session, dynamodb_endpoint: Annotated[URL, Inject(param="dynamodb_endpoint")]
-) -> BaseClient:
- return session.client("s3", endpoint_url=str(dynamodb_endpoint))
+def s3_service_factory(session: Session, s3_endpoint: Annotated[URL, Inject(param="s3_endpoint")]) -> BaseClient:
+ endpoint_url = str(s3_endpoint) if s3_endpoint is not None else None
+ return session.client("s3", endpoint_url=endpoint_url)
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 07dd7681..33929af6 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -165,6 +165,7 @@ def flask_function(lambda_client: BaseClient, iam_role: str, lambda_zip: Path) -
Environment={
"Variables": {
"DYNAMODB_ENDPOINT": os.getenv("LOCALSTACK_INTERNAL_ENDPOINT", "http://localstack:4566/"),
+ "S3_ENDPOINT": os.getenv("LOCALSTACK_INTERNAL_ENDPOINT", "http://localstack:4566/"),
"AWS_REGION": AWS_REGION,
"LOG_LEVEL": "DEBUG",
}
diff --git a/tests/unit/repos/__init__.py b/tests/unit/repos/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unit/repos/test_factory.py b/tests/unit/repos/test_factory.py
new file mode 100644
index 00000000..cd16bc07
--- /dev/null
+++ b/tests/unit/repos/test_factory.py
@@ -0,0 +1,56 @@
+from unittest.mock import MagicMock
+
+import pytest
+from boto3 import Session
+from boto3.resources.base import ServiceResource
+from botocore.client import BaseClient
+from yarl import URL
+
+from eligibility_signposting_api.repos.factory import dynamodb_resource_factory, s3_service_factory
+
+
+@pytest.fixture
+def mock_session() -> Session:
+ return MagicMock(spec=Session)
+
+
+def test_dynamodb_resource_factory_with_endpoint(mock_session: Session):
+ mock_resource = MagicMock(spec=ServiceResource)
+ mock_session.resource = MagicMock(return_value=mock_resource)
+ endpoint = URL("http://localhost:4566")
+
+ result = dynamodb_resource_factory(mock_session, endpoint)
+
+ mock_session.resource.assert_called_once_with("dynamodb", endpoint_url="http://localhost:4566")
+ assert result is mock_resource
+
+
+def test_dynamodb_resource_factory_without_endpoint(mock_session):
+ mock_resource = MagicMock(spec=ServiceResource)
+ mock_session.resource = MagicMock(return_value=mock_resource)
+
+ result = dynamodb_resource_factory(mock_session, None)
+
+ mock_session.resource.assert_called_once_with("dynamodb", endpoint_url=None)
+ assert result is mock_resource
+
+
+def test_s3_service_factory_with_endpoint(mock_session):
+ mock_client = MagicMock(spec=BaseClient)
+ mock_session.client = MagicMock(return_value=mock_client)
+ endpoint = URL("http://localhost:4566")
+
+ result = s3_service_factory(mock_session, endpoint)
+
+ mock_session.client.assert_called_once_with("s3", endpoint_url="http://localhost:4566")
+ assert result is mock_client
+
+
+def test_s3_service_factory_without_endpoint(mock_session):
+ mock_client = MagicMock(spec=BaseClient)
+ mock_session.client = MagicMock(return_value=mock_client)
+
+ result = s3_service_factory(mock_session, None)
+
+ mock_session.client.assert_called_once_with("s3", endpoint_url=None)
+ assert result is mock_client
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py
new file mode 100644
index 00000000..20da43fb
--- /dev/null
+++ b/tests/unit/test_config.py
@@ -0,0 +1,50 @@
+import os
+
+import pytest
+from yarl import URL
+
+from eligibility_signposting_api.config import LOG_LEVEL, AwsAccessKey, AwsRegion, AwsSecretAccessKey, config
+from eligibility_signposting_api.repos.eligibility_repo import TableName
+from eligibility_signposting_api.repos.rules_repo import BucketName
+
+
+@pytest.fixture(autouse=True)
+def clear_config_cache(monkeypatch):
+ config.cache_clear()
+ monkeypatch.delenv("ENV", raising=False)
+
+
+def test_config_with_env_variable(monkeypatch):
+ # Given:
+ monkeypatch.setenv("ENV", "PROD")
+
+ # When:
+ config_data_with_env = config()
+
+ # Then:
+ assert os.getenv("ENV") == "PROD"
+ assert config_data_with_env["aws_access_key_id"] is None
+ assert config_data_with_env["aws_secret_access_key"] is None
+ assert config_data_with_env["aws_default_region"] == AwsRegion("eu-west-1")
+ assert config_data_with_env["dynamodb_endpoint"] is None
+ assert config_data_with_env["eligibility_table_name"] == TableName("test_eligibility_datastore")
+ assert config_data_with_env["s3_endpoint"] is None
+ assert config_data_with_env["rules_bucket_name"] == BucketName("test-rules-bucket")
+ assert config_data_with_env["log_level"] == LOG_LEVEL
+
+
+def test_config_without_env_variable():
+ # Given: The environment variable "ENV" isn't set
+ # When:
+ config_data_without_env = config()
+
+ # Then:
+ assert os.getenv("ENV") is None
+ assert config_data_without_env["aws_access_key_id"] == AwsAccessKey("dummy_key")
+ assert config_data_without_env["aws_secret_access_key"] == AwsSecretAccessKey("dummy_secret")
+ assert config_data_without_env["aws_default_region"] == AwsRegion("eu-west-1")
+ assert config_data_without_env["dynamodb_endpoint"] == URL("http://localhost:4566")
+ assert config_data_without_env["eligibility_table_name"] == TableName("test_eligibility_datastore")
+ assert config_data_without_env["s3_endpoint"] == URL("http://localhost:4566")
+ assert config_data_without_env["rules_bucket_name"] == BucketName("test-rules-bucket")
+ assert config_data_without_env["log_level"] == LOG_LEVEL