diff --git a/.github/scripts/generate-docs.sh b/.github/scripts/generate-docs.sh
new file mode 100755
index 0000000..d339e11
--- /dev/null
+++ b/.github/scripts/generate-docs.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Determine repo root path relative to this script
+REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+
+# Path to terraform-docs config
+TFDOCS_CONFIG="${REPO_ROOT}/.terraform-docs.yml"
+
+# Check terraform-docs is installed
+if ! command -v terraform-docs &> /dev/null; then
+  echo "❌ terraform-docs is not installed. Install it from https://terraform-docs.io/" >&2
+  exit 1
+fi
+
+echo "📘 Generating module documentation using config at: $TFDOCS_CONFIG"
+echo
+
+# Loop through all submodules with a variables.tf file
+find "${REPO_ROOT}/modules" -type f -name "variables.tf" | while read -r tf_file; do
+  MODULE_DIR="$(dirname "$tf_file")"
+  echo "📄 Updating docs for module: ${MODULE_DIR#$REPO_ROOT/}"
+
+  # terraform-docs writes each module's README.md itself (output.file /
+  # output.mode in the config). Redirecting stdout to README.md here would
+  # truncate the file — and its injection markers — before terraform-docs reads it.
+  terraform-docs --config "$TFDOCS_CONFIG" "$MODULE_DIR"
+done
+
+echo
+echo "✅ Documentation generated for all modules."
diff --git a/.terraform-docs.yml b/.terraform-docs.yml
index b2e8b8d..27717b9 100644
--- a/.terraform-docs.yml
+++ b/.terraform-docs.yml
@@ -1,12 +1,13 @@
 formatter: markdown
-header-from: "docs/header.md"
-footer-from: "docs/footer.md"
-
-output:
-  file: README.md
-  mode: inject
-
-sort:
-  enabled: true
-  by: name
+output:
+  file: README.md
+  mode: inject
+sections:
+  show:
+    - header
+    - inputs
+    - outputs
+sort:
+  enabled: true
+  by: name
 
diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl
deleted file mode 100644
index e27b40d..0000000
--- a/.terraform.lock.hcl
+++ /dev/null
@@ -1,175 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
- -provider "registry.terraform.io/hashicorp/aws" { - version = "5.97.0" - constraints = ">= 4.33.0, ~> 5.0, >= 5.79.0, >= 5.92.0, >= 5.95.0" - hashes = [ - "h1:953uFkvlqGOHtO6j+aeJELqmwI5z7uV28TnkKYy6pSA=", - "h1:lI0I9GziJsdymNBcj+MJloqwD8fbogJw3EiR60j5FYU=", - "zh:02790ad98b767d8f24d28e8be623f348bcb45590205708334d52de2fb14f5a95", - "zh:088b4398a161e45762dc28784fcc41c4fa95bd6549cb708b82de577f2d39ffc7", - "zh:0c381a457b7af391c43fc0167919443f6105ad2702bde4d02ddea9fd7c9d3539", - "zh:1a4b57a5043dcca64d8b8bae8b30ef4f6b98ed2144f792f39c4e816d3f1e2c56", - "zh:1bf00a67f39e67664337bde065180d41d952242801ebcd1c777061d4ffaa1cc1", - "zh:24c549f53d6bd022af31426d3e78f21264d8a72409821669e7fd41966ae68b2b", - "zh:3abda50bbddb35d86081fe39522e995280aea7f004582c4af22112c03ac8b375", - "zh:7388ed7f21ce2eb46bd9066626ce5f3e2a5705f67f643acce8ae71972f66eaf6", - "zh:96740f2ff94e5df2b2d29a5035a1a1026fe821f61712b2099b224fb2c2277663", - "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", - "zh:9f399f8e8683a3a3a6d63a41c7c3a5a5f266eedef40ea69eba75bacf03699879", - "zh:bcf2b288d4706ebd198f75d2159663d657535483331107f2cdef381f10688baf", - "zh:cc76c8a9fc3bad05a8779c1f80fe8c388734f1ec1dd0affa863343490527b466", - "zh:de4359cf1b057bfe7a563be93829ec64bf72e7a2b85a72d075238081ef5eb1db", - "zh:e208fa77051a1f9fa1eff6c5c58aabdcab0de1695b97cdea7b8dd81df3e0ed73", - ] -} - -provider "registry.terraform.io/hashicorp/cloudinit" { - version = "2.3.7" - constraints = ">= 2.0.0" - hashes = [ - "h1:dgBaiMxxU61piW30emM6251LMFW66TbKR+p5ylPZvqc=", - "h1:iZ27qylcH/2bs685LJTKOKcQ+g7cF3VwN3kHMrzm4Ow=", - "zh:06f1c54e919425c3139f8aeb8fcf9bceca7e560d48c9f0c1e3bb0a8ad9d9da1e", - "zh:0e1e4cf6fd98b019e764c28586a386dc136129fef50af8c7165a067e7e4a31d5", - "zh:1871f4337c7c57287d4d67396f633d224b8938708b772abfc664d1f80bd67edd", - "zh:2b9269d91b742a71b2248439d5e9824f0447e6d261bfb86a8a88528609b136d1", - "zh:3d8ae039af21426072c66d6a59a467d51f2d9189b8198616888c1b7fc42addc7", - 
"zh:3ef4e2db5bcf3e2d915921adced43929214e0946a6fb11793085d9a48995ae01", - "zh:42ae54381147437c83cbb8790cc68935d71b6357728a154109d3220b1beb4dc9", - "zh:4496b362605ae4cbc9ef7995d102351e2fe311897586ffc7a4a262ccca0c782a", - "zh:652a2401257a12706d32842f66dac05a735693abcb3e6517d6b5e2573729ba13", - "zh:7406c30806f5979eaed5f50c548eced2ea18ea121e01801d2f0d4d87a04f6a14", - "zh:7848429fd5a5bcf35f6fee8487df0fb64b09ec071330f3ff240c0343fe2a5224", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - ] -} - -provider "registry.terraform.io/hashicorp/helm" { - version = "2.17.0" - constraints = "~> 2.0" - hashes = [ - "h1:If79Gw54AMearm13Sk9RmWuDesCQQMUtmlJXXqISxfU=", - "h1:K5FEjxvDnxb1JF1kG1xr8J3pNGxoaR3Z0IBG9Csm/Is=", - "h1:kQMkcPVvHOguOqnxoEU2sm1ND9vCHiT8TvZ2x6v/Rsw=", - "zh:06fb4e9932f0afc1904d2279e6e99353c2ddac0d765305ce90519af410706bd4", - "zh:104eccfc781fc868da3c7fec4385ad14ed183eb985c96331a1a937ac79c2d1a7", - "zh:129345c82359837bb3f0070ce4891ec232697052f7d5ccf61d43d818912cf5f3", - "zh:3956187ec239f4045975b35e8c30741f701aa494c386aaa04ebabffe7749f81c", - "zh:66a9686d92a6b3ec43de3ca3fde60ef3d89fb76259ed3313ca4eb9bb8c13b7dd", - "zh:88644260090aa621e7e8083585c468c8dd5e09a3c01a432fb05da5c4623af940", - "zh:a248f650d174a883b32c5b94f9e725f4057e623b00f171936dcdcc840fad0b3e", - "zh:aa498c1f1ab93be5c8fbf6d48af51dc6ef0f10b2ea88d67bcb9f02d1d80d3930", - "zh:bf01e0f2ec2468c53596e027d376532a2d30feb72b0b5b810334d043109ae32f", - "zh:c46fa84cc8388e5ca87eb575a534ebcf68819c5a5724142998b487cb11246654", - "zh:d0c0f15ffc115c0965cbfe5c81f18c2e114113e7a1e6829f6bfd879ce5744fbb", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} - -provider "registry.terraform.io/hashicorp/kubernetes" { - version = "2.36.0" - constraints = "~> 2.0" - hashes = [ - "h1:94wlXkBzfXwyLVuJVhMdzK+VGjFnMjdmFkYhQ1RUFhI=", - "h1:PjjQs2jN1zKWjDt84r1RK2ffbfi4Y2N3Aoa3avYWMZc=", - "h1:vdY0sxo7ahwuz/y7flXTE04tSwn0Zhxyg6n62aTmAHI=", - 
"zh:07f38fcb7578984a3e2c8cf0397c880f6b3eb2a722a120a08a634a607ea495ca", - "zh:1adde61769c50dbb799d8bf8bfd5c8c504a37017dfd06c7820f82bcf44ca0d39", - "zh:39707f23ab58fd0e686967c0f973c0f5a39c14d6ccfc757f97c345fdd0cd4624", - "zh:4cc3dc2b5d06cc22d1c734f7162b0a8fdc61990ff9efb64e59412d65a7ccc92a", - "zh:8382dcb82ba7303715b5e67939e07dd1c8ecddbe01d12f39b82b2b7d7357e1d9", - "zh:88e8e4f90034186b8bfdea1b8d394621cbc46a064ff2418027e6dba6807d5227", - "zh:a6276a75ad170f76d88263fdb5f9558998bf3a3f7650d7bd3387b396410e59f3", - "zh:bc816c7e0606e5df98a0c7634b240bb0c8100c3107b8b17b554af702edc6a0c5", - "zh:cb2f31d58f37020e840af52755c18afd1f09a833c4903ac59270ab440fab57b7", - "zh:ee0d103b8d0089fb1918311683110b4492a9346f0471b136af46d3b019576b22", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:f688b9ec761721e401f6859c19c083e3be20a650426f4747cd359cdc079d212a", - ] -} - -provider "registry.terraform.io/hashicorp/null" { - version = "3.2.4" - constraints = ">= 3.0.0" - hashes = [ - "h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=", - "h1:wTNrZnwQdOOT/TW9pa+7GgJeFK2OvTvDmx78VmUmZXM=", - "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43", - "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a", - "zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991", - "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f", - "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e", - "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615", - "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442", - "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5", - "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f", - "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f", - ] -} - -provider 
"registry.terraform.io/hashicorp/random" { - version = "3.7.2" - constraints = "~> 3.0, >= 3.1.0" - hashes = [ - "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", - "h1:Lmv2TxyKKm9Vt4uxcPZHw1uf0Ax/yYizJlilbLSZN8E=", - "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", - "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", - "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", - "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", - "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", - "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", - "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", - "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", - "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", - "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", - ] -} - -provider "registry.terraform.io/hashicorp/time" { - version = "0.13.1" - constraints = ">= 0.9.0" - hashes = [ - "h1:+W+DMrVoVnoXo3f3M4W+OpZbkCrUn6PnqDF33D2Cuf0=", - "h1:P9h9GNlrWPECzIvIFjHOhF+HVzpxk0eCcdy1G0fWSHw=", - "zh:02cb9aab1002f0f2a94a4f85acec8893297dc75915f7404c165983f720a54b74", - "zh:04429b2b31a492d19e5ecf999b116d396dac0b24bba0d0fb19ecaefe193fdb8f", - "zh:26f8e51bb7c275c404ba6028c1b530312066009194db721a8427a7bc5cdbc83a", - "zh:772ff8dbdbef968651ab3ae76d04afd355c32f8a868d03244db3f8496e462690", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:898db5d2b6bd6ca5457dccb52eedbc7c5b1a71e4a4658381bcbb38cedbbda328", - "zh:8de913bf09a3fa7bedc29fec18c47c571d0c7a3d0644322c46f3aa648cf30cd8", - "zh:9402102c86a87bdfe7e501ffbb9c685c32bbcefcfcf897fd7d53df414c36877b", - "zh:b18b9bb1726bb8cfbefc0a29cf3657c82578001f514bcf4c079839b6776c47f0", - 
"zh:b9d31fdc4faecb909d7c5ce41d2479dd0536862a963df434be4b16e8e4edc94d", - "zh:c951e9f39cca3446c060bd63933ebb89cedde9523904813973fbc3d11863ba75", - "zh:e5b773c0d07e962291be0e9b413c7a22c044b8c7b58c76e8aa91d1659990dfb5", - ] -} - -provider "registry.terraform.io/hashicorp/tls" { - version = "4.1.0" - constraints = ">= 3.0.0" - hashes = [ - "h1:Ka8mEwRFXBabR33iN/WTIEW6RP0z13vFsDlwn11Pf2I=", - "h1:UklaKJOCynnEJbpCVN0zJKIJ3SvO7RQJ00/6grBatnw=", - "zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2", - "zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8", - "zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc", - "zh:35b5dc95bc75f0b3b9c5ce54d4d7600c1ebc96fbb8dfca174536e8bf103c8cdc", - "zh:38aa27c6a6c98f1712aa5cc30011884dc4b128b4073a4a27883374bfa3ec9fac", - "zh:51fb247e3a2e88f0047cb97bb9df7c228254a3b3021c5534e4563b4007e6f882", - "zh:62b981ce491e38d892ba6364d1d0cdaadcee37cc218590e07b310b1dfa34be2d", - "zh:bc8e47efc611924a79f947ce072a9ad698f311d4a60d0b4dfff6758c912b7298", - "zh:c149508bd131765d1bc085c75a870abb314ff5a6d7f5ac1035a8892d686b6297", - "zh:d38d40783503d278b63858978d40e07ac48123a2925e1a6b47e62179c046f87a", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - "zh:fb07f708e3316615f6d218cec198504984c0ce7000b9f1eebff7516e384f4b54", - ] -} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6347600..bf362b3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -27,7 +27,7 @@ Pull requests are the best way to propose changes to the codebase. We actively w This module uses [terraform-docs](https://terraform-docs.io/user-guide/introduction/) to generate documentation. To generate the documentation, run the following command from the root of the repository: ```bash -terraform-docs --config .terraform-docs.yml . 
+.github/scripts/generate-docs.sh ``` ## Development Process diff --git a/README.md b/README.md index d9d3ddf..4d24b3b 100644 --- a/README.md +++ b/README.md @@ -1,35 +1,76 @@ - -# Materialize on AWS Cloud Platform +# Materialize on AWS Terraform Modules -Terraform module for deploying Materialize on AWS Cloud Platform with all required infrastructure components. +This repository provides a set of reusable, **self-contained Terraform modules** to deploy Materialize on the AWS cloud platform. You can use these modules individually or combine them to create your own custom infrastructure stack. -The module has been tested with: +> **Note** +> These modules are intended for demonstration and prototyping purposes. If you're planning to use them in production, fork the repo and pin to a specific commit or tag to avoid breaking changes in future versions. -- PostgreSQL 15 -- Materialize Helm Operator Terraform Module v0.1.12 +--- -> [!WARNING] -> This module is intended for demonstration/evaluation purposes as well as for serving as a template when building your own production deployment of Materialize. -> -> This module should not be directly relied upon for production deployments: **future releases of the module will contain breaking changes.** Instead, to use as a starting point for your own production deployment, either: -> - Fork this repo and pin to a specific version, or -> - Use the code as a reference when developing your own deployment. +## Modular Architecture -## Providers Configuration +Each module is designed to be used independently. You can compose them in any way that fits your use case. -The module requires the following providers to be configured: +See [`examples/simple/`](./examples/simple/) for a working example that ties the modules together into a complete environment. 
+ +--- + +## Available Modules + +| Module | Description | +|------------------------------------------------------------------|-----------------------------------------------------------------------| +| [`modules/networking`](./modules/networking) | VPC, subnets, NAT gateways, and basic networking resources | +| [`modules/eks`](./modules/eks) | EKS cluster setup | +| [`modules/eks-node-group`](./modules/eks-node-group) | EKS managed node groups with disk configuration | +| [`modules/database`](./modules/database) | RDS PostgreSQL database for Materialize metadata | +| [`modules/storage`](./modules/storage) | S3 bucket for Materialize persistence backend | +| [`modules/aws-lbc`](./modules/aws-lbc) | AWS Load Balancer Controller setup for NLBs | +| [`modules/openebs`](./modules/openebs) | OpenEBS setup for persistent volume storage using NVMe instance disks | +| [`modules/certificates`](./modules/certificates) | cert-manager installation and TLS management | +| [`modules/operator`](./modules/operator) | Materialize Kubernetes operator installation | +| [`modules/materialize-instance`](./modules/materialize-instance) | Materialize instance configuration and deployment | +| [`modules/nlb`](./modules/nlb) | Network Load Balancer for Materialize instance access | + +Depending on your needs, you can use the modules individually or combine them to create a setup that fits your needs. + +--- + +## Getting Started + +### Example Deployment + +To deploy a simple end-to-end environment, see the [`examples/simple`](./examples/simple) folder. + +```hcl +module "networking" { + source = "../../modules/networking" + name_prefix = "mz" + # ... networking vars +} + +module "eks" { + source = "../../modules/eks" + name_prefix = "mz" + vpc_id = module.networking.vpc_id + private_subnet_ids = module.networking.private_subnet_ids + # ... 
eks vars +} + +# See full working setup in the examples/simple/main.tf file +```` + +### Providers + +Ensure you configure the AWS, Kubernetes, and Helm providers. Here's a minimal setup: ```hcl provider "aws" { - region = "us-east-1" - # Other AWS provider configuration as needed + region = var.aws_region } -# Required for EKS authentication provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - exec { api_version = "client.authentication.k8s.io/v1beta1" args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] @@ -37,12 +78,10 @@ provider "kubernetes" { } } -# Required for Materialize Operator installation provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - exec { api_version = "client.authentication.k8s.io/v1beta1" args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] @@ -50,226 +89,16 @@ provider "helm" { } } } - ``` -> **Note:** The Kubernetes and Helm providers are configured to use the AWS CLI for authentication with the EKS cluster. This requires that you have the AWS CLI installed and configured with access to the AWS account where the EKS cluster is deployed. - -You can also set the `AWS_PROFILE` environment variable to the name of the profile you want to use for authentication with the EKS cluster: - -```bash -export AWS_PROFILE=your-profile-name -``` - -## Disk Support for Materialize - -This module supports configuring disk support for Materialize using NVMe instance storage and OpenEBS and lgalloc. - -When using disk support, you need to use instance types from the `r7gd` or `r6gd` family or other instance types with NVMe instance storage. +--- -### Enabling Disk Support +## Local Development & Linting -To enable disk support with default settings: - -```hcl -enable_disk_support = true -``` - -This will: -1. 
Install OpenEBS via Helm -2. Configure NVMe instance store volumes using the bootstrap script -3. Create appropriate storage classes for Materialize - -### Advanced Configuration - -In case that you need more control over the disk setup: - -```hcl -enable_disk_support = true +Run this to format and generate docs across all modules: -disk_support_config = { - openebs_version = "4.2.0" - storage_class_name = "custom-storage-class" - storage_class_parameters = { - volgroup = "custom-volume-group" - } -} +```bash +.github/scripts/generate-docs.sh ``` -## Requirements - -| Name | Version | -|------|---------| -| [terraform](#requirement\_terraform) | >= 1.0 | -| [aws](#requirement\_aws) | ~> 5.0 | -| [helm](#requirement\_helm) | ~> 2.0 | -| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | -| [random](#requirement\_random) | ~> 3.0 | - -## Providers - -| Name | Version | -|------|---------| -| [aws](#provider\_aws) | 5.97.0 | - -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [aws\_lbc](#module\_aws\_lbc) | ./modules/aws-lbc | n/a | -| [certificates](#module\_certificates) | ./modules/certificates | n/a | -| [database](#module\_database) | ./modules/database | n/a | -| [eks](#module\_eks) | ./modules/eks | n/a | -| [networking](#module\_networking) | ./modules/networking | n/a | -| [nlb](#module\_nlb) | ./modules/nlb | n/a | -| [operator](#module\_operator) | github.com/MaterializeInc/terraform-helm-materialize | v0.1.14 | -| [storage](#module\_storage) | ./modules/storage | n/a | - -## Resources - -| Name | Type | -|------|------| -| [aws_cloudwatch_log_group.materialize](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | -| [aws_iam_role.materialize_s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | -| [aws_iam_role_policy.materialize_s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | 
resource | -| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | -| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | - -## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [availability\_zones](#input\_availability\_zones) | List of availability zones | `list(string)` |
[
"us-east-1a",
"us-east-1b",
"us-east-1c"
]
| no | -| [bucket\_force\_destroy](#input\_bucket\_force\_destroy) | Enable force destroy for the S3 bucket | `bool` | `true` | no | -| [bucket\_lifecycle\_rules](#input\_bucket\_lifecycle\_rules) | List of lifecycle rules for the S3 bucket |
list(object({
id = string
enabled = bool
prefix = string
transition_days = number
transition_storage_class = string
noncurrent_version_expiration_days = number
}))
|
[
{
"enabled": true,
"id": "cleanup",
"noncurrent_version_expiration_days": 90,
"prefix": "",
"transition_days": 90,
"transition_storage_class": "STANDARD_IA"
}
]
| no | -| [cert\_manager\_chart\_version](#input\_cert\_manager\_chart\_version) | Version of the cert-manager helm chart to install. | `string` | `"v1.17.1"` | no | -| [cert\_manager\_install\_timeout](#input\_cert\_manager\_install\_timeout) | Timeout for installing the cert-manager helm chart, in seconds. | `number` | `300` | no | -| [cert\_manager\_namespace](#input\_cert\_manager\_namespace) | The name of the namespace in which cert-manager is or will be installed. | `string` | `"cert-manager"` | no | -| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | List of desired control plane logging to enable | `list(string)` |
[
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler"
]
| no | -| [cluster\_version](#input\_cluster\_version) | Kubernetes version for the EKS cluster | `string` | `"1.32"` | no | -| [create\_vpc](#input\_create\_vpc) | Controls if VPC should be created (it affects almost all resources) | `bool` | `true` | no | -| [database\_name](#input\_database\_name) | Name of the database to create | `string` | `"materialize"` | no | -| [database\_password](#input\_database\_password) | Password for the database (should be provided via tfvars or environment variable) | `string` | n/a | yes | -| [database\_username](#input\_database\_username) | Username for the database | `string` | `"materialize"` | no | -| [db\_allocated\_storage](#input\_db\_allocated\_storage) | Allocated storage for the RDS instance (in GB) | `number` | `20` | no | -| [db\_instance\_class](#input\_db\_instance\_class) | Instance class for the RDS instance. This is used for concensus and metadata and is general not bottlnecked by memory or disk. Recomended instance family m7i, m6i, m7g, and m8g | `string` | `"db.m6i.large"` | no | -| [db\_max\_allocated\_storage](#input\_db\_max\_allocated\_storage) | Maximum storage for autoscaling (in GB) | `number` | `100` | no | -| [db\_multi\_az](#input\_db\_multi\_az) | Enable multi-AZ deployment for RDS | `bool` | `false` | no | -| [disk\_support\_config](#input\_disk\_support\_config) | Advanced configuration for disk support (only used when enable\_disk\_support = true) |
object({
install_openebs = optional(bool, true)
run_disk_setup_script = optional(bool, true)
create_storage_class = optional(bool, true)
openebs_version = optional(string, "4.2.0")
openebs_namespace = optional(string, "openebs")
storage_class_name = optional(string, "openebs-lvm-instance-store-ext4")
storage_class_provisioner = optional(string, "local.csi.openebs.io")
storage_class_parameters = optional(object({
storage = optional(string, "lvm")
fsType = optional(string, "ext4")
volgroup = optional(string, "instance-store-vg")
}), {})
})
| `{}` | no | -| [enable\_bucket\_encryption](#input\_enable\_bucket\_encryption) | Enable server-side encryption for the S3 bucket | `bool` | `true` | no | -| [enable\_bucket\_versioning](#input\_enable\_bucket\_versioning) | Enable versioning for the S3 bucket | `bool` | `true` | no | -| [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | To add the current caller identity as an administrator | `bool` | `true` | no | -| [enable\_disk\_support](#input\_enable\_disk\_support) | Enable disk support for Materialize using OpenEBS and NVMe instance storage. When enabled, this configures OpenEBS, runs the disk setup script for NVMe devices, and creates appropriate storage classes. | `bool` | `true` | no | -| [enable\_monitoring](#input\_enable\_monitoring) | Enable CloudWatch monitoring | `bool` | `true` | no | -| [environment](#input\_environment) | Environment name (e.g., prod, staging, dev) | `string` | n/a | yes | -| [helm\_chart](#input\_helm\_chart) | Chart name from repository or local path to chart. For local charts, set the path to the chart directory. | `string` | `"materialize-operator"` | no | -| [helm\_values](#input\_helm\_values) | Additional Helm values to merge with defaults | `any` | `{}` | no | -| [install\_aws\_load\_balancer\_controller](#input\_install\_aws\_load\_balancer\_controller) | Whether to install the AWS Load Balancer Controller | `bool` | `true` | no | -| [install\_cert\_manager](#input\_install\_cert\_manager) | Whether to install cert-manager. 
| `bool` | `true` | no | -| [install\_materialize\_operator](#input\_install\_materialize\_operator) | Whether to install the Materialize operator | `bool` | `true` | no | -| [install\_metrics\_server](#input\_install\_metrics\_server) | Whether to install the metrics-server for the Materialize Console | `bool` | `true` | no | -| [kubernetes\_namespace](#input\_kubernetes\_namespace) | The Kubernetes namespace for the Materialize resources | `string` | `"materialize-environment"` | no | -| [log\_group\_name\_prefix](#input\_log\_group\_name\_prefix) | Prefix for the CloudWatch log group name (will be combined with environment name) | `string` | `"materialize"` | no | -| [materialize\_instances](#input\_materialize\_instances) | Configuration for Materialize instances. Due to limitations in Terraform, `materialize_instances` cannot be defined on the first `terraform apply`. |
list(object({
name = string
namespace = optional(string)
database_name = string
environmentd_version = optional(string)
cpu_request = optional(string, "1")
memory_request = optional(string, "1Gi")
memory_limit = optional(string, "1Gi")
create_database = optional(bool, true)
create_nlb = optional(bool, true)
internal_nlb = optional(bool, true)
enable_cross_zone_load_balancing = optional(bool, true)
in_place_rollout = optional(bool, false)
request_rollout = optional(string)
force_rollout = optional(string)
balancer_memory_request = optional(string, "256Mi")
balancer_memory_limit = optional(string, "256Mi")
balancer_cpu_request = optional(string, "100m")
license_key = optional(string)
}))
| `[]` | no | -| [metrics\_retention\_days](#input\_metrics\_retention\_days) | Number of days to retain CloudWatch metrics | `number` | `7` | no | -| [namespace](#input\_namespace) | Namespace for all resources, usually the organization or project name | `string` | n/a | yes | -| [network\_id](#input\_network\_id) | The ID of the VPC in which resources will be deployed. Only used if create\_vpc is false. | `string` | `""` | no | -| [network\_private\_subnet\_ids](#input\_network\_private\_subnet\_ids) | A list of private subnet IDs in the VPC. Only used if create\_vpc is false. | `list(string)` | `[]` | no | -| [network\_public\_subnet\_ids](#input\_network\_public\_subnet\_ids) | A list of public subnet IDs in the VPC. Only used if create\_vpc is false. | `list(string)` | `[]` | no | -| [node\_group\_ami\_type](#input\_node\_group\_ami\_type) | AMI type for the node group | `string` | `"AL2023_ARM_64_STANDARD"` | no | -| [node\_group\_capacity\_type](#input\_node\_group\_capacity\_type) | Capacity type for worker nodes (ON\_DEMAND or SPOT) | `string` | `"ON_DEMAND"` | no | -| [node\_group\_desired\_size](#input\_node\_group\_desired\_size) | Desired number of worker nodes | `number` | `2` | no | -| [node\_group\_instance\_types](#input\_node\_group\_instance\_types) | Instance types for worker nodes.

Recommended Configuration for Running Materialize with disk:
- Tested instance types: `r6gd`, `r7gd` families (ARM-based Graviton instances)
- Enable disk setup when using `r7gd`
- Note: Ensure instance store volumes are available and attached to the nodes for optimal performance with disk-based workloads. | `list(string)` |
[
"r7gd.2xlarge"
]
| no | -| [node\_group\_max\_size](#input\_node\_group\_max\_size) | Maximum number of worker nodes | `number` | `4` | no | -| [node\_group\_min\_size](#input\_node\_group\_min\_size) | Minimum number of worker nodes | `number` | `1` | no | -| [operator\_namespace](#input\_operator\_namespace) | Namespace for the Materialize operator | `string` | `"materialize"` | no | -| [operator\_version](#input\_operator\_version) | Version of the Materialize operator to install | `string` | `null` | no | -| [orchestratord\_version](#input\_orchestratord\_version) | Version of the Materialize orchestrator to install | `string` | `null` | no | -| [postgres\_version](#input\_postgres\_version) | Version of PostgreSQL to use | `string` | `"17"` | no | -| [private\_subnet\_cidrs](#input\_private\_subnet\_cidrs) | CIDR blocks for private subnets | `list(string)` |
[
"10.0.1.0/24",
"10.0.2.0/24",
"10.0.3.0/24"
]
| no | -| [public\_subnet\_cidrs](#input\_public\_subnet\_cidrs) | CIDR blocks for public subnets | `list(string)` |
[
"10.0.101.0/24",
"10.0.102.0/24",
"10.0.103.0/24"
]
| no | -| [service\_account\_name](#input\_service\_account\_name) | Name of the service account | `string` | `"12345678-1234-1234-1234-123456789012"` | no | -| [single\_nat\_gateway](#input\_single\_nat\_gateway) | Use a single NAT Gateway for all private subnets | `bool` | `false` | no | -| [tags](#input\_tags) | Default tags to apply to all resources | `map(string)` |
{
"Environment": "dev",
"Project": "materialize",
"Terraform": "true"
}
| no | -| [use\_local\_chart](#input\_use\_local\_chart) | Whether to use a local chart instead of one from a repository | `bool` | `false` | no | -| [use\_self\_signed\_cluster\_issuer](#input\_use\_self\_signed\_cluster\_issuer) | Whether to install and use a self-signed ClusterIssuer for TLS. To work around limitations in Terraform, this will be treated as `false` if no materialize instances are defined. | `bool` | `true` | no | -| [vpc\_cidr](#input\_vpc\_cidr) | CIDR block for VPC | `string` | `"10.0.0.0/16"` | no | - -## Outputs - -| Name | Description | -|------|-------------| -| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | -| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | -| [database\_endpoint](#output\_database\_endpoint) | RDS instance endpoint | -| [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | EKS cluster endpoint | -| [eks\_cluster\_name](#output\_eks\_cluster\_name) | EKS cluster name | -| [materialize\_s3\_role\_arn](#output\_materialize\_s3\_role\_arn) | The ARN of the IAM role for Materialize | -| [metadata\_backend\_url](#output\_metadata\_backend\_url) | PostgreSQL connection URL in the format required by Materialize | -| [nlb\_details](#output\_nlb\_details) | Details of the Materialize instance NLBs. 
| -| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider | -| [operator\_details](#output\_operator\_details) | Details of the installed Materialize operator | -| [persist\_backend\_url](#output\_persist\_backend\_url) | S3 connection URL in the format required by Materialize using IRSA | -| [private\_subnet\_ids](#output\_private\_subnet\_ids) | List of private subnet IDs | -| [public\_subnet\_ids](#output\_public\_subnet\_ids) | List of public subnet IDs | -| [s3\_bucket\_name](#output\_s3\_bucket\_name) | Name of the S3 bucket | -| [vpc\_id](#output\_vpc\_id) | VPC ID | - -## Post-Deployment Setup - -After successfully deploying the infrastructure with this module, you'll need to: - -1. (Optional) Configure storage classes -1. Install the [Materialize Operator](https://github.com/MaterializeInc/materialize/tree/main/misc/helm-charts/operator) -1. Deploy your first Materialize environment - -See our [Operator Installation Guide](docs/operator-setup.md) for instructions. - -## Connecting to Materialize instances - -By default, Network Load Balancers are created for each Materialize instance, with three listeners: -1. Port 6875 for SQL connections to the database. -1. Port 6876 for HTTP(S) connections to the database. -1. Port 8080 for HTTP(S) connections to the web console. - -The DNS name and ARN for the NLBs will be in the `terraform output` as `nlb_details`. - -#### TLS support - -TLS support is provided by using `cert-manager` and a self-signed `ClusterIssuer`. - -More advanced TLS support using user-provided CAs or per-Materialize `Issuer`s are out of scope for this Terraform module. Please refer to the [cert-manager documentation](https://cert-manager.io/docs/configuration/) for detailed guidance on more advanced usage. - -## Upgrade Notes - -#### v0.4.0 -We now install `cert-manager` and configure a self-signed `ClusterIssuer` by default. 
- -Due to limitations in Terraform, it cannot plan Kubernetes resources using CRDs that do not exist yet. We have worked around this for new users by only generating the certificate resources when creating Materialize instances that use them, which also cannot be created on the first run. - -For existing users upgrading Materialize instances not previously configured for TLS: -1. Leave `install_cert_manager` at its default of `true`. -2. Set `use_self_signed_cluster_issuer` to `false`. -3. Run `terraform apply`. This will install cert-manager and its CRDs. -4. Set `use_self_signed_cluster_issuer` back to `true` (the default). -5. Update the `request_rollout` field of the Materialize instance. -6. Run `terraform apply`. This will generate the certificates and configure your Materialize instance to use them. - -#### v0.3.0 -We now install the AWS Load Balancer Controller and create Network Load Balancers for each Materialize instance. - -If managing Materialize instances with this module, additional action may be required to upgrade to this version. - -###### If you want to disable NLB support -* Set `install_aws_load_balancer_controller` to `false`. -* Set `materialize_instances[*].create_nlb` to `false`. - -###### If you want to enable NLB support -* Leave `install_aws_load_balancer_controller` set to its default of `true`. -* Set `materialize_instances[*].create_nlb` to `false`. -* Run `terraform apply`. -* Set `materialize_instances[*].create_nlb` to `true`. -* Run `terraform apply`. - -Due to limitations in Terraform, it cannot plan Kubernetes resources using CRDs that do not exist yet. We need to first install the AWS Load Balancer Controller in the first `terraform apply`, before defining any `TargetGroupBinding` resources which get created in the second `terraform apply`. - +Make sure `terraform-docs` and `tflint` are installed locally. 
diff --git a/datasources.tf b/datasources.tf deleted file mode 100644 index f0e9d01..0000000 --- a/datasources.tf +++ /dev/null @@ -1,2 +0,0 @@ -data "aws_region" "current" {} -data "aws_caller_identity" "current" {} diff --git a/docs/footer.md b/docs/footer.md deleted file mode 100644 index a152e88..0000000 --- a/docs/footer.md +++ /dev/null @@ -1,57 +0,0 @@ -## Post-Deployment Setup - -After successfully deploying the infrastructure with this module, you'll need to: - -1. (Optional) Configure storage classes -1. Install the [Materialize Operator](https://github.com/MaterializeInc/materialize/tree/main/misc/helm-charts/operator) -1. Deploy your first Materialize environment - -See our [Operator Installation Guide](docs/operator-setup.md) for instructions. - -## Connecting to Materialize instances - -By default, Network Load Balancers are created for each Materialize instance, with three listeners: -1. Port 6875 for SQL connections to the database. -1. Port 6876 for HTTP(S) connections to the database. -1. Port 8080 for HTTP(S) connections to the web console. - -The DNS name and ARN for the NLBs will be in the `terraform output` as `nlb_details`. - -#### TLS support - -TLS support is provided by using `cert-manager` and a self-signed `ClusterIssuer`. - -More advanced TLS support using user-provided CAs or per-Materialize `Issuer`s are out of scope for this Terraform module. Please refer to the [cert-manager documentation](https://cert-manager.io/docs/configuration/) for detailed guidance on more advanced usage. - -## Upgrade Notes - -#### v0.4.0 -We now install `cert-manager` and configure a self-signed `ClusterIssuer` by default. - -Due to limitations in Terraform, it cannot plan Kubernetes resources using CRDs that do not exist yet. We have worked around this for new users by only generating the certificate resources when creating Materialize instances that use them, which also cannot be created on the first run. 
- -For existing users upgrading Materialize instances not previously configured for TLS: -1. Leave `install_cert_manager` at its default of `true`. -2. Set `use_self_signed_cluster_issuer` to `false`. -3. Run `terraform apply`. This will install cert-manager and its CRDs. -4. Set `use_self_signed_cluster_issuer` back to `true` (the default). -5. Update the `request_rollout` field of the Materialize instance. -6. Run `terraform apply`. This will generate the certificates and configure your Materialize instance to use them. - -#### v0.3.0 -We now install the AWS Load Balancer Controller and create Network Load Balancers for each Materialize instance. - -If managing Materialize instances with this module, additional action may be required to upgrade to this version. - -###### If you want to disable NLB support -* Set `install_aws_load_balancer_controller` to `false`. -* Set `materialize_instances[*].create_nlb` to `false`. - -###### If you want to enable NLB support -* Leave `install_aws_load_balancer_controller` set to its default of `true`. -* Set `materialize_instances[*].create_nlb` to `false`. -* Run `terraform apply`. -* Set `materialize_instances[*].create_nlb` to `true`. -* Run `terraform apply`. - -Due to limitations in Terraform, it cannot plan Kubernetes resources using CRDs that do not exist yet. We need to first install the AWS Load Balancer Controller in the first `terraform apply`, before defining any `TargetGroupBinding` resources which get created in the second `terraform apply`. diff --git a/docs/header.md b/docs/header.md deleted file mode 100644 index 45cc380..0000000 --- a/docs/header.md +++ /dev/null @@ -1,96 +0,0 @@ -# Materialize on AWS Cloud Platform - -Terraform module for deploying Materialize on AWS Cloud Platform with all required infrastructure components. 
- -The module has been tested with: - -- PostgreSQL 15 -- Materialize Helm Operator Terraform Module v0.1.12 - -> [!WARNING] -> This module is intended for demonstration/evaluation purposes as well as for serving as a template when building your own production deployment of Materialize. -> -> This module should not be directly relied upon for production deployments: **future releases of the module will contain breaking changes.** Instead, to use as a starting point for your own production deployment, either: -> - Fork this repo and pin to a specific version, or -> - Use the code as a reference when developing your own deployment. - -## Providers Configuration - -The module requires the following providers to be configured: - -```hcl -provider "aws" { - region = "us-east-1" - # Other AWS provider configuration as needed -} - -# Required for EKS authentication -provider "kubernetes" { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = "aws" - } -} - -# Required for Materialize Operator installation -provider "helm" { - kubernetes { - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - command = "aws" - } - } -} - -``` - -> **Note:** The Kubernetes and Helm providers are configured to use the AWS CLI for authentication with the EKS cluster. This requires that you have the AWS CLI installed and configured with access to the AWS account where the EKS cluster is deployed. 
- -You can also set the `AWS_PROFILE` environment variable to the name of the profile you want to use for authentication with the EKS cluster: - -```bash -export AWS_PROFILE=your-profile-name -``` - -## Disk Support for Materialize - -This module supports configuring disk support for Materialize using NVMe instance storage and OpenEBS and lgalloc. - -When using disk support, you need to use instance types from the `r7gd` or `r6gd` family or other instance types with NVMe instance storage. - -### Enabling Disk Support - -To enable disk support with default settings: - -```hcl -enable_disk_support = true -``` - -This will: -1. Install OpenEBS via Helm -2. Configure NVMe instance store volumes using the bootstrap script -3. Create appropriate storage classes for Materialize - -### Advanced Configuration - -In case that you need more control over the disk setup: - -```hcl -enable_disk_support = true - -disk_support_config = { - openebs_version = "4.2.0" - storage_class_name = "custom-storage-class" - storage_class_parameters = { - volgroup = "custom-volume-group" - } -} -``` diff --git a/docs/operator-setup.md b/docs/operator-setup.md deleted file mode 100644 index affb7a5..0000000 --- a/docs/operator-setup.md +++ /dev/null @@ -1,164 +0,0 @@ -# Installing the Materialize Operator - -After deploying the infrastructure using this Terraform module, follow these steps to install the Materialize Operator on your EKS cluster. - -## Prerequisites - -- `kubectl` configured to interact with your EKS cluster -- Helm 3.2.0+ -- AWS CLI configured with appropriate credentials - -## Configure kubectl - -First, update your kubeconfig to connect to the newly created EKS cluster: - -```bash -aws eks update-kubeconfig --name materialize-cluster --region -``` - -> Note: the exact authentication method may vary depending on your EKS configuration. For example, you might have to add an IAM access entry to the EKS cluster. 
- -Verify the connection: - -```bash -kubectl get nodes -``` - -## (Optional) Storage Configuration - -The Materialize Operator requires fast, locally-attached NVMe storage for optimal performance. We'll set up OpenEBS with LVM Local PV for managing local volumes. - -1. Install OpenEBS: -```bash -# Add the OpenEBS Helm repository -helm repo add openebs https://openebs.github.io/openebs -helm repo update - -# Install OpenEBS with only Local PV enabled -helm install openebs --namespace openebs openebs/openebs \ - --set engines.replicated.mayastor.enabled=false \ - --create-namespace -``` - -2. Verify the installation: -```bash -kubectl get pods -n openebs -l role=openebs-lvm -``` - -### LVM Configuration for AWS Bottlerocket nodes - -TODO: Add more detailed instructions for setting up LVM on Bottlerocket nodes. - -If you're using the recommended Bottlerocket AMI with the Terraform module, the LVM configuration needs to be done through the Bottlerocket bootstrap container. This is automatically handled by the EKS module using the provided user data script. - -To verify the LVM setup: -```bash -kubectl debug -it node/ --image=amazonlinux:2 -chroot /host -lvs -``` - -You should see a volume group named `instance-store-vg`. - -## Install the Materialize Operator - -The Materialize Operator is installed automatically when you set the following in your Terraform configuration: - -```hcl -# Enable and configure Materialize Operator -install_materialize_operator = true -``` - -This eliminates the need to manually install the operator via Helm. 
Make sure that this setting is enabled in your Terraform configuration before applying changes: - -```bash -terraform apply -``` - -You can verify that the Materialize Operator is installed by running: - -```bash -kubectl get pods -n materialize -``` - -For more details on installation and configuration, refer to the official Materialize documentation: [Materialize AWS Installation Guide](https://materialize.com/docs/self-managed/v25.1/installation/install-on-aws/). - -Alternatively, you can still install the [operator manually using Helm](https://github.com/MaterializeInc/materialize/tree/main/misc/helm-charts/operator#installing-the-chart). - -## Deploying Materialize Environments - -Once the infrastructure and the Materialize Operator are installed, you can deploy Materialize environments by setting the `materialize_instances` variable in your Terraform configuration. - -1. Define your Materialize instances in `terraform.tfvars`: - - ```hcl - materialize_instances = [ - { - name = "analytics" - namespace = "materialize-environment" - database_name = "analytics_db" - cpu_request = "2" - memory_request = "4Gi" - memory_limit = "4Gi" - }, - { - name = "demo" - namespace = "materialize-environment" - database_name = "demo_db" - cpu_request = "2" - memory_request = "4Gi" - memory_limit = "4Gi" - } - ] - ``` - -2. Re-apply the Terraform configuration to deploy the Materialize environments: - - ```bash - terraform apply - ``` - -Alternatively, you can manually deploy Materialize instances as described in the [Materialize Operator Helm Chart Documentation](https://github.com/MaterializeInc/materialize/tree/main/misc/helm-charts/operator#installing-the-chart). - -You can check the status of the Materialize instances by running: - -```bash -kubectl get pods -n materialize-environment -``` - -## Troubleshooting - -If you encounter issues: - -1. Check operator logs: -```bash -kubectl logs -l app.kubernetes.io/name=materialize-operator -n materialize -``` - -2. 
Check environment logs: -```bash -kubectl logs -l app.kubernetes.io/name=environmentd -n materialize-environment -``` - -3. Verify the storage configuration: -```bash -kubectl get sc -kubectl get pv -kubectl get pvc -A -``` - -## Cleanup - -Delete the Materialize environment: -```bash -kubectl delete -f materialize-environment.yaml -``` - -To uninstall the Materialize operator: -```bash -terraform destroy -``` - -This will remove all associated resources, including the operator and any deployed Materialize instances. - -For more details, visit the [Materialize documentation](https://materialize.com/docs/self-managed/v25.1/installation/install-on-aws/). diff --git a/examples/simple/README.md b/examples/simple/README.md index 1281da6..872d3a7 100644 --- a/examples/simple/README.md +++ b/examples/simple/README.md @@ -1,91 +1,65 @@ -# Simple Example for Terraform AWS Materialize Module +# Example: Simple Materialize Deployment on AWS -This directory contains a simple example of using the [Terraform AWS Materialize module](https://github.com/MaterializeInc/terraform-aws-materialize/) to deploy a basic infrastructure setup. +This example demonstrates how to deploy a complete Materialize environment on AWS using the modular Terraform setup from this repository. -## What This Example Does +It provisions the full infrastructure stack, including: +- VPC and networking +- EKS cluster and node group +- RDS PostgreSQL for metadata +- S3 for persistent storage +- Load Balancer Controller and cert-manager +- Materialize operator -- Creates a VPC. -- Provisions an EKS cluster with a basic node group. -- Sets up an RDS PostgreSQL instance. -- Creates an S3 bucket. -- Deploys the Materialize Operator. +> **Important:** +> Due to a limitation with the `kubernetes_manifest` resource in Terraform, the Materialize instance **cannot be installed on the first run**. The Kubernetes cluster must be fully provisioned before applying the instance configuration. -## How to Use +--- -1. 
Clone the repository: - ```bash - git clone https://github.com/MaterializeInc/terraform-aws-materialize.git - cd terraform-aws-materialize/examples/simple - ``` +## Getting Started -2. Initialize Terraform: - ```bash - terraform init - ``` +### Step 1: Set Required Variables -3. Copy the `terraform.tfvars.example` file to `terraform.tfvars` and update the variables: - ```hcl - namespace = "example-namespace" - environment = "dev" - ``` +Before running Terraform, create a `terraform.tfvars` file or pass the following variables: -4. Apply the configuration: - ```bash - terraform apply - ``` +```hcl +name_prefix = "simple-demo" +install_materialize_instance = false +```` + +--- -5. Review the outputs for details such as the VPC ID, EKS cluster endpoint, and S3 bucket name. +### Step 2: Deploy the Infrastructure -## Example Configuration +Run the usual Terraform workflow: -This example uses the following variables: +```bash +terraform init +terraform apply +``` -- **namespace**: A prefix for resource names, e.g., `mz-demo-namespace`. -- **environment**: The deployment environment (e.g., `dev`, `staging`). +This will provision all infrastructure components except the Materialize instance. -Refer to the `variables.tf` file for more details on available variables. +--- -## Deploying Materialize Instances +### Step 3: Deploy the Materialize Instance -Once the infrastructure and the Materialize Operator are installed, you can deploy Materialize instances by uncommenting and configuring the `materialize_instances` variable in your `terraform.tfvars` file. +Once the initial deployment completes successfully: -1. Open the `terraform.tfvars` file and uncomment the `materialize_instances` block. Customize it as needed. For example: +1. 
Update your variable: ```hcl - materialize_instances = [ - { - name = "analytics" - namespace = "materialize-environment" - database_name = "analytics_db" - cpu_request = "2" - memory_request = "4Gi" - memory_limit = "4Gi" - }, - { - name = "demo" - namespace = "materialize-environment" - database_name = "demo_db" - cpu_request = "2" - memory_request = "4Gi" - memory_limit = "4Gi" - } - ] - ``` - -2. Re-apply the Terraform configuration to create the Materialize instances: - ```bash - terraform apply + install_materialize_instance = true ``` -This will deploy the necessary CRDs for the specified Materialize instances within your cluster. You can check the status of the Materialize instances by running: +2. Run `terraform apply` again to deploy the instance. -```bash -kubectl get pods -n materialize-environment -``` +--- -## Cleaning Up +## Notes -To destroy the resources created by this example, run: +* You can customize each module independently. +* To reduce cost in your demo environment, you can tweak subnet CIDRs and instance types in `main.tf`. 
+* Don't forget to destroy resources when finished: ```bash terraform destroy diff --git a/examples/simple/main.tf b/examples/simple/main.tf index b4318c5..8a67e4d 100644 --- a/examples/simple/main.tf +++ b/examples/simple/main.tf @@ -1,231 +1,241 @@ provider "aws" { - region = "us-east-1" + region = var.aws_region } provider "kubernetes" { - host = module.materialize_infrastructure.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.materialize_infrastructure.cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" - args = ["eks", "get-token", "--cluster-name", module.materialize_infrastructure.eks_cluster_name] + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } provider "helm" { kubernetes { - host = module.materialize_infrastructure.eks_cluster_endpoint - cluster_ca_certificate = base64decode(module.materialize_infrastructure.cluster_certificate_authority_data) + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) exec { api_version = "client.authentication.k8s.io/v1beta1" command = "aws" - args = ["eks", "get-token", "--cluster-name", module.materialize_infrastructure.eks_cluster_name] + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] } } } -module "materialize_infrastructure" { - # To pull this from GitHub, use the following: - # source = "git::https://github.com/MaterializeInc/terraform-aws-materialize.git" - source = "../../" +# 1. Create network infrastructure +module "networking" { + source = "../../modules/networking" - providers = { - aws = aws - kubernetes = kubernetes - helm = helm - } - - # The namespace and environment variables are used to construct the names of the resources - # e.g. 
${namespace}-${environment}-storage, ${namespace}-${environment}-db etc. - namespace = var.namespace - environment = var.environment + name_prefix = var.name_prefix - # VPC Configuration vpc_cidr = "10.0.0.0/16" - availability_zones = ["us-east-1a", "us-east-1b"] - private_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] - public_subnet_cidrs = ["10.0.101.0/24", "10.0.102.0/24"] - single_nat_gateway = true + availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"] + private_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] + public_subnet_cidrs = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"] + single_nat_gateway = true # Use single NAT gateway to reduce costs for this example +} - # EKS Configuration +# 2. Create EKS cluster +module "eks" { + source = "../../modules/eks" + name_prefix = var.name_prefix cluster_version = "1.32" - node_group_instance_types = ["r7gd.2xlarge"] - node_group_desired_size = 1 - node_group_min_size = 1 - node_group_max_size = 2 - node_group_capacity_type = "ON_DEMAND" + vpc_id = module.networking.vpc_id + private_subnet_ids = module.networking.private_subnet_ids + cluster_enabled_log_types = ["api", "audit"] enable_cluster_creator_admin_permissions = true + tags = {} +} + +# 2.1. Create EKS node group +module "eks_node_group" { + source = "../../modules/eks-node-group" + cluster_name = module.eks.cluster_name + subnet_ids = module.networking.private_subnet_ids + node_group_name = "${var.name_prefix}-mz" + enable_disk_setup = true + cluster_service_cidr = module.eks.cluster_service_cidr + cluster_primary_security_group_id = module.eks.node_security_group_id + + labels = { + GithubRepo = "materialize" + "materialize.cloud/disk" = "true" + "workload" = "materialize-instance" + } +} - # Storage Configuration - bucket_force_destroy = true +# 3. 
Install AWS Load Balancer Controller +module "aws_lbc" { + source = "../../modules/aws-lbc" + + name_prefix = var.name_prefix + eks_cluster_name = module.eks.cluster_name + oidc_provider_arn = module.eks.oidc_provider_arn + oidc_issuer_url = module.eks.cluster_oidc_issuer_url + vpc_id = module.networking.vpc_id + region = var.aws_region + + depends_on = [ + module.eks, + module.eks_node_group, + ] +} + +# 4. Install OpenEBS for storage +module "openebs" { + source = "../../modules/openebs" + + openebs_namespace = "openebs" + openebs_version = "4.2.0" + + depends_on = [ + module.networking, + module.eks, + module.eks_node_group, + module.aws_lbc, + ] +} + +# 5. Install Certificate Manager for TLS +module "certificates" { + source = "../../modules/certificates" + + install_cert_manager = true + cert_manager_install_timeout = 300 + cert_manager_chart_version = "v1.13.3" + use_self_signed_cluster_issuer = var.install_materialize_instance + cert_manager_namespace = "cert-manager" + name_prefix = var.name_prefix + + depends_on = [ + module.networking, + module.eks, + module.eks_node_group, + module.aws_lbc, + ] +} + +# 6. Install Materialize Operator +module "operator" { + source = "../../modules/operator" + + name_prefix = var.name_prefix + aws_region = var.aws_region + aws_account_id = data.aws_caller_identity.current.account_id + oidc_provider_arn = module.eks.oidc_provider_arn + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + s3_bucket_arn = module.storage.bucket_arn + use_self_signed_cluster_issuer = true + + depends_on = [ + module.eks, + module.networking, + module.eks_node_group, + ] +} + +resource "random_password" "database_password" { + length = 16 + special = true + override_special = "!#$%&*()-_=+[]{}<>:?" +} + +# 7. 
Setup dedicated database instance for Materialize +module "database" { + source = "../../modules/database" + name_prefix = var.name_prefix + postgres_version = "15" + instance_class = "db.t3.large" + allocated_storage = 50 + max_allocated_storage = 100 + database_name = "materialize" + database_username = "materialize" + database_password = random_password.database_password.result + multi_az = false + database_subnet_ids = module.networking.private_subnet_ids + vpc_id = module.networking.vpc_id + eks_security_group_id = module.eks.cluster_security_group_id + eks_node_security_group_id = module.eks.node_security_group_id + tags = {} +} + +# 8. Setup S3 bucket for Materialize +module "storage" { + source = "../../modules/storage" + name_prefix = var.name_prefix + bucket_lifecycle_rules = [] + bucket_force_destroy = true # For testing purposes, we are disabling encryption and versioning to allow for easier cleanup # This should be enabled in production environments for security and data integrity enable_bucket_versioning = false enable_bucket_encryption = false - # Database Configuration - database_password = random_password.pass.result - postgres_version = "15" - db_instance_class = "db.t3.large" - db_allocated_storage = 20 - database_name = "materialize" - database_username = "materialize" - db_multi_az = false - - # Basic monitoring - enable_monitoring = true - metrics_retention_days = 3 - - # Certificates - install_cert_manager = var.install_cert_manager - use_self_signed_cluster_issuer = var.use_self_signed_cluster_issuer - - # Enable and configure Materialize operator - install_materialize_operator = true - operator_version = var.operator_version - orchestratord_version = var.orchestratord_version - helm_values = var.helm_values - - # Once the operator is installed, you can define your Materialize instances here. 
- materialize_instances = var.materialize_instances - - # Tags - tags = { - Environment = "dev" - Project = "materialize-simple" - Terraform = "true" - } -} - -resource "random_password" "pass" { - length = 20 - special = false -} - - -variable "namespace" { - description = "Namespace for the resources. Used to prefix the names of the resources" - type = string - default = "simple-mz-tf" -} - -variable "environment" { - description = "Environment name" - type = string - default = "dev" -} - -variable "operator_version" { - description = "Version of the Materialize operator to install" - type = string - default = null -} - -variable "orchestratord_version" { - description = "Version of the Materialize orchestrator to install" - type = string - default = null -} - -variable "materialize_instances" { - description = "List of Materialize instances to be created." - type = list(object({ - name = string - namespace = string - database_name = string - environmentd_version = optional(string) - cpu_request = string - memory_request = string - memory_limit = string - create_database = optional(bool) - create_nlb = optional(bool) - internal_nlb = optional(bool) - in_place_rollout = optional(bool, false) - request_rollout = optional(string) - force_rollout = optional(string) - balancer_memory_request = optional(string, "256Mi") - balancer_memory_limit = optional(string, "256Mi") - balancer_cpu_request = optional(string, "100m") - license_key = optional(string) - })) - default = [] -} - -variable "helm_values" { - description = "Additional Helm values to merge with defaults" - type = any - default = {} -} - -variable "install_cert_manager" { - description = "Whether to install cert-manager." - type = bool - default = true -} - -variable "use_self_signed_cluster_issuer" { - description = "Whether to install and use a self-signed ClusterIssuer for TLS. To work around limitations in Terraform, this will be treated as `false` if no materialize instances are defined." 
- type = bool - default = true -} - -# Outputs -output "vpc_id" { - description = "VPC ID" - value = module.materialize_infrastructure.vpc_id -} - -output "eks_cluster_endpoint" { - description = "EKS cluster endpoint" - value = module.materialize_infrastructure.eks_cluster_endpoint -} - -output "eks_cluster_name" { - description = "EKS cluster name" - value = module.materialize_infrastructure.eks_cluster_name -} - -output "database_endpoint" { - description = "RDS instance endpoint" - value = module.materialize_infrastructure.database_endpoint -} - -output "s3_bucket_name" { - description = "Name of the S3 bucket" - value = module.materialize_infrastructure.s3_bucket_name -} - -output "metadata_backend_url" { - description = "PostgreSQL connection URL in the format required by Materialize" - value = module.materialize_infrastructure.metadata_backend_url - sensitive = true -} - -output "persist_backend_url" { - description = "S3 connection URL in the format required by Materialize using IRSA" - value = module.materialize_infrastructure.persist_backend_url -} - -output "oidc_provider_arn" { - description = "The ARN of the OIDC Provider" - value = module.materialize_infrastructure.oidc_provider_arn -} - -output "materialize_s3_role_arn" { - description = "The ARN of the IAM role for Materialize" - value = module.materialize_infrastructure.materialize_s3_role_arn -} - -output "cluster_certificate_authority_data" { - description = "The CA certificate for the EKS cluster" - value = module.materialize_infrastructure.cluster_certificate_authority_data - sensitive = true -} - -output "nlb_details" { - description = "Details of the Materialize instance NLBs." - value = module.materialize_infrastructure.nlb_details -} + tags = {} +} + +# 9. Setup Materialize instance +module "materialize_instance" { + count = var.install_materialize_instance ? 
1 : 0 + + source = "../../modules/materialize-instance" + instance_name = "main" + instance_namespace = "materialize-environment" + metadata_backend_url = local.metadata_backend_url + persist_backend_url = local.persist_backend_url + + depends_on = [ + module.eks, + module.database, + module.storage, + module.networking, + module.certificates, + module.operator, + module.aws_lbc, + module.eks_node_group, + module.openebs, + ] +} + +# 10. Setup dedicated NLB for Materialize instance +module "materialize_nlb" { + count = var.install_materialize_instance && var.create_nlb ? 1 : 0 + + source = "../../modules/nlb" + + instance_name = "main" + name_prefix = var.name_prefix + namespace = "materialize-environment" + subnet_ids = module.networking.private_subnet_ids + enable_cross_zone_load_balancing = true + vpc_id = module.networking.vpc_id + mz_resource_id = module.materialize_instance[0].instance_resource_id + + depends_on = [ + module.materialize_instance + ] +} + +locals { + metadata_backend_url = format( + "postgres://%s:%s@%s/%s?sslmode=require", + module.database.db_instance_username, + urlencode(random_password.database_password.result), + module.database.db_instance_endpoint, + module.database.db_instance_name + ) + + persist_backend_url = format( + "s3://%s/%s:serviceaccount:%s:%s", + module.storage.bucket_name, + var.name_prefix, + "materialize-environment", + "main" + ) +} + +data "aws_caller_identity" "current" {} diff --git a/outputs.tf b/examples/simple/outputs.tf similarity index 70% rename from outputs.tf rename to examples/simple/outputs.tf index 07a8887..194a9f4 100644 --- a/outputs.tf +++ b/examples/simple/outputs.tf @@ -41,10 +41,10 @@ output "s3_bucket_name" { output "metadata_backend_url" { description = "PostgreSQL connection URL in the format required by Materialize" value = format("postgres://%s:%s@%s/%s?sslmode=require", - var.database_username, - var.database_password, + module.database.db_instance_username, + 
urlencode(random_password.database_password.result), module.database.db_instance_endpoint, - var.database_name + module.database.db_instance_name ) sensitive = true } @@ -53,13 +53,12 @@ output "persist_backend_url" { description = "S3 connection URL in the format required by Materialize using IRSA" value = format("s3://%s/%s:serviceaccount:%s:%s", module.storage.bucket_name, - var.environment, + var.name_prefix, var.kubernetes_namespace, var.service_account_name ) } -# oidc_provider_arn output "oidc_provider_arn" { description = "The ARN of the OIDC Provider" value = module.eks.oidc_provider_arn @@ -70,29 +69,15 @@ output "cluster_oidc_issuer_url" { value = module.eks.cluster_oidc_issuer_url } -# aws_iam_role.materialize_s3.arn output "materialize_s3_role_arn" { description = "The ARN of the IAM role for Materialize" - value = aws_iam_role.materialize_s3.arn -} - -output "operator_details" { - description = "Details of the installed Materialize operator" - value = var.install_materialize_operator ? { - namespace = module.operator[0].operator_namespace - release_name = module.operator[0].operator_release_name - release_status = module.operator[0].operator_release_status - instances = module.operator[0].materialize_instances - instance_resource_ids = module.operator[0].materialize_instance_resource_ids - } : null + value = module.operator.materialize_s3_role_arn } output "nlb_details" { description = "Details of the Materialize instance NLBs." 
value = { - for nlb in module.nlb : nlb.instance_name => { - arn = nlb.nlb_arn - dns_name = nlb.nlb_dns_name - } + arn = try(module.materialize_nlb[0].nlb_arn, null) + dns_name = try(module.materialize_nlb[0].nlb_dns_name, null) } } diff --git a/examples/simple/terraform.tfvars.example b/examples/simple/terraform.tfvars.example deleted file mode 100644 index 37b2f6a..0000000 --- a/examples/simple/terraform.tfvars.example +++ /dev/null @@ -1,24 +0,0 @@ -namespace = "mz-self-mngd" // maximum 12 characters, starts with a letter, lowercase alphanumeric and hyphens -environment = "dev" // maximum 8 characters, lowercase alphanumeric only (e.g., dev, test) - -# Once the operator is installed, you can define your Materialize instances here. -# Uncomment the following block (or provide your own instances) to configure them: - -# materialize_instances = [ -# { -# name = "analytics" -# namespace = "materialize-environment" -# database_name = "analytics_db" -# cpu_request = "2" -# memory_request = "4Gi" -# memory_limit = "4Gi" -# }, -# { -# name = "demo" -# namespace = "materialize-environment" -# database_name = "demo_db" -# cpu_request = "2" -# memory_request = "4Gi" -# memory_limit = "4Gi" -# } -# ] diff --git a/examples/simple/variables.tf b/examples/simple/variables.tf new file mode 100644 index 0000000..89afc49 --- /dev/null +++ b/examples/simple/variables.tf @@ -0,0 +1,35 @@ +variable "aws_region" { + description = "The AWS region where the resources will be created." + type = string + default = "us-east-1" +} + +variable "name_prefix" { + description = "A prefix to add to all resource names." + type = string + default = "mz-demo" +} + +variable "install_materialize_instance" { + description = "Whether to install the Materialize instance. Default is false as it requires the Kubernetes cluster to be created first." 
+ type = bool + default = false +} + +variable "create_nlb" { + description = "Whether to create a Network Load Balancer for the Materialize instance" + type = bool + default = true +} + +variable "kubernetes_namespace" { + description = "The Kubernetes namespace for the Materialize resources" + type = string + default = "materialize-environment" +} + +variable "service_account_name" { + description = "Name of the service account" + type = string + default = "12345678-1234-1234-1234-123456789012" +} diff --git a/examples/simple/versions.tf b/examples/simple/versions.tf deleted file mode 120000 index b7707ec..0000000 --- a/examples/simple/versions.tf +++ /dev/null @@ -1 +0,0 @@ -../../versions.tf \ No newline at end of file diff --git a/examples/simple/versions.tf b/examples/simple/versions.tf new file mode 100644 index 0000000..54bd60e --- /dev/null +++ b/examples/simple/versions.tf @@ -0,0 +1,22 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.0" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.0" + } + } +} diff --git a/main.tf b/main.tf deleted file mode 100644 index c505f72..0000000 --- a/main.tf +++ /dev/null @@ -1,397 +0,0 @@ -module "networking" { - source = "./modules/networking" - - # The namespace and environment variables are used to construct the names of the resources - # e.g. 
${namespace}-${environment}-vpc - namespace = var.namespace - environment = var.environment - - vpc_cidr = var.vpc_cidr - availability_zones = var.availability_zones - private_subnet_cidrs = var.private_subnet_cidrs - public_subnet_cidrs = var.public_subnet_cidrs - single_nat_gateway = var.single_nat_gateway - - tags = local.common_tags -} - -module "eks" { - source = "./modules/eks" - - # The namespace and environment variables are used to construct the names of the resources - # e.g. ${namespace}-${environment}-eks - namespace = var.namespace - environment = var.environment - - cluster_version = var.cluster_version - vpc_id = local.network_id - private_subnet_ids = local.network_private_subnet_ids - node_group_desired_size = var.node_group_desired_size - node_group_min_size = var.node_group_min_size - node_group_max_size = var.node_group_max_size - node_group_instance_types = var.node_group_instance_types - node_group_ami_type = var.node_group_ami_type - cluster_enabled_log_types = var.cluster_enabled_log_types - node_group_capacity_type = var.node_group_capacity_type - enable_cluster_creator_admin_permissions = var.enable_cluster_creator_admin_permissions - - install_openebs = local.disk_config.install_openebs - enable_disk_setup = local.disk_config.run_disk_setup_script - openebs_namespace = local.disk_config.openebs_namespace - openebs_version = local.disk_config.openebs_version - - tags = local.common_tags - - depends_on = [ - module.networking, - ] -} - -module "aws_lbc" { - source = "./modules/aws-lbc" - count = var.install_aws_load_balancer_controller ? 
1 : 0 - - name_prefix = local.name_prefix - eks_cluster_name = module.eks.cluster_name - oidc_provider_arn = module.eks.oidc_provider_arn - oidc_issuer_url = module.eks.cluster_oidc_issuer_url - vpc_id = module.networking.vpc_id - region = data.aws_region.current.name - - depends_on = [ - module.eks, - ] -} - -module "storage" { - source = "./modules/storage" - - # The namespace and environment variables are used to construct the names of the resources - # e.g. ${namespace}-${environment}-storage - namespace = var.namespace - environment = var.environment - - bucket_lifecycle_rules = var.bucket_lifecycle_rules - enable_bucket_encryption = var.enable_bucket_encryption - enable_bucket_versioning = var.enable_bucket_versioning - bucket_force_destroy = var.bucket_force_destroy - - tags = local.common_tags -} - -module "database" { - source = "./modules/database" - - # The namespace and environment variables are used to construct the names of the resources - # e.g. ${namespace}-${environment}-db - namespace = var.namespace - environment = var.environment - - postgres_version = var.postgres_version - instance_class = var.db_instance_class - allocated_storage = var.db_allocated_storage - database_name = var.database_name - database_username = var.database_username - multi_az = var.db_multi_az - database_subnet_ids = local.network_private_subnet_ids - vpc_id = local.network_id - eks_security_group_id = module.eks.cluster_security_group_id - eks_node_security_group_id = module.eks.node_security_group_id - max_allocated_storage = var.db_max_allocated_storage - database_password = var.database_password - - tags = local.common_tags - - depends_on = [ - module.networking, - ] -} - -module "certificates" { - source = "./modules/certificates" - - install_cert_manager = var.install_cert_manager - cert_manager_install_timeout = var.cert_manager_install_timeout - cert_manager_chart_version = var.cert_manager_chart_version - use_self_signed_cluster_issuer = 
var.use_self_signed_cluster_issuer && length(var.materialize_instances) > 0 - cert_manager_namespace = var.cert_manager_namespace - name_prefix = local.name_prefix - - depends_on = [ - module.eks, - # The AWS LBC installs webhooks, and all other K8S stuff can fail - # if they are deployed, but the AWS LBC pods aren't up yet. - # This doesn't actually need the LBC, - # but we want to avoid concurrently updating these. - module.aws_lbc, - ] -} - -module "operator" { - source = "github.com/MaterializeInc/terraform-helm-materialize?ref=v0.1.14" - - count = var.install_materialize_operator ? 1 : 0 - - install_metrics_server = var.install_metrics_server - - depends_on = [ - module.eks, - module.database, - module.storage, - module.networking, - module.certificates, - # The AWS LBC installs webhooks, and all other K8S stuff can fail - # if they are deployed, but the AWS LBC pods aren't up yet. - # This doesn't actually need the LBC, - # but we want to avoid concurrently updating these. - module.aws_lbc, - ] - - namespace = var.namespace - environment = var.environment - operator_version = var.operator_version - operator_namespace = var.operator_namespace - - helm_values = local.merged_helm_values - instances = local.instances - - // For development purposes, you can use a local Helm chart instead of fetching it from the Helm repository - use_local_chart = var.use_local_chart - helm_chart = var.helm_chart - - providers = { - kubernetes = kubernetes - helm = helm - } -} - -module "nlb" { - source = "./modules/nlb" - - for_each = { for idx, instance in local.instances : instance.name => instance if lookup(instance, "create_nlb", true) } - - instance_name = each.value.name - name_prefix = "${local.name_prefix}-${each.value.name}" - namespace = each.value.namespace - internal = each.value.internal_nlb - subnet_ids = each.value.internal_nlb ? 
local.network_private_subnet_ids : local.network_public_subnet_ids - enable_cross_zone_load_balancing = each.value.enable_cross_zone_load_balancing - vpc_id = local.network_id - mz_resource_id = module.operator[0].materialize_instance_resource_ids[each.value.name] - - depends_on = [ - module.aws_lbc, - module.operator, - module.eks, - ] -} - -locals { - network_id = var.create_vpc ? module.networking.vpc_id : var.network_id - network_private_subnet_ids = var.create_vpc ? module.networking.private_subnet_ids : var.network_private_subnet_ids - network_public_subnet_ids = var.create_vpc ? module.networking.public_subnet_ids : var.network_public_subnet_ids - - default_helm_values = { - observability = { - podMetrics = { - enabled = true - } - } - operator = { - image = var.orchestratord_version == null ? {} : { - tag = var.orchestratord_version - }, - cloudProvider = { - type = "aws" - region = data.aws_region.current.name - providers = { - aws = { - enabled = true - accountID = data.aws_caller_identity.current.account_id - iam = { - roles = { - environment = aws_iam_role.materialize_s3.arn - } - } - } - } - } - } - storage = var.enable_disk_support ? { - storageClass = { - create = local.disk_config.create_storage_class - name = local.disk_config.storage_class_name - provisioner = local.disk_config.storage_class_provisioner - parameters = local.disk_config.storage_class_parameters - } - } : {} - tls = (var.use_self_signed_cluster_issuer && length(var.materialize_instances) > 0) ? 
{ - defaultCertificateSpecs = { - balancerdExternal = { - dnsNames = [ - "balancerd", - ] - issuerRef = { - name = module.certificates.cluster_issuer_name - kind = "ClusterIssuer" - } - } - consoleExternal = { - dnsNames = [ - "console", - ] - issuerRef = { - name = module.certificates.cluster_issuer_name - kind = "ClusterIssuer" - } - } - internal = { - issuerRef = { - name = module.certificates.cluster_issuer_name - kind = "ClusterIssuer" - } - } - } - } : {} - } - - merged_helm_values = merge(local.default_helm_values, var.helm_values) - - instances = [ - for instance in var.materialize_instances : { - name = instance.name - namespace = instance.namespace - database_name = instance.database_name - create_database = instance.create_database - environmentd_version = instance.environmentd_version - create_nlb = instance.create_nlb - internal_nlb = instance.internal_nlb - enable_cross_zone_load_balancing = instance.enable_cross_zone_load_balancing - - metadata_backend_url = format( - "postgres://%s:%s@%s/%s?sslmode=require", - var.database_username, - urlencode(var.database_password), - module.database.db_instance_endpoint, - coalesce(instance.database_name, instance.name) - ) - - persist_backend_url = format( - "s3://%s/%s-%s:serviceaccount:%s:%s", - module.storage.bucket_name, - var.environment, - instance.name, - coalesce(instance.namespace, var.operator_namespace), - instance.name - ) - - license_key = instance.license_key - - cpu_request = instance.cpu_request - memory_request = instance.memory_request - memory_limit = instance.memory_limit - - balancer_cpu_request = instance.balancer_cpu_request - balancer_memory_request = instance.balancer_memory_request - balancer_memory_limit = instance.balancer_memory_limit - - # Rollout options - in_place_rollout = instance.in_place_rollout - request_rollout = instance.request_rollout - force_rollout = instance.force_rollout - } - ] - - # Common tags that apply to all resources - common_tags = merge( - var.tags, - { - 
Namespace = var.namespace - Environment = var.environment - ManagedBy = "terraform" - } - ) - - # Disk support configuration - disk_config = { - install_openebs = var.enable_disk_support ? lookup(var.disk_support_config, "install_openebs", true) : false - run_disk_setup_script = var.enable_disk_support ? lookup(var.disk_support_config, "run_disk_setup_script", true) : false - create_storage_class = var.enable_disk_support ? lookup(var.disk_support_config, "create_storage_class", true) : false - openebs_version = lookup(var.disk_support_config, "openebs_version", "4.2.0") - openebs_namespace = lookup(var.disk_support_config, "openebs_namespace", "openebs") - storage_class_name = lookup(var.disk_support_config, "storage_class_name", "openebs-lvm-instance-store-ext4") - storage_class_provisioner = lookup(var.disk_support_config, "storage_class_provisioner", "local.csi.openebs.io") - storage_class_parameters = { - storage = try(var.disk_support_config.storage_class_parameters.storage, "lvm") - fsType = try(var.disk_support_config.storage_class_parameters.fsType, "ext4") - volgroup = try(var.disk_support_config.storage_class_parameters.volgroup, "instance-store-vg") - } - } -} - -resource "aws_cloudwatch_log_group" "materialize" { - count = var.enable_monitoring ? 
1 : 0 - - name = "/aws/${var.log_group_name_prefix}/${module.eks.cluster_name}/${var.environment}" - retention_in_days = var.metrics_retention_days - - tags = var.tags -} - -resource "aws_iam_role" "materialize_s3" { - name = "${local.name_prefix}-mz-role" - - # Trust policy allowing EKS to assume this role - assume_role_policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Principal = { - Federated = module.eks.oidc_provider_arn - } - Action = "sts:AssumeRoleWithWebIdentity" - Condition = { - StringLike = { - "${trimprefix(module.eks.cluster_oidc_issuer_url, "https://")}:sub" : "system:serviceaccount:*:*", - "${trimprefix(module.eks.cluster_oidc_issuer_url, "https://")}:aud" : "sts.amazonaws.com" - } - } - } - ] - }) - - tags = local.common_tags - - depends_on = [ - module.eks - ] -} - -resource "aws_iam_role_policy" "materialize_s3" { - name = "${local.name_prefix}-mz-role-policy" - role = aws_iam_role.materialize_s3.id - - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Effect = "Allow" - Action = [ - "s3:GetObject", - "s3:PutObject", - "s3:DeleteObject", - "s3:ListBucket" - ] - Resource = [ - module.storage.bucket_arn, - "${module.storage.bucket_arn}/*" - ] - } - ] - }) -} - -locals { - name_prefix = "${var.namespace}-${var.environment}" -} diff --git a/modules/aws-lbc/README.md b/modules/aws-lbc/README.md new file mode 100644 index 0000000..b760b84 --- /dev/null +++ b/modules/aws-lbc/README.md @@ -0,0 +1,49 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [random](#requirement\_random) | ~> 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 5.0 | +| [helm](#provider\_helm) | ~> 2.0 | +| [kubernetes](#provider\_kubernetes) | ~> 2.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_iam_policy.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | +| [aws_iam_role.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy_attachment.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | +| [helm_release.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_service_account.aws_load_balancer_controller](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service_account) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [eks\_cluster\_name](#input\_eks\_cluster\_name) | Name of the EKS cluster | `string` | n/a | yes | +| [iam\_name](#input\_iam\_name) | Name of the AWS IAM role and policy | `string` | `"albc"` | no | +| [name\_prefix](#input\_name\_prefix) | Prefix to use for AWS LBC resources | `string` | `""` | no | +| [namespace](#input\_namespace) | Namespace to install the AWS LBC | `string` | `"kube-system"` | no | +| [oidc\_issuer\_url](#input\_oidc\_issuer\_url) | URL of the EKS cluster OIDC issuer | `string` | n/a | yes | +| [oidc\_provider\_arn](#input\_oidc\_provider\_arn) | ARN of the EKS cluster OIDC provider | `string` | n/a | yes | +| [region](#input\_region) | AWS region of the VPC | `string` | n/a | yes | +| [service\_account\_name](#input\_service\_account\_name) | Name of the Kubernetes service account used by the AWS LBC | `string` | `"aws-load-balancer-controller"` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC | `string` | n/a | yes | + +## Outputs + +No outputs. 
diff --git a/modules/certificates/README.md b/modules/certificates/README.md new file mode 100644 index 0000000..c1ca81d --- /dev/null +++ b/modules/certificates/README.md @@ -0,0 +1,47 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [helm](#requirement\_helm) | >= 2.5.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10.0 | +| [random](#requirement\_random) | ~> 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [helm](#provider\_helm) | >= 2.5.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.10.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [helm_release.cert_manager](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_manifest.root_ca_cluster_issuer](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_manifest.self_signed_cluster_issuer](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_manifest.self_signed_root_ca_certificate](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_namespace.cert_manager](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [cert\_manager\_chart\_version](#input\_cert\_manager\_chart\_version) | Version of the cert-manager helm chart to install. | `string` | n/a | yes | +| [cert\_manager\_install\_timeout](#input\_cert\_manager\_install\_timeout) | Timeout for installing the cert-manager helm chart, in seconds. 
| `number` | n/a | yes | +| [cert\_manager\_namespace](#input\_cert\_manager\_namespace) | The name of the namespace in which cert-manager is or will be installed. | `string` | n/a | yes | +| [install\_cert\_manager](#input\_install\_cert\_manager) | Whether to install cert-manager. | `bool` | n/a | yes | +| [name\_prefix](#input\_name\_prefix) | The name prefix to use for Kubernetes resources. Does not apply to cert-manager itself, as that is a singleton per cluster. | `string` | n/a | yes | +| [use\_self\_signed\_cluster\_issuer](#input\_use\_self\_signed\_cluster\_issuer) | Whether to install and use a self-signed ClusterIssuer for TLS. Due to limitations in Terraform, this may not be enabled before the cert-manager CRDs are installed. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_issuer\_name](#output\_cluster\_issuer\_name) | Name of the ClusterIssuer | diff --git a/modules/certificates/main.tf b/modules/certificates/main.tf index 62af8e7..f5c679a 100644 --- a/modules/certificates/main.tf +++ b/modules/certificates/main.tf @@ -23,6 +23,11 @@ resource "helm_release" "cert_manager" { value = "true" } + set { + name = "installCRDs" + value = "true" + } + depends_on = [ kubernetes_namespace.cert_manager, ] diff --git a/modules/certificates/variables.tf b/modules/certificates/variables.tf index 2de3f2f..914a8d1 100644 --- a/modules/certificates/variables.tf +++ b/modules/certificates/variables.tf @@ -6,6 +6,7 @@ variable "install_cert_manager" { variable "use_self_signed_cluster_issuer" { description = "Whether to install and use a self-signed ClusterIssuer for TLS. Due to limitations in Terraform, this may not be enabled before the cert-manager CRDs are installed." 
type = bool + default = false } variable "cert_manager_namespace" { diff --git a/modules/certificates/versions.tf b/modules/certificates/versions.tf index 54bd60e..d668da7 100644 --- a/modules/certificates/versions.tf +++ b/modules/certificates/versions.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 1.0" + required_version = ">= 1.0.0" required_providers { aws = { @@ -8,11 +8,11 @@ terraform { } kubernetes = { source = "hashicorp/kubernetes" - version = "~> 2.0" + version = ">= 2.10.0" } helm = { source = "hashicorp/helm" - version = "~> 2.0" + version = ">= 2.5.0" } random = { source = "hashicorp/random" diff --git a/modules/database/README.md b/modules/database/README.md new file mode 100644 index 0000000..b96e2e6 --- /dev/null +++ b/modules/database/README.md @@ -0,0 +1,57 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 5.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [db](#module\_db) | terraform-aws-modules/rds/aws | ~> 6.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_security_group.database](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [allocated\_storage](#input\_allocated\_storage) | Allocated storage for the RDS instance (in GB) | `number` | n/a | yes | +| [backup\_retention\_period](#input\_backup\_retention\_period) | Number of days to retain backups | `number` | `7` | no | +| [backup\_window](#input\_backup\_window) | Preferred backup window | `string` | `"03:00-06:00"` | no | +| [database\_name](#input\_database\_name) | Name of the database to create | `string` | n/a | yes | +| [database\_password](#input\_database\_password) 
| Password for the database | `string` | n/a | yes | +| [database\_subnet\_ids](#input\_database\_subnet\_ids) | List of subnet IDs for the database | `list(string)` | n/a | yes | +| [database\_username](#input\_database\_username) | Username for the database | `string` | n/a | yes | +| [eks\_node\_security\_group\_id](#input\_eks\_node\_security\_group\_id) | Security group ID of the EKS nodes | `string` | n/a | yes | +| [eks\_security\_group\_id](#input\_eks\_security\_group\_id) | Security group ID of the EKS cluster | `string` | n/a | yes | +| [instance\_class](#input\_instance\_class) | Instance class for the RDS instance | `string` | n/a | yes | +| [maintenance\_window](#input\_maintenance\_window) | Preferred maintenance window | `string` | `"Mon:00:00-Mon:03:00"` | no | +| [max\_allocated\_storage](#input\_max\_allocated\_storage) | Maximum storage for autoscaling (in GB) | `number` | n/a | yes | +| [multi\_az](#input\_multi\_az) | Enable multi-AZ deployment | `bool` | `false` | no | +| [name\_prefix](#input\_name\_prefix) | Prefix for all resource names (replaces separate namespace and environment variables) | `string` | n/a | yes | +| [postgres\_version](#input\_postgres\_version) | Version of PostgreSQL to use | `string` | n/a | yes | +| [tags](#input\_tags) | Tags to apply to all resources | `map(string)` | `{}` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [db\_instance\_endpoint](#output\_db\_instance\_endpoint) | The connection endpoint for the RDS instance | +| [db\_instance\_id](#output\_db\_instance\_id) | The RDS instance ID | +| [db\_instance\_name](#output\_db\_instance\_name) | The database name | +| [db\_instance\_port](#output\_db\_instance\_port) | The database port | +| [db\_instance\_username](#output\_db\_instance\_username) | The master username for the database | +| [db\_security\_group\_id](#output\_db\_security\_group\_id) | The 
security group ID of the database | diff --git a/modules/database/main.tf b/modules/database/main.tf index e03acb9..a04bcbd 100644 --- a/modules/database/main.tf +++ b/modules/database/main.tf @@ -1,12 +1,8 @@ -locals { - name_prefix = "${var.namespace}-${var.environment}" -} - module "db" { source = "terraform-aws-modules/rds/aws" version = "~> 6.0" - identifier = "${local.name_prefix}-db" + identifier = "${var.name_prefix}-db" engine = "postgres" engine_version = var.postgres_version @@ -29,7 +25,7 @@ module "db" { subnet_ids = var.database_subnet_ids vpc_security_group_ids = [aws_security_group.database.id] create_db_subnet_group = true - db_subnet_group_name = "${local.name_prefix}-db-subnet" + db_subnet_group_name = "${var.name_prefix}-db-subnet" maintenance_window = var.maintenance_window backup_window = var.backup_window @@ -42,7 +38,7 @@ module "db" { } resource "aws_security_group" "database" { - name_prefix = "${local.name_prefix}-sg-" + name_prefix = "${var.name_prefix}-sg-" vpc_id = var.vpc_id ingress { @@ -69,7 +65,7 @@ resource "aws_security_group" "database" { } tags = merge(var.tags, { - Name = "${local.name_prefix}-sg" + Name = "${var.name_prefix}-sg" }) lifecycle { diff --git a/modules/database/variables.tf b/modules/database/variables.tf index 518f78d..aa216e6 100644 --- a/modules/database/variables.tf +++ b/modules/database/variables.tf @@ -1,10 +1,5 @@ -variable "namespace" { - description = "Namespace prefix for all resources" - type = string -} - -variable "environment" { - description = "Environment name" +variable "name_prefix" { + description = "Prefix for all resource names (replaces separate namespace and environment variables)" type = string } diff --git a/modules/eks-node-group/README.md b/modules/eks-node-group/README.md new file mode 100644 index 0000000..029bb1c --- /dev/null +++ b/modules/eks-node-group/README.md @@ -0,0 +1,46 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 
1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [node\_group](#module\_node\_group) | terraform-aws-modules/eks/aws//modules/eks-managed-node-group | ~> 20.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [ami\_type](#input\_ami\_type) | AMI type for the node group. | `string` | `"AL2023_ARM_64_STANDARD"` | no | +| [capacity\_type](#input\_capacity\_type) | Capacity type for worker nodes (ON\_DEMAND or SPOT). | `string` | `"ON_DEMAND"` | no | +| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster to attach the node group to. | `string` | n/a | yes | +| [cluster\_primary\_security\_group\_id](#input\_cluster\_primary\_security\_group\_id) | The ID of the primary security group for the cluster | `string` | n/a | yes | +| [cluster\_service\_cidr](#input\_cluster\_service\_cidr) | The CIDR block for the cluster service | `string` | n/a | yes | +| [desired\_size](#input\_desired\_size) | Desired number of worker nodes. | `number` | `1` | no | +| [enable\_disk\_setup](#input\_enable\_disk\_setup) | Whether to enable disk setup using the bootstrap script | `bool` | `true` | no | +| [instance\_types](#input\_instance\_types) | Instance types for worker nodes.

Recommended Configuration for Running Materialize with disk:
- Tested instance types: `r6gd`, `r7gd` families (ARM-based Graviton instances)
- Enable disk setup when using `r7gd`
- Note: Ensure instance store volumes are available and attached to the nodes for optimal performance with disk-based workloads. | `list(string)` |
[
"r7gd.2xlarge"
]
| no | +| [labels](#input\_labels) | Labels to apply to the node group. | `map(string)` | `{}` | no | +| [max\_size](#input\_max\_size) | Maximum number of worker nodes. | `number` | `4` | no | +| [min\_size](#input\_min\_size) | Minimum number of worker nodes. | `number` | `1` | no | +| [node\_group\_name](#input\_node\_group\_name) | Name of the node group. | `string` | n/a | yes | +| [subnet\_ids](#input\_subnet\_ids) | List of subnet IDs for the node group. | `list(string)` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [node\_group\_arn](#output\_node\_group\_arn) | ARN of the EKS managed node group. | +| [node\_group\_id](#output\_node\_group\_id) | ID of the EKS managed node group. | diff --git a/modules/eks-node-group/bootstrap.sh b/modules/eks-node-group/bootstrap.sh new file mode 100644 index 0000000..a1815d0 --- /dev/null +++ b/modules/eks-node-group/bootstrap.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -xeuo pipefail + +echo "Starting NVMe disk setup" + +# Install required tools +yum install -y nvme-cli lvm2 + +# Check if NVMe instance storage is available +if ! 
nvme list | grep -q "Amazon EC2 NVMe Instance Storage"; then + echo "No NVMe instance storage devices found" + exit 0 +fi + +# Collect NVMe instance storage devices +mapfile -t SSD_NVME_DEVICE_LIST < <(nvme list | grep "Amazon EC2 NVMe Instance Storage" | awk '{print $1}' || true) + +echo "Found NVMe devices: ${SSD_NVME_DEVICE_LIST[*]:-none}" + +if [ ${#SSD_NVME_DEVICE_LIST[@]} -eq 0 ]; then + echo "No usable NVMe instance storage devices found" + exit 0 +fi + +# Create physical volumes +for device in "${SSD_NVME_DEVICE_LIST[@]}"; do + pvcreate -f "$device" +done + +# Create volume group +vgcreate instance-store-vg "${SSD_NVME_DEVICE_LIST[@]}" + +# Display results +pvs +vgs + +echo "Disk setup completed" diff --git a/modules/eks-node-group/main.tf b/modules/eks-node-group/main.tf new file mode 100644 index 0000000..7f51334 --- /dev/null +++ b/modules/eks-node-group/main.tf @@ -0,0 +1,29 @@ +locals { + disk_setup_script = file("${path.module}/bootstrap.sh") +} + +module "node_group" { + source = "terraform-aws-modules/eks/aws//modules/eks-managed-node-group" + version = "~> 20.0" + + cluster_name = var.cluster_name + subnet_ids = var.subnet_ids + name = var.node_group_name + desired_size = var.desired_size + min_size = var.min_size + max_size = var.max_size + instance_types = var.instance_types + capacity_type = var.capacity_type + ami_type = var.ami_type + labels = var.labels + + cloudinit_pre_nodeadm = var.enable_disk_setup ? [ + { + content_type = "text/x-shellscript" + content = local.disk_setup_script + } + ] : [] + + cluster_service_cidr = var.cluster_service_cidr + cluster_primary_security_group_id = var.cluster_primary_security_group_id +} diff --git a/modules/eks-node-group/outputs.tf b/modules/eks-node-group/outputs.tf new file mode 100644 index 0000000..df0428a --- /dev/null +++ b/modules/eks-node-group/outputs.tf @@ -0,0 +1,9 @@ +output "node_group_arn" { + description = "ARN of the EKS managed node group." 
+ value = module.node_group.node_group_arn +} + +output "node_group_id" { + description = "ID of the EKS managed node group." + value = module.node_group.node_group_id +} diff --git a/modules/eks-node-group/variables.tf b/modules/eks-node-group/variables.tf new file mode 100644 index 0000000..462c5f8 --- /dev/null +++ b/modules/eks-node-group/variables.tf @@ -0,0 +1,79 @@ +variable "cluster_name" { + description = "Name of the EKS cluster to attach the node group to." + type = string +} + +variable "subnet_ids" { + description = "List of subnet IDs for the node group." + type = list(string) +} + +variable "node_group_name" { + description = "Name of the node group." + type = string +} + +variable "desired_size" { + description = "Desired number of worker nodes." + type = number + default = 1 +} + +variable "min_size" { + description = "Minimum number of worker nodes." + type = number + default = 1 +} + +variable "max_size" { + description = "Maximum number of worker nodes." + type = number + default = 4 +} + +variable "instance_types" { + description = < [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [eks](#module\_eks) | terraform-aws-modules/eks/aws | ~> 20.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [cluster\_enabled\_log\_types](#input\_cluster\_enabled\_log\_types) | List of desired control plane logging to enable | `list(string)` |
[
"api",
"audit",
"authenticator",
"controllerManager",
"scheduler"
]
| no | +| [cluster\_version](#input\_cluster\_version) | Kubernetes version for the EKS cluster | `string` | n/a | yes | +| [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | To add the current caller identity as an administrator | `bool` | `true` | no | +| [name\_prefix](#input\_name\_prefix) | Prefix for all resource names (replaces separate namespace and environment variables) | `string` | n/a | yes | +| [private\_subnet\_ids](#input\_private\_subnet\_ids) | List of private subnet IDs for EKS | `list(string)` | n/a | yes | +| [tags](#input\_tags) | Tags to apply to all resources | `map(string)` | `{}` | no | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC where EKS will be created | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster | +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane | +| [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name for the cluster | +| [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster | +| [cluster\_name](#output\_cluster\_name) | Name of the EKS cluster | +| [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ID attached to the EKS cluster | +| [cluster\_service\_cidr](#output\_cluster\_service\_cidr) | The CIDR block for the cluster service | +| [node\_security\_group\_id](#output\_node\_security\_group\_id) | Security group ID attached to the EKS nodes | +| [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider | diff --git a/modules/eks/main.tf b/modules/eks/main.tf index 2020095..11d6ba4 100644 --- 
a/modules/eks/main.tf +++ b/modules/eks/main.tf @@ -1,14 +1,8 @@ -locals { - name_prefix = "${var.namespace}-${var.environment}" - - disk_setup_script = file("${path.module}/bootstrap.sh") -} - module "eks" { source = "terraform-aws-modules/eks/aws" version = "~> 20.0" - cluster_name = "${local.name_prefix}-eks" + cluster_name = "${var.name_prefix}-eks" cluster_version = var.cluster_version @@ -19,35 +13,6 @@ module "eks" { cluster_enabled_log_types = var.cluster_enabled_log_types - eks_managed_node_groups = { - "${local.name_prefix}-mz" = { - desired_size = var.node_group_desired_size - min_size = var.node_group_min_size - max_size = var.node_group_max_size - - instance_types = var.node_group_instance_types - capacity_type = var.node_group_capacity_type - ami_type = var.node_group_ami_type - - name = local.name_prefix - - labels = { - Environment = var.environment - GithubRepo = "materialize" - "materialize.cloud/disk" = var.enable_disk_setup ? "true" : "false" - "workload" = "materialize-instance" - } - - cloudinit_pre_nodeadm = var.enable_disk_setup ? [ - { - content_type = "text/x-shellscript" - content = local.disk_setup_script - } - ] : [] - - } - } - node_security_group_additional_rules = { mz_ingress_http = { description = "Ingress to materialize balancers HTTP" @@ -84,29 +49,3 @@ module "eks" { tags = var.tags } - -# Install OpenEBS for lgalloc support -resource "kubernetes_namespace" "openebs" { - count = var.install_openebs ? 1 : 0 - - metadata { - name = var.openebs_namespace - } -} - -resource "helm_release" "openebs" { - count = var.install_openebs ? 
1 : 0 - - name = "openebs" - namespace = kubernetes_namespace.openebs[0].metadata[0].name - repository = "https://openebs.github.io/openebs" - chart = "openebs" - version = var.openebs_version - - set { - name = "engines.replicated.mayastor.enabled" - value = "false" - } - - depends_on = [kubernetes_namespace.openebs] -} diff --git a/modules/eks/outputs.tf b/modules/eks/outputs.tf index 1ba5e27..fc8bb47 100644 --- a/modules/eks/outputs.tf +++ b/modules/eks/outputs.tf @@ -42,3 +42,8 @@ output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster for the OpenID Connect identity provider" value = module.eks.cluster_oidc_issuer_url } + +output "cluster_service_cidr" { + description = "The CIDR block for the cluster service" + value = module.eks.cluster_service_cidr +} diff --git a/modules/eks/variables.tf b/modules/eks/variables.tf index fd468b1..c9efa8f 100644 --- a/modules/eks/variables.tf +++ b/modules/eks/variables.tf @@ -1,10 +1,5 @@ -variable "namespace" { - description = "Namespace prefix for all resources" - type = string -} - -variable "environment" { - description = "Environment name" +variable "name_prefix" { + description = "Prefix for all resource names (replaces separate namespace and environment variables)" type = string } @@ -23,32 +18,6 @@ variable "private_subnet_ids" { type = list(string) } -variable "node_group_desired_size" { - description = "Desired number of worker nodes" - type = number -} - -variable "node_group_min_size" { - description = "Minimum number of worker nodes" - type = number -} - -variable "node_group_max_size" { - description = "Maximum number of worker nodes" - type = number -} - -variable "node_group_instance_types" { - description = "List of instance types for the node group" - type = list(string) -} - -variable "node_group_ami_type" { - description = "AMI type for the node group" - type = string - default = "AL2023_x86_64_STANDARD" -} - variable "cluster_enabled_log_types" { description = "List of desired 
control plane logging to enable" type = list(string) @@ -61,39 +30,8 @@ variable "tags" { default = {} } -variable "node_group_capacity_type" { - description = "Capacity type for worker nodes (ON_DEMAND or SPOT)" - type = string - default = "ON_DEMAND" -} - variable "enable_cluster_creator_admin_permissions" { description = "To add the current caller identity as an administrator" type = bool default = true } - -# OpenEBS configuration -variable "install_openebs" { - description = "Whether to install OpenEBS for NVMe storage" - type = bool - default = true -} - -variable "openebs_namespace" { - description = "Namespace for OpenEBS components" - type = string - default = "openebs" -} - -variable "openebs_version" { - description = "Version of OpenEBS Helm chart to install" - type = string - default = "4.2.0" -} - -variable "enable_disk_setup" { - description = "Whether to enable disk setup using the bootstrap script" - type = bool - default = true -} diff --git a/modules/materialize-instance/README.md b/modules/materialize-instance/README.md new file mode 100644 index 0000000..c01559b --- /dev/null +++ b/modules/materialize-instance/README.md @@ -0,0 +1,59 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | + +## Providers + +| Name | Version | +|------|---------| +| [kubernetes](#provider\_kubernetes) | ~> 2.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [kubernetes_manifest.materialize_instance](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | +| [kubernetes_namespace.instance](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_secret.materialize_backend](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource | +| [kubernetes_resource.materialize_instance](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/resource) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [balancer\_cpu\_request](#input\_balancer\_cpu\_request) | CPU request for balancer | `string` | `"100m"` | no | +| [balancer\_memory\_limit](#input\_balancer\_memory\_limit) | Memory limit for balancer | `string` | `"256Mi"` | no | +| [balancer\_memory\_request](#input\_balancer\_memory\_request) | Memory request for balancer | `string` | `"256Mi"` | no | +| [cpu\_request](#input\_cpu\_request) | CPU request for environmentd | `string` | `"1"` | no | +| [create\_namespace](#input\_create\_namespace) | Whether to create the Kubernetes namespace. Set to false if the namespace already exists. | `bool` | `true` | no | +| [environmentd\_extra\_args](#input\_environmentd\_extra\_args) | Extra command line arguments for environmentd | `list(string)` | `[]` | no | +| [environmentd\_extra\_env](#input\_environmentd\_extra\_env) | Extra environment variables for environmentd |
list(object({
name = string
value = string
}))
| `[]` | no | +| [environmentd\_version](#input\_environmentd\_version) | Version of environmentd to use | `string` | `"v0.130.13"` | no | +| [force\_rollout](#input\_force\_rollout) | UUID to force a rollout | `string` | `"00000000-0000-0000-0000-000000000001"` | no | +| [in\_place\_rollout](#input\_in\_place\_rollout) | Whether to perform in-place rollouts | `bool` | `true` | no | +| [instance\_name](#input\_instance\_name) | Name of the Materialize instance | `string` | n/a | yes | +| [instance\_namespace](#input\_instance\_namespace) | Kubernetes namespace for the instance. | `string` | n/a | yes | +| [license\_key](#input\_license\_key) | Materialize license key | `string` | `null` | no | +| [memory\_limit](#input\_memory\_limit) | Memory limit for environmentd | `string` | `"1Gi"` | no | +| [memory\_request](#input\_memory\_request) | Memory request for environmentd | `string` | `"1Gi"` | no | +| [metadata\_backend\_url](#input\_metadata\_backend\_url) | PostgreSQL connection URL for metadata backend | `string` | n/a | yes | +| [persist\_backend\_url](#input\_persist\_backend\_url) | S3 connection URL for persist backend | `string` | n/a | yes | +| [request\_rollout](#input\_request\_rollout) | UUID to request a rollout | `string` | `"00000000-0000-0000-0000-000000000001"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instance\_name](#output\_instance\_name) | Name of the Materialize instance | +| [instance\_namespace](#output\_instance\_namespace) | Namespace of the Materialize instance | +| [instance\_resource\_id](#output\_instance\_resource\_id) | Resource ID of the Materialize instance | +| [metadata\_backend\_url](#output\_metadata\_backend\_url) | Metadata backend URL used by the Materialize instance | +| [persist\_backend\_url](#output\_persist\_backend\_url) | Persist backend URL used by the Materialize instance | diff --git a/modules/materialize-instance/main.tf b/modules/materialize-instance/main.tf new file mode 100644 
index 0000000..adfad29 --- /dev/null +++ b/modules/materialize-instance/main.tf @@ -0,0 +1,99 @@ +# Create a namespace for this Materialize instance +resource "kubernetes_namespace" "instance" { + count = var.create_namespace ? 1 : 0 + + metadata { + name = var.instance_namespace + } +} + +# Create the Materialize instance using the kubernetes_manifest resource +resource "kubernetes_manifest" "materialize_instance" { + field_manager { + # force field manager conflicts to be overridden + name = "terraform" + force_conflicts = true + } + + manifest = { + apiVersion = "materialize.cloud/v1alpha1" + kind = "Materialize" + metadata = { + name = var.instance_name + namespace = var.instance_namespace + } + spec = { + environmentdImageRef = "materialize/environmentd:${var.environmentd_version}" + backendSecretName = "${var.instance_name}-materialize-backend" + inPlaceRollout = var.in_place_rollout + requestRollout = var.request_rollout + forceRollout = var.force_rollout + + environmentdExtraEnv = length(var.environmentd_extra_env) > 0 ? [{ + name = "MZ_SYSTEM_PARAMETER_DEFAULT" + value = join(";", [ + for item in var.environmentd_extra_env : + "${item.name}=${item.value}" + ]) + }] : null + + environmentdExtraArgs = length(var.environmentd_extra_args) > 0 ? 
var.environmentd_extra_args : null + + environmentdResourceRequirements = { + limits = { + memory = var.memory_limit + } + requests = { + cpu = var.cpu_request + memory = var.memory_request + } + } + balancerdResourceRequirements = { + limits = { + memory = var.balancer_memory_limit + } + requests = { + cpu = var.balancer_cpu_request + memory = var.balancer_memory_request + } + } + } + } + + depends_on = [ + kubernetes_secret.materialize_backend, + kubernetes_namespace.instance, + ] +} + +# Create a secret with connection information for the Materialize instance +resource "kubernetes_secret" "materialize_backend" { + metadata { + name = "${var.instance_name}-materialize-backend" + namespace = var.instance_namespace + } + + data = { + metadata_backend_url = var.metadata_backend_url + persist_backend_url = var.persist_backend_url + license_key = var.license_key == null ? "" : var.license_key + } + + depends_on = [ + kubernetes_namespace.instance + ] +} + +# Retrieve the resource ID of the Materialize instance +data "kubernetes_resource" "materialize_instance" { + api_version = "materialize.cloud/v1alpha1" + kind = "Materialize" + metadata { + name = var.instance_name + namespace = var.instance_namespace + } + + depends_on = [ + kubernetes_manifest.materialize_instance + ] +} diff --git a/modules/materialize-instance/outputs.tf b/modules/materialize-instance/outputs.tf new file mode 100644 index 0000000..b94360b --- /dev/null +++ b/modules/materialize-instance/outputs.tf @@ -0,0 +1,24 @@ +output "instance_name" { + description = "Name of the Materialize instance" + value = var.instance_name +} + +output "instance_namespace" { + description = "Namespace of the Materialize instance" + value = var.instance_namespace +} + +output "instance_resource_id" { + description = "Resource ID of the Materialize instance" + value = data.kubernetes_resource.materialize_instance.object.status.resourceId +} + +output "metadata_backend_url" { + description = "Metadata backend URL used 
by the Materialize instance" + value = var.metadata_backend_url +} + +output "persist_backend_url" { + description = "Persist backend URL used by the Materialize instance" + value = var.persist_backend_url +} diff --git a/modules/materialize-instance/variables.tf b/modules/materialize-instance/variables.tf new file mode 100644 index 0000000..9fa1372 --- /dev/null +++ b/modules/materialize-instance/variables.tf @@ -0,0 +1,122 @@ +variable "instance_name" { + description = "Name of the Materialize instance" + type = string +} + +variable "create_namespace" { + description = "Whether to create the Kubernetes namespace. Set to false if the namespace already exists." + type = bool + default = true +} + +variable "instance_namespace" { + description = "Kubernetes namespace for the instance." + type = string +} + +variable "metadata_backend_url" { + description = "PostgreSQL connection URL for metadata backend" + type = string + sensitive = true +} + +variable "persist_backend_url" { + description = "S3 connection URL for persist backend" + type = string +} + +variable "license_key" { + description = "Materialize license key" + type = string + default = null + sensitive = true +} + +# Environmentd Configuration +variable "environmentd_version" { + description = "Version of environmentd to use" + type = string + default = "v0.130.13" # META: mz version +} + +variable "environmentd_extra_env" { + description = "Extra environment variables for environmentd" + type = list(object({ + name = string + value = string + })) + default = [] +} + +variable "environmentd_extra_args" { + description = "Extra command line arguments for environmentd" + type = list(string) + default = [] +} + +# Resource Requirements +variable "cpu_request" { + description = "CPU request for environmentd" + type = string + default = "1" +} + +variable "memory_request" { + description = "Memory request for environmentd" + type = string + default = "1Gi" +} + +variable "memory_limit" { + description = 
"Memory limit for environmentd" + type = string + default = "1Gi" +} + +# Rollout Configuration +variable "in_place_rollout" { + description = "Whether to perform in-place rollouts" + type = bool + default = true +} + +variable "request_rollout" { + description = "UUID to request a rollout" + type = string + default = "00000000-0000-0000-0000-000000000001" + + validation { + condition = can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", var.request_rollout)) + error_message = "Request rollout must be a valid UUID in the format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + } +} + +variable "force_rollout" { + description = "UUID to force a rollout" + type = string + default = "00000000-0000-0000-0000-000000000001" + + validation { + condition = can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", var.force_rollout)) + error_message = "Force rollout must be a valid UUID in the format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + } +} + +# Balancer Resource Requirements +variable "balancer_memory_request" { + description = "Memory request for balancer" + type = string + default = "256Mi" +} + +variable "balancer_memory_limit" { + description = "Memory limit for balancer" + type = string + default = "256Mi" +} + +variable "balancer_cpu_request" { + description = "CPU request for balancer" + type = string + default = "100m" +} diff --git a/modules/materialize-instance/versions.tf b/modules/materialize-instance/versions.tf new file mode 100644 index 0000000..ba8641a --- /dev/null +++ b/modules/materialize-instance/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.0" + } + } +} diff --git a/modules/networking/README.md b/modules/networking/README.md new file mode 100644 index 0000000..0a396b5 --- /dev/null +++ 
b/modules/networking/README.md @@ -0,0 +1,43 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 5.0 | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [availability\_zones](#input\_availability\_zones) | List of availability zones | `list(string)` | n/a | yes | +| [create\_vpc](#input\_create\_vpc) | Controls if VPC should be created (it affects almost all resources) | `bool` | `true` | no | +| [name\_prefix](#input\_name\_prefix) | Prefix for all resource names (replaces separate namespace and environment variables) | `string` | n/a | yes | +| [private\_subnet\_cidrs](#input\_private\_subnet\_cidrs) | CIDR blocks for private subnets | `list(string)` | n/a | yes | +| [public\_subnet\_cidrs](#input\_public\_subnet\_cidrs) | CIDR blocks for public subnets | `list(string)` | n/a | yes | +| [single\_nat\_gateway](#input\_single\_nat\_gateway) | Use a single NAT Gateway for all private subnets | `bool` | `false` | no | +| [tags](#input\_tags) | Tags to apply to all resources | `map(string)` | `{}` | no | +| [vpc\_cidr](#input\_vpc\_cidr) | CIDR block for VPC | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [nat\_public\_ips](#output\_nat\_public\_ips) | List of public Elastic IPs created for AWS NAT Gateway | +| [private\_subnet\_ids](#output\_private\_subnet\_ids) | List of private subnet IDs | +| [public\_subnet\_ids](#output\_public\_subnet\_ids) | List of public subnet IDs | +| [vpc\_cidr\_block](#output\_vpc\_cidr\_block) | The CIDR block of the VPC | +| [vpc\_id](#output\_vpc\_id) | The ID of the VPC | diff --git a/modules/networking/main.tf 
b/modules/networking/main.tf index 12bb2f7..3ca109b 100644 --- a/modules/networking/main.tf +++ b/modules/networking/main.tf @@ -1,14 +1,10 @@ -locals { - name_prefix = "${var.namespace}-${var.environment}" -} - module "vpc" { source = "terraform-aws-modules/vpc/aws" version = "~> 5.0" create_vpc = var.create_vpc - name = "${local.name_prefix}-vpc" + name = "${var.name_prefix}-vpc" cidr = var.vpc_cidr azs = var.availability_zones @@ -22,13 +18,13 @@ module "vpc" { # Tags required for EKS private_subnet_tags = { - "kubernetes.io/role/internal-elb" = "1" - "kubernetes.io/cluster/${local.name_prefix}-eks" = "shared" + "kubernetes.io/role/internal-elb" = "1" + "kubernetes.io/cluster/${var.name_prefix}-eks" = "shared" } public_subnet_tags = { - "kubernetes.io/role/elb" = "1" - "kubernetes.io/cluster/${local.name_prefix}-eks" = "shared" + "kubernetes.io/role/elb" = "1" + "kubernetes.io/cluster/${var.name_prefix}-eks" = "shared" } tags = var.tags diff --git a/modules/networking/variables.tf b/modules/networking/variables.tf index 4093125..62d9061 100644 --- a/modules/networking/variables.tf +++ b/modules/networking/variables.tf @@ -1,10 +1,5 @@ -variable "namespace" { - description = "Namespace prefix for all resources" - type = string -} - -variable "environment" { - description = "Environment name" +variable "name_prefix" { + description = "Prefix for all resource names (replaces separate namespace and environment variables)" type = string } diff --git a/modules/nlb/README.md b/modules/nlb/README.md new file mode 100644 index 0000000..ed6ffaf --- /dev/null +++ b/modules/nlb/README.md @@ -0,0 +1,50 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [random](#requirement\_random) | ~> 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 
5.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [target\_console](#module\_target\_console) | ./target | n/a | +| [target\_http](#module\_target\_http) | ./target | n/a | +| [target\_pgwire](#module\_target\_pgwire) | ./target | n/a | + +## Resources + +| Name | Type | +|------|------| +| [aws_lb.nlb](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [enable\_cross\_zone\_load\_balancing](#input\_enable\_cross\_zone\_load\_balancing) | Whether to enable cross zone load balancing on the NLB. | `bool` | `true` | no | +| [instance\_name](#input\_instance\_name) | The name of the Materialize instance. | `string` | n/a | yes | +| [internal](#input\_internal) | Whether the NLB is internal only. Defaults to true to avoid exposing Materialize to the internet. | `bool` | `true` | no | +| [mz\_resource\_id](#input\_mz\_resource\_id) | The resourceId from the Materialize CR | `string` | n/a | yes | +| [name\_prefix](#input\_name\_prefix) | Prefix to use for NLB, Target Groups, Listeners, and TargetGroupBindings | `string` | n/a | yes | +| [namespace](#input\_namespace) | Kubernetes namespace in which to install TargetGroupBindings | `string` | n/a | yes | +| [subnet\_ids](#input\_subnet\_ids) | A list of subnet IDs in which to install the NLB. Must be in the VPC. | `list(string)` | n/a | yes | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC | `string` | n/a | yes | + +## Outputs + +| Name | Description | +|------|-------------| +| [instance\_name](#output\_instance\_name) | The name of the Materialize instance. | +| [nlb\_arn](#output\_nlb\_arn) | ARN of the Network Load Balancer. | +| [nlb\_dns\_name](#output\_nlb\_dns\_name) | DNS name of the Network Load Balancer. 
| diff --git a/modules/nlb/target/README.md b/modules/nlb/target/README.md new file mode 100644 index 0000000..53fe405 --- /dev/null +++ b/modules/nlb/target/README.md @@ -0,0 +1,44 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | +| [random](#requirement\_random) | ~> 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 5.0 | +| [kubernetes](#provider\_kubernetes) | ~> 2.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [aws_lb_listener.listener](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener) | resource | +| [aws_lb_target_group.target_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_target_group) | resource | +| [kubernetes_manifest.target_group_binding](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/manifest) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [health\_check\_path](#input\_health\_check\_path) | The URL path for target group health checks | `string` | n/a | yes | +| [name](#input\_name) | Name for Target Groups and TargetGroupBindings | `string` | n/a | yes | +| [namespace](#input\_namespace) | Kubernetes namespace in which to install TargetGroupBindings | `string` | n/a | yes | +| [nlb\_arn](#input\_nlb\_arn) | ARN of the NLB | `string` | n/a | yes | +| [port](#input\_port) | Port for the NLB listener and Kubernetes service | `number` | n/a | yes | +| [service\_name](#input\_service\_name) | The name of the Kubernetes service to connect to | `string` | n/a | yes | +| [vpc\_id](#input\_vpc\_id) | ID of the VPC | `string` | n/a | yes | + +## Outputs + +No outputs. 
diff --git a/modules/nlb/variables.tf b/modules/nlb/variables.tf index 751b0d0..89d2503 100644 --- a/modules/nlb/variables.tf +++ b/modules/nlb/variables.tf @@ -9,7 +9,7 @@ variable "name_prefix" { } variable "internal" { - description = "Whether the NLB is an internal only NLB." + description = "Whether the NLB is internal only. Defaults to true to avoid exposing Materialize to the internet." type = bool default = true } diff --git a/modules/openebs/README.md b/modules/openebs/README.md new file mode 100644 index 0000000..172714a --- /dev/null +++ b/modules/openebs/README.md @@ -0,0 +1,39 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0.0 | +| [helm](#requirement\_helm) | >= 2.5.0 | +| [kubernetes](#requirement\_kubernetes) | >= 2.10.0 | + +## Providers + +| Name | Version | +|------|---------| +| [helm](#provider\_helm) | >= 2.5.0 | +| [kubernetes](#provider\_kubernetes) | >= 2.10.0 | + +## Modules + +No modules. + +## Resources + +| Name | Type | +|------|------| +| [helm_release.openebs](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_namespace.openebs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [create\_openebs\_namespace](#input\_create\_openebs\_namespace) | Whether to create the OpenEBS namespace. Set to false if the namespace already exists. 
| `bool` | `true` | no | +| [openebs\_namespace](#input\_openebs\_namespace) | Namespace for OpenEBS components | `string` | `"openebs"` | no | +| [openebs\_version](#input\_openebs\_version) | Version of OpenEBS Helm chart to install | `string` | `"4.2.0"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [openebs\_namespace](#output\_openebs\_namespace) | Namespace where OpenEBS is installed | diff --git a/modules/openebs/main.tf b/modules/openebs/main.tf new file mode 100644 index 0000000..465a63e --- /dev/null +++ b/modules/openebs/main.tf @@ -0,0 +1,22 @@ +resource "kubernetes_namespace" "openebs" { + count = var.create_openebs_namespace ? 1 : 0 + + metadata { + name = var.openebs_namespace + } +} + +resource "helm_release" "openebs" { + name = "openebs" + namespace = var.openebs_namespace + repository = "https://openebs.github.io/openebs" + chart = "openebs" + version = var.openebs_version + + set { + name = "engines.replicated.mayastor.enabled" + value = "false" + } + + depends_on = [kubernetes_namespace.openebs] +} diff --git a/modules/openebs/outputs.tf b/modules/openebs/outputs.tf new file mode 100644 index 0000000..5320f2e --- /dev/null +++ b/modules/openebs/outputs.tf @@ -0,0 +1,4 @@ +output "openebs_namespace" { + description = "Namespace where OpenEBS is installed" + value = var.openebs_namespace +} diff --git a/modules/openebs/variables.tf b/modules/openebs/variables.tf new file mode 100644 index 0000000..cffa948 --- /dev/null +++ b/modules/openebs/variables.tf @@ -0,0 +1,17 @@ +variable "openebs_namespace" { + description = "Namespace for OpenEBS components" + type = string + default = "openebs" +} + +variable "create_openebs_namespace" { + description = "Whether to create the OpenEBS namespace. Set to false if the namespace already exists." 
+ type = bool + default = true +} + +variable "openebs_version" { + description = "Version of OpenEBS Helm chart to install" + type = string + default = "4.2.0" +} diff --git a/modules/openebs/versions.tf b/modules/openebs/versions.tf new file mode 100644 index 0000000..f243122 --- /dev/null +++ b/modules/openebs/versions.tf @@ -0,0 +1,14 @@ +terraform { + required_version = ">= 1.0.0" + + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.10.0" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.5.0" + } + } +} diff --git a/modules/operator/README.md b/modules/operator/README.md new file mode 100644 index 0000000..2ab523a --- /dev/null +++ b/modules/operator/README.md @@ -0,0 +1,64 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [helm](#requirement\_helm) | ~> 2.0 | +| [kubernetes](#requirement\_kubernetes) | ~> 2.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 5.0 | +| [helm](#provider\_helm) | ~> 2.0 | +| [kubernetes](#provider\_kubernetes) | ~> 2.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_iam_role.materialize_s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | +| [aws_iam_role_policy.materialize_s3](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy) | resource | +| [helm_release.materialize_operator](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [helm_release.metrics_server](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource | +| [kubernetes_namespace.materialize](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | +| [kubernetes_namespace.monitoring](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_account\_id](#input\_aws\_account\_id) | AWS account ID for the operator Helm values. | `string` | n/a | yes | +| [aws\_region](#input\_aws\_region) | AWS region for the operator Helm values. | `string` | n/a | yes | +| [cluster\_oidc\_issuer\_url](#input\_cluster\_oidc\_issuer\_url) | OIDC issuer URL for the EKS cluster | `string` | n/a | yes | +| [disk\_support\_config](#input\_disk\_support\_config) | Advanced configuration for disk support (only used when enable\_disk\_support = true) |
object({
run_disk_setup_script = optional(bool, true)
create_storage_class = optional(bool, true)
openebs_version = optional(string, "4.2.0")
openebs_namespace = optional(string, "openebs")
storage_class_name = optional(string, "openebs-lvm-instance-store-ext4")
storage_class_provisioner = optional(string, "local.csi.openebs.io")
storage_class_parameters = optional(object({
storage = optional(string, "lvm")
fsType = optional(string, "ext4")
volgroup = optional(string, "instance-store-vg")
}), {})
})
| `{}` | no | +| [enable\_disk\_support](#input\_enable\_disk\_support) | Enable disk support for Materialize using OpenEBS and NVMe instance storage. When enabled, this configures OpenEBS, runs the disk setup script for NVMe devices, and creates appropriate storage classes. | `bool` | `true` | no | +| [helm\_chart](#input\_helm\_chart) | Chart name from repository or local path to chart. For local charts, set the path to the chart directory. | `string` | `"materialize-operator"` | no | +| [helm\_repository](#input\_helm\_repository) | Repository URL for the Materialize operator Helm chart. Leave empty if using local chart. | `string` | `"https://materializeinc.github.io/materialize/"` | no | +| [helm\_values](#input\_helm\_values) | Values to pass to the Helm chart | `any` | `{}` | no | +| [install\_metrics\_server](#input\_install\_metrics\_server) | Whether to install the metrics-server | `bool` | `true` | no | +| [metrics\_server\_version](#input\_metrics\_server\_version) | Version of metrics-server to install | `string` | `"3.12.2"` | no | +| [monitoring\_namespace](#input\_monitoring\_namespace) | Namespace for monitoring resources | `string` | `"monitoring"` | no | +| [name\_prefix](#input\_name\_prefix) | Prefix for all resource names (replaces separate namespace and environment variables) | `string` | n/a | yes | +| [oidc\_provider\_arn](#input\_oidc\_provider\_arn) | ARN of the OIDC provider for the EKS cluster | `string` | n/a | yes | +| [operator\_namespace](#input\_operator\_namespace) | Namespace for the Materialize operator | `string` | `"materialize"` | no | +| [operator\_version](#input\_operator\_version) | Version of the Materialize operator to install | `string` | `"v25.1.12"` | no | +| [orchestratord\_version](#input\_orchestratord\_version) | Version of the Materialize orchestrator to install | `string` | `null` | no | +| [s3\_bucket\_arn](#input\_s3\_bucket\_arn) | ARN of the S3 bucket to allow access to. If null, allows all buckets. 
| `string` | `null` | no | +| [use\_local\_chart](#input\_use\_local\_chart) | Whether to use a local chart instead of one from a repository | `bool` | `false` | no | +| [use\_self\_signed\_cluster\_issuer](#input\_use\_self\_signed\_cluster\_issuer) | Whether to use a self-signed cluster issuer for cert-manager. | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [materialize\_s3\_role\_arn](#output\_materialize\_s3\_role\_arn) | ARN of the IAM role for Materialize S3 access. | +| [operator\_namespace](#output\_operator\_namespace) | Namespace where the operator is installed | +| [operator\_release\_name](#output\_operator\_release\_name) | Helm release name of the operator | +| [operator\_release\_status](#output\_operator\_release\_status) | Status of the helm release | diff --git a/modules/operator/main.tf b/modules/operator/main.tf new file mode 100644 index 0000000..892a2ee --- /dev/null +++ b/modules/operator/main.tf @@ -0,0 +1,187 @@ +resource "kubernetes_namespace" "materialize" { + metadata { + name = var.operator_namespace + } +} + +resource "kubernetes_namespace" "monitoring" { + metadata { + name = var.monitoring_namespace + } +} + +locals { + default_helm_values = { + observability = { + podMetrics = { + enabled = true + } + } + operator = { + image = var.orchestratord_version == null ? {} : { + tag = var.orchestratord_version + }, + cloudProvider = { + type = "aws" + region = var.aws_region + providers = { + aws = { + enabled = true + accountID = var.aws_account_id + iam = { + roles = { + environment = aws_iam_role.materialize_s3.arn + } + } + } + } + } + } + storage = var.enable_disk_support ? { + storageClass = { + create = local.disk_config.create_storage_class + name = local.disk_config.storage_class_name + provisioner = local.disk_config.storage_class_provisioner + parameters = local.disk_config.storage_class_parameters + } + } : {} + tls = var.use_self_signed_cluster_issuer ? 
{ + defaultCertificateSpecs = { + balancerdExternal = { + dnsNames = [ + "balancerd", + ] + issuerRef = { + name = "${var.name_prefix}-root-ca" + kind = "ClusterIssuer" + } + } + consoleExternal = { + dnsNames = [ + "console", + ] + issuerRef = { + name = "${var.name_prefix}-root-ca" + kind = "ClusterIssuer" + } + } + internal = { + issuerRef = { + name = "${var.name_prefix}-root-ca" + kind = "ClusterIssuer" + } + } + } + } : {} + } + + # Requires OpenEBS to be installed + disk_config = { + run_disk_setup_script = var.enable_disk_support ? lookup(var.disk_support_config, "run_disk_setup_script", true) : false + local_ssd_count = lookup(var.disk_support_config, "local_ssd_count", 1) + create_storage_class = var.enable_disk_support ? lookup(var.disk_support_config, "create_storage_class", true) : false + openebs_version = lookup(var.disk_support_config, "openebs_version", "4.2.0") + openebs_namespace = lookup(var.disk_support_config, "openebs_namespace", "openebs") + storage_class_name = lookup(var.disk_support_config, "storage_class_name", "openebs-lvm-instance-store-ext4") + storage_class_provisioner = "local.csi.openebs.io" + storage_class_parameters = { + storage = "lvm" + fsType = "ext4" + volgroup = "instance-store-vg" + } + } +} + +resource "helm_release" "materialize_operator" { + name = var.name_prefix + namespace = kubernetes_namespace.materialize.metadata[0].name + + repository = var.use_local_chart ? null : var.helm_repository + chart = var.helm_chart + version = var.use_local_chart ? null : var.operator_version + + values = [ + yamlencode(merge(local.default_helm_values, var.helm_values)) + ] + + depends_on = [kubernetes_namespace.materialize] +} + +# Install the metrics-server for monitoring +# Required for the Materialize Console to display cluster metrics +resource "helm_release" "metrics_server" { + count = var.install_metrics_server ? 
1 : 0 + + name = "${var.name_prefix}-metrics-server" + namespace = kubernetes_namespace.monitoring.metadata[0].name + repository = "https://kubernetes-sigs.github.io/metrics-server/" + chart = "metrics-server" + version = var.metrics_server_version + + # Common configuration values + set { + name = "args[0]" + value = "--kubelet-insecure-tls" + } + + set { + name = "metrics.enabled" + value = "true" + } + + depends_on = [ + kubernetes_namespace.monitoring + ] +} + +resource "aws_iam_role" "materialize_s3" { + name = "${var.name_prefix}-mz-role" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Federated = var.oidc_provider_arn + } + Action = "sts:AssumeRoleWithWebIdentity" + Condition = { + StringLike = { + "${trimprefix(var.cluster_oidc_issuer_url, "https://")}:sub" : "system:serviceaccount:*:*", + "${trimprefix(var.cluster_oidc_issuer_url, "https://")}:aud" : "sts.amazonaws.com" + } + } + } + ] + }) + + tags = { + Name = "${var.name_prefix}-mz-role" + ManagedBy = "terraform" + } +} + +resource "aws_iam_role_policy" "materialize_s3" { + name = "${var.name_prefix}-mz-role-policy" + role = aws_iam_role.materialize_s3.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket" + ] + Resource = [ + var.s3_bucket_arn != null ? var.s3_bucket_arn : "*", + var.s3_bucket_arn != null ? 
"${var.s3_bucket_arn}/*" : "*" + ] + } + ] + }) +} diff --git a/modules/operator/outputs.tf b/modules/operator/outputs.tf new file mode 100644 index 0000000..705a592 --- /dev/null +++ b/modules/operator/outputs.tf @@ -0,0 +1,19 @@ +output "operator_namespace" { + description = "Namespace where the operator is installed" + value = kubernetes_namespace.materialize.metadata[0].name +} + +output "operator_release_name" { + description = "Helm release name of the operator" + value = helm_release.materialize_operator.name +} + +output "operator_release_status" { + description = "Status of the helm release" + value = helm_release.materialize_operator.status +} + +output "materialize_s3_role_arn" { + description = "ARN of the IAM role for Materialize S3 access." + value = aws_iam_role.materialize_s3.arn +} diff --git a/modules/operator/variables.tf b/modules/operator/variables.tf new file mode 100644 index 0000000..0771f72 --- /dev/null +++ b/modules/operator/variables.tf @@ -0,0 +1,121 @@ +variable "name_prefix" { + description = "Prefix for all resource names (replaces separate namespace and environment variables)" + type = string +} + +variable "operator_version" { + description = "Version of the Materialize operator to install" + type = string + default = "v25.1.12" # META: helm-chart version + nullable = false +} + +variable "orchestratord_version" { + description = "Version of the Materialize orchestrator to install" + type = string + default = null +} + +variable "helm_repository" { + description = "Repository URL for the Materialize operator Helm chart. Leave empty if using local chart." + type = string + default = "https://materializeinc.github.io/materialize/" +} + +variable "helm_chart" { + description = "Chart name from repository or local path to chart. For local charts, set the path to the chart directory." 
+ type = string + default = "materialize-operator" +} + +variable "use_local_chart" { + description = "Whether to use a local chart instead of one from a repository" + type = bool + default = false +} + +variable "helm_values" { + description = "Values to pass to the Helm chart" + type = any + default = {} +} + +variable "operator_namespace" { + description = "Namespace for the Materialize operator" + type = string + default = "materialize" +} + +variable "monitoring_namespace" { + description = "Namespace for monitoring resources" + type = string + default = "monitoring" +} + +variable "metrics_server_version" { + description = "Version of metrics-server to install" + type = string + default = "3.12.2" +} + +variable "install_metrics_server" { + description = "Whether to install the metrics-server" + type = bool + default = true +} + +variable "oidc_provider_arn" { + description = "ARN of the OIDC provider for the EKS cluster" + type = string +} + +variable "cluster_oidc_issuer_url" { + description = "OIDC issuer URL for the EKS cluster" + type = string +} + +variable "s3_bucket_arn" { + description = "ARN of the S3 bucket to allow access to. If null, allows all buckets." + type = string + default = null +} + +variable "aws_region" { + description = "AWS region for the operator Helm values." + type = string +} + +variable "aws_account_id" { + description = "AWS account ID for the operator Helm values." + type = string +} + +variable "use_self_signed_cluster_issuer" { + description = "Whether to use a self-signed cluster issuer for cert-manager." + type = bool + default = false +} + +variable "enable_disk_support" { + description = "Enable disk support for Materialize using OpenEBS and NVMe instance storage. When enabled, this configures OpenEBS, runs the disk setup script for NVMe devices, and creates appropriate storage classes." 
+ type = bool + default = true +} + +variable "disk_support_config" { + description = "Advanced configuration for disk support (only used when enable_disk_support = true)" + type = object({ + run_disk_setup_script = optional(bool, true) + create_storage_class = optional(bool, true) + openebs_version = optional(string, "4.2.0") + openebs_namespace = optional(string, "openebs") + storage_class_name = optional(string, "openebs-lvm-instance-store-ext4") + storage_class_provisioner = optional(string, "local.csi.openebs.io") + storage_class_parameters = optional(object({ + storage = optional(string, "lvm") + fsType = optional(string, "ext4") + volgroup = optional(string, "instance-store-vg") + }), {}) + }) + default = {} +} diff --git a/versions.tf b/modules/operator/versions.tf similarity index 79% rename from versions.tf rename to modules/operator/versions.tf index 54bd60e..6f04d7c 100644 --- a/versions.tf +++ b/modules/operator/versions.tf @@ -14,9 +14,5 @@ terraform { source = "hashicorp/helm" version = "~> 2.0" } - random = { - source = "hashicorp/random" - version = "~> 3.0" - } } } diff --git a/modules/storage/README.md b/modules/storage/README.md new file mode 100644 index 0000000..49e7100 --- /dev/null +++ b/modules/storage/README.md @@ -0,0 +1,48 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [aws](#requirement\_aws) | ~> 5.0 | +| [random](#requirement\_random) | ~> 3.0 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | ~> 5.0 | +| [random](#provider\_random) | ~> 3.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [aws_s3_bucket.materialize_storage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket_lifecycle_configuration.materialize_storage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_lifecycle_configuration) | resource | +| [aws_s3_bucket_server_side_encryption_configuration.materialize_storage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_server_side_encryption_configuration) | resource | +| [aws_s3_bucket_versioning.materialize_storage](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_versioning) | resource | +| [random_id.bucket_suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/id) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [bucket\_force\_destroy](#input\_bucket\_force\_destroy) | Enable force destroy for the S3 bucket | `bool` | `true` | no | +| [bucket\_lifecycle\_rules](#input\_bucket\_lifecycle\_rules) | List of lifecycle rules for the S3 bucket |
list(object({
id = string
enabled = bool
prefix = string
transition_days = number
transition_storage_class = string
noncurrent_version_expiration_days = number
}))
| n/a | yes | +| [enable\_bucket\_encryption](#input\_enable\_bucket\_encryption) | Enable server-side encryption for the S3 bucket | `bool` | `true` | no | +| [enable\_bucket\_versioning](#input\_enable\_bucket\_versioning) | Enable versioning for the S3 bucket | `bool` | `true` | no | +| [name\_prefix](#input\_name\_prefix) | Prefix for all resource names (replaces separate namespace and environment variables) | `string` | n/a | yes | +| [tags](#input\_tags) | Tags to apply to all resources | `map(string)` | `{}` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [bucket\_arn](#output\_bucket\_arn) | The ARN of the S3 bucket | +| [bucket\_domain\_name](#output\_bucket\_domain\_name) | The domain name of the S3 bucket | +| [bucket\_id](#output\_bucket\_id) | The name of the S3 bucket | +| [bucket\_name](#output\_bucket\_name) | The name of the S3 bucket | diff --git a/modules/storage/main.tf b/modules/storage/main.tf index b428367..58cdd13 100644 --- a/modules/storage/main.tf +++ b/modules/storage/main.tf @@ -1,13 +1,9 @@ -locals { - name_prefix = "${var.namespace}-${var.environment}" -} - resource "random_id" "bucket_suffix" { byte_length = 4 } resource "aws_s3_bucket" "materialize_storage" { - bucket = "${local.name_prefix}-storage-${random_id.bucket_suffix.hex}" + bucket = "${var.name_prefix}-storage-${random_id.bucket_suffix.hex}" force_destroy = var.bucket_force_destroy tags = var.tags diff --git a/modules/storage/variables.tf b/modules/storage/variables.tf index d850e1a..4529b5c 100644 --- a/modules/storage/variables.tf +++ b/modules/storage/variables.tf @@ -1,10 +1,5 @@ -variable "namespace" { - description = "Namespace prefix for all resources" - type = string -} - -variable "environment" { - description = "Environment name" +variable "name_prefix" { + description = "Prefix for all resource names (replaces separate namespace and environment variables)" type = string } diff --git a/variables.tf b/variables.tf deleted file mode 100644 
index 2409e17..0000000 --- a/variables.tf +++ /dev/null @@ -1,421 +0,0 @@ -# General Variables -variable "namespace" { - description = "Namespace for all resources, usually the organization or project name" - type = string - validation { - condition = length(var.namespace) <= 12 && can(regex("^[a-z][a-z0-9-]+$", var.namespace)) - error_message = "Namespace must be lowercase alphanumeric and hyphens only, start with a letter, max 12 characters" - } -} - -variable "environment" { - description = "Environment name (e.g., prod, staging, dev)" - type = string - validation { - condition = length(var.environment) <= 8 && can(regex("^[a-z0-9]+$", var.environment)) - error_message = "Environment must be lowercase alphanumeric only, max 8 characters" - } -} - -variable "tags" { - description = "Default tags to apply to all resources" - type = map(string) - default = { - Environment = "dev" - Terraform = "true" - Project = "materialize" - } -} - -# Networking Variables -variable "create_vpc" { - description = "Controls if VPC should be created (it affects almost all resources)" - type = bool - default = true -} - -variable "network_id" { - default = "" - description = "The ID of the VPC in which resources will be deployed. Only used if create_vpc is false." - type = string -} - -variable "network_private_subnet_ids" { - default = [] - description = "A list of private subnet IDs in the VPC. Only used if create_vpc is false." - type = list(string) -} - -variable "network_public_subnet_ids" { - default = [] - description = "A list of public subnet IDs in the VPC. Only used if create_vpc is false." 
- type = list(string) -} - -variable "vpc_cidr" { - description = "CIDR block for VPC" - type = string - default = "10.0.0.0/16" -} - -variable "availability_zones" { - description = "List of availability zones" - type = list(string) - default = ["us-east-1a", "us-east-1b", "us-east-1c"] -} - -variable "private_subnet_cidrs" { - description = "CIDR blocks for private subnets" - type = list(string) - default = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] -} - -variable "public_subnet_cidrs" { - description = "CIDR blocks for public subnets" - type = list(string) - default = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"] -} - -variable "single_nat_gateway" { - description = "Use a single NAT Gateway for all private subnets" - type = bool - default = false -} - -# EKS Variables -variable "cluster_version" { - description = "Kubernetes version for the EKS cluster" - type = string - default = "1.32" -} - -variable "node_group_desired_size" { - description = "Desired number of worker nodes" - type = number - default = 2 -} - -variable "node_group_min_size" { - description = "Minimum number of worker nodes" - type = number - default = 1 -} - -variable "node_group_max_size" { - description = "Maximum number of worker nodes" - type = number - default = 4 -} - -variable "node_group_instance_types" { - description = <= 8 && can(regex("^[[:print:]]+$", var.database_password)) && !can(regex("[/@\" ]", var.database_password)) - error_message = "Database password must be at least 8 characters, contain only printable ASCII characters, excluding '/', '@', '\"' (double quotes), and ' ' (space)." 
- } -} - -variable "db_multi_az" { - description = "Enable multi-AZ deployment for RDS" - type = bool - default = false -} - -# S3 Variables -variable "bucket_force_destroy" { - description = "Enable force destroy for the S3 bucket" - type = bool - default = true -} - -variable "enable_bucket_versioning" { - description = "Enable versioning for the S3 bucket" - type = bool - default = true -} - -variable "enable_bucket_encryption" { - description = "Enable server-side encryption for the S3 bucket" - type = bool - default = true -} - -variable "bucket_lifecycle_rules" { - description = "List of lifecycle rules for the S3 bucket" - type = list(object({ - id = string - enabled = bool - prefix = string - transition_days = number - transition_storage_class = string - noncurrent_version_expiration_days = number - })) - default = [{ - id = "cleanup" - enabled = true - prefix = "" - transition_days = 90 - transition_storage_class = "STANDARD_IA" - noncurrent_version_expiration_days = 90 - }] -} - -# Monitoring Variables -variable "enable_monitoring" { - description = "Enable CloudWatch monitoring" - type = bool - default = true -} - -variable "metrics_retention_days" { - description = "Number of days to retain CloudWatch metrics" - type = number - default = 7 -} - -variable "kubernetes_namespace" { - description = "The Kubernetes namespace for the Materialize resources" - type = string - default = "materialize-environment" -} - -variable "service_account_name" { - description = "Name of the service account" - type = string - default = "12345678-1234-1234-1234-123456789012" -} - -variable "log_group_name_prefix" { - description = "Prefix for the CloudWatch log group name (will be combined with environment name)" - type = string - default = "materialize" -} - -variable "install_aws_load_balancer_controller" { - description = "Whether to install the AWS Load Balancer Controller" - type = bool - default = true -} - -variable "install_cert_manager" { - description = "Whether to 
install cert-manager." - type = bool - default = true -} - -variable "use_self_signed_cluster_issuer" { - description = "Whether to install and use a self-signed ClusterIssuer for TLS. To work around limitations in Terraform, this will be treated as `false` if no materialize instances are defined." - type = bool - default = true -} - -variable "cert_manager_namespace" { - description = "The name of the namespace in which cert-manager is or will be installed." - type = string - default = "cert-manager" -} - -variable "cert_manager_install_timeout" { - description = "Timeout for installing the cert-manager helm chart, in seconds." - type = number - default = 300 -} - -variable "cert_manager_chart_version" { - description = "Version of the cert-manager helm chart to install." - type = string - default = "v1.17.1" -} - -# Materialize Helm Chart Variables -variable "install_materialize_operator" { - description = "Whether to install the Materialize operator" - type = bool - default = true -} - -variable "helm_chart" { - description = "Chart name from repository or local path to chart. For local charts, set the path to the chart directory." - type = string - default = "materialize-operator" -} - -variable "use_local_chart" { - description = "Whether to use a local chart instead of one from a repository" - type = bool - default = false -} - -variable "operator_version" { - description = "Version of the Materialize operator to install" - type = string - default = null -} - -variable "operator_namespace" { - description = "Namespace for the Materialize operator" - type = string - default = "materialize" -} - -variable "orchestratord_version" { - description = "Version of the Materialize orchestrator to install" - type = string - default = null -} - -variable "helm_values" { - description = "Additional Helm values to merge with defaults" - type = any - default = {} -} - -variable "materialize_instances" { - description = "Configuration for Materialize instances. 
Due to limitations in Terraform, `materialize_instances` cannot be defined on the first `terraform apply`." - type = list(object({ - name = string - namespace = optional(string) - database_name = string - environmentd_version = optional(string) - cpu_request = optional(string, "1") - memory_request = optional(string, "1Gi") - memory_limit = optional(string, "1Gi") - create_database = optional(bool, true) - create_nlb = optional(bool, true) - internal_nlb = optional(bool, true) - enable_cross_zone_load_balancing = optional(bool, true) - in_place_rollout = optional(bool, false) - request_rollout = optional(string) - force_rollout = optional(string) - balancer_memory_request = optional(string, "256Mi") - balancer_memory_limit = optional(string, "256Mi") - balancer_cpu_request = optional(string, "100m") - license_key = optional(string) - })) - default = [] - - validation { - condition = alltrue([ - for instance in var.materialize_instances : - instance.request_rollout == null || - can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", instance.request_rollout)) - ]) - error_message = "Request rollout must be a valid UUID in the format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" - } - - validation { - condition = alltrue([ - for instance in var.materialize_instances : - instance.force_rollout == null || - can(regex("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$", instance.force_rollout)) - ]) - error_message = "Force rollout must be a valid UUID in the format xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" - } -} - -variable "install_metrics_server" { - description = "Whether to install the metrics-server for the Materialize Console" - type = bool - default = true -} - -variable "enable_disk_support" { - description = "Enable disk support for Materialize using OpenEBS and NVMe instance storage. When enabled, this configures OpenEBS, runs the disk setup script for NVMe devices, and creates appropriate storage classes." 
- type = bool - default = true -} - -variable "disk_support_config" { - description = "Advanced configuration for disk support (only used when enable_disk_support = true)" - type = object({ - install_openebs = optional(bool, true) - run_disk_setup_script = optional(bool, true) - create_storage_class = optional(bool, true) - openebs_version = optional(string, "4.2.0") - openebs_namespace = optional(string, "openebs") - storage_class_name = optional(string, "openebs-lvm-instance-store-ext4") - storage_class_provisioner = optional(string, "local.csi.openebs.io") - storage_class_parameters = optional(object({ - storage = optional(string, "lvm") - fsType = optional(string, "ext4") - volgroup = optional(string, "instance-store-vg") - }), {}) - }) - default = {} -}