+[| no | +| [ami\_filter](#input\_ami\_filter) | Map of lists used to create the AMI filter for the action runner AMI. | `map(list(string))` |
"amazon"
]
{
"state": [
"available"
]
} | no |
+| [ami\_housekeeper\_cleanup\_config](#input\_ami\_housekeeper\_cleanup\_config) | Configuration for AMI cleanup.object({
amiFilters = optional(list(object({
Name = string
Values = list(string)
})),
[{
Name : "state",
Values : ["available"],
},
{
Name : "image-type",
Values : ["machine"],
}]
)
dryRun = optional(bool, false)
launchTemplateNames = optional(list(string))
maxItems = optional(number)
minimumDaysOld = optional(number, 30)
ssmParameterNames = optional(list(string))
}) | `{}` | no |
+| [ami\_housekeeper\_lambda\_s3\_key](#input\_ami\_housekeeper\_lambda\_s3\_key) | S3 key for the AMI housekeeper lambda function. Required if using S3 bucket to specify lambdas. | `string` | `null` | no |
+| [ami\_housekeeper\_lambda\_s3\_object\_version](#input\_ami\_housekeeper\_lambda\_s3\_object\_version) | S3 object version for the AMI housekeeper lambda function. Useful if S3 versioning is enabled on source bucket. | `string` | `null` | no |
+| [ami\_housekeeper\_lambda\_schedule\_expression](#input\_ami\_housekeeper\_lambda\_schedule\_expression) | Scheduler expression for the AMI housekeeper lambda. | `string` | `"rate(1 day)"` | no |
+| [ami\_housekeeper\_lambda\_timeout](#input\_ami\_housekeeper\_lambda\_timeout) | Time out of the lambda in seconds. | `number` | `300` | no |
+| [ami\_housekeeper\_lambda\_zip](#input\_ami\_housekeeper\_lambda\_zip) | File location of the lambda zip file. | `string` | `null` | no |
+| [ami\_id\_ssm\_parameter\_name](#input\_ami\_id\_ssm\_parameter\_name) | Externally managed SSM parameter (of data type aws:ec2:image) that contains the AMI ID to launch runner instances from. Overrides ami\_filter | `string` | `null` | no |
+| [ami\_kms\_key\_arn](#input\_ami\_kms\_key\_arn) | Optional CMK Key ARN to be used to launch an instance from a shared encrypted AMI | `string` | `null` | no |
+| [ami\_owners](#input\_ami\_owners) | The list of owners used to select the AMI of action runner instances. | `list(string)` | [| no | +| [associate\_public\_ipv4\_address](#input\_associate\_public\_ipv4\_address) | Associate public IPv4 with the runner. Only tested with IPv4 | `bool` | `false` | no | | [aws\_partition](#input\_aws\_partition) | (optiona) partition in the arn namespace to use if not 'aws' | `string` | `"aws"` | no | | [aws\_region](#input\_aws\_region) | AWS region. | `string` | n/a | yes | -| [block\_device\_mappings](#input\_block\_device\_mappings) | The EC2 instance block device configuration. Takes the following keys: `device_name`, `delete_on_termination`, `volume_type`, `volume_size`, `encrypted`, `iops` |
"amazon"
]
list(object({
device_name = string
delete_on_termination = bool
volume_type = string
volume_size = number
encrypted = bool
iops = number
})) | [| no | -| [cloudwatch\_config](#input\_cloudwatch\_config) | (optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details. | `string` | `null` | no | -| [create\_service\_linked\_role\_spot](#input\_create\_service\_linked\_role\_spot) | (optional) create the serviced linked role for spot instances that is required by the scale-up lambda. | `bool` | `false` | no | +| [block\_device\_mappings](#input\_block\_device\_mappings) | The EC2 instance block device configuration. Takes the following keys: `device_name`, `delete_on_termination`, `volume_type`, `volume_size`, `encrypted`, `iops`, `throughput`, `kms_key_id`, `snapshot_id`. |
{
"delete_on_termination": true,
"device_name": "/dev/xvda",
"encrypted": true,
"iops": null,
"volume_size": 30,
"volume_type": "gp3"
}
]
list(object({
delete_on_termination = optional(bool, true)
device_name = optional(string, "/dev/xvda")
encrypted = optional(bool, true)
iops = optional(number)
kms_key_id = optional(string)
snapshot_id = optional(string)
throughput = optional(number)
volume_size = number
volume_type = optional(string, "gp3")
})) | [| no | +| [cloudwatch\_config](#input\_cloudwatch\_config) | (optional) Replaces the module's default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details. | `string` | `null` | no | +| [create\_service\_linked\_role\_spot](#input\_create\_service\_linked\_role\_spot) | (optional) create the service linked role for spot instances that is required by the scale-up lambda. | `bool` | `false` | no | | [delay\_webhook\_event](#input\_delay\_webhook\_event) | The number of seconds the event accepted by the webhook is invisible on the queue before the scale up lambda will receive the event. | `number` | `30` | no | -| [disable\_runner\_autoupdate](#input\_disable\_runner\_autoupdate) | Disable the auto update of the github runner agent. Be-aware there is a grace period of 30 days, see also the [GitHub article](https://github.blog/changelog/2022-02-01-github-actions-self-hosted-runners-can-now-disable-automatic-updates/) | `bool` | `false` | no | -| [enable\_cloudwatch\_agent](#input\_enable\_cloudwatch\_agent) | Enabling the cloudwatch agent on the ec2 runner instances, the runner contains default config. Configuration can be overridden via `cloudwatch_config`. | `bool` | `true` | no | +| [disable\_runner\_autoupdate](#input\_disable\_runner\_autoupdate) | Disable the auto update of the github runner agent. Be aware there is a grace period of 30 days, see also the [GitHub article](https://github.blog/changelog/2022-02-01-github-actions-self-hosted-runners-can-now-disable-automatic-updates/) | `bool` | `false` | no | +| [enable\_ami\_housekeeper](#input\_enable\_ami\_housekeeper) | Option to disable the lambda to clean up old AMIs. | `bool` | `false` | no | +| [enable\_cloudwatch\_agent](#input\_enable\_cloudwatch\_agent) | Enables the cloudwatch agent on the ec2 runner instances. The runner uses a default config that can be overridden via `cloudwatch_config`. 
| `bool` | `true` | no | | [enable\_ephemeral\_runners](#input\_enable\_ephemeral\_runners) | Enable ephemeral runners, runners will only be used once. | `bool` | `false` | no | -| [enable\_job\_queued\_check](#input\_enable\_job\_queued\_check) | Only scale if the job event received by the scale up lambda is is in the state queued. By default enabled for non ephemeral runners and disabled for ephemeral. Set this variable to overwrite the default behavior. | `bool` | `null` | no | -| [enable\_managed\_runner\_security\_group](#input\_enable\_managed\_runner\_security\_group) | Enabling the default managed security group creation. Unmanaged security groups can be specified via `runner_additional_security_group_ids`. | `bool` | `true` | no | +| [enable\_event\_rule\_binaries\_syncer](#input\_enable\_event\_rule\_binaries\_syncer) | DEPRECATED: Replaced by `state_event_rule_binaries_syncer`. | `bool` | `null` | no | +| [enable\_fifo\_build\_queue](#input\_enable\_fifo\_build\_queue) | Enable a FIFO queue to keep the order of events received by the webhook. Recommended for repo level runners. | `bool` | `false` | no | +| [enable\_jit\_config](#input\_enable\_jit\_config) | Overwrite the default behavior for JIT configuration. By default JIT configuration is enabled for ephemeral runners and disabled for non-ephemeral runners. In case of GHES check first if the JIT config API is avaialbe. In case you upgradeing from 3.x to 4.x you can set `enable_jit_config` to `false` to avoid a breaking change when having your own AMI. | `bool` | `null` | no | +| [enable\_job\_queued\_check](#input\_enable\_job\_queued\_check) | Only scale if the job event received by the scale up lambda is in the queued state. By default enabled for non ephemeral runners and disabled for ephemeral. Set this variable to overwrite the default behavior. 
| `bool` | `null` | no | +| [enable\_managed\_runner\_security\_group](#input\_enable\_managed\_runner\_security\_group) | Enables creation of the default managed security group. Unmanaged security groups can be specified via `runner_additional_security_group_ids`. | `bool` | `true` | no | +| [enable\_metrics\_control\_plane](#input\_enable\_metrics\_control\_plane) | (Experimental) Enable or disable the metrics for the module. Feature can change or renamed without a major release. | `bool` | `null` | no | | [enable\_organization\_runners](#input\_enable\_organization\_runners) | Register runners to organization, instead of repo level | `bool` | `false` | no | +| [enable\_runner\_binaries\_syncer](#input\_enable\_runner\_binaries\_syncer) | Option to disable the lambda to sync GitHub runner distribution, useful when using a pre-build AMI. | `bool` | `true` | no | | [enable\_runner\_detailed\_monitoring](#input\_enable\_runner\_detailed\_monitoring) | Should detailed monitoring be enabled for the runner. Set this to true if you want to use detailed monitoring. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html for details. | `bool` | `false` | no | -| [enable\_ssm\_on\_runners](#input\_enable\_ssm\_on\_runners) | Enable to allow access the runner instances for debugging purposes via SSM. Note that this adds additional permissions to the runner instances. | `bool` | `false` | no | -| [enabled\_userdata](#input\_enabled\_userdata) | Should the userdata script be enabled for the runner. Set this to false if you are using your own prebuilt AMI. | `bool` | `true` | no | -| [environment](#input\_environment) | A name that identifies the environment, used as prefix and for tagging. | `string` | `null` | no | -| [fifo\_build\_queue](#input\_fifo\_build\_queue) | Enable a FIFO queue to remain the order of events received by the webhook. Suggest to set to true for repo level runners. 
| `bool` | `false` | no | +| [enable\_runner\_on\_demand\_failover\_for\_errors](#input\_enable\_runner\_on\_demand\_failover\_for\_errors) | Enable on-demand failover. For example to fall back to on demand when no spot capacity is available the variable can be set to `InsufficientInstanceCapacity`. When not defined the default behavior is to retry later. | `list(string)` | `[]` | no | +| [enable\_runner\_workflow\_job\_labels\_check\_all](#input\_enable\_runner\_workflow\_job\_labels\_check\_all) | If set to true all labels in the workflow job must match the GitHub labels (os, architecture and `self-hosted`). When false if __any__ label matches it will trigger the webhook. | `bool` | `true` | no | +| [enable\_ssm\_on\_runners](#input\_enable\_ssm\_on\_runners) | Enable to allow access to the runner instances for debugging purposes via SSM. Note that this adds additional permissions to the runner instances. | `bool` | `false` | no | +| [enable\_user\_data\_debug\_logging\_runner](#input\_enable\_user\_data\_debug\_logging\_runner) | Option to enable debug logging for user-data, this logs all secrets as well. | `bool` | `false` | no | +| [enable\_userdata](#input\_enable\_userdata) | Should the userdata script be enabled for the runner. Set this to false if you are using your own prebuilt AMI. | `bool` | `true` | no | +| [eventbridge](#input\_eventbridge) | Enable the use of EventBridge by the module. By enabling this feature events will be put on the EventBridge by the webhook instead of directly dispatching to queues for scaling.
{
"volume_size": 30
}
]
object({
enable = optional(bool, false)
accept_events = optional(list(string), null)
}) | `{}` | no |
| [ghes\_ssl\_verify](#input\_ghes\_ssl\_verify) | GitHub Enterprise SSL verification. Set to 'false' when custom certificate (chains) is used for GitHub Enterprise Server (insecure). | `bool` | `true` | no |
| [ghes\_url](#input\_ghes\_url) | GitHub Enterprise Server URL. Example: https://github.internal.co - DO NOT SET IF USING PUBLIC GITHUB | `string` | `null` | no |
-| [github\_app](#input\_github\_app) | GitHub app parameters, see your github app. Ensure the key is the base64-encoded `.pem` file (the output of `base64 app.private-key.pem`, not the content of `private-key.pem`). | object({
key_base64 = string
id = string
webhook_secret = string
}) | n/a | yes |
-| [idle\_config](#input\_idle\_config) | List of time period that can be defined as cron expression to keep a minimum amount of runners active instead of scaling down to 0. By defining this list you can ensure that in time periods that match the cron expression within 5 seconds a runner is kept idle. | list(object({
cron = string
timeZone = string
idleCount = number
})) | `[]` | no |
-| [instance\_allocation\_strategy](#input\_instance\_allocation\_strategy) | The allocation strategy for spot instances. AWS recommends to use `capacity-optimized` however the AWS default is `lowest-price`. | `string` | `"lowest-price"` | no |
-| [instance\_max\_spot\_price](#input\_instance\_max\_spot\_price) | Max price price for spot intances per hour. This variable will be passed to the create fleet as max spot price for the fleet. | `string` | `null` | no |
+| [github\_app](#input\_github\_app) | GitHub app parameters, see your github app. Ensure the key is the base64-encoded `.pem` file (the output of `base64 app.private-key.pem`, not the content of `private-key.pem`). | object({
key_base64 = string
id = string
webhook_secret = string
}) | n/a | yes |
+| [idle\_config](#input\_idle\_config) | List of time periods, defined as a cron expression, to keep a minimum amount of runners active instead of scaling down to 0. By defining this list you can ensure that in time periods that match the cron expression within 5 seconds a runner is kept idle. | list(object({
cron = string
timeZone = string
idleCount = number
evictionStrategy = optional(string, "oldest_first")
})) | `[]` | no |
+| [instance\_allocation\_strategy](#input\_instance\_allocation\_strategy) | The allocation strategy for spot instances. AWS recommends using `price-capacity-optimized` however the AWS default is `lowest-price`. | `string` | `"lowest-price"` | no |
+| [instance\_max\_spot\_price](#input\_instance\_max\_spot\_price) | Max price for spot instances per hour. This variable will be passed to the create fleet as max spot price for the fleet. | `string` | `null` | no |
| [instance\_profile\_path](#input\_instance\_profile\_path) | The path that will be added to the instance\_profile, if not set the environment name will be used. | `string` | `null` | no |
| [instance\_target\_capacity\_type](#input\_instance\_target\_capacity\_type) | Default lifecycle used for runner instances, can be either `spot` or `on-demand`. | `string` | `"spot"` | no |
-| [instance\_type](#input\_instance\_type) | [DEPRECATED] See instance\_types. | `string` | `null` | no |
-| [instance\_types](#input\_instance\_types) | List of instance types for the action runner. Defaults are based on runner\_os (amzn2 for linux and Windows Server Core for win). | `list(string)` | [| no | -| [job\_queue\_retention\_in\_seconds](#input\_job\_queue\_retention\_in\_seconds) | The number of seconds the job is held in the queue before it is purged | `number` | `86400` | no | +| [instance\_termination\_watcher](#input\_instance\_termination\_watcher) | Configuration for the instance termination watcher. This feature is Beta, changes will not trigger a major release as long in beta.
"m5.large",
"c5.large"
]
object({
enable = optional(bool, false)
enable_metric = optional(string, null) # deprecated
features = optional(object({
enable_spot_termination_handler = optional(bool, true)
enable_spot_termination_notification_watcher = optional(bool, true)
}), {})
memory_size = optional(number, null)
s3_key = optional(string, null)
s3_object_version = optional(string, null)
timeout = optional(number, null)
zip = optional(string, null)
}) | `{}` | no |
+| [instance\_types](#input\_instance\_types) | List of instance types for the action runner. Defaults are based on runner\_os (al2023 for linux and Windows Server Core for win). | `list(string)` | [| no | +| [job\_queue\_retention\_in\_seconds](#input\_job\_queue\_retention\_in\_seconds) | The number of seconds the job is held in the queue before it is purged. | `number` | `86400` | no | +| [job\_retry](#input\_job\_retry) | Experimental! Can be removed / changed without trigger a major release.Configure job retries. The configuration enables job retries (for ephemeral runners). After creating the insances a message will be published to a job retry queue. The job retry check lambda is checking after a delay if the job is queued. If not the message will be published again on the scale-up (build queue). Using this feature can impact the reate limit of the GitHub app.
"m5.large",
"c5.large"
]
object({
enable = optional(bool, false)
delay_in_seconds = optional(number, 300)
delay_backoff = optional(number, 2)
lambda_memory_size = optional(number, 256)
lambda_timeout = optional(number, 30)
max_attempts = optional(number, 1)
}) | `{}` | no |
| [key\_name](#input\_key\_name) | Key pair name | `string` | `null` | no |
| [kms\_key\_arn](#input\_kms\_key\_arn) | Optional CMK Key ARN to be used for Parameter Store. This key must be in the current account. | `string` | `null` | no |
-| [lambda\_architecture](#input\_lambda\_architecture) | AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86\_64' functions. | `string` | `"x86_64"` | no |
-| [lambda\_principals](#input\_lambda\_principals) | (Optional) add extra principals to the role created for execution of the lambda, e.g. for local testing. | list(object({
type = string
identifiers = list(string)
})) | `[]` | no |
-| [lambda\_runtime](#input\_lambda\_runtime) | AWS Lambda runtime. | `string` | `"nodejs14.x"` | no |
-| [lambda\_s3\_bucket](#input\_lambda\_s3\_bucket) | S3 bucket from which to specify lambda functions. This is an alternative to providing local files directly. | `any` | `null` | no |
+| [lambda\_architecture](#input\_lambda\_architecture) | AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86\_64' functions. | `string` | `"arm64"` | no |
+| [lambda\_principals](#input\_lambda\_principals) | (Optional) add extra principals to the role created for execution of the lambda, e.g. for local testing. | list(object({
type = string
identifiers = list(string)
})) | `[]` | no |
+| [lambda\_runtime](#input\_lambda\_runtime) | AWS Lambda runtime. | `string` | `"nodejs20.x"` | no |
+| [lambda\_s3\_bucket](#input\_lambda\_s3\_bucket) | S3 bucket from which to specify lambda functions. This is an alternative to providing local files directly. | `string` | `null` | no |
| [lambda\_security\_group\_ids](#input\_lambda\_security\_group\_ids) | List of security group IDs associated with the Lambda function. | `list(string)` | `[]` | no |
| [lambda\_subnet\_ids](#input\_lambda\_subnet\_ids) | List of subnets in which the action runners will be launched, the subnets needs to be subnets in the `vpc_id`. | `list(string)` | `[]` | no |
+| [lambda\_tags](#input\_lambda\_tags) | Map of tags that will be added to all the lambda function resources. Note these are additional tags to the default tags. | `map(string)` | `{}` | no |
+| [lambda\_tracing\_mode](#input\_lambda\_tracing\_mode) | DEPRECATED: Replaced by `tracing_config`. | `string` | `null` | no |
| [log\_level](#input\_log\_level) | Logging level for lambda logging. Valid values are 'silly', 'trace', 'debug', 'info', 'warn', 'error', 'fatal'. | `string` | `"info"` | no |
-| [log\_type](#input\_log\_type) | Logging format for lambda logging. Valid values are 'json', 'pretty', 'hidden'. | `string` | `"pretty"` | no |
-| [logging\_kms\_key\_id](#input\_logging\_kms\_key\_id) | Specifies the kms key id to encrypt the logs with | `string` | `null` | no |
+| [logging\_kms\_key\_id](#input\_logging\_kms\_key\_id) | Specifies the kms key id to encrypt the logs with. | `string` | `null` | no |
| [logging\_retention\_in\_days](#input\_logging\_retention\_in\_days) | Specifies the number of days you want to retain log events for the lambda log group. Possible values are: 0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653. | `number` | `180` | no |
-| [market\_options](#input\_market\_options) | DEPCRECATED: Replaced by `instance_target_capacity_type`. | `string` | `null` | no |
-| [minimum\_running\_time\_in\_minutes](#input\_minimum\_running\_time\_in\_minutes) | The time an ec2 action runner should be running at minimum before terminated if not busy. | `number` | `null` | no |
-| [pool\_config](#input\_pool\_config) | The configuration for updating the pool. The `pool_size` to adjust to by the events triggered by the the `schedule_expression. For example you can configure a cron expression for week days to adjust the pool to 10 and another expression for the weekend to adjust the pool to 1.` | list(object({
schedule_expression = string
size = number
})) | `[]` | no |
+| [matcher\_config\_parameter\_store\_tier](#input\_matcher\_config\_parameter\_store\_tier) | The tier of the parameter store for the matcher configuration. Valid values are `Standard`, and `Advanced`. | `string` | `"Standard"` | no |
+| [metrics](#input\_metrics) | Configuration for metrics created by the module, by default disabled to avoid additional costs. When metrics are enabled all metrics are created unless explicitly configured otherwise. | object({
enable = optional(bool, false)
namespace = optional(string, "GitHub Runners")
metric = optional(object({
enable_github_app_rate_limit = optional(bool, true)
enable_job_retry = optional(bool, true)
enable_spot_termination_warning = optional(bool, true)
}), {})
}) | `{}` | no |
+| [metrics\_namespace](#input\_metrics\_namespace) | The namespace for the metrics created by the module. Metrics will only be created if explicitly enabled. | `string` | `null` | no |
+| [minimum\_running\_time\_in\_minutes](#input\_minimum\_running\_time\_in\_minutes) | The time an ec2 action runner should be running at minimum before terminated, if not busy. | `number` | `null` | no |
+| [pool\_config](#input\_pool\_config) | The configuration for updating the pool. The `pool_size` to adjust to by the events triggered by the `schedule_expression`. For example you can configure a cron expression for weekdays to adjust the pool to 10 and another expression for the weekend to adjust the pool to 1. Use `schedule_expression_timezone` to override the schedule time zone (defaults to UTC). | list(object({
schedule_expression = string
schedule_expression_timezone = optional(string)
size = number
})) | `[]` | no |
+| [pool\_lambda\_memory\_size](#input\_pool\_lambda\_memory\_size) | Memory size limit for scale-up lambda. | `number` | `512` | no |
| [pool\_lambda\_reserved\_concurrent\_executions](#input\_pool\_lambda\_reserved\_concurrent\_executions) | Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. | `number` | `1` | no |
-| [pool\_lambda\_timeout](#input\_pool\_lambda\_timeout) | Time out for the pool lambda lambda in seconds. | `number` | `60` | no |
+| [pool\_lambda\_timeout](#input\_pool\_lambda\_timeout) | Time out for the pool lambda in seconds. | `number` | `60` | no |
| [pool\_runner\_owner](#input\_pool\_runner\_owner) | The pool will deploy runners to the GitHub org ID, set this value to the org to which you want the runners deployed. Repo level is not supported. | `string` | `null` | no |
| [prefix](#input\_prefix) | The prefix used for naming resources | `string` | `"github-actions"` | no |
-| [redrive\_build\_queue](#input\_redrive\_build\_queue) | Set options to attach (optional) a dead letter queue to the build queue, the queue between the webhook and the scale up lambda. You have the following options. 1. Disable by setting, `enalbed' to false. 2. Enable by setting `enabled` to `true`, `maxReceiveCount` to a number of max retries.` | object({
enabled = bool
maxReceiveCount = number
}) | {
"enabled": false,
"maxReceiveCount": null
} | no |
-| [repository\_white\_list](#input\_repository\_white\_list) | List of repositories allowed to use the github app | `list(string)` | `[]` | no |
+| [queue\_encryption](#input\_queue\_encryption) | Configure how data on queues managed by the modules is encrypted at rest. Options are encrypted via SSE, non-encrypted, and via KMS. By default encryption via SSE is enabled. For more details see the Terraform `aws_sqs_queue` resource https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue. | object({
kms_data_key_reuse_period_seconds = number
kms_master_key_id = string
sqs_managed_sse_enabled = bool
}) | {
"kms_data_key_reuse_period_seconds": null,
"kms_master_key_id": null,
"sqs_managed_sse_enabled": true
} | no |
+| [redrive\_build\_queue](#input\_redrive\_build\_queue) | Set options to attach (optional) a dead letter queue to the build queue, the queue between the webhook and the scale up lambda. You have the following options. 1. Disable by setting `enabled` to false. 2. Enable by setting `enabled` to `true`, `maxReceiveCount` to a number of max retries. | object({
enabled = bool
maxReceiveCount = number
}) | {
"enabled": false,
"maxReceiveCount": null
} | no |
+| [repository\_white\_list](#input\_repository\_white\_list) | List of github repository full names (owner/repo\_name) that will be allowed to use the github app. Leave empty for no filtering. | `list(string)` | `[]` | no |
| [role\_path](#input\_role\_path) | The path that will be added to role path for created roles, if not set the environment name will be used. | `string` | `null` | no |
| [role\_permissions\_boundary](#input\_role\_permissions\_boundary) | Permissions boundary that will be added to the created roles. | `string` | `null` | no |
-| [runner\_additional\_security\_group\_ids](#input\_runner\_additional\_security\_group\_ids) | (optional) List of additional security groups IDs to apply to the runner | `list(string)` | `[]` | no |
-| [runner\_allow\_prerelease\_binaries](#input\_runner\_allow\_prerelease\_binaries) | Allow the runners to update to prerelease binaries. | `bool` | `false` | no |
+| [runner\_additional\_security\_group\_ids](#input\_runner\_additional\_security\_group\_ids) | (optional) List of additional security groups IDs to apply to the runner. | `list(string)` | `[]` | no |
| [runner\_architecture](#input\_runner\_architecture) | The platform architecture of the runner instance\_type. | `string` | `"x64"` | no |
-| [runner\_as\_root](#input\_runner\_as\_root) | Run the action runner under the root user. Variable `runner_run_as` will be ingored. | `bool` | `false` | no |
-| [runner\_binaries\_s3\_sse\_configuration](#input\_runner\_binaries\_s3\_sse\_configuration) | Map containing server-side encryption configuration for runner-binaries S3 bucket. | `any` | `{}` | no |
+| [runner\_as\_root](#input\_runner\_as\_root) | Run the action runner under the root user. Variable `runner_run_as` will be ignored. | `bool` | `false` | no |
+| [runner\_binaries\_s3\_logging\_bucket](#input\_runner\_binaries\_s3\_logging\_bucket) | Bucket for action runner distribution bucket access logging. | `string` | `null` | no |
+| [runner\_binaries\_s3\_logging\_bucket\_prefix](#input\_runner\_binaries\_s3\_logging\_bucket\_prefix) | Bucket prefix for action runner distribution bucket access logging. | `string` | `null` | no |
+| [runner\_binaries\_s3\_sse\_configuration](#input\_runner\_binaries\_s3\_sse\_configuration) | Map containing server-side encryption configuration for runner-binaries S3 bucket. | `any` | {
"rule": {
"apply_server_side_encryption_by_default": {
"sse_algorithm": "AES256"
}
}
} | no |
+| [runner\_binaries\_s3\_versioning](#input\_runner\_binaries\_s3\_versioning) | Status of S3 versioning for runner-binaries S3 bucket. Once set to Enabled the change cannot be reverted via Terraform! | `string` | `"Disabled"` | no |
+| [runner\_binaries\_syncer\_lambda\_memory\_size](#input\_runner\_binaries\_syncer\_lambda\_memory\_size) | Memory size limit in MB for binary syncer lambda. | `number` | `256` | no |
| [runner\_binaries\_syncer\_lambda\_timeout](#input\_runner\_binaries\_syncer\_lambda\_timeout) | Time out of the binaries sync lambda in seconds. | `number` | `300` | no |
| [runner\_binaries\_syncer\_lambda\_zip](#input\_runner\_binaries\_syncer\_lambda\_zip) | File location of the binaries sync lambda zip file. | `string` | `null` | no |
| [runner\_boot\_time\_in\_minutes](#input\_runner\_boot\_time\_in\_minutes) | The minimum time for an EC2 runner to boot and register as a runner. | `number` | `5` | no |
-| [runner\_ec2\_tags](#input\_runner\_ec2\_tags) | Map of tags that will be added to the launch template instance tag specificatons. | `map(string)` | `{}` | no |
-| [runner\_egress\_rules](#input\_runner\_egress\_rules) | List of egress rules for the GitHub runner instances. | list(object({
cidr_blocks = list(string)
ipv6_cidr_blocks = list(string)
prefix_list_ids = list(string)
from_port = number
protocol = string
security_groups = list(string)
self = bool
to_port = number
description = string
})) | [| no | -| [runner\_enable\_workflow\_job\_labels\_check](#input\_runner\_enable\_workflow\_job\_labels\_check) | If set to true all labels in the workflow job even are matched agaist the custom labels and GitHub labels (os, architecture and `self-hosted`). When the labels are not matching the event is dropped at the webhook. | `bool` | `false` | no | -| [runner\_enable\_workflow\_job\_labels\_check\_all](#input\_runner\_enable\_workflow\_job\_labels\_check\_all) | If set to true all labels in the workflow job must match the GitHub labels (os, architecture and `self-hosted`). When false if __any__ label matches it will trigger the webhook. `runner_enable_workflow_job_labels_check` must be true for this to take effect. | `bool` | `true` | no | -| [runner\_extra\_labels](#input\_runner\_extra\_labels) | Extra (custom) labels for the runners (GitHub). Separate each label by a comma. Labels checks on the webhook can be enforced by setting `enable_workflow_job_labels_check`. GitHub read-only labels should not be provided. | `string` | `""` | no | +| [runner\_credit\_specification](#input\_runner\_credit\_specification) | The credit option for CPU usage of a T instance. Can be unset, "standard" or "unlimited". | `string` | `null` | no | +| [runner\_disable\_default\_labels](#input\_runner\_disable\_default\_labels) | Disable default labels for the runners (os, architecture and `self-hosted`). If enabled, the runner will only have the extra labels provided in `runner_extra_labels`. In case you on own start script is used, this configuration parameter needs to be parsed via SSM. | `bool` | `false` | no | +| [runner\_ec2\_tags](#input\_runner\_ec2\_tags) | Map of tags that will be added to the launch template instance tag specifications. | `map(string)` | `{}` | no | +| [runner\_egress\_rules](#input\_runner\_egress\_rules) | List of egress rules for the GitHub runner instances. |
{
"cidr_blocks": [
"0.0.0.0/0"
],
"description": null,
"from_port": 0,
"ipv6_cidr_blocks": [
"::/0"
],
"prefix_list_ids": null,
"protocol": "-1",
"security_groups": null,
"self": null,
"to_port": 0
}
]
list(object({
cidr_blocks = list(string)
ipv6_cidr_blocks = list(string)
prefix_list_ids = list(string)
from_port = number
protocol = string
security_groups = list(string)
self = bool
to_port = number
description = string
})) | [| no | +| [runner\_extra\_labels](#input\_runner\_extra\_labels) | Extra (custom) labels for the runners (GitHub). Separate each label by a comma. Labels checks on the webhook can be enforced by setting `enable_workflow_job_labels_check`. GitHub read-only labels should not be provided. | `list(string)` | `[]` | no | | [runner\_group\_name](#input\_runner\_group\_name) | Name of the runner group. | `string` | `"Default"` | no | | [runner\_iam\_role\_managed\_policy\_arns](#input\_runner\_iam\_role\_managed\_policy\_arns) | Attach AWS or customer-managed IAM policies (by ARN) to the runner IAM role | `list(string)` | `[]` | no | -| [runner\_log\_files](#input\_runner\_log\_files) | (optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details. |
{
"cidr_blocks": [
"0.0.0.0/0"
],
"description": null,
"from_port": 0,
"ipv6_cidr_blocks": [
"::/0"
],
"prefix_list_ids": null,
"protocol": "-1",
"security_groups": null,
"self": null,
"to_port": 0
}
]
list(object({
log_group_name = string
prefix_log_group = bool
file_path = string
log_stream_name = string
})) | `null` | no |
-| [runner\_metadata\_options](#input\_runner\_metadata\_options) | Metadata options for the ec2 runner instances. | `map(any)` | {
"http_endpoint": "enabled",
"http_put_response_hop_limit": 1,
"http_tokens": "optional"
} | no |
+| [runner\_log\_files](#input\_runner\_log\_files) | (optional) Replaces the module default cloudwatch log config. See https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-Configuration-File-Details.html for details. | list(object({
log_group_name = string
prefix_log_group = bool
file_path = string
log_stream_name = string
})) | `null` | no |
+| [runner\_metadata\_options](#input\_runner\_metadata\_options) | Metadata options for the ec2 runner instances. By default, the module uses metadata tags for bootstrapping the runner, only disable `instance_metadata_tags` when using custom scripts for starting the runner. | `map(any)` | {
"http_endpoint": "enabled",
"http_put_response_hop_limit": 1,
"http_tokens": "required",
"instance_metadata_tags": "enabled"
} | no |
+| [runner\_name\_prefix](#input\_runner\_name\_prefix) | The prefix used for the GitHub runner name. The prefix will be used in the default start script to prefix the instance name when registering the runner in GitHub. The value is available via an EC2 tag 'ghr:runner\_name\_prefix'. | `string` | `""` | no |
| [runner\_os](#input\_runner\_os) | The EC2 Operating System type to use for action runner instances (linux,windows). | `string` | `"linux"` | no |
| [runner\_run\_as](#input\_runner\_run\_as) | Run the GitHub actions agent as user. | `string` | `"ec2-user"` | no |
-| [runners\_lambda\_s3\_key](#input\_runners\_lambda\_s3\_key) | S3 key for runners lambda function. Required if using S3 bucket to specify lambdas. | `any` | `null` | no |
-| [runners\_lambda\_s3\_object\_version](#input\_runners\_lambda\_s3\_object\_version) | S3 object version for runners lambda function. Useful if S3 versioning is enabled on source bucket. | `any` | `null` | no |
+| [runners\_ebs\_optimized](#input\_runners\_ebs\_optimized) | Enable EBS optimization for the runner instances. | `bool` | `false` | no |
+| [runners\_lambda\_s3\_key](#input\_runners\_lambda\_s3\_key) | S3 key for runners lambda function. Required if using S3 bucket to specify lambdas. | `string` | `null` | no |
+| [runners\_lambda\_s3\_object\_version](#input\_runners\_lambda\_s3\_object\_version) | S3 object version for runners lambda function. Useful if S3 versioning is enabled on source bucket. | `string` | `null` | no |
| [runners\_lambda\_zip](#input\_runners\_lambda\_zip) | File location of the lambda zip file for scaling runners. | `string` | `null` | no |
| [runners\_maximum\_count](#input\_runners\_maximum\_count) | The maximum number of runners that will be created. | `number` | `3` | no |
+| [runners\_scale\_down\_lambda\_memory\_size](#input\_runners\_scale\_down\_lambda\_memory\_size) | Memory size limit in MB for scale-down lambda. | `number` | `512` | no |
| [runners\_scale\_down\_lambda\_timeout](#input\_runners\_scale\_down\_lambda\_timeout) | Time out for the scale down lambda in seconds. | `number` | `60` | no |
+| [runners\_scale\_up\_Lambda\_memory\_size](#input\_runners\_scale\_up\_Lambda\_memory\_size) | Memory size limit in MB for scale-up lambda. | `number` | `null` | no |
+| [runners\_scale\_up\_lambda\_memory\_size](#input\_runners\_scale\_up\_lambda\_memory\_size) | Memory size limit in MB for scale-up lambda. | `number` | `512` | no |
| [runners\_scale\_up\_lambda\_timeout](#input\_runners\_scale\_up\_lambda\_timeout) | Time out for the scale up lambda in seconds. | `number` | `30` | no |
+| [runners\_ssm\_housekeeper](#input\_runners\_ssm\_housekeeper) | Configuration for the SSM housekeeper lambda. This lambda deletes token / JIT config from SSM.object({
schedule_expression = optional(string, "rate(1 day)")
enabled = optional(bool, true)
lambda_memory_size = optional(number, 512)
lambda_timeout = optional(number, 60)
config = object({
tokenPath = optional(string)
minimumDaysOld = optional(number, 1)
dryRun = optional(bool, false)
})
}) | {
"config": {}
} | no |
| [scale\_down\_schedule\_expression](#input\_scale\_down\_schedule\_expression) | Scheduler expression to check every x for scale down. | `string` | `"cron(*/5 * * * ? *)"` | no |
| [scale\_up\_reserved\_concurrent\_executions](#input\_scale\_up\_reserved\_concurrent\_executions) | Amount of reserved concurrent executions for the scale-up lambda function. A value of 0 disables lambda from being triggered and -1 removes any concurrency limitations. | `number` | `1` | no |
-| [subnet\_ids](#input\_subnet\_ids) | List of subnets in which the action runners will be launched, the subnets needs to be subnets in the `vpc_id`. | `list(string)` | n/a | yes |
-| [syncer\_lambda\_s3\_key](#input\_syncer\_lambda\_s3\_key) | S3 key for syncer lambda function. Required if using S3 bucket to specify lambdas. | `any` | `null` | no |
-| [syncer\_lambda\_s3\_object\_version](#input\_syncer\_lambda\_s3\_object\_version) | S3 object version for syncer lambda function. Useful if S3 versioning is enabled on source bucket. | `any` | `null` | no |
+| [ssm\_paths](#input\_ssm\_paths) | The root path used in SSM to store configuration and secrets. | object({
root = optional(string, "github-action-runners")
app = optional(string, "app")
runners = optional(string, "runners")
webhook = optional(string, "webhook")
use_prefix = optional(bool, true)
}) | `{}` | no |
+| [state\_event\_rule\_binaries\_syncer](#input\_state\_event\_rule\_binaries\_syncer) | Option to disable EventBridge Lambda trigger for the binary syncer, useful to stop automatic updates of binary distribution | `string` | `"ENABLED"` | no |
+| [subnet\_ids](#input\_subnet\_ids) | List of subnets in which the action runner instances will be launched. The subnets need to exist in the configured VPC (`vpc_id`), and must reside in different availability zones (see https://github.com/philips-labs/terraform-aws-github-runner/issues/2904) | `list(string)` | n/a | yes |
+| [syncer\_lambda\_s3\_key](#input\_syncer\_lambda\_s3\_key) | S3 key for syncer lambda function. Required if using an S3 bucket to specify lambdas. | `string` | `null` | no |
+| [syncer\_lambda\_s3\_object\_version](#input\_syncer\_lambda\_s3\_object\_version) | S3 object version for syncer lambda function. Useful if S3 versioning is enabled on source bucket. | `string` | `null` | no |
| [tags](#input\_tags) | Map of tags that will be added to created resources. By default resources will be tagged with name and environment. | `map(string)` | `{}` | no |
+| [tracing\_config](#input\_tracing\_config) | Configuration for lambda tracing. | object({
mode = optional(string, null)
capture_http_requests = optional(bool, false)
capture_error = optional(bool, false)
}) | `{}` | no |
+| [userdata\_content](#input\_userdata\_content) | Alternative user-data content, replacing the templated one. By providing your own user\_data you have to take care of installing all required software, including the action runner and registering the runner. Be aware configuration parameters in SSM as well as tags are treated as internals. Changes will not trigger a breaking release. | `string` | `null` | no |
| [userdata\_post\_install](#input\_userdata\_post\_install) | Script to be ran after the GitHub Actions runner is installed on the EC2 instances | `string` | `""` | no |
| [userdata\_pre\_install](#input\_userdata\_pre\_install) | Script to be ran before the GitHub Actions runner is installed on the EC2 instances | `string` | `""` | no |
-| [userdata\_template](#input\_userdata\_template) | Alternative user-data template, replacing the default template. By providing your own user\_data you have to take care of installing all required software, including the action runner. Variables userdata\_pre/post\_install are ignored. | `string` | `null` | no |
+| [userdata\_template](#input\_userdata\_template) | Alternative user-data template file path, replacing the default template. By providing your own user\_data you have to take care of installing all required software, including the action runner. Variables userdata\_pre/post\_install are ignored. | `string` | `null` | no |
| [vpc\_id](#input\_vpc\_id) | The VPC for security groups of the action runners. | `string` | n/a | yes |
-| [webhook\_lambda\_s3\_key](#input\_webhook\_lambda\_s3\_key) | S3 key for webhook lambda function. Required if using S3 bucket to specify lambdas. | `any` | `null` | no |
-| [webhook\_lambda\_s3\_object\_version](#input\_webhook\_lambda\_s3\_object\_version) | S3 object version for webhook lambda function. Useful if S3 versioning is enabled on source bucket. | `any` | `null` | no |
+| [webhook\_lambda\_apigateway\_access\_log\_settings](#input\_webhook\_lambda\_apigateway\_access\_log\_settings) | Access log settings for webhook API gateway. | object({
destination_arn = string
format = string
}) | `null` | no |
+| [webhook\_lambda\_memory\_size](#input\_webhook\_lambda\_memory\_size) | Memory size limit in MB for webhook lambda. | `number` | `256` | no |
+| [webhook\_lambda\_s3\_key](#input\_webhook\_lambda\_s3\_key) | S3 key for webhook lambda function. Required if using S3 bucket to specify lambdas. | `string` | `null` | no |
+| [webhook\_lambda\_s3\_object\_version](#input\_webhook\_lambda\_s3\_object\_version) | S3 object version for webhook lambda function. Useful if S3 versioning is enabled on source bucket. | `string` | `null` | no |
| [webhook\_lambda\_timeout](#input\_webhook\_lambda\_timeout) | Time out of the webhook lambda in seconds. | `number` | `10` | no |
| [webhook\_lambda\_zip](#input\_webhook\_lambda\_zip) | File location of the webhook lambda zip file. | `string` | `null` | no |
@@ -492,32 +253,13 @@ In case the setup does not work as intended follow the trace of events:
| Name | Description |
|------|-------------|
| [binaries\_syncer](#output\_binaries\_syncer) | n/a |
+| [instance\_termination\_handler](#output\_instance\_termination\_handler) | n/a |
+| [instance\_termination\_watcher](#output\_instance\_termination\_watcher) | n/a |
| [queues](#output\_queues) | SQS queues. |
| [runners](#output\_runners) | n/a |
| [ssm\_parameters](#output\_ssm\_parameters) | n/a |
| [webhook](#output\_webhook) | n/a |
-
-
-## Contribution
-
-We welcome contribution, please checkout the [contribution guide](CONTRIBUTING.md). Be-aware we use [pre commit hooks](https://pre-commit.com/) to update the docs.
-
-## Philips Forest
+
+
-This module is part of the Philips Forest.
-
-```bash
-
- ___ _
- / __\__ _ __ ___ ___| |_
- / _\/ _ \| '__/ _ \/ __| __|
- / / | (_) | | | __/\__ \ |_
- \/ \___/|_| \___||___/\__|
-
- Infrastructure
-
-```
-
-Talk to the forestkeepers in the `runners`-channel on Slack.
-
-[](https://join.slack.com/t/philips-software/shared_invite/zt-xecw65v5-i1531hGP~mdVwgxLFx7ckg)
+object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [runners](#output\_runners) | n/a |
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/arm64/lambdas-download/README.md b/examples/arm64/lambdas-download/README.md
new file mode 100644
index 0000000000..a9a0b890e9
--- /dev/null
+++ b/examples/arm64/lambdas-download/README.md
@@ -0,0 +1,31 @@
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1 |
+
+## Providers
+
+No providers.
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [lambdas](#module\_lambdas) | ../../../modules/download-lambda | n/a |
+
+## Resources
+
+No resources.
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [files](#output\_files) | n/a |
+
\ No newline at end of file
diff --git a/examples/arm64/lambdas-download/versions.tf b/examples/arm64/lambdas-download/versions.tf
new file mode 100644
index 0000000000..c934712b56
--- /dev/null
+++ b/examples/arm64/lambdas-download/versions.tf
@@ -0,0 +1,3 @@
+terraform {
+ required_version = ">= 1"
+}
diff --git a/examples/arm64/main.tf b/examples/arm64/main.tf
index c541177cd4..e7b0a2f349 100644
--- a/examples/arm64/main.tf
+++ b/examples/arm64/main.tf
@@ -12,12 +12,20 @@ resource "random_id" "random" {
### Hybrid account
################################################################################
+module "base" {
+ source = "../base"
+
+ prefix = local.environment
+ aws_region = local.aws_region
+}
+
+
module "runners" {
source = "../../"
create_service_linked_role_spot = true
aws_region = local.aws_region
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.base.vpc.vpc_id
+ subnet_ids = module.base.vpc.private_subnets
prefix = local.environment
tags = {
@@ -25,19 +33,19 @@ module "runners" {
}
github_app = {
- key_base64 = var.github_app_key_base64
- id = var.github_app_id
+ key_base64 = var.github_app.key_base64
+ id = var.github_app.id
webhook_secret = random_id.random.hex
}
# Grab zip files via lambda_download, will automatically get the ARM64 build
- webhook_lambda_zip = "lambdas-download/webhook.zip"
- runner_binaries_syncer_lambda_zip = "lambdas-download/runner-binaries-syncer.zip"
- runners_lambda_zip = "lambdas-download/runners.zip"
+ webhook_lambda_zip = "../lambdas-download/webhook.zip"
+ runner_binaries_syncer_lambda_zip = "../lambdas-download/runner-binaries-syncer.zip"
+ runners_lambda_zip = "../lambdas-download/runners.zip"
enable_organization_runners = false
# Runners will automatically get the "arm64" label
- runner_extra_labels = "default,example"
+ runner_extra_labels = ["default", "example"]
# enable access to the runners via SSM
enable_ssm_on_runners = true
@@ -51,6 +59,9 @@ module "runners" {
# }
# }
+ # enable S3 versioning for runners S3 bucket
+ # runner_binaries_s3_versioning = "Enabled"
+
# Uncommet idle config to have idle runners from 9 to 5 in time zone Amsterdam
# idle_config = [{
# cron = "* * 9-17 * * *"
@@ -70,8 +81,19 @@ module "runners" {
runners_maximum_count = 1
# set up a fifo queue to remain order
- fifo_build_queue = true
+ enable_fifo_build_queue = true
# override scaling down
scale_down_schedule_expression = "cron(* * * * ? *)"
}
+
+module "webhook_github_app" {
+ source = "../../modules/webhook-github-app"
+
+ github_app = {
+ key_base64 = var.github_app.key_base64
+ id = var.github_app.id
+ webhook_secret = random_id.random.hex
+ }
+ webhook_endpoint = module.runners.webhook.endpoint
+}
diff --git a/examples/arm64/variables.tf b/examples/arm64/variables.tf
index 69dcd0c61c..35a65dbd4a 100644
--- a/examples/arm64/variables.tf
+++ b/examples/arm64/variables.tf
@@ -1,4 +1,8 @@
+variable "github_app" {
+ description = "GitHub App for API usages."
-variable "github_app_key_base64" {}
-
-variable "github_app_id" {}
+ type = object({
+ id = string
+ key_base64 = string
+ })
+}
diff --git a/examples/arm64/versions.tf b/examples/arm64/versions.tf
index 750fd6c978..349e8243a5 100644
--- a/examples/arm64/versions.tf
+++ b/examples/arm64/versions.tf
@@ -2,14 +2,16 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.0"
+ version = "~> 5.27"
}
local = {
- source = "hashicorp/local"
+ source = "hashicorp/local"
+ version = "~> 2.0"
}
random = {
- source = "hashicorp/random"
+ source = "hashicorp/random"
+ version = "~> 3.0"
}
}
- required_version = ">= 0.14"
+ required_version = ">= 1.3.0"
}
diff --git a/examples/arm64/vpc.tf b/examples/arm64/vpc.tf
deleted file mode 100644
index a7d21422f1..0000000000
--- a/examples/arm64/vpc.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-module "vpc" {
- source = "git::https://github.com/philips-software/terraform-aws-vpc.git?ref=2.2.0"
-
- environment = local.environment
- aws_region = local.aws_region
- create_private_hosted_zone = false
-}
diff --git a/examples/base/.terraform.lock.hcl b/examples/base/.terraform.lock.hcl
new file mode 100644
index 0000000000..2cee4d651f
--- /dev/null
+++ b/examples/base/.terraform.lock.hcl
@@ -0,0 +1,25 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+ version = "5.31.0"
+ constraints = ">= 5.0.0, ~> 5.27"
+ hashes = [
+ "h1:ltxyuBWIy9cq0kIKDJH1jeWJy/y7XJLjS4QrsQK4plA=",
+ "zh:0cdb9c2083bf0902442384f7309367791e4640581652dda456f2d6d7abf0de8d",
+ "zh:2fe4884cb9642f48a5889f8dff8f5f511418a18537a9dfa77ada3bcdad391e4e",
+ "zh:36d8bdd72fe61d816d0049c179f495bc6f1e54d8d7b07c45b62e5e1696882a89",
+ "zh:539dd156e3ec608818eb21191697b230117437a58587cbd02ce533202a4dd520",
+ "zh:6a53f4b57ac4eb3479fc0d8b6e301ca3a27efae4c55d9f8bd24071b12a03361c",
+ "zh:6faeb8ff6792ca7af1c025255755ad764667a300291cc10cea0c615479488c87",
+ "zh:7d9423149b323f6d0df5b90c4d9029e5455c670aea2a7eb6fef4684ba7eb2e0b",
+ "zh:8235badd8a5d0993421cacf5ead48fac73d3b5a25c8a68599706a404b1f70730",
+ "zh:860b4f60842b2879c5128b7e386c8b49adeda9287fed12c5cd74861bb659bbcd",
+ "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+ "zh:b021fceaf9382c8fe3c6eb608c24d01dce3d11ba7e65bb443d51ca9b90e9b237",
+ "zh:b38b0bfc1c69e714e80cf1c9ea06e687ee86aa9f45694be28eb07adcebbe0489",
+ "zh:c972d155f6c01af9690a72adfb99cfc24ef5ef311ca92ce46b9b13c5c153f572",
+ "zh:e0dd29920ec84fdb6026acff44dcc1fb1a24a0caa093fa04cdbc713d384c651d",
+ "zh:e3127ebd2cb0374cd1808f911e6bffe2f4ac4d84317061381242353f3a7bc27d",
+ ]
+}
diff --git a/examples/base/README.md b/examples/base/README.md
new file mode 100644
index 0000000000..96ca2a857b
--- /dev/null
+++ b/examples/base/README.md
@@ -0,0 +1,39 @@
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1 |
+| [aws](#requirement\_aws) | ~> 5.27 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | 5.31.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | 5.0.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_resourcegroups_group.resourcegroups_group](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/resourcegroups_group) | resource |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [aws\_region](#input\_aws\_region) | AWS region to create the VPC, assuming zones `a` and `b` exists. | `string` | n/a | yes |
+| [prefix](#input\_prefix) | Prefix used for resource naming. | `string` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [vpc](#output\_vpc) | n/a |
+
diff --git a/examples/base/main.tf b/examples/base/main.tf
new file mode 100644
index 0000000000..6f0cf307a7
--- /dev/null
+++ b/examples/base/main.tf
@@ -0,0 +1,8 @@
+resource "aws_resourcegroups_group" "resourcegroups_group" {
+ name = "${var.prefix}-group"
+ resource_query {
+ query = templatefile("${path.module}/templates/resource-group.json", {
+ example = var.prefix
+ })
+ }
+}
diff --git a/examples/base/outputs.tf b/examples/base/outputs.tf
new file mode 100644
index 0000000000..85fc1edf43
--- /dev/null
+++ b/examples/base/outputs.tf
@@ -0,0 +1,3 @@
+output "vpc" {
+ value = module.vpc
+}
diff --git a/templates/resource-group.json b/examples/base/templates/resource-group.json
similarity index 57%
rename from templates/resource-group.json
rename to examples/base/templates/resource-group.json
index 753c6b4843..202d42efad 100644
--- a/templates/resource-group.json
+++ b/examples/base/templates/resource-group.json
@@ -2,8 +2,8 @@
"ResourceTypeFilters": ["AWS::AllSupported"],
"TagFilters": [
{
- "Key": "Environment",
- "Values": ["${environment}"]
+ "Key": "Example",
+ "Values": ["${example}"]
}
]
}
diff --git a/examples/base/variables.tf b/examples/base/variables.tf
new file mode 100644
index 0000000000..895e80ab5e
--- /dev/null
+++ b/examples/base/variables.tf
@@ -0,0 +1,9 @@
+variable "prefix" {
+ description = "Prefix used for resource naming."
+ type = string
+}
+
+variable "aws_region" {
+ description = "AWS region to create the VPC, assuming zones `a` and `b` exists."
+ type = string
+}
diff --git a/examples/base/versions.tf b/examples/base/versions.tf
new file mode 100644
index 0000000000..2685117f32
--- /dev/null
+++ b/examples/base/versions.tf
@@ -0,0 +1,9 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 5.27"
+ }
+ }
+ required_version = ">= 1"
+}
diff --git a/examples/base/vpc.tf b/examples/base/vpc.tf
new file mode 100644
index 0000000000..cc6a3b7649
--- /dev/null
+++ b/examples/base/vpc.tf
@@ -0,0 +1,16 @@
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "5.0.0"
+
+ name = "${var.prefix}-vpc"
+ cidr = "10.0.0.0/16"
+
+ azs = ["${var.aws_region}a", "${var.aws_region}b"]
+ private_subnets = ["10.0.1.0/24", "10.0.2.0/24"]
+ public_subnets = ["10.0.101.0/24", "10.0.102.0/24"]
+
+ enable_dns_hostnames = true
+ enable_nat_gateway = true
+ map_public_ip_on_launch = false
+ single_nat_gateway = true
+}
diff --git a/examples/default/.terraform.lock.hcl b/examples/default/.terraform.lock.hcl
index 04f4b49cbf..60a47db0b8 100644
--- a/examples/default/.terraform.lock.hcl
+++ b/examples/default/.terraform.lock.hcl
@@ -2,59 +2,84 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
- version = "4.15.1"
- constraints = ">= 3.63.0, ~> 4.0"
+ version = "5.31.0"
+ constraints = ">= 5.0.0, ~> 5.27"
hashes = [
- "h1:KNkM4pOCRzbjlGoCxt4Yl4qGUESLQ2uKIOSHb+aiMlY=",
- "zh:1d944144f8d613b8090c0c8391e4b205ca036086d70aceb4cdf664856fa8410c",
- "zh:2a0ca16a6b12c0ac509f64512f80bd2ed6e7ea0ec369212efd4be3fa65e9773d",
- "zh:3f9efdce4f1c320ffd061e8715e1d031deac1be0b959eaa60c25a274925653e4",
- "zh:4cf82f3267b0c3e08be29b0345f711ab84ea1ea75f0e8ce81f5a2fe635ba67b4",
- "zh:58474a0b7da438e1bcd53e87f10e28830836ff9b46cce5f09413c90952ae4f78",
- "zh:6eb1be8afb0314b6b8424fe212b13beeb04f3f24692f0f3ee86c5153c7eb2e63",
- "zh:8022da7d3b050d452ce6c679844e13729bdb4e1b3e75dcf68931af17a06b9277",
- "zh:8e2683d00fff1df43440d6e7c04a2c1eb432c7d5dacff32fe8ce9045bc948fe6",
+ "h1:ltxyuBWIy9cq0kIKDJH1jeWJy/y7XJLjS4QrsQK4plA=",
+ "zh:0cdb9c2083bf0902442384f7309367791e4640581652dda456f2d6d7abf0de8d",
+ "zh:2fe4884cb9642f48a5889f8dff8f5f511418a18537a9dfa77ada3bcdad391e4e",
+ "zh:36d8bdd72fe61d816d0049c179f495bc6f1e54d8d7b07c45b62e5e1696882a89",
+ "zh:539dd156e3ec608818eb21191697b230117437a58587cbd02ce533202a4dd520",
+ "zh:6a53f4b57ac4eb3479fc0d8b6e301ca3a27efae4c55d9f8bd24071b12a03361c",
+ "zh:6faeb8ff6792ca7af1c025255755ad764667a300291cc10cea0c615479488c87",
+ "zh:7d9423149b323f6d0df5b90c4d9029e5455c670aea2a7eb6fef4684ba7eb2e0b",
+ "zh:8235badd8a5d0993421cacf5ead48fac73d3b5a25c8a68599706a404b1f70730",
+ "zh:860b4f60842b2879c5128b7e386c8b49adeda9287fed12c5cd74861bb659bbcd",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
- "zh:b0c22d9a306e8ac2de57b5291a3d0a7a2c1713e33b7d076005662451afdc4d29",
- "zh:ba6b7d7d91388b636145b133da6b4e32620cdc8046352e2dc8f3f0f81ff5d2e2",
- "zh:d38a816eb60f4419d99303136a3bb61a0d2df3ca8a1dce2ced9b99bf23efa9f7",
+ "zh:b021fceaf9382c8fe3c6eb608c24d01dce3d11ba7e65bb443d51ca9b90e9b237",
+ "zh:b38b0bfc1c69e714e80cf1c9ea06e687ee86aa9f45694be28eb07adcebbe0489",
+ "zh:c972d155f6c01af9690a72adfb99cfc24ef5ef311ca92ce46b9b13c5c153f572",
+ "zh:e0dd29920ec84fdb6026acff44dcc1fb1a24a0caa093fa04cdbc713d384c651d",
+ "zh:e3127ebd2cb0374cd1808f911e6bffe2f4ac4d84317061381242353f3a7bc27d",
]
}
provider "registry.terraform.io/hashicorp/local" {
- version = "2.2.3"
+ version = "2.4.1"
+ constraints = "~> 2.0"
hashes = [
- "h1:FvRIEgCmAezgZUqb2F+PZ9WnSSnR5zbEM2ZI+GLmbMk=",
- "zh:04f0978bb3e052707b8e82e46780c371ac1c66b689b4a23bbc2f58865ab7d5c0",
- "zh:6484f1b3e9e3771eb7cc8e8bab8b35f939a55d550b3f4fb2ab141a24269ee6aa",
- "zh:78a56d59a013cb0f7eb1c92815d6eb5cf07f8b5f0ae20b96d049e73db915b238",
+ "h1:gpp25uNkYJYzJVnkyRr7RIBVfwLs9GSq2HNnFpTRBg0=",
+ "zh:244b445bf34ddbd167731cc6c6b95bbed231dc4493f8cc34bd6850cfe1f78528",
+ "zh:3c330bdb626123228a0d1b1daa6c741b4d5d484ab1c7ae5d2f48d4c9885cc5e9",
+ "zh:5ff5f9b791ddd7557e815449173f2db38d338e674d2d91800ac6e6d808de1d1d",
+ "zh:70206147104f4bf26ae67d730c995772f85bf23e28c2c2e7612c74f4dae3c46f",
+ "zh:75029676993accd6bef933c196b2fad51a9ec8a69a847dbbe96ec8ebf7926cdc",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
- "zh:8aa9950f4c4db37239bcb62e19910c49e47043f6c8587e5b0396619923657797",
- "zh:996beea85f9084a725ff0e6473a4594deb5266727c5f56e9c1c7c62ded6addbb",
- "zh:9a7ef7a21f48fabfd145b2e2a4240ca57517ad155017e86a30860d7c0c109de3",
- "zh:a63e70ac052aa25120113bcddd50c1f3cfe61f681a93a50cea5595a4b2cc3e1c",
- "zh:a6e8d46f94108e049ad85dbed60354236dc0b9b5ec8eabe01c4580280a43d3b8",
- "zh:bb112ce7efbfcfa0e65ed97fa245ef348e0fd5bfa5a7e4ab2091a9bd469f0a9e",
- "zh:d7bec0da5c094c6955efed100f3fe22fca8866859f87c025be1760feb174d6d9",
- "zh:fb9f271b72094d07cef8154cd3d50e9aa818a0ea39130bc193132ad7b23076fd",
+ "zh:7d48d5999fe1fcdae9295a7c3448ac1541f5a24c474bd82df6d4fa3732483f2b",
+ "zh:b766b38b027f0f84028244d1c2f990431a37d4fc3ac645962924554016507e77",
+ "zh:bfc7ad301dada204cf51c59d8bd6a9a87de5fddb42190b4d6ba157d6e08a1f10",
+ "zh:c902b527702a8c5e2c25a6637d07bbb1690cb6c1e63917a5f6dc460efd18d43f",
+ "zh:d68ae0e1070cf429c46586bc87580c3ed113f76241da2b6e4f1a8348126b3c46",
+ "zh:f4903fd89f7c92a346ae9e666c2d0b6884c4474ae109e9b4bd15e7efaa4bfc29",
+ ]
+}
+
+provider "registry.terraform.io/hashicorp/null" {
+ version = "3.2.2"
+ constraints = "~> 3.0"
+ hashes = [
+ "h1:IMVAUHKoydFrlPrl9OzasDnw/8ntZFerCC9iXw1rXQY=",
+ "zh:3248aae6a2198f3ec8394218d05bd5e42be59f43a3a7c0b71c66ec0df08b69e7",
+ "zh:32b1aaa1c3013d33c245493f4a65465eab9436b454d250102729321a44c8ab9a",
+ "zh:38eff7e470acb48f66380a73a5c7cdd76cc9b9c9ba9a7249c7991488abe22fe3",
+ "zh:4c2f1faee67af104f5f9e711c4574ff4d298afaa8a420680b0cb55d7bbc65606",
+ "zh:544b33b757c0b954dbb87db83a5ad921edd61f02f1dc86c6186a5ea86465b546",
+ "zh:696cf785090e1e8cf1587499516b0494f47413b43cb99877ad97f5d0de3dc539",
+ "zh:6e301f34757b5d265ae44467d95306d61bef5e41930be1365f5a8dcf80f59452",
+ "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+ "zh:913a929070c819e59e94bb37a2a253c228f83921136ff4a7aa1a178c7cce5422",
+ "zh:aa9015926cd152425dbf86d1abdbc74bfe0e1ba3d26b3db35051d7b9ca9f72ae",
+ "zh:bb04798b016e1e1d49bcc76d62c53b56c88c63d6f2dfe38821afef17c416a0e1",
+ "zh:c23084e1b23577de22603cff752e59128d83cfecc2e6819edadd8cf7a10af11e",
]
}
provider "registry.terraform.io/hashicorp/random" {
- version = "3.2.0"
+ version = "3.6.0"
+ constraints = "~> 3.0"
hashes = [
- "h1:NvMyFNHHq65GUNyBGjLuLD4ABA6sTlRebZCIK5OtvFU=",
- "zh:2960977ce9a7d6a7d3e934e75ec5814735626f95c186ad95a9102344a1a38ac1",
- "zh:2fd012abfabe7076f3f2f402eeef4970e20574d20ffec57c162b02b6e848c32f",
- "zh:4cd3234671cf01c913023418b227eb78b0659f2cd2e0b387be1f0bb607d29889",
- "zh:52e695b4fa3fae735ffc901edff8183745f980923510a744db7616e8f10dc499",
+ "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=",
+ "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
+ "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
+ "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
+ "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
+ "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
- "zh:848b4a294e5ba15192ee4bfd199c07f60a437d7572efcd2d89db036e1ebc0e6e",
- "zh:9d49aa432a05748a9527e95448cebee1238c87c97c7e8dec694bfd709683f9c7",
- "zh:b4ad4cf289d3f7408649b74b8639918833613f2a1f3cf51b51f4b2fdaa412dd2",
- "zh:c1544c4b416096fb8d8dbf84c4488584a2844a30dd533b957e9e9e60a165f24e",
- "zh:dc737d6b4591cad8c9a1d0b347e587e846d8d901789b29b4dd401b6cdf82c017",
- "zh:f5645fd39f749dbbf847cbdc87ba0dbd141143f12917a6a8904faf8a9b64111e",
- "zh:fdedf610e0d020878a8f1fedda8105e0c33a7e23c4792fca54460685552de308",
+ "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
+ "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
+ "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
+ "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
+ "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
+ "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}
diff --git a/examples/default/README.md b/examples/default/README.md
index 9cf612ce17..a3d057bb04 100644
--- a/examples/default/README.md
+++ b/examples/default/README.md
@@ -1,31 +1,76 @@
-# Action runners deployment default example
+# Amazon Linux X64 (default)
This module shows how to create GitHub action runners. Lambda release will be downloaded from GitHub.
## Usages
-Steps for the full setup, such as creating a GitHub app can be found in the root module's [README](../../README.md). First download the Lambda releases from GitHub. Alternatively you can build the lambdas locally with Node or Docker, there is a simple build script in `object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [runners](#output\_runners) | n/a |
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/default/lambdas-download/main.tf b/examples/default/lambdas-download/main.tf
deleted file mode 100644
index 87f31bd8a9..0000000000
--- a/examples/default/lambdas-download/main.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-locals {
- version = "object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [runners](#output\_runners) | n/a |
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/ephemeral/lambdas-download/main.tf b/examples/ephemeral/lambdas-download/main.tf
deleted file mode 100644
index 87f31bd8a9..0000000000
--- a/examples/ephemeral/lambdas-download/main.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-locals {
- version = "object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/multi-runner/main.tf b/examples/multi-runner/main.tf
new file mode 100644
index 0000000000..9be340ab47
--- /dev/null
+++ b/examples/multi-runner/main.tf
@@ -0,0 +1,129 @@
+locals {
+ environment = var.environment != null ? var.environment : "multi-runner"
+ aws_region = var.aws_region
+
+ # Load runner configurations from Yaml files
+ multi_runner_config_files = {
+ for c in fileset("${path.module}/templates/runner-configs", "*.yaml") :
+
+ trimsuffix(c, ".yaml") => yamldecode(file("${path.module}/templates/runner-configs/${c}"))
+ }
+ multi_runner_config = {
+ for k, v in local.multi_runner_config_files :
+
+ k => merge(
+ v,
+ {
+ runner_config = merge(
+ v.runner_config,
+ {
+ subnet_ids = lookup(v.runner_config, "subnet_ids", null) != null ? [module.base.vpc.private_subnets[0]] : null
+ vpc_id = lookup(v.runner_config, "vpc_id", null) != null ? module.base.vpc.vpc_id : null
+ }
+ )
+ }
+ )
+ }
+}
+
+resource "random_id" "random" {
+ byte_length = 20
+}
+
+module "base" {
+ source = "../base"
+
+ prefix = local.environment
+ aws_region = local.aws_region
+}
+
+module "runners" {
+ source = "../../modules/multi-runner"
+ multi_runner_config = local.multi_runner_config
+ # Alternative to loading runner configuration from Yaml files is using static configuration:
+ # multi_runner_config = {
+ # "linux-x64" = {
+ # matcherConfig : {
+ # labelMatchers = [["self-hosted", "linux", "x64", "amazon"]]
+ # exactMatch = false
+ # }
+ # fifo = true
+ # delay_webhook_event = 0
+ # runner_config = {
+ # runner_os = "linux"
+ # runner_architecture = "x64"
+ # runner_name_prefix = "amazon-x64_"
+ # create_service_linked_role_spot = true
+ # enable_ssm_on_runners = true
+ # instance_types = ["m5ad.large", "m5a.large"]
+ # runner_extra_labels = ["amazon"]
+ # runners_maximum_count = 1
+ # enable_ephemeral_runners = true
+ # scale_down_schedule_expression = "cron(* * * * ? *)"
+ # }
+ # }
+ # }
+ aws_region = local.aws_region
+ vpc_id = module.base.vpc.vpc_id
+ subnet_ids = module.base.vpc.private_subnets
+ runners_scale_up_lambda_timeout = 60
+ runners_scale_down_lambda_timeout = 60
+ prefix = local.environment
+ tags = {
+ Project = "ProjectX"
+ }
+ github_app = {
+ key_base64 = var.github_app.key_base64
+ id = var.github_app.id
+ webhook_secret = random_id.random.hex
+ }
+
+ # Deploy webhook using the EventBridge
+ eventbridge = {
+ enable = true
+ # adjust the allow events to only allow specific events, like workflow_job
+ accept_events = ["workflow_job"]
+ }
+
+ # enable this section for tracing
+ # tracing_config = {
+ # mode = "Active"
+ # capture_error = true
+ # capture_http_requests = true
+ # }
+ # Assuming local build lambda's to use pre build ones, uncomment the lines below and download the
+ # lambda zip files lambda_download
+ # webhook_lambda_zip = "../lambdas-download/webhook.zip"
+ # runner_binaries_syncer_lambda_zip = "../lambdas-download/runner-binaries-syncer.zip"
+ # runners_lambda_zip = "../lambdas-download/runners.zip"
+
+ # Enable debug logging for the lambda functions
+ # log_level = "debug"
+
+ # Enable to track the spot instance termination warning
+ # instance_termination_watcher = {
+ # enable = true
+ # }
+
+ # Enable metrics
+ # metrics = {
+ # enable = true
+ # metric = {
+ # enable_github_app_rate_limit = true
+ # enable_job_retry = false
+ # enable_spot_termination_warning = true
+ # }
+ # }
+}
+
+module "webhook_github_app" {
+ source = "../../modules/webhook-github-app"
+ depends_on = [module.runners]
+
+ github_app = {
+ key_base64 = var.github_app.key_base64
+ id = var.github_app.id
+ webhook_secret = random_id.random.hex
+ }
+ webhook_endpoint = module.runners.webhook.endpoint
+}
diff --git a/examples/multi-runner/outputs.tf b/examples/multi-runner/outputs.tf
new file mode 100644
index 0000000000..1feaf2e671
--- /dev/null
+++ b/examples/multi-runner/outputs.tf
@@ -0,0 +1,8 @@
+output "webhook_endpoint" {
+ value = module.runners.webhook.endpoint
+}
+
+output "webhook_secret" {
+ sensitive = true
+ value = random_id.random.hex
+}
diff --git a/examples/multi-runner/providers.tf b/examples/multi-runner/providers.tf
new file mode 100644
index 0000000000..eca2fe96a7
--- /dev/null
+++ b/examples/multi-runner/providers.tf
@@ -0,0 +1,9 @@
+provider "aws" {
+ region = local.aws_region
+
+ default_tags {
+ tags = {
+ Example = local.environment
+ }
+ }
+}
diff --git a/examples/multi-runner/templates/runner-configs/linux-arm64.yaml b/examples/multi-runner/templates/runner-configs/linux-arm64.yaml
new file mode 100644
index 0000000000..7cfe859242
--- /dev/null
+++ b/examples/multi-runner/templates/runner-configs/linux-arm64.yaml
@@ -0,0 +1,20 @@
+matcherConfig:
+ exactMatch: true
+ labelMatchers:
+ - [self-hosted, linux, arm64, amazon]
+fifo: true
+redrive_build_queue:
+ enabled: false
+ maxReceiveCount: null
+runner_config:
+ runner_os: linux
+ runner_architecture: arm64
+ runner_name_prefix: amazon-arm64_
+ enable_ssm_on_runners: true
+ credit_specification: unlimited
+ instance_types:
+ - t4g.large
+ - c6g.large
+ runners_maximum_count: 1
+ delay_webhook_event: 0
+ scale_down_schedule_expression: cron(* * * * ? *)
diff --git a/examples/multi-runner/templates/runner-configs/linux-x64-ubuntu.yaml b/examples/multi-runner/templates/runner-configs/linux-x64-ubuntu.yaml
new file mode 100644
index 0000000000..4b555d194c
--- /dev/null
+++ b/examples/multi-runner/templates/runner-configs/linux-x64-ubuntu.yaml
@@ -0,0 +1,54 @@
+matcherConfig:
+ exactMatch: true
+ labelMatchers:
+ - [self-hosted, linux, x64, ubuntu-latest]
+ - [self-hosted, linux, x64, ubuntu-2204]
+fifo: true
+redrive_build_queue:
+ enabled: false
+ maxReceiveCount: null
+runner_config:
+ runner_os: linux
+ runner_architecture: x64
+ runner_run_as: ubuntu
+ runner_name_prefix: ubuntu-2204-x64_
+ enable_ssm_on_runners: true
+ credit_specification: standard
+ instance_types:
+ - t3a.large
+ - m5ad.large
+ - m5a.large
+ runners_maximum_count: 1
+ delay_webhook_event: 0
+ scale_down_schedule_expression: cron(* * * * ? *)
+ userdata_template: ./templates/user-data.sh
+ ami_owners:
+ - "099720109477" # Canonical's Amazon account ID
+ ami_filter:
+ name:
+ - ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-*
+ state:
+ - available
+ block_device_mappings:
+ - device_name: /dev/sda1
+ delete_on_termination: true
+ volume_type: gp3
+ volume_size: 30
+ encrypted: true
+ iops: null
+ throughput: null
+ kms_key_id: null
+ snapshot_id: null
+ runner_log_files:
+ - log_group_name: syslog
+ prefix_log_group: true
+ file_path: /var/log/syslog
+ log_stream_name: "{instance_id}"
+ - log_group_name: user_data
+ prefix_log_group: true
+ file_path: /var/log/user-data.log
+ log_stream_name: "{instance_id}/user_data"
+ - log_group_name: runner
+ prefix_log_group: true
+ file_path: /opt/actions-runner/_diag/Runner_**.log
+ log_stream_name: "{instance_id}/runner"
diff --git a/examples/multi-runner/templates/runner-configs/linux-x64.yaml b/examples/multi-runner/templates/runner-configs/linux-x64.yaml
new file mode 100644
index 0000000000..bc3527baca
--- /dev/null
+++ b/examples/multi-runner/templates/runner-configs/linux-x64.yaml
@@ -0,0 +1,31 @@
+matcherConfig:
+ exactMatch: false
+ labelMatchers:
+ - [ self-hosted, linux, x64, amazon ]
+ priority: 1 # set ephemeral runner priority to 1
+fifo: true
+runner_config:
+ runner_os: linux
+ runner_architecture: x64
+ runner_name_prefix: amazon-x64_
+ enable_ssm_on_runners: true
+ vpc_id: ${vpc_id}
+ subnet_ids: ${subnet_ids}
+ instance_types:
+ - m5ad.large
+ - m5a.large
+ runners_maximum_count: 1
+ enable_ephemeral_runners: true
+ enable_on_demand_failover_for_errors: ['InsufficientInstanceCapacity']
+ create_service_linked_role_spot: true
+ delay_webhook_event: 0
+ scale_down_schedule_expression: cron(* * * * ? *)
+ runner_metadata_options:
+ instance_metadata_tags: disabled
+ http_endpoint: enabled
+ http_tokens: optional
+ http_put_response_hop_limit: 1
+ job_retry:
+ enable: true
+ max_attempts: 1
+ delay_in_seconds: 180
diff --git a/examples/multi-runner/templates/runner-configs/windows-x64.yaml b/examples/multi-runner/templates/runner-configs/windows-x64.yaml
new file mode 100644
index 0000000000..fdf8be6533
--- /dev/null
+++ b/examples/multi-runner/templates/runner-configs/windows-x64.yaml
@@ -0,0 +1,22 @@
+matcherConfig:
+ exactMatch: true
+ labelMatchers:
+ - [self-hosted, windows, x64, servercore-2022]
+fifo: true
+runner_config:
+ runner_os: windows
+ runner_architecture: x64
+ runner_name_prefix: servercore-2022-x64_
+ enable_ssm_on_runners: true
+ instance_types:
+ - m5.large
+ - c5.large
+ runners_maximum_count: 1
+ delay_webhook_event: 5
+ scale_down_schedule_expression: cron(* * * * ? *)
+ runner_boot_time_in_minutes: 20
+ ami_filter:
+ name:
+ - Windows_Server-2022-English-Full-ECS_Optimized-*
+ state:
+ - available
diff --git a/examples/multi-runner/templates/user-data.sh b/examples/multi-runner/templates/user-data.sh
new file mode 100644
index 0000000000..752a0de0e3
--- /dev/null
+++ b/examples/multi-runner/templates/user-data.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+exec > >(tee /var/log/user-data.log | logger -t user-data -s 2>/dev/console) 2>&1
+
+
+# AWS suggest to create a log for debug purpose based on https://aws.amazon.com/premiumsupport/knowledge-center/ec2-linux-log-user-data/
+# As side effect all command, set +x disable debugging explicitly.
+#
+# An alternative for masking tokens could be: exec > >(sed 's/--token\ [^ ]* /--token\ *** /g' > /var/log/user-data.log) 2>&1
+set +x
+
+%{ if enable_debug_logging }
+set -x
+%{ endif }
+
+${pre_install}
+
+# Install AWS CLI
+apt-get update
+DEBIAN_FRONTEND=noninteractive apt-get install -y \
+ awscli \
+ build-essential \
+ curl \
+ git \
+ iptables \
+ jq \
+ uidmap \
+ unzip \
+ wget
+
+user_name=ubuntu
+user_id=$(id -ru $user_name)
+
+# install and configure cloudwatch logging agent
+wget https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb
+dpkg -i -E ./amazon-cloudwatch-agent.deb
+amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c ssm:${ssm_key_cloudwatch_agent_config}
+
+# configure systemd for running service in users accounts
+cat >/etc/systemd/user@UID.service <<-EOF
+
+[Unit]
+Description=User Manager for UID %i
+After=user-runtime-dir@%i.service
+Wants=user-runtime-dir@%i.service
+
+[Service]
+LimitNOFILE=infinity
+LimitNPROC=infinity
+User=%i
+PAMName=systemd-user
+Type=notify
+
+[Install]
+WantedBy=default.target
+
+EOF
+
+echo export XDG_RUNTIME_DIR=/run/user/$user_id >>/home/$user_name/.bashrc
+
+systemctl daemon-reload
+systemctl enable user@UID.service
+systemctl start user@UID.service
+
+curl -fsSL https://get.docker.com/rootless >>/opt/rootless.sh && chmod 755 /opt/rootless.sh
+su -l $user_name -c /opt/rootless.sh
+echo export DOCKER_HOST=unix:///run/user/$user_id/docker.sock >>/home/$user_name/.bashrc
+echo export PATH=/home/$user_name/bin:$PATH >>/home/$user_name/.bashrc
+
+# Run docker service by default
+loginctl enable-linger $user_name
+su -l $user_name -c "systemctl --user enable docker"
+
+${install_runner}
+
+# config runner for rootless docker
+cd /opt/actions-runner/
+echo DOCKER_HOST=unix:///run/user/$user_id/docker.sock >>.env
+echo PATH=/home/$user_name/bin:$PATH >>.env
+
+${post_install}
+
+cd /opt/actions-runner
+
+${start_runner}
diff --git a/examples/multi-runner/variables.tf b/examples/multi-runner/variables.tf
new file mode 100644
index 0000000000..009c3643db
--- /dev/null
+++ b/examples/multi-runner/variables.tf
@@ -0,0 +1,22 @@
+variable "github_app" {
+ description = "GitHub for API usages."
+
+ type = object({
+ id = string
+ key_base64 = string
+ })
+}
+
+variable "environment" {
+ description = "Environment name, used as prefix"
+
+ type = string
+ default = null
+}
+
+variable "aws_region" {
+ description = "AWS region to deploy to"
+
+ type = string
+ default = "eu-west-1"
+}
diff --git a/examples/multi-runner/versions.tf b/examples/multi-runner/versions.tf
new file mode 100644
index 0000000000..349e8243a5
--- /dev/null
+++ b/examples/multi-runner/versions.tf
@@ -0,0 +1,17 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 5.27"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = "~> 2.0"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "~> 3.0"
+ }
+ }
+ required_version = ">= 1.3.0"
+}
diff --git a/examples/permissions-boundary/.terraform.lock.hcl b/examples/permissions-boundary/.terraform.lock.hcl
index 7ca28893c6..f5f3c3a23c 100644
--- a/examples/permissions-boundary/.terraform.lock.hcl
+++ b/examples/permissions-boundary/.terraform.lock.hcl
@@ -2,59 +2,64 @@
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/aws" {
- version = "4.12.1"
- constraints = ">= 3.63.0, ~> 4.0"
+ version = "5.31.0"
+ constraints = ">= 5.0.0, ~> 5.27"
hashes = [
- "h1:YvwxXRDVzn9j6Gt7Vg8tCcyF/niapue5sxSUw1TH+9U=",
- "zh:2b432dc3bf7e0987bf9dcad5d397c384890d12fcd95827bc4581ca2955fc623a",
- "zh:2f79a448a4e5ad24a706ab634078d0ef159be3278eb24988b7d2185173f5dd8f",
- "zh:5d70074c10cefb30d4104af54f912e58ffa1b6871277b0a5324c8f13000f5009",
- "zh:63623743fb15d54787a96c9761b97a935ff396672e625730cb7a5c1971acf4b6",
- "zh:8263f376e6db684667c10e28df8d8d188e02fd09ad58e1ad7075e363c389e24c",
- "zh:8b5aa9fd1ddf1de0ab7d462891123405e5af04d7e4d1e4b03381634b3cae4884",
+ "h1:ltxyuBWIy9cq0kIKDJH1jeWJy/y7XJLjS4QrsQK4plA=",
+ "zh:0cdb9c2083bf0902442384f7309367791e4640581652dda456f2d6d7abf0de8d",
+ "zh:2fe4884cb9642f48a5889f8dff8f5f511418a18537a9dfa77ada3bcdad391e4e",
+ "zh:36d8bdd72fe61d816d0049c179f495bc6f1e54d8d7b07c45b62e5e1696882a89",
+ "zh:539dd156e3ec608818eb21191697b230117437a58587cbd02ce533202a4dd520",
+ "zh:6a53f4b57ac4eb3479fc0d8b6e301ca3a27efae4c55d9f8bd24071b12a03361c",
+ "zh:6faeb8ff6792ca7af1c025255755ad764667a300291cc10cea0c615479488c87",
+ "zh:7d9423149b323f6d0df5b90c4d9029e5455c670aea2a7eb6fef4684ba7eb2e0b",
+ "zh:8235badd8a5d0993421cacf5ead48fac73d3b5a25c8a68599706a404b1f70730",
+ "zh:860b4f60842b2879c5128b7e386c8b49adeda9287fed12c5cd74861bb659bbcd",
"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
- "zh:d00b2d0b374ab92e934eb597668c5f3e415c4cf8335e6a52ab99949b8fcf57dd",
- "zh:d0e037725aced6cacc2e0a1903b31083c64f8765fb1263e4f8f891745266b7fb",
- "zh:e6e244123bc1df109db90bef0af2a875a0b3afb268f21c3e5bc34753657102ad",
- "zh:ec6901ab8b99ae3df50340e9aa86ed3bac1369f5e1403c0362edd9944640fa22",
- "zh:f6a4d0ce3bd3d4b81163c4ae75b66e50c10b935c60a63d7fb96df285c0eeca40",
+ "zh:b021fceaf9382c8fe3c6eb608c24d01dce3d11ba7e65bb443d51ca9b90e9b237",
+ "zh:b38b0bfc1c69e714e80cf1c9ea06e687ee86aa9f45694be28eb07adcebbe0489",
+ "zh:c972d155f6c01af9690a72adfb99cfc24ef5ef311ca92ce46b9b13c5c153f572",
+ "zh:e0dd29920ec84fdb6026acff44dcc1fb1a24a0caa093fa04cdbc713d384c651d",
+ "zh:e3127ebd2cb0374cd1808f911e6bffe2f4ac4d84317061381242353f3a7bc27d",
]
}
provider "registry.terraform.io/hashicorp/local" {
- version = "2.2.2"
+ version = "2.4.1"
+ constraints = "~> 2.0"
hashes = [
- "h1:BVEZnjtpWxKPG9OOQh4dFa1z5pwMO/uuzYtu6AR2LyM=",
- "zh:027e4873c69da214e2fed131666d5de92089732a11d096b68257da54d30b6f9d",
- "zh:0ba2216e16cfb72538d76a4c4945b4567a76f7edbfef926b1c5a08d7bba2a043",
- "zh:1fee8f6aae1833c27caa96e156cf99a681b6f085e476d7e1b77d285e21d182c1",
- "zh:2e8a3e72e877003df1c390a231e0d8e827eba9f788606e643f8e061218750360",
- "zh:719008f9e262aa1523a6f9132adbe9eee93c648c2981f8359ce41a40e6425433",
+ "h1:gpp25uNkYJYzJVnkyRr7RIBVfwLs9GSq2HNnFpTRBg0=",
+ "zh:244b445bf34ddbd167731cc6c6b95bbed231dc4493f8cc34bd6850cfe1f78528",
+ "zh:3c330bdb626123228a0d1b1daa6c741b4d5d484ab1c7ae5d2f48d4c9885cc5e9",
+ "zh:5ff5f9b791ddd7557e815449173f2db38d338e674d2d91800ac6e6d808de1d1d",
+ "zh:70206147104f4bf26ae67d730c995772f85bf23e28c2c2e7612c74f4dae3c46f",
+ "zh:75029676993accd6bef933c196b2fad51a9ec8a69a847dbbe96ec8ebf7926cdc",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
- "zh:9a70fdbe6ef955c4919a4519caca116f34c19c7ddedd77990fbe4f80fe66dc84",
- "zh:abc412423d670cbb6264827fa80e1ffdc4a74aff3f19ba6a239dd87b85b15bec",
- "zh:ae953a62c94d2a2a0822e5717fafc54e454af57bd6ed02cd301b9786765c1dd3",
- "zh:be0910bdf46698560f9e86f51a4ff795c62c02f8dc82b2b1dab77a0b3a93f61e",
- "zh:e58f9083b7971919b95f553227adaa7abe864fce976f0166cf4d65fc17257ff2",
- "zh:ff4f77cbdbb22cc98182821c7ef84dce16298ab0e997d5c7fae97247f7a4bcb0",
+ "zh:7d48d5999fe1fcdae9295a7c3448ac1541f5a24c474bd82df6d4fa3732483f2b",
+ "zh:b766b38b027f0f84028244d1c2f990431a37d4fc3ac645962924554016507e77",
+ "zh:bfc7ad301dada204cf51c59d8bd6a9a87de5fddb42190b4d6ba157d6e08a1f10",
+ "zh:c902b527702a8c5e2c25a6637d07bbb1690cb6c1e63917a5f6dc460efd18d43f",
+ "zh:d68ae0e1070cf429c46586bc87580c3ed113f76241da2b6e4f1a8348126b3c46",
+ "zh:f4903fd89f7c92a346ae9e666c2d0b6884c4474ae109e9b4bd15e7efaa4bfc29",
]
}
provider "registry.terraform.io/hashicorp/random" {
- version = "3.1.3"
+ version = "3.6.0"
+ constraints = "~> 3.0"
hashes = [
- "h1:LPSVX+oXKGaZmxgtaPf2USxoEsWK/pnhmm/5FKw+PtU=",
- "zh:26e07aa32e403303fc212a4367b4d67188ac965c37a9812e07acee1470687a73",
- "zh:27386f48e9c9d849fbb5a8828d461fde35e71f6b6c9fc235bc4ae8403eb9c92d",
- "zh:5f4edda4c94240297bbd9b83618fd362348cadf6bf24ea65ea0e1844d7ccedc0",
- "zh:646313a907126cd5e69f6a9fafe816e9154fccdc04541e06fed02bb3a8fa2d2e",
- "zh:7349692932a5d462f8dee1500ab60401594dddb94e9aa6bf6c4c0bd53e91bbb8",
+ "h1:I8MBeauYA8J8yheLJ8oSMWqB0kovn16dF/wKZ1QTdkk=",
+ "zh:03360ed3ecd31e8c5dac9c95fe0858be50f3e9a0d0c654b5e504109c2159287d",
+ "zh:1c67ac51254ba2a2bb53a25e8ae7e4d076103483f55f39b426ec55e47d1fe211",
+ "zh:24a17bba7f6d679538ff51b3a2f378cedadede97af8a1db7dad4fd8d6d50f829",
+ "zh:30ffb297ffd1633175d6545d37c2217e2cef9545a6e03946e514c59c0859b77d",
+ "zh:454ce4b3dbc73e6775f2f6605d45cee6e16c3872a2e66a2c97993d6e5cbd7055",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
- "zh:9034daba8d9b32b35930d168f363af04cecb153d5849a7e4a5966c97c5dc956e",
- "zh:bb81dfca59ef5f949ef39f19ea4f4de25479907abc28cdaa36d12ecd7c0a9699",
- "zh:bcf7806b99b4c248439ae02c8e21f77aff9fadbc019ce619b929eef09d1221bb",
- "zh:d708e14d169e61f326535dd08eecd3811cd4942555a6f8efabc37dbff9c6fc61",
- "zh:dc294e19a46e1cefb9e557a7b789c8dd8f319beca99b8c265181bc633dc434cc",
- "zh:f9d758ee53c55dc016dd736427b6b0c3c8eb4d0dbbc785b6a3579b0ffedd9e42",
+ "zh:91df0a9fab329aff2ff4cf26797592eb7a3a90b4a0c04d64ce186654e0cc6e17",
+ "zh:aa57384b85622a9f7bfb5d4512ca88e61f22a9cea9f30febaa4c98c68ff0dc21",
+ "zh:c4a3e329ba786ffb6f2b694e1fd41d413a7010f3a53c20b432325a94fa71e839",
+ "zh:e2699bc9116447f96c53d55f2a00570f982e6f9935038c3810603572693712d0",
+ "zh:e747c0fd5d7684e5bfad8aa0ca441903f15ae7a98a737ff6aca24ba223207e2c",
+ "zh:f1ca75f417ce490368f047b63ec09fd003711ae48487fba90b4aba2ccf71920e",
]
}
diff --git a/examples/permissions-boundary/README.md b/examples/permissions-boundary/README.md
index 60d21345f7..ebcd3acea4 100644
--- a/examples/permissions-boundary/README.md
+++ b/examples/permissions-boundary/README.md
@@ -4,11 +4,6 @@ This module shows how to create GitHub action runners with permissions boundarie
## Usages
-Steps for the full setup, such as creating a GitHub app can be find the module [README](../../README.md). First create the deploy role and boundary policies. These steps require an admin user.
-
-> Ensure you have set the version in `lambdas-download/main.tf` for running the example. The version needs to be set to a GitHub release version, see https://github.com/philips-labs/terraform-aws-github-runner/releases
-
-
```bash
cd setup
terraform init
@@ -21,10 +16,10 @@ Now a new role and policies should be created. The output of the previous step i
Download the lambda releases.
```bash
-cd lambdas-download
+cd ../lambdas-download
terraform init
-terraform apply
-cd ..
+terraform apply -var=module_version=object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [runners](#output\_runners) | n/a |
+| [webhook](#output\_webhook) | n/a |
+
diff --git a/examples/permissions-boundary/lambdas-download/main.tf b/examples/permissions-boundary/lambdas-download/main.tf
deleted file mode 100644
index 87f31bd8a9..0000000000
--- a/examples/permissions-boundary/lambdas-download/main.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-locals {
- version = "object({
id = string
key_base64 = string
}) | n/a | yes |
+| [runner\_os](#input\_runner\_os) | The EC2 Operating System type to use for action runner instances (linux,windows). | `string` | `"linux"` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/prebuilt/lambdas-download/main.tf b/examples/prebuilt/lambdas-download/main.tf
deleted file mode 100644
index 87f31bd8a9..0000000000
--- a/examples/prebuilt/lambdas-download/main.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-locals {
- version = "object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [runners](#output\_runners) | n/a |
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/ubuntu/lambdas-download/main.tf b/examples/ubuntu/lambdas-download/main.tf
deleted file mode 100644
index 87f31bd8a9..0000000000
--- a/examples/ubuntu/lambdas-download/main.tf
+++ /dev/null
@@ -1,25 +0,0 @@
-locals {
- version = "object({
id = string
key_base64 = string
}) | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [runners](#output\_runners) | n/a |
+| [webhook\_endpoint](#output\_webhook\_endpoint) | n/a |
+| [webhook\_secret](#output\_webhook\_secret) | n/a |
+
diff --git a/examples/windows/lambdas-download/README.md b/examples/windows/lambdas-download/README.md
new file mode 100644
index 0000000000..a9a0b890e9
--- /dev/null
+++ b/examples/windows/lambdas-download/README.md
@@ -0,0 +1,31 @@
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1 |
+
+## Providers
+
+No providers.
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [lambdas](#module\_lambdas) | ../../../modules/download-lambda | n/a |
+
+## Resources
+
+No resources.
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [files](#output\_files) | n/a |
+
\ No newline at end of file
diff --git a/examples/windows/lambdas-download/versions.tf b/examples/windows/lambdas-download/versions.tf
new file mode 100644
index 0000000000..c934712b56
--- /dev/null
+++ b/examples/windows/lambdas-download/versions.tf
@@ -0,0 +1,3 @@
+terraform {
+ required_version = ">= 1"
+}
diff --git a/examples/windows/main.tf b/examples/windows/main.tf
index 359b654c9a..fea31e79e9 100644
--- a/examples/windows/main.tf
+++ b/examples/windows/main.tf
@@ -7,17 +7,24 @@ resource "random_id" "random" {
byte_length = 20
}
+module "base" {
+ source = "../base"
+
+ prefix = local.environment
+ aws_region = local.aws_region
+}
+
module "runners" {
source = "../../"
aws_region = local.aws_region
- vpc_id = module.vpc.vpc_id
- subnet_ids = module.vpc.private_subnets
+ vpc_id = module.base.vpc.vpc_id
+ subnet_ids = module.base.vpc.private_subnets
prefix = local.environment
github_app = {
- key_base64 = var.github_app_key_base64
- id = var.github_app_id
+ key_base64 = var.github_app.key_base64
+ id = var.github_app.id
webhook_secret = random_id.random.hex
}
@@ -28,7 +35,7 @@ module "runners" {
enable_organization_runners = false
# no need to add extra windows tag here as it is automatically added by GitHub
- runner_extra_labels = "default,example"
+ runner_extra_labels = ["default", "example"]
# Set the OS to Windows
runner_os = "windows"
@@ -46,3 +53,15 @@ module "runners" {
# override scaling down for testing
scale_down_schedule_expression = "cron(* * * * ? *)"
}
+
+module "webhook_github_app" {
+ source = "../../modules/webhook-github-app"
+ depends_on = [module.runners]
+
+ github_app = {
+ key_base64 = var.github_app.key_base64
+ id = var.github_app.id
+ webhook_secret = random_id.random.hex
+ }
+ webhook_endpoint = module.runners.webhook.endpoint
+}
diff --git a/examples/windows/providers.tf b/examples/windows/providers.tf
index b6c81d5415..ccdd0b1622 100644
--- a/examples/windows/providers.tf
+++ b/examples/windows/providers.tf
@@ -1,3 +1,8 @@
provider "aws" {
region = local.aws_region
+ default_tags {
+ tags = {
+ Example = local.environment
+ }
+ }
}
diff --git a/examples/windows/variables.tf b/examples/windows/variables.tf
index 69dcd0c61c..d8b5356484 100644
--- a/examples/windows/variables.tf
+++ b/examples/windows/variables.tf
@@ -1,4 +1,8 @@
+variable "github_app" {
+ description = "GitHub for API usages."
-variable "github_app_key_base64" {}
-
-variable "github_app_id" {}
+ type = object({
+ id = string
+ key_base64 = string
+ })
+}
diff --git a/examples/windows/versions.tf b/examples/windows/versions.tf
index 376c182312..349e8243a5 100644
--- a/examples/windows/versions.tf
+++ b/examples/windows/versions.tf
@@ -2,14 +2,16 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.0"
+ version = "~> 5.27"
}
local = {
- source = "hashicorp/local"
+ source = "hashicorp/local"
+ version = "~> 2.0"
}
random = {
- source = "hashicorp/random"
+ source = "hashicorp/random"
+ version = "~> 3.0"
}
}
- required_version = ">= 1"
+ required_version = ">= 1.3.0"
}
diff --git a/examples/windows/vpc.tf b/examples/windows/vpc.tf
deleted file mode 100644
index 6b19a06b3f..0000000000
--- a/examples/windows/vpc.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-module "vpc" {
- source = "terraform-aws-modules/vpc/aws"
- version = "3.11.2"
-
- name = "vpc-${local.environment}"
- cidr = "10.0.0.0/16"
-
- azs = ["${local.aws_region}a", "${local.aws_region}b", "${local.aws_region}c"]
- private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
- public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]
-
- enable_dns_hostnames = true
- enable_nat_gateway = true
- map_public_ip_on_launch = false
- single_nat_gateway = true
-
- tags = {
- Environment = local.environment
- }
-
-}
diff --git a/images/README.md b/images/README.md
index a3cbe7bec9..d3599710ae 100644
--- a/images/README.md
+++ b/images/README.md
@@ -1,17 +1,21 @@
# Prebuilt Images
+> :warning: These images are provided as an example/
+
The images inside this folder are pre-built images designed to shorten the boot time of your runners and make using ephemeral runners a faster experience.
-These images share the same scripting as used in the user-data mechanism in `/modules/runners/templates/`. We use a `tempaltefile` mechanism to insert the relevant script fragments into the scripts used for provisioning the images.
+These images share the same scripting as used in the user-data mechanism in `/modules/runners/templates/`. We use a `templatefile` mechanism to insert the relevant script fragments into the scripts used for provisioning the images.
+
+The examples in `linux-al2023` and `windows-core-2019` also upload a `start-runner` script that uses the exact same startup process as used in the user-data mechanism. This means that the image created here does not need any extra scripts injected or changes to boot up and connect to GH.
-The examples in `linux-amzn2` and `windows-core-2019` also upload a `start-runner` script that uses the exact same startup process as used in the user-data mechanism. This means that the image created here does not need any extra scripts injected or changes to boot up and connect to GH.
+To remove old images the [AMI house keeper module](https://philips-labs.github.io/terraform-aws-github-runner/modules/public/ami-housekeeper/) can be used.
## Building your own
To build these images you first need to install packer.
You will also need an amazon account and to have provisioned your credentials for packer to consume.
-Assuming you are building the `linux-amzn2` image. Then run the following from within the `linux-amzn2` folder
+Assuming you are building the `linux-al2023` image. Then run the following from within the `linux-al2023` folder
```bash
packer init .
@@ -25,13 +29,13 @@ Your image will then begin to build inside AWS and when finished you will be pro
To use your image in the terraform modules you will need to set some values on the module.
-Assuming you have built the `linux-amzn2` image which has a pre-defined AMI name in the following format `github-runner-amzn2-x86_64-YYYYMMDDhhmm` you can use the following values.
+Assuming you have built the `linux-al2023` image which has a pre-defined AMI name in the following format `github-runner-al2023-x86_64-YYYYMMDDhhmm` you can use the following values.
```hcl
# set the name of the ami to use
-ami_filter = { name = ["github-runner-amzn2-x86_64-2021*"] }
-# provide the owner id of
+ami_filter = { name = ["github-runner-al2023-x86_64-2023*"] }
+# provide the owner id of the account that owns the AMI
ami_owners = ["1024,I&&(t.dump&&Xw===t.dump.charCodeAt(0)?v+="?":v+="? "),v+=t.dump,I&&(v+=CT(t,e)),ed(t,e+1,E,!0,I)&&(t.dump&&Xw===t.dump.charCodeAt(0)?v+=":":v+=": ",v+=t.dump,a+=v));t.tag=n,t.dump=a||"{}"}function kK(t,e,r){var o,a,n,u,A,p;for(a=r?t.explicitTypes:t.implicitTypes,n=0,u=a.length;n tag resolver accepts not "'+p+'" style');t.dump=o}return!0}return!1}function ed(t,e,r,o,a,n){t.tag=null,t.dump=r,kK(t,r,!1)||kK(t,r,!0);var u=QK.call(t.dump);o&&(o=t.flowLevel<0||t.flowLevel>e);var A=u==="[object Object]"||u==="[object Array]",p,h;if(A&&(p=t.duplicates.indexOf(r),h=p!==-1),(t.tag!==null&&t.tag!=="?"||h||t.indent!==2&&e>0)&&(a=!1),h&&t.usedDuplicates[p])t.dump="*ref_"+p;else{if(A&&h&&!t.usedDuplicates[p]&&(t.usedDuplicates[p]=!0),u==="[object Object]")o&&Object.keys(t.dump).length!==0?(q6e(t,e,t.dump,a),h&&(t.dump="&ref_"+p+t.dump)):(H6e(t,e,t.dump),h&&(t.dump="&ref_"+p+" "+t.dump));else if(u==="[object Array]"){var E=t.noArrayIndent&&e>0?e-1:e;o&&t.dump.length!==0?(_6e(t,E,t.dump,a),h&&(t.dump="&ref_"+p+t.dump)):(U6e(t,E,t.dump),h&&(t.dump="&ref_"+p+" "+t.dump))}else if(u==="[object String]")t.tag!=="?"&&N6e(t,t.dump,e,n);else{if(t.skipInvalid)return!1;throw new $w("unacceptable kind of an object to dump "+u)}t.tag!==null&&t.tag!=="?"&&(t.dump="!<"+t.tag+"> "+t.dump)}return!0}function G6e(t,e){var r=[],o=[],a,n;for(wT(t,r,o),a=0,n=o.length;a {var n,u;if(Object.getPrototypeOf(o).toString()==="[object Set]")if(typeof a?.coercions<"u"){if(typeof a?.coercion>"u")return pr(a,"Unbound coercion result");let A=[...o],p=[...o];if(!r(p,Object.assign(Object.assign({},a),{coercion:void 0})))return!1;let h=()=>p.some((E,I)=>E!==A[I])?new Set(p):o;return a.coercions.push([(n=a.p)!==null&&n!==void 0?n:".",nI(a.coercion,o,h)]),!0}else{let A=!0;for(let p of o)if(A=t(p,Object.assign({},a))&&A,!A&&a?.errors==null)break;return A}if(typeof a?.coercions<"u"){if(typeof a?.coercion>"u")return pr(a,"Unbound coercion result");let A={value:o};return 
r(o,Object.assign(Object.assign({},a),{coercion:Wu(A,"value")}))?(a.coercions.push([(u=a.p)!==null&&u!==void 0?u:".",nI(a.coercion,o,()=>new Set(A.value))]),!0):!1}return pr(a,`Expected a set (got ${qn(o)})`)}})}function fqe(t,e){let r=iD(sD([t,e])),o=oD(e,{keys:t});return Hr({test:(a,n)=>{var u,A,p;if(Object.getPrototypeOf(a).toString()==="[object Map]")if(typeof n?.coercions<"u"){if(typeof n?.coercion>"u")return pr(n,"Unbound coercion result");let h=[...a],E=[...a];if(!r(E,Object.assign(Object.assign({},n),{coercion:void 0})))return!1;let I=()=>E.some((v,x)=>v[0]!==h[x][0]||v[1]!==h[x][1])?new Map(E):a;return n.coercions.push([(u=n.p)!==null&&u!==void 0?u:".",nI(n.coercion,a,I)]),!0}else{let h=!0;for(let[E,I]of a)if(h=t(E,Object.assign({},n))&&h,!h&&n?.errors==null||(h=e(I,Object.assign(Object.assign({},n),{p:Kp(n,E)}))&&h,!h&&n?.errors==null))break;return h}if(typeof n?.coercions<"u"){if(typeof n?.coercion>"u")return pr(n,"Unbound coercion result");let h={value:a};return Array.isArray(a)?r(a,Object.assign(Object.assign({},n),{coercion:void 0}))?(n.coercions.push([(A=n.p)!==null&&A!==void 0?A:".",nI(n.coercion,a,()=>new Map(h.value))]),!0):!1:o(a,Object.assign(Object.assign({},n),{coercion:Wu(h,"value")}))?(n.coercions.push([(p=n.p)!==null&&p!==void 0?p:".",nI(n.coercion,a,()=>new Map(Object.entries(h.value)))]),!0):!1}return pr(n,`Expected a map (got ${qn(a)})`)}})}function sD(t,{delimiter:e}={}){let r=dz(t.length);return Hr({test:(o,a)=>{var n;if(typeof o=="string"&&typeof e<"u"&&typeof a?.coercions<"u"){if(typeof a?.coercion>"u")return pr(a,"Unbound coercion result");o=o.split(e),a.coercions.push([(n=a.p)!==null&&n!==void 0?n:".",a.coercion.bind(null,o)])}if(!Array.isArray(o))return pr(a,`Expected a tuple (got ${qn(o)})`);let u=r(o,Object.assign({},a));for(let A=0,p=o.length;A {var n;if(Array.isArray(o)&&typeof a?.coercions<"u")return typeof a?.coercion>"u"?pr(a,"Unbound coercion result"):r(o,Object.assign(Object.assign({},a),{coercion:void 
0}))?(o=Object.fromEntries(o),a.coercions.push([(n=a.p)!==null&&n!==void 0?n:".",a.coercion.bind(null,o)]),!0):!1;if(typeof o!="object"||o===null)return pr(a,`Expected an object (got ${qn(o)})`);let u=Object.keys(o),A=!0;for(let p=0,h=u.length;p `:`[${x}]`)}o.push(...this.arity.leading.map(u=>`<${u}>`)),this.arity.extra===tl?o.push("..."):o.push(...this.arity.extra.map(u=>`[${u}]`)),o.push(...this.arity.trailing.map(u=>`<${u}>`))}return{usage:o.join(" "),options:a}}compile(){if(typeof this.context>"u")throw new Error("Assertion failed: No context attached");let e=yz(),r=un.InitialNode,o=this.usage().usage,a=this.options.filter(A=>A.required).map(A=>A.nameSet);r=Mc(e,el()),zo(e,un.InitialNode,Hn.StartOfInput,r,["setCandidateState",{candidateUsage:o,requiredOptions:a}]);let n=this.arity.proxy?"always":"isNotOptionLike",u=this.paths.length>0?this.paths:[[]];for(let A of u){let p=r;if(A.length>0){let v=Mc(e,el());Cy(e,p,v),this.registerOptions(e,v),p=v}for(let v=0;v {var XJe=Xp(),ZJe=Hl(),$Je=XJe(ZJe,"DataView");Cte.exports=$Je});var Bte=_((kFt,Ite)=>{var eVe=Xp(),tVe=Hl(),rVe=eVe(tVe,"Promise");Ite.exports=rVe});var Pte=_((QFt,vte)=>{var nVe=Xp(),iVe=Hl(),sVe=nVe(iVe,"Set");vte.exports=sVe});var Ste=_((RFt,Dte)=>{var oVe=Xp(),aVe=Hl(),lVe=oVe(aVe,"WeakMap");Dte.exports=lVe});var qI=_((FFt,Tte)=>{var $L=wte(),eN=UD(),tN=Bte(),rN=Pte(),nN=Ste(),Fte=gd(),Hy=qL(),bte="[object Map]",cVe="[object Object]",xte="[object Promise]",kte="[object Set]",Qte="[object WeakMap]",Rte="[object DataView]",uVe=Hy($L),AVe=Hy(eN),fVe=Hy(tN),pVe=Hy(rN),hVe=Hy(nN),dd=Fte;($L&&dd(new $L(new ArrayBuffer(1)))!=Rte||eN&&dd(new eN)!=bte||tN&&dd(tN.resolve())!=xte||rN&&dd(new rN)!=kte||nN&&dd(new nN)!=Qte)&&(dd=function(t){var e=Fte(t),r=e==cVe?t.constructor:void 0,o=r?Hy(r):"";if(o)switch(o){case uVe:return Rte;case AVe:return bte;case fVe:return xte;case pVe:return kte;case hVe:return Qte}return e});Tte.exports=dd});var qte=_((TFt,Hte)=>{var 
iN=HD(),gVe=jL(),dVe=kee(),mVe=Ete(),Lte=qI(),Nte=ql(),Ote=OI(),yVe=zD(),EVe=1,Mte="[object Arguments]",Ute="[object Array]",XD="[object Object]",CVe=Object.prototype,_te=CVe.hasOwnProperty;function wVe(t,e,r,o,a,n){var u=Nte(t),A=Nte(e),p=u?Ute:Lte(t),h=A?Ute:Lte(e);p=p==Mte?XD:p,h=h==Mte?XD:h;var E=p==XD,I=h==XD,v=p==h;if(v&&Ote(t)){if(!Ote(e))return!1;u=!0,E=!1}if(v&&!E)return n||(n=new iN),u||yVe(t)?gVe(t,e,r,o,a,n):dVe(t,e,p,r,o,a,n);if(!(r&EVe)){var x=E&&_te.call(t,"__wrapped__"),C=I&&_te.call(e,"__wrapped__");if(x||C){var F=x?t.value():t,N=C?e.value():e;return n||(n=new iN),a(F,N,r,o,n)}}return v?(n||(n=new iN),mVe(t,e,r,o,a,n)):!1}Hte.exports=wVe});var Wte=_((LFt,Yte)=>{var IVe=qte(),Gte=Vu();function jte(t,e,r,o,a){return t===e?!0:t==null||e==null||!Gte(t)&&!Gte(e)?t!==t&&e!==e:IVe(t,e,r,o,jte,a)}Yte.exports=jte});var zte=_((NFt,Kte)=>{var BVe=Wte();function vVe(t,e){return BVe(t,e)}Kte.exports=vVe});var sN=_((OFt,Jte)=>{var PVe=Xp(),DVe=function(){try{var t=PVe(Object,"defineProperty");return t({},"",{}),t}catch{}}();Jte.exports=DVe});var ZD=_((MFt,Xte)=>{var Vte=sN();function SVe(t,e,r){e=="__proto__"&&Vte?Vte(t,e,{configurable:!0,enumerable:!0,value:r,writable:!0}):t[e]=r}Xte.exports=SVe});var oN=_((UFt,Zte)=>{var bVe=ZD(),xVe=Ty();function kVe(t,e,r){(r!==void 0&&!xVe(t[e],r)||r===void 0&&!(e in t))&&bVe(t,e,r)}Zte.exports=kVe});var ere=_((_Ft,$te)=>{function QVe(t){return function(e,r,o){for(var a=-1,n=Object(e),u=o(e),A=u.length;A--;){var p=u[t?A:++a];if(r(n[p],p,n)===!1)break}return e}}$te.exports=QVe});var rre=_((HFt,tre)=>{var RVe=ere(),FVe=RVe();tre.exports=FVe});var aN=_((GI,qy)=>{var TVe=Hl(),ore=typeof GI=="object"&&GI&&!GI.nodeType&&GI,nre=ore&&typeof qy=="object"&&qy&&!qy.nodeType&&qy,LVe=nre&&nre.exports===ore,ire=LVe?TVe.Buffer:void 0,sre=ire?ire.allocUnsafe:void 0;function NVe(t,e){if(e)return t.slice();var r=t.length,o=sre?sre(r):new t.constructor(r);return t.copy(o),o}qy.exports=NVe});var $D=_((qFt,lre)=>{var are=YL();function 
OVe(t){var e=new t.constructor(t.byteLength);return new are(e).set(new are(t)),e}lre.exports=OVe});var lN=_((GFt,cre)=>{var MVe=$D();function UVe(t,e){var r=e?MVe(t.buffer):t.buffer;return new t.constructor(r,t.byteOffset,t.length)}cre.exports=UVe});var eS=_((jFt,ure)=>{function _Ve(t,e){var r=-1,o=t.length;for(e||(e=Array(o));++r