diff --git a/config/staging.yaml b/config/staging.yaml
index 5fec9f457..72697cfb1 100644
--- a/config/staging.yaml
+++ b/config/staging.yaml
@@ -1,4 +1,4 @@
-projectName: binderhub-288415
+projectName: staging
 
 # binderhubEnabled: false
 
@@ -6,18 +6,21 @@ binderhub:
   config:
     BinderHub:
       pod_quota: 20
-      hub_url: https://hub.gke2.staging.mybinder.org
-      badge_base_url: https://staging.mybinder.org
-      image_prefix: us-central1-docker.pkg.dev/binderhub-288415/staging/r2d-2023-04-
+      hub_url: https://hub.ovh.staging.mybinder.2i2c.cloud
+      badge_base_url: https://staging.mybinder.2i2c.cloud
+      image_prefix: 985ed7pp.c1.va1.container-registry.ovh.us/mybinder-builds/
       sticky_builds: true
-    DockerRegistry:
-      token_url: "https://us-central1-docker.pkg.dev/v2/token"
-
-  registry:
-    url: "https://us-central1-docker.pkg.dev"
-    username: "_json_key"
+  extraVolumes:
+    - name: secrets
+      secret:
+        secretName: events-archiver-secrets
+  extraVolumeMounts:
+    - name: secrets
+      mountPath: /secrets
+      readOnly: true
 
   extraEnv:
+    GOOGLE_APPLICATION_CREDENTIALS: /secrets/service-account.json
     EVENT_LOG_NAME: "binderhub-staging-events-text"
 
   resources:
@@ -25,12 +28,6 @@ binderhub:
       cpu: 0.1
       memory: 512Mi
 
-  hpa:
-    enabled: true
-    maxReplicas: 3
-    minReplicas: 1
-    targetCPU: 90 # 90% of cpu request, so 90m CPU
-
   dind:
     resources:
       requests:
@@ -42,8 +39,7 @@ binderhub:
 
   ingress:
     hosts:
-      - gke.staging.mybinder.org
-      - gke2.staging.mybinder.org
+      - ovh.staging.mybinder.2i2c.cloud
 
   jupyterhub:
     hub:
@@ -60,13 +56,11 @@ binderhub:
         limit: 0.5
     ingress:
       hosts:
-        - hub.gke.staging.mybinder.org
-        - hub.gke2.staging.mybinder.org
+        - hub.ovh.staging.mybinder.2i2c.cloud
       tls:
         - secretName: kubelego-tls-jupyterhub-staging
          hosts:
-            - hub.gke.staging.mybinder.org
-            - hub.gke2.staging.mybinder.org
+            - hub.ovh.staging.mybinder.2i2c.cloud
     scheduling:
       userPlaceholder:
         replicas: 1
@@ -87,12 +81,10 @@ minesweeper:
 grafana:
   ingress:
     hosts:
-      - grafana.staging.mybinder.org
-      - grafana.gke2.staging.mybinder.org
+      - grafana.ovh.staging.mybinder.2i2c.cloud
     tls:
       - hosts:
-          - grafana.staging.mybinder.org
-          - grafana.gke2.staging.mybinder.org
+          - grafana.ovh.staging.mybinder.2i2c.cloud
        secretName: kubelego-tls-grafana
   datasources:
     datasources.yaml:
@@ -101,22 +93,18 @@ grafana:
         - name: prometheus
           orgId: 1
           type: prometheus
-          url: https://prometheus.staging.mybinder.org
+          url: https://prometheus.ovh.staging.mybinder.2i2c.cloud
           isDefault: true
           editable: false
-  persistence:
-    storageClassName: standard
 
 prometheus:
   server:
     ingress:
       hosts:
-        - prometheus.staging.mybinder.org
-        - prometheus.gke2.staging.mybinder.org
+        - prometheus.ovh.staging.mybinder.2i2c.cloud
       tls:
         - hosts:
-            - prometheus.staging.mybinder.org
-            - prometheus.gke2.staging.mybinder.org
+            - prometheus.ovh.staging.mybinder.2i2c.cloud
          secretName: kubelego-tls-prometheus
     persistentVolume:
       size: 12G
@@ -126,8 +114,8 @@ prometheus:
 ingress-nginx:
   controller:
     replicaCount: 2
-    service:
-      loadBalancerIP: 35.222.35.25
+    # service:
+    #   loadBalancerIP: 35.222.35.25
     resources:
       requests:
         cpu: 10m
@@ -139,31 +127,22 @@ ingress-nginx:
 static:
   ingress:
     hosts:
-      - static.staging.mybinder.org
-      - static.gke2.staging.mybinder.org
+      - static.ovh.staging.mybinder.2i2c.cloud
 
 redirector:
   enabled: true
   redirects:
     - type: host
       host:
-        from: docs.staging.mybinder.org
+        from: docs.staging.mybinder.2i2c.cloud
        to: mybinder.readthedocs.io
 
 matomo:
-  enabled: true
-  db:
-    instanceName: binderhub-288415:us-central1:matomo-staging
-  trustedHosts:
-    - staging.mybinder.org
-    - gke2.staging.mybinder.org
-  ingress:
-    hosts:
-      - staging.mybinder.org
-      - gke2.staging.mybinder.org
+  enabled: false
 
 analyticsPublisher:
-  enabled: true
+  # FIXME: Enable this
+  enabled: false
   project: binderhub-288415
   destinationBucket: binder-staging-events-archive
   events:
@@ -177,15 +156,16 @@ analyticsPublisher:
       sourceBucket: binder-billing-archive
 
 gcsProxy:
-  enabled: true
+  # FIXME: Enable this
+  enabled: false
   buckets:
     - name: binder-staging-events-archive
-      host: archive.analytics.gke2.staging.mybinder.org
+      host: archive.analytics.gke2.staging.mybinder.2i2c.cloud
     - name: binder-staging-events-archive
-      host: archive.analytics.staging.mybinder.org
+      host: archive.analytics.staging.mybinder.2i2c.cloud
 
 federationRedirect:
-  host: staging.mybinder.org
+  host: staging.mybinder.2i2c.cloud
   enabled: true
   resources:
     requests:
@@ -197,12 +177,7 @@ federationRedirect:
   hosts:
     gke:
       prime: true
-      url: https://gke2.staging.mybinder.org
-      weight: 4
-      health: https://gke2.staging.mybinder.org/health
-      versions: https://gke2.staging.mybinder.org/versions
-    gesis:
-      url: https://notebooks.gesis.org/binder
+      url: https://ovh.staging.mybinder.2i2c.cloud
       weight: 100
-      health: https://notebooks.gesis.org/binder/health
-      versions: https://notebooks.gesis.org/binder/versions
+      health: https://ovh.staging.mybinder.2i2c.cloud/health
+      versions: https://ovh.staging.mybinder.2i2c.cloud/versions
diff --git a/deploy.py b/deploy.py
index 49326eb48..610c774bd 100755
--- a/deploy.py
+++ b/deploy.py
@@ -21,7 +21,6 @@
 ABSOLUTE_HERE = os.path.dirname(os.path.realpath(__file__))
 
 GCP_PROJECTS = {
-    "staging": "binderhub-288415",
     "prod": "binderhub-288415",
 }
 
@@ -31,12 +30,7 @@
 }
 
 # Projects using raw KUBECONFIG files
-KUBECONFIG_CLUSTERS = {
-    "localhost",
-    "ovh2",
-    "hetzner-2i2c",
-    "hetzner-gesis",
-}
+KUBECONFIG_CLUSTERS = {"localhost", "ovh2", "hetzner-2i2c", "hetzner-gesis", "staging"}
 
 # Mapping of config name to cluster name for AWS EKS deployments
 AWS_DEPLOYMENTS = {"curvenote": "binderhub"}
diff --git a/secrets/staging-kubeconfig.yml b/secrets/staging-kubeconfig.yml
new file mode 100644
index 000000000..9fc3bf704
Binary files /dev/null and b/secrets/staging-kubeconfig.yml differ
diff --git a/secrets/staging.key b/secrets/staging.key
new file mode 100644
index 000000000..db0770158
Binary files /dev/null and b/secrets/staging.key differ
diff --git a/secrets/staging.key.pub b/secrets/staging.key.pub
new file mode 100644
index 000000000..f88b2ace0
Binary files /dev/null and b/secrets/staging.key.pub differ
diff --git a/terraform/ovh/.terraform.lock.hcl b/terraform/ovh/.terraform.lock.hcl
deleted file mode 100644
index fb0dc497a..000000000
--- a/terraform/ovh/.terraform.lock.hcl
+++ /dev/null
@@ -1,64 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/bestseller/harbor" {
-  version     = "3.7.1"
-  constraints = "~> 3.0"
-  hashes = [
-    "h1:DFSSRCMi3EACnn8F76IUun3TSCCJhYVimBbsqmcf53k=",
-    "zh:033aad0bdba25897a8869f1e63bff53518463dac6686867d25872709e82900ad",
-    "zh:1cb5330577c2844d60d12e10e94184d6c9da9dadf4c6fe7e4b67024eb4f379c9",
-    "zh:3ee00bf9ed398a5987089ad753473a3bf116c3a8cb2d7ec924efb795024ad563",
-    "zh:45f6ac380be435b68a90a0d8aa8d19cdcbe173af8c4f2385bd7feeaf11be7315",
-    "zh:5edc3a76b86407b353be12b3fe66513be29c50c2c9f6c458d8ba17fbef350c2c",
-    "zh:82f2e2b9e31f4c45f3094b52c2ebd8c98186cb74356be67ccf1992b7f27c10af",
-    "zh:8b359f04f75d2ac92ab0d6d6a080feb0c439d2e6109d4fa014f9ba32484e7ee2",
-    "zh:a453fd9b4be39dedbc65f50ea72424029e1fac5815320994af47746a09c10983",
-    "zh:b4cea902660d2ec6c6c53deca5c3f8ba54a529f26000a03f44b9c293bb1d1554",
-    "zh:b68e363e918bb3ed9eb5b3ab30cc84075608fb8dc21b69b02d5b97e36325eec4",
-    "zh:c34c6a8d1a9c15f948d593754b512d3552a37dd13d164c57d8e9ba8967a8b127",
-    "zh:d93117390ff2448790689f962d06cc897571ad972e12b74734fb80588f7e5dba",
-    "zh:d9a30a706d6c1a6c451d0d0349605a092087694e7c42cf37c6044559682d9eb1",
-    "zh:e2f6c279da60aa9ccde6d52a42ec596052f997f023ffc1d9d0ad4a56fbf5a8ca",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/random" {
-  version     = "3.3.2"
-  constraints = "~> 3.3.2"
-  hashes = [
-    "h1:YChjos7Hrvr2KgTc9GzQ+de/QE2VLAeRJgxFemnCltU=",
-    "zh:038293aebfede983e45ee55c328e3fde82ae2e5719c9bd233c324cfacc437f9c",
-    "zh:07eaeab03a723d83ac1cc218f3a59fceb7bbf301b38e89a26807d1c93c81cef8",
-    "zh:427611a4ce9d856b1c73bea986d841a969e4c2799c8ac7c18798d0cc42b78d32",
-    "zh:49718d2da653c06a70ba81fd055e2b99dfd52dcb86820a6aeea620df22cd3b30",
-    "zh:5574828d90b19ab762604c6306337e6cd430e65868e13ef6ddb4e25ddb9ad4c0",
-    "zh:7222e16f7833199dabf1bc5401c56d708ec052b2a5870988bc89ff85b68a5388",
-    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
-    "zh:b1b2d7d934784d2aee98b0f8f07a8ccfc0410de63493ae2bf2222c165becf938",
-    "zh:b8f85b6a20bd264fcd0814866f415f0a368d1123cd7879c8ebbf905d370babc8",
-    "zh:c3813133acc02bbebddf046d9942e8ba5c35fc99191e3eb057957dafc2929912",
-    "zh:e7a41dbc919d1de800689a81c240c27eec6b9395564630764ebb323ea82ac8a9",
-    "zh:ee6d23208449a8eaa6c4f203e33f5176fa795b4b9ecf32903dffe6e2574732c2",
-  ]
-}
-
-provider "registry.terraform.io/ovh/ovh" {
-  version = "0.22.0"
-  hashes = [
-    "h1:bGXnKydwFKZ5EJHgvyOdp0sBc+l8NyahdheA8WNl4oQ=",
-    "zh:1f7f344887dacf2ab18561e6c4ada43a36d87b0e6f4141b575a707243eca9e15",
-    "zh:3edf11907685f23b65324850dbc2a26d2f36f272e96c452bd063236fd9210a59",
-    "zh:438d9e6b523e1880bc6b2b97aa5792a3837639e43a179d0e7515356f9f54e083",
-    "zh:5351dcf3cc3bf367986c92a31050c6f4fd9588afebd0a3d8d1fbc148480cfda0",
-    "zh:5950a3c95426c67b3d7412674b539b823ae0a7c0ae629102394675fedaa14ab1",
-    "zh:6798a54fc9c5d7da8ebfac80923b7cb5e47d0ddc861a18ea507f36a87928cd2d",
-    "zh:79cb055210324b307d09aa0ef11ee4ad80303db4653672b1b87e83cfe8ec0a76",
-    "zh:9379906e17a74dfbafae678f03a0d459508e64734442b331c6cac503bc037a49",
-    "zh:9bcb071abe38abad1553b3cfb697894e568b2b46ed3bbedbdfb310fcefde5099",
-    "zh:b467950ee1aebda1b0a45fa3780a7f971d9b882d2ceb4effc65108e589994d6b",
-    "zh:deac69ac94b315e0d71e6700fff62de84b937f5deee23f0fb95be3606aabbe81",
-    "zh:e9fcb4046056e3b105e638b23c3ed109c10291f02e292f70bf90679821811d11",
-    "zh:ef79e5d5dd8334ee1330f60c0f379ccc71b1c96f6b463a287c400ab3c51efae3",
-  ]
-}
diff --git a/terraform/ovh/harbor.tf b/terraform/ovh/harbor.tf
new file mode 100644
index 000000000..5618fb9d1
--- /dev/null
+++ b/terraform/ovh/harbor.tf
@@ -0,0 +1,121 @@
+# now configure the registry via harbor itself
+provider "harbor" {
+  url      = ovh_cloud_project_containerregistry.registry.url
+  username = ovh_cloud_project_containerregistry_user.admin.login
+  password = ovh_cloud_project_containerregistry_user.admin.password
+}
+
+# user builds go in mybinder-builds
+# these are separate for easier separation of retention policies
+resource "harbor_project" "mybinder-builds" {
+  name = "mybinder-builds"
+}
+
+resource "harbor_robot_account" "builder" {
+  name        = "builder"
+  description = "BinderHub builder: push new user images"
+  level       = "project"
+  permissions {
+    access {
+      action   = "push"
+      resource = "repository"
+    }
+    access {
+      action   = "pull"
+      resource = "repository"
+    }
+    kind      = "project"
+    namespace = harbor_project.mybinder-builds.name
+  }
+}
+
+resource "harbor_robot_account" "user-puller" {
+  name        = "user-puller"
+  description = "Pull access to user images"
+  level       = "project"
+  permissions {
+    access {
+      action   = "pull"
+      resource = "repository"
+    }
+    kind      = "project"
+    namespace = harbor_project.mybinder-builds.name
+  }
+}
+
+
+resource "harbor_retention_policy" "builds" {
+  # run retention policy on Saturday morning
+  scope    = harbor_project.mybinder-builds.id
+  schedule = "0 0 7 * * 6"
+  # rule {
+  #   repo_matching        = "**"
+  #   tag_matching         = "**"
+  #   most_recently_pulled = 1
+  #   untagged_artifacts   = false
+  # }
+  rule {
+    repo_matching          = "**"
+    tag_matching           = "**"
+    n_days_since_last_pull = 30
+    untagged_artifacts     = false
+  }
+  rule {
+    repo_matching          = "**"
+    tag_matching           = "**"
+    n_days_since_last_push = 7
+    untagged_artifacts     = false
+  }
+}
+
+resource "harbor_garbage_collection" "gc" {
+  # run garbage collection on Sunday morning
+  # try to make sure it's not run at the same time as the retention policy
+  schedule        = "0 0 7 * * 0"
+  delete_untagged = true
+}
+
+# registry outputs
+
+output "registry_url" {
+  value = ovh_cloud_project_containerregistry.registry.url
+}
+
+output "registry_admin_login" {
+  value     = ovh_cloud_project_containerregistry_user.admin.login
+  sensitive = true
+}
+
+output "registry_admin_password" {
+  value     = ovh_cloud_project_containerregistry_user.admin.password
+  sensitive = true
+}
+
+# output "registry_culler_name" {
+#   value     = harbor_user.culler.username
+#   sensitive = true
+# }
+
+# output "registry_culler_password" {
+#   value     = harbor_user.culler.password
+#   sensitive = true
+# }
+
+output "registry_builder_name" {
+  value     = harbor_robot_account.builder.full_name
+  sensitive = true
+}
+
+output "registry_builder_token" {
+  value     = harbor_robot_account.builder.secret
+  sensitive = true
+}
+
+output "registry_user_puller_name" {
+  value     = harbor_robot_account.user-puller.full_name
+  sensitive = true
+}
+output "registry_user_puller_token" {
+  value     = harbor_robot_account.user-puller.secret
+  sensitive = true
+}
diff --git a/terraform/ovh/main.tf b/terraform/ovh/main.tf
index 9a9ec9d3c..bb6334cba 100644
--- a/terraform/ovh/main.tf
+++ b/terraform/ovh/main.tf
@@ -2,334 +2,25 @@ terraform {
   required_providers {
     ovh = {
       source  = "ovh/ovh"
-      version = "~> 0.22.0"
-    }
-    random = {
-      source  = "hashicorp/random"
-      version = "~> 3.3.2"
+      version = "2.8.0"
     }
     harbor = {
-      source  = "BESTSELLER/harbor"
-      version = "~> 3.0"
+      source  = "goharbor/harbor"
+      version = "3.11.2"
     }
   }
   # store state on gcs, like other clusters
   backend "s3" {
-    bucket = "tf-state-ovh"
+    bucket = "mybinder-2i2c-tf-state"
     key    = "terraform.tfstate"
-    region = "gra"
-    endpoint = "s3.gra.io.cloud.ovh.net"
+    region = "us-east-va"
+    endpoint = "https://s3.us-east-va.io.cloud.ovh.us"
     skip_credentials_validation = true
     skip_region_validation      = true
   }
 }
 
 provider "ovh" {
-  endpoint = "ovh-eu"
+  endpoint = var.endpoint
   # credentials loaded via source ./secrets/ovh-creds.sh
 }
-
-locals {
-  service_name = "b309c78177f1458187add722e8db8dc2"
-  cluster_name = "ovh2"
-  # GRA9 is colocated with registry
-  region = "GRA9"
-}
-
-# create a private network for our cluster
-resource "ovh_cloud_project_network_private" "network" {
-  service_name = local.service_name
-  name         = local.cluster_name
-  regions      = [local.region]
-}
-
-resource "ovh_cloud_project_network_private_subnet" "subnet" {
-  service_name = local.service_name
-  network_id   = ovh_cloud_project_network_private.network.id
-
-  region  = local.region
-  start   = "10.0.0.100"
-  end     = "10.0.0.254"
-  network = "10.0.0.0/24"
-  dhcp    = true
-}
-
-resource "ovh_cloud_project_kube" "cluster" {
-  service_name = local.service_name
-  name         = local.cluster_name
-  region       = local.region
-  version      = "1.24"
-  # make sure we wait for the subnet to exist
-  depends_on = [ovh_cloud_project_network_private_subnet.subnet]
-
-  # private_network_id is an openstackid for some reason?
-  private_network_id = tolist(ovh_cloud_project_network_private.network.regions_attributes)[0].openstackid
-
-  customization {
-    apiserver {
-      admissionplugins {
-        enabled = ["NodeRestriction"]
-        # disable AlwaysPullImages, which causes problems
-        disabled = ["AlwaysPullImages"]
-      }
-    }
-  }
-  update_policy = "MINIMAL_DOWNTIME"
-}
-
-# ovh node flavors: https://www.ovhcloud.com/en/public-cloud/prices/
-
-resource "ovh_cloud_project_kube_nodepool" "core" {
-  service_name = local.service_name
-  kube_id      = ovh_cloud_project_kube.cluster.id
-  name         = "core-202211"
-  # b2-15 is 4 core, 15GB
-  flavor_name = "b2-15"
-  max_nodes   = 3
-  min_nodes   = 1
-  autoscale   = true
-  template {
-    metadata {
-      labels = {
-        "mybinder.org/pool-type" = "core"
-      }
-    }
-  }
-  lifecycle {
-    ignore_changes = [
-      # don't interfere with autoscaling
-      desired_nodes
-    ]
-  }
-}
-
-resource "ovh_cloud_project_kube_nodepool" "user-a" {
-  service_name = local.service_name
-  kube_id      = ovh_cloud_project_kube.cluster.id
-  name         = "user-202211a"
-  # r2-120 is 8 core, 120GB
-  flavor_name = "r2-120"
-  max_nodes   = 6
-  min_nodes   = 2
-  autoscale   = true
-  template {
-    metadata {
-      labels = {
-        "mybinder.org/pool-type" = "users"
-      }
-    }
-  }
-  lifecycle {
-    ignore_changes = [
-      # don't interfere with autoscaling
-      desired_nodes
-    ]
-  }
-}
-
-resource "ovh_cloud_project_kube_nodepool" "builds" {
-  service_name = local.service_name
-  kube_id      = ovh_cloud_project_kube.cluster.id
-  name         = "builds-2304"
-  # b2-30 is 8-core, 30GB
-  flavor_name = "b2-30"
-  max_nodes   = 3
-  min_nodes   = 1
-  autoscale   = true
-  template {
-    metadata {
-      labels = {
-        "mybinder.org/pool-type" = "builds"
-      }
-    }
-  }
-  lifecycle {
-    ignore_changes = [
-      # don't interfere with autoscaling
-      desired_nodes
-    ]
-  }
-}
-
-# outputs
-
-output "kubeconfig" {
-  value       = ovh_cloud_project_kube.cluster.kubeconfig
-  sensitive   = true
-  description = <<EOF
-    terraform output -raw kubeconfig > $KUBECONFIG
-    chmod 600 $KUBECONFIG
-    kubectl config rename-context kubernetes-admin@ovh2 ovh2
-    kubectl config use-context ovh2
-  EOF
-}
-
-# registry
-
-data "ovh_cloud_project_capabilities_containerregistry_filter" "registry_plan" {
-  service_name = local.service_name
-  # SMALL is 200GB (too small)
-  # MEDIUM is 600GB
-  # LARGE is 5TiB
-  plan_name = "LARGE"
-  region    = "GRA"
-}
-
-resource "ovh_cloud_project_containerregistry" "registry" {
-  service_name = local.service_name
-  plan_id      = data.ovh_cloud_project_capabilities_containerregistry_filter.registry_plan.id
-  region       = data.ovh_cloud_project_capabilities_containerregistry_filter.registry_plan.region
-  name         = "mybinder-ovh"
-}
-
-# admin user (needed for harbor provider)
-resource "ovh_cloud_project_containerregistry_user" "admin" {
-  service_name = ovh_cloud_project_containerregistry.registry.service_name
-  registry_id  = ovh_cloud_project_containerregistry.registry.id
-  email        = "mybinder-admin@mybinder.org"
-  login        = "mybinder-admin"
-}
-
-
-# now configure the registry via harbor itself
-provider "harbor" {
-  url      = ovh_cloud_project_containerregistry.registry.url
-  username = ovh_cloud_project_containerregistry_user.admin.login
-  password = ovh_cloud_project_containerregistry_user.admin.password
-}
-
-# user builds go in mybinder-builds
-# these are separate for easier separation of retention policies
-resource "harbor_project" "mybinder-builds" {
-  name = "mybinder-builds"
-}
-
-resource "harbor_robot_account" "builder" {
-  name        = "builder"
-  description = "BinderHub builder: push new user images"
-  level       = "project"
-  permissions {
-    access {
-      action   = "push"
-      resource = "repository"
-    }
-    access {
-      action   = "pull"
-      resource = "repository"
-    }
-    kind      = "project"
-    namespace = harbor_project.mybinder-builds.name
-  }
-}
-
-resource "harbor_robot_account" "user-puller" {
-  name        = "user-puller"
-  description = "Pull access to user images"
-  level       = "project"
-  permissions {
-    access {
-      action   = "pull"
-      resource = "repository"
-    }
-    kind      = "project"
-    namespace = harbor_project.mybinder-builds.name
-  }
-}
-
-
-# robot accounts don't seem to have permission to delete repositories
-resource "random_password" "culler" {
-  length  = 16
-  special = true
-}
-
-resource "harbor_user" "culler" {
-  username  = "mybinder-culler"
-  password  = random_password.culler.result
-  full_name = "MyBinder culler"
-  email     = "culler@mybinder.org"
-}
-
-resource "harbor_project_member_user" "culler" {
-  project_id = harbor_project.mybinder-builds.id
-  user_name  = harbor_user.culler.username
-  role       = "maintainer"
-}
-
-resource "harbor_retention_policy" "builds" {
-  # run retention policy on Saturday morning
-  scope    = harbor_project.mybinder-builds.id
-  schedule = "0 0 7 * * 6"
-  # rule {
-  #   repo_matching        = "**"
-  #   tag_matching         = "**"
-  #   most_recently_pulled = 1
-  #   untagged_artifacts   = false
-  # }
-  rule {
-    repo_matching          = "**"
-    tag_matching           = "**"
-    n_days_since_last_pull = 30
-    untagged_artifacts     = false
-  }
-  rule {
-    repo_matching          = "**"
-    tag_matching           = "**"
-    n_days_since_last_push = 7
-    untagged_artifacts     = false
-  }
-}
-
-resource "harbor_garbage_collection" "gc" {
-  # run garbage collection on Sunday morning
-  # try to make sure it's not run at the same time as the retention policy
-  schedule        = "0 0 7 * * 0"
-  delete_untagged = true
-}
-
-# registry outputs
-
-output "registry_url" {
-  value = ovh_cloud_project_containerregistry.registry.url
-}
-
-output "registry_admin_login" {
-  value     = ovh_cloud_project_containerregistry_user.admin.login
-  sensitive = true
-}
-
-output "registry_admin_password" {
-  value     = ovh_cloud_project_containerregistry_user.admin.password
-  sensitive = true
-}
-
-output "registry_culler_name" {
-  value     = harbor_user.culler.username
-  sensitive = true
-}
-
-output "registry_culler_password" {
-  value     = harbor_user.culler.password
-  sensitive = true
-}
-
-output "registry_builder_name" {
-  value     = harbor_robot_account.builder.full_name
-  sensitive = true
-}
-
-output "registry_builder_token" {
-  value     = harbor_robot_account.builder.secret
-  sensitive = true
-}
-
-output "registry_user_puller_name" {
-  value     = harbor_robot_account.user-puller.full_name
-  sensitive = true
-}
-output "registry_user_puller_token" {
-  value     = harbor_robot_account.user-puller.secret
-  sensitive = true
-}
diff --git a/terraform/ovh/registry.tf b/terraform/ovh/registry.tf
new file mode 100644
index 000000000..cb7e89740
--- /dev/null
+++ b/terraform/ovh/registry.tf
@@ -0,0 +1,20 @@
+data "ovh_cloud_project_capabilities_containerregistry_filter" "registry_plan" {
+  service_name = var.service_name
+  plan_name    = var.registry_plan
+  region       = var.region
+}
+
+resource "ovh_cloud_project_containerregistry" "registry" {
+  service_name = var.service_name
+  plan_id      = data.ovh_cloud_project_capabilities_containerregistry_filter.registry_plan.id
+  region       = data.ovh_cloud_project_capabilities_containerregistry_filter.registry_plan.region
+  name         = var.registry_name
+}
+
+# admin user (needed for harbor provider)
+resource "ovh_cloud_project_containerregistry_user" "admin" {
+  service_name = ovh_cloud_project_containerregistry.registry.service_name
+  registry_id  = ovh_cloud_project_containerregistry.registry.id
+  email        = "mybinder-admin@mybinder.org"
+  login        = "mybinder-admin"
+}
diff --git a/terraform/ovh/secrets/ovh-creds.sh b/terraform/ovh/secrets/ovh-creds.sh
index 122e8c162..349fe1cd7 100644
Binary files a/terraform/ovh/secrets/ovh-creds.sh and b/terraform/ovh/secrets/ovh-creds.sh differ
diff --git a/terraform/ovh/staging.tfvars b/terraform/ovh/staging.tfvars
new file mode 100644
index 000000000..2fe54436d
--- /dev/null
+++ b/terraform/ovh/staging.tfvars
@@ -0,0 +1,7 @@
+endpoint = "ovh-us"
+
+service_name = "5e4c805d3c614a1085d7b7bc1fee46d6"
+region       = "US-EAST-VA"
+
+registry_plan = "SMALL"
+registry_name = "mybinder-ovh-staging"
diff --git a/terraform/ovh/variables.tf b/terraform/ovh/variables.tf
new file mode 100644
index 000000000..45c166bae
--- /dev/null
+++ b/terraform/ovh/variables.tf
@@ -0,0 +1,45 @@
+variable "endpoint" {
+  type        = string
+  description = <<-EOT
+    OVH Endpoint to use for making API calls
+
+    One of "ovh-us", "ovh-ca" or "ovh-eu", based on which *kind of OVH account*
+    is being used.
+  EOT
+}
+
+
+variable "region" {
+  type        = string
+  description = <<-EOT
+    OVH Region to put all infrastructure in
+  EOT
+}
+
+
+variable "service_name" {
+  type        = string
+  description = <<-EOT
+    OVH Public Cloud Project ID to create infrastructure in
+  EOT
+}
+
+variable "registry_name" {
+  type        = string
+  description = <<-EOT
+    Name of the managed registry to create
+  EOT
+}
+
+variable "registry_plan" {
+  type        = string
+  description = <<-EOT
+    What registry plan to put this registry on.
+
+    Options are:
+
+    - SMALL is 200GB (too small for production loads)
+    - MEDIUM is 600GB
+    - LARGE is 5TiB
+  EOT
+}