diff --git a/Cargo.lock b/Cargo.lock index 222f9ad..ffbfd58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,13 +263,21 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" + [[package]] name = "beavercds-ng" version = "0.1.0" dependencies = [ "anyhow", + "base64ct", "bollard", "clap", + "docker_credential", "duct", "fastrand", "figment", @@ -627,6 +635,17 @@ dependencies = [ "const-random", ] +[[package]] +name = "docker_credential" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d89dfcba45b4afad7450a99b39e751590463e45c04728cf555d36bb66940de8" +dependencies = [ + "base64 0.21.7", + "serde", + "serde_json", +] + [[package]] name = "duct" version = "0.13.7" @@ -2470,9 +2489,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 8429233..0ee4c3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,8 @@ rust-s3 = { version = "0.35.1", default-features = false, features = [ minijinja = { version = "2.6.0", features = ["json"] } duct = "0.13.7" fastrand = "2.3.0" +base64ct = { version = "1.7.3", features = ["alloc"] } +docker_credential = "1.3.2" [dev-dependencies] diff --git a/docs/for-challenge-authors.md b/docs/for-challenge-authors.md new file mode 100644 index 0000000..f9f13c7 --- /dev/null +++ b/docs/for-challenge-authors.md @@ -0,0 +1,199 @@ +# How to write beaverCDS challenge.yaml config + +tldr: see [the TCP example](#full-tcp-example) or [the web example](#full-http-example). + +### Metadata + +Self explanatory. + +```yaml +name: yet another pyjail +author: somebody, John Author +``` + +### Description + +Challenge description supports markdown and Jinja-style templating for challenge info. +The Jinja template fields available are: + +| Field name | Description | +| ----------- | ----------- | +| `hostname` | The hostname or domain for the challenge +| `port` | The port that the challenge is listening on +| `nc` | Insert the `nc` command to connect to TCP challenges (`nc {{hostname}} {{port}}`) +| `link` | Create a Markdown link to the exposed hostname/port +| `url` | The URL from `link` without the accompanying Markdown +| `challenge` | The full challenge.yaml config for this challenge, with subfields + +You probably only want `{{ nc }}` or `{{ link }}`. + +Example: + +```yaml +description: | + Some example challenge. Blah blah blah flavor text. + + In case you missed it, this was written by {{ challenge.author }} + and is called {{ challenge.name }}. + + {{ link }} # -becomes-> [example.chals.thectf.com](https://example.chals.thectf.com) + {{ nc }} # -becomes-> `nc example.chals.thectf.com 12345` +``` + + +### Flag + +Read flag from file: + +```yaml +flag: + file: ./flag +``` + +### Pods + +Defines how any container images for this challenge are built and deployed. + +The pod `name` is also used for extracting files, see [Providing files to +users](). 
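+
+For reference, a minimal single-pod definition might look like this (an
+illustrative sketch — `main`, `mychal`, and the port numbers are placeholder
+values; each field is described below):
+
+```yaml
+pods:
+  - name: main
+    build: .
+    replicas: 1
+    ports:
+      - internal: 31337
+        expose:
+          tcp: mychal:30999
+```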
+
+`build` works similarly to [Docker Compose](https://docs.docker.com/reference/compose-file/build/#illustrative-example),
+either:
+  - a string path to the build context folder
+  - yaml with explicit `context` path, `dockerfile` path within context folder, and `args` build args \
+    (only `context`, `dockerfile`, and `args` are supported for the detailed form)
+
+`ports` controls how the container is exposed. This should be a list of what port the container is listening on, and how
+that port should be exposed to players:
+- For TCP challenges, set `expose.tcp` to the subdomain and port: `<subdomain>:<port>`
+- For HTTP challenges, set `expose.http` to the subdomain only: `<subdomain>` \
+  The website domain will automatically be set up with an HTTPS cert.
+
+
+```yaml
+pods:
+  - name: tcp-example
+    build: .
+    replicas: 2
+    ports:
+      - internal: 31337
+        expose:
+          tcp: thechal:30124 # exposed at thechal.<domain>:30124
+
+  - name: web-example
+    build:
+      context: src/
+      dockerfile: Containerfile
+    replicas: 2
+    ports:
+      - internal: 31337
+        expose:
+          http: webchal # exposed at https://webchal.<domain>
+```
+
+
+This can be omitted if there are no containers for the challenge.
+
+### Providing files to users
+
+Files to give to players as downloads in the frontend.
+
+These can be from the challenge folder in the repository, or from the
+challenge's built container. These can also be zipped together into one file, or
+uploaded separately. These need to be files; directories or globs are not (yet)
+supported.
+
+This can be omitted if there are no files provided.
+
+```yaml
+provide:
+  # file from the challenge folder in the repo
+  - somefile.txt
+
+  # multiple files from chal_folder/src/, zipped as together.zip
+  - as: together.zip
+    include:
+      - src/file1
+      - src/file2
+      - src/file3
+
+  # extract these files from inside of the container image
+  # for the `main` pod (see previous section)
+  - from: main
+    include:
+      - /chal/notsh
+      - /lib/x86_64-linux-gnu/libc.so.6
+
+  # same as above, but now zipped together
+  - from: main
+    as: notsh.zip
+    include:
+      - /chal/notsh
+      - /lib/x86_64-linux-gnu/libc.so.6
+```
+
+
+# Examples
+
+## Full TCP example
+
+```yaml
+name: notsh
+author: John Author
+description: |-
+  This challenge isn't a shell
+
+  {{ nc }}
+
+provide:
+  - from: main
+    include:
+      - /chal/notsh
+      - /lib/x86_64-linux-gnu/libc.so.6
+
+flag:
+  file: ./flag
+
+pods:
+  - name: main
+    build: .
+    replicas: 2
+    ports:
+      - internal: 31337
+        expose:
+          tcp: 30124
+```
+
+## Full HTTP example
+
+```yaml
+name: bar
+author: somebody
+description: |
+  can you order a drink from the webserver?
+
+  {{ url }}
+
+difficulty: 1
+
+flag:
+  file: ./flag
+
+# no provide: section needed if no files
+
+pods:
+  - name: bar
+    build:
+      context: .
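+      # dockerfile is resolved within the context folder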
+ dockerfile: Containerfile + replicas: 1 + ports: + - internal: 80 + expose: + http: bar # subdomain only +``` diff --git a/src/access_handlers/docker.rs b/src/access_handlers/docker.rs index 071e3c9..97d39d5 100644 --- a/src/access_handlers/docker.rs +++ b/src/access_handlers/docker.rs @@ -10,8 +10,8 @@ use minijinja; use tokio; use tracing::{debug, error, info, trace, warn}; -use crate::clients::{docker, render_strict}; use crate::configparser::{get_config, get_profile_config}; +use crate::{clients::docker, utils::render_strict}; /// container registry / daemon access checks #[tokio::main(flavor = "current_thread")] // make this a sync function diff --git a/src/asset_files/challenge_templates/deployment.yaml.j2 b/src/asset_files/challenge_templates/deployment.yaml.j2 index 0157a0f..84f3632 100644 --- a/src/asset_files/challenge_templates/deployment.yaml.j2 +++ b/src/asset_files/challenge_templates/deployment.yaml.j2 @@ -19,9 +19,14 @@ spec: labels: rctf/part-of: "{{ slug }}-{{ pod.name }}" spec: + imagePullSecrets: + - name: "rcds-{{ slug }}-pull" + nodeSelector: + kubernetes.io/arch: {{ pod.architecture }} containers: - name: "{{ pod.name }}" image: "{{ pod_image }}" + imagePullPolicy: Always ports: {% for p in pod.ports -%} - containerPort: {{ p.internal }} diff --git a/src/asset_files/challenge_templates/http.yaml.j2 b/src/asset_files/challenge_templates/http.yaml.j2 index 9a1bd17..feaa97b 100644 --- a/src/asset_files/challenge_templates/http.yaml.j2 +++ b/src/asset_files/challenge_templates/http.yaml.j2 @@ -24,6 +24,7 @@ metadata: namespace: "rcds-{{ slug }}" annotations: app.kubernetes.io/managed-by: rcds + cert-manager.io/cluster-issuer: letsencrypt spec: ingressClassName: beavercds rules: @@ -39,3 +40,10 @@ spec: port: number: {{ p.internal }} {% endfor -%} + + tls: + - hosts: + {%- for p in http_ports %} + - "{{ p.expose.http }}.{{ domain }}" + {% endfor -%} + secretName: "rcds-tls-{{ slug }}-{{ pod.name }}" diff --git a/src/asset_files/challenge_templates/namespace.yaml.j2 b/src/asset_files/challenge_templates/namespace.yaml.j2 index fb6979c..4a220da 100644 --- a/src/asset_files/challenge_templates/namespace.yaml.j2 +++ b/src/asset_files/challenge_templates/namespace.yaml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Namespace metadata: - name: rcds-{{ slug }} + name: "rcds-{{ slug }}" annotations: app.kubernetes.io/managed-by: rcds rctf/challenge: "{{ chal.name }}" diff --git a/src/asset_files/challenge_templates/pull-secret.yaml.j2 b/src/asset_files/challenge_templates/pull-secret.yaml.j2 new file mode 100644 index 0000000..cb0dfe8 --- /dev/null +++ b/src/asset_files/challenge_templates/pull-secret.yaml.j2 @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Secret +type: kubernetes.io/dockerconfigjson +metadata: + name: "rcds-{{ slug }}-pull" + namespace: "rcds-{{ slug }}" +stringData: + .dockerconfigjson: | + { + "auths": { + "{{ registry_domain }}": { + "auth": "{{ creds_b64 }}" + } + } + } diff --git a/src/asset_files/challenge_templates/tcp.yaml.j2 b/src/asset_files/challenge_templates/tcp.yaml.j2 index f01dfd2..61f6084 100644 --- a/src/asset_files/challenge_templates/tcp.yaml.j2 +++ b/src/asset_files/challenge_templates/tcp.yaml.j2 @@ -8,7 +8,15 @@ metadata: app.kubernetes.io/managed-by: rcds # still use separate domain for these, since exposed LoadBalancer services # will all have different ips from each other - external-dns.alpha.kubernetes.io/hostname: "{{ slug }}.{{ domain }}" + external-dns.alpha.kubernetes.io/hostname: "{{ name_slug }}.{{ domain }}" + + # aws-specific annotations 
for lb options + service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules: "true" + spec: type: LoadBalancer selector: diff --git a/src/asset_files/setup_manifests/external-dns.helm.yaml.j2 b/src/asset_files/setup_manifests/external-dns.helm.yaml.j2 index 7d484f9..c559c11 100644 --- a/src/asset_files/setup_manifests/external-dns.helm.yaml.j2 +++ b/src/asset_files/setup_manifests/external-dns.helm.yaml.j2 @@ -2,8 +2,6 @@ rbac: create: true -{{ provider_credentials }} - # Watch these resources for new DNS records sources: - service @@ -20,10 +18,10 @@ txtOwnerId: "k8s-external-dns" txtPrefix: "k8s-owner." extraArgs: - # ignore any services with internal ips - #exclude-target-net: "10.0.0.0/8" # special character replacement - txt-wildcard-replacement: star + - --txt-wildcard-replacement=star + # use CNAME instead of ALIAS for alb targets + - --aws-prefer-cname ## Limit external-dns resources resources: @@ -34,3 +32,6 @@ resources: cpu: 10m logLevel: debug + +# assign last to override any previous values if required +{{ provider_credentials }} diff --git a/src/asset_files/setup_manifests/ingress-nginx.helm.yaml b/src/asset_files/setup_manifests/ingress-nginx.helm.yaml index 025ec0b..517e286 100644 --- a/src/asset_files/setup_manifests/ingress-nginx.helm.yaml +++ b/src/asset_files/setup_manifests/ingress-nginx.helm.yaml @@ -3,6 +3,15 @@ controller: ingressClassResource: name: beavercds + # set variety of annotations needed for the cloud providers + + annotations: + service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-manage-backend-security-group-rules: "true" + # nginx values for tcp ports will be set separately in other values file # this will make it easier for `deploy` to update those values without # subsequent calls to `cluster-setup` overwriting changes. diff --git a/src/asset_files/setup_manifests/letsencrypt.issuers.yaml b/src/asset_files/setup_manifests/letsencrypt.issuers.yaml index 0cc1fd3..eb8c38a 100644 --- a/src/asset_files/setup_manifests/letsencrypt.issuers.yaml +++ b/src/asset_files/setup_manifests/letsencrypt.issuers.yaml @@ -4,15 +4,15 @@ metadata: name: letsencrypt spec: acme: - server: https://acme-v02.api.letsencrypt.org/directory" + server: https://acme-v02.api.letsencrypt.org/directory # TODO: use user email? - email: beavercds-prod@example.com + email: beavercds-prod@{{ chal_domain }} privateKeySecretRef: name: letsencrypt-secret solvers: - http01: ingress: - class: nginx + ingressClassName: beavercds --- apiVersion: cert-manager.io/v1 @@ -23,10 +23,10 @@ spec: acme: server: https://acme-staging-v02.api.letsencrypt.org/directory # TODO: use user email? 
-  email: beavercds-staging@example.com
+  email: beavercds-staging@{{ chal_domain }}
   privateKeySecretRef:
     name: letsencrypt-staging-secret
   solvers:
     - http01:
         ingress:
-          class: nginx
+          ingressClassName: beavercds
diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs
index dcfd728..1d6fc79 100644
--- a/src/builder/artifacts.rs
+++ b/src/builder/artifacts.rs
@@ -38,7 +38,7 @@ pub async fn extract_asset(
 
     let docker = docker().await?;
 
-    match provide {
+    let extracted_files = match provide {
         // Repo file paths are relative to the challenge directory, so prepend chal dir
 
         // No action necessary, return path as-is
@@ -54,12 +54,13 @@ pub async fn extract_asset(
             files,
             archive_name,
         } => {
+            let archive_path = chal.directory.join(archive_name);
             zip_files(
-                &chal.directory.join(archive_name),
+                &archive_path,
                 &files.iter().map(|f| chal.directory.join(f)).collect_vec(),
             )
             .with_context(|| format!("could not create archive {archive_name:?}"))?;
-            Ok(vec![archive_name.clone()])
+            Ok(vec![archive_path])
         }
 
         // handle all container events together to manage container, then match again
@@ -132,7 +133,18 @@ pub async fn extract_asset(
 
             files
         }
+    }?;
+
+    // assert all files have chal dir prepended
+    for path in &extracted_files {
+        assert!(
+            path.starts_with(&chal.directory),
+            "extracted path {path:?} for {:?} is missing challenge directory!",
+            &chal.directory
+        )
     }
+
+    Ok(extracted_files)
 }
 
 /// Extract multiple files from container
diff --git a/src/builder/docker.rs b/src/builder/docker.rs
index 85e5d54..e3fb4b4 100644
--- a/src/builder/docker.rs
+++ b/src/builder/docker.rs
@@ -20,7 +20,7 @@ use tempfile::Builder;
 use tokio;
 use tracing::{debug, error, info, trace, warn};
 
-use crate::clients::docker;
+use crate::clients::{docker, docker_creds};
 use crate::configparser::challenge::BuildObject;
 use crate::configparser::UserPass;
 
@@ -29,7 +29,12 @@ pub struct ContainerInfo {
     id: String,
 }
 
-pub async fn build_image(context: &Path, options: &BuildObject, tag: &str) -> Result<String> {
+pub async fn build_image(
+    context: &Path,
+    options: &BuildObject,
+    tag: &str,
+    arch: &str,
+) -> Result<String> {
     trace!("building image in directory {context:?} to tag {tag:?}");
 
     let client = docker().await?;
 
@@ -38,6 +43,7 @@ pub async fn build_image(
         buildargs: options.args.clone(),
         t: tag.to_string(),
         forcerm: true,
+        platform: format!("linux/{arch}"),
         ..Default::default()
     };
 
@@ -49,8 +55,12 @@ pub async fn build_image(
         .with_context(|| "could not create image context tarball")?;
     let tarball = tar.into_inner()?;
 
+    // fetch dockerhub creds from ~/.docker/config.json for pulling base images
+    // if creds fail to fetch, go anonymous
+    let credentials = docker_creds()?;
+
     // send to docker daemon
-    let mut build_stream = client.build_image(build_opts, None, Some(tarball.into()));
+    let mut build_stream = client.build_image(build_opts, Some(credentials), Some(tarball.into()));
 
     // stream output to stdout
     while let Some(item) = build_stream.next().await {
diff --git a/src/builder/mod.rs b/src/builder/mod.rs
index 1680155..0801546 100644
--- a/src/builder/mod.rs
+++ b/src/builder/mod.rs
@@ -80,7 +80,7 @@ async fn build_challenge(
                 Build(build) => {
                     let tag = chal.container_tag_for_pod(profile_name, &p.name)?;
 
-                    let res = docker::build_image(&chal.directory, build, &tag)
+                    let res = docker::build_image(&chal.directory, build, &tag, &p.architecture)
                         .await
                         .with_context(|| {
                             format!(
diff --git a/src/clients.rs b/src/clients.rs
index 
545da34..dd22694 100644
--- a/src/clients.rs
+++ b/src/clients.rs
@@ -1,15 +1,10 @@
 // Builders for the various client structs for Docker/Kube etc.
 
-use std::sync::OnceLock;
+use std::{collections::HashMap, sync::OnceLock};
 
-use anyhow::{anyhow, bail, Context, Error, Result};
+use anyhow::{anyhow, bail, Context, Result};
 use bollard;
-use futures::TryFutureExt;
-use k8s_openapi::api::{
-    apps::v1::Deployment,
-    core::v1::{Pod, Service},
-    networking::v1::Ingress,
-};
+use k8s_openapi::api::core::v1::Service;
 use kube::{
     self,
     api::{DynamicObject, GroupVersionKind, Patch, PatchParams},
@@ -48,6 +43,93 @@ pub async fn docker() -> Result<&'static bollard::Docker> {
     }
 }
 
+/// Fetch registry login credentials from ~/.docker/config.json or $DOCKER_CONFIG
+///
+/// For now, this is only `docker.io` credentials, as it is the only registry
+/// that effectively requires auth for public images. We don't intend for
+/// challenge images to be built from private images.
+///
+/// If lookup fails, return empty hashmap as anonymous user.
+pub fn docker_creds() -> Result<HashMap<String, bollard::auth::DockerCredentials>> {
+    let cred_r = docker_credential::get_credential("docker.io");
+
+    let cred = match cred_r {
+        Ok(cred) => cred,
+        Err(e) => {
+            // don't die if the credentials could not be found. Warn and continue as anonymous
+            warn!("could not fetch docker.io registry credentials from Docker config (are you logged in?)");
+            // log full error for debug
+            trace!("credentials error: {e:?}");
+
+            warn!("continuing with anonymous build credentials");
+            return Ok(HashMap::new());
+        }
+    };
+
+    // convert docker_credential enum to bollard
+    let converted = match cred {
+        docker_credential::DockerCredential::IdentityToken(token) => {
+            bollard::auth::DockerCredentials {
+                identitytoken: Some(token),
+                ..Default::default()
+            }
+        }
+        docker_credential::DockerCredential::UsernamePassword(u, p) => {
+            bollard::auth::DockerCredentials {
+                username: Some(u),
+                password: Some(p),
+                ..Default::default()
+            }
+        }
+    };
+
+    Ok(std::collections::HashMap::from([(
+        "docker.io".to_string(),
+        converted,
+    )]))
+}
+
+// /// wip to pull all docker creds from json
+// pub async fn all_docker_creds() -> Result<HashMap<String, bollard::auth::DockerCredentials>> {
+//     let auth_path = dirs::home_dir()
+//         .expect("could not fetch homedir")
+//         .join(".docker")
+//         .join("config.json");
+//     let auth_file = File::open(auth_path).context("could not read docker auth config.json")?;
+//     // json is technically yaml so use the dependency we already bring in
+//     let auth_json: serde_yml::Value = serde_yml::from_reader(auth_file).unwrap();

+//     let mut map = HashMap::new();
+//     for (raw_reg, _raw_auth) in auth_json.get("auths").unwrap().as_mapping().unwrap() {
+//         let reg = raw_reg.as_str().unwrap();
+//         let cred = match engine_type().await {
+//             EngineType::Docker => docker_credential::get_credential(reg),
+//             EngineType::Podman => docker_credential::get_podman_credential(reg),
+//         }
+//         .context("could not fetch Docker registry credentials from Docker config")?;

+//         let creds = match cred {
+//             docker_credential::DockerCredential::IdentityToken(token) => {
+//                 bollard::auth::DockerCredentials {
+//                     identitytoken: Some(token),
+//                     ..Default::default()
+//                 }
+//             }
+//             docker_credential::DockerCredential::UsernamePassword(u, p) => {
+//                 bollard::auth::DockerCredentials {
+//                     username: Some(u),
+//                     password: Some(p),
+//                     ..Default::default()
+//                 }
+//             }
+//         };

+//         map.insert(reg.to_string(), creds);
+//     }

+//     Ok(map)
+// }
+
 #[derive(Debug)]
 pub enum EngineType {
     Docker,
@@ -222,7 +304,7 @@ pub async fn apply_manifest_yaml(
    // 
this manifest has multiple documents (crds, deployment)
     for yaml in multidoc_deserialize(manifest)? {
         let obj: DynamicObject = serde_yml::from_value(yaml)?;
-        debug!(
+        trace!(
             "applying resource {} {}",
             obj.types.clone().unwrap_or_default().kind,
             obj.name_any()
@@ -345,19 +427,3 @@ pub async fn wait_for_status(client: &kube::Client, object: &DynamicObject) -> R
 
     Ok(())
 }
-
-//
-// Minijinja strict rendering with error
-//
-
-/// Similar to minijinja.render!(), but return Error if any undefined values.
-pub fn render_strict(template: &str, context: minijinja::Value) -> Result<String> {
-    let mut strict_env = minijinja::Environment::new();
-    // error on any undefined template variables
-    strict_env.set_undefined_behavior(minijinja::UndefinedBehavior::Strict);
-
-    let r = strict_env
-        .render_str(template, context)
-        .context(format!("could not render template {:?}", template))?;
-    Ok(r)
-}
diff --git a/src/cluster_setup/mod.rs b/src/cluster_setup/mod.rs
index 8eb01a9..c55ddb1 100644
--- a/src/cluster_setup/mod.rs
+++ b/src/cluster_setup/mod.rs
@@ -22,6 +22,7 @@ use tracing::{debug, error, info, trace, warn};
 
 use crate::clients::{apply_manifest_yaml, kube_client};
 use crate::configparser::{config, get_config, get_profile_config};
+use crate::utils::render_strict;
 
 // Deploy cluster resources needed for challenges to work.
 //
@@ -40,7 +41,8 @@ pub async fn install_ingress(profile: &config::ProfileConfig) -> Result<()> {
     install_helm_chart(
         profile,
         "ingress-nginx",
-        Some("https://kubernetes.github.io/ingress-nginx"),
+        "https://kubernetes.github.io/ingress-nginx",
+        None,
         "ingress-nginx",
         INGRESS_NAMESPACE,
         VALUES,
@@ -57,7 +59,8 @@ pub async fn install_certmanager(profile: &config::ProfileConfig) -> Result<()>
     install_helm_chart(
         profile,
         "cert-manager",
-        Some("https://charts.jetstack.io"),
+        "https://charts.jetstack.io",
+        None,
         "cert-manager",
         INGRESS_NAMESPACE,
         VALUES,
@@ -67,9 +70,17 @@ pub async fn install_certmanager(profile: &config::ProfileConfig) -> Result<()>
     let client = kube_client(profile).await?;
 
     // letsencrypt and letsencrypt-staging
-    const ISSUERS_YAML: &str =
+    const ISSUERS_TEMPLATE: &str =
         include_str!("../asset_files/setup_manifests/letsencrypt.issuers.yaml");
-    apply_manifest_yaml(&client, ISSUERS_YAML).await?;
+
+    let issuers_yaml = render_strict(
+        ISSUERS_TEMPLATE,
+        minijinja::context! {
+            chal_domain => profile.challenges_domain
+        },
+    )?;
+
+    apply_manifest_yaml(&client, &issuers_yaml).await?;
 
     Ok(())
 }
@@ -81,16 +92,20 @@ pub async fn install_extdns(profile: &config::ProfileConfig) -> Result<()> {
         include_str!("../asset_files/setup_manifests/external-dns.helm.yaml.j2");
 
     // add profile dns: field directly to chart values
-    let values = minijinja::render!(
+    let values = render_strict(
         VALUES_TEMPLATE,
-        provider_credentials => serde_yml::to_string(&profile.dns)?,
-        chal_domain => profile.challenges_domain
-    );
+        minijinja::context! 
{ + provider_credentials => serde_yml::to_string(&profile.dns)?, + chal_domain => profile.challenges_domain + }, + )?; + trace!("deploying templated external-dns values:\n{}", values); install_helm_chart( profile, - "oci://registry-1.docker.io/bitnamicharts/external-dns", + "external-dns", + "https://kubernetes-sigs.github.io/external-dns", None, "external-dns", INGRESS_NAMESPACE, @@ -106,11 +121,17 @@ pub async fn install_extdns(profile: &config::ProfileConfig) -> Result<()> { fn install_helm_chart( profile: &config::ProfileConfig, chart: &str, - repo: Option<&str>, + repo: &str, + version: Option<&str>, release_name: &str, namespace: &str, values: &str, ) -> Result<()> { + // make sure `helm` is available to run + duct::cmd!("helm", "version") + .read() + .context("helm binary is not available")?; + // write values to tempfile let mut temp_values = tempfile::Builder::new() .prefix(release_name) @@ -118,8 +139,8 @@ fn install_helm_chart( .tempfile()?; temp_values.write_all(values.as_bytes())?; - let repo_arg = match repo { - Some(r) => format!("--repo {r}"), + let version_arg = match version { + Some(v) => format!("--version {v}"), None => "".to_string(), }; @@ -134,7 +155,7 @@ fn install_helm_chart( r#" upgrade --install {release_name} - {chart} {repo_arg} + {chart} --repo {repo} {version_arg} --namespace {namespace} --create-namespace --values {} --wait --timeout 1m diff --git a/src/commands/build.rs b/src/commands/build.rs index 676bd80..602ee5e 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -1,3 +1,4 @@ +use anyhow::Result; use itertools::Itertools; use std::process::exit; use tracing::{debug, error, info, trace, warn}; @@ -6,15 +7,12 @@ use crate::builder::build_challenges; use crate::configparser::{get_config, get_profile_config}; #[tokio::main(flavor = "current_thread")] // make this a sync function -pub async fn run(profile_name: &str, push: &bool, extract: &bool) { +pub async fn run(profile_name: &str, push: &bool, extract: &bool) -> Result<()> { info!("building images..."); - let results = match build_challenges(profile_name, *push, *extract).await { - Ok(results) => results, - Err(e) => { - error!("{e:?}"); - exit(1) - } - }; + let results = build_challenges(profile_name, *push, *extract).await?; + info!("images built successfully!"); + + Ok(()) } diff --git a/src/commands/check_access.rs b/src/commands/check_access.rs index bae35d1..841a117 100644 --- a/src/commands/check_access.rs +++ b/src/commands/check_access.rs @@ -1,4 +1,4 @@ -use anyhow::{Context, Error, Result}; +use anyhow::{bail, Context, Error, Result}; use itertools::Itertools; use std::process::exit; use tracing::{debug, error, info, trace, warn}; @@ -6,7 +6,13 @@ use tracing::{debug, error, info, trace, warn}; use crate::access_handlers as access; use crate::configparser::{get_config, get_profile_config}; -pub fn run(profile: &str, kubernetes: &bool, frontend: &bool, registry: &bool, bucket: &bool) { +pub fn run( + profile: &str, + kubernetes: &bool, + frontend: &bool, + registry: &bool, + bucket: &bool, +) -> Result<()> { // if user did not give a specific check, check all of them let check_all = !kubernetes && !frontend && !registry && !bucket; @@ -36,6 +42,7 @@ pub fn run(profile: &str, kubernetes: &bool, frontend: &bool, registry: &bool, b debug!("access results: {results:?}"); // die if there were any errors + // TODO: figure out how to return this error directly let mut should_exit = false; for (profile, result) in results.iter() { match result { @@ -48,8 +55,10 @@ pub fn run(profile: 
&str, kubernetes: &bool, frontend: &bool, registry: &bool, b } } if should_exit { - exit(1); + bail!("config validation failed"); } + + Ok(()) } /// checks a single profile (`profile`) for the given accesses diff --git a/src/commands/cluster_setup.rs b/src/commands/cluster_setup.rs index b6cb16b..c1e0289 100644 --- a/src/commands/cluster_setup.rs +++ b/src/commands/cluster_setup.rs @@ -7,22 +7,15 @@ use crate::cluster_setup as setup; use crate::configparser::{get_config, get_profile_config}; #[tokio::main(flavor = "current_thread")] // make this a sync function -pub async fn run(profile_name: &str) { +pub async fn run(profile_name: &str) -> Result<()> { info!("setting up cluster..."); let config = get_profile_config(profile_name).unwrap(); - if let Err(e) = setup::install_ingress(config).await { - error!("{e:?}"); - exit(1); - } - if let Err(e) = setup::install_certmanager(config).await { - error!("{e:?}"); - exit(1); - } - if let Err(e) = setup::install_extdns(config).await { - error!("{e:?}"); - exit(1); - } + setup::install_ingress(config).await?; + setup::install_certmanager(config).await?; + setup::install_extdns(config).await?; - info!("charts deployed!") + info!("charts deployed!"); + + Ok(()) } diff --git a/src/commands/deploy.rs b/src/commands/deploy.rs index 930b052..392718c 100644 --- a/src/commands/deploy.rs +++ b/src/commands/deploy.rs @@ -1,3 +1,4 @@ +use anyhow::{Context, Result}; use itertools::Itertools; use std::process::exit; use tracing::{debug, error, info, trace, warn}; @@ -7,14 +8,11 @@ use crate::configparser::{get_config, get_profile_config}; use crate::deploy; #[tokio::main(flavor = "current_thread")] // make this a sync function -pub async fn run(profile_name: &str, no_build: &bool, _dry_run: &bool) { +pub async fn run(profile_name: &str, no_build: &bool, _dry_run: &bool) -> Result<()> { let profile = get_profile_config(profile_name).unwrap(); // has the cluster been setup? 
-    if let Err(e) = deploy::check_setup(profile).await {
-        error!("{e:?}");
-        exit(1);
-    }
+    deploy::check_setup(profile).await?;
 
     // build before deploying
     if *no_build {
@@ -24,43 +22,16 @@ pub async fn run(profile_name: &str, no_build: &bool, _dry_run: &bool) {
     }
 
     info!("building challenges...");
-    let build_results = match build_challenges(profile_name, true, true).await {
-        Ok(result) => result,
-        Err(e) => {
-            error!("{e:?}");
-            exit(1);
-        }
-    };
+    let build_results = build_challenges(profile_name, true, true).await?;
 
     trace!(
         "got built results: {:#?}",
         build_results.iter().map(|b| &b.1).collect_vec()
     );
 
-    // deploy needs to:
-    // A) render kubernetes manifests
-    //    - namespace, deployment, service, ingress
-    //    - upgrade ingress config with new listen ports
-    //
-    // B) upload asset files to bucket
-    //
-    // C) update frontend with new state of challenges
-
-    // A)
-    if let Err(e) = deploy::kubernetes::deploy_challenges(profile_name, &build_results).await {
-        error!("{e:?}");
-        exit(1);
-    }
-
-    // B)
-    if let Err(e) = deploy::s3::upload_assets(profile_name, &build_results).await {
-        error!("{e:?}");
-        exit(1);
-    }
+    deploy::deploy_challenges(profile_name, &build_results)
+        .await
+        .context("could not deploy challenges")?;
 
-    // C)
-    if let Err(e) = deploy::frontend::update_frontend(profile_name, &build_results).await {
-        error!("{e:?}");
-        exit(1);
-    }
+    Ok(())
 }
diff --git a/src/commands/validate.rs b/src/commands/validate.rs
index 0399f88..ccad57d 100644
--- a/src/commands/validate.rs
+++ b/src/commands/validate.rs
@@ -1,28 +1,26 @@
+use anyhow::{bail, Result};
 use std::path::Path;
 use std::process::exit;
 use tracing::{debug, error, info, trace, warn};
 
 use crate::configparser::{get_challenges, get_config, get_profile_deploy};
 
-pub fn run() {
+pub fn run() -> Result<()> {
     info!("validating config...");
-    let config = match get_config() {
-        Ok(c) => c,
-        Err(err) => {
-            error!("{err:#}");
-            exit(1);
-        }
-    };
+
+    let config = get_config()?;
     info!("  config ok!");
 
     info!("validating challenges...");
+    // print these errors here instead of returning, since it's a vec of them
+    // TODO: figure out how to return this error directly
     let chals = match get_challenges() {
         Ok(c) => c,
         Err(errors) => {
             for e in errors.iter() {
                 error!("{e:#}");
             }
-            exit(1);
+            bail!("failed to validate challenges");
         }
     };
     info!("  challenges ok!");
@@ -31,29 +29,28 @@ pub fn run() {
     info!("validating deploy config...");
     for (profile_name, _pconfig) in config.profiles.iter() {
         // fetch from config
-        let deploy_challenges = match get_profile_deploy(profile_name) {
-            Ok(d) => &d.challenges,
-            Err(err) => {
-                error!("{err:#}");
-                exit(1);
-            }
-        };
+        let deploy_challenges = get_profile_deploy(profile_name)?;
 
         // check for missing
         let missing: Vec<_> = deploy_challenges
+            .challenges
             .keys()
             .filter(
                 // try to find any challenge paths in deploy config that do not exist
                 |path| !chals.iter().any(|c| c.directory == Path::new(path)),
             )
            .collect();
+
+        // TODO: figure out how to return this error directly
         if !missing.is_empty() {
             error!(
                 "Deploy settings for profile '{profile_name}' has challenges that do not exist:"
             );
             missing.iter().for_each(|path| error!("  - {path}"));
-            exit(1)
+            bail!("failed to validate deploy config");
         }
     }
-    info!("  deploy ok!")
+    info!("  deploy ok!");
+
+    Ok(())
 }
diff --git a/src/configparser/challenge.rs b/src/configparser/challenge.rs
index 960330a..18c675d 100644
--- a/src/configparser/challenge.rs
+++ b/src/configparser/challenge.rs
@@ -12,10 +12,10 @@ use std::str::FromStr;
 use tracing::{debug, error, 
info, trace, warn};
 use void::Void;
 
-use crate::clients::render_strict;
 use crate::configparser::config::Resource;
 use crate::configparser::field_coersion::string_or_struct;
 use crate::configparser::get_config;
+use crate::utils::render_strict;
 
 pub fn parse_all() -> Result<Vec<ChallengeConfig>, Vec<Error>> {
     // find all challenge.yaml files
@@ -122,7 +122,36 @@ pub fn parse_one(path: &PathBuf) -> Result<ChallengeConfig> {
 pub struct ChallengeConfig {
     name: String,
     author: String,
+
+    /// Challenge description, displayed to players on the frontend.
+    /// Supports markdown and Jinja-style templating for challenge info via
+    /// [minijinja](https://docs.rs/minijinja).
+    ///
+    /// The Jinja template fields available are:
+    ///
+    /// | Field name  | Description |
+    /// | ----------- | ----------- |
+    /// | `hostname`  | The hostname or domain for the challenge
+    /// | `port`      | The port that the challenge is listening on
+    /// | `nc`        | Insert the `nc` command to connect to TCP challenges (`nc {{hostname}} {{port}}`)
+    /// | `link`      | Create a Markdown link to the exposed hostname/port
+    /// | `url`       | The URL from `link` without the accompanying Markdown
+    /// | `challenge` | The full challenge.yaml config for this challenge, with subfields
+    ///
+    /// Example:
+    ///
+    /// ```yaml
+    /// description: |
+    ///   Some example challenge. Blah blah blah flavor text.
+    ///
+    ///   In case you missed it, this was written by {{ challenge.author }}
+    ///   and is called {{ challenge.name }}.
+    ///
+    ///   {{ link }} # -becomes-> [example.chals.thectf.com](https://example.chals.thectf.com)
+    ///   {{ nc }}   # -becomes-> `nc example.chals.thectf.com 12345`
+    /// ```
     description: String,
+
     category: String,
 
     directory: PathBuf,
@@ -180,6 +209,17 @@ impl ChallengeConfig {
             .split_whitespace()
             .join("-")
     }
+
+    /// Create challenge name slug from directory path (without category)
+    pub fn slugify_name(&self) -> String {
+        self.directory
+            .file_name()
+            .unwrap()
+            .to_string_lossy()
+            .to_lowercase()
+            .split_whitespace()
+            .join("-")
+    }
 }
 
 fn default_difficulty() -> i64 {
@@ -276,11 +316,17 @@ struct Pod {
     #[serde(default)]
     env: ListOrMap,
 
+    #[serde(default = "default_architecture")]
+    architecture: String,
+
     resources: Option<Resource>,
     replicas: i64,
     ports: Vec<PortConfig>,
     volume: Option,
 }
+fn default_architecture() -> String {
+    "amd64".to_string()
+}
 
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
diff --git a/src/deploy/frontend.rs b/src/deploy/frontend.rs
index 12dc8ca..8e063e6 100644
--- a/src/deploy/frontend.rs
+++ b/src/deploy/frontend.rs
@@ -1,20 +1,148 @@
-use std::path::PathBuf;
+use std::fs::File;
+use std::io::Read;
+use std::path::{Path, PathBuf};
 
 use anyhow::{anyhow, bail, Context, Error, Ok, Result};
 use itertools::Itertools;
 use tracing::{debug, error, info, trace, warn};
 
 use crate::builder::BuildResult;
+use crate::configparser::challenge::{ExposeType, FlagType};
 use crate::configparser::config::ProfileConfig;
 use crate::configparser::{enabled_challenges, get_config, get_profile_config, ChallengeConfig};
+use crate::utils::render_strict;
+
+use super::kubernetes::KubeDeployResult;
+use super::s3::S3DeployResult;
 
 /// Sync deployed challenges with rCTF frontend
 pub async fn update_frontend(
     profile_name: &str,
-    build_results: &[(&ChallengeConfig, BuildResult)],
-) -> Result<()> {
+    chal: &ChallengeConfig,
+    build_result: &BuildResult,
+    kube_result: &KubeDeployResult,
+    s3_result: &S3DeployResult,
+) -> Result<String> {
     let profile = get_profile_config(profile_name)?;
     let enabled_challenges = enabled_challenges(profile_name)?;
 
-    todo!()
+    // 
TODO: hook this up to real frontend! Waiting on rCTF frontend reimplementation
+
+    // for now, render out all challenge information to a markdown file for
+    // admins to enter manually
+
+    let hostname = chal_domain(chal, &profile.challenges_domain);
+    let rendered_desc = render_strict(
+        &chal.description,
+        minijinja::context! {
+            challenge => chal,
+            host => hostname,
+            hostname => hostname,
+            port => chal_port(chal),
+            nc => format!("`nc {} {}`", hostname, chal_port(chal)),
+            url => format!("https://{hostname}"),
+            link => format!("[https://{hostname}](https://{hostname})"),
+        },
+    )?;
+
+    // urls to markdown links
+    let asset_urls = s3_result
+        .uploaded_asset_urls
+        .iter()
+        .map(|url| {
+            format!(
+                "[{}]({})",
+                Path::new(url)
+                    .file_name()
+                    .expect("asset URL has no path!")
+                    .to_string_lossy(),
+                url
+            )
+        })
+        .join("\n\n");
+
+    let flag = match &chal.flag {
+        FlagType::RawString(f) => f.clone(),
+        FlagType::File { file } => {
+            let full_path = chal.directory.join(file);
+            let mut flag = String::new();
+            let f = File::open(&full_path)
+                .with_context(|| {
+                    format!(
+                        "could not open flag file {:?} for challenge {:?}",
+                        &full_path, chal.directory
+                    )
+                })?
+                .read_to_string(&mut flag);
+            flag
+        }
+        FlagType::Text { text } => text.clone(),
+        FlagType::Regex { regex } => unimplemented!(),
+        FlagType::Verifier { verifier } => unimplemented!(),
+    };
+
+    let info_md = format!(
+        r"
+## `{slug}`
+
+|         |   |
+--------:|---|
+name     | `{name}`
+category | `{cat}`
+author   | `{author}`
+
+### description
+
+```
+{desc}
+
+{asset_urls}
+```
+
+### flag
+
+`{flag}`
+
+---
+",
+        slug = chal.slugify_slash(),
+        name = chal.name,
+        cat = chal.category,
+        author = chal.author,
+        desc = rendered_desc,
+        asset_urls = asset_urls,
+        flag = flag.trim(),
+    );
+
+    // TODO: proper frontend updates
+
+    Ok(info_md)
+}
+
+// TODO: move to impl ChallengeConfig? 
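+// Note: the helpers below use the first pod that exposes a port; HTTP
+// challenges report port 443, and a challenge with no exposed pods falls back
+// to an empty hostname and port 0.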
+// TODO: return Option and report errors when missing
+fn chal_domain(chal: &ChallengeConfig, chal_domain: &str) -> String {
+    // find first container with expose
+    match chal.pods.iter().find(|p| !p.ports.is_empty()) {
+        Some(p) => {
+            let subdomain = match &p.ports[0].expose {
+                ExposeType::Tcp(_port) => &chal.slugify_name(),
+                ExposeType::Http(hostname) => hostname,
+            };
+            format!("{subdomain}.{chal_domain}")
+        }
+        // no pods have expose, no hostname for challenge
+        None => "".to_string(),
+    }
+}
+
+fn chal_port(chal: &ChallengeConfig) -> &i64 {
+    // find first container with expose
+    match chal.pods.iter().find(|p| !p.ports.is_empty()) {
+        Some(p) => match &p.ports[0].expose {
+            ExposeType::Tcp(port) => port,
+            ExposeType::Http(_hostname) => &443,
+        },
+        // no pods have expose, no hostname for challenge
+        None => &0,
+    }
+}
diff --git a/src/deploy/kubernetes/mod.rs b/src/deploy/kubernetes/mod.rs
index d1f2a64..4adbdda 100644
--- a/src/deploy/kubernetes/mod.rs
+++ b/src/deploy/kubernetes/mod.rs
@@ -2,6 +2,8 @@ use std::path::PathBuf;
 use std::time::Duration;
 
 use anyhow::{anyhow, bail, Context, Error, Ok, Result};
+use base64ct::{Base64, Encoding};
+use bollard::auth::DockerCredentials;
 use itertools::Itertools;
 use minijinja;
 use tokio::time::timeout;
@@ -12,12 +14,12 @@ use crate::clients::{apply_manifest_yaml, kube_client, wait_for_status};
 use crate::configparser::challenge::ExposeType;
 use crate::configparser::config::ProfileConfig;
 use crate::configparser::{get_config, get_profile_config, ChallengeConfig};
-use crate::utils::TryJoinAll;
+use crate::utils::{render_strict, TryJoinAll};
 
 pub mod templates;
 
 /// How and where a challenge was deployed/exposed at
-pub struct DeployResult {
+pub struct KubeDeployResult {
     // challenges could have multiple exposed services
     pub exposed: Vec<PodDeployResult>,
 }
@@ -27,56 +29,28 @@ pub enum PodDeployResult {
     Tcp { port: usize },
 }
 
-/// Render challenge manifest templates and apply to cluster
-pub async fn deploy_challenges(
-    profile_name: &str,
-    build_results: &[(&ChallengeConfig, BuildResult)],
-) -> Result<Vec<DeployResult>> {
-    let profile = get_profile_config(profile_name)?;
-
-    // Kubernetes deployment needs to:
-    // 1. render manifests
-    //    - namespace
-    //    - challenge pod deployment(s)
-    //    - service
-    //    - ingress
-    //
-    // 2. update ingress controller tcp ports
-    //
-    // 3. wait for all challenges to become ready
-    //
-    // 4. record domains and IPs of challenges to pass to frontend (?)
-
-    let results = build_results
-        .iter()
-        .map(|(chal, _)| deploy_single_challenge(profile_name, chal))
-        .try_join_all()
-        .await?;
-
-    update_ingress_tcp().await?;
-
-    Ok(results)
-}
-
 // Deploy all K8S resources for a single challenge `chal`.
 //
 // Creates the challenge namespace, deployments, services, and ingresses needed
 // to deploy and expose the challenge.
-async fn deploy_single_challenge(
+pub async fn apply_challenge_resources(
     profile_name: &str,
     chal: &ChallengeConfig,
-) -> Result<DeployResult> {
-    info!("  deploying chal {:?}...", chal.directory);
+) -> Result<KubeDeployResult> {
+    info!(
+        "  deploying kube resources for chal {:?}...",
+        chal.directory
+    );
 
     // render templates
     let profile = get_profile_config(profile_name)?;
     let kube = kube_client(profile).await?;
 
-    let ns_manifest = minijinja::render!(
+    let ns_manifest = render_strict(
         templates::CHALLENGE_NAMESPACE,
-        chal, slug => chal.slugify()
-    );
+        minijinja::context! 
{ chal, slug => chal.slugify() }, + )?; trace!("NAMESPACE:\n{}", ns_manifest); debug!("applying namespace for chal {:?}", chal.directory); @@ -90,19 +64,45 @@ async fn deploy_single_challenge( .try_join_all() .await?; - let results = DeployResult { exposed: vec![] }; + // add image pull credentials to the new namespace + debug!( + "applying namespace pull credentials for chal {:?}", + chal.directory + ); + + let registry = &get_config()?.registry; + let creds_manifest = render_strict( + templates::IMAGE_PULL_CREDS_SECRET, + minijinja::context! { + slug => chal.slugify(), + registry_domain => registry.domain, + creds_b64 => Base64::encode_string(format!("{}:{}", + registry.cluster.user, + registry.cluster.pass, + ).as_bytes()), + }, + )?; + apply_manifest_yaml(&kube, &creds_manifest).await?; + + // namespace boilerplate over, deploy actual challenge pods + + let results = KubeDeployResult { exposed: vec![] }; for pod in &chal.pods { let pod_image = chal.container_tag_for_pod(profile_name, &pod.name)?; - let depl_manifest = minijinja::render!( + let depl_manifest = render_strict( templates::CHALLENGE_DEPLOYMENT, - chal, pod, pod_image, profile_name, slug => chal.slugify(), - ); + minijinja::context! { + chal, pod, pod_image, profile_name, + slug => chal.slugify(), + }, + )?; trace!("DEPLOYMENT:\n{}", depl_manifest); - debug!( + trace!( "applying deployment for chal {:?} pod {:?}", - chal.directory, pod.name + chal.directory, + pod.name ); let depl = apply_manifest_yaml(&kube, &depl_manifest).await?; for object in depl { @@ -132,10 +132,13 @@ async fn deploy_single_challenge( .partition(|p| matches!(p.expose, ExposeType::Tcp(_))); if !tcp_ports.is_empty() { - let tcp_manifest = minijinja::render!( + let tcp_manifest = render_strict( templates::CHALLENGE_SERVICE_TCP, - chal, pod, tcp_ports, slug => chal.slugify(), domain => profile.challenges_domain - ); + minijinja::context! { + chal, pod, tcp_ports, + slug => chal.slugify(), name_slug => chal.slugify_name(), domain => profile.challenges_domain + }, + )?; trace!("TCP SERVICE:\n{}", tcp_manifest); debug!( @@ -168,10 +171,13 @@ async fn deploy_single_challenge( } if !http_ports.is_empty() { - let http_manifest = minijinja::render!( + let http_manifest = render_strict( templates::CHALLENGE_SERVICE_HTTP, - chal, pod, http_ports, slug => chal.slugify(), domain => profile.challenges_domain - ); + minijinja::context! { + chal, pod, http_ports, + slug => chal.slugify(), domain => profile.challenges_domain + }, + )?; trace!("HTTP INGRESS:\n{}", http_manifest); debug!( @@ -207,6 +213,7 @@ async fn deploy_single_challenge( // Updates the current ingress controller chart with the current set of TCP // ports needed for challenges. // TODO: move to Gateway to avoid needing to redeploy ingress? -async fn update_ingress_tcp() -> Result<()> { - Ok(()) -} +// TODO: is this needed? 
currently TCP challenges are separate LoadBalancer svcs
+// async fn update_ingress_tcp() -> Result<()> {
+//     Ok(())
+// }
diff --git a/src/deploy/kubernetes/templates.rs b/src/deploy/kubernetes/templates.rs
index e1ecc74..420486b 100644
--- a/src/deploy/kubernetes/templates.rs
+++ b/src/deploy/kubernetes/templates.rs
@@ -11,3 +11,6 @@ pub static CHALLENGE_SERVICE_HTTP: &str =
 
 pub static CHALLENGE_SERVICE_TCP: &str =
     include_str!("../../asset_files/challenge_templates/tcp.yaml.j2");
+
+pub static IMAGE_PULL_CREDS_SECRET: &str =
+    include_str!("../../asset_files/challenge_templates/pull-secret.yaml.j2");
diff --git a/src/deploy/mod.rs b/src/deploy/mod.rs
index df13701..09c2e63 100644
--- a/src/deploy/mod.rs
+++ b/src/deploy/mod.rs
@@ -7,11 +7,16 @@ use itertools::Itertools;
 use k8s_openapi::api::core::v1::Secret;
 use kube::api::ListParams;
 use std::env::current_exe;
+use std::fs::File;
+use std::io::Write;
 use tracing::{debug, error, info, trace, warn};
 
+use crate::builder::BuildResult;
 use crate::clients::kube_client;
 use crate::cluster_setup;
 use crate::configparser::config::ProfileConfig;
+use crate::configparser::{get_profile_config, ChallengeConfig};
+use crate::utils::TryJoinAll;
 
 /// check to make sure that the needed ingress charts are deployed and running
 pub async fn check_setup(profile: &ProfileConfig) -> Result<()> {
@@ -106,3 +111,57 @@ pub async fn check_setup(profile: &ProfileConfig) -> Result<()> {
         Ok(())
     }
 }
+
+/// For each challenge, deploy/upload all components of the challenge
+pub async fn deploy_challenges(
+    profile_name: &str,
+    build_results: &[(&ChallengeConfig, BuildResult)],
+) -> Result<Vec<()>> {
+    let profile = get_profile_config(profile_name)?;
+
+    let mut md_file = File::create(format!("challenge-info-{profile_name}.md"))?;
+    md_file.write_all(b"# Challenge Information\n\n")?;
+    let md_lock = std::sync::Mutex::new(md_file);
+
+    build_results
+        .iter()
+        .map(|(chal, build)| async {
+            let chal_md = deploy_single_challenge(profile_name, chal, build)
+                .await
+                .with_context(|| format!("could not deploy challenge {:?}", chal.directory))?;
+
+            debug!("writing chal {:?} info to file", chal.directory);
+            md_lock.lock().unwrap().write_all(chal_md.as_bytes())?;
+
+            Ok(())
+        })
+        .try_join_all()
+        .await
+}
+
+/// Deploy / upload all components of a single challenge.
+async fn deploy_single_challenge(
+    profile_name: &str,
+    chal: &ChallengeConfig,
+    build_result: &BuildResult,
+) -> Result<String> {
+    info!("  deploying chal {:?}...", chal.directory);
+    // deploy needs to:
+    // A) render kubernetes manifests
+    //    - namespace, deployment, service, ingress
+    //    - upgrade ingress config with new listen ports
+    //
+    // B) upload asset files to bucket
+    //
+    // C) update frontend with new state of challenges
+
+    let kube_results = kubernetes::apply_challenge_resources(profile_name, chal).await?;
+
+    let s3_urls = s3::upload_challenge_assets(profile_name, chal, build_result).await?;
+
+    let frontend_info =
+        frontend::update_frontend(profile_name, chal, build_result, &kube_results, &s3_urls)
+            .await?;
+
+    Ok(frontend_info)
+}
diff --git a/src/deploy/s3.rs b/src/deploy/s3.rs
index d7712dc..c817345 100644
--- a/src/deploy/s3.rs
+++ b/src/deploy/s3.rs
@@ -14,49 +14,53 @@ use crate::configparser::config::ProfileConfig;
 use crate::configparser::{enabled_challenges, get_config, get_profile_config, ChallengeConfig};
 use crate::utils::TryJoinAll;
 
+/// Artifacts and information about a deployed challenge.
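+/// Currently this is just the public URLs of the uploaded asset files.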
+pub struct S3DeployResult {
+    pub uploaded_asset_urls: Vec<String>,
+}
+
 /// Upload files to frontend asset bucket
 /// Returns URLs of uploaded files.
-pub async fn upload_assets(
+pub async fn upload_challenge_assets(
     profile_name: &str,
-    build_results: &[(&ChallengeConfig, BuildResult)],
-) -> Result<Vec<BuildResult>> {
+    chal: &ChallengeConfig,
+    build_result: &BuildResult,
+) -> Result<S3DeployResult> {
     let profile = get_profile_config(profile_name)?;
     let enabled_challenges = enabled_challenges(profile_name)?;
 
     let bucket = bucket_client(&profile.s3)?;
 
-    info!("uploading assets...");
+    info!("uploading assets for chal {:?}...", chal.directory);
 
-    // upload all files for each challenge
-    build_results
+    let uploaded = build_result
+        .assets
         .iter()
-        .map(|(chal, result)| async move {
-            // upload all files for a specific challenge
-
-            info!("  for chal {:?}...", chal.directory);
-
-            let uploaded = result
-                .assets
-                .iter()
-                .map(|asset_file| async move {
-                    upload_single_file(bucket, chal, asset_file)
-                        .await
-                        .with_context(|| format!("failed to upload file {asset_file:?}"))
-                })
-                .try_join_all()
+        .map(|asset_file| async move {
+            debug!("uploading file {:?}", asset_file);
+            // upload to bucket
+            let bucket_path = upload_single_file(bucket, chal, asset_file)
                 .await
-                .with_context(|| {
-                    format!("failed to upload asset files for chal {:?}", chal.directory)
-                })?;
+                .with_context(|| format!("failed to upload file {asset_file:?}"))?;
 
-            // return new BuildResult with assets as bucket path
-            Ok(BuildResult {
-                tags: result.tags.clone(),
-                assets: uploaded,
-            })
+            // return link to the uploaded file
+            // TODO: only works for AWS rn! support other providers
+            let url = format!(
+                "https://{bucket}.s3.{region}.amazonaws.com/{path}",
+                bucket = &profile.s3.bucket_name,
+                region = &profile.s3.region,
+                path = bucket_path.to_string_lossy(),
+            );
+            Ok(url)
         })
         .try_join_all()
        .await
+        .with_context(|| format!("failed to upload asset files for chal {:?}", chal.directory))?;
+
+    // return the uploaded asset URLs for this challenge
+    Ok(S3DeployResult {
+        uploaded_asset_urls: uploaded,
+    })
 }
 
 async fn upload_single_file(
diff --git a/src/main.rs b/src/main.rs
index 6f9ae5f..f5c71c5 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,6 +1,6 @@
 use beavercds_ng::commands;
 use clap::Parser;
-use tracing::{trace, Level};
+use tracing::{error, trace, Level};
 use tracing_subscriber::{
     fmt::{format::FmtSpan, time},
     EnvFilter,
@@ -38,7 +38,19 @@ fn main() {
         .init();
     trace!("args: {:?}", cli);
 
+    // dispatch commands
+    match dispatch(cli) {
+        Ok(_) => (),
+        Err(e) => {
+            error!("{e:?}");
+            std::process::exit(1)
+        }
+    };
+}
+
+/// dispatch commands
+fn dispatch(cli: cli::Cli) -> anyhow::Result<()> {
     match &cli.command {
         cli::Commands::Validate => commands::validate::run(),
 
@@ -49,7 +61,7 @@ fn main() {
             registry,
             bucket,
         } => {
-            commands::validate::run();
+            commands::validate::run()?;
             commands::check_access::run(profile, kubernetes, frontend, registry, bucket)
         }
 
@@ -60,7 +72,7 @@ fn main() {
             no_push,
             extract_assets,
         } => {
-            commands::validate::run();
+            commands::validate::run()?;
             commands::build::run(profile, &!no_push, extract_assets)
         }
 
@@ -69,12 +81,13 @@ fn main() {
             no_build,
             dry_run,
         } => {
-            commands::validate::run();
+            commands::validate::run()?;
             commands::deploy::run(profile, no_build, dry_run)
         }
 
         cli::Commands::ClusterSetup { profile } => {
-            commands::cluster_setup::run(profile);
+            commands::validate::run()?;
+            commands::cluster_setup::run(profile)
         }
     }
 }
diff --git a/src/tests/parsing/challenges.rs b/src/tests/parsing/challenges.rs
index 
2ad84c9..5e6a319 100644
--- a/src/tests/parsing/challenges.rs
+++ b/src/tests/parsing/challenges.rs
@@ -357,6 +357,7 @@ fn challenge_pods() {
                 replicas: 2,
                 env: ListOrMap::Map(HashMap::new()),
                 resources: None,
+                architecture: "amd64".to_string(),
                 ports: vec![PortConfig {
                     internal: 80,
                     expose: ExposeType::Http("test.chals.example.com".to_string())
@@ -373,6 +374,7 @@ fn challenge_pods() {
                 replicas: 1,
                 env: ListOrMap::Map(HashMap::new()),
                 resources: None,
+                architecture: "amd64".to_string(),
                 ports: vec![PortConfig {
                     internal: 8000,
                     expose: ExposeType::Tcp(12345)
@@ -442,6 +444,7 @@ fn challenge_pod_build() {
                 replicas: 1,
                 env: ListOrMap::Map(HashMap::new()),
                 resources: None,
+                architecture: "amd64".to_string(),
                 ports: vec![PortConfig {
                     internal: 80,
                     expose: ExposeType::Http("test.chals.example.com".to_string())
@@ -461,6 +464,7 @@ fn challenge_pod_build() {
                 replicas: 1,
                 env: ListOrMap::Map(HashMap::new()),
                 resources: None,
+                architecture: "amd64".to_string(),
                 ports: vec![PortConfig {
                     internal: 80,
                     expose: ExposeType::Http("test2.chals.example.com".to_string())
@@ -530,6 +534,7 @@ fn challenge_pod_env() {
                     ("BAR".to_string(), "that".to_string()),
                 ])),
                 resources: None,
+                architecture: "amd64".to_string(),
                 ports: vec![PortConfig {
                     internal: 80,
                     expose: ExposeType::Http("test.chals.example.com".to_string())
@@ -545,6 +550,7 @@ fn challenge_pod_env() {
                     ("BAR".to_string(), "that".to_string()),
                 ])),
                 resources: None,
+                architecture: "amd64".to_string(),
                 ports: vec![PortConfig {
                     internal: 80,
                     expose: ExposeType::Http("test2.chals.example.com".to_string())
diff --git a/src/utils/mod.rs b/src/utils/mod.rs
index 2b6d037..830084f 100644
--- a/src/utils/mod.rs
+++ b/src/utils/mod.rs
@@ -1,3 +1,4 @@
+use anyhow::{Context, Result};
 use futures::{future::try_join_all, TryFuture};
 
 /// Helper trait for `Iterator` to add futures::try_join_all() as chain method.
@@ -26,3 +27,19 @@ where
         try_join_all(self).await
     }
 }
+
+//
+// Minijinja strict rendering with error
+//
+
+/// Similar to minijinja::render!(), but return Error if any undefined values.
+pub fn render_strict(template: &str, context: minijinja::Value) -> Result<String> {
+    let mut strict_env = minijinja::Environment::new();
+    // error on any undefined template variables
+    strict_env.set_undefined_behavior(minijinja::UndefinedBehavior::Strict);
+
+    let r = strict_env
+        .render_str(template, context)
+        .context(format!("could not render template {:?}", template))?;
+    Ok(r)
+}
diff --git a/tests/repo/pwn/notsh/challenge.yaml b/tests/repo/pwn/notsh/challenge.yaml
index fad5fe9..60ef67a 100644
--- a/tests/repo/pwn/notsh/challenge.yaml
+++ b/tests/repo/pwn/notsh/challenge.yaml
@@ -1,4 +1,4 @@
-name: notsh
+name: not a shell?
 author: captainGeech
 description: |-
   This challenge isn't a shell
diff --git a/tests/repo/web/bar/challenge.yaml b/tests/repo/web/bar/challenge.yaml
index a2cafec..3e1d416 100644
--- a/tests/repo/web/bar/challenge.yaml
+++ b/tests/repo/web/bar/challenge.yaml
@@ -8,7 +8,7 @@ description: |
 difficulty: 1
 
 flag:
-  file: ./flag
+  file: ./site_source/flag
 
 # each individual pod is gonna allow only 1 container for now
 pods: