
Commit 034c11d

render out challenge information to markdown temporarily
Signed-off-by: Robert Detjens <[email protected]>
1 parent 9880f03 commit 034c11d

6 files changed: +239 -89 lines changed

src/clients.rs

Lines changed: 1 addition & 1 deletion
```diff
@@ -222,7 +222,7 @@ pub async fn apply_manifest_yaml(
     // this manifest has multiple documents (crds, deployment)
     for yaml in multidoc_deserialize(manifest)? {
         let obj: DynamicObject = serde_yml::from_value(yaml)?;
-        debug!(
+        trace!(
             "applying resource {} {}",
             obj.types.clone().unwrap_or_default().kind,
             obj.name_any()
```
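The only change here demotes the per-resource apply log line from `debug!` to `trace!`, so it only shows up at the most verbose level. The subscriber setup is not part of this diff; a minimal sketch of how such a line is filtered, assuming `tracing_subscriber` with the `env-filter` feature (the `RUST_LOG` values are illustrative):

```rust
use tracing::{debug, trace};
use tracing_subscriber::EnvFilter;

fn main() {
    // RUST_LOG=debug -> debug! lines are emitted, trace! lines are filtered out
    // RUST_LOG=trace -> both are emitted
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();

    debug!("shown at `debug` and more verbose levels");
    trace!("shown only when the filter allows `trace`");
}
```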

src/commands/deploy.rs

Lines changed: 4 additions & 18 deletions
```diff
@@ -1,4 +1,4 @@
-use anyhow::Result;
+use anyhow::{Context, Result};
 use itertools::Itertools;
 use std::process::exit;
 use tracing::{debug, error, info, trace, warn};
@@ -29,23 +29,9 @@ pub async fn run(profile_name: &str, no_build: &bool, _dry_run: &bool) -> Result
         build_results.iter().map(|b| &b.1).collect_vec()
     );
 
-    // deploy needs to:
-    // A) render kubernetes manifests
-    //    - namespace, deployment, service, ingress
-    //    - upgrade ingress config with new listen ports
-    //
-    // B) upload asset files to bucket
-    //
-    // C) update frontend with new state of challenges
-
-    // A)
-    deploy::kubernetes::deploy_challenges(profile_name, &build_results).await?;
-
-    // B)
-    deploy::s3::upload_assets(profile_name, &build_results).await?;
-
-    // C)
-    deploy::frontend::update_frontend(profile_name, &build_results).await?;
+    deploy::deploy_challenges(profile_name, &build_results)
+        .await
+        .context("could not deploy challenges")?;
 
     Ok(())
 }
```
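The three per-subsystem calls move behind a single `deploy::deploy_challenges` entry point, and the failure path now carries an `anyhow` context message. A small self-contained sketch of that error-wrapping pattern, assuming a tokio runtime; the function bodies and the `"testing"` profile name below are made up:

```rust
use anyhow::{Context, Result};

// stand-in for deploy::deploy_challenges; always fails so the context is visible
async fn deploy_challenges(profile_name: &str) -> Result<()> {
    anyhow::bail!("kube apply failed for profile {profile_name:?}")
}

async fn run(profile_name: &str) -> Result<()> {
    deploy_challenges(profile_name)
        .await
        .context("could not deploy challenges")?;
    Ok(())
}

#[tokio::main]
async fn main() {
    if let Err(err) = run("testing").await {
        // "{:#}" prints the whole chain:
        // could not deploy challenges: kube apply failed for profile "testing"
        eprintln!("{err:#}");
    }
}
```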

src/deploy/frontend.rs

Lines changed: 131 additions & 4 deletions
````diff
@@ -1,20 +1,147 @@
-use std::path::PathBuf;
+use std::fs::File;
+use std::io::Read;
+use std::path::{Path, PathBuf};
 
 use anyhow::{anyhow, bail, Context, Error, Ok, Result};
 use itertools::Itertools;
 use tracing::{debug, error, info, trace, warn};
 
 use crate::builder::BuildResult;
+use crate::configparser::challenge::{ExposeType, FlagType};
 use crate::configparser::config::ProfileConfig;
 use crate::configparser::{enabled_challenges, get_config, get_profile_config, ChallengeConfig};
+use crate::utils::render_strict;
+
+use super::kubernetes::KubeDeployResult;
+use super::s3::S3DeployResult;
 
 /// Sync deployed challenges with rCTF frontend
 pub async fn update_frontend(
     profile_name: &str,
-    build_results: &[(&ChallengeConfig, BuildResult)],
-) -> Result<()> {
+    chal: &ChallengeConfig,
+    build_result: &BuildResult,
+    kube_result: &KubeDeployResult,
+    s3_result: &S3DeployResult,
+) -> Result<String> {
     let profile = get_profile_config(profile_name)?;
     let enabled_challenges = enabled_challenges(profile_name)?;
 
-    todo!()
+    // TODO: hook this up to real frontend! Waiting on rCTF frontend reimplementation
+
+    // for now, render out all challenge information to a markdown file for
+    // admins to enter manually
+
+    let hostname = chal_domain(chal, &profile.challenges_domain);
+    let rendered_desc = render_strict(
+        &chal.description,
+        minijinja::context! {
+            challenge => chal,
+            host => hostname,
+            port => chal_port(chal),
+            nc => format!("`nc {} {}`", hostname, chal_port(chal)),
+            url => format!("[https://{hostname}](https://{hostname})", ),
+            link => format!("https://{hostname}"),
+        },
+    )?;
+
+    // urls to markdown links
+    let asset_urls = s3_result
+        .uploaded_asset_urls
+        .iter()
+        .map(|url| {
+            format!(
+                "[{}]({})",
+                Path::new(url)
+                    .file_name()
+                    .expect("asset URL has no path!")
+                    .to_string_lossy(),
+                url
+            )
+        })
+        .join("\n\n");
+    let flag = match &chal.flag {
+        FlagType::RawString(f) => f.clone(),
+        FlagType::File { file } => {
+            let full_path = chal.directory.join(file);
+            let mut flag = String::new();
+            let f = File::open(&full_path)
+                .with_context(|| {
+                    format!(
+                        "could not open flag file {:?} for challenge {:?}",
+                        &full_path, chal.directory
+                    )
+                })?
+                .read_to_string(&mut flag);
+            flag
+        }
+        FlagType::Text { text } => text.clone(),
+        FlagType::Regex { regex } => unimplemented!(),
+        FlagType::Verifier { verifier } => unimplemented!(),
+    };
+
+    let info_md = format!(
+        r"
+## `{slug}`
+
+| | |
+--------:|---|
+name | `{name}`
+category | `{cat}`
+author | `{author}`
+
+### description
+
+```
+{desc}
+
+{asset_urls}
+```
+
+### flag
+
+`{flag}`
+
+---
+",
+        slug = chal.slugify_slash(),
+        name = chal.name,
+        cat = chal.category,
+        author = chal.author,
+        desc = rendered_desc,
+        asset_urls = asset_urls,
+        flag = flag,
+    );
+
+    // TODO: proper frontend updates
+
+    Ok(info_md)
+}
+
+// TODO: move to impl ChallengeConfig?
+// TODO: return Option and report errors when missing
+fn chal_domain(chal: &ChallengeConfig, chal_domain: &str) -> String {
+    // find first container with expose
+    match chal.pods.iter().find(|p| !p.ports.is_empty()) {
+        Some(p) => {
+            let subdomain = match &p.ports[0].expose {
+                ExposeType::Tcp(_port) => &chal.name,
+                ExposeType::Http(hostname) => hostname,
+            };
+            format!("{subdomain}.{chal_domain}")
+        }
+        // no pods have expose, no hostname for challenge
+        None => "".to_string(),
+    }
+}
+
+fn chal_port(chal: &ChallengeConfig) -> &i64 {
+    // find first container with expose
+    match chal.pods.iter().find(|p| !p.ports.is_empty()) {
+        Some(p) => match &p.ports[0].expose {
+            ExposeType::Tcp(port) => port,
+            ExposeType::Http(_hostname) => &443,
+        },
+        // no pods have expose, no hostname for challenge
+        None => &0,
+    }
 }
````
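The interesting part here is that challenge descriptions are rendered through `render_strict` with a minijinja context exposing `challenge`, `host`, `port`, `nc`, `url`, and `link`, so authors can write templated descriptions. `render_strict` itself lives in `crate::utils` and is not shown in this diff; a hypothetical sketch of what a strict renderer looks like with minijinja, and how a description would use those variables (the `pwn.example.ctf` hostname is made up):

```rust
use anyhow::Result;
use minijinja::{context, Environment, UndefinedBehavior};

// hypothetical stand-in for crate::utils::render_strict
fn render_strict(template: &str, ctx: minijinja::Value) -> Result<String> {
    let mut env = Environment::new();
    // error out on typos like `{{ prot }}` instead of silently rendering ""
    env.set_undefined_behavior(UndefinedBehavior::Strict);
    Ok(env.render_str(template, ctx)?)
}

fn main() -> Result<()> {
    // a description as a challenge author might write it
    let desc = "Connect with {{ nc }} or visit {{ url }}.";
    let rendered = render_strict(
        desc,
        context! {
            nc => "`nc pwn.example.ctf 31337`",
            url => "[https://pwn.example.ctf](https://pwn.example.ctf)",
        },
    )?;
    println!("{rendered}");
    Ok(())
}
```

One thing a reviewer might flag: in the `FlagType::File` arm the return value of `read_to_string` is bound to `f` and never checked, so a failed read would be silently ignored.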

src/deploy/kubernetes/mod.rs

Lines changed: 12 additions & 38 deletions
```diff
@@ -29,46 +29,18 @@ pub enum PodDeployResult {
     Tcp { port: usize },
 }
 
-/// Render challenge manifest templates and apply to cluster
-pub async fn deploy_challenges(
-    profile_name: &str,
-    build_results: &[(&ChallengeConfig, BuildResult)],
-) -> Result<Vec<KubeDeployResult>> {
-    let profile = get_profile_config(profile_name)?;
-
-    // Kubernetes deployment needs to:
-    // 1. render manifests
-    // - namespace
-    // - challenge pod deployment(s)
-    // - service
-    // - ingress
-    //
-    // 2. update ingress controller tcp ports
-    //
-    // 3. wait for all challenges to become ready
-    //
-    // 4. record domains and IPs of challenges to pass to frontend (?)
-
-    let results = build_results
-        .iter()
-        .map(|(chal, _)| deploy_single_challenge(profile_name, chal))
-        .try_join_all()
-        .await?;
-
-    update_ingress_tcp().await?;
-
-    Ok(results)
-}
-
 // Deploy all K8S resources for a single challenge `chal`.
 //
 // Creates the challenge namespace, deployments, services, and ingresses needed
 // to deploy and expose the challenge.
-async fn deploy_single_challenge(
+pub async fn apply_challenge_resources(
     profile_name: &str,
     chal: &ChallengeConfig,
 ) -> Result<KubeDeployResult> {
-    info!(" deploying chal {:?}...", chal.directory);
+    info!(
+        " deploying kube resources for chal {:?}...",
+        chal.directory
+    );
     // render templates
 
     let profile = get_profile_config(profile_name)?;
@@ -127,9 +99,10 @@ async fn deploy_single_challenge(
     )?;
     trace!("DEPLOYMENT:\n{}", depl_manifest);
 
-    debug!(
+    trace!(
        "applying deployment for chal {:?} pod {:?}",
-        chal.directory, pod.name
+        chal.directory,
+        pod.name
     );
     let depl = apply_manifest_yaml(&kube, &depl_manifest).await?;
     for object in depl {
@@ -240,6 +213,7 @@ async fn deploy_single_challenge(
 // Updates the current ingress controller chart with the current set of TCP
 // ports needed for challenges.
 // TODO: move to Gateway to avoid needing to redeploy ingress?
-async fn update_ingress_tcp() -> Result<()> {
-    Ok(())
-}
+// TODO: is this needed? currently TCP challenges are separate LoadBalancer svcs
+// async fn update_ingress_tcp() -> Result<()> {
+//     Ok(())
+// }
```

src/deploy/mod.rs

Lines changed: 59 additions & 0 deletions
```diff
@@ -7,11 +7,16 @@ use itertools::Itertools;
 use k8s_openapi::api::core::v1::Secret;
 use kube::api::ListParams;
 use std::env::current_exe;
+use std::fs::File;
+use std::io::Write;
 use tracing::{debug, error, info, trace, warn};
 
+use crate::builder::BuildResult;
 use crate::clients::kube_client;
 use crate::cluster_setup;
 use crate::configparser::config::ProfileConfig;
+use crate::configparser::{get_profile_config, ChallengeConfig};
+use crate::utils::TryJoinAll;
 
 /// check to make sure that the needed ingress charts are deployed and running
 pub async fn check_setup(profile: &ProfileConfig) -> Result<()> {
@@ -106,3 +111,57 @@
         Ok(())
     }
 }
+
+/// For each challenge, deploy/upload all components of the challenge
+pub async fn deploy_challenges(
+    profile_name: &str,
+    build_results: &[(&ChallengeConfig, BuildResult)],
+) -> Result<Vec<()>> {
+    let profile = get_profile_config(profile_name)?;
+
+    let mut md_file = File::create(format!("challenge-info-{profile_name}.md"))?;
+    md_file.write_all(b"# Challenge Information\n\n")?;
+    let md_lock = std::sync::Mutex::new(md_file);
+
+    build_results
+        .iter()
+        .map(|(chal, build)| async {
+            let chal_md = deploy_single_challenge(profile_name, chal, build)
+                .await
+                .with_context(|| format!("could not deploy challenge {:?}", chal.directory))?;
+
+            debug!("writing chal {:?} info to file", chal.directory);
+            md_lock.lock().unwrap().write_all(chal_md.as_bytes())?;
+
+            Ok(())
+        })
+        .try_join_all()
+        .await
+}
+
+/// Deploy / upload all components of a single challenge.
+async fn deploy_single_challenge(
+    profile_name: &str,
+    chal: &ChallengeConfig,
+    build_result: &BuildResult,
+) -> Result<String> {
+    info!(" deploying chal {:?}...", chal.directory);
+    // deploy needs to:
+    // A) render kubernetes manifests
+    // - namespace, deployment, service, ingress
+    // - upgrade ingress config with new listen ports
+    //
+    // B) upload asset files to bucket
+    //
+    // C) update frontend with new state of challenges
+
+    let kube_results = kubernetes::apply_challenge_resources(profile_name, chal).await?;
+
+    let s3_urls = s3::upload_challenge_assets(profile_name, chal, build_result).await?;
+
+    let frontend_info =
+        frontend::update_frontend(profile_name, chal, build_result, &kube_results, &s3_urls)
+            .await?;
+
+    Ok(frontend_info)
+}
```
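The new `deploy_challenges` fans the per-challenge work out concurrently and funnels each rendered markdown section into one shared file through a `std::sync::Mutex`, which is safe here because the guard is only held for a synchronous write, never across an `.await`. A rough self-contained sketch of the same pattern, using `futures::future::try_join_all` in place of the crate's own `TryJoinAll` helper (the challenge names, output filename, and tokio/futures/anyhow dependencies are assumptions):

```rust
use std::fs::File;
use std::io::Write;
use std::sync::Mutex;

use anyhow::{Context, Result};
use futures::future::try_join_all;

// stand-in for the real kube apply + asset upload + markdown rendering
async fn deploy_one(name: &str) -> Result<String> {
    Ok(format!("## `{name}`\n\n"))
}

#[tokio::main]
async fn main() -> Result<()> {
    let mut md_file = File::create("challenge-info-example.md")?;
    md_file.write_all(b"# Challenge Information\n\n")?;
    // a sync Mutex is fine: the guard is dropped before any .await
    let md_lock = Mutex::new(md_file);
    let md_lock = &md_lock;

    let chals = ["pwn/example-one", "web/example-two"];
    try_join_all(chals.iter().map(|name| async move {
        let chal_md = deploy_one(name)
            .await
            .with_context(|| format!("could not deploy challenge {name:?}"))?;
        md_lock.lock().unwrap().write_all(chal_md.as_bytes())?;
        anyhow::Ok(())
    }))
    .await?;

    Ok(())
}
```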
