diff --git a/bottlecap/src/bin/bottlecap/main.rs b/bottlecap/src/bin/bottlecap/main.rs
index a34879556..a202337f7 100644
--- a/bottlecap/src/bin/bottlecap/main.rs
+++ b/bottlecap/src/bin/bottlecap/main.rs
@@ -162,7 +162,7 @@ fn build_function_arn(account_id: &str, region: &str, function_name: &str) -> St
 
 #[tokio::main]
 async fn main() -> Result<()> {
-    let (aws_config, config) = load_configs();
+    let (mut aws_config, config) = load_configs();
 
     enable_logging_subsystem(&config);
     let client = reqwest::Client::builder().no_proxy().build().map_err(|e| {
@@ -176,7 +176,7 @@ async fn main() -> Result<()> {
         .await
         .map_err(|e| Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
 
-    if let Some(resolved_api_key) = resolve_secrets(Arc::clone(&config), &aws_config).await {
+    if let Some(resolved_api_key) = resolve_secrets(Arc::clone(&config), &mut aws_config).await {
         match extension_loop_active(&aws_config, &config, &client, &r, resolved_api_key).await {
             Ok(()) => {
                 debug!("Extension loop completed successfully");
@@ -202,6 +202,10 @@ fn load_configs() -> (AwsConfig, Arc<Config>) {
         aws_access_key_id: env::var("AWS_ACCESS_KEY_ID").unwrap_or_default(),
         aws_secret_access_key: env::var("AWS_SECRET_ACCESS_KEY").unwrap_or_default(),
         aws_session_token: env::var("AWS_SESSION_TOKEN").unwrap_or_default(),
+        aws_container_credentials_full_uri: env::var("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+            .unwrap_or_default(),
+        aws_container_authorization_token: env::var("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+            .unwrap_or_default(),
         function_name: env::var("AWS_LAMBDA_FUNCTION_NAME").unwrap_or_default(),
         sandbox_init_time: Instant::now(),
     };
diff --git a/bottlecap/src/config/mod.rs b/bottlecap/src/config/mod.rs
index 533ec6030..e46f059d7 100644
--- a/bottlecap/src/config/mod.rs
+++ b/bottlecap/src/config/mod.rs
@@ -334,6 +334,8 @@ pub struct AwsConfig {
     pub aws_session_token: String,
     pub function_name: String,
     pub sandbox_init_time: Instant,
+    pub aws_container_credentials_full_uri: String,
+    pub aws_container_authorization_token: String,
 }
 
 #[must_use]
diff --git a/bottlecap/src/lifecycle/invocation/processor.rs b/bottlecap/src/lifecycle/invocation/processor.rs
index e7835c1cf..b228d6e45 100644
--- a/bottlecap/src/lifecycle/invocation/processor.rs
+++ b/bottlecap/src/lifecycle/invocation/processor.rs
@@ -632,6 +632,8 @@ mod tests {
             aws_access_key_id: "***".into(),
             aws_secret_access_key: "***".into(),
             aws_session_token: "***".into(),
+            aws_container_credentials_full_uri: "***".into(),
+            aws_container_authorization_token: "***".into(),
             function_name: "test-function".into(),
             sandbox_init_time: Instant::now(),
         };
diff --git a/bottlecap/src/secrets/decrypt.rs b/bottlecap/src/secrets/decrypt.rs
index 36442f164..eb96612f7 100644
--- a/bottlecap/src/secrets/decrypt.rs
+++ b/bottlecap/src/secrets/decrypt.rs
@@ -12,7 +12,7 @@ use std::time::Instant;
 use tracing::debug;
 use tracing::error;
 
-pub async fn resolve_secrets(config: Arc<Config>, aws_config: &AwsConfig) -> Option<String> {
+pub async fn resolve_secrets(config: Arc<Config>, aws_config: &mut AwsConfig) -> Option<String> {
     let api_key_candidate =
         if !config.api_key_secret_arn.is_empty() || !config.kms_api_key.is_empty() {
             let before_decrypt = Instant::now();
@@ -25,6 +25,33 @@ pub async fn resolve_secrets(config: Arc<Config>, aws_config: &AwsConfig) -> Opt
         }
     };
 
+    if aws_config.aws_secret_access_key.is_empty()
+        && aws_config.aws_access_key_id.is_empty()
+        && !aws_config.aws_container_credentials_full_uri.is_empty()
+        && !aws_config.aws_container_authorization_token.is_empty()
+    {
+        // We're in Snap Start
+        let credentials = match get_snapstart_credentials(aws_config, &client).await {
+            Ok(credentials) => credentials,
+            Err(err) => {
+                error!("Error getting Snap Start credentials: {}", err);
+                return None;
+            }
+        };
+        aws_config.aws_access_key_id = credentials["AccessKeyId"]
+            .as_str()
+            .unwrap_or_default()
+            .to_string();
+        aws_config.aws_secret_access_key = credentials["SecretAccessKey"]
+            .as_str()
+            .unwrap_or_default()
+            .to_string();
+        aws_config.aws_session_token = credentials["Token"]
+            .as_str()
+            .unwrap_or_default()
+            .to_string();
+    }
+
     let decrypted_key = if config.kms_api_key.is_empty() {
         decrypt_aws_sm(&client, config.api_key_secret_arn.clone(), aws_config).await
     } else {
@@ -146,6 +173,24 @@ async fn decrypt_aws_sm(
     }
 }
 
+async fn get_snapstart_credentials(
+    aws_config: &AwsConfig,
+    client: &Client,
+) -> Result<Value, Box<dyn std::error::Error>> {
+    let mut headers = HeaderMap::new();
+    headers.insert(
+        "Authorization",
+        HeaderValue::from_str(&aws_config.aws_container_authorization_token)?,
+    );
+
+    let req = client
+        .get(&aws_config.aws_container_credentials_full_uri)
+        .headers(headers);
+    let body = req.send().await?.text().await?;
+    let v: Value = serde_json::from_str(&body)?;
+    Ok(v)
+}
+
 async fn request(
     json_body: &Value,
     headers: HeaderMap,
@@ -283,11 +328,13 @@ mod tests {
             &NaiveDateTime::parse_from_str("2024-05-30 09:10:11", "%Y-%m-%d %H:%M:%S").unwrap(),
         );
         let headers = build_get_secret_signed_headers(
-            &AwsConfig {
+            &AwsConfig{
                 region: "us-east-1".to_string(),
                 aws_access_key_id: "AKIDEXAMPLE".to_string(),
                 aws_secret_access_key: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY".to_string(),
                 aws_session_token: "AQoDYXdzEJr...".to_string(),
+                aws_container_authorization_token: String::new(),
+                aws_container_credentials_full_uri: String::new(),
                 function_name: "arn:some-function".to_string(),
                 sandbox_init_time: Instant::now(),
             },
diff --git a/bottlecap/src/telemetry/events.rs b/bottlecap/src/telemetry/events.rs
index 6c8e3b3d6..2d391fe75 100644
--- a/bottlecap/src/telemetry/events.rs
+++ b/bottlecap/src/telemetry/events.rs
@@ -125,6 +125,30 @@ pub enum TelemetryRecord {
         /// Total size of the dropped records
         dropped_bytes: u64,
     },
+
+    /// Snapstart
+    #[serde(rename = "platform.restoreStart", rename_all = "camelCase")]
+    PlatformRestoreStart {
+        // function name and function version are here
+        // but we don't care about those
+        // https://docs.aws.amazon.com/lambda/latest/dg/telemetry-schema-reference.html#platform-restoreStart
+        // runtime version may be nice
+    },
+
+    #[serde(rename = "platform.restoreReport", rename_all = "camelCase")]
+    PlatformRestoreReport {
+        /// Status of the invocation
+        status: Status,
+        /// When unsuccessful, the `error_type` describes what kind of error occurred
+        error_type: Option<String>,
+    },
+    #[serde(rename = "platform.restoreRuntimeDone", rename_all = "camelCase")]
+    PlatformRestoreRuntimeDone {
+        /// Status of the invocation
+        status: Status,
+        /// When unsuccessful, the `error_type` describes what kind of error occurred
+        error_type: Option<String>,
+    },
 }
 
 /// Type of Initialization
diff --git a/bottlecap/src/telemetry/listener.rs b/bottlecap/src/telemetry/listener.rs
index 7d113efd7..22bebad5f 100644
--- a/bottlecap/src/telemetry/listener.rs
+++ b/bottlecap/src/telemetry/listener.rs
@@ -6,7 +6,7 @@ use tokio::sync::mpsc::Sender;
 
 use hyper::service::{make_service_fn, service_fn};
 use hyper::{Body, Request, Response, Server};
-use tracing::{debug, error};
+use tracing::debug;
 
 #[allow(clippy::module_name_repetitions)]
 #[derive(Debug, Clone, Copy)]
@@ -58,9 +58,10 @@ impl TelemetryListener {
             Err(e) => {
                 // If we can't parse the event, we will receive it again in a new batch
                 // causing an infinite loop and resource contention.
-                // Instead, log it as fatal and move on.
-                // This will result in a dropped payload.
-                error!("[FATAL] Failed to parse telemetry events: {:?}", e);
+                // Instead, log it and move on.
+                // This will result in a dropped payload, but may be from
+                // events we haven't added support for yet
+                debug!("Failed to parse telemetry events: {:?}", e);
                 return Ok(Response::builder()
                     .status(hyper::StatusCode::OK)
                     .body(Body::from("Failed to parse telemetry events"))