From c1c6f8751279fd56d1def7cbaa12b55458818837 Mon Sep 17 00:00:00 2001 From: Alexander Petric Date: Mon, 29 Dec 2025 20:25:17 +0000 Subject: [PATCH 1/8] use tini with unshare to preserve signals (e.g oom) --- Dockerfile | 2 +- backend/windmill-worker/src/common.rs | 14 ++++- backend/windmill-worker/src/worker.rs | 73 +++++++++++++++++++++++---- 3 files changed, 78 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3ad32f95937d6..d57176dd4b0e2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -129,7 +129,7 @@ ENV PATH /usr/local/bin:/root/.local/bin:/tmp/.local/bin:$PATH RUN apt-get update \ - && apt-get install -y --no-install-recommends netbase tzdata ca-certificates wget curl jq unzip build-essential unixodbc xmlsec1 software-properties-common \ + && apt-get install -y --no-install-recommends netbase tzdata ca-certificates wget curl jq unzip build-essential unixodbc xmlsec1 software-properties-common tini \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/backend/windmill-worker/src/common.rs b/backend/windmill-worker/src/common.rs index ea1f6a763d5b0..95d6d7060e128 100644 --- a/backend/windmill-worker/src/common.rs +++ b/backend/windmill-worker/src/common.rs @@ -642,7 +642,19 @@ pub fn build_command_with_isolation(program: &str, args: &[&str]) -> Command { cmd.arg(flag); } - cmd.arg("--"); + // If tini is available, use it for proper PID 1 signal handling + // (especially OOM exit codes which return 137 instead of sigprocmask errors). + // Note: --fork should already be in the flags for proper namespace setup. + if let Some(tini_path) = crate::TINI_AVAILABLE.as_ref() { + cmd.arg("--"); + cmd.arg(tini_path); + cmd.arg("-s"); + cmd.arg("--"); + } else { + // Without tini, just run the command directly (--fork is in flags) + cmd.arg("--"); + } + cmd.arg(program); cmd.args(args); cmd diff --git a/backend/windmill-worker/src/worker.rs b/backend/windmill-worker/src/worker.rs index 36ec01b2fb250..04e65d91c4793 100644 --- a/backend/windmill-worker/src/worker.rs +++ b/backend/windmill-worker/src/worker.rs @@ -315,16 +315,73 @@ lazy_static::lazy_static! { .and_then(|x| x.parse::().ok()) .unwrap_or(false); + pub static ref UNSHARE_TINI_PATH: String = { + std::env::var("UNSHARE_TINI_PATH").unwrap_or_else(|_| "tini".to_string()) + }; + + // --fork is required for unshare to work with --pid --mount-proc. + // When tini is available, it runs as PID 1 inside the forked namespace for proper signal handling. pub static ref UNSHARE_ISOLATION_FLAGS: String = { std::env::var("UNSHARE_ISOLATION_FLAGS") .unwrap_or_else(|_| "--user --map-root-user --pid --fork --mount-proc".to_string()) }; + // Check if tini is available for proper PID 1 handling in unshare namespaces. + // tini handles OOM signals correctly, returning exit code 137 instead of sigprocmask errors. + pub static ref TINI_AVAILABLE: Option = { + let tini_path = UNSHARE_TINI_PATH.as_str(); + let test_result = std::process::Command::new(tini_path) + .args(["-s", "--", "true"]) + .output(); + + match test_result { + Ok(output) if output.status.success() => { + tracing::info!("tini available at: {}", tini_path); + Some(tini_path.to_string()) + } + Ok(output) => { + let stderr = String::from_utf8_lossy(&output.stderr); + tracing::warn!( + "tini test failed: {}. Proceeding without tini (OOM exit codes may be incorrect).", + stderr.trim() + ); + None + } + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + tracing::warn!( + "tini not found at '{}'. 
Install tini for correct OOM exit codes, or set UNSHARE_TINI_PATH.", + tini_path + ); + } else { + tracing::warn!( + "Failed to test tini: {}. Proceeding without tini.", + e + ); + } + None + } + } + }; + pub static ref UNSHARE_PATH: Option = { let flags = UNSHARE_ISOLATION_FLAGS.as_str(); let mut test_cmd_args: Vec<&str> = flags.split_whitespace().collect(); - test_cmd_args.push("--"); - test_cmd_args.push("true"); + + // Build the test command based on whether tini is available + // Note: --fork should already be in the flags for proper namespace setup + if let Some(tini_path) = TINI_AVAILABLE.as_ref() { + // Test with tini: unshare -- tini -s -- true + test_cmd_args.push("--"); + test_cmd_args.push(tini_path.as_str()); + test_cmd_args.push("-s"); + test_cmd_args.push("--"); + test_cmd_args.push("true"); + } else { + // Fallback without tini: unshare -- true + test_cmd_args.push("--"); + test_cmd_args.push("true"); + } let test_result = std::process::Command::new("unshare") .args(&test_cmd_args) @@ -332,7 +389,11 @@ lazy_static::lazy_static! { match test_result { Ok(output) if output.status.success() => { - tracing::info!("PID namespace isolation enabled. Flags: {}", flags); + if TINI_AVAILABLE.is_some() { + tracing::info!("PID namespace isolation enabled with tini. Flags: {}", flags); + } else { + tracing::info!("PID namespace isolation enabled. Flags: {}", flags); + } Some("unshare".to_string()) }, Ok(output) => { @@ -1140,12 +1201,6 @@ pub async fn run_worker( if *ENABLE_UNSHARE_PID { // Access UNSHARE_PATH to trigger lazy_static initialization and test let _ = &*UNSHARE_PATH; - - tracing::info!( - worker = %worker_name, hostname = %hostname, - "PID namespace isolation enabled via unshare with flags: {}", - UNSHARE_ISOLATION_FLAGS.as_str() - ); } let start_time = Instant::now(); From b19f7dbddc21bb9d422d11e9f606801fac99f478 Mon Sep 17 00:00:00 2001 From: Alexander Petric Date: Mon, 5 Jan 2026 16:02:32 +0000 Subject: [PATCH 2/8] fix ci --- .github/workflows/docker-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index d6b55aaa7895e..b46677304f852 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -593,7 +593,7 @@ jobs: ${{ steps.meta-ee-public.outputs.labels }} build_ee_slim: - if: ${{ startsWith(github.ref, 'refs/tags/v') }} || ((github.event_name != 'workflow_dispatch') || (github.event.inputs.slim)) + if: ${{ startsWith(github.ref, 'refs/tags/v') || (github.event_name == 'workflow_dispatch' && github.event.inputs.slim) }} needs: [build_ee] runs-on: ubicloud steps: From a7813506fa6a4a114c0cc0abedbef27a13f19d78 Mon Sep 17 00:00:00 2001 From: Alexander Petric Date: Mon, 5 Jan 2026 16:28:38 +0000 Subject: [PATCH 3/8] ci as nsjail --- .github/workflows/docker-image.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index b46677304f852..526812439f9d6 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -593,13 +593,15 @@ jobs: ${{ steps.meta-ee-public.outputs.labels }} build_ee_slim: - if: ${{ startsWith(github.ref, 'refs/tags/v') || (github.event_name == 'workflow_dispatch' && github.event.inputs.slim) }} needs: [build_ee] runs-on: ubicloud + if: (github.event_name != 'pull_request') && ((github.event_name != 'workflow_dispatch') || (github.event.inputs.ee || github.event.inputs.slim)) + steps: - uses: 
actions/checkout@v4 with: fetch-depth: 0 + ref: ${{ github.ref }} # - name: Set up Docker Buildx # uses: docker/setup-buildx-action@v2 @@ -612,10 +614,13 @@ jobs: with: images: | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-ee-slim + flavor: | + latest=false tags: | - type=ref,event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} + type=sha,enable=true,priority=100,prefix=,suffix=,format=short + type=ref,event=branch - name: Login to registry uses: docker/login-action@v3 @@ -624,6 +629,11 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Update Dockerfile image reference + run: | + sed -i 's|COPY --from=ghcr.io/windmill-labs/windmill-ee:dev|COPY --from=ghcr.io/${{ env.IMAGE_NAME }}-ee:${{ env.DEV_SHA }}|' ./docker/DockerfileSlimEe + cat ./docker/DockerfileSlimEe | grep "COPY --from" + - name: Build and push publicly ee uses: depot/build-push-action@v1 with: From c738c770f35f676913ca0dce0759ede804792095 Mon Sep 17 00:00:00 2001 From: Alexander Petric Date: Mon, 5 Jan 2026 16:56:15 +0000 Subject: [PATCH 4/8] simplify --- .github/workflows/docker-image.yml | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 526812439f9d6..a8d6ea2f44267 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -601,7 +601,6 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - ref: ${{ github.ref }} # - name: Set up Docker Buildx # uses: docker/setup-buildx-action@v2 @@ -614,13 +613,10 @@ jobs: with: images: | ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-ee-slim - flavor: | - latest=false tags: | + type=ref,event=branch type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} - type=sha,enable=true,priority=100,prefix=,suffix=,format=short - type=ref,event=branch - name: Login to registry uses: docker/login-action@v3 @@ -629,11 +625,6 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Update Dockerfile image reference - run: | - sed -i 's|COPY --from=ghcr.io/windmill-labs/windmill-ee:dev|COPY --from=ghcr.io/${{ env.IMAGE_NAME }}-ee:${{ env.DEV_SHA }}|' ./docker/DockerfileSlimEe - cat ./docker/DockerfileSlimEe | grep "COPY --from" - - name: Build and push publicly ee uses: depot/build-push-action@v1 with: From 97cdb2a4ce8264449d4276b10ef01680cef4234d Mon Sep 17 00:00:00 2001 From: Alexander Petric Date: Mon, 5 Jan 2026 18:00:33 +0000 Subject: [PATCH 5/8] fix flaky go integration test --- integration_tests/test/wmill_integration_test_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/integration_tests/test/wmill_integration_test_utils.py b/integration_tests/test/wmill_integration_test_utils.py index b817cd4659d47..260986ef9b43d 100644 --- a/integration_tests/test/wmill_integration_test_utils.py +++ b/integration_tests/test/wmill_integration_test_utils.py @@ -56,6 +56,7 @@ def _init_client(self): return httpx.Client( base_url=self._url, headers=headers, + timeout=60.0, # Go/Rust compilation can take 10+ seconds on first run ) def _set_license_key(self): From 2d1767b657506fcbad913ea6729c249c293fbbd2 Mon Sep 17 00:00:00 2001 From: pyranota Date: Tue, 6 Jan 2026 17:10:36 +0100 Subject: [PATCH 6/8] feat(python): restart installation OOM Signed-off-by: pyranota --- .../windmill-worker/src/ansible_executor.rs | 1 + .../windmill-worker/src/python_executor.rs | 79 +++++++++++++++---- backend/windmill-worker/src/ruby_executor.rs | 3 +- 
.../windmill-worker/src/worker_lockfiles.rs | 1 + 4 files changed, 68 insertions(+), 16 deletions(-) diff --git a/backend/windmill-worker/src/ansible_executor.rs b/backend/windmill-worker/src/ansible_executor.rs index 3da3fa619d8da..5795c1ccd7d37 100644 --- a/backend/windmill-worker/src/ansible_executor.rs +++ b/backend/windmill-worker/src/ansible_executor.rs @@ -410,6 +410,7 @@ async fn handle_ansible_python_deps( worker_dir, &mut Some(occupancy_metrics), PyVAlias::default().into(), + None, ) .await?; additional_python_paths.append(&mut venv_path); diff --git a/backend/windmill-worker/src/python_executor.rs b/backend/windmill-worker/src/python_executor.rs index 3f64641813033..2020a20b6b732 100644 --- a/backend/windmill-worker/src/python_executor.rs +++ b/backend/windmill-worker/src/python_executor.rs @@ -7,6 +7,7 @@ use std::{ sync::Arc, }; +use std::os::unix::process::ExitStatusExt; use anyhow::anyhow; use itertools::Itertools; use regex::Regex; @@ -1299,6 +1300,7 @@ Returned from server: py_version - {:?}, py_version_v2 - {:?} worker_dir, occupancy_metrics, pyv.clone(), + None, ) .await?; additional_python_paths.append(&mut venv_path); @@ -1532,6 +1534,7 @@ pub async fn handle_python_reqs( worker_dir: &str, _occupancy_metrics: &mut Option<&mut OccupancyMetrics>, py_version: PyV, + reduced_concurrent_downloads: Option, ) -> error::Result> { let worker_dir = worker_dir.to_string(); @@ -1585,8 +1588,10 @@ pub async fn handle_python_reqs( } // Parallelism level (N) - let parallel_limit = // Semaphore will panic if value less then 1 - PY_CONCURRENT_DOWNLOADS.clamp(1, 30); + let parallel_limit = reduced_concurrent_downloads + .unwrap_or(*PY_CONCURRENT_DOWNLOADS) + .clamp(1, 30); + // Semaphore will panic if value less then 1 tracing::info!( workspace_id = %w_id, @@ -1616,7 +1621,7 @@ pub async fn handle_python_reqs( // Find out if there is already cached dependencies // If so, skip them let mut in_cache = vec![]; - for req in requirements { + for req in &requirements { // Ignore python version annotation backed into lockfile if req.starts_with('#') || req.starts_with('-') || req.trim().is_empty() { continue; @@ -1650,8 +1655,8 @@ pub async fn handle_python_reqs( .map(|_| kill_tx.subscribe()) .collect(); - // ________ Read comments at the end of the function to get more context - let (_done_tx, mut done_rx) = tokio::sync::mpsc::channel::<()>(1); + // _______ Read comments at the end of the function to get more context + let (done_tx, mut done_rx) = tokio::sync::mpsc::channel::<()>(1); let job_id_2 = job_id.clone(); let conn_2 = conn.clone(); @@ -1739,7 +1744,6 @@ pub async fn handle_python_reqs( }; if canceled { - tracing::info!( // If there is listener on other side, workspace_id = %w_id_2, @@ -1966,22 +1970,21 @@ pub async fn handle_python_reqs( _ = kill_rx.recv() => { Box::into_pin(uv_install_proccess.kill()).await?; pids.lock().await.get_mut(i).and_then(|e| e.take()); - return Err(anyhow::anyhow!("uv pip install was canceled")); + return Err(Error::from(anyhow::anyhow!("uv pip install was canceled"))); }, (_, _, exitstatus) = async { // See tokio::process::Child::wait_with_output() for more context - // Sometimes uv_install_proccess.wait() is not exiting if stderr is not awaited before it :/ + // Sometimes uv_install_proccess.wait() is not exiting if stderr is not awaited first (stderr_future.await, stdout_future.await, Box::into_pin(uv_install_proccess.wait()).await) } => match exitstatus { Ok(status) => if !status.success() { - let code = status.code(); + let code = status.signal(); 
tracing::warn!( workspace_id = %w_id, "uv install {} did not succeed, exit status: {:?}", &req, code ); - append_logs( &job_id, w_id, @@ -1994,7 +1997,7 @@ pub async fn handle_python_reqs( ) .await; pids.lock().await.get_mut(i).and_then(|e| e.take()); - return Err(anyhow!(stderr_buf)); + return Err(Error::ExitStatus(stderr_buf, code.unwrap_or(1))); }, Err(e) => { tracing::error!( @@ -2070,13 +2073,19 @@ pub async fn handle_python_reqs( })); } - let mut failed = false; + let (mut failed, mut oom_killed) = (false, false); for (handle, (_, venv_p)) in handles.into_iter().zip(req_with_penv.into_iter()) { if let Err(e) = handle .await - .unwrap_or(Err(anyhow!("Problem by joining handle"))) + .unwrap_or(Err(Error::from(anyhow!("Problem by joining handle")))) { failed = true; + + // OOM code is 9 or 137 + if matches!(e, Error::ExitStatus(_, 9)) { + oom_killed = true; + } + append_logs( &job_id, w_id, @@ -2112,7 +2121,49 @@ pub async fn handle_python_reqs( // it will be triggered // If there is no listener, it will be dropped safely return if failed { - Err(anyhow!("Env installation did not succeed, check logs").into()) + if oom_killed && parallel_limit > 1 { + // We want to drop it and stop monitor + // new invocation will create another one + drop(done_tx); + + let reduced_limit = parallel_limit / 2; + + append_logs( + &job_id, + w_id, + format!("\n + ====================== + ===== IMPORTANT! ===== + ====================== + +Some of installations have been killed by OOM, +restarting with reduced concurrency: {parallel_limit} -> {reduced_limit} + +This is not normal behavior, please make sure all workers have enough memory.\n +"), + conn, + ) + .await; + + // restart with half of concurrency + Box::pin(handle_python_reqs( + requirements, + job_id, + w_id, + mem_peak, + _canceled_by, + conn, + _worker_name, + job_dir, + &worker_dir, + _occupancy_metrics, + py_version, + Some(reduced_limit), + )) + .await + } else { + Err(anyhow!("Env installation did not succeed, check logs").into()) + } } else { Ok(req_paths) }; diff --git a/backend/windmill-worker/src/ruby_executor.rs b/backend/windmill-worker/src/ruby_executor.rs index 0039e6e8c26c1..5b12aa51898de 100644 --- a/backend/windmill-worker/src/ruby_executor.rs +++ b/backend/windmill-worker/src/ruby_executor.rs @@ -28,8 +28,7 @@ use crate::{ }, handle_child::{self}, universal_pkg_installer::{par_install_language_dependencies_seq, RequiredDependency}, - DISABLE_NSJAIL, DISABLE_NUSER, NSJAIL_PATH, PATH_ENV, PROXY_ENVS, - RUBY_CACHE_DIR, RUBY_REPOS, + DISABLE_NSJAIL, DISABLE_NUSER, NSJAIL_PATH, PATH_ENV, PROXY_ENVS, RUBY_CACHE_DIR, RUBY_REPOS, }; lazy_static::lazy_static! 
{ static ref RUBY_CONCURRENT_DOWNLOADS: usize = std::env::var("RUBY_CONCURRENT_DOWNLOADS").ok().map(|flag| flag.parse().unwrap_or(20)).unwrap_or(20); diff --git a/backend/windmill-worker/src/worker_lockfiles.rs b/backend/windmill-worker/src/worker_lockfiles.rs index b7b11cf6af7c2..23768c07efce8 100644 --- a/backend/windmill-worker/src/worker_lockfiles.rs +++ b/backend/windmill-worker/src/worker_lockfiles.rs @@ -2447,6 +2447,7 @@ async fn python_dep( occupancy_metrics, // final_version, PyVAlias::default().into(), + None, ) .await; From eb387d10af6ec87a4cf76f16cf68646ffddcbbe9 Mon Sep 17 00:00:00 2001 From: pyranota Date: Tue, 6 Jan 2026 17:21:12 +0100 Subject: [PATCH 7/8] follow ups Signed-off-by: pyranota --- backend/windmill-worker/src/python_executor.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/backend/windmill-worker/src/python_executor.rs b/backend/windmill-worker/src/python_executor.rs index 2020a20b6b732..1e5f829f18607 100644 --- a/backend/windmill-worker/src/python_executor.rs +++ b/backend/windmill-worker/src/python_executor.rs @@ -7,7 +7,9 @@ use std::{ sync::Arc, }; +#[cfg(unix)] use std::os::unix::process::ExitStatusExt; + use anyhow::anyhow; use itertools::Itertools; use regex::Regex; @@ -1875,7 +1877,7 @@ pub async fn handle_python_reqs( if let Some(os) = windmill_common::s3_helpers::get_object_store().await { tokio::select! { // Cancel was called on the job - _ = kill_rx.recv() => return Err(anyhow::anyhow!("S3 pull was canceled")), + _ = kill_rx.recv() => return Err(Error::from(anyhow::anyhow!("S3 pull was canceled"))), pull = pull_from_tar(os, venv_p.clone(), py_version.to_cache_dir_top_level(false), None, false) => { if let Err(e) = pull { tracing::info!( @@ -1936,7 +1938,7 @@ pub async fn handle_python_reqs( ) .await; pids.lock().await.get_mut(i).and_then(|e| e.take()); - return Err(e.into()); + return Err(Error::from(e)); } }; @@ -1978,7 +1980,11 @@ pub async fn handle_python_reqs( (stderr_future.await, stdout_future.await, Box::into_pin(uv_install_proccess.wait()).await) } => match exitstatus { Ok(status) => if !status.success() { + #[cfg(unix)] let code = status.signal(); + #[cfg(not(unix))] + let code = status.code(); + tracing::warn!( workspace_id = %w_id, "uv install {} did not succeed, exit status: {:?}", @@ -2005,7 +2011,7 @@ pub async fn handle_python_reqs( "Cannot wait for uv_install_proccess, ExitStatus is Err: {e:?}", ); pids.lock().await.get_mut(i).and_then(|e| e.take()); - return Err(e.into()); + return Err(Error::from(e)); } } }; @@ -2082,7 +2088,7 @@ pub async fn handle_python_reqs( failed = true; // OOM code is 9 or 137 - if matches!(e, Error::ExitStatus(_, 9)) { + if matches!(e, Error::ExitStatus(_, 9 | 137)) { oom_killed = true; } @@ -2121,7 +2127,7 @@ pub async fn handle_python_reqs( // it will be triggered // If there is no listener, it will be dropped safely return if failed { - if oom_killed && parallel_limit > 1 { + if cfg!(unix) && oom_killed && parallel_limit > 1 { // We want to drop it and stop monitor // new invocation will create another one drop(done_tx); From 0c00cc38683368a07f44226f036a49b89b2438e9 Mon Sep 17 00:00:00 2001 From: pyranota Date: Wed, 7 Jan 2026 13:39:52 +0100 Subject: [PATCH 8/8] fix oom_score_adj Signed-off-by: pyranota --- .../windmill-worker/src/python_executor.rs | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/backend/windmill-worker/src/python_executor.rs b/backend/windmill-worker/src/python_executor.rs index 1e5f829f18607..a2919fcfce3bf 
100644 --- a/backend/windmill-worker/src/python_executor.rs +++ b/backend/windmill-worker/src/python_executor.rs @@ -8,7 +8,7 @@ use std::{ }; #[cfg(unix)] -use std::os::unix::process::ExitStatusExt; +use std::os::unix::process::ExitStatusExt; use anyhow::anyhow; use itertools::Itertools; @@ -1960,6 +1960,20 @@ pub async fn handle_python_reqs( if let Some(pid) = pids.lock().await.get_mut(i) { *pid = uv_install_proccess.id(); + #[cfg(unix)] + if let Err(e) = uv_install_proccess + .id() + .ok_or(Error::InternalErr(format!( + "failed to get PID for python installation process: {}", + &req + ))) + .and_then(|pid| write_file(&format!("/proc/{pid}"), "oom_score_adj", "1000")) + { + tracing::error!( + req = %req, + "Failed to create oom_score_adj for python dependency installation process: {e}" + ); + } } else { tracing::error!( workspace_id = %w_id, @@ -2137,7 +2151,8 @@ pub async fn handle_python_reqs( append_logs( &job_id, w_id, - format!("\n + format!( + "\n ====================== ===== IMPORTANT! ===== ====================== @@ -2146,7 +2161,8 @@ Some of installations have been killed by OOM, restarting with reduced concurrency: {parallel_limit} -> {reduced_limit} This is not normal behavior, please make sure all workers have enough memory.\n -"), +" + ), conn, ) .await;
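
Taken together, these patches make OOM kills observable end to end: tini runs as PID 1 inside the unshare namespace so a killed workload surfaces as SIGKILL/exit 137 rather than a sigprocmask error, patch 8 marks the installer subtree as the preferred OOM victim via oom_score_adj, and python_executor.rs reacts by halving the download concurrency and retrying. Below is a minimal, Linux-only sketch of those same mechanics, assuming unshare and tini are on PATH; the isolation flags are hard-coded for brevity and `sleep 1` stands in for the real `uv pip install` invocation, so this is an illustration of the technique, not the worker's exact code path.

// Linux-only sketch: unshare + tini wrapping, oom_score_adj, and OOM detection.
use std::os::unix::process::ExitStatusExt;
use std::process::Command;

/// Build `unshare <flags> -- [tini -s --] <program> <args...>`, mirroring the
/// shape of build_command_with_isolation in common.rs (flags hard-coded here).
fn isolated_command(program: &str, args: &[&str], tini: Option<&str>) -> Command {
    let mut cmd = Command::new("unshare");
    cmd.args(["--user", "--map-root-user", "--pid", "--fork", "--mount-proc", "--"]);
    if let Some(tini_path) = tini {
        // -s: subreaper mode, as in the patch; with --pid --fork tini is already
        // PID 1 of the new namespace, so orphaned children get reaped either way.
        cmd.args([tini_path, "-s", "--"]);
    }
    cmd.arg(program);
    cmd.args(args);
    cmd
}

fn main() -> std::io::Result<()> {
    // Hypothetical workload standing in for `uv pip install ...`.
    let mut child = isolated_command("sleep", &["1"], Some("tini")).spawn()?;

    // Mark the subtree as the preferred OOM victim, as patch 8 does for the
    // installer processes (children inherit oom_score_adj; 1000 is the maximum).
    let _ = std::fs::write(format!("/proc/{}/oom_score_adj", child.id()), "1000");

    let status = child.wait()?;
    // An OOM kill surfaces either as SIGKILL on this handle or as exit code 137
    // propagated up through tini; those are the two values the retry logic in
    // python_executor.rs matches on before halving the download concurrency.
    match (status.signal(), status.code()) {
        (Some(9), _) | (_, Some(137)) => eprintln!("workload was OOM-killed"),
        (sig, code) => println!("finished: signal {sig:?}, code {code:?}"),
    }
    Ok(())
}

Halving the parallelism on retry, as the patch does, trades install speed for a lower peak resident set across the concurrent resolver processes, so the second attempt usually completes on the same worker without operator intervention.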