Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 14 additions & 9 deletions libs/imbue_common/imbue/imbue_common/ratchet_testing/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,36 +105,41 @@ def _get_all_files_with_extension(
folder_path: Path,
extension: FileExtension | None,
) -> tuple[Path, ...]:
"""Get all git-tracked files in a folder (cached).
"""Get all non-gitignored files that exist on disk in a folder (cached).

If extension is provided, only files matching that extension are returned.
If extension is None, all tracked files are returned.
If extension is None, all non-ignored files are returned.

Uses git ls-files with --cached and --others to include both tracked
and untracked files while respecting .gitignore rules. Filters the
result to only files that actually exist on disk, so deleted files
that are still in the git index are excluded.
"""
glob_pattern = f"*{extension}" if extension is not None else "*"
try:
result = subprocess.run(
["git", "ls-files", glob_pattern],
["git", "ls-files", "--cached", "--others", "--exclude-standard", glob_pattern],
cwd=folder_path,
capture_output=True,
text=True,
check=True,
)
except subprocess.CalledProcessError as e:
raise GitCommandError(f"Failed to list git-tracked files in {folder_path}") from e
raise GitCommandError(f"Failed to list files in {folder_path}") from e

file_paths = [folder_path / line.strip() for line in result.stdout.splitlines() if line.strip()]
return tuple(file_paths)
return tuple(f for f in file_paths if f.exists())


def _get_non_ignored_files_with_extension(
folder_path: Path,
extension: FileExtension | None,
excluded_path_patterns: tuple[str, ...] = (),
) -> tuple[Path, ...]:
"""Get git-tracked files in a folder, with optional path exclusions.
"""Get non-gitignored files on disk in a folder, with optional path exclusions.

If extension is provided, only files matching that extension are returned.
If extension is None, all tracked files are returned.
If extension is None, all non-ignored files are returned.

Each pattern in excluded_path_patterns is matched against file paths using Path.match(),
which matches from the right for relative patterns (e.g., "test_*.py" matches any file
Expand Down Expand Up @@ -259,10 +264,10 @@ def get_ratchet_failures(
pattern: RegexPattern,
excluded_path_patterns: tuple[str, ...] = (),
) -> tuple[RatchetMatchChunk, ...]:
"""Find all regex matches in git-tracked files and return them sorted by file path and line number.
"""Find all regex matches in non-gitignored files on disk and return them sorted by file path and line number.

If extension is provided, only files matching that extension are searched.
If extension is None, all tracked files are searched.
If extension is None, all non-ignored files are searched.

Blame dates are not computed here; they are resolved on demand via _resolve_blame_dates()
when a failure message needs to be formatted.
Expand Down
45 changes: 45 additions & 0 deletions libs/imbue_common/imbue/imbue_common/ratchet_testing/core_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from imbue.imbue_common.ratchet_testing.core import LineNumber
from imbue.imbue_common.ratchet_testing.core import RatchetMatchChunk
from imbue.imbue_common.ratchet_testing.core import RegexPattern
from imbue.imbue_common.ratchet_testing.core import _get_all_files_with_extension
from imbue.imbue_common.ratchet_testing.core import _get_non_ignored_files_with_extension
from imbue.imbue_common.ratchet_testing.core import _read_file_contents
from imbue.imbue_common.ratchet_testing.core import format_ratchet_failure_message
Expand Down Expand Up @@ -156,6 +157,50 @@ def test_get_non_ignored_files_excludes_by_glob_pattern(git_repo: Path) -> None:
assert result[0].name == "main.py"


def test_get_all_files_excludes_deleted_but_tracked_files(git_repo: Path) -> None:
    """Deleted files still in the git index should not be returned."""
    # Commit a file so it is present in the git index.
    file_on_disk = git_repo / "tracked.py"
    file_on_disk.write_text("print('hello')")
    for git_cmd in (["git", "add", "."], ["git", "commit", "-m", "Add tracked file"]):
        subprocess.run(git_cmd, cwd=git_repo, check=True, capture_output=True)

    # Remove it from disk without staging the deletion; the index still lists it.
    file_on_disk.unlink()

    # The listing must be filtered to files that actually exist on disk.
    names = {path.name for path in _get_all_files_with_extension(git_repo, FileExtension(".py"))}
    assert "tracked.py" not in names


def test_get_all_files_includes_untracked_non_ignored_files(git_repo: Path) -> None:
    """Untracked files that are not gitignored should be returned."""
    # Commit one file so the repo has tracked content to compare against.
    committed_file = git_repo / "committed.py"
    committed_file.write_text("print('committed')")
    for git_cmd in (["git", "add", "."], ["git", "commit", "-m", "Initial commit"]):
        subprocess.run(git_cmd, cwd=git_repo, check=True, capture_output=True)

    # Add a second file that is never staged and is not covered by .gitignore.
    (git_repo / "untracked.py").write_text("print('untracked')")

    # Both the committed and the untracked file should appear in the listing.
    names = {path.name for path in _get_all_files_with_extension(git_repo, FileExtension(".py"))}
    assert "committed.py" in names
    assert "untracked.py" in names


def test_read_file_contents_caches_results(tmp_path: Path) -> None:
test_file = tmp_path / "test.txt"
test_file.write_text("original content")
Expand Down
35 changes: 20 additions & 15 deletions libs/mng/imbue/mng/cli/test_provision.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import json
import time
from pathlib import Path

import pluggy
Expand All @@ -9,6 +8,7 @@
from imbue.mng.cli.create import create
from imbue.mng.cli.provision import provision
from imbue.mng.cli.stop import stop
from imbue.mng.utils.polling import wait_for
from imbue.mng.utils.testing import ModalSubprocessTestEnv
from imbue.mng.utils.testing import get_short_random_string
from imbue.mng.utils.testing import run_mng_subprocess
Expand All @@ -23,7 +23,7 @@ def test_provision_existing_agent(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that provisioning an existing agent succeeds."""
agent_name = f"test-provision-{int(time.time())}"
agent_name = f"test-provision-{get_short_random_string()}"
create_test_agent(agent_name)

result = cli_runner.invoke(
Expand All @@ -44,7 +44,7 @@ def test_provision_with_user_command(
tmp_path: Path,
) -> None:
"""Test that provisioning with --user-command executes the command."""
agent_name = f"test-prov-cmd-{int(time.time())}"
agent_name = f"test-prov-cmd-{get_short_random_string()}"
marker_file = tmp_path / "provision_marker.txt"

create_test_agent(agent_name)
Expand Down Expand Up @@ -73,7 +73,7 @@ def test_provision_with_env_var(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that provisioning with --env sets environment variables."""
agent_name = f"test-prov-env-{int(time.time())}"
agent_name = f"test-prov-env-{get_short_random_string()}"

create_test_agent(agent_name)

Expand Down Expand Up @@ -111,7 +111,7 @@ def test_provision_preserves_existing_env_vars(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that provisioning preserves existing environment variables."""
agent_name = f"test-prov-env-preserve-{int(time.time())}"
agent_name = f"test-prov-env-preserve-{get_short_random_string()}"
session_name = f"{mng_test_prefix}{agent_name}"

with tmux_session_cleanup(session_name):
Expand Down Expand Up @@ -166,7 +166,7 @@ def test_provision_with_upload_file(
tmp_path: Path,
) -> None:
"""Test that provisioning with --upload-file transfers the file."""
agent_name = f"test-prov-upload-{int(time.time())}"
agent_name = f"test-prov-upload-{get_short_random_string()}"

# Create a local file to upload
local_file = tmp_path / "upload_source.txt"
Expand Down Expand Up @@ -212,7 +212,7 @@ def test_provision_with_agent_option(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that --agent option works as an alternative to positional argument."""
agent_name = f"test-prov-opt-{int(time.time())}"
agent_name = f"test-prov-opt-{get_short_random_string()}"

create_test_agent(agent_name)

Expand Down Expand Up @@ -255,7 +255,7 @@ def test_provision_json_output(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that --format json produces JSON output."""
agent_name = f"test-prov-json-{int(time.time())}"
agent_name = f"test-prov-json-{get_short_random_string()}"

create_test_agent(agent_name)

Expand Down Expand Up @@ -286,7 +286,7 @@ def test_provision_stopped_agent(
need the agent process running. Previously, provisioning a stopped agent
failed because the agent lookup required the agent to be running.
"""
agent_name = f"test-prov-stopped-{int(time.time())}"
agent_name = f"test-prov-stopped-{get_short_random_string()}"

create_test_agent(agent_name)

Expand Down Expand Up @@ -323,7 +323,7 @@ def test_provision_stopped_agent_with_user_command(
process is stopped, since provisioning operates on the host, not
the agent process.
"""
agent_name = f"test-prov-stopped-cmd-{int(time.time())}"
agent_name = f"test-prov-stopped-cmd-{get_short_random_string()}"
marker_file = tmp_path / "stopped_provision_marker.txt"

create_test_agent(agent_name)
Expand Down Expand Up @@ -365,7 +365,7 @@ def test_provision_running_agent_restarts_by_default(
The agent should be stopped before provisioning and restarted after,
and should be running after provisioning completes.
"""
agent_name = f"test-prov-restart-{int(time.time())}"
agent_name = f"test-prov-restart-{get_short_random_string()}"
session_name = create_test_agent(agent_name)

# Verify agent is running before provisioning
Expand All @@ -381,8 +381,13 @@ def test_provision_running_agent_restarts_by_default(

assert result.exit_code == 0, f"Provision failed with: {result.output}"

# Agent should still be running after provisioning (restarted)
assert tmux_session_exists(session_name), "Agent should be running after provision with restart"
# Agent should still be running after provisioning (restarted).
# Use wait_for to tolerate brief delays when the agent is restarted under heavy xdist load.
wait_for(
lambda: tmux_session_exists(session_name),
timeout=10.0,
error_message="Agent should be running after provision with restart",
)


@pytest.mark.tmux
Expand All @@ -392,7 +397,7 @@ def test_provision_running_agent_no_restart_keeps_running(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that --no-restart does not stop/restart a running agent."""
agent_name = f"test-prov-norestart-{int(time.time())}"
agent_name = f"test-prov-norestart-{get_short_random_string()}"
session_name = create_test_agent(agent_name)

# Verify agent is running before provisioning
Expand Down Expand Up @@ -423,7 +428,7 @@ def test_provision_stopped_agent_stays_stopped_with_restart(
When is_restart=True, only agents that were running before provisioning should
be restarted. A stopped agent should remain stopped.
"""
agent_name = f"test-prov-stopped-norestart-{int(time.time())}"
agent_name = f"test-prov-stopped-norestart-{get_short_random_string()}"
session_name = create_test_agent(agent_name)

# Stop the agent
Expand Down
9 changes: 7 additions & 2 deletions libs/mng/imbue/mng/cli/test_rename.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,13 @@ def test_rename_running_agent_renames_tmux_session(
assert rename_result.exit_code == 0, f"Rename failed: {rename_result.output}"
assert "Renamed agent:" in rename_result.output

# The old session should be gone, the new one should exist
assert tmux_session_exists(new_session_name), "New tmux session should exist"
# The old session should be gone, the new one should exist.
# Use wait_for to tolerate brief propagation delays under heavy xdist load.
wait_for(
lambda: tmux_session_exists(new_session_name),
timeout=10.0,
error_message=f"New tmux session {new_session_name} should exist after rename",
)
assert not tmux_session_exists(old_session_name), "Old tmux session should not exist"


Expand Down
9 changes: 4 additions & 5 deletions libs/mng/imbue/mng/cli/test_snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

import json
import subprocess
import time
from pathlib import Path
from typing import Any

Expand All @@ -27,7 +26,7 @@ def test_snapshot_create_local_agent_rejects_unsupported_provider(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that snapshot create fails for a local agent (unsupported provider)."""
agent_name = f"test-snap-create-{int(time.time())}"
agent_name = f"test-snap-create-{get_short_random_string()}"
create_test_agent(agent_name)

result = cli_runner.invoke(
Expand All @@ -48,7 +47,7 @@ def test_snapshot_create_dry_run_jsonl_resolves_local_agent(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that --dry-run with --format jsonl outputs structured data on stdout."""
agent_name = f"test-snap-dryrun-jsonl-{int(time.time())}"
agent_name = f"test-snap-dryrun-jsonl-{get_short_random_string()}"
create_test_agent(agent_name)

result = cli_runner.invoke(
Expand Down Expand Up @@ -117,7 +116,7 @@ def test_snapshot_create_on_error_continue_reports_failure(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that --on-error continue reports the error and exits 1 (doesn't crash)."""
agent_name = f"test-snap-onerror-cont-{int(time.time())}"
agent_name = f"test-snap-onerror-cont-{get_short_random_string()}"
create_test_agent(agent_name)

result = cli_runner.invoke(
Expand All @@ -138,7 +137,7 @@ def test_snapshot_create_on_error_abort_reports_failure(
plugin_manager: pluggy.PluginManager,
) -> None:
"""Test that --on-error abort also fails (with abort message)."""
agent_name = f"test-snap-onerror-abort-{int(time.time())}"
agent_name = f"test-snap-onerror-abort-{get_short_random_string()}"
create_test_agent(agent_name)

result = cli_runner.invoke(
Expand Down
4 changes: 2 additions & 2 deletions libs/mng_recursive/imbue/mng_recursive/provisioning.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def _upload_deploy_files(
# do this in parallel, since there can sometimes be a bunch of things to transfer
# first, figure out all directories and do a single mkdir -p that captures all of them:
remote_paths: list[str] = []
for dest_path, source in deploy_files.items():
for dest_path in deploy_files:
resolved_path = _resolve_remote_path(dest_path, remote_home)
remote_paths.append(shlex.quote(str(resolved_path.parent)))
mkdir_result = host.execute_command(f"mkdir -p {' '.join(remote_paths)}")
Expand All @@ -71,7 +71,7 @@ def _upload_deploy_files(
count = 0
with ConcurrencyGroupExecutor(
parent_cg=mng_ctx.concurrency_group, name="upload_deploy_files", max_workers=32
) as executor:
) as _executor:
for dest_path, source in deploy_files.items():
resolved_path = _resolve_remote_path(dest_path, remote_home)

Expand Down
Loading
Loading