diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 35ca6b2..4b0ce52 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -66,14 +66,14 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=max - - name: Build and push Copilot image + - name: Build and push Runner image uses: docker/build-push-action@v5 with: - context: ./containers/copilot + context: ./containers/runner push: true tags: | - ghcr.io/${{ github.repository }}/copilot:${{ steps.version_early.outputs.version_number }} - ghcr.io/${{ github.repository }}/copilot:latest + ghcr.io/${{ github.repository }}/runner:${{ steps.version_early.outputs.version_number }} + ghcr.io/${{ github.repository }}/runner:latest cache-from: type=gha cache-to: type=gha,mode=max @@ -153,9 +153,9 @@ jobs: Published to GitHub Container Registry: - `ghcr.io/${{ github.repository }}/squid:${{ steps.version_early.outputs.version_number }}` - - `ghcr.io/${{ github.repository }}/copilot:${{ steps.version_early.outputs.version_number }}` + - `ghcr.io/${{ github.repository }}/runner:${{ steps.version_early.outputs.version_number }}` - `ghcr.io/${{ github.repository }}/squid:latest` - - `ghcr.io/${{ github.repository }}/copilot:latest` + - `ghcr.io/${{ github.repository }}/runner:latest` EOF - name: Create GitHub Release diff --git a/AGENTS.md b/AGENTS.md index a6d696f..2f65549 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -123,13 +123,13 @@ The codebase follows a modular architecture with clear separation of concerns: 2. **Configuration Generation** (`src/squid-config.ts`, `src/docker-manager.ts`) - `generateSquidConfig()`: Creates Squid proxy configuration with domain ACL rules - - `generateDockerCompose()`: Creates Docker Compose YAML with two services (squid-proxy, copilot) + - `generateDockerCompose()`: Creates Docker Compose YAML with two services (squid-proxy, runner) - All configs are written to a temporary work directory (default: `/tmp/awf-`) 3. **Docker Management** (`src/docker-manager.ts`) - Manages container lifecycle using `execa` to run docker-compose commands - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` - - Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting + - Squid container uses healthcheck; Runner waits for Squid to be healthy before starting 4. **Type Definitions** (`src/types.ts`) - `WrapperConfig`: Main configuration interface @@ -150,13 +150,13 @@ The codebase follows a modular architecture with clear separation of concerns: - **Network:** Connected to `awf-net` at `172.30.0.10` - **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` -**Copilot Container** (`containers/copilot/`) +**Runner Container** (`containers/runner/`) - Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli - Mounts entire host filesystem at `/host` and user home directory for full access - Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support - `NET_ADMIN` capability required for iptables manipulation - Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (runner container only) 2. 
`entrypoint.sh`: Tests connectivity, then executes user command - **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) @@ -168,7 +168,7 @@ The codebase follows a modular architecture with clear separation of concerns: - Allow DNS queries - Allow traffic to Squid proxy itself - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + - **Note:** These NAT rules only apply to the runner container itself, not spawned containers ### Traffic Flow @@ -179,7 +179,7 @@ CLI generates configs (squid.conf, docker-compose.yml) ↓ Docker Compose starts Squid container (with healthcheck) ↓ -Docker Compose starts Copilot container (waits for Squid healthy) +Docker Compose starts Runner container (waits for Squid healthy) ↓ iptables rules applied in Copilot container ↓ @@ -200,8 +200,8 @@ Containers stopped, temporary files cleaned up ## Exit Code Handling -The wrapper propagates the exit code from the copilot container: -1. Command runs in copilot container +The wrapper propagates the exit code from the runner container: +1. Command runs in runner container 2. Container exits with command's exit code 3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` 4. Wrapper exits with same code @@ -236,7 +236,7 @@ The system uses a defense-in-depth cleanup strategy across four stages to preven ### Cleanup Script (`scripts/ci/cleanup.sh`) Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) +- Containers by name (`awf-squid`, `awf-runner`) - All docker-compose services from work directories - Unused containers (`docker container prune -f`) - Unused networks (`docker network prune -f`) - **critical for subnet pool management** @@ -249,7 +249,7 @@ Removes all awf resources: All temporary files are created in `workDir` (default: `/tmp/awf-`): - `squid.conf`: Generated Squid proxy configuration - `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `runner-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) - `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) Use `--keep-containers` to preserve containers and files after execution for debugging. 
@@ -268,25 +268,25 @@ Copilot CLI logs are automatically preserved for debugging: **Directory Structure:** - Container writes logs to: `~/.copilot/logs/` (Copilot's default location) -- Volume mount maps to: `${workDir}/copilot-logs/` -- After cleanup: Logs moved to `/tmp/copilot-logs-` (if they exist) +- Volume mount maps to: `${workDir}/runner-logs/` +- After cleanup: Logs moved to `/tmp/runner-logs-` (if they exist) **Automatic Preservation:** -- If Copilot creates logs, they're automatically moved to `/tmp/copilot-logs-/` before workDir cleanup +- If Copilot creates logs, they're automatically moved to `/tmp/runner-logs-/` before workDir cleanup - Empty log directories are not preserved (avoids cluttering /tmp) -- You'll see: `[INFO] Copilot logs preserved at: /tmp/copilot-logs-` when logs exist +- You'll see: `[INFO] Runner logs preserved at: /tmp/runner-logs-` when logs exist **With `--keep-containers`:** -- Logs remain at: `${workDir}/copilot-logs/` +- Logs remain at: `${workDir}/runner-logs/` - All config files and containers are preserved -- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/copilot-logs/` +- You'll see: `[INFO] Runner logs available at: /tmp/awf-/runner-logs/` **Usage Examples:** ```bash # Logs automatically preserved (if created) awf --allow-domains github.com \ "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" -# Output: [INFO] Copilot logs preserved at: /tmp/copilot-logs-1761073250147 +# Output: [INFO] Runner logs preserved at: /tmp/runner-logs-1761073250147 # Increase log verbosity for debugging awf --allow-domains github.com \ @@ -380,7 +380,7 @@ To use a local, writable GitHub MCP server with Copilot CLI, you must: **Location:** The MCP configuration must be placed at: - `~/.copilot/mcp-config.json` (primary location) -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. +The runner container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. **Format:** ```json @@ -435,16 +435,16 @@ sudo -E awf \ ``` **Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the runner container - `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server - `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since runner container mounts HOME directory **Why `sudo -E` is required:** 1. `awf` needs sudo for iptables manipulation 2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. The GitHub MCP server Docker container inherits them from the copilot container's environment +3. These variables are passed into the runner container via the HOME directory mount +4. 
The GitHub MCP server Docker container inherits them from the runner container's environment ### Troubleshooting @@ -523,7 +523,7 @@ The firewall implements comprehensive logging at two levels: ### Key Files - `src/squid-config.ts` - Generates Squid config with custom `firewall_detailed` logformat -- `containers/copilot/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic +- `containers/runner/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic - `src/squid-config.test.ts` - Tests for logging configuration ### Squid Log Format diff --git a/CLAUDE.md b/CLAUDE.md index 4fe3795..855a685 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -181,7 +181,7 @@ The codebase follows a modular architecture with clear separation of concerns: - Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support - `NET_ADMIN` capability required for iptables manipulation - Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (runner container only) 2. `entrypoint.sh`: Tests connectivity, then executes user command - **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) @@ -193,7 +193,7 @@ The codebase follows a modular architecture with clear separation of concerns: - Allow DNS queries - Allow traffic to Squid proxy itself - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + - **Note:** These NAT rules only apply to the runner container itself, not spawned containers ### Traffic Flow @@ -225,8 +225,8 @@ Containers stopped, temporary files cleaned up ## Exit Code Handling -The wrapper propagates the exit code from the copilot container: -1. Command runs in copilot container +The wrapper propagates the exit code from the runner container: +1. Command runs in runner container 2. Container exits with command's exit code 3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` 4. Wrapper exits with same code @@ -261,7 +261,7 @@ The system uses a defense-in-depth cleanup strategy across four stages to preven ### Cleanup Script (`scripts/ci/cleanup.sh`) Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) +- Containers by name (`awf-squid`, `awf-runner`) - All docker-compose services from work directories - Unused containers (`docker container prune -f`) - Unused networks (`docker network prune -f`) - **critical for subnet pool management** @@ -274,7 +274,7 @@ Removes all awf resources: All temporary files are created in `workDir` (default: `/tmp/awf-`): - `squid.conf`: Generated Squid proxy configuration - `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `runner-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) - `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) Use `--keep-containers` to preserve containers and files after execution for debugging. 
@@ -293,25 +293,25 @@ Copilot CLI logs are automatically preserved for debugging: **Directory Structure:** - Container writes logs to: `~/.copilot/logs/` (Copilot's default location) -- Volume mount maps to: `${workDir}/copilot-logs/` -- After cleanup: Logs moved to `/tmp/copilot-logs-` (if they exist) +- Volume mount maps to: `${workDir}/runner-logs/` +- After cleanup: Logs moved to `/tmp/runner-logs-` (if they exist) **Automatic Preservation:** -- If Copilot creates logs, they're automatically moved to `/tmp/copilot-logs-/` before workDir cleanup +- If Copilot creates logs, they're automatically moved to `/tmp/runner-logs-/` before workDir cleanup - Empty log directories are not preserved (avoids cluttering /tmp) -- You'll see: `[INFO] Copilot logs preserved at: /tmp/copilot-logs-` when logs exist +- You'll see: `[INFO] Runner logs preserved at: /tmp/runner-logs-` when logs exist **With `--keep-containers`:** -- Logs remain at: `${workDir}/copilot-logs/` +- Logs remain at: `${workDir}/runner-logs/` - All config files and containers are preserved -- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/copilot-logs/` +- You'll see: `[INFO] Runner logs available at: /tmp/awf-/runner-logs/` **Usage Examples:** ```bash # Logs automatically preserved (if created) awf --allow-domains github.com \ "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" -# Output: [INFO] Copilot logs preserved at: /tmp/copilot-logs-1761073250147 +# Output: [INFO] Runner logs preserved at: /tmp/runner-logs-1761073250147 # Increase log verbosity for debugging awf --allow-domains github.com \ @@ -405,7 +405,7 @@ To use a local, writable GitHub MCP server with Copilot CLI, you must: **Location:** The MCP configuration must be placed at: - `~/.copilot/mcp-config.json` (primary location) -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. +The runner container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. **Format:** ```json @@ -460,16 +460,16 @@ sudo -E awf \ ``` **Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the runner container - `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server - `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since runner container mounts HOME directory **Why `sudo -E` is required:** 1. `awf` needs sudo for iptables manipulation 2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. The GitHub MCP server Docker container inherits them from the copilot container's environment +3. These variables are passed into the runner container via the HOME directory mount +4. 
The GitHub MCP server Docker container inherits them from the runner container's environment ### Troubleshooting diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d5eb3f0..5ec8a43 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -129,7 +129,7 @@ logger.success('Operation completed successfully'); │ └── types.ts # TypeScript type definitions ├── containers/ # Docker container definitions │ ├── squid/ # Squid proxy container -│ └── copilot/ # Copilot CLI container +│ └── runner/ # Runner container ├── scripts/ # Utility scripts │ └── ci/ # CI/CD scripts ├── docs/ # Documentation diff --git a/containers/copilot/Dockerfile b/containers/runner/Dockerfile similarity index 100% rename from containers/copilot/Dockerfile rename to containers/runner/Dockerfile diff --git a/containers/copilot/docker-wrapper.sh b/containers/runner/docker-wrapper.sh similarity index 100% rename from containers/copilot/docker-wrapper.sh rename to containers/runner/docker-wrapper.sh diff --git a/containers/copilot/entrypoint.sh b/containers/runner/entrypoint.sh similarity index 97% rename from containers/copilot/entrypoint.sh rename to containers/runner/entrypoint.sh index 700026f..6e3815a 100644 --- a/containers/copilot/entrypoint.sh +++ b/containers/runner/entrypoint.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -echo "[entrypoint] Agentic Workflow Firewall - Copilot Container" +echo "[entrypoint] Agentic Workflow Firewall - Runner Container" echo "[entrypoint] ==================================" # Fix DNS configuration - ensure external DNS works alongside Docker's embedded DNS diff --git a/containers/copilot/setup-iptables.sh b/containers/runner/setup-iptables.sh similarity index 100% rename from containers/copilot/setup-iptables.sh rename to containers/runner/setup-iptables.sh diff --git a/docs/architecture.md b/docs/architecture.md index e7f0102..469ec77 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -27,7 +27,7 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT │ │ └────────────────────────────┘ │ │ │ │ ▲ │ │ │ │ ┌────────┼───────────────────┐ │ │ -│ │ │ Copilot Container │ │ │ +│ │ │ Runner Container │ │ │ │ │ │ - Full filesystem access │ │ │ │ │ │ - iptables redirect │ │ │ │ │ │ - Spawns MCP servers │ │ │ @@ -47,13 +47,13 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT ### 2. Configuration Generation - **`src/squid-config.ts`**: Generates Squid proxy configuration with domain ACL rules -- **`src/docker-manager.ts`**: Generates Docker Compose YAML with two services (squid-proxy, copilot) +- **`src/docker-manager.ts`**: Generates Docker Compose YAML with two services (squid-proxy, runner) - All configs are written to a temporary work directory (default: `/tmp/awf-`) ### 3. Docker Management (`src/docker-manager.ts`) - Manages container lifecycle using `execa` to run docker-compose commands - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` -- Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting +- Squid container uses healthcheck; Runner waits for Squid to be healthy before starting ### 4. 
Type Definitions (`src/types.ts`) - `WrapperConfig`: Main configuration interface @@ -74,13 +74,13 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT - **Network:** Connected to `awf-net` at `172.30.0.10` - **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` -### Copilot Container (`containers/copilot/`) +### Runner Container (`containers/runner/`) - Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli - Mounts entire host filesystem at `/host` and user home directory for full access - Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support - `NET_ADMIN` capability required for iptables manipulation - Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (runner container only) 2. `entrypoint.sh`: Tests connectivity, then executes user command - **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) @@ -92,7 +92,7 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT - Allow DNS queries - Allow traffic to Squid proxy itself - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + - **Note:** These NAT rules only apply to the runner container itself, not spawned containers ## Traffic Flow @@ -103,7 +103,7 @@ CLI generates configs (squid.conf, docker-compose.yml) ↓ Docker Compose starts Squid container (with healthcheck) ↓ -Docker Compose starts Copilot container (waits for Squid healthy) +Docker Compose starts Runner container (waits for Squid healthy) ↓ iptables rules applied in Copilot container ↓ @@ -124,8 +124,8 @@ The wrapper generates: ### 2. Container Startup 1. **Squid proxy starts first** with healthcheck -2. **Copilot container waits** for Squid to be healthy -3. **iptables rules applied** in copilot container to redirect all HTTP/HTTPS traffic +2. **Runner container waits** for Squid to be healthy +3. **iptables rules applied** in runner container to redirect all HTTP/HTTPS traffic ### 3. Traffic Routing - All HTTP (port 80) and HTTPS (port 443) traffic → Squid proxy @@ -146,7 +146,7 @@ The wrapper generates: ### 6. Cleanup - Containers stopped and removed - Logs moved to persistent locations: - - Copilot logs → `/tmp/copilot-logs-/` (if they exist) + - Runner logs → `/tmp/runner-logs-/` (if they exist) - Squid logs → `/tmp/squid-logs-/` (if they exist) - Temporary files deleted (unless `--keep-containers` specified) - Exit code propagated from copilot command @@ -181,7 +181,7 @@ The system uses a defense-in-depth cleanup strategy across four stages to preven ### Cleanup Script (`scripts/ci/cleanup.sh`) Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) +- Containers by name (`awf-squid`, `awf-runner`) - All docker-compose services from work directories - Unused containers (`docker container prune -f`) - Unused networks (`docker network prune -f`) - **critical for subnet pool management** @@ -199,8 +199,8 @@ Removes all awf resources: ## Exit Code Handling -The wrapper propagates the exit code from the copilot container: -1. 
Command runs in copilot container +The wrapper propagates the exit code from the runner container: +1. Command runs in runner container 2. Container exits with command's exit code 3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` 4. Wrapper exits with same code @@ -210,7 +210,7 @@ The wrapper propagates the exit code from the copilot container: All temporary files are created in `workDir` (default: `/tmp/awf-`): - `squid.conf`: Generated Squid proxy configuration - `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `runner-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) - `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) Use `--keep-containers` to preserve containers and files after execution for debugging. diff --git a/docs/github_actions.md b/docs/github_actions.md index 8f7b81b..0229efd 100644 --- a/docs/github_actions.md +++ b/docs/github_actions.md @@ -103,7 +103,7 @@ To use a local, writable GitHub MCP server with Copilot CLI, you must: **Location:** The MCP configuration must be placed at: - `~/.copilot/mcp-config.json` (primary location) -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. +The runner container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. **Format:** ```json @@ -158,16 +158,16 @@ sudo -E awf \ ``` **Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the runner container - `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server - `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since runner container mounts HOME directory **Why `sudo -E` is required:** 1. `awf` needs sudo for iptables manipulation 2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. The GitHub MCP server Docker container inherits them from the copilot container's environment +3. These variables are passed into the runner container via the HOME directory mount +4. 
The GitHub MCP server Docker container inherits them from the runner container's environment ### CI/CD Configuration diff --git a/docs/logging_quickref.md b/docs/logging_quickref.md index 993c298..842e825 100644 --- a/docs/logging_quickref.md +++ b/docs/logging_quickref.md @@ -23,7 +23,7 @@ docker exec awf-squid grep "TCP_TUNNEL\|TCP_MISS" /var/log/squid/access.log sudo dmesg | grep FW_BLOCKED # From copilot container -docker exec awf-copilot dmesg | grep FW_BLOCKED +docker exec awf-runner dmesg | grep FW_BLOCKED # Using journalctl (systemd) sudo journalctl -k | grep FW_BLOCKED diff --git a/docs/quickstart.md b/docs/quickstart.md index 68a87ad..d5531d5 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -120,11 +120,11 @@ awf \ # Then inspect logs docker logs awf-squid -docker logs awf-copilot +docker logs awf-runner # Clean up manually when done -docker stop awf-squid awf-copilot -docker rm awf-squid awf-copilot +docker stop awf-squid awf-runner +docker rm awf-squid awf-runner ``` ### Multiple Domains diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2304ecb..656daa9 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -124,9 +124,9 @@ ```bash copilot --disable-builtin-mcps --prompt "..." ``` -4. Review Copilot logs for MCP connection errors: +4. Review Runner logs for MCP connection errors: ```bash - cat /tmp/copilot-logs-/*.log + cat /tmp/runner-logs-/*.log ``` ## Log Analysis @@ -145,14 +145,14 @@ sudo grep "TCP_DENIED" /tmp/squid-logs-/access.log | awk '{print $3}' **While containers are running** (with `--keep-containers`): ```bash -docker logs awf-copilot +docker logs awf-runner docker logs awf-squid ``` **After command completes:** ```bash -# Copilot logs -cat /tmp/copilot-logs-/*.log +# Runner logs +cat /tmp/runner-logs-/*.log # Squid logs (requires sudo) sudo cat /tmp/squid-logs-/access.log @@ -167,7 +167,7 @@ Blocked UDP and non-standard protocols are logged to kernel logs: sudo dmesg | grep FW_BLOCKED # From within container -docker exec awf-copilot dmesg | grep FW_BLOCKED +docker exec awf-runner dmesg | grep FW_BLOCKED ``` ## Network Issues @@ -200,7 +200,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED ``` 2. Verify iptables rules are applied: ```bash - docker exec awf-copilot iptables -t nat -L -n -v + docker exec awf-runner iptables -t nat -L -n -v ``` 3. Increase timeout in your command: ```bash @@ -224,7 +224,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED ``` 3. Verify network connectivity: ```bash - docker exec awf-copilot ping -c 3 172.30.0.10 + docker exec awf-runner ping -c 3 172.30.0.10 ``` ## Docker-in-Docker Issues @@ -236,7 +236,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED **Solution:** - Verify docker-wrapper.sh is working: ```bash - docker exec awf-copilot cat /tmp/docker-wrapper.log + docker exec awf-runner cat /tmp/docker-wrapper.log ``` - Check that spawned containers have correct network: ```bash @@ -276,7 +276,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED **Solution:** 1. Manually clean up containers: ```bash - docker rm -f awf-copilot awf-squid + docker rm -f awf-runner awf-squid ``` 2. Clean up networks: ```bash @@ -294,7 +294,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED **Solution:** 1. Manually remove old logs: ```bash - rm -rf /tmp/copilot-logs-* + rm -rf /tmp/runner-logs-* rm -rf /tmp/squid-logs-* rm -rf /tmp/awf-* ``` @@ -364,10 +364,10 @@ If you're still experiencing issues: ``` 3. 
**Review all logs:** - - Copilot logs: `/tmp/copilot-logs-/` + - Runner logs: `/tmp/runner-logs-/` - Squid logs: `/tmp/squid-logs-/` - - Docker wrapper logs: `docker exec awf-copilot cat /tmp/docker-wrapper.log` - - Container logs: `docker logs awf-copilot` + - Docker wrapper logs: `docker exec awf-runner cat /tmp/docker-wrapper.log` + - Container logs: `docker logs awf-runner` 4. **Check documentation:** - [Architecture](architecture.md) - Understand how the system works diff --git a/docs/usage.md b/docs/usage.md index 08512cf..fcc15be 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -260,14 +260,14 @@ sudo awf \ "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" # Output: -# [INFO] Copilot logs preserved at: /tmp/copilot-logs- +# [INFO] Runner logs preserved at: /tmp/runner-logs- # [INFO] Squid logs preserved at: /tmp/squid-logs- ``` **Copilot Logs:** - Contains Copilot CLI debug output and session information -- Location: `/tmp/copilot-logs-/` -- View with: `cat /tmp/copilot-logs-/*.log` +- Location: `/tmp/runner-logs-/` +- View with: `cat /tmp/runner-logs-/*.log` **Squid Logs:** - Contains all HTTP/HTTPS traffic (allowed and denied) @@ -284,7 +284,7 @@ sudo cat /tmp/squid-logs-/access.log **How it works:** - Copilot writes to `~/.copilot/logs/`, Squid writes to `/var/log/squid/` -- Volume mounts map these to `${workDir}/copilot-logs/` and `${workDir}/squid-logs/` +- Volume mounts map these to `${workDir}/runner-logs/` and `${workDir}/squid-logs/` - Before cleanup, logs are automatically moved to `/tmp/*-logs-/` (if they exist) - Empty log directories are not preserved (avoids cluttering /tmp) @@ -301,7 +301,7 @@ docker logs awf-copilot docker logs awf-squid # Access preserved logs at: -# /tmp/awf-/copilot-logs/ +# /tmp/awf-/runner-logs/ # /tmp/awf-/squid-logs/ ``` diff --git a/scripts/ci/cleanup.sh b/scripts/ci/cleanup.sh index d6ad5de..26cf6e5 100755 --- a/scripts/ci/cleanup.sh +++ b/scripts/ci/cleanup.sh @@ -12,7 +12,7 @@ echo "===========================================" # First, explicitly remove containers by name (handles orphaned containers) echo "Removing awf containers by name..." -docker rm -f awf-squid awf-copilot 2>/dev/null || true +docker rm -f awf-squid awf-runner 2>/dev/null || true # Cleanup diagnostic test containers echo "Stopping docker compose services..." 
diff --git a/src/cli-workflow.test.ts b/src/cli-workflow.test.ts index 9097558..7b2d8a8 100644 --- a/src/cli-workflow.test.ts +++ b/src/cli-workflow.test.ts @@ -3,7 +3,7 @@ import { WrapperConfig } from './types'; const baseConfig: WrapperConfig = { allowedDomains: ['github.com'], - copilotCommand: 'echo "hello"', + runnerCommand: 'echo "hello"', logLevel: 'info', keepContainers: false, workDir: '/tmp/awf-test', @@ -35,8 +35,8 @@ describe('runMainWorkflow', () => { startContainers: jest.fn().mockImplementation(async () => { callOrder.push('startContainers'); }), - runCopilotCommand: jest.fn().mockImplementation(async () => { - callOrder.push('runCopilotCommand'); + runRunnerCommand: jest.fn().mockImplementation(async () => { + callOrder.push('runRunnerCommand'); return { exitCode: 0 }; }), }; @@ -55,7 +55,7 @@ describe('runMainWorkflow', () => { 'setupHostIptables', 'writeConfigs', 'startContainers', - 'runCopilotCommand', + 'runRunnerCommand', 'performCleanup', ]); expect(exitCode).toBe(0); @@ -79,8 +79,8 @@ describe('runMainWorkflow', () => { startContainers: jest.fn().mockImplementation(async () => { callOrder.push('startContainers'); }), - runCopilotCommand: jest.fn().mockImplementation(async () => { - callOrder.push('runCopilotCommand'); + runRunnerCommand: jest.fn().mockImplementation(async () => { + callOrder.push('runRunnerCommand'); return { exitCode: 42 }; }), }; @@ -100,7 +100,7 @@ describe('runMainWorkflow', () => { 'setupHostIptables', 'writeConfigs', 'startContainers', - 'runCopilotCommand', + 'runRunnerCommand', 'performCleanup', ]); expect(logger.warn).toHaveBeenCalledWith('Command completed with exit code: 42'); diff --git a/src/cli-workflow.ts b/src/cli-workflow.ts index 67ca528..2eb043a 100644 --- a/src/cli-workflow.ts +++ b/src/cli-workflow.ts @@ -5,7 +5,7 @@ export interface WorkflowDependencies { setupHostIptables: (squidIp: string, port: number) => Promise; writeConfigs: (config: WrapperConfig) => Promise; startContainers: (workDir: string, allowedDomains: string[]) => Promise; - runCopilotCommand: ( + runRunnerCommand: ( workDir: string, allowedDomains: string[] ) => Promise<{ exitCode: number }>; @@ -52,8 +52,8 @@ export async function runMainWorkflow( await dependencies.startContainers(config.workDir, config.allowedDomains); onContainersStarted?.(); - // Step 3: Wait for copilot to complete - const result = await dependencies.runCopilotCommand(config.workDir, config.allowedDomains); + // Step 3: Wait for runner to complete + const result = await dependencies.runRunnerCommand(config.workDir, config.allowedDomains); // Step 4: Cleanup (logs will be preserved automatically if they exist) await performCleanup(); diff --git a/src/cli.ts b/src/cli.ts index ca949a0..0eea227 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -8,7 +8,7 @@ import { logger } from './logger'; import { writeConfigs, startContainers, - runCopilotCommand, + runRunnerCommand, stopContainers, cleanup, } from './docker-manager'; @@ -116,8 +116,8 @@ program 'Pass all host environment variables to container (excludes system vars like PATH, DOCKER_HOST)', false ) - .argument('', 'Copilot command to execute (wrap in quotes)') - .action(async (copilotCommand: string, options) => { + .argument('', 'Command to execute (wrap in quotes)') + .action(async (runnerCommand: string, options) => { // Parse and validate options const logLevel = options.logLevel as LogLevel; if (!['debug', 'info', 'warn', 'error'].includes(logLevel)) { @@ -147,7 +147,7 @@ program const config: WrapperConfig = { allowedDomains, - 
copilotCommand, + runnerCommand, logLevel, keepContainers: options.keepContainers, workDir: options.workDir, @@ -167,7 +167,7 @@ program // Log config with redacted secrets const redactedConfig = { ...config, - copilotCommand: redactSecrets(config.copilotCommand), + runnerCommand: redactSecrets(config.runnerCommand), }; logger.debug('Configuration:', JSON.stringify(redactedConfig, null, 2)); logger.info(`Allowed domains: ${allowedDomains.join(', ')}`); @@ -196,7 +196,7 @@ program // across multiple runs. Cleanup script will handle removal if needed. } else { logger.info(`Configuration files preserved at: ${config.workDir}`); - logger.info(`Copilot logs available at: ${config.workDir}/copilot-logs/`); + logger.info(`Runner logs available at: ${config.workDir}/runner-logs/`); logger.info(`Squid logs available at: ${config.workDir}/squid-logs/`); logger.info(`Host iptables rules preserved (--keep-containers enabled)`); } @@ -221,7 +221,7 @@ program setupHostIptables, writeConfigs, startContainers, - runCopilotCommand, + runRunnerCommand, }, { logger, diff --git a/src/docker-manager.test.ts b/src/docker-manager.test.ts index c2ec0fe..93d3298 100644 --- a/src/docker-manager.test.ts +++ b/src/docker-manager.test.ts @@ -36,7 +36,7 @@ describe('docker-manager', () => { describe('generateDockerCompose', () => { const mockConfig: WrapperConfig = { allowedDomains: ['github.com', 'npmjs.org'], - copilotCommand: 'echo "test"', + runnerCommand: 'echo "test"', logLevel: 'info', keepContainers: false, workDir: '/tmp/awf-test', @@ -48,16 +48,16 @@ describe('docker-manager', () => { const mockNetworkConfig = { subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + runnerIp: '172.30.0.20', }; it('should generate docker-compose config with GHCR images by default', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); expect(result.services['squid-proxy'].image).toBe('ghcr.io/githubnext/gh-aw-firewall/squid:latest'); - expect(result.services.copilot.image).toBe('ghcr.io/githubnext/gh-aw-firewall/copilot:latest'); + expect(result.services.runner.image).toBe('ghcr.io/githubnext/gh-aw-firewall/runner:latest'); expect(result.services['squid-proxy'].build).toBeUndefined(); - expect(result.services.copilot.build).toBeUndefined(); + expect(result.services.runner.build).toBeUndefined(); }); it('should use local build when buildLocal is true', () => { @@ -65,9 +65,9 @@ describe('docker-manager', () => { const result = generateDockerCompose(localConfig, mockNetworkConfig); expect(result.services['squid-proxy'].build).toBeDefined(); - expect(result.services.copilot.build).toBeDefined(); + expect(result.services.runner.build).toBeDefined(); expect(result.services['squid-proxy'].image).toBeUndefined(); - expect(result.services.copilot.image).toBeUndefined(); + expect(result.services.runner.image).toBeUndefined(); }); it('should use custom registry and tag', () => { @@ -79,7 +79,7 @@ describe('docker-manager', () => { const result = generateDockerCompose(customConfig, mockNetworkConfig); expect(result.services['squid-proxy'].image).toBe('docker.io/myrepo/squid:v1.0.0'); - expect(result.services.copilot.image).toBe('docker.io/myrepo/copilot:v1.0.0'); + expect(result.services.runner.image).toBe('docker.io/myrepo/runner:v1.0.0'); }); it('should configure network with correct IPs', () => { @@ -90,7 +90,7 @@ describe('docker-manager', () => { const squidNetworks = result.services['squid-proxy'].networks as { [key: string]: { ipv4_address?: string } }; 
expect(squidNetworks['awf-net'].ipv4_address).toBe('172.30.0.10'); - const copilotNetworks = result.services.copilot.networks as { [key: string]: { ipv4_address?: string } }; + const copilotNetworks = result.services.runner.networks as { [key: string]: { ipv4_address?: string } }; expect(copilotNetworks['awf-net'].ipv4_address).toBe('172.30.0.20'); }); @@ -105,10 +105,10 @@ describe('docker-manager', () => { expect(squid.ports).toContain('3128:3128'); }); - it('should configure copilot container with proxy settings', () => { + it('should configure runner container with proxy settings', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; - const env = copilot.environment as Record; + const runner = result.services.runner; + const env = runner.environment as Record; expect(env.HTTP_PROXY).toBe('http://172.30.0.10:3128'); expect(env.HTTPS_PROXY).toBe('http://172.30.0.10:3128'); @@ -116,49 +116,49 @@ describe('docker-manager', () => { expect(env.SQUID_PROXY_PORT).toBe('3128'); }); - it('should mount required volumes in copilot container', () => { + it('should mount required volumes in runner container', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; - const volumes = copilot.volumes as string[]; + const runner = result.services.runner; + const volumes = runner.volumes as string[]; expect(volumes).toContain('/:/host:rw'); expect(volumes).toContain('/tmp:/tmp:rw'); expect(volumes).toContain('/var/run/docker.sock:/var/run/docker.sock:rw'); - expect(volumes.some((v: string) => v.includes('copilot-logs'))).toBe(true); + expect(volumes.some((v: string) => v.includes('runner-logs'))).toBe(true); }); - it('should set copilot to depend on healthy squid', () => { + it('should set runner to depend on healthy squid', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; - const depends = copilot.depends_on as { [key: string]: { condition: string } }; + const runner = result.services.runner; + const depends = runner.depends_on as { [key: string]: { condition: string } }; expect(depends['squid-proxy'].condition).toBe('service_healthy'); }); - it('should add NET_ADMIN capability to copilot', () => { + it('should add NET_ADMIN capability to runner', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; + const runner = result.services.runner; - expect(copilot.cap_add).toContain('NET_ADMIN'); + expect(runner.cap_add).toContain('NET_ADMIN'); }); it('should disable TTY to prevent ANSI escape sequences', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; + const runner = result.services.runner; - expect(copilot.tty).toBe(false); + expect(runner.tty).toBe(false); }); it('should escape dollar signs in commands for docker-compose', () => { const configWithVars = { ...mockConfig, - copilotCommand: 'echo $HOME && echo ${USER}', + runnerCommand: 'echo $HOME && echo ${USER}', }; const result = generateDockerCompose(configWithVars, mockNetworkConfig); - const copilot = result.services.copilot; + const runner = result.services.runner; // Docker compose requires $$ to represent a literal $ - expect(copilot.command).toEqual(['/bin/bash', '-c', 'echo $$HOME && echo $${USER}']); + expect(runner.command).toEqual(['/bin/bash', '-c', 'echo $$HOME && echo $${USER}']); }); it('should pass 
through GITHUB_TOKEN when present in environment', () => { @@ -167,7 +167,7 @@ describe('docker-manager', () => { try { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const env = result.services.copilot.environment as Record; + const env = result.services.runner.environment as Record; expect(env.GITHUB_TOKEN).toBe('ghp_testtoken123'); } finally { if (originalEnv !== undefined) { @@ -184,7 +184,7 @@ describe('docker-manager', () => { try { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const env = result.services.copilot.environment as Record; + const env = result.services.runner.environment as Record; expect(env.GITHUB_TOKEN).toBeUndefined(); } finally { if (originalEnv !== undefined) { @@ -202,8 +202,8 @@ describe('docker-manager', () => { }, }; const result = generateDockerCompose(configWithEnv, mockNetworkConfig); - const copilot = result.services.copilot; - const env = copilot.environment as Record; + const runner = result.services.runner; + const env = runner.environment as Record; expect(env.CUSTOM_VAR).toBe('custom_value'); expect(env.ANOTHER_VAR).toBe('another_value'); @@ -217,8 +217,8 @@ describe('docker-manager', () => { try { const configWithEnvAll = { ...mockConfig, envAll: true }; const result = generateDockerCompose(configWithEnvAll, mockNetworkConfig); - const copilot = result.services.copilot; - const env = copilot.environment as Record; + const runner = result.services.runner; + const env = runner.environment as Record; // Should NOT pass through excluded vars expect(env.PATH).not.toBe(originalPath); @@ -233,10 +233,10 @@ describe('docker-manager', () => { it('should configure DNS to use Google DNS', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; + const runner = result.services.runner; - expect(copilot.dns).toEqual(['8.8.8.8', '8.8.4.4']); - expect(copilot.dns_search).toEqual([]); + expect(runner.dns).toEqual(['8.8.8.8', '8.8.4.4']); + expect(runner.dns_search).toEqual([]); }); it('should override environment variables with additionalEnv', () => { @@ -251,7 +251,7 @@ describe('docker-manager', () => { }, }; const result = generateDockerCompose(configWithOverride, mockNetworkConfig); - const env = result.services.copilot.environment as Record; + const env = result.services.runner.environment as Record; // additionalEnv should win expect(env.GITHUB_TOKEN).toBe('overridden_token'); diff --git a/src/docker-manager.ts b/src/docker-manager.ts index 9a7865d..a9f25ef 100644 --- a/src/docker-manager.ts +++ b/src/docker-manager.ts @@ -77,7 +77,7 @@ export function subnetsOverlap(subnet1: string, subnet2: string): boolean { * Generates a random subnet in Docker's private IP range that doesn't conflict with existing networks * Uses 172.16-31.x.0/24 range (Docker's default bridge network range) */ -async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string; copilotIp: string }> { +async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string; runnerIp: string }> { const existingSubnets = await getExistingDockerSubnets(); const MAX_RETRIES = 50; @@ -94,8 +94,8 @@ async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string if (!hasConflict) { const squidIp = `172.${secondOctet}.${thirdOctet}.10`; - const copilotIp = `172.${secondOctet}.${thirdOctet}.20`; - return { subnet, squidIp, copilotIp }; + const runnerIp = `172.${secondOctet}.${thirdOctet}.20`; + return { subnet, squidIp, runnerIp }; } 
logger.debug(`Subnet ${subnet} conflicts with existing network, retrying... (attempt ${attempt + 1}/${MAX_RETRIES})`); @@ -113,7 +113,7 @@ async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string */ export function generateDockerCompose( config: WrapperConfig, - networkConfig: { subnet: string; squidIp: string; copilotIp: string } + networkConfig: { subnet: string; squidIp: string; runnerIp: string } ): DockerComposeConfig { const projectRoot = path.join(__dirname, '..'); @@ -154,7 +154,7 @@ export function generateDockerCompose( }; } - // Build environment variables for copilot container + // Build environment variables for runner container // System variables that must be overridden or excluded (would break container operation) const EXCLUDED_ENV_VARS = new Set([ 'PATH', // Must use container's PATH @@ -205,18 +205,18 @@ export function generateDockerCompose( Object.assign(environment, config.additionalEnv); } - // Copilot service configuration - const copilotService: any = { - container_name: 'awf-copilot', + // Runner service configuration + const runnerService: any = { + container_name: 'awf-runner', networks: { 'awf-net': { - ipv4_address: networkConfig.copilotIp, + ipv4_address: networkConfig.runnerIp, }, }, dns: ['8.8.8.8', '8.8.4.4'], // Use Google DNS instead of Docker's embedded DNS dns_search: [], // Disable DNS search domains to prevent embedded DNS fallback volumes: [ - // Mount host filesystem for copilot access + // Mount host filesystem for runner access '/:/host:rw', '/tmp:/tmp:rw', `${process.env.HOME}:${process.env.HOME}:rw`, @@ -227,8 +227,8 @@ export function generateDockerCompose( // Override host's .docker directory with clean config to prevent Docker CLI // from reading host's context (e.g., desktop-linux pointing to wrong socket) `${config.workDir}/.docker:${process.env.HOME}/.docker:rw`, - // Mount copilot logs directory to workDir for persistence - `${config.workDir}/copilot-logs:${process.env.HOME}/.copilot/logs:rw`, + // Mount runner logs directory to workDir for persistence + `${config.workDir}/runner-logs:${process.env.HOME}/.copilot/logs:rw`, ], environment, depends_on: { @@ -240,15 +240,15 @@ export function generateDockerCompose( stdin_open: true, tty: false, // Disable TTY to prevent ANSI escape sequences in logs // Escape $ with $$ for Docker Compose variable interpolation - command: ['/bin/bash', '-c', config.copilotCommand.replace(/\$/g, '$$$$')], + command: ['/bin/bash', '-c', config.runnerCommand.replace(/\$/g, '$$$$')], }; // Use GHCR image or build locally if (useGHCR) { - copilotService.image = `${registry}/copilot:${tag}`; + runnerService.image = `${registry}/runner:${tag}`; } else { - copilotService.build = { - context: path.join(projectRoot, 'containers/copilot'), + runnerService.build = { + context: path.join(projectRoot, 'containers/runner'), dockerfile: 'Dockerfile', }; } @@ -256,7 +256,7 @@ export function generateDockerCompose( return { services: { 'squid-proxy': squidService, - 'copilot': copilotService, + 'runner': runnerService, }, networks: { 'awf-net': { @@ -295,12 +295,12 @@ export async function writeConfigs(config: WrapperConfig): Promise { ); logger.debug(`Docker config written to: ${dockerConfigDir}/config.json`); - // Create copilot logs directory for persistence - const copilotLogsDir = path.join(config.workDir, 'copilot-logs'); - if (!fs.existsSync(copilotLogsDir)) { - fs.mkdirSync(copilotLogsDir, { recursive: true }); + // Create runner logs directory for persistence + const runnerLogsDir = 
path.join(config.workDir, 'runner-logs'); + if (!fs.existsSync(runnerLogsDir)) { + fs.mkdirSync(runnerLogsDir, { recursive: true }); } - logger.debug(`Copilot logs directory created at: ${copilotLogsDir}`); + logger.debug(`Runner logs directory created at: ${runnerLogsDir}`); // Create squid logs directory for persistence // Note: Squid runs as user 'proxy' (UID 13, GID 13 in ubuntu/squid image) @@ -315,9 +315,9 @@ export async function writeConfigs(config: WrapperConfig): Promise { const networkConfig = { subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + runnerIp: '172.30.0.20', }; - logger.debug(`Using network config: ${networkConfig.subnet} (squid: ${networkConfig.squidIp}, copilot: ${networkConfig.copilotIp})`); + logger.debug(`Using network config: ${networkConfig.subnet} (squid: ${networkConfig.squidIp}, runner: ${networkConfig.runnerIp})`); // Write Squid config const squidConfig = generateSquidConfig({ @@ -408,7 +408,7 @@ export async function startContainers(workDir: string, allowedDomains: string[]) // This handles orphaned containers from failed/interrupted previous runs logger.debug('Removing any existing containers with conflicting names...'); try { - await execa('docker', ['rm', '-f', 'awf-squid', 'awf-copilot'], { + await execa('docker', ['rm', '-f', 'awf-squid', 'awf-runner'], { reject: false, }); } catch (error) { @@ -481,15 +481,15 @@ export async function startContainers(workDir: string, allowedDomains: string[]) } /** - * Runs the copilot command in the container and reports any blocked domains + * Runs the runner command in the container and reports any blocked domains */ -export async function runCopilotCommand(workDir: string, allowedDomains: string[]): Promise<{ exitCode: number; blockedDomains: string[] }> { - logger.info('Executing copilot command...'); +export async function runRunnerCommand(workDir: string, allowedDomains: string[]): Promise<{ exitCode: number; blockedDomains: string[] }> { + logger.info('Executing runner command...'); try { // Stream logs in real-time using docker logs -f (follow mode) // Run this in the background and wait for the container to exit separately - const logsProcess = execa('docker', ['logs', '-f', 'awf-copilot'], { + const logsProcess = execa('docker', ['logs', '-f', 'awf-runner'], { stdio: 'inherit', reject: false, }); @@ -497,7 +497,7 @@ export async function runCopilotCommand(workDir: string, allowedDomains: string[ // Wait for the container to exit (this will run concurrently with log streaming) const { stdout: exitCodeStr } = await execa('docker', [ 'wait', - 'awf-copilot', + 'awf-runner', ]); const exitCode = parseInt(exitCodeStr.trim(), 10); @@ -505,7 +505,7 @@ export async function runCopilotCommand(workDir: string, allowedDomains: string[ // Wait for the logs process to finish (it should exit automatically when container stops) await logsProcess; - logger.debug(`Copilot exit code: ${exitCode}`); + logger.debug(`Runner exit code: ${exitCode}`); // Small delay to ensure Squid logs are flushed to disk await new Promise(resolve => setTimeout(resolve, 500)); @@ -554,7 +554,7 @@ export async function runCopilotCommand(workDir: string, allowedDomains: string[ return { exitCode, blockedDomains: blockedTargets.map(b => b.domain) }; } catch (error) { - logger.error('Failed to run copilot command:', error); + logger.error('Failed to run runner command:', error); throw error; } } @@ -584,7 +584,7 @@ export async function stopContainers(workDir: string, keepContainers: boolean): /** * Cleans up temporary 
files - * Preserves copilot logs by moving them to a persistent location before cleanup + * Preserves runner logs by moving them to a persistent location before cleanup */ export async function cleanup(workDir: string, keepFiles: boolean): Promise { if (keepFiles) { @@ -597,15 +597,15 @@ export async function cleanup(workDir: string, keepFiles: boolean): Promise 0) { - const preservedLogsDir = path.join(os.tmpdir(), `copilot-logs-${timestamp}`); + // Preserve runner logs before cleanup by moving them to /tmp + const runnerLogsDir = path.join(workDir, 'runner-logs'); + if (fs.existsSync(runnerLogsDir) && fs.readdirSync(runnerLogsDir).length > 0) { + const preservedLogsDir = path.join(os.tmpdir(), `runner-logs-${timestamp}`); try { - fs.renameSync(copilotLogsDir, preservedLogsDir); - logger.info(`Copilot logs preserved at: ${preservedLogsDir}`); + fs.renameSync(runnerLogsDir, preservedLogsDir); + logger.info(`Runner logs preserved at: ${preservedLogsDir}`); } catch (error) { - logger.debug('Could not preserve copilot logs:', error); + logger.debug('Could not preserve runner logs:', error); } } diff --git a/src/host-iptables.test.ts b/src/host-iptables.test.ts index 46afd8c..b330ca1 100644 --- a/src/host-iptables.test.ts +++ b/src/host-iptables.test.ts @@ -35,7 +35,7 @@ describe('host-iptables', () => { expect(result).toEqual({ subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + runnerIp: '172.30.0.20', }); // Should only check if network exists, not create it @@ -59,7 +59,7 @@ describe('host-iptables', () => { expect(result).toEqual({ subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + runnerIp: '172.30.0.20', }); expect(mockedExeca).toHaveBeenCalledWith('docker', ['network', 'inspect', 'awf-net']); diff --git a/src/host-iptables.ts b/src/host-iptables.ts index b811fd1..17050b9 100644 --- a/src/host-iptables.ts +++ b/src/host-iptables.ts @@ -26,12 +26,12 @@ async function getNetworkBridgeName(): Promise { /** * Creates the dedicated firewall network if it doesn't exist - * Returns the Squid and Copilot IPs + * Returns the Squid and Runner IPs */ export async function ensureFirewallNetwork(): Promise<{ subnet: string; squidIp: string; - copilotIp: string; + runnerIp: string; }> { logger.debug(`Ensuring firewall network '${NETWORK_NAME}' exists...`); @@ -63,7 +63,7 @@ export async function ensureFirewallNetwork(): Promise<{ return { subnet: NETWORK_SUBNET, squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + runnerIp: '172.30.0.20', }; } diff --git a/src/types.ts b/src/types.ts index 87de196..e3b2776 100644 --- a/src/types.ts +++ b/src/types.ts @@ -4,7 +4,7 @@ export interface WrapperConfig { allowedDomains: string[]; - copilotCommand: string; + runnerCommand: string; logLevel: LogLevel; keepContainers: boolean; workDir: string; diff --git a/tests/fixtures/cleanup.ts b/tests/fixtures/cleanup.ts index cb54806..bb50999 100644 --- a/tests/fixtures/cleanup.ts +++ b/tests/fixtures/cleanup.ts @@ -25,7 +25,7 @@ export class Cleanup { async removeContainers(): Promise { this.log('Removing awf containers by name...'); try { - await execa('docker', ['rm', '-f', 'awf-squid', 'awf-copilot']); + await execa('docker', ['rm', '-f', 'awf-squid', 'awf-runner']); } catch (error) { // Ignore errors (containers may not exist) } diff --git a/tests/integration/basic-firewall.test.ts b/tests/integration/basic-firewall.test.ts index d80d59e..c2fbb72 100644 --- a/tests/integration/basic-firewall.test.ts +++ b/tests/integration/basic-firewall.test.ts @@ -153,13 
+153,13 @@ describe('Basic Firewall Functionality', () => { const squidRunning = await docker.isRunning('awf-squid'); expect(squidRunning).toBe(true); - // Verify copilot container still exists (may have exited) - const copilotInfo = await docker.inspect('awf-copilot'); - expect(copilotInfo).not.toBeNull(); + // Verify runner container still exists (may have exited) + const runnerInfo = await docker.inspect('awf-runner'); + expect(runnerInfo).not.toBeNull(); // Clean up manually await docker.stop('awf-squid'); await docker.rm('awf-squid', true); - await docker.rm('awf-copilot', true); + await docker.rm('awf-runner', true); }, 120000); });