From 4a5f9df449eb50a57e76769c27e51ac00992e505 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:14:32 +0000 Subject: [PATCH 1/5] Initial plan From b641a2f3d5505f2f5562d40afbb2e83a84b7beb6 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:34:27 +0000 Subject: [PATCH 2/5] Rename copilot to agent in source files and unit tests Co-authored-by: Mossaka <5447827+Mossaka@users.noreply.github.com> --- containers/{copilot => agent}/Dockerfile | 0 .../{copilot => agent}/docker-wrapper.sh | 0 containers/{copilot => agent}/entrypoint.sh | 0 .../{copilot => agent}/setup-iptables.sh | 0 src/cli-workflow.test.ts | 14 ++-- src/cli-workflow.ts | 6 +- src/cli.ts | 14 ++-- src/docker-manager.test.ts | 74 ++++++++--------- src/docker-manager.ts | 80 +++++++++---------- src/host-iptables.test.ts | 4 +- src/host-iptables.ts | 6 +- src/types.ts | 2 +- 12 files changed, 100 insertions(+), 100 deletions(-) rename containers/{copilot => agent}/Dockerfile (100%) rename containers/{copilot => agent}/docker-wrapper.sh (100%) rename containers/{copilot => agent}/entrypoint.sh (100%) rename containers/{copilot => agent}/setup-iptables.sh (100%) diff --git a/containers/copilot/Dockerfile b/containers/agent/Dockerfile similarity index 100% rename from containers/copilot/Dockerfile rename to containers/agent/Dockerfile diff --git a/containers/copilot/docker-wrapper.sh b/containers/agent/docker-wrapper.sh similarity index 100% rename from containers/copilot/docker-wrapper.sh rename to containers/agent/docker-wrapper.sh diff --git a/containers/copilot/entrypoint.sh b/containers/agent/entrypoint.sh similarity index 100% rename from containers/copilot/entrypoint.sh rename to containers/agent/entrypoint.sh diff --git a/containers/copilot/setup-iptables.sh b/containers/agent/setup-iptables.sh similarity index 100% rename from 
containers/copilot/setup-iptables.sh rename to containers/agent/setup-iptables.sh diff --git a/src/cli-workflow.test.ts b/src/cli-workflow.test.ts index 9097558..02490c6 100644 --- a/src/cli-workflow.test.ts +++ b/src/cli-workflow.test.ts @@ -3,7 +3,7 @@ import { WrapperConfig } from './types'; const baseConfig: WrapperConfig = { allowedDomains: ['github.com'], - copilotCommand: 'echo "hello"', + agentCommand: 'echo "hello"', logLevel: 'info', keepContainers: false, workDir: '/tmp/awf-test', @@ -35,8 +35,8 @@ describe('runMainWorkflow', () => { startContainers: jest.fn().mockImplementation(async () => { callOrder.push('startContainers'); }), - runCopilotCommand: jest.fn().mockImplementation(async () => { - callOrder.push('runCopilotCommand'); + runAgentCommand: jest.fn().mockImplementation(async () => { + callOrder.push('runAgentCommand'); return { exitCode: 0 }; }), }; @@ -55,7 +55,7 @@ describe('runMainWorkflow', () => { 'setupHostIptables', 'writeConfigs', 'startContainers', - 'runCopilotCommand', + 'runAgentCommand', 'performCleanup', ]); expect(exitCode).toBe(0); @@ -79,8 +79,8 @@ describe('runMainWorkflow', () => { startContainers: jest.fn().mockImplementation(async () => { callOrder.push('startContainers'); }), - runCopilotCommand: jest.fn().mockImplementation(async () => { - callOrder.push('runCopilotCommand'); + runAgentCommand: jest.fn().mockImplementation(async () => { + callOrder.push('runAgentCommand'); return { exitCode: 42 }; }), }; @@ -100,7 +100,7 @@ describe('runMainWorkflow', () => { 'setupHostIptables', 'writeConfigs', 'startContainers', - 'runCopilotCommand', + 'runAgentCommand', 'performCleanup', ]); expect(logger.warn).toHaveBeenCalledWith('Command completed with exit code: 42'); diff --git a/src/cli-workflow.ts b/src/cli-workflow.ts index 67ca528..6c21304 100644 --- a/src/cli-workflow.ts +++ b/src/cli-workflow.ts @@ -5,7 +5,7 @@ export interface WorkflowDependencies { setupHostIptables: (squidIp: string, port: number) => Promise; 
writeConfigs: (config: WrapperConfig) => Promise; startContainers: (workDir: string, allowedDomains: string[]) => Promise; - runCopilotCommand: ( + runAgentCommand: ( workDir: string, allowedDomains: string[] ) => Promise<{ exitCode: number }>; @@ -52,8 +52,8 @@ export async function runMainWorkflow( await dependencies.startContainers(config.workDir, config.allowedDomains); onContainersStarted?.(); - // Step 3: Wait for copilot to complete - const result = await dependencies.runCopilotCommand(config.workDir, config.allowedDomains); + // Step 3: Wait for agent to complete + const result = await dependencies.runAgentCommand(config.workDir, config.allowedDomains); // Step 4: Cleanup (logs will be preserved automatically if they exist) await performCleanup(); diff --git a/src/cli.ts b/src/cli.ts index ca949a0..b513ae4 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -8,7 +8,7 @@ import { logger } from './logger'; import { writeConfigs, startContainers, - runCopilotCommand, + runAgentCommand, stopContainers, cleanup, } from './docker-manager'; @@ -116,8 +116,8 @@ program 'Pass all host environment variables to container (excludes system vars like PATH, DOCKER_HOST)', false ) - .argument('', 'Copilot command to execute (wrap in quotes)') - .action(async (copilotCommand: string, options) => { + .argument('', 'Agent command to execute (wrap in quotes)') + .action(async (agentCommand: string, options) => { // Parse and validate options const logLevel = options.logLevel as LogLevel; if (!['debug', 'info', 'warn', 'error'].includes(logLevel)) { @@ -147,7 +147,7 @@ program const config: WrapperConfig = { allowedDomains, - copilotCommand, + agentCommand, logLevel, keepContainers: options.keepContainers, workDir: options.workDir, @@ -167,7 +167,7 @@ program // Log config with redacted secrets const redactedConfig = { ...config, - copilotCommand: redactSecrets(config.copilotCommand), + agentCommand: redactSecrets(config.agentCommand), }; logger.debug('Configuration:', 
JSON.stringify(redactedConfig, null, 2)); logger.info(`Allowed domains: ${allowedDomains.join(', ')}`); @@ -196,7 +196,7 @@ program // across multiple runs. Cleanup script will handle removal if needed. } else { logger.info(`Configuration files preserved at: ${config.workDir}`); - logger.info(`Copilot logs available at: ${config.workDir}/copilot-logs/`); + logger.info(`Agent logs available at: ${config.workDir}/agent-logs/`); logger.info(`Squid logs available at: ${config.workDir}/squid-logs/`); logger.info(`Host iptables rules preserved (--keep-containers enabled)`); } @@ -221,7 +221,7 @@ program setupHostIptables, writeConfigs, startContainers, - runCopilotCommand, + runAgentCommand, }, { logger, diff --git a/src/docker-manager.test.ts b/src/docker-manager.test.ts index c2ec0fe..b580801 100644 --- a/src/docker-manager.test.ts +++ b/src/docker-manager.test.ts @@ -36,7 +36,7 @@ describe('docker-manager', () => { describe('generateDockerCompose', () => { const mockConfig: WrapperConfig = { allowedDomains: ['github.com', 'npmjs.org'], - copilotCommand: 'echo "test"', + agentCommand: 'echo "test"', logLevel: 'info', keepContainers: false, workDir: '/tmp/awf-test', @@ -48,16 +48,16 @@ describe('docker-manager', () => { const mockNetworkConfig = { subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + agentIp: '172.30.0.20', }; it('should generate docker-compose config with GHCR images by default', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); expect(result.services['squid-proxy'].image).toBe('ghcr.io/githubnext/gh-aw-firewall/squid:latest'); - expect(result.services.copilot.image).toBe('ghcr.io/githubnext/gh-aw-firewall/copilot:latest'); + expect(result.services.agent.image).toBe('ghcr.io/githubnext/gh-aw-firewall/agent:latest'); expect(result.services['squid-proxy'].build).toBeUndefined(); - expect(result.services.copilot.build).toBeUndefined(); + expect(result.services.agent.build).toBeUndefined(); }); it('should 
use local build when buildLocal is true', () => { @@ -65,9 +65,9 @@ describe('docker-manager', () => { const result = generateDockerCompose(localConfig, mockNetworkConfig); expect(result.services['squid-proxy'].build).toBeDefined(); - expect(result.services.copilot.build).toBeDefined(); + expect(result.services.agent.build).toBeDefined(); expect(result.services['squid-proxy'].image).toBeUndefined(); - expect(result.services.copilot.image).toBeUndefined(); + expect(result.services.agent.image).toBeUndefined(); }); it('should use custom registry and tag', () => { @@ -79,7 +79,7 @@ describe('docker-manager', () => { const result = generateDockerCompose(customConfig, mockNetworkConfig); expect(result.services['squid-proxy'].image).toBe('docker.io/myrepo/squid:v1.0.0'); - expect(result.services.copilot.image).toBe('docker.io/myrepo/copilot:v1.0.0'); + expect(result.services.agent.image).toBe('docker.io/myrepo/agent:v1.0.0'); }); it('should configure network with correct IPs', () => { @@ -90,8 +90,8 @@ describe('docker-manager', () => { const squidNetworks = result.services['squid-proxy'].networks as { [key: string]: { ipv4_address?: string } }; expect(squidNetworks['awf-net'].ipv4_address).toBe('172.30.0.10'); - const copilotNetworks = result.services.copilot.networks as { [key: string]: { ipv4_address?: string } }; - expect(copilotNetworks['awf-net'].ipv4_address).toBe('172.30.0.20'); + const agentNetworks = result.services.agent.networks as { [key: string]: { ipv4_address?: string } }; + expect(agentNetworks['awf-net'].ipv4_address).toBe('172.30.0.20'); }); it('should configure squid container correctly', () => { @@ -105,10 +105,10 @@ describe('docker-manager', () => { expect(squid.ports).toContain('3128:3128'); }); - it('should configure copilot container with proxy settings', () => { + it('should configure agent container with proxy settings', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; - 
const env = copilot.environment as Record; + const agent = result.services.agent; + const env = agent.environment as Record; expect(env.HTTP_PROXY).toBe('http://172.30.0.10:3128'); expect(env.HTTPS_PROXY).toBe('http://172.30.0.10:3128'); @@ -116,49 +116,49 @@ describe('docker-manager', () => { expect(env.SQUID_PROXY_PORT).toBe('3128'); }); - it('should mount required volumes in copilot container', () => { + it('should mount required volumes in agent container', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; - const volumes = copilot.volumes as string[]; + const agent = result.services.agent; + const volumes = agent.volumes as string[]; expect(volumes).toContain('/:/host:rw'); expect(volumes).toContain('/tmp:/tmp:rw'); expect(volumes).toContain('/var/run/docker.sock:/var/run/docker.sock:rw'); - expect(volumes.some((v: string) => v.includes('copilot-logs'))).toBe(true); + expect(volumes.some((v: string) => v.includes('agent-logs'))).toBe(true); }); - it('should set copilot to depend on healthy squid', () => { + it('should set agent to depend on healthy squid', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; - const depends = copilot.depends_on as { [key: string]: { condition: string } }; + const agent = result.services.agent; + const depends = agent.depends_on as { [key: string]: { condition: string } }; expect(depends['squid-proxy'].condition).toBe('service_healthy'); }); - it('should add NET_ADMIN capability to copilot', () => { + it('should add NET_ADMIN capability to agent', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; + const agent = result.services.agent; - expect(copilot.cap_add).toContain('NET_ADMIN'); + expect(agent.cap_add).toContain('NET_ADMIN'); }); it('should disable TTY to prevent ANSI escape sequences', () => { const result = 
generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; + const agent = result.services.agent; - expect(copilot.tty).toBe(false); + expect(agent.tty).toBe(false); }); it('should escape dollar signs in commands for docker-compose', () => { const configWithVars = { ...mockConfig, - copilotCommand: 'echo $HOME && echo ${USER}', + agentCommand: 'echo $HOME && echo ${USER}', }; const result = generateDockerCompose(configWithVars, mockNetworkConfig); - const copilot = result.services.copilot; + const agent = result.services.agent; // Docker compose requires $$ to represent a literal $ - expect(copilot.command).toEqual(['/bin/bash', '-c', 'echo $$HOME && echo $${USER}']); + expect(agent.command).toEqual(['/bin/bash', '-c', 'echo $$HOME && echo $${USER}']); }); it('should pass through GITHUB_TOKEN when present in environment', () => { @@ -167,7 +167,7 @@ describe('docker-manager', () => { try { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const env = result.services.copilot.environment as Record; + const env = result.services.agent.environment as Record; expect(env.GITHUB_TOKEN).toBe('ghp_testtoken123'); } finally { if (originalEnv !== undefined) { @@ -184,7 +184,7 @@ describe('docker-manager', () => { try { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const env = result.services.copilot.environment as Record; + const env = result.services.agent.environment as Record; expect(env.GITHUB_TOKEN).toBeUndefined(); } finally { if (originalEnv !== undefined) { @@ -202,8 +202,8 @@ describe('docker-manager', () => { }, }; const result = generateDockerCompose(configWithEnv, mockNetworkConfig); - const copilot = result.services.copilot; - const env = copilot.environment as Record; + const agent = result.services.agent; + const env = agent.environment as Record; expect(env.CUSTOM_VAR).toBe('custom_value'); expect(env.ANOTHER_VAR).toBe('another_value'); @@ -217,8 +217,8 @@ 
describe('docker-manager', () => { try { const configWithEnvAll = { ...mockConfig, envAll: true }; const result = generateDockerCompose(configWithEnvAll, mockNetworkConfig); - const copilot = result.services.copilot; - const env = copilot.environment as Record; + const agent = result.services.agent; + const env = agent.environment as Record; // Should NOT pass through excluded vars expect(env.PATH).not.toBe(originalPath); @@ -233,10 +233,10 @@ describe('docker-manager', () => { it('should configure DNS to use Google DNS', () => { const result = generateDockerCompose(mockConfig, mockNetworkConfig); - const copilot = result.services.copilot; + const agent = result.services.agent; - expect(copilot.dns).toEqual(['8.8.8.8', '8.8.4.4']); - expect(copilot.dns_search).toEqual([]); + expect(agent.dns).toEqual(['8.8.8.8', '8.8.4.4']); + expect(agent.dns_search).toEqual([]); }); it('should override environment variables with additionalEnv', () => { @@ -251,7 +251,7 @@ describe('docker-manager', () => { }, }; const result = generateDockerCompose(configWithOverride, mockNetworkConfig); - const env = result.services.copilot.environment as Record; + const env = result.services.agent.environment as Record; // additionalEnv should win expect(env.GITHUB_TOKEN).toBe('overridden_token'); diff --git a/src/docker-manager.ts b/src/docker-manager.ts index 9a7865d..98ef898 100644 --- a/src/docker-manager.ts +++ b/src/docker-manager.ts @@ -77,7 +77,7 @@ export function subnetsOverlap(subnet1: string, subnet2: string): boolean { * Generates a random subnet in Docker's private IP range that doesn't conflict with existing networks * Uses 172.16-31.x.0/24 range (Docker's default bridge network range) */ -async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string; copilotIp: string }> { +async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string; agentIp: string }> { const existingSubnets = await getExistingDockerSubnets(); const MAX_RETRIES = 50; @@ 
-94,8 +94,8 @@ async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string if (!hasConflict) { const squidIp = `172.${secondOctet}.${thirdOctet}.10`; - const copilotIp = `172.${secondOctet}.${thirdOctet}.20`; - return { subnet, squidIp, copilotIp }; + const agentIp = `172.${secondOctet}.${thirdOctet}.20`; + return { subnet, squidIp, agentIp }; } logger.debug(`Subnet ${subnet} conflicts with existing network, retrying... (attempt ${attempt + 1}/${MAX_RETRIES})`); @@ -113,7 +113,7 @@ async function generateRandomSubnet(): Promise<{ subnet: string; squidIp: string */ export function generateDockerCompose( config: WrapperConfig, - networkConfig: { subnet: string; squidIp: string; copilotIp: string } + networkConfig: { subnet: string; squidIp: string; agentIp: string } ): DockerComposeConfig { const projectRoot = path.join(__dirname, '..'); @@ -154,7 +154,7 @@ export function generateDockerCompose( }; } - // Build environment variables for copilot container + // Build environment variables for agent container // System variables that must be overridden or excluded (would break container operation) const EXCLUDED_ENV_VARS = new Set([ 'PATH', // Must use container's PATH @@ -205,18 +205,18 @@ export function generateDockerCompose( Object.assign(environment, config.additionalEnv); } - // Copilot service configuration - const copilotService: any = { - container_name: 'awf-copilot', + // Agent service configuration + const agentService: any = { + container_name: 'awf-agent', networks: { 'awf-net': { - ipv4_address: networkConfig.copilotIp, + ipv4_address: networkConfig.agentIp, }, }, dns: ['8.8.8.8', '8.8.4.4'], // Use Google DNS instead of Docker's embedded DNS dns_search: [], // Disable DNS search domains to prevent embedded DNS fallback volumes: [ - // Mount host filesystem for copilot access + // Mount host filesystem for agent access '/:/host:rw', '/tmp:/tmp:rw', `${process.env.HOME}:${process.env.HOME}:rw`, @@ -227,8 +227,8 @@ export function 
generateDockerCompose( // Override host's .docker directory with clean config to prevent Docker CLI // from reading host's context (e.g., desktop-linux pointing to wrong socket) `${config.workDir}/.docker:${process.env.HOME}/.docker:rw`, - // Mount copilot logs directory to workDir for persistence - `${config.workDir}/copilot-logs:${process.env.HOME}/.copilot/logs:rw`, + // Mount agent logs directory to workDir for persistence + `${config.workDir}/agent-logs:${process.env.HOME}/.copilot/logs:rw`, ], environment, depends_on: { @@ -240,15 +240,15 @@ export function generateDockerCompose( stdin_open: true, tty: false, // Disable TTY to prevent ANSI escape sequences in logs // Escape $ with $$ for Docker Compose variable interpolation - command: ['/bin/bash', '-c', config.copilotCommand.replace(/\$/g, '$$$$')], + command: ['/bin/bash', '-c', config.agentCommand.replace(/\$/g, '$$$$')], }; // Use GHCR image or build locally if (useGHCR) { - copilotService.image = `${registry}/copilot:${tag}`; + agentService.image = `${registry}/agent:${tag}`; } else { - copilotService.build = { - context: path.join(projectRoot, 'containers/copilot'), + agentService.build = { + context: path.join(projectRoot, 'containers/agent'), dockerfile: 'Dockerfile', }; } @@ -256,7 +256,7 @@ export function generateDockerCompose( return { services: { 'squid-proxy': squidService, - 'copilot': copilotService, + 'agent': agentService, }, networks: { 'awf-net': { @@ -295,12 +295,12 @@ export async function writeConfigs(config: WrapperConfig): Promise { ); logger.debug(`Docker config written to: ${dockerConfigDir}/config.json`); - // Create copilot logs directory for persistence - const copilotLogsDir = path.join(config.workDir, 'copilot-logs'); - if (!fs.existsSync(copilotLogsDir)) { - fs.mkdirSync(copilotLogsDir, { recursive: true }); + // Create agent logs directory for persistence + const agentLogsDir = path.join(config.workDir, 'agent-logs'); + if (!fs.existsSync(agentLogsDir)) { + 
fs.mkdirSync(agentLogsDir, { recursive: true }); } - logger.debug(`Copilot logs directory created at: ${copilotLogsDir}`); + logger.debug(`Agent logs directory created at: ${agentLogsDir}`); // Create squid logs directory for persistence // Note: Squid runs as user 'proxy' (UID 13, GID 13 in ubuntu/squid image) @@ -315,9 +315,9 @@ export async function writeConfigs(config: WrapperConfig): Promise { const networkConfig = { subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + agentIp: '172.30.0.20', }; - logger.debug(`Using network config: ${networkConfig.subnet} (squid: ${networkConfig.squidIp}, copilot: ${networkConfig.copilotIp})`); + logger.debug(`Using network config: ${networkConfig.subnet} (squid: ${networkConfig.squidIp}, agent: ${networkConfig.agentIp})`); // Write Squid config const squidConfig = generateSquidConfig({ @@ -408,7 +408,7 @@ export async function startContainers(workDir: string, allowedDomains: string[]) // This handles orphaned containers from failed/interrupted previous runs logger.debug('Removing any existing containers with conflicting names...'); try { - await execa('docker', ['rm', '-f', 'awf-squid', 'awf-copilot'], { + await execa('docker', ['rm', '-f', 'awf-squid', 'awf-agent'], { reject: false, }); } catch (error) { @@ -481,15 +481,15 @@ export async function startContainers(workDir: string, allowedDomains: string[]) } /** - * Runs the copilot command in the container and reports any blocked domains + * Runs the agent command in the container and reports any blocked domains */ -export async function runCopilotCommand(workDir: string, allowedDomains: string[]): Promise<{ exitCode: number; blockedDomains: string[] }> { - logger.info('Executing copilot command...'); +export async function runAgentCommand(workDir: string, allowedDomains: string[]): Promise<{ exitCode: number; blockedDomains: string[] }> { + logger.info('Executing agent command...'); try { // Stream logs in real-time using docker logs -f (follow 
mode) // Run this in the background and wait for the container to exit separately - const logsProcess = execa('docker', ['logs', '-f', 'awf-copilot'], { + const logsProcess = execa('docker', ['logs', '-f', 'awf-agent'], { stdio: 'inherit', reject: false, }); @@ -497,7 +497,7 @@ export async function runCopilotCommand(workDir: string, allowedDomains: string[ // Wait for the container to exit (this will run concurrently with log streaming) const { stdout: exitCodeStr } = await execa('docker', [ 'wait', - 'awf-copilot', + 'awf-agent', ]); const exitCode = parseInt(exitCodeStr.trim(), 10); @@ -505,7 +505,7 @@ export async function runCopilotCommand(workDir: string, allowedDomains: string[ // Wait for the logs process to finish (it should exit automatically when container stops) await logsProcess; - logger.debug(`Copilot exit code: ${exitCode}`); + logger.debug(`Agent exit code: ${exitCode}`); // Small delay to ensure Squid logs are flushed to disk await new Promise(resolve => setTimeout(resolve, 500)); @@ -554,7 +554,7 @@ export async function runCopilotCommand(workDir: string, allowedDomains: string[ return { exitCode, blockedDomains: blockedTargets.map(b => b.domain) }; } catch (error) { - logger.error('Failed to run copilot command:', error); + logger.error('Failed to run agent command:', error); throw error; } } @@ -584,7 +584,7 @@ export async function stopContainers(workDir: string, keepContainers: boolean): /** * Cleans up temporary files - * Preserves copilot logs by moving them to a persistent location before cleanup + * Preserves agent logs by moving them to a persistent location before cleanup */ export async function cleanup(workDir: string, keepFiles: boolean): Promise { if (keepFiles) { @@ -597,15 +597,15 @@ export async function cleanup(workDir: string, keepFiles: boolean): Promise 0) { - const preservedLogsDir = path.join(os.tmpdir(), `copilot-logs-${timestamp}`); + // Preserve agent logs before cleanup by moving them to /tmp + const agentLogsDir = 
path.join(workDir, 'agent-logs'); + if (fs.existsSync(agentLogsDir) && fs.readdirSync(agentLogsDir).length > 0) { + const preservedLogsDir = path.join(os.tmpdir(), `agent-logs-${timestamp}`); try { - fs.renameSync(copilotLogsDir, preservedLogsDir); - logger.info(`Copilot logs preserved at: ${preservedLogsDir}`); + fs.renameSync(agentLogsDir, preservedLogsDir); + logger.info(`Agent logs preserved at: ${preservedLogsDir}`); } catch (error) { - logger.debug('Could not preserve copilot logs:', error); + logger.debug('Could not preserve agent logs:', error); } } diff --git a/src/host-iptables.test.ts b/src/host-iptables.test.ts index 46afd8c..a082503 100644 --- a/src/host-iptables.test.ts +++ b/src/host-iptables.test.ts @@ -35,7 +35,7 @@ describe('host-iptables', () => { expect(result).toEqual({ subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + agentIp: '172.30.0.20', }); // Should only check if network exists, not create it @@ -59,7 +59,7 @@ describe('host-iptables', () => { expect(result).toEqual({ subnet: '172.30.0.0/24', squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + agentIp: '172.30.0.20', }); expect(mockedExeca).toHaveBeenCalledWith('docker', ['network', 'inspect', 'awf-net']); diff --git a/src/host-iptables.ts b/src/host-iptables.ts index b811fd1..cda699d 100644 --- a/src/host-iptables.ts +++ b/src/host-iptables.ts @@ -26,12 +26,12 @@ async function getNetworkBridgeName(): Promise { /** * Creates the dedicated firewall network if it doesn't exist - * Returns the Squid and Copilot IPs + * Returns the Squid and Agent IPs */ export async function ensureFirewallNetwork(): Promise<{ subnet: string; squidIp: string; - copilotIp: string; + agentIp: string; }> { logger.debug(`Ensuring firewall network '${NETWORK_NAME}' exists...`); @@ -63,7 +63,7 @@ export async function ensureFirewallNetwork(): Promise<{ return { subnet: NETWORK_SUBNET, squidIp: '172.30.0.10', - copilotIp: '172.30.0.20', + agentIp: '172.30.0.20', }; } diff --git 
a/src/types.ts b/src/types.ts index 87de196..744b1aa 100644 --- a/src/types.ts +++ b/src/types.ts @@ -4,7 +4,7 @@ export interface WrapperConfig { allowedDomains: string[]; - copilotCommand: string; + agentCommand: string; logLevel: LogLevel; keepContainers: boolean; workDir: string; From 8973a03965d41aa854dd42930e01e63f0a0ca89b Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:38:25 +0000 Subject: [PATCH 3/5] Update integration tests, scripts, and package.json Co-authored-by: Mossaka <5447827+Mossaka@users.noreply.github.com> --- package.json | 1 - scripts/ci/cleanup.sh | 2 +- tests/fixtures/cleanup.ts | 2 +- tests/integration/basic-firewall.test.ts | 8 ++++---- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/package.json b/package.json index 1749544..fd79883 100644 --- a/package.json +++ b/package.json @@ -28,7 +28,6 @@ "domain-whitelisting", "proxy", "squid", - "copilot", "mcp", "egress-control" ], diff --git a/scripts/ci/cleanup.sh b/scripts/ci/cleanup.sh index d6ad5de..ae59652 100755 --- a/scripts/ci/cleanup.sh +++ b/scripts/ci/cleanup.sh @@ -12,7 +12,7 @@ echo "===========================================" # First, explicitly remove containers by name (handles orphaned containers) echo "Removing awf containers by name..." -docker rm -f awf-squid awf-copilot 2>/dev/null || true +docker rm -f awf-squid awf-agent 2>/dev/null || true # Cleanup diagnostic test containers echo "Stopping docker compose services..." 
diff --git a/tests/fixtures/cleanup.ts b/tests/fixtures/cleanup.ts index cb54806..ab4cb86 100644 --- a/tests/fixtures/cleanup.ts +++ b/tests/fixtures/cleanup.ts @@ -25,7 +25,7 @@ export class Cleanup { async removeContainers(): Promise { this.log('Removing awf containers by name...'); try { - await execa('docker', ['rm', '-f', 'awf-squid', 'awf-copilot']); + await execa('docker', ['rm', '-f', 'awf-squid', 'awf-agent']); } catch (error) { // Ignore errors (containers may not exist) } diff --git a/tests/integration/basic-firewall.test.ts b/tests/integration/basic-firewall.test.ts index d80d59e..c139c49 100644 --- a/tests/integration/basic-firewall.test.ts +++ b/tests/integration/basic-firewall.test.ts @@ -153,13 +153,13 @@ describe('Basic Firewall Functionality', () => { const squidRunning = await docker.isRunning('awf-squid'); expect(squidRunning).toBe(true); - // Verify copilot container still exists (may have exited) - const copilotInfo = await docker.inspect('awf-copilot'); - expect(copilotInfo).not.toBeNull(); + // Verify agent container still exists (may have exited) + const agentInfo = await docker.inspect('awf-agent'); + expect(agentInfo).not.toBeNull(); // Clean up manually await docker.stop('awf-squid'); await docker.rm('awf-squid', true); - await docker.rm('awf-copilot', true); + await docker.rm('awf-agent', true); }, 120000); }); From 28fa0451ed43f5d41b214d6ae119f3e00eb83f51 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:44:59 +0000 Subject: [PATCH 4/5] Update documentation and workflows with agent terminology Co-authored-by: Mossaka <5447827+Mossaka@users.noreply.github.com> --- .github/workflows/release.yml | 12 +- .github/workflows/test-integration.yml | 6 +- AGENTS.md | 60 +-- AGENTS.md.bak | 602 +++++++++++++++++++++++++ CLAUDE.md | 48 +- docs/architecture.md | 26 +- docs/github_actions.md | 10 +- docs/logging_quickref.md | 4 +- docs/quickstart.md | 6 +- 
docs/squid_log_filtering.md | 2 +- docs/troubleshooting.md | 24 +- docs/usage.md | 12 +- 12 files changed, 707 insertions(+), 105 deletions(-) create mode 100644 AGENTS.md.bak diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 35ca6b2..2b8aae9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -66,14 +66,14 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=max - - name: Build and push Copilot image + - name: Build and push Agent image uses: docker/build-push-action@v5 with: - context: ./containers/copilot + context: ./containers/agent push: true tags: | - ghcr.io/${{ github.repository }}/copilot:${{ steps.version_early.outputs.version_number }} - ghcr.io/${{ github.repository }}/copilot:latest + ghcr.io/${{ github.repository }}/agent:${{ steps.version_early.outputs.version_number }} + ghcr.io/${{ github.repository }}/agent:latest cache-from: type=gha cache-to: type=gha,mode=max @@ -153,9 +153,9 @@ jobs: Published to GitHub Container Registry: - `ghcr.io/${{ github.repository }}/squid:${{ steps.version_early.outputs.version_number }}` - - `ghcr.io/${{ github.repository }}/copilot:${{ steps.version_early.outputs.version_number }}` + - `ghcr.io/${{ github.repository }}/agent:${{ steps.version_early.outputs.version_number }}` - `ghcr.io/${{ github.repository }}/squid:latest` - - `ghcr.io/${{ github.repository }}/copilot:latest` + - `ghcr.io/${{ github.repository }}/agent:latest` EOF - name: Create GitHub Release diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index d5ae601..8c59715 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -86,7 +86,7 @@ jobs: path: | /tmp/*-test.log /tmp/awf-*/ - /tmp/copilot-logs-*/ + /tmp/agent-logs-*/ /tmp/squid-logs-*/ retention-days: 7 @@ -141,7 +141,7 @@ jobs: path: | /tmp/*-test.log /tmp/awf-*/ - /tmp/copilot-logs-*/ + /tmp/agent-logs-*/ /tmp/squid-logs-*/ retention-days: 7 @@ 
-196,6 +196,6 @@ jobs: path: | /tmp/*-test.log /tmp/awf-*/ - /tmp/copilot-logs-*/ + /tmp/agent-logs-*/ /tmp/squid-logs-*/ retention-days: 7 diff --git a/AGENTS.md b/AGENTS.md index 05edf2a..fa43300 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -142,17 +142,17 @@ The codebase follows a modular architecture with clear separation of concerns: - Uses `commander` for argument parsing - Orchestrates the entire workflow: config generation → container startup → command execution → cleanup - Handles signal interrupts (SIGINT/SIGTERM) for graceful shutdown - - Main flow: `writeConfigs()` → `startContainers()` → `runCopilotCommand()` → `stopContainers()` → `cleanup()` + - Main flow: `writeConfigs()` → `startContainers()` → `runAgentCommand()` → `stopContainers()` → `cleanup()` 2. **Configuration Generation** (`src/squid-config.ts`, `src/docker-manager.ts`) - `generateSquidConfig()`: Creates Squid proxy configuration with domain ACL rules - - `generateDockerCompose()`: Creates Docker Compose YAML with two services (squid-proxy, copilot) + - `generateDockerCompose()`: Creates Docker Compose YAML with two services (squid-proxy, agent) - All configs are written to a temporary work directory (default: `/tmp/awf-`) 3. **Docker Management** (`src/docker-manager.ts`) - Manages container lifecycle using `execa` to run docker-compose commands - - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` - - Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting + - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Agent at `172.30.0.20` + - Squid container uses healthcheck; Agent waits for Squid to be healthy before starting 4. 
**Type Definitions** (`src/types.ts`) - `WrapperConfig`: Main configuration interface @@ -173,13 +173,13 @@ The codebase follows a modular architecture with clear separation of concerns: - **Network:** Connected to `awf-net` at `172.30.0.10` - **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` -**Copilot Container** (`containers/copilot/`) +**Agent Container** (`containers/agent/`) - Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli - Mounts entire host filesystem at `/host` and user home directory for full access - Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support - `NET_ADMIN` capability required for iptables manipulation - Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (agent container only) 2. 
`entrypoint.sh`: Tests connectivity, then executes user command - **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) @@ -191,7 +191,7 @@ The codebase follows a modular architecture with clear separation of concerns: - Allow DNS queries - Allow traffic to Squid proxy itself - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + - **Note:** These NAT rules only apply to the agent container itself, not spawned containers ### Traffic Flow @@ -202,11 +202,11 @@ CLI generates configs (squid.conf, docker-compose.yml) ↓ Docker Compose starts Squid container (with healthcheck) ↓ -Docker Compose starts Copilot container (waits for Squid healthy) +Docker Compose starts Agent container (waits for Squid healthy) ↓ -iptables rules applied in Copilot container +iptables rules applied in Agent container ↓ -User command executes in Copilot container +User command executes in Agent container ↓ All HTTP/HTTPS traffic → iptables DNAT → Squid proxy → domain ACL filtering ↓ @@ -223,8 +223,8 @@ Containers stopped, temporary files cleaned up ## Exit Code Handling -The wrapper propagates the exit code from the copilot container: -1. Command runs in copilot container +The wrapper propagates the exit code from the agent container: +1. Command runs in agent container 2. Container exits with command's exit code 3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` 4. Wrapper exits with same code @@ -234,7 +234,7 @@ The wrapper propagates the exit code from the copilot container: The system uses a defense-in-depth cleanup strategy across four stages to prevent Docker resource leaks: ### 1. 
Pre-Test Cleanup (CI/CD Scripts) -**Location:** `scripts/ci/test-copilot-*.sh` (start of each script) +**Location:** `scripts/ci/test-agent-*.sh` (start of each script) **What:** Runs `cleanup.sh` to remove orphaned resources from previous failed runs **Why:** Prevents Docker network subnet pool exhaustion and container name conflicts **Critical:** Without this, `timeout` commands that kill the wrapper mid-cleanup leave networks/containers behind @@ -253,13 +253,13 @@ The system uses a defense-in-depth cleanup strategy across four stages to preven **Limitation:** Cannot catch SIGKILL (9) from `timeout` after grace period ### 4. CI/CD Always Cleanup -**Location:** `.github/workflows/test-copilot-*.yml` (`if: always()`) +**Location:** `.github/workflows/test-agent-*.yml` (`if: always()`) **What:** Runs `cleanup.sh` regardless of job status **Why:** Safety net for SIGKILL, job cancellation, and unexpected failures ### Cleanup Script (`scripts/ci/cleanup.sh`) Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) +- Containers by name (`awf-squid`, `awf-agent`) - All docker-compose services from work directories - Unused containers (`docker container prune -f`) - Unused networks (`docker network prune -f`) - **critical for subnet pool management** @@ -272,7 +272,7 @@ Removes all awf resources: All temporary files are created in `workDir` (default: `/tmp/awf-`): - `squid.conf`: Generated Squid proxy configuration - `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `agent-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) - `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) Use `--keep-containers` to preserve containers and files after execution for debugging. 
@@ -281,7 +281,7 @@ Use `--keep-containers` to preserve containers and files after execution for deb ### Real-Time Log Streaming -The wrapper streams container logs in real-time using `docker logs -f`, allowing you to see output as commands execute rather than waiting until completion. This is implemented in `src/docker-manager.ts:runCopilotCommand()` which runs `docker logs -f` concurrently with `docker wait`. +The wrapper streams container logs in real-time using `docker logs -f`, allowing you to see output as commands execute rather than waiting until completion. This is implemented in `src/docker-manager.ts:runAgentCommand()` which runs `docker logs -f` concurrently with `docker wait`. **Note:** The container is configured with `tty: false` (line 202 in `src/docker-manager.ts`) to prevent ANSI escape sequences from appearing in log output. This provides cleaner, more readable streaming logs. @@ -291,25 +291,25 @@ Copilot CLI logs are automatically preserved for debugging: **Directory Structure:** - Container writes logs to: `~/.copilot/logs/` (Copilot's default location) -- Volume mount maps to: `${workDir}/copilot-logs/` -- After cleanup: Logs moved to `/tmp/copilot-logs-` (if they exist) +- Volume mount maps to: `${workDir}/agent-logs/` +- After cleanup: Logs moved to `/tmp/agent-logs-` (if they exist) **Automatic Preservation:** -- If Copilot creates logs, they're automatically moved to `/tmp/copilot-logs-/` before workDir cleanup +- If Copilot creates logs, they're automatically moved to `/tmp/agent-logs-/` before workDir cleanup - Empty log directories are not preserved (avoids cluttering /tmp) -- You'll see: `[INFO] Copilot logs preserved at: /tmp/copilot-logs-` when logs exist +- You'll see: `[INFO] Copilot logs preserved at: /tmp/agent-logs-` when logs exist **With `--keep-containers`:** -- Logs remain at: `${workDir}/copilot-logs/` +- Logs remain at: `${workDir}/agent-logs/` - All config files and containers are preserved -- You'll see: `[INFO] Copilot 
logs available at: /tmp/awf-/copilot-logs/` +- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/agent-logs/` **Usage Examples:** ```bash # Logs automatically preserved (if created) awf --allow-domains github.com \ "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" -# Output: [INFO] Copilot logs preserved at: /tmp/copilot-logs-1761073250147 +# Output: [INFO] Copilot logs preserved at: /tmp/agent-logs-1761073250147 # Increase log verbosity for debugging awf --allow-domains github.com \ @@ -403,7 +403,7 @@ To use a local, writable GitHub MCP server with Copilot CLI, you must: **Location:** The MCP configuration must be placed at: - `~/.copilot/mcp-config.json` (primary location) -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. +The agent container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. **Format:** ```json @@ -458,16 +458,16 @@ sudo -E awf \ ``` **Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the agent container - `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server - `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since agent container mounts HOME directory **Why `sudo -E` is required:** 1. `awf` needs sudo for iptables manipulation 2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. 
The GitHub MCP server Docker container inherits them from the copilot container's environment +3. These variables are passed into the agent container via the HOME directory mount +4. The GitHub MCP server Docker container inherits them from the agent container's environment ### Troubleshooting @@ -546,7 +546,7 @@ The firewall implements comprehensive logging at two levels: ### Key Files - `src/squid-config.ts` - Generates Squid config with custom `firewall_detailed` logformat -- `containers/copilot/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic +- `containers/agent/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic - `src/squid-config.test.ts` - Tests for logging configuration ### Squid Log Format diff --git a/AGENTS.md.bak b/AGENTS.md.bak new file mode 100644 index 0000000..05edf2a --- /dev/null +++ b/AGENTS.md.bak @@ -0,0 +1,602 @@ +# AGENTS.md + +This file provides guidance to coding agent when working with code in this repository. + +## Project Overview + +This is a firewall for GitHub Copilot CLI (package name: `@github/awf`) that provides L7 (HTTP/HTTPS) egress control using Squid proxy and Docker containers. The tool restricts network access to a whitelist of approved domains while maintaining full filesystem access for the Copilot CLI and its MCP servers. + +### Documentation Files + +- **[README.md](README.md)** - Main project documentation and usage guide +- **[LOGGING.md](LOGGING.md)** - Comprehensive logging documentation +- **[docs/logging_quickref.md](docs/logging_quickref.md)** - Quick reference for log queries and monitoring + +## Development Workflow + +### GitHub Actions Best Practices + +**IMPORTANT:** When writing or modifying GitHub Actions workflows: + +1. **Use TypeScript for workflow scripts, not bash** - All scripts that run in GitHub Actions workflows should be written in TypeScript and executed with `npx tsx`. 
This ensures: + - Type safety and better IDE support + - Consistency with the rest of the codebase + - Easier testing and maintenance + - Better error handling + +2. **Inline script execution** - Run TypeScript scripts directly in workflow steps using `npx tsx path/to/script.ts`, rather than creating bash wrapper scripts. Example: + ```yaml + - name: Generate test summary + run: | + npx tsx scripts/ci/generate-test-summary.ts "test-file.ts" "Test Name" test-output.log + ``` + +3. **Place scripts in `scripts/ci/`** - All CI/CD-related scripts should be in the `scripts/ci/` directory and written as TypeScript modules with proper type definitions. + +**Example:** +- ❌ Bad: `scripts/ci/generate-summary.sh` (bash script) +- ✅ Good: `scripts/ci/generate-test-summary.ts` (TypeScript script called with `npx tsx`) + +### Debugging GitHub Actions Failures + +**IMPORTANT:** When GitHub Actions workflows fail, always follow this debugging workflow: + +1. **Reproduce locally first** - Run the same commands/scripts that failed in CI on your local machine +2. **Understand the root cause** - Investigate logs, error messages, and system state to identify why it failed +3. **Test the fix locally** - Verify your solution works in your local environment +4. **Then update the action** - Only modify the GitHub Actions workflow after confirming the fix locally + +This approach prevents trial-and-error debugging in CI (which wastes runner time and makes debugging slower) and ensures fixes address the actual root cause rather than symptoms. 
+ +**Downloading CI Logs for Local Analysis:** + +Use `scripts/download-latest-artifact.sh` to download logs from GitHub Actions runs: + +```bash +# Download logs from the latest integration test workflow run (default) +./scripts/download-latest-artifact.sh + +# Download logs from a specific run ID +./scripts/download-latest-artifact.sh 1234567890 + +# Download from test-integration workflow (latest run) +./scripts/download-latest-artifact.sh "" ".github/workflows/test-integration.yml" "integration-test-logs" +``` + +**Parameters:** +- `RUN_ID` (optional): Specific workflow run ID, or empty string for latest run +- `WORKFLOW_FILE` (optional): Path to workflow file (default: `.github/workflows/test-integration.yml`) +- `ARTIFACT_NAME` (optional): Artifact name (default: `integration-test-logs`) + +**Artifact name:** +- `integration-test-logs` - test-integration.yml + +This downloads artifacts to `./artifacts-run-$RUN_ID` for local examination. Requires GitHub CLI (`gh`) authenticated with the repository. + +**Example:** The "Pool overlaps" Docker network error was reproduced locally, traced to orphaned networks from `timeout`-killed processes, fixed by adding pre-test cleanup in scripts, then verified before updating workflows. 
+ +## Development Commands + +### Build and Testing +```bash +# Build TypeScript to dist/ +npm run build + +# Watch mode (rebuilds on changes) +npm run dev + +# Run tests +npm test + +# Run tests in watch mode +npm run test:watch + +# Lint TypeScript files +npm run lint + +# Clean build artifacts +npm run clean +``` + +### Local Installation + +**For regular use:** +```bash +# Link locally for testing +npm link + +# Use the CLI +awf --allow-domains github.com 'curl https://api.github.com' +``` + +**For sudo usage (required for iptables manipulation):** + +Since `npm link` creates symlinks in the user's npm directory which isn't in root's PATH, you need to create a wrapper script in `/usr/local/bin/`: + +```bash +# Build the project +npm run build + +# Create sudo wrapper script +sudo tee /usr/local/bin/awf > /dev/null <<'EOF' +#!/bin/bash +exec ~/.nvm/versions/node/v22.13.0/bin/node \ + ~/developer/gh-aw-firewall/dist/cli.js "$@" +EOF + +sudo chmod +x /usr/local/bin/awf + +# Verify it works +sudo awf --help +``` + +**Note:** After each `npm run build`, the wrapper automatically uses the latest compiled code. Update the paths in the wrapper script to match your node installation and project directory. + +## Architecture + +The codebase follows a modular architecture with clear separation of concerns: + +### Core Components + +1. **CLI Entry Point** (`src/cli.ts`) + - Uses `commander` for argument parsing + - Orchestrates the entire workflow: config generation → container startup → command execution → cleanup + - Handles signal interrupts (SIGINT/SIGTERM) for graceful shutdown + - Main flow: `writeConfigs()` → `startContainers()` → `runCopilotCommand()` → `stopContainers()` → `cleanup()` + +2.
**Configuration Generation** (`src/squid-config.ts`, `src/docker-manager.ts`) + - `generateSquidConfig()`: Creates Squid proxy configuration with domain ACL rules + - `generateDockerCompose()`: Creates Docker Compose YAML with two services (squid-proxy, copilot) + - All configs are written to a temporary work directory (default: `/tmp/awf-`) + +3. **Docker Management** (`src/docker-manager.ts`) + - Manages container lifecycle using `execa` to run docker-compose commands + - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` + - Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting + +4. **Type Definitions** (`src/types.ts`) + - `WrapperConfig`: Main configuration interface + - `SquidConfig`, `DockerComposeConfig`: Typed configuration objects + +5. **Logging** (`src/logger.ts`) + - Singleton logger with configurable log levels (debug, info, warn, error) + - Uses `chalk` for colored output + - All logs go to stderr (console.error) to avoid interfering with command stdout + +### Container Architecture + +**Squid Container** (`containers/squid/`) +- Based on `ubuntu/squid:latest` +- Mounts dynamically-generated `squid.conf` from work directory +- Exposes port 3128 for proxy traffic +- Logs to shared volume `squid-logs:/var/log/squid` +- **Network:** Connected to `awf-net` at `172.30.0.10` +- **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` + +**Copilot Container** (`containers/copilot/`) +- Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli +- Mounts entire host filesystem at `/host` and user home directory for full access +- Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support +- `NET_ADMIN` capability required for iptables manipulation +- Two-stage entrypoint: + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 2. 
`entrypoint.sh`: Tests connectivity, then executes user command +- **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration + - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) + - Automatically injects `--network awf-net` to all spawned containers + - Injects proxy environment variables: `HTTP_PROXY`, `HTTPS_PROXY`, `http_proxy`, `https_proxy` + - Logs all intercepted commands to `/tmp/docker-wrapper.log` for debugging +- Key iptables rules (in `setup-iptables.sh`): + - Allow localhost traffic (for stdio MCP servers) + - Allow DNS queries + - Allow traffic to Squid proxy itself + - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) + - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + +### Traffic Flow + +``` +User Command + ↓ +CLI generates configs (squid.conf, docker-compose.yml) + ↓ +Docker Compose starts Squid container (with healthcheck) + ↓ +Docker Compose starts Copilot container (waits for Squid healthy) + ↓ +iptables rules applied in Copilot container + ↓ +User command executes in Copilot container + ↓ +All HTTP/HTTPS traffic → iptables DNAT → Squid proxy → domain ACL filtering + ↓ +Containers stopped, temporary files cleaned up +``` + +## Domain Whitelisting + +- Domains in `--allow-domains` are normalized (protocol/trailing slash removed) +- Both exact matches and subdomain matches are added to Squid ACL: + - `github.com` → matches `github.com` and `.github.com` (subdomains) + - `.github.com` → matches all subdomains +- Squid denies any domain not in the allowlist + +## Exit Code Handling + +The wrapper propagates the exit code from the copilot container: +1. Command runs in copilot container +2. Container exits with command's exit code +3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` +4. 
Wrapper exits with same code + +## Cleanup Lifecycle + +The system uses a defense-in-depth cleanup strategy across four stages to prevent Docker resource leaks: + +### 1. Pre-Test Cleanup (CI/CD Scripts) +**Location:** `scripts/ci/test-copilot-*.sh` (start of each script) +**What:** Runs `cleanup.sh` to remove orphaned resources from previous failed runs +**Why:** Prevents Docker network subnet pool exhaustion and container name conflicts +**Critical:** Without this, `timeout` commands that kill the wrapper mid-cleanup leave networks/containers behind + +### 2. Normal Exit Cleanup (Built-in) +**Location:** `src/cli.ts:117-118` (`performCleanup()`) +**What:** +- `stopContainers()` → `docker compose down -v` (stops containers, removes volumes) +- `cleanup()` → Deletes workDir (`/tmp/awf-`) +**Trigger:** Successful command completion + +### 3. Signal/Error Cleanup (Built-in) +**Location:** `src/cli.ts:95-103, 122-126` (SIGINT/SIGTERM handlers, catch blocks) +**What:** Same as normal exit cleanup +**Trigger:** User interruption (Ctrl+C), timeout signals, or errors +**Limitation:** Cannot catch SIGKILL (9) from `timeout` after grace period + +### 4. CI/CD Always Cleanup +**Location:** `.github/workflows/test-copilot-*.yml` (`if: always()`) +**What:** Runs `cleanup.sh` regardless of job status +**Why:** Safety net for SIGKILL, job cancellation, and unexpected failures + +### Cleanup Script (`scripts/ci/cleanup.sh`) +Removes all awf resources: +- Containers by name (`awf-squid`, `awf-copilot`) +- All docker-compose services from work directories +- Unused containers (`docker container prune -f`) +- Unused networks (`docker network prune -f`) - **critical for subnet pool management** +- Temporary directories (`/tmp/awf-*`) + +**Note:** Test scripts use `timeout 60s` which can kill the wrapper before Stage 2/3 cleanup completes. Stage 1 (pre-test) and Stage 4 (always) prevent accumulation across test runs. 
+ +## Configuration Files + +All temporary files are created in `workDir` (default: `/tmp/awf-`): +- `squid.conf`: Generated Squid proxy configuration +- `docker-compose.yml`: Generated Docker Compose configuration +- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) + +Use `--keep-containers` to preserve containers and files after execution for debugging. + +## Log Streaming and Persistence + +### Real-Time Log Streaming + +The wrapper streams container logs in real-time using `docker logs -f`, allowing you to see output as commands execute rather than waiting until completion. This is implemented in `src/docker-manager.ts:runCopilotCommand()` which runs `docker logs -f` concurrently with `docker wait`. + +**Note:** The container is configured with `tty: false` (line 202 in `src/docker-manager.ts`) to prevent ANSI escape sequences from appearing in log output. This provides cleaner, more readable streaming logs. 
+ +### Copilot Logs Preservation + +Copilot CLI logs are automatically preserved for debugging: + +**Directory Structure:** +- Container writes logs to: `~/.copilot/logs/` (Copilot's default location) +- Volume mount maps to: `${workDir}/copilot-logs/` +- After cleanup: Logs moved to `/tmp/copilot-logs-` (if they exist) + +**Automatic Preservation:** +- If Copilot creates logs, they're automatically moved to `/tmp/copilot-logs-/` before workDir cleanup +- Empty log directories are not preserved (avoids cluttering /tmp) +- You'll see: `[INFO] Copilot logs preserved at: /tmp/copilot-logs-` when logs exist + +**With `--keep-containers`:** +- Logs remain at: `${workDir}/copilot-logs/` +- All config files and containers are preserved +- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/copilot-logs/` + +**Usage Examples:** +```bash +# Logs automatically preserved (if created) +awf --allow-domains github.com \ + "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" +# Output: [INFO] Copilot logs preserved at: /tmp/copilot-logs-1761073250147 + +# Increase log verbosity for debugging +awf --allow-domains github.com \ + "npx @github/copilot@0.0.347 -p 'your prompt' --log-level all --allow-all-tools" + +# Keep everything for detailed inspection +awf --allow-domains github.com --keep-containers \ + "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug" +``` + +**Implementation Details:** +- Volume mount added in `src/docker-manager.ts:172` +- Log directory creation in `src/docker-manager.ts:247-252` +- Preservation logic in `src/docker-manager.ts:540-550` (cleanup function) + +### Squid Logs Preservation + +Squid proxy logs are automatically preserved for debugging network traffic: + +**Directory Structure:** +- Container writes logs to: `/var/log/squid/` (Squid's default location) +- Volume mount maps to: `${workDir}/squid-logs/` +- After cleanup: Logs moved to `/tmp/squid-logs-` (if they exist) + +**Automatic Preservation:** +- 
If Squid creates logs, they're automatically moved to `/tmp/squid-logs-/` before workDir cleanup +- Empty log directories are not preserved (avoids cluttering /tmp) +- You'll see: `[INFO] Squid logs preserved at: /tmp/squid-logs-` when logs exist + +**With `--keep-containers`:** +- Logs remain at: `${workDir}/squid-logs/` +- All config files and containers are preserved +- You'll see: `[INFO] Squid logs available at: /tmp/awf-/squid-logs/` + +**Log Files:** +- `access.log`: All HTTP/HTTPS traffic with custom format showing domains, IPs, and allow/deny decisions +- `cache.log`: Squid internal diagnostic messages + +**Viewing Logs:** +```bash +# Logs are owned by the 'proxy' user (from container), requires sudo on host +sudo cat /tmp/squid-logs-/access.log + +# Example log entries: +# Allowed: TCP_TUNNEL:HIER_DIRECT with status 200 +# Denied: TCP_DENIED:HIER_NONE with status 403 +``` + +**Usage Examples:** +```bash +# Check which domains were blocked +sudo grep "TCP_DENIED" /tmp/squid-logs-/access.log + +# View all traffic +sudo cat /tmp/squid-logs-/access.log +``` + +**Implementation Details:** +- Volume mount in `src/docker-manager.ts:135` +- Log directory creation in `src/docker-manager.ts:254-261` +- Entrypoint script fixes permissions: `containers/squid/entrypoint.sh` +- Preservation logic in `src/docker-manager.ts:552-562` (cleanup function) + +## Key Dependencies + +- `commander`: CLI argument parsing +- `chalk`: Colored terminal output +- `execa`: Subprocess execution (docker-compose commands) +- `js-yaml`: YAML generation for Docker Compose config +- TypeScript 5.x, compiled to ES2020 CommonJS + +## Testing Notes + +- Tests use Jest (`npm test`) +- Currently no test files exist (tsconfig excludes `**/*.test.ts`) +- Integration testing: Run commands with `--log-level debug` and `--keep-containers` to inspect generated configs and container logs + +## MCP Server Configuration for Copilot CLI + +### Overview + +GitHub Copilot CLI v0.0.347+ includes a **built-in 
GitHub MCP server** that connects to a read-only remote endpoint (`https://api.enterprise.githubcopilot.com/mcp/readonly`). This built-in server takes precedence over local MCP configurations by default, which prevents write operations like creating issues or pull requests. + +To use a local, writable GitHub MCP server with Copilot CLI, you must: +1. Configure the MCP server in the correct location with the correct format +2. Disable the built-in GitHub MCP server +3. Ensure proper environment variable passing + +### Correct MCP Configuration + +**Location:** The MCP configuration must be placed at: +- `~/.copilot/mcp-config.json` (primary location) + +The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. + +**Format:** +```json +{ + "mcpServers": { + "github": { + "type": "local", + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "-e", + "GITHUB_TOOLSETS=default", + "ghcr.io/github/github-mcp-server:v0.19.0" + ], + "tools": ["*"], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + } +} +``` + +**Key Requirements:** +- ✅ **`"tools": ["*"]`** - Required field. 
Use `["*"]` to enable all tools, or list specific tool names + - ⚠️ Empty array `[]` means NO tools will be available +- ✅ **`"type": "local"`** - Required to specify local MCP server type +- ✅ **`"env"` section** - Environment variables must be declared here with `${VAR}` syntax for interpolation +- ✅ **Environment variable in args** - Use bare variable names in `-e` flags (e.g., `"GITHUB_PERSONAL_ACCESS_TOKEN"` without `$`) +- ✅ **Shell environment** - Variables must be exported in the shell before running awf +- ✅ **MCP server name** - Use `"github"` as the server name (must match `--allow-tool` flag) + +### Running Copilot CLI with Local MCP Through Firewall + +**Required setup:** +```bash +# Export environment variables (both required) +export GITHUB_TOKEN="" # For Copilot CLI authentication +export GITHUB_PERSONAL_ACCESS_TOKEN="" # For GitHub MCP server + +# Run awf with sudo -E to preserve environment variables +sudo -E awf \ + --allow-domains raw.githubusercontent.com,api.github.com,github.com,registry.npmjs.org,api.enterprise.githubcopilot.com \ + "npx @github/copilot@0.0.347 \ + --disable-builtin-mcps \ + --allow-tool github \ + --prompt 'your prompt here'" +``` + +**Critical requirements:** +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server +- `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory + +**Why `sudo -E` is required:** +1. `awf` needs sudo for iptables manipulation +2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN +3. These variables are passed into the copilot container via the HOME directory mount +4. 
The GitHub MCP server Docker container inherits them from the copilot container's environment + +### Troubleshooting + +**Problem:** MCP server starts but says "GITHUB_PERSONAL_ACCESS_TOKEN not set" +- **Cause:** Environment variable not passed correctly through sudo or to Docker container +- **Solution:** Use `sudo -E` when running awf, and ensure the variable is exported before running the command + +**Problem:** MCP config validation error: "Invalid input" +- **Cause:** Missing `"tools"` field +- **Solution:** Add `"tools": ["*"]` to the MCP server config + +**Problem:** Copilot uses read-only remote MCP instead of local +- **Cause:** Built-in MCP not disabled +- **Solution:** Add `--disable-builtin-mcps` flag to the copilot command + +**Problem:** Tools not available even with local MCP +- **Cause:** Wrong server name in `--allow-tool` flag +- **Solution:** Use `--allow-tool github` (must match the server name in mcp-config.json) + +**Problem:** Permission denied when running awf +- **Cause:** iptables requires root privileges +- **Solution:** Use `sudo -E awf` (not just `sudo awf`) + +### Verifying Local MCP Usage + +Check Copilot CLI logs (use `--log-level debug`) for these indicators: + +**Local MCP working:** +``` +Starting MCP client for github with command: docker +GitHub MCP Server running on stdio +readOnly=false +MCP client for github connected +``` + +**Built-in remote MCP (not what you want):** +``` +Using Copilot API endpoint: https://api.enterprise.githubcopilot.com/mcp/readonly +Starting remote MCP client for github-mcp-server +``` + +### CI/CD Configuration + +For GitHub Actions workflows: +1. Create MCP config script that writes to `~/.copilot/mcp-config.json` (note: `~` = `/home/runner` in GitHub Actions) +2. Export both `GITHUB_TOKEN` (for Copilot CLI) and `GITHUB_PERSONAL_ACCESS_TOKEN` (for GitHub MCP server) as environment variables +3. 
Pull the MCP server Docker image before running tests: `docker pull ghcr.io/github/github-mcp-server:v0.19.0` +4. Run awf with `sudo -E` to preserve environment variables +5. Always use `--disable-builtin-mcps` and `--allow-tool github` flags when running Copilot CLI + +**Example workflow step:** +```yaml +- name: Test Copilot CLI with GitHub MCP through firewall + env: + GITHUB_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }} + GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + sudo -E awf \ + --allow-domains raw.githubusercontent.com,api.github.com,github.com,registry.npmjs.org,api.enterprise.githubcopilot.com \ + "npx @github/copilot@0.0.347 \ + --disable-builtin-mcps \ + --allow-tool github \ + --log-level debug \ + --prompt 'your prompt here'" +``` + +## Logging Implementation + +### Overview + +The firewall implements comprehensive logging at two levels: + +1. **Squid Proxy Logs (L7)** - All HTTP/HTTPS traffic (allowed and blocked) +2. **iptables Kernel Logs (L3/L4)** - Non-HTTP protocols and UDP traffic + +### Key Files + +- `src/squid-config.ts` - Generates Squid config with custom `firewall_detailed` logformat +- `containers/copilot/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic +- `src/squid-config.test.ts` - Tests for logging configuration + +### Squid Log Format + +Custom format defined in `src/squid-config.ts:40`: +``` +logformat firewall_detailed %ts.%03tu %>a:%>p %{Host}>h %Hs %Ss:%Sh %ru "%{User-Agent}>h" +``` + +Captures: +- Timestamp with milliseconds +- Client IP:port +- Domain (Host header / SNI) +- Destination IP:port +- Protocol version +- HTTP method +- Status code (200=allowed, 403=blocked) +- Decision code (TCP_TUNNEL=allowed, TCP_DENIED=blocked) +- URL +- User agent + +### iptables Logging + +Two LOG rules in `setup-iptables.sh`: + +1. **Line 80** - `[FW_BLOCKED_UDP]` prefix for blocked UDP traffic +2. 
**Line 95** - `[FW_BLOCKED_OTHER]` prefix for other blocked traffic + +Both use `--log-uid` flag to capture process UID. + +### Testing Logging + +Run tests: +```bash +npm test -- squid-config.test.ts +``` + +Manual testing: +```bash +# Test blocked traffic +awf --allow-domains example.com --keep-containers 'curl https://github.com' + +# View logs +docker exec awf-squid cat /var/log/squid/access.log +``` + +### Important Notes + +- Squid logs use Unix timestamps (convert with `date -d @TIMESTAMP`) +- Decision codes: `TCP_DENIED:HIER_NONE` = blocked, `TCP_TUNNEL:HIER_DIRECT` = allowed +- SNI is captured via CONNECT method for HTTPS (no SSL inspection) +- iptables logs go to kernel buffer (view with `dmesg`) +- PID not directly available (UID can be used for correlation) diff --git a/CLAUDE.md b/CLAUDE.md index 4fe3795..80336b9 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -144,7 +144,7 @@ The codebase follows a modular architecture with clear separation of concerns: - Uses `commander` for argument parsing - Orchestrates the entire workflow: config generation → container startup → command execution → cleanup - Handles signal interrupts (SIGINT/SIGTERM) for graceful shutdown - - Main flow: `writeConfigs()` → `startContainers()` → `runCopilotCommand()` → `stopContainers()` → `cleanup()` + - Main flow: `writeConfigs()` → `startContainers()` → `runAgentCommand()` → `stopContainers()` → `cleanup()` 2. **Configuration Generation** (`src/squid-config.ts`, `src/docker-manager.ts`) - `generateSquidConfig()`: Creates Squid proxy configuration with domain ACL rules @@ -153,8 +153,8 @@ The codebase follows a modular architecture with clear separation of concerns: 3. 
**Docker Management** (`src/docker-manager.ts`) - Manages container lifecycle using `execa` to run docker-compose commands - - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` - - Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting + - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Agent at `172.30.0.20` + - Squid container uses healthcheck; Agent waits for Squid to be healthy before starting 4. **Type Definitions** (`src/types.ts`) - `WrapperConfig`: Main configuration interface @@ -175,13 +175,13 @@ The codebase follows a modular architecture with clear separation of concerns: - **Network:** Connected to `awf-net` at `172.30.0.10` - **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` -**Copilot Container** (`containers/copilot/`) +**Agent Container** (`containers/agent/`) - Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli - Mounts entire host filesystem at `/host` and user home directory for full access - Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support - `NET_ADMIN` capability required for iptables manipulation - Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (agent container only) 2. 
`entrypoint.sh`: Tests connectivity, then executes user command - **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) @@ -193,7 +193,7 @@ The codebase follows a modular architecture with clear separation of concerns: - Allow DNS queries - Allow traffic to Squid proxy itself - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + - **Note:** These NAT rules only apply to the agent container itself, not spawned containers ### Traffic Flow @@ -225,8 +225,8 @@ Containers stopped, temporary files cleaned up ## Exit Code Handling -The wrapper propagates the exit code from the copilot container: -1. Command runs in copilot container +The wrapper propagates the exit code from the agent container: +1. Command runs in agent container 2. Container exits with command's exit code 3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` 4. 
Wrapper exits with same code @@ -261,7 +261,7 @@ The system uses a defense-in-depth cleanup strategy across four stages to preven ### Cleanup Script (`scripts/ci/cleanup.sh`) Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) +- Containers by name (`awf-squid`, `awf-agent`) - All docker-compose services from work directories - Unused containers (`docker container prune -f`) - Unused networks (`docker network prune -f`) - **critical for subnet pool management** @@ -274,7 +274,7 @@ Removes all awf resources: All temporary files are created in `workDir` (default: `/tmp/awf-`): - `squid.conf`: Generated Squid proxy configuration - `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `agent-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) - `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) Use `--keep-containers` to preserve containers and files after execution for debugging. @@ -283,7 +283,7 @@ Use `--keep-containers` to preserve containers and files after execution for deb ### Real-Time Log Streaming -The wrapper streams container logs in real-time using `docker logs -f`, allowing you to see output as commands execute rather than waiting until completion. This is implemented in `src/docker-manager.ts:runCopilotCommand()` which runs `docker logs -f` concurrently with `docker wait`. +The wrapper streams container logs in real-time using `docker logs -f`, allowing you to see output as commands execute rather than waiting until completion. This is implemented in `src/docker-manager.ts:runAgentCommand()` which runs `docker logs -f` concurrently with `docker wait`. **Note:** The container is configured with `tty: false` (line 202 in `src/docker-manager.ts`) to prevent ANSI escape sequences from appearing in log output. 
This provides cleaner, more readable streaming logs. @@ -293,25 +293,25 @@ Copilot CLI logs are automatically preserved for debugging: **Directory Structure:** - Container writes logs to: `~/.copilot/logs/` (Copilot's default location) -- Volume mount maps to: `${workDir}/copilot-logs/` -- After cleanup: Logs moved to `/tmp/copilot-logs-` (if they exist) +- Volume mount maps to: `${workDir}/agent-logs/` +- After cleanup: Logs moved to `/tmp/agent-logs-` (if they exist) **Automatic Preservation:** -- If Copilot creates logs, they're automatically moved to `/tmp/copilot-logs-/` before workDir cleanup +- If Copilot creates logs, they're automatically moved to `/tmp/agent-logs-/` before workDir cleanup - Empty log directories are not preserved (avoids cluttering /tmp) -- You'll see: `[INFO] Copilot logs preserved at: /tmp/copilot-logs-` when logs exist +- You'll see: `[INFO] Copilot logs preserved at: /tmp/agent-logs-` when logs exist **With `--keep-containers`:** -- Logs remain at: `${workDir}/copilot-logs/` +- Logs remain at: `${workDir}/agent-logs/` - All config files and containers are preserved -- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/copilot-logs/` +- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/agent-logs/` **Usage Examples:** ```bash # Logs automatically preserved (if created) awf --allow-domains github.com \ "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" -# Output: [INFO] Copilot logs preserved at: /tmp/copilot-logs-1761073250147 +# Output: [INFO] Copilot logs preserved at: /tmp/agent-logs-1761073250147 # Increase log verbosity for debugging awf --allow-domains github.com \ @@ -405,7 +405,7 @@ To use a local, writable GitHub MCP server with Copilot CLI, you must: **Location:** The MCP configuration must be placed at: - `~/.copilot/mcp-config.json` (primary location) -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside 
the container. +The agent container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. **Format:** ```json @@ -460,16 +460,16 @@ sudo -E awf \ ``` **Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the agent container - `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server - `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since agent container mounts HOME directory **Why `sudo -E` is required:** 1. `awf` needs sudo for iptables manipulation 2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. The GitHub MCP server Docker container inherits them from the copilot container's environment +3. These variables are passed into the agent container via the HOME directory mount +4. 
The GitHub MCP server Docker container inherits them from the agent container's environment ### Troubleshooting @@ -548,7 +548,7 @@ The firewall implements comprehensive logging at two levels: ### Key Files - `src/squid-config.ts` - Generates Squid config with custom `firewall_detailed` logformat -- `containers/copilot/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic +- `containers/agent/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic - `src/squid-config.test.ts` - Tests for logging configuration ### Squid Log Format diff --git a/docs/architecture.md b/docs/architecture.md index e7f0102..850523a 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -27,7 +27,7 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT │ │ └────────────────────────────┘ │ │ │ │ ▲ │ │ │ │ ┌────────┼───────────────────┐ │ │ -│ │ │ Copilot Container │ │ │ +│ │ │ Agent Container │ │ │ │ │ │ - Full filesystem access │ │ │ │ │ │ - iptables redirect │ │ │ │ │ │ - Spawns MCP servers │ │ │ @@ -43,7 +43,7 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT - Uses `commander` for argument parsing - Orchestrates the entire workflow: config generation → container startup → command execution → cleanup - Handles signal interrupts (SIGINT/SIGTERM) for graceful shutdown -- Main flow: `writeConfigs()` → `startContainers()` → `runCopilotCommand()` → `stopContainers()` → `cleanup()` +- Main flow: `writeConfigs()` → `startContainers()` → `runAgentCommand()` → `stopContainers()` → `cleanup()` ### 2. Configuration Generation - **`src/squid-config.ts`**: Generates Squid proxy configuration with domain ACL rules @@ -52,8 +52,8 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT ### 3. 
Docker Management (`src/docker-manager.ts`) - Manages container lifecycle using `execa` to run docker-compose commands -- Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` -- Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting +- Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Agent at `172.30.0.20` +- Squid container uses healthcheck; Agent waits for Squid to be healthy before starting ### 4. Type Definitions (`src/types.ts`) - `WrapperConfig`: Main configuration interface @@ -74,13 +74,13 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT - **Network:** Connected to `awf-net` at `172.30.0.10` - **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` -### Copilot Container (`containers/copilot/`) +### Agent Container (`containers/agent/`) - Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli - Mounts entire host filesystem at `/host` and user home directory for full access - Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support - `NET_ADMIN` capability required for iptables manipulation - Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) + 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (agent container only) 2. 
`entrypoint.sh`: Tests connectivity, then executes user command - **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) @@ -92,7 +92,7 @@ The firewall uses a containerized architecture with Squid proxy for L7 (HTTP/HTT - Allow DNS queries - Allow traffic to Squid proxy itself - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers + - **Note:** These NAT rules only apply to the agent container itself, not spawned containers ## Traffic Flow @@ -125,7 +125,7 @@ The wrapper generates: ### 2. Container Startup 1. **Squid proxy starts first** with healthcheck -2. **Copilot container waits** for Squid to be healthy +2. **Agent container waits** for Squid to be healthy -3. **iptables rules applied** in copilot container to redirect all HTTP/HTTPS traffic +3. **iptables rules applied** in agent container to redirect all HTTP/HTTPS traffic ### 3. Traffic Routing - All HTTP (port 80) and HTTPS (port 443) traffic → Squid proxy @@ -146,7 +146,7 @@ The wrapper generates: ### 6.
Cleanup - Containers stopped and removed - Logs moved to persistent locations: - - Copilot logs → `/tmp/copilot-logs-/` (if they exist) + - Copilot logs → `/tmp/agent-logs-/` (if they exist) - Squid logs → `/tmp/squid-logs-/` (if they exist) - Temporary files deleted (unless `--keep-containers` specified) -- Exit code propagated from copilot command +- Exit code propagated from agent command @@ -181,7 +181,7 @@ The system uses a defense-in-depth cleanup strategy across four stages to preven ### Cleanup Script (`scripts/ci/cleanup.sh`) Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) +- Containers by name (`awf-squid`, `awf-agent`) - All docker-compose services from work directories - Unused containers (`docker container prune -f`) - Unused networks (`docker network prune -f`) - **critical for subnet pool management** @@ -199,8 +199,8 @@ Removes all awf resources: ## Exit Code Handling -The wrapper propagates the exit code from the copilot container: -1. Command runs in copilot container +The wrapper propagates the exit code from the agent container: +1. Command runs in agent container 2. Container exits with command's exit code 3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` 4. Wrapper exits with same code @@ -210,7 +210,7 @@ The wrapper propagates the exit code from the copilot container: All temporary files are created in `workDir` (default: `/tmp/awf-`): - `squid.conf`: Generated Squid proxy configuration - `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) +- `agent-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) - `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) Use `--keep-containers` to preserve containers and files after execution for debugging.
diff --git a/docs/github_actions.md b/docs/github_actions.md index 8f7b81b..bf77d9f 100644 --- a/docs/github_actions.md +++ b/docs/github_actions.md @@ -103,7 +103,7 @@ To use a local, writable GitHub MCP server with Copilot CLI, you must: **Location:** The MCP configuration must be placed at: - `~/.copilot/mcp-config.json` (primary location) -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. +The agent container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. **Format:** ```json @@ -158,16 +158,16 @@ sudo -E awf \ ``` **Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container +- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the agent container - `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server - `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory +- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since agent container mounts HOME directory **Why `sudo -E` is required:** 1. `awf` needs sudo for iptables manipulation 2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. The GitHub MCP server Docker container inherits them from the copilot container's environment +3. These variables are passed into the agent container via the HOME directory mount +4. 
The GitHub MCP server Docker container inherits them from the agent container's environment ### CI/CD Configuration diff --git a/docs/logging_quickref.md b/docs/logging_quickref.md index 993c298..177750c 100644 --- a/docs/logging_quickref.md +++ b/docs/logging_quickref.md @@ -22,8 +22,8 @@ docker exec awf-squid grep "TCP_TUNNEL\|TCP_MISS" /var/log/squid/access.log # From host (requires sudo) sudo dmesg | grep FW_BLOCKED -# From copilot container -docker exec awf-copilot dmesg | grep FW_BLOCKED +# From agent container +docker exec awf-agent dmesg | grep FW_BLOCKED # Using journalctl (systemd) sudo journalctl -k | grep FW_BLOCKED diff --git a/docs/quickstart.md b/docs/quickstart.md index 68a87ad..3693e76 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -120,11 +120,11 @@ awf \ # Then inspect logs docker logs awf-squid -docker logs awf-copilot +docker logs awf-agent # Clean up manually when done -docker stop awf-squid awf-copilot -docker rm awf-squid awf-copilot +docker stop awf-squid awf-agent +docker rm awf-squid awf-agent ``` ### Multiple Domains diff --git a/docs/squid_log_filtering.md b/docs/squid_log_filtering.md index 4ab45aa..671b946 100644 --- a/docs/squid_log_filtering.md +++ b/docs/squid_log_filtering.md @@ -135,4 +135,4 @@ echo "Blocked: $(sudo grep -c "TCP_DENIED" /tmp/squid-logs-*/access.log)" - Use `$(ls -t /tmp/squid-logs-*/access.log | head -1)` to automatically target the latest log - Timestamps are Unix epoch seconds (use `date -d @` to convert) - Port `:443` indicates HTTPS traffic (most common) -- Client IPs: `172.30.0.20` = copilot container, `172.30.0.2` = spawned containers +- Client IPs: `172.30.0.20` = agent container, `172.30.0.2` = spawned containers diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2304ecb..8dfd0f8 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -126,7 +126,7 @@ ``` 4. 
Review Copilot logs for MCP connection errors: ```bash - cat /tmp/copilot-logs-/*.log + cat /tmp/agent-logs-/*.log ``` ## Log Analysis @@ -145,14 +145,14 @@ sudo grep "TCP_DENIED" /tmp/squid-logs-/access.log | awk '{print $3}' **While containers are running** (with `--keep-containers`): ```bash -docker logs awf-copilot +docker logs awf-agent docker logs awf-squid ``` **After command completes:** ```bash # Copilot logs -cat /tmp/copilot-logs-/*.log +cat /tmp/agent-logs-/*.log # Squid logs (requires sudo) sudo cat /tmp/squid-logs-/access.log @@ -167,7 +167,7 @@ Blocked UDP and non-standard protocols are logged to kernel logs: sudo dmesg | grep FW_BLOCKED # From within container -docker exec awf-copilot dmesg | grep FW_BLOCKED +docker exec awf-agent dmesg | grep FW_BLOCKED ``` ## Network Issues @@ -200,7 +200,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED ``` 2. Verify iptables rules are applied: ```bash - docker exec awf-copilot iptables -t nat -L -n -v + docker exec awf-agent iptables -t nat -L -n -v ``` 3. Increase timeout in your command: ```bash @@ -224,7 +224,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED ``` 3. Verify network connectivity: ```bash - docker exec awf-copilot ping -c 3 172.30.0.10 + docker exec awf-agent ping -c 3 172.30.0.10 ``` ## Docker-in-Docker Issues @@ -236,7 +236,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED **Solution:** - Verify docker-wrapper.sh is working: ```bash - docker exec awf-copilot cat /tmp/docker-wrapper.log + docker exec awf-agent cat /tmp/docker-wrapper.log ``` - Check that spawned containers have correct network: ```bash @@ -276,7 +276,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED **Solution:** 1. Manually clean up containers: ```bash - docker rm -f awf-copilot awf-squid + docker rm -f awf-agent awf-squid ``` 2. Clean up networks: ```bash @@ -294,7 +294,7 @@ docker exec awf-copilot dmesg | grep FW_BLOCKED **Solution:** 1. 
Manually remove old logs: ```bash - rm -rf /tmp/copilot-logs-* + rm -rf /tmp/agent-logs-* rm -rf /tmp/squid-logs-* rm -rf /tmp/awf-* ``` @@ -364,10 +364,10 @@ If you're still experiencing issues: ``` 3. **Review all logs:** - - Copilot logs: `/tmp/copilot-logs-/` + - Copilot logs: `/tmp/agent-logs-/` - Squid logs: `/tmp/squid-logs-/` - - Docker wrapper logs: `docker exec awf-copilot cat /tmp/docker-wrapper.log` - - Container logs: `docker logs awf-copilot` + - Docker wrapper logs: `docker exec awf-agent cat /tmp/docker-wrapper.log` + - Container logs: `docker logs awf-agent` 4. **Check documentation:** - [Architecture](architecture.md) - Understand how the system works diff --git a/docs/usage.md b/docs/usage.md index 08512cf..d03e89c 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -260,14 +260,14 @@ sudo awf \ "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" # Output: -# [INFO] Copilot logs preserved at: /tmp/copilot-logs- +# [INFO] Copilot logs preserved at: /tmp/agent-logs- # [INFO] Squid logs preserved at: /tmp/squid-logs- ``` **Copilot Logs:** - Contains Copilot CLI debug output and session information -- Location: `/tmp/copilot-logs-/` -- View with: `cat /tmp/copilot-logs-/*.log` +- Location: `/tmp/agent-logs-/` +- View with: `cat /tmp/agent-logs-/*.log` **Squid Logs:** - Contains all HTTP/HTTPS traffic (allowed and denied) @@ -284,7 +284,7 @@ sudo cat /tmp/squid-logs-/access.log **How it works:** - Copilot writes to `~/.copilot/logs/`, Squid writes to `/var/log/squid/` -- Volume mounts map these to `${workDir}/copilot-logs/` and `${workDir}/squid-logs/` +- Volume mounts map these to `${workDir}/agent-logs/` and `${workDir}/squid-logs/` - Before cleanup, logs are automatically moved to `/tmp/*-logs-/` (if they exist) - Empty log directories are not preserved (avoids cluttering /tmp) @@ -297,11 +297,11 @@ sudo awf \ 'your-command' # View real-time container logs: -docker logs awf-copilot +docker logs awf-agent docker logs 
awf-squid # Access preserved logs at: -# /tmp/awf-/copilot-logs/ +# /tmp/awf-/agent-logs/ # /tmp/awf-/squid-logs/ ``` From feb3d914ca2acd0453552caa151e41629a9f38e1 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 30 Oct 2025 19:48:32 +0000 Subject: [PATCH 5/5] Complete documentation updates and remove backup file Co-authored-by: Mossaka <5447827+Mossaka@users.noreply.github.com> --- AGENTS.md.bak | 602 ---------------------------------------------- docs/releasing.md | 8 +- 2 files changed, 4 insertions(+), 606 deletions(-) delete mode 100644 AGENTS.md.bak diff --git a/AGENTS.md.bak b/AGENTS.md.bak deleted file mode 100644 index 05edf2a..0000000 --- a/AGENTS.md.bak +++ /dev/null @@ -1,602 +0,0 @@ -# AGENTS.md - -This file provides guidance to coding agent when working with code in this repository. - -## Project Overview - -This is a firewall for GitHub Copilot CLI (package name: `@github/awf`) that provides L7 (HTTP/HTTPS) egress control using Squid proxy and Docker containers. The tool restricts network access to a whitelist of approved domains while maintaining full filesystem access for the Copilot CLI and its MCP servers. - -### Documentation Files - -- **[README.md](README.md)** - Main project documentation and usage guide -- **[LOGGING.md](LOGGING.md)** - Comprehensive logging documentation -- **[docs/logging_quickref.md](docs/logging_quickref.md)** - Quick reference for log queries and monitoring - -## Development Workflow - -### GitHub Actions Best Practices - -**IMPORTANT:** When writing or modifying GitHub Actions workflows: - -1. **Use TypeScript for workflow scripts, not bash** - All scripts that run in GitHub Actions workflows should be written in TypeScript and executed with `npx tsx`. This ensures: - - Type safety and better IDE support - - Consistency with the rest of the codebase - - Easier testing and maintenance - - Better error handling - -2. 
**Inline script execution** - Run TypeScript scripts directly in workflow steps using `npx tsx path/to/script.ts`, rather than creating bash wrapper scripts. Example: - ```yaml - - name: Generate test summary - run: | - npx tsx scripts/ci/generate-test-summary.ts "test-file.ts" "Test Name" test-output.log - ``` - -3. **Place scripts in `scripts/ci/`** - All CI/CD-related scripts should be in the `scripts/ci/` directory and written as TypeScript modules with proper type definitions. - -**Example:** -- ❌ Bad: `scripts/ci/generate-summary.sh` (bash script) -- ✅ Good: `scripts/ci/generate-test-summary.ts` (TypeScript script called with `npx tsx`) - -### Debugging GitHub Actions Failures - -**IMPORTANT:** When GitHub Actions workflows fail, always follow this debugging workflow: - -1. **Reproduce locally first** - Run the same commands/scripts that failed in CI on your local machine -2. **Understand the root cause** - Investigate logs, error messages, and system state to identify why it failed -3. **Test the fix locally** - Verify your solution works in your local environment -4. **Then update the action** - Only modify the GitHub Actions workflow after confirming the fix locally - -This approach prevents trial-and-error debugging in CI (which wastes runner time and makes debugging slower) and ensures fixes address the actual root cause rather than symptoms. 
- -**Downloading CI Logs for Local Analysis:** - -Use `scripts/download-latest-artifact.sh` to download logs from GitHub Actions runs: - -```bash -# Download logs from the latest integration test workflow run (default) -./scripts/download-latest-artifact.sh - -# Download logs from a specific run ID -./scripts/download-latest-artifact.sh 1234567890 - -# Download from test-integration workflow (latest run) -./scripts/download-latest-artifact.sh "" ".github/workflows/test-integration.yml" "integration-test-logs" -``` - -**Parameters:** -- `RUN_ID` (optional): Specific workflow run ID, or empty string for latest run -- `WORKFLOW_FILE` (optional): Path to workflow file (default: `.github/workflows/test-integration.yml`) -- `ARTIFACT_NAME` (optional): Artifact name (default: `integration-test-logs`) - -**Artifact name:** -- `integration-test-logs` - test-integration.yml - -This downloads artifacts to `./artifacts-run-$RUN_ID` for local examination. Requires GitHub CLI (`gh`) authenticated with the repository. - -**Example:** The "Pool overlaps" Docker network error was reproduced locally, traced to orphaned networks from `timeout`-killed processes, fixed by adding pre-test cleanup in scripts, then verified before updating workflows. 
- -## Development Commands - -### Build and Testing -```bash -# Build TypeScript to dist/ -npm run build - -# Watch mode (rebuilds on changes) -npm run dev - -# Run tests -npm test - -# Run tests in watch mode -npm test:watch - -# Lint TypeScript files -npm run lint - -# Clean build artifacts -npm run clean -``` - -### Local Installation - -**For regular use:** -```bash -# Link locally for testing -npm link - -# Use the CLI -awf --allow-domains github.com 'curl https://api.github.com' -``` - -**For sudo usage (required for iptables manipulation):** - -Since `npm link` creates symlinks in the user's npm directory which isn't in root's PATH, you need to create a wrapper script in `/usr/local/bin/`: - -```bash -# Build the project -npm run build - -# Create sudo wrapper script -sudo tee /usr/local/bin/awf > /dev/null <<'EOF' -#!/bin/bash -exec ~/.nvm/versions/node/v22.13.0/bin/node \ - ~/developer/gh-aw-firewall/dist/cli.js "$@" -EOF - -sudo chmod +x /usr/local/bin/awf - -# Verify it works -sudo awf --help -``` - -**Note:** After each `npm run build`, the wrapper automatically uses the latest compiled code. Update the paths in the wrapper script to match your node installation and project directory. - -## Architecture - -The codebase follows a modular architecture with clear separation of concerns: - -### Core Components - -1. **CLI Entry Point** (`src/cli.ts`) - - Uses `commander` for argument parsing - - Orchestrates the entire workflow: config generation → container startup → command execution → cleanup - - Handles signal interrupts (SIGINT/SIGTERM) for graceful shutdown - - Main flow: `writeConfigs()` → `startContainers()` → `runCopilotCommand()` → `stopContainers()` → `cleanup()` - -2. 
**Configuration Generation** (`src/squid-config.ts`, `src/docker-manager.ts`) - - `generateSquidConfig()`: Creates Squid proxy configuration with domain ACL rules - - `generateDockerCompose()`: Creates Docker Compose YAML with two services (squid-proxy, copilot) - - All configs are written to a temporary work directory (default: `/tmp/awf-`) - -3. **Docker Management** (`src/docker-manager.ts`) - - Manages container lifecycle using `execa` to run docker-compose commands - - Fixed network topology: `172.30.0.0/24` subnet, Squid at `172.30.0.10`, Copilot at `172.30.0.20` - - Squid container uses healthcheck; Copilot waits for Squid to be healthy before starting - -4. **Type Definitions** (`src/types.ts`) - - `WrapperConfig`: Main configuration interface - - `SquidConfig`, `DockerComposeConfig`: Typed configuration objects - -5. **Logging** (`src/logger.ts`) - - Singleton logger with configurable log levels (debug, info, warn, error) - - Uses `chalk` for colored output - - All logs go to stderr (console.error) to avoid interfering with command stdout - -### Container Architecture - -**Squid Container** (`containers/squid/`) -- Based on `ubuntu/squid:latest` -- Mounts dynamically-generated `squid.conf` from work directory -- Exposes port 3128 for proxy traffic -- Logs to shared volume `squid-logs:/var/log/squid` -- **Network:** Connected to `awf-net` at `172.30.0.10` -- **Firewall Exemption:** Allowed unrestricted outbound access via iptables rule `-s 172.30.0.10 -j ACCEPT` - -**Copilot Container** (`containers/copilot/`) -- Based on `ubuntu:22.04` with iptables, curl, git, nodejs, npm, docker-cli -- Mounts entire host filesystem at `/host` and user home directory for full access -- Mounts Docker socket (`/var/run/docker.sock`) for docker-in-docker support -- `NET_ADMIN` capability required for iptables manipulation -- Two-stage entrypoint: - 1. `setup-iptables.sh`: Configures iptables NAT rules to redirect HTTP/HTTPS traffic to Squid (copilot container only) - 2. 
`entrypoint.sh`: Tests connectivity, then executes user command -- **Docker Wrapper** (`docker-wrapper.sh`): Intercepts `docker run` commands to inject network and proxy configuration - - Symlinked at `/usr/bin/docker` (real docker at `/usr/bin/docker-real`) - - Automatically injects `--network awf-net` to all spawned containers - - Injects proxy environment variables: `HTTP_PROXY`, `HTTPS_PROXY`, `http_proxy`, `https_proxy` - - Logs all intercepted commands to `/tmp/docker-wrapper.log` for debugging -- Key iptables rules (in `setup-iptables.sh`): - - Allow localhost traffic (for stdio MCP servers) - - Allow DNS queries - - Allow traffic to Squid proxy itself - - Redirect all HTTP (port 80) and HTTPS (port 443) to Squid via DNAT (NAT table) - - **Note:** These NAT rules only apply to the copilot container itself, not spawned containers - -### Traffic Flow - -``` -User Command - ↓ -CLI generates configs (squid.conf, docker-compose.yml) - ↓ -Docker Compose starts Squid container (with healthcheck) - ↓ -Docker Compose starts Copilot container (waits for Squid healthy) - ↓ -iptables rules applied in Copilot container - ↓ -User command executes in Copilot container - ↓ -All HTTP/HTTPS traffic → iptables DNAT → Squid proxy → domain ACL filtering - ↓ -Containers stopped, temporary files cleaned up -``` - -## Domain Whitelisting - -- Domains in `--allow-domains` are normalized (protocol/trailing slash removed) -- Both exact matches and subdomain matches are added to Squid ACL: - - `github.com` → matches `github.com` and `.github.com` (subdomains) - - `.github.com` → matches all subdomains -- Squid denies any domain not in the allowlist - -## Exit Code Handling - -The wrapper propagates the exit code from the copilot container: -1. Command runs in copilot container -2. Container exits with command's exit code -3. Wrapper inspects container: `docker inspect --format={{.State.ExitCode}}` -4. 
Wrapper exits with same code - -## Cleanup Lifecycle - -The system uses a defense-in-depth cleanup strategy across four stages to prevent Docker resource leaks: - -### 1. Pre-Test Cleanup (CI/CD Scripts) -**Location:** `scripts/ci/test-copilot-*.sh` (start of each script) -**What:** Runs `cleanup.sh` to remove orphaned resources from previous failed runs -**Why:** Prevents Docker network subnet pool exhaustion and container name conflicts -**Critical:** Without this, `timeout` commands that kill the wrapper mid-cleanup leave networks/containers behind - -### 2. Normal Exit Cleanup (Built-in) -**Location:** `src/cli.ts:117-118` (`performCleanup()`) -**What:** -- `stopContainers()` → `docker compose down -v` (stops containers, removes volumes) -- `cleanup()` → Deletes workDir (`/tmp/awf-<timestamp>`) -**Trigger:** Successful command completion - -### 3. Signal/Error Cleanup (Built-in) -**Location:** `src/cli.ts:95-103, 122-126` (SIGINT/SIGTERM handlers, catch blocks) -**What:** Same as normal exit cleanup -**Trigger:** User interruption (Ctrl+C), timeout signals, or errors -**Limitation:** Cannot catch SIGKILL (9) from `timeout` after grace period - -### 4. CI/CD Always Cleanup -**Location:** `.github/workflows/test-copilot-*.yml` (`if: always()`) -**What:** Runs `cleanup.sh` regardless of job status -**Why:** Safety net for SIGKILL, job cancellation, and unexpected failures - -### Cleanup Script (`scripts/ci/cleanup.sh`) -Removes all awf resources: -- Containers by name (`awf-squid`, `awf-copilot`) -- All docker-compose services from work directories -- Unused containers (`docker container prune -f`) -- Unused networks (`docker network prune -f`) - **critical for subnet pool management** -- Temporary directories (`/tmp/awf-*`) - -**Note:** Test scripts use `timeout 60s` which can kill the wrapper before Stage 2/3 cleanup completes. Stage 1 (pre-test) and Stage 4 (always) prevent accumulation across test runs. 
- -## Configuration Files - -All temporary files are created in `workDir` (default: `/tmp/awf-<timestamp>`): -- `squid.conf`: Generated Squid proxy configuration -- `docker-compose.yml`: Generated Docker Compose configuration -- `copilot-logs/`: Directory for Copilot CLI logs (automatically preserved if logs are created) -- `squid-logs/`: Directory for Squid proxy logs (automatically preserved if logs are created) - -Use `--keep-containers` to preserve containers and files after execution for debugging. - -## Log Streaming and Persistence - -### Real-Time Log Streaming - -The wrapper streams container logs in real-time using `docker logs -f`, allowing you to see output as commands execute rather than waiting until completion. This is implemented in `src/docker-manager.ts:runCopilotCommand()` which runs `docker logs -f` concurrently with `docker wait`. - -**Note:** The container is configured with `tty: false` (line 202 in `src/docker-manager.ts`) to prevent ANSI escape sequences from appearing in log output. This provides cleaner, more readable streaming logs. 
- -### Copilot Logs Preservation - -Copilot CLI logs are automatically preserved for debugging: - -**Directory Structure:** -- Container writes logs to: `~/.copilot/logs/` (Copilot's default location) -- Volume mount maps to: `${workDir}/copilot-logs/` -- After cleanup: Logs moved to `/tmp/copilot-logs-` (if they exist) - -**Automatic Preservation:** -- If Copilot creates logs, they're automatically moved to `/tmp/copilot-logs-/` before workDir cleanup -- Empty log directories are not preserved (avoids cluttering /tmp) -- You'll see: `[INFO] Copilot logs preserved at: /tmp/copilot-logs-` when logs exist - -**With `--keep-containers`:** -- Logs remain at: `${workDir}/copilot-logs/` -- All config files and containers are preserved -- You'll see: `[INFO] Copilot logs available at: /tmp/awf-/copilot-logs/` - -**Usage Examples:** -```bash -# Logs automatically preserved (if created) -awf --allow-domains github.com \ - "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug --allow-all-tools" -# Output: [INFO] Copilot logs preserved at: /tmp/copilot-logs-1761073250147 - -# Increase log verbosity for debugging -awf --allow-domains github.com \ - "npx @github/copilot@0.0.347 -p 'your prompt' --log-level all --allow-all-tools" - -# Keep everything for detailed inspection -awf --allow-domains github.com --keep-containers \ - "npx @github/copilot@0.0.347 -p 'your prompt' --log-level debug" -``` - -**Implementation Details:** -- Volume mount added in `src/docker-manager.ts:172` -- Log directory creation in `src/docker-manager.ts:247-252` -- Preservation logic in `src/docker-manager.ts:540-550` (cleanup function) - -### Squid Logs Preservation - -Squid proxy logs are automatically preserved for debugging network traffic: - -**Directory Structure:** -- Container writes logs to: `/var/log/squid/` (Squid's default location) -- Volume mount maps to: `${workDir}/squid-logs/` -- After cleanup: Logs moved to `/tmp/squid-logs-` (if they exist) - -**Automatic Preservation:** -- 
If Squid creates logs, they're automatically moved to `/tmp/squid-logs-/` before workDir cleanup -- Empty log directories are not preserved (avoids cluttering /tmp) -- You'll see: `[INFO] Squid logs preserved at: /tmp/squid-logs-` when logs exist - -**With `--keep-containers`:** -- Logs remain at: `${workDir}/squid-logs/` -- All config files and containers are preserved -- You'll see: `[INFO] Squid logs available at: /tmp/awf-/squid-logs/` - -**Log Files:** -- `access.log`: All HTTP/HTTPS traffic with custom format showing domains, IPs, and allow/deny decisions -- `cache.log`: Squid internal diagnostic messages - -**Viewing Logs:** -```bash -# Logs are owned by the 'proxy' user (from container), requires sudo on host -sudo cat /tmp/squid-logs-/access.log - -# Example log entries: -# Allowed: TCP_TUNNEL:HIER_DIRECT with status 200 -# Denied: TCP_DENIED:HIER_NONE with status 403 -``` - -**Usage Examples:** -```bash -# Check which domains were blocked -sudo grep "TCP_DENIED" /tmp/squid-logs-/access.log - -# View all traffic -sudo cat /tmp/squid-logs-/access.log -``` - -**Implementation Details:** -- Volume mount in `src/docker-manager.ts:135` -- Log directory creation in `src/docker-manager.ts:254-261` -- Entrypoint script fixes permissions: `containers/squid/entrypoint.sh` -- Preservation logic in `src/docker-manager.ts:552-562` (cleanup function) - -## Key Dependencies - -- `commander`: CLI argument parsing -- `chalk`: Colored terminal output -- `execa`: Subprocess execution (docker-compose commands) -- `js-yaml`: YAML generation for Docker Compose config -- TypeScript 5.x, compiled to ES2020 CommonJS - -## Testing Notes - -- Tests use Jest (`npm test`) -- Currently no test files exist (tsconfig excludes `**/*.test.ts`) -- Integration testing: Run commands with `--log-level debug` and `--keep-containers` to inspect generated configs and container logs - -## MCP Server Configuration for Copilot CLI - -### Overview - -GitHub Copilot CLI v0.0.347+ includes a **built-in 
GitHub MCP server** that connects to a read-only remote endpoint (`https://api.enterprise.githubcopilot.com/mcp/readonly`). This built-in server takes precedence over local MCP configurations by default, which prevents write operations like creating issues or pull requests. - -To use a local, writable GitHub MCP server with Copilot CLI, you must: -1. Configure the MCP server in the correct location with the correct format -2. Disable the built-in GitHub MCP server -3. Ensure proper environment variable passing - -### Correct MCP Configuration - -**Location:** The MCP configuration must be placed at: -- `~/.copilot/mcp-config.json` (primary location) - -The copilot container mounts the HOME directory, so this config file is automatically accessible to Copilot CLI running inside the container. - -**Format:** -```json -{ - "mcpServers": { - "github": { - "type": "local", - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "-e", - "GITHUB_TOOLSETS=default", - "ghcr.io/github/github-mcp-server:v0.19.0" - ], - "tools": ["*"], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" - } - } - } -} -``` - -**Key Requirements:** -- ✅ **`"tools": ["*"]`** - Required field. 
Use `["*"]` to enable all tools, or list specific tool names - - ⚠️ Empty array `[]` means NO tools will be available -- ✅ **`"type": "local"`** - Required to specify local MCP server type -- ✅ **`"env"` section** - Environment variables must be declared here with `${VAR}` syntax for interpolation -- ✅ **Environment variable in args** - Use bare variable names in `-e` flags (e.g., `"GITHUB_PERSONAL_ACCESS_TOKEN"` without `$`) -- ✅ **Shell environment** - Variables must be exported in the shell before running awf -- ✅ **MCP server name** - Use `"github"` as the server name (must match `--allow-tool` flag) - -### Running Copilot CLI with Local MCP Through Firewall - -**Required setup:** -```bash -# Export environment variables (both required) -export GITHUB_TOKEN="" # For Copilot CLI authentication -export GITHUB_PERSONAL_ACCESS_TOKEN="" # For GitHub MCP server - -# Run awf with sudo -E to preserve environment variables -sudo -E awf \ - --allow-domains raw.githubusercontent.com,api.github.com,github.com,registry.npmjs.org,api.enterprise.githubcopilot.com \ - "npx @github/copilot@0.0.347 \ - --disable-builtin-mcps \ - --allow-tool github \ - --prompt 'your prompt here'" -``` - -**Critical requirements:** -- `sudo -E` - **REQUIRED** to pass environment variables through sudo to the copilot container -- `--disable-builtin-mcps` - Disables the built-in read-only GitHub MCP server -- `--allow-tool github` - Grants permission to use all tools from the `github` MCP server (must match server name in config) -- MCP config at `~/.copilot/mcp-config.json` - Automatically accessible since copilot container mounts HOME directory - -**Why `sudo -E` is required:** -1. `awf` needs sudo for iptables manipulation -2. `-E` preserves GITHUB_TOKEN and GITHUB_PERSONAL_ACCESS_TOKEN -3. These variables are passed into the copilot container via the HOME directory mount -4. 
The GitHub MCP server Docker container inherits them from the copilot container's environment - -### Troubleshooting - -**Problem:** MCP server starts but says "GITHUB_PERSONAL_ACCESS_TOKEN not set" -- **Cause:** Environment variable not passed correctly through sudo or to Docker container -- **Solution:** Use `sudo -E` when running awf, and ensure the variable is exported before running the command - -**Problem:** MCP config validation error: "Invalid input" -- **Cause:** Missing `"tools"` field -- **Solution:** Add `"tools": ["*"]` to the MCP server config - -**Problem:** Copilot uses read-only remote MCP instead of local -- **Cause:** Built-in MCP not disabled -- **Solution:** Add `--disable-builtin-mcps` flag to the copilot command - -**Problem:** Tools not available even with local MCP -- **Cause:** Wrong server name in `--allow-tool` flag -- **Solution:** Use `--allow-tool github` (must match the server name in mcp-config.json) - -**Problem:** Permission denied when running awf -- **Cause:** iptables requires root privileges -- **Solution:** Use `sudo -E awf` (not just `sudo awf`) - -### Verifying Local MCP Usage - -Check Copilot CLI logs (use `--log-level debug`) for these indicators: - -**Local MCP working:** -``` -Starting MCP client for github with command: docker -GitHub MCP Server running on stdio -readOnly=false -MCP client for github connected -``` - -**Built-in remote MCP (not what you want):** -``` -Using Copilot API endpoint: https://api.enterprise.githubcopilot.com/mcp/readonly -Starting remote MCP client for github-mcp-server -``` - -### CI/CD Configuration - -For GitHub Actions workflows: -1. Create MCP config script that writes to `~/.copilot/mcp-config.json` (note: `~` = `/home/runner` in GitHub Actions) -2. Export both `GITHUB_TOKEN` (for Copilot CLI) and `GITHUB_PERSONAL_ACCESS_TOKEN` (for GitHub MCP server) as environment variables -3. 
Pull the MCP server Docker image before running tests: `docker pull ghcr.io/github/github-mcp-server:v0.19.0` -4. Run awf with `sudo -E` to preserve environment variables -5. Always use `--disable-builtin-mcps` and `--allow-tool github` flags when running Copilot CLI - -**Example workflow step:** -```yaml -- name: Test Copilot CLI with GitHub MCP through firewall - env: - GITHUB_TOKEN: ${{ secrets.COPILOT_CLI_TOKEN }} - GITHUB_PERSONAL_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - sudo -E awf \ - --allow-domains raw.githubusercontent.com,api.github.com,github.com,registry.npmjs.org,api.enterprise.githubcopilot.com \ - "npx @github/copilot@0.0.347 \ - --disable-builtin-mcps \ - --allow-tool github \ - --log-level debug \ - --prompt 'your prompt here'" -``` - -## Logging Implementation - -### Overview - -The firewall implements comprehensive logging at two levels: - -1. **Squid Proxy Logs (L7)** - All HTTP/HTTPS traffic (allowed and blocked) -2. **iptables Kernel Logs (L3/L4)** - Non-HTTP protocols and UDP traffic - -### Key Files - -- `src/squid-config.ts` - Generates Squid config with custom `firewall_detailed` logformat -- `containers/copilot/setup-iptables.sh` - Configures iptables LOG rules for rejected traffic -- `src/squid-config.test.ts` - Tests for logging configuration - -### Squid Log Format - -Custom format defined in `src/squid-config.ts:40`: -``` -logformat firewall_detailed %ts.%03tu %>a:%>p %{Host}>h %Hs %Ss:%Sh %ru "%{User-Agent}>h" -``` - -Captures: -- Timestamp with milliseconds -- Client IP:port -- Domain (Host header / SNI) -- Destination IP:port -- Protocol version -- HTTP method -- Status code (200=allowed, 403=blocked) -- Decision code (TCP_TUNNEL=allowed, TCP_DENIED=blocked) -- URL -- User agent - -### iptables Logging - -Two LOG rules in `setup-iptables.sh`: - -1. **Line 80** - `[FW_BLOCKED_UDP]` prefix for blocked UDP traffic -2. 
**Line 95** - `[FW_BLOCKED_OTHER]` prefix for other blocked traffic - -Both use `--log-uid` flag to capture process UID. - -### Testing Logging - -Run tests: -```bash -npm test -- squid-config.test.ts -``` - -Manual testing: -```bash -# Test blocked traffic -awf --allow-domains example.com --keep-containers 'curl https://github.com' - -# View logs -docker exec awf-squid cat /var/log/squid/access.log -``` - -### Important Notes - -- Squid logs use Unix timestamps (convert with `date -d @TIMESTAMP`) -- Decision codes: `TCP_DENIED:HIER_NONE` = blocked, `TCP_TUNNEL:HIER_DIRECT` = allowed -- SNI is captured via CONNECT method for HTTPS (no SSL inspection) -- iptables logs go to kernel buffer (view with `dmesg`) -- PID not directly available (UID can be used for correlation) diff --git a/docs/releasing.md b/docs/releasing.md index e1bf6c6..76db39c 100644 --- a/docs/releasing.md +++ b/docs/releasing.md @@ -64,7 +64,7 @@ Once the workflow completes: 3. Go to **Packages** page (in repository) 4. Verify Docker images are published: - `squid:` and `squid:latest` - - `copilot:` and `copilot:latest` + - `agent:` and `agent:latest` ## Manual Release @@ -89,7 +89,7 @@ Each release includes: ### GitHub Container Registry (GHCR) Docker images are published to `ghcr.io/githubnext/gh-aw-firewall`: - `squid:` and `squid:latest` - Squid proxy container -- `copilot:` and `copilot:latest` - Copilot execution environment +- `agent:` and `agent:latest` - Agent execution environment These images are automatically pulled by the CLI when running commands. @@ -119,7 +119,7 @@ pkg . --targets node18-linux-x64 --output release/awf ```bash # Build images locally docker build -t awf-test/squid:local ./containers/squid -docker build -t awf-test/copilot:local ./containers/copilot +docker build -t awf-test/agent:local ./containers/agent # Test with local images sudo ./dist/cli.js \ @@ -161,7 +161,7 @@ If users report that Docker images can't be pulled: To make packages public: 1. 
Go to repository **Packages** page -2. Click on the package (squid or copilot) +2. Click on the package (squid or agent) 3. Go to **Package settings** 4. Change visibility to **Public**