11import json
2+ import os
3+ import shutil
4+ import subprocess
25import time
36from unittest .mock import AsyncMock
47
710from shell_engine import ShellEngine
811
912
13+ def _bwrap_functional () -> bool :
14+ """Check if bwrap is installed and functional (can create namespaces)."""
15+ bwrap_path = shutil .which ("bwrap" )
16+ if not bwrap_path :
17+ return False
18+ try :
19+ # Try a minimal bwrap command to see if namespaces work
20+ # Need to bind /usr, /lib, /lib64 for dynamic executables to work
21+ cmd = [
22+ bwrap_path ,
23+ "--unshare-all" ,
24+ "--ro-bind" ,
25+ "/usr" ,
26+ "/usr" ,
27+ "--symlink" ,
28+ "/usr/bin" ,
29+ "/bin" ,
30+ "--dev" ,
31+ "/dev" ,
32+ "--proc" ,
33+ "/proc" ,
34+ ]
35+ # Add lib directories if they exist
36+ for lib_path in ["/lib" , "/lib64" ]:
37+ if os .path .exists (lib_path ):
38+ cmd .extend (["--ro-bind" , lib_path , lib_path ])
39+ cmd .extend (["--" , "/usr/bin/true" ])
40+
41+ result = subprocess .run (cmd , capture_output = True , timeout = 5 )
42+ return result .returncode == 0
43+ except (subprocess .TimeoutExpired , OSError ):
44+ return False
45+
46+
# Decorate tests/classes with this marker to skip them wherever bwrap cannot
# create namespaces (common in sandboxed CI runners).
requires_bwrap = pytest.mark.skipif(
    _bwrap_functional() is False,
    reason="bwrap not functional (may lack namespace permissions in CI)",
)
52+
53+
1054class MockToolResult :
1155 """Mock object that mimics MCP tool result structure."""
1256
@@ -90,6 +134,7 @@ async def test_validate_empty_command(self):
90134 engine .validate_command ("" )
91135
92136
137+ @requires_bwrap
93138@pytest .mark .asyncio
94139class TestShellStage :
95140 """Test shell_stage execution."""
@@ -349,6 +394,7 @@ async def test_tool_stage_non_dict_json_does_not_override_existing_input(self):
349394class TestExecutePipeline :
350395 """Test execute_pipeline with various pipeline configurations."""
351396
397+ @requires_bwrap
352398 async def test_execute_pipeline_single_command (self ):
353399 """Test pipeline with tool followed by shell command."""
354400 mock_caller = AsyncMock (return_value = MockToolResult ("test output" ))
@@ -363,6 +409,7 @@ async def test_execute_pipeline_single_command(self):
363409
364410 assert "test output" in result
365411
412+ @requires_bwrap
366413 async def test_execute_pipeline_multiple_commands (self ):
367414 """Test pipeline with tool and multiple shell commands."""
368415 mock_caller = AsyncMock (return_value = MockToolResult ("apple\n banana\n cherry" ))
@@ -398,6 +445,7 @@ async def test_execute_pipeline_with_tool(self):
398445 assert "tool result" in result
399446 mock_caller .assert_called_once ()
400447
448+ @requires_bwrap
401449 async def test_execute_pipeline_mixed_stages (self ):
402450 """Test pipeline with mixed command and tool stages."""
403451 mock_caller = AsyncMock (return_value = MockToolResult ('{"data": "test"}' ))
@@ -540,7 +588,9 @@ class TestPreviewStage:
540588
541589 async def test_preview_stage_basic (self ):
542590 """Test that preview stage summarizes JSON data."""
543- large_data = json .dumps ({"items" : [{"id" : i , "name" : f"Item { i } " } for i in range (100 )]})
591+ large_data = json .dumps (
592+ {"items" : [{"id" : i , "name" : f"Item { i } " } for i in range (100 )]}
593+ )
544594 mock_caller = AsyncMock (return_value = MockToolResult (large_data ))
545595 engine = ShellEngine (tool_caller = mock_caller )
546596
@@ -603,6 +653,7 @@ async def test_preview_stage_invalid_chars(self):
603653 with pytest .raises (RuntimeError , match = "chars.*must be a positive integer" ):
604654 await engine .execute_pipeline (pipeline )
605655
656+ @requires_bwrap
606657 async def test_preview_stage_in_middle_of_pipeline (self ):
607658 """Test that preview can be used mid-pipeline (though output won't be valid JSON)."""
608659 mock_caller = AsyncMock (return_value = MockToolResult ('{"value": 42}' ))
@@ -657,6 +708,7 @@ async def test_stage_error_includes_stage_number(self):
657708 with pytest .raises (RuntimeError , match = "Pipeline execution failed.*Stage 2" ):
658709 await engine .execute_pipeline (pipeline )
659710
711+ @requires_bwrap
660712 async def test_shell_command_error_with_stderr (self ):
661713 """Test that shell commands that fail with stderr output raise proper errors."""
662714 # jq fails with exit code 5 when trying to index a string with a field name
@@ -674,6 +726,7 @@ async def test_shell_command_error_with_stderr(self):
674726 ):
675727 await engine .execute_pipeline (pipeline )
676728
729+ @requires_bwrap
677730 async def test_shell_command_grep_no_match_no_error (self ):
678731 """Test that grep returning no match (exit 1) doesn't raise error."""
679732 mock_caller = AsyncMock (return_value = MockToolResult ("hello world\n " ))
@@ -691,6 +744,7 @@ async def test_shell_command_grep_no_match_no_error(self):
691744 assert result .strip () == ""
692745
693746
747+ @requires_bwrap
694748@pytest .mark .asyncio
695749class TestShellCommandTimeouts :
696750 """Test timeout functionality for shell commands."""
@@ -814,6 +868,7 @@ async def test_engine_initialization_with_default_timeout(self):
814868 assert engine .default_timeout == 5.0
815869
816870
871+ @requires_bwrap
817872@pytest .mark .asyncio
818873class TestStreamingForEach :
819874 """Test that for_each mode truly streams data instead of loading all into memory."""
0 commit comments