package server

import (
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gin-gonic/gin"
	acp "github.com/humanlayer/agentcontrolplane/acp/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

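// TestAgentEndpointStatus verifies that agent status fields, and the status of
// referenced MCP servers, are surfaced by the /v1/agents endpoints.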
func TestAgentEndpointStatus(t *testing.T) {
	// Create a scheme with our API types registered
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		t.Fatalf("Failed to add corev1 to scheme: %v", err)
	}
	if err := acp.AddToScheme(scheme); err != nil {
		t.Fatalf("Failed to add acp to scheme: %v", err)
	}

	// Agent whose status fields the endpoints should surface; it is pre-loaded
	// into the fake client built below.
	agent := &acp.Agent{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "status-test-agent",
			Namespace: "status-test-namespace",
		},
		Spec: acp.AgentSpec{
			LLMRef: acp.LocalObjectReference{Name: "status-test-llm"},
			System: "Agent with status",
		},
		Status: acp.AgentStatus{
			Ready:        true,
			Status:       acp.AgentStatusReady,
			StatusDetail: "Everything is working",
		},
	}

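	// LLM that the agent's LLMRef points to.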
	llm := &acp.LLM{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "status-test-llm",
			Namespace: "status-test-namespace",
		},
		Spec: acp.LLMSpec{
			Provider: "test-provider",
			Parameters: acp.BaseConfig{
				Model: "test-model",
			},
		},
	}

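	// MCP server referenced by the agent; its Connected/Ready status should be
	// reflected in the API response.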
	mcpServer := &acp.MCPServer{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "status-test-agent-mcp1",
			Namespace: "status-test-namespace",
		},
		Spec: acp.MCPServerSpec{
			Transport: "stdio",
			Command:   "python",
			Args:      []string{"-m", "script.py"},
		},
		Status: acp.MCPServerStatus{
			Connected:    true,
			Status:       "Ready",
			StatusDetail: "Connected to MCP server",
		},
	}

	// Create namespace
	namespace := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: "status-test-namespace"},
	}

	// Build a fake client pre-loaded with these objects
	k8sClient := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(namespace, llm, agent, mcpServer).
		Build()

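	// Reference the MCP server from the agent's spec and persist the change so
	// that reads through the fake client see it.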
	agent.Spec.MCPServers = []acp.LocalObjectReference{
		{Name: "status-test-agent-mcp1"},
	}
	ctx := context.Background()
	if err := k8sClient.Update(ctx, agent); err != nil {
		t.Fatalf("Failed to update agent: %v", err)
	}

	// Create an API server with the client
	apiServer := NewAPIServer(k8sClient, ":8080")
	gin.SetMode(gin.TestMode)

	// Test GET /agents/:name
	t.Run("GET /agents/:name", func(t *testing.T) {
		recorder := httptest.NewRecorder()
		req := httptest.NewRequest(http.MethodGet, "/v1/agents/status-test-agent?namespace=status-test-namespace", nil)
		apiServer.Router().ServeHTTP(recorder, req)

		if recorder.Code != http.StatusOK {
			t.Errorf("Expected status code %d, got %d", http.StatusOK, recorder.Code)
			t.Logf("Response body: %s", recorder.Body.String())
			return
		}

		var response AgentResponse
		if err := json.Unmarshal(recorder.Body.Bytes(), &response); err != nil {
			t.Fatalf("Failed to unmarshal response: %v", err)
		}

		// Verify status fields are included and correct
		if response.Status != string(acp.AgentStatusReady) {
			t.Errorf("Expected status %s, got %s", string(acp.AgentStatusReady), response.Status)
		}
		if response.StatusDetail != "Everything is working" {
			t.Errorf("Expected status detail %q, got %q", "Everything is working", response.StatusDetail)
		}
		if !response.Ready {
			t.Error("Expected ready to be true")
		}

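		// The response is expected to key MCP servers by their short name
		// ("mcp1") rather than the full CR name ("status-test-agent-mcp1").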
		// Verify MCP server status is embedded in the MCPServer config
		mcpResp, ok := response.MCPServers["mcp1"]
		if !ok {
			t.Error("Expected MCPServers to have key 'mcp1'")
		} else {
			if mcpResp.Status != "Ready" {
				t.Errorf("Expected MCP status %q, got %q", "Ready", mcpResp.Status)
			}
			if mcpResp.StatusDetail != "Connected to MCP server" {
				t.Errorf("Expected MCP status detail %q, got %q", "Connected to MCP server", mcpResp.StatusDetail)
			}
			if !mcpResp.Ready {
				t.Error("Expected MCP ready to be true")
			}
		}
	})

| 142 | + |
| 143 | + // Test GET /agents (list) |
| 144 | + t.Run("GET /agents (list)", func(t *testing.T) { |
| 145 | + recorder := httptest.NewRecorder() |
| 146 | + req := httptest.NewRequest(http.MethodGet, "/v1/agents?namespace=status-test-namespace", nil) |
| 147 | + apiServer.Router().ServeHTTP(recorder, req) |
| 148 | + |
| 149 | + if recorder.Code != http.StatusOK { |
| 150 | + t.Errorf("Expected status code %d, got %d", http.StatusOK, recorder.Code) |
| 151 | + t.Logf("Response body: %s", recorder.Body.String()) |
| 152 | + return |
| 153 | + } |
| 154 | + |
| 155 | + var response []AgentResponse |
| 156 | + if err := json.Unmarshal(recorder.Body.Bytes(), &response); err != nil { |
| 157 | + t.Fatalf("Failed to unmarshal response: %v", err) |
| 158 | + } |
| 159 | + |
| 160 | + if len(response) == 0 { |
| 161 | + t.Fatal("Expected at least one agent in response") |
| 162 | + } |
| 163 | + |
| 164 | + // Find our test agent in the response |
| 165 | + var testAgentResponse AgentResponse |
| 166 | + found := false |
| 167 | + for _, agentResp := range response { |
| 168 | + if agentResp.Name == "status-test-agent" { |
| 169 | + testAgentResponse = agentResp |
| 170 | + found = true |
| 171 | + break |
| 172 | + } |
| 173 | + } |
| 174 | + |
| 175 | + if !found { |
| 176 | + t.Fatal("Test agent not found in response") |
| 177 | + } |
| 178 | + |
| 179 | + // Verify status fields are included and correct |
| 180 | + if testAgentResponse.Status != string(acp.AgentStatusReady) { |
| 181 | + t.Errorf("Expected status %s, got %s", string(acp.AgentStatusReady), testAgentResponse.Status) |
| 182 | + } |
| 183 | + if testAgentResponse.StatusDetail != "Everything is working" { |
| 184 | + t.Errorf("Expected status detail %q, got %q", "Everything is working", testAgentResponse.StatusDetail) |
| 185 | + } |
| 186 | + if !testAgentResponse.Ready { |
| 187 | + t.Error("Expected ready to be true") |
| 188 | + } |
| 189 | + |
		// Verify MCP server status is embedded in the MCPServer config
		mcpResp, ok := testAgentResponse.MCPServers["mcp1"]
		if !ok {
			t.Error("Expected MCPServers to have key 'mcp1'")
		} else {
			if mcpResp.Status != "Ready" {
				t.Errorf("Expected MCP status %q, got %q", "Ready", mcpResp.Status)
			}
			if mcpResp.StatusDetail != "Connected to MCP server" {
				t.Errorf("Expected MCP status detail %q, got %q", "Connected to MCP server", mcpResp.StatusDetail)
			}
			if !mcpResp.Ready {
				t.Error("Expected MCP ready to be true")
			}
		}
	})
}