 	"context"
 	"net/http"
 	"net/http/httptest"
+	"sync"
 	"testing"
+	"time"
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -171,3 +173,191 @@ func TestMetricRayJobDeploymentStatus(t *testing.T) {
 		})
 	}
 }
+
+// TestRayJobMetricCleanupItem tests the structure of RayJobMetricCleanupItem.
+func TestRayJobMetricCleanupItem(t *testing.T) {
+	// Create a test cleanup item.
+	item := RayJobMetricCleanupItem{
+		Name:      "test-job",
+		Namespace: "default",
+		DeleteAt:  time.Now().Add(5 * time.Minute),
+	}
+
+	// Verify the fields are set correctly.
+	assert.Equal(t, "test-job", item.Name)
+	assert.Equal(t, "default", item.Namespace)
+	assert.WithinDuration(t, time.Now().Add(5*time.Minute), item.DeleteAt, time.Second)
+}
+
+// TestScheduleRayJobMetricForCleanup tests adding items to the cleanup queue.
+func TestScheduleRayJobMetricForCleanup(t *testing.T) {
+	// Create a fake client and a metrics manager.
+	k8sScheme := runtime.NewScheme()
+	require.NoError(t, rayv1.AddToScheme(k8sScheme))
+	client := fake.NewClientBuilder().WithScheme(k8sScheme).Build()
+	manager := NewRayJobMetricsManager(context.Background(), client)
+
+	// Schedule a cleanup for a job.
+	manager.ScheduleRayJobMetricForCleanup("test-job", "default")
+
+	// Verify the item was added to the queue.
+	manager.queueMutex.Lock()
+	defer manager.queueMutex.Unlock()
+
+	assert.Len(t, manager.cleanupQueue, 1)
+	assert.Equal(t, "test-job", manager.cleanupQueue[0].Name)
+	assert.Equal(t, "default", manager.cleanupQueue[0].Namespace)
+	assert.WithinDuration(t, time.Now().Add(5*time.Minute), manager.cleanupQueue[0].DeleteAt, time.Second)
+}
+
+// TestCleanupExpiredRayJobMetrics tests the cleanup of expired metrics.
+func TestCleanupExpiredRayJobMetrics(t *testing.T) {
+	// Create a registry, a fake client, and a metrics manager.
+	registry := prometheus.NewRegistry()
+	k8sScheme := runtime.NewScheme()
+	require.NoError(t, rayv1.AddToScheme(k8sScheme))
+	client := fake.NewClientBuilder().WithScheme(k8sScheme).Build()
+	manager := NewRayJobMetricsManager(context.Background(), client)
+
+	// Register the manager with the registry.
+	registry.MustRegister(manager)
+
+	// Record a metric for a job.
+	manager.ObserveRayJobExecutionDuration("test-job", "default", rayv1.JobDeploymentStatusComplete, 0, 10.5)
+
+	// Verify the metric exists.
+	metrics, err := registry.Gather()
+	require.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	assert.Equal(t, "kuberay_job_execution_duration_seconds", metrics[0].GetName())
+
+	// Add the job to the cleanup queue with a delete time in the past.
+	manager.queueMutex.Lock()
+	manager.cleanupQueue = append(manager.cleanupQueue, RayJobMetricCleanupItem{
+		Name:      "test-job",
+		Namespace: "default",
+		DeleteAt:  time.Now().Add(-1 * time.Minute), // Already expired.
+	})
+	manager.queueMutex.Unlock()
+
+	// Run cleanup.
+	manager.cleanupExpiredRayJobMetrics()
+
+	// Verify the series was deleted. Depending on how the collector reports an
+	// empty vector, Gather may omit the family entirely, so only assert that no
+	// samples remain for it.
+	metrics, err = registry.Gather()
+	require.NoError(t, err)
+	for _, mf := range metrics {
+		if mf.GetName() == "kuberay_job_execution_duration_seconds" {
+			assert.Empty(t, mf.GetMetric())
+		}
+	}
+}
+
+// TestRayJobCleanupLoop tests the background cleanup loop.
+func TestRayJobCleanupLoop(t *testing.T) {
+	// Create a fake client and a metrics manager.
+	k8sScheme := runtime.NewScheme()
+	require.NoError(t, rayv1.AddToScheme(k8sScheme))
+	client := fake.NewClientBuilder().WithScheme(k8sScheme).Build()
+	manager := NewRayJobMetricsManager(context.Background(), client)
+
+	// Start the cleanup loop.
+	ctx, cancel := context.WithCancel(context.Background())
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		manager.startRayJobCleanupLoop(ctx)
+	}()
+
+	// Schedule a cleanup for a job.
+	manager.ScheduleRayJobMetricForCleanup("test-job", "default")
+
+	// The scheduled item carries the default TTL (five minutes, per the tests
+	// above), which is far longer than this test should wait, so force it to
+	// expire immediately.
+	manager.queueMutex.Lock()
+	for i := range manager.cleanupQueue {
+		manager.cleanupQueue[i].DeleteAt = time.Now().Add(-1 * time.Minute)
+	}
+	manager.queueMutex.Unlock()
+
+	// Wait for the background loop to drain the queue. This assumes the loop
+	// ticks at least once within the wait window below.
+	assert.Eventually(t, func() bool {
+		manager.queueMutex.Lock()
+		defer manager.queueMutex.Unlock()
+		return len(manager.cleanupQueue) == 0
+	}, 5*time.Second, 100*time.Millisecond)
+
+	// Stop the cleanup loop.
+	cancel()
+	wg.Wait()
+}
+
+// TestRayJobConditionProvisioned tests that metrics are emitted when a job
+// reaches a terminal state and are cleaned up afterwards.
+func TestRayJobConditionProvisioned(t *testing.T) {
+	// Create a registry, a fake client, and a metrics manager.
+	registry := prometheus.NewRegistry()
+	k8sScheme := runtime.NewScheme()
+	require.NoError(t, rayv1.AddToScheme(k8sScheme))
+	client := fake.NewClientBuilder().WithScheme(k8sScheme).Build()
+	manager := NewRayJobMetricsManager(context.Background(), client)
+	registry.MustRegister(manager)
+
+	// Simulate a running job.
+	job := &rayv1.RayJob{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-job",
+			Namespace: "default",
+		},
+		Status: rayv1.RayJobStatus{
+			JobDeploymentStatus: rayv1.JobDeploymentStatusRunning,
+			JobStatus:           rayv1.JobStatusRunning,
+			StartTime:           &metav1.Time{Time: time.Now().Add(-10 * time.Second)},
+		},
+	}
+
+	// Simulate job completion.
+	oldStatus := job.Status
+	job.Status.JobDeploymentStatus = rayv1.JobDeploymentStatusComplete
+	job.Status.JobStatus = rayv1.JobStatusSucceeded
+
+	// Emit metrics and schedule cleanup.
+	emitRayJobExecutionDuration(manager, job.Name, job.Namespace, oldStatus, job.Status)
+
+	// Verify the metric was recorded.
+	metrics, err := registry.Gather()
+	require.NoError(t, err)
+	assert.Len(t, metrics, 1)
+	assert.Equal(t, "kuberay_job_execution_duration_seconds", metrics[0].GetName())
+
+	// Fast-forward time and run the cleanup.
+	manager.queueMutex.Lock()
+	for i := range manager.cleanupQueue {
+		manager.cleanupQueue[i].DeleteAt = time.Now().Add(-1 * time.Minute) // Force expiry.
+	}
+	manager.queueMutex.Unlock()
+
+	manager.cleanupExpiredRayJobMetrics()
+
+	// Verify the series was cleaned up. Gather may omit an empty family, so only
+	// assert that no samples remain for it.
+	metrics, err = registry.Gather()
+	require.NoError(t, err)
+	for _, mf := range metrics {
+		if mf.GetName() == "kuberay_job_execution_duration_seconds" {
+			assert.Empty(t, mf.GetMetric())
+		}
+	}
+}
+
+// Helper function mirroring the one in the controller.
+func emitRayJobExecutionDuration(rayJobMetricsObserver RayJobMetricsObserver, rayJobName, rayJobNamespace string, originalRayJobStatus, rayJobStatus rayv1.RayJobStatus) {
+	if rayJobStatus.StartTime == nil {
+		// Set a default start time if none was provided.
+		now := time.Now()
+		rayJobStatus.StartTime = &metav1.Time{Time: now.Add(-10 * time.Second)}
+	}
+	if !rayv1.IsJobDeploymentTerminal(originalRayJobStatus.JobDeploymentStatus) && (rayv1.IsJobDeploymentTerminal(rayJobStatus.JobDeploymentStatus) ||
+		rayJobStatus.JobDeploymentStatus == rayv1.JobDeploymentStatusRetrying) {
+		retryCount := 0
+		if originalRayJobStatus.Failed != nil {
+			retryCount += int(*originalRayJobStatus.Failed)
+		}
+
+		rayJobMetricsObserver.ObserveRayJobExecutionDuration(
+			rayJobName,
+			rayJobNamespace,
+			rayJobStatus.JobDeploymentStatus,
+			retryCount,
+			time.Since(rayJobStatus.StartTime.Time).Seconds(),
+		)
+	}
+}