-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathTaskfile.yaml
More file actions
635 lines (564 loc) · 28.2 KB
/
Taskfile.yaml
File metadata and controls
635 lines (564 loc) · 28.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
# Taskfile (https://taskfile.dev) schema version.
version: '3'
vars:
  # Directory where pinned Go tooling (controller-gen, crdoc, chainsaw) is
  # installed by the internal install-go-tool task.
  TOOL_DIR: "{{.USER_WORKING_DIR}}/bin"
  # Directory for locally generated certificates.
  CERTS_DIR: "{{.USER_WORKING_DIR}}/certs"
  # renovate: datasource=go depName=sigs.k8s.io/controller-tools
  CONTROLLER_TOOLS_VERSION: v0.18.0
  # renovate: datasource=go depName=fybrik.io/crdoc
  CRDOC_VERSION: v0.6.4
  # renovate: datasource=go depName=github.com/kyverno/chainsaw
  CHAINSAW_VERSION: v0.2.13
  # Container image configuration
  MILO_IMAGE_NAME: "ghcr.io/datum-cloud/milo"
  MILO_IMAGE_TAG: "dev"
  # Name of the kind cluster managed by the test-infra include.
  TEST_INFRA_CLUSTER_NAME: "test-infra"
  # Test infra repository configuration - can be overridden with environment variable
  # NOTE(review): Taskfile `vars` are not overridden by plain environment
  # variables by default — confirm overrides are passed as `task VAR=value`
  # or that this is intentional.
  TEST_INFRA_REPO_REF: 'v0.4.0'
includes:
  # Must set TASK_X_REMOTE_TASKFILES=1 to use this feature.
  #
  # See: https://taskfile.dev/experiments/remote-taskfiles
  test-infra:
    taskfile: https://raw.githubusercontent.com/datum-cloud/test-infra/{{.TEST_INFRA_REPO_REF}}/Taskfile.yml
    # SHA-256 pin of the remote Taskfile content. This must be updated in
    # lockstep with TEST_INFRA_REPO_REF or the remote include will be rejected.
    checksum: 4995ce0e9bc4290b3a0175dee77af2570d715b435d5f3ff65f9cb6c491837bcb
    vars:
      REPO_REF: "{{.TEST_INFRA_REPO_REF}}"
tasks:
  # Running a bare `task` prints the catalogue of available tasks.
  default:
    desc: List all available tasks
    silent: true
    cmds:
      - task --list
kubectl:
desc: Run kubectl commands against the Milo API server
silent: true
cmds:
- KUBECONFIG=.milo/kubeconfig kubectl {{.CLI_ARGS}}
dev:build:
desc: Build the Milo container image for development
silent: true
cmds:
- |
set -e
echo "Building Milo container image: {{.MILO_IMAGE_NAME}}:{{.MILO_IMAGE_TAG}}"
# Get git information for version injection
GIT_COMMIT=$(git rev-parse HEAD 2>/dev/null || echo "unknown")
VERSION="v0.0.0-master+${GIT_COMMIT:0:7}"
GIT_TREE_STATE="clean"
if [ -n "$(git status --porcelain 2>/dev/null)" ]; then
GIT_TREE_STATE="dirty"
fi
BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ' 2>/dev/null || echo "unknown")
echo "Version info: ${VERSION}, commit: ${GIT_COMMIT:0:7}, tree: ${GIT_TREE_STATE}"
docker build \
--build-arg VERSION="${VERSION}" \
--build-arg GIT_COMMIT="${GIT_COMMIT}" \
--build-arg GIT_TREE_STATE="${GIT_TREE_STATE}" \
--build-arg BUILD_DATE="${BUILD_DATE}" \
-t "{{.MILO_IMAGE_NAME}}:{{.MILO_IMAGE_TAG}}" .
echo "Successfully built {{.MILO_IMAGE_NAME}}:{{.MILO_IMAGE_TAG}}"
sources:
- "**/*.go"
- "go.mod"
- "go.sum"
- "Dockerfile"
generates:
- ".task-build-timestamp"
method: timestamp
  # Assumes the image already exists locally (see dev:build) and that the kind
  # cluster named TEST_INFRA_CLUSTER_NAME is running.
  dev:load:
    desc: Load the Milo container image into the kind cluster
    silent: true
    cmds:
      - |
        set -e
        echo "Loading image {{.MILO_IMAGE_NAME}}:{{.MILO_IMAGE_TAG}} into kind cluster '{{.TEST_INFRA_CLUSTER_NAME}}'..."
        # Side-loads the image into the kind nodes so in-cluster pods can use
        # it without pulling from a registry.
        kind load docker-image "{{.MILO_IMAGE_NAME}}:{{.MILO_IMAGE_TAG}}" --name "{{.TEST_INFRA_CLUSTER_NAME}}"
        echo "Successfully loaded image into kind cluster"
  # Full control-plane bring-up. Ordering is load-bearing: etcd must be ready
  # before the API server can persist anything, the API server must be ready
  # before CRDs can be applied to it, and CRDs must exist before the
  # controller manager can start reconciling. Do not reorder the waits.
  dev:deploy:
    desc: Deploy complete Milo control plane (etcd + API server + controller manager) to test-infra cluster
    silent: true
    deps:
      # Regenerate CRD/webhook/RBAC manifests before applying them below.
      - generate:code
    cmds:
      - |
        set -e
        echo "🚀 Deploying complete Milo control plane to test-infra cluster..."
        # Deploy all components including etcd, API server, controller manager, webhooks, RBAC, and networking
        # Everything is deployed in the milo-system namespace for simplified testing
        echo "📋 Deploying Milo control plane with etcd storage backend..."
        task test-infra:kubectl -- apply -k config/overlays/test-infra/
        # Wait for etcd Helm release to be complete (ensures deployment is fully reconciled)
        echo "⏳ Waiting for etcd Helm release to be ready..."
        task test-infra:kubectl -- wait --for=condition=Ready helmrelease/etcd -n milo-system --timeout=300s
        # Wait for etcd pod readiness (API server needs etcd to store data)
        echo "⏳ Waiting for etcd pod to be ready..."
        task test-infra:kubectl -- wait --for=condition=Ready pod -l app.kubernetes.io/component=etcd -n milo-system --timeout=180s
        # Wait for API server to be ready (required for CRD installation)
        # API server must be running before we can install custom resources
        echo "⏳ Waiting for API server to be ready..."
        task test-infra:kubectl -- wait --for=condition=Ready pod -l app.kubernetes.io/name=milo-apiserver -n milo-system --timeout=180s
        # Install CRDs into Milo API server (defines custom resource schemas)
        # Controller manager needs these CRDs to watch and reconcile custom resources
        # NOTE: `task kubectl` targets the Milo API server; `task
        # test-infra:kubectl` targets the infrastructure cluster.
        echo "📋 Installing core control plane CRDs into Milo API server..."
        task kubectl -- apply -k config/crd/overlays/core-control-plane/
        task kubectl -- wait --for=condition=Established customresourcedefinitions --all
        # Step 6b: Install infrastructure control plane CRDs into Milo API server
        # This includes ProjectControlPlanes needed by the controller manager
        echo "📋 Installing infrastructure control plane CRDs into the infrastructure cluster..."
        task test-infra:kubectl -- apply -k config/crd/overlays/infra-control-plane/
        task test-infra:kubectl -- wait --for=condition=Established customresourcedefinitions projectcontrolplanes.infrastructure.miloapis.com
        # Step 7: Verify CRDs are properly installed (sanity check)
        echo "✅ Verifying CRDs are installed..."
        CRD_COUNT=$(task kubectl -- get crd --no-headers | grep miloapis | wc -l || echo "0")
        echo "Installed $CRD_COUNT Milo CRDs in API server"
        # Step 7b: Create test users in Milo API server
        echo "👤 Creating test users in Milo API server..."
        task kubectl -- apply -f config/overlays/test-infra/resources/test-users.yaml
        # Step 8: Wait for controller manager (now that CRDs exist for it to reconcile)
        # Controller manager can only start successfully after CRDs are available
        echo "⏳ Waiting for controller manager to be ready..."
        task test-infra:kubectl -- wait --for=condition=Ready pod -l app.kubernetes.io/name=milo-controller-manager -n milo-system --timeout=120s
        # Step 8b: Install webhook configurations to Milo API server
        # Webhooks validate and mutate resources submitted to the Milo API server
        echo "🔒 Installing webhook configurations to Milo API server..."
        task kubectl -- apply -k config/webhook/
        # Webhook configurations don't have conditions to wait for - they're ready immediately after creation
        echo "✅ Webhook configurations installed successfully"
        WEBHOOK_COUNT=$(task kubectl -- get mutatingwebhookconfigurations,validatingwebhookconfigurations --no-headers | grep resourcemanager.miloapis.com | wc -l || echo "0")
        echo "Installed $WEBHOOK_COUNT webhook configurations in Milo API server"
        # Update kubeconfig for easy developer access
        # NOTE(review): the block below only prints instructions; no kubeconfig
        # update command is actually run here — confirm whether one is missing.
        echo "📝 Updating kubeconfig for developer access..."
        echo ""
        echo "✅ Milo API server and storage deployed successfully!"
        echo ""
        echo "📊 Status:"
        echo "  etcd:       task test-infra:kubectl -- get pods -n milo-system -l app.kubernetes.io/component=etcd"
        echo "  API server: task test-infra:kubectl -- get pods -n milo-system -l app.kubernetes.io/name=milo-apiserver"
        echo "  Controller manager: task test-infra:kubectl -- get pods -n milo-system -l app.kubernetes.io/name=milo-controller-manager"
        echo ""
        echo "🔗 Access:"
        echo "  Gateway: https://localhost:30443 (via Envoy Gateway)"
        echo ""
        echo "🔐 Authentication:"
        echo "  Kubeconfig: .milo/kubeconfig"
        echo "  Usage: task kubectl -- <command>"
        echo ""
        echo "🎯 Test custom resources:"
        echo "  task kubectl -- get organizations"
        echo "  task kubectl -- get projects"
        echo "  task kubectl -- get users  # Should show 'admin' and 'test-user'"
        echo ""
        echo "📋 Available tokens:"
        echo "  Admin: test-admin-token (system:masters)"
        echo "  User:  test-user-token (system:authenticated)"
  # One-shot environment bootstrap. Steps are listed in `cmds` (not `deps`)
  # so they run strictly in order: cluster, then build, then load, then deploy.
  dev:setup:
    desc: Complete setup of test-infra cluster with full Milo control plane
    silent: true
    cmds:
      - task: test-infra:cluster-up
      - task: dev:build
      - task: dev:load
      - task: dev:deploy
dev:redeploy:
desc: Quick rebuild and redeploy for development iterations
deps:
- dev:build
- dev:load
- dev:deploy
cmds:
- |
set -e
echo "Redeploying Milo's apiserver..."
# Restart the deployment to pick up new image
task test-infra:kubectl -- rollout restart deployment/milo-apiserver -n milo-system
# Wait for rollout to complete
echo "Waiting for rollout to complete..."
task test-infra:kubectl -- rollout status deployment/milo-apiserver -n milo-system --timeout=120s
echo "Redeploying Milo controller manager..."
# Restart the deployment to pick up new image
task test-infra:kubectl -- rollout restart deployment/milo-controller-manager -n milo-system
# Wait for rollout to complete
echo "Waiting for rollout to complete..."
task test-infra:kubectl -- rollout status deployment/milo-controller-manager -n milo-system --timeout=120s
echo "✅ Redeployment complete!"
echo "Check logs with: task test-infra:kubectl -- logs -n milo-system -l app.kubernetes.io/name=milo-controller-manager"
  # Layers Milo-specific monitoring/telemetry manifests on top of the shared
  # observability stack provided by the test-infra include.
  dev:install-observability:
    desc: "Install test-infra observability, then apply Milo-specific scraping manifests"
    silent: true
    cmds:
      - |
        set -euo pipefail
        echo "🔧 Installing base observability stack via test-infra…"
        task test-infra:install-observability
        echo "📎 Applying Milo observability (ServiceMonitor + cert)…"
        task test-infra:kubectl -- apply -k config/components/prometheus-monitoring/
        echo "🔍 Installing Milo telemetry components (Vector audit log processor)…"
        task test-infra:kubectl -- apply -k config/telemetry/
        echo "✅ Observability stack with telemetry components deployed."
  # Runs Chainsaw e2e suites against the Milo API server. Optional CLI args
  # select subdirectories of test/ to run; with no args the whole tree runs.
  test:end-to-end:
    desc: Run end to end tests using Chainsaw against Milo API server. Pass directory names to run specific tests (e.g., 'task test:end-to-end -- quota' or 'task test:end-to-end -- quota group')
    deps:
      # Ensures the pinned chainsaw binary exists under TOOL_DIR.
      - task: install-go-tool
        vars:
          NAME: chainsaw
          PACKAGE: github.com/kyverno/chainsaw
          VERSION: "{{.CHAINSAW_VERSION}}"
    cmds:
      - |
        set -e
        echo "Running Chainsaw end to end tests against Milo API server..."
        echo "Using Milo API server via .milo/kubeconfig"
        # Verify Milo kubeconfig exists
        if [ ! -f ".milo/kubeconfig" ]; then
          echo "Error: Milo kubeconfig not found at .milo/kubeconfig"
          echo "Please run 'task dev:setup' to set up the test infrastructure first."
          exit 1
        fi
        # Verify connectivity to Milo API server
        echo "Verifying connectivity to Milo API server..."
        if ! KUBECONFIG=.milo/kubeconfig kubectl get --raw /healthz &>/dev/null; then
          echo "Error: Cannot connect to Milo API server"
          echo "Please ensure the test infrastructure is running with 'task dev:setup'"
          echo "You can check the status with:"
          echo "  task test-infra:kubectl -- get pods -n milo-system"
          exit 1
        fi
        echo "✓ Successfully connected to Milo API server"
        # Determine test paths based on CLI arguments
        if [ -z "{{.CLI_ARGS}}" ]; then
          # No arguments provided - run all tests
          echo "No test directories specified - running all end-to-end tests..."
          TEST_PATHS="test/"
        else
          # Arguments provided - construct test paths
          echo "Running tests for specified directories: {{.CLI_ARGS}}"
          TEST_PATHS=""
          for dir in {{.CLI_ARGS}}; do
            if [ -d "test/$dir" ]; then
              TEST_PATHS="$TEST_PATHS test/$dir"
            else
              echo "Warning: Test directory 'test/$dir' does not exist, skipping..."
            fi
          done
          # Check if we found any valid test directories
          if [ -z "$TEST_PATHS" ]; then
            echo "Error: No valid test directories found for arguments: {{.CLI_ARGS}}"
            echo "Available test directories:"
            ls -1 test/ | grep -v "^config$" | grep -v "^docker-compose.yaml$" || true
            exit 1
          fi
        fi
        echo "Test paths: $TEST_PATHS"
        # TEST_PATHS is intentionally unquoted so it word-splits into
        # multiple path arguments. Tests requiring an external authorization
        # provider are excluded via the label selector.
        KUBECONFIG=.milo/kubeconfig "{{.TOOL_DIR}}/chainsaw" test $TEST_PATHS --selector "requires!=authorization-provider"
    silent: true
test:unit:
desc: Run unit tests
cmds:
- |
set -e
echo "Running unit tests..."
go test ./... -v
silent: true
  # Code generation tasks
  generate:
    desc: Generate code (alias for generate:code for backward compatibility)
    cmds:
      # Code generation runs first so docs are produced from fresh CRD
      # manifests.
      - task: generate:code
      - task: generate:docs
  # Regenerates deepcopy helpers, per-package CRD manifests, webhook
  # manifests, and controller RBAC via controller-gen.
  generate:code:
    desc: Generate code including deepcopy, objects, CRDs, and potentially protobuf marshallers
    deps:
      # Installs the pinned controller-gen under TOOL_DIR.
      - task: install-go-tool
        vars:
          NAME: controller-gen
          PACKAGE: sigs.k8s.io/controller-tools/cmd/controller-gen
          VERSION: "{{.CONTROLLER_TOOLS_VERSION}}"
    cmds:
      - echo "Generating deepcopy and object files..."
      - "\"{{.TOOL_DIR}}/controller-gen\" object paths=\"./pkg/apis/...\""
      - echo "Generating CRD manifests for each package..."
      # Each API group under pkg/apis/ gets its own CRD output directory so
      # overlays can include groups selectively.
      - |
        set -e
        for package_dir in pkg/apis/*/; do
          package_name=$(basename "$package_dir")
          echo "Generating CRDs for package: $package_name"
          mkdir -p "config/crd/bases/$package_name"
          "{{.TOOL_DIR}}/controller-gen" crd paths="./$package_dir..." output:dir="./config/crd/bases/$package_name"
        done
      - echo "Generating webhook files..."
      - "\"{{.TOOL_DIR}}/controller-gen\" webhook paths=\"./internal/webhooks/...\" output:dir=\"./config/webhook\""
      # Generate RBAC rules for the controllers.
      - echo "Generating RBAC rules for the controllers..."
      - "\"{{.TOOL_DIR}}/controller-gen\" rbac:roleName=milo-controller-manager paths=\"./internal/controllers/...\" output:dir=\"./config/controller-manager/overlays/core-control-plane/rbac\""
    silent: true
generate:docs:
desc: Generate API docs
deps:
- task: install-go-tool
vars:
NAME: crdoc
PACKAGE: fybrik.io/crdoc
VERSION: "{{.CRDOC_VERSION}}"
cmds:
- |
set -e ;
mkdir -p docs/api ;
for crdmanifest in config/crd/bases/*; do
filename="$(basename -s .resourcemanager.miloapis.com.yaml $crdmanifest)" ;
filename="${filename#apiextensions.k8s.io_v1_customresourcedefinition_}" ;
bin/crdoc --resources $crdmanifest --output docs/api/$filename.md ;
done;
silent: true
  # Installs a Go tool pinned to an exact version under TOOL_DIR and keeps an
  # unversioned symlink pointing at it. Idempotent: skips the download when
  # the versioned binary already exists.
  install-go-tool:
    desc: Install a Go tool to {{.TOOL_DIR}}/{{.NAME}} (symlinked from {{.TOOL_DIR}}/{{.NAME}}-{{.VERSION}})
    silent: true
    internal: true
    # vars: - Variables that need to be set when depending on this task
    #   NAME:    binary name produced by `go install`
    #   PACKAGE: Go module path to install
    #   VERSION: version/tag to pin (e.g. v0.6.4)
    cmds:
      - mkdir -p {{.TOOL_DIR}}
      - |
        set -e
        # Capture Taskfile vars into shell vars for clarity and safety in the script
        _NAME="{{.NAME}}"
        _PACKAGE="{{.PACKAGE}}"
        _VERSION="{{.VERSION}}"
        _TOOL_DIR="{{.TOOL_DIR}}"
        _VERSIONED_TOOL_PATH="$_TOOL_DIR/$_NAME-$_VERSION" # e.g., ./bin/crdoc-v0.6.4
        _SYMLINK_PATH="$_TOOL_DIR/$_NAME" # e.g., ./bin/crdoc (this is where go install puts it first)
        # Check if the correctly versioned binary already exists
        if [ ! -f "$_VERSIONED_TOOL_PATH" ]; then
          echo "Downloading $_PACKAGE@$_VERSION (binary name: $_NAME) to $_VERSIONED_TOOL_PATH"
          # Ensure the path where `go install` will place the binary (before mv) is clear.
          # This is $_SYMLINK_PATH (e.g., ./bin/crdoc).
          if [ -d "$_SYMLINK_PATH" ]; then
            echo "Error: Target path $_SYMLINK_PATH for 'go install' is an existing directory. Please remove it manually."
            exit 1
          fi
          # Remove if it's a file or symlink, to mimic `rm -f $(1)` from Makefile.
          # This ensures 'go install' doesn't conflict with an existing symlink or wrong file.
          echo "Preparing $_SYMLINK_PATH for new installation..."
          rm -f "$_SYMLINK_PATH" || true
          echo "Installing with GOBIN=$_TOOL_DIR..."
          # 'go install' will place the executable (named $_NAME) into $_TOOL_DIR.
          # This relies on $_NAME being the actual binary name derived from $_PACKAGE.
          if ! GOBIN="$_TOOL_DIR" go install "$_PACKAGE@$_VERSION"; then
            echo "Failed to 'go install $_PACKAGE@$_VERSION' with GOBIN=$_TOOL_DIR"
            exit 1
          fi
          # After `go install`, the binary should be at $_SYMLINK_PATH (e.g. $_TOOL_DIR/$_NAME)
          if [ ! -f "$_SYMLINK_PATH" ]; then
            echo "Error: 'go install' did not produce $_SYMLINK_PATH"
            # As a fallback, check if it was installed with the package basename if _NAME was different
            _PKG_BASENAME=$(basename "$_PACKAGE")
            if [ "$_PKG_BASENAME" != "$_NAME" ] && [ -f "$_TOOL_DIR/$_PKG_BASENAME" ]; then
              echo "Found $_TOOL_DIR/$_PKG_BASENAME instead (package basename). Moving this one."
              mv "$_TOOL_DIR/$_PKG_BASENAME" "$_VERSIONED_TOOL_PATH"
            else
              echo "Please ensure the NAME variable ('$_NAME') accurately matches the binary name produced by 'go install $_PACKAGE'."
              exit 1
            fi
          else
            # Binary $_SYMLINK_PATH was created as expected. Now move it to its versioned path.
            echo "Moving installed binary from $_SYMLINK_PATH to $_VERSIONED_TOOL_PATH"
            mv "$_SYMLINK_PATH" "$_VERSIONED_TOOL_PATH"
          fi
          # Create/update the symlink (e.g., ./bin/crdoc -> crdoc-v0.6.4)
          # The target of the symlink is relative to _TOOL_DIR.
          echo "Creating/updating symlink: $_SYMLINK_PATH -> $_NAME-$_VERSION (within $_TOOL_DIR)"
          (cd "$_TOOL_DIR" && ln -sf "$_NAME-$_VERSION" "$_NAME")
          echo "Tool $_NAME is now available at $_SYMLINK_PATH (points to $_VERSIONED_TOOL_PATH)"
        fi
  # Dry-builds every kustomization in the repo; reports all failures before
  # exiting non-zero so a single run surfaces every broken overlay.
  validate-kustomizations:
    desc: Validate all kustomization.yaml files using kustomize build
    cmds:
      - echo "# Kustomize Validation Results"
      - echo ""
      - |
        HAS_ERRORS=0
        # NOTE(review): word-splitting assumes no kustomization directory path
        # contains whitespace.
        KUSTOMIZATION_DIRS=$(find . -name "kustomization.yaml" -exec dirname {} \;)
        for dir in $KUSTOMIZATION_DIRS; do
          echo "🔍 Validating: $dir"
          if ! OUTPUT=$(kustomize build "$dir" 2>&1); then
            echo ""
            echo "❌ Error in '$dir':"
            echo "----------------------------------------"
            echo "$OUTPUT"
            echo "----------------------------------------"
            echo ""
            HAS_ERRORS=1
          else
            echo "✅ $dir is valid"
          fi
        done
        if [ "$HAS_ERRORS" -eq 1 ]; then
          echo ""
          echo "🚨 One or more kustomizations failed validation."
          exit 1
        else
          echo ""
          echo "🎉 All kustomizations are valid."
        fi
    silent: false
  # Runs promtool unit tests for alerting rules. Each *-tests.yaml file is
  # executed from its own directory so relative rule-file paths resolve.
  # All failures are collected before exiting non-zero.
  test-prometheus-rules:
    desc: Run unit tests for Prometheus alerting rules
    cmds:
      - echo "# Prometheus Rules Test Results"
      - echo ""
      - |
        HAS_ERRORS=0
        TEST_FILES=$(find test/prometheus-rules -name "*-tests.yaml" 2>/dev/null)
        # Missing test files is a soft pass, not a failure.
        if [ -z "$TEST_FILES" ]; then
          echo "⚠️ No Prometheus test files found in test/prometheus-rules/"
          exit 0
        fi
        for test_file in $TEST_FILES; do
          test_dir=$(dirname "$test_file")
          test_name=$(basename "$test_file")
          echo "🔍 Testing: $test_file"
          if ! OUTPUT=$(cd "$test_dir" && promtool test rules "$test_name" 2>&1); then
            echo ""
            echo "❌ Test failed for '$test_file':"
            echo "----------------------------------------"
            echo "$OUTPUT"
            echo "----------------------------------------"
            echo ""
            HAS_ERRORS=1
          else
            echo "✅ $test_file passed"
          fi
        done
        if [ "$HAS_ERRORS" -eq 1 ]; then
          echo ""
          echo "🚨 One or more Prometheus rule tests failed."
          exit 1
        else
          echo ""
          echo "🎉 All Prometheus rule tests passed."
        fi
    silent: false
  # Launches the performance-runner Job in-cluster (templated via sed
  # placeholder substitution), waits for completion, then downloads the
  # results — preferring the perf-results ConfigMap, falling back to
  # `kubectl cp` from the runner pod.
  perf:run:
    desc: Run Milo end-to-end performance scenario and download results
    silent: true
    cmds:
      - |
        set -euo pipefail
        # Parse CLI key=value overrides passed after -- and export as env
        for kv in {{.CLI_ARGS}}; do
          case "$kv" in
            *=*) key="${kv%%=*}"; val="${kv#*=}"; export "$key=$val" ;;
            *) : ;; # ignore non key=value tokens
          esac
        done
        NS="${NS:-milo-system}"
        MILO_NS="${MILO_NAMESPACE:-milo-system}"
        VM_NS="${VM_NAMESPACE:-telemetry-system}"
        VM_SVC_NAME="${VM_SERVICE_NAME:-vmsingle-telemetry-system-vm-victoria-metrics-k8s-stack}"
        VM_PORT="${VM_PORT:-8428}"
        VM_BASE_URL="${VM_BASE_URL:-http://${VM_SVC_NAME}.${VM_NS}.svc.cluster.local:${VM_PORT}}"
        APISERVER_REGEX="${APISERVER_POD_REGEX:-milo-apiserver.*}"
        ETCD_REGEX="${ETCD_POD_REGEX:-etcd.*}"
        MILO_KUBECONFIG_SECRET_NAME="${MILO_KUBECONFIG_SECRET_NAME:-milo-controller-manager-kubeconfig}"
        MILO_KUBECONFIG_SECRET_KEY="${MILO_KUBECONFIG_SECRET_KEY:-kubeconfig}"
        # NOTE(review): MILO_KUBECONFIG_PATH and OUT_DIR are computed below but
        # never referenced again in this script — confirm whether the runner
        # Job consumes them; otherwise they are dead assignments.
        MILO_KUBECONFIG_PATH="${MILO_KUBECONFIG_PATH:-/work/milo-kubeconfig}"
        # Defaults resolve env var first, then Taskfile var, then literal.
        NUM_PROJECTS="${NUM_PROJECTS:-{{default "100" .NUM_PROJECTS}}}"
        NUM_SECRETS_PER_PROJECT="${NUM_SECRETS_PER_PROJECT:-{{default "100" .NUM_SECRETS_PER_PROJECT}}}"
        NUM_CONFIGMAPS_PER_PROJECT="${NUM_CONFIGMAPS_PER_PROJECT:-{{default "100" .NUM_CONFIGMAPS_PER_PROJECT}}}"
        PROJECT_CONCURRENCY="${PROJECT_CONCURRENCY:-{{default "4" .PROJECT_CONCURRENCY}}}"
        OBJECT_CONCURRENCY="${OBJECT_CONCURRENCY:-{{default "8" .OBJECT_CONCURRENCY}}}"
        RUN_OBJECTS_PHASE="${RUN_OBJECTS_PHASE:-{{default "true" .RUN_OBJECTS_PHASE}}}"
        OUT_DIR="${OUT_DIR:-{{default "/work/out" .OUT_DIR}}}"
        STABILIZE_SECONDS="${STABILIZE_SECONDS:-{{default "90" .STABILIZE_SECONDS}}}"
        MEASURE_WINDOW="${MEASURE_WINDOW:-{{default "2m" .MEASURE_WINDOW}}}"
        ORG_NAME="${ORG_NAME:-{{default "" .ORG_NAME}}}"
        echo "🔎 Checking Milo kubeconfig …"
        if [ ! -f ".milo/kubeconfig" ]; then
          echo "Error: .milo/kubeconfig not found. Run 'task dev:setup' first." >&2
          exit 1
        fi
        echo "🔐 Ensuring perf-runner RBAC is applied …"
        sed "s/NAMESPACE_PLACEHOLDER/${NS}/g" test/performance/config/perf-runner-rbac.yaml | task test-infra:kubectl -- apply -f -
        echo "🗂 Publishing perf script as ConfigMap …"
        task test-infra:kubectl -- -n ${NS} create configmap perf-script \
          --from-file=perf_run.py=test/performance/scripts/perf_run.py \
          --dry-run=client -o yaml | task test-infra:kubectl -- apply -f -
        echo "🚀 Launching perf runner Job …"
        # NOTE(review): most sed expressions use `/` as delimiter — overrides
        # containing a slash (other than VM_BASE_URL, which uses `#`) would
        # break the substitution; verify callers never pass such values.
        sed \
          -e "s/MILO_NAMESPACE_PLACEHOLDER/${MILO_NS}/g" \
          -e "s/NAMESPACE_PLACEHOLDER/${NS}/g" \
          -e "s#VM_BASE_URL_PLACEHOLDER#${VM_BASE_URL}#g" \
          -e "s/APISERVER_REGEX_PLACEHOLDER/${APISERVER_REGEX}/g" \
          -e "s/ETCD_REGEX_PLACEHOLDER/${ETCD_REGEX}/g" \
          -e "s/NUM_PROJECTS_PLACEHOLDER/${NUM_PROJECTS}/g" \
          -e "s/NUM_SECRETS_PLACEHOLDER/${NUM_SECRETS_PER_PROJECT}/g" \
          -e "s/NUM_CONFIGMAPS_PLACEHOLDER/${NUM_CONFIGMAPS_PER_PROJECT}/g" \
          -e "s/STABILIZE_SECONDS_PLACEHOLDER/${STABILIZE_SECONDS}/g" \
          -e "s/MEASURE_WINDOW_PLACEHOLDER/${MEASURE_WINDOW}/g" \
          -e "s/ORG_NAME_PLACEHOLDER/${ORG_NAME}/g" \
          -e "s/PROJECT_CONCURRENCY_PLACEHOLDER/${PROJECT_CONCURRENCY}/g" \
          -e "s/OBJECT_CONCURRENCY_PLACEHOLDER/${OBJECT_CONCURRENCY}/g" \
          -e "s/RUN_OBJECTS_PHASE_PLACEHOLDER/${RUN_OBJECTS_PHASE}/g" \
          -e "s/MILO_KUBECONFIG_SECRET_PLACEHOLDER/${MILO_KUBECONFIG_SECRET_NAME}/g" \
          -e "s/MILO_KUBECONFIG_KEY_PLACEHOLDER/${MILO_KUBECONFIG_SECRET_KEY}/g" \
          test/performance/config/perf-runner-job.yaml | task test-infra:kubectl -- apply -f -
        echo "⏳ Waiting for Job completion …"
        task test-infra:kubectl -- -n ${NS} wait --for=condition=Complete job/perf-runner --timeout=45m
        echo "⬇️ Downloading results …"
        mkdir -p reports/perf
        # Prefer ConfigMap (works even if pod already terminated)
        TEST_ID=$(task test-infra:kubectl -- -n ${NS} get cm perf-results -o jsonpath='{.data.test_id}' 2>/dev/null || true)
        OUT_DIR_LOCAL="reports/perf/${TEST_ID:-latest}"
        mkdir -p "$OUT_DIR_LOCAL"
        task test-infra:kubectl -- -n ${NS} get cm perf-results -o jsonpath='{.data.results\.json}' > "$OUT_DIR_LOCAL/results.json" || true
        task test-infra:kubectl -- -n ${NS} get cm perf-results -o jsonpath='{.data.report\.html}' > "$OUT_DIR_LOCAL/report.html" || true
        # Fallback to copying from the pod if ConfigMap wasn't available
        if [ ! -s "$OUT_DIR_LOCAL/results.json" ] || [ ! -s "$OUT_DIR_LOCAL/report.html" ]; then
          POD=$(task test-infra:kubectl -- -n ${NS} get pods -l job-name=perf-runner -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
          if [ -n "$POD" ]; then
            task test-infra:kubectl -- -n ${NS} cp "$POD:/work/out/results.json" "$OUT_DIR_LOCAL/results.json" || true
            task test-infra:kubectl -- -n ${NS} cp "$POD:/work/out/report.html" "$OUT_DIR_LOCAL/report.html" || true
          fi
        fi
        echo "✅ Results saved to $OUT_DIR_LOCAL"
  # Launches an in-cluster cleanup Job targeting the artifacts of the last
  # perf run (identified via the perf-results ConfigMap, or TEST_ID/ORG_NAME
  # env overrides), then removes the runner Jobs and ConfigMaps. Downloaded
  # local results under reports/perf/ are kept.
  perf:cleanup:
    desc: Cleanup resources created by the last perf run (org/projects/secrets/configmaps)
    silent: true
    cmds:
      - |
        set -euo pipefail
        NS="${NS:-milo-system}"
        MILO_KUBECONFIG_SECRET_NAME="${MILO_KUBECONFIG_SECRET_NAME:-milo-controller-manager-kubeconfig}"
        MILO_KUBECONFIG_SECRET_KEY="${MILO_KUBECONFIG_SECRET_KEY:-kubeconfig}"
        # NOTE(review): MILO_KUBECONFIG_PATH is set but never referenced later
        # in this script — confirm whether the cleanup Job consumes it.
        MILO_KUBECONFIG_PATH="${MILO_KUBECONFIG_PATH:-/work/milo-kubeconfig}"
        if [ ! -f ".milo/kubeconfig" ]; then
          echo "Error: .milo/kubeconfig not found. Run 'task dev:setup' first." >&2
          exit 1
        fi
        echo "🔎 Discovering last test identifiers …"
        # Allow override from CLI envs if ConfigMap isn't present
        TEST_ID_CM=$(task test-infra:kubectl -- -n ${NS} get cm perf-results -o jsonpath='{.data.test_id}' 2>/dev/null || true)
        ORG_NAME_CM=$(task test-infra:kubectl -- -n ${NS} get cm perf-results -o jsonpath='{.data.org_name}' 2>/dev/null || true)
        TEST_ID="${TEST_ID:-$TEST_ID_CM}"
        ORG_NAME="${ORG_NAME:-$ORG_NAME_CM}"
        # Nothing to clean when neither the ConfigMap nor env overrides
        # identify a run — exit successfully.
        if [ -z "${TEST_ID}" ] || [ -z "${ORG_NAME}" ]; then
          echo "No existing results found in namespace ${NS} (ConfigMap perf-results). Nothing to cleanup."
          exit 0
        fi
        echo "🚮 Launching cleanup Job for test ${TEST_ID} …"
        sed \
          -e "s/NAMESPACE_PLACEHOLDER/${NS}/g" \
          -e "s/TEST_ID_PLACEHOLDER/${TEST_ID}/g" \
          -e "s/ORG_NAME_PLACEHOLDER/${ORG_NAME}/g" \
          -e "s/MILO_KUBECONFIG_SECRET_PLACEHOLDER/${MILO_KUBECONFIG_SECRET_NAME}/g" \
          -e "s/MILO_KUBECONFIG_KEY_PLACEHOLDER/${MILO_KUBECONFIG_SECRET_KEY}/g" \
          test/performance/config/perf-cleanup-job.yaml | task test-infra:kubectl -- apply -f -
        echo "⏳ Waiting for cleanup Job completion …"
        task test-infra:kubectl -- -n ${NS} wait --for=condition=Complete job/perf-cleanup --timeout=30m
        echo "🧹 Removing runner artifacts (keeping downloaded results) …"
        task test-infra:kubectl -- -n ${NS} delete job/perf-runner --ignore-not-found
        task test-infra:kubectl -- -n ${NS} delete job/perf-cleanup --ignore-not-found
        task test-infra:kubectl -- -n ${NS} delete configmap perf-script --ignore-not-found
        task test-infra:kubectl -- -n ${NS} delete configmap perf-results --ignore-not-found
        echo "✅ Cleanup complete."