diff --git a/Makefile b/Makefile index e1d1ba9e..9ac489ad 100644 --- a/Makefile +++ b/Makefile @@ -206,6 +206,17 @@ test: ensure-ch-binaries ensure-caddy-binaries build-embedded gen-jwt: $(GODOTENV) @$(GODOTENV) -f .env go run ./cmd/gen-jwt -user-id $${USER_ID:-test-user} +# Build the generic builder image for builds +build-builder: + docker build -t hypeman/builder:latest -f lib/builds/images/generic/Dockerfile . + +# Alias for backwards compatibility +build-builders: build-builder + +# Run E2E build system test (requires server running: make dev) +e2e-build-test: + @./scripts/e2e-build-test.sh + # Clean generated files and binaries clean: rm -rf $(BIN_DIR) diff --git a/cmd/api/api/api.go b/cmd/api/api/api.go index ec184ab2..90be4103 100644 --- a/cmd/api/api/api.go +++ b/cmd/api/api/api.go @@ -2,6 +2,7 @@ package api import ( "github.com/onkernel/hypeman/cmd/api/config" + "github.com/onkernel/hypeman/lib/builds" "github.com/onkernel/hypeman/lib/devices" "github.com/onkernel/hypeman/lib/images" "github.com/onkernel/hypeman/lib/ingress" @@ -21,6 +22,7 @@ type ApiService struct { NetworkManager network.Manager DeviceManager devices.Manager IngressManager ingress.Manager + BuildManager builds.Manager ResourceManager *resources.Manager } @@ -35,6 +37,7 @@ func New( networkManager network.Manager, deviceManager devices.Manager, ingressManager ingress.Manager, + buildManager builds.Manager, resourceManager *resources.Manager, ) *ApiService { return &ApiService{ @@ -45,6 +48,7 @@ func New( NetworkManager: networkManager, DeviceManager: deviceManager, IngressManager: ingressManager, + BuildManager: buildManager, ResourceManager: resourceManager, } } diff --git a/cmd/api/api/builds.go b/cmd/api/api/builds.go new file mode 100644 index 00000000..deaf6ec1 --- /dev/null +++ b/cmd/api/api/builds.go @@ -0,0 +1,313 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/onkernel/hypeman/lib/builds" + 
"github.com/onkernel/hypeman/lib/logger" + "github.com/onkernel/hypeman/lib/oapi" +) + +// ListBuilds returns all builds +func (s *ApiService) ListBuilds(ctx context.Context, request oapi.ListBuildsRequestObject) (oapi.ListBuildsResponseObject, error) { + log := logger.FromContext(ctx) + + domainBuilds, err := s.BuildManager.ListBuilds(ctx) + if err != nil { + log.ErrorContext(ctx, "failed to list builds", "error", err) + return oapi.ListBuilds500JSONResponse{ + Code: "internal_error", + Message: "failed to list builds", + }, nil + } + + oapiBuilds := make([]oapi.Build, len(domainBuilds)) + for i, b := range domainBuilds { + oapiBuilds[i] = buildToOAPI(b) + } + + return oapi.ListBuilds200JSONResponse(oapiBuilds), nil +} + +// CreateBuild creates a new build job +func (s *ApiService) CreateBuild(ctx context.Context, request oapi.CreateBuildRequestObject) (oapi.CreateBuildResponseObject, error) { + log := logger.FromContext(ctx) + + // Parse multipart form fields + var sourceData []byte + var baseImageDigest, cacheScope, dockerfile string + var timeoutSeconds int + var secrets []builds.SecretRef + + for { + part, err := request.Body.NextPart() + if err == io.EOF { + break + } + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "failed to parse multipart form", + }, nil + } + + switch part.FormName() { + case "source": + sourceData, err = io.ReadAll(part) + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_source", + Message: "failed to read source data", + }, nil + } + case "base_image_digest": + data, err := io.ReadAll(part) + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "failed to read base_image_digest field", + }, nil + } + baseImageDigest = string(data) + case "cache_scope": + data, err := io.ReadAll(part) + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "failed to read cache_scope field", + }, 
nil + } + cacheScope = string(data) + case "dockerfile": + data, err := io.ReadAll(part) + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "failed to read dockerfile field", + }, nil + } + dockerfile = string(data) + case "timeout_seconds": + data, err := io.ReadAll(part) + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "failed to read timeout_seconds field", + }, nil + } + if v, err := strconv.Atoi(string(data)); err == nil { + timeoutSeconds = v + } + case "secrets": + data, err := io.ReadAll(part) + if err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "failed to read secrets field", + }, nil + } + if err := json.Unmarshal(data, &secrets); err != nil { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "secrets must be a JSON array of {\"id\": \"...\", \"env_var\": \"...\"} objects", + }, nil + } + } + part.Close() + } + + if len(sourceData) == 0 { + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_request", + Message: "source is required", + }, nil + } + + // Note: Dockerfile validation happens in the builder agent. + // It will check if Dockerfile is in the source tarball or provided via dockerfile parameter. 
+ + // Build domain request + domainReq := builds.CreateBuildRequest{ + BaseImageDigest: baseImageDigest, + CacheScope: cacheScope, + Dockerfile: dockerfile, + Secrets: secrets, + } + + // Apply timeout if provided + if timeoutSeconds > 0 { + domainReq.BuildPolicy = &builds.BuildPolicy{ + TimeoutSeconds: timeoutSeconds, + } + } + + build, err := s.BuildManager.CreateBuild(ctx, domainReq, sourceData) + if err != nil { + switch { + case errors.Is(err, builds.ErrDockerfileRequired): + return oapi.CreateBuild400JSONResponse{ + Code: "dockerfile_required", + Message: err.Error(), + }, nil + case errors.Is(err, builds.ErrInvalidSource): + return oapi.CreateBuild400JSONResponse{ + Code: "invalid_source", + Message: err.Error(), + }, nil + default: + log.ErrorContext(ctx, "failed to create build", "error", err) + return oapi.CreateBuild500JSONResponse{ + Code: "internal_error", + Message: "failed to create build", + }, nil + } + } + + return oapi.CreateBuild202JSONResponse(buildToOAPI(build)), nil +} + +// GetBuild gets build details +func (s *ApiService) GetBuild(ctx context.Context, request oapi.GetBuildRequestObject) (oapi.GetBuildResponseObject, error) { + log := logger.FromContext(ctx) + + build, err := s.BuildManager.GetBuild(ctx, request.Id) + if err != nil { + if errors.Is(err, builds.ErrNotFound) { + return oapi.GetBuild404JSONResponse{ + Code: "not_found", + Message: "build not found", + }, nil + } + log.ErrorContext(ctx, "failed to get build", "error", err, "id", request.Id) + return oapi.GetBuild500JSONResponse{ + Code: "internal_error", + Message: "failed to get build", + }, nil + } + + return oapi.GetBuild200JSONResponse(buildToOAPI(build)), nil +} + +// CancelBuild cancels a build +func (s *ApiService) CancelBuild(ctx context.Context, request oapi.CancelBuildRequestObject) (oapi.CancelBuildResponseObject, error) { + log := logger.FromContext(ctx) + + err := s.BuildManager.CancelBuild(ctx, request.Id) + if err != nil { + switch { + case errors.Is(err, 
builds.ErrNotFound): + return oapi.CancelBuild404JSONResponse{ + Code: "not_found", + Message: "build not found", + }, nil + case errors.Is(err, builds.ErrBuildInProgress): + return oapi.CancelBuild409JSONResponse{ + Code: "conflict", + Message: "build already in progress", + }, nil + default: + log.ErrorContext(ctx, "failed to cancel build", "error", err, "id", request.Id) + return oapi.CancelBuild500JSONResponse{ + Code: "internal_error", + Message: "failed to cancel build", + }, nil + } + } + + return oapi.CancelBuild204Response{}, nil +} + +// GetBuildEvents streams build events via SSE +// With follow=false (default), streams existing logs then closes +// With follow=true, continues streaming until build completes +func (s *ApiService) GetBuildEvents(ctx context.Context, request oapi.GetBuildEventsRequestObject) (oapi.GetBuildEventsResponseObject, error) { + log := logger.FromContext(ctx) + + // Parse follow parameter (default false) + follow := false + if request.Params.Follow != nil { + follow = *request.Params.Follow + } + + eventChan, err := s.BuildManager.StreamBuildEvents(ctx, request.Id, follow) + if err != nil { + if errors.Is(err, builds.ErrNotFound) { + return oapi.GetBuildEvents404JSONResponse{ + Code: "not_found", + Message: "build not found", + }, nil + } + log.ErrorContext(ctx, "failed to stream build events", "error", err, "id", request.Id) + return oapi.GetBuildEvents500JSONResponse{ + Code: "internal_error", + Message: "failed to stream build events", + }, nil + } + + return buildEventsStreamResponse{eventChan: eventChan}, nil +} + +// buildEventsStreamResponse implements oapi.GetBuildEventsResponseObject with proper SSE streaming +type buildEventsStreamResponse struct { + eventChan <-chan builds.BuildEvent +} + +func (r buildEventsStreamResponse) VisitGetBuildEventsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", 
"keep-alive") + w.Header().Set("X-Accel-Buffering", "no") // Disable nginx buffering + w.WriteHeader(200) + + flusher, ok := w.(http.Flusher) + if !ok { + return fmt.Errorf("streaming not supported") + } + + for event := range r.eventChan { + jsonEvent, err := json.Marshal(event) + if err != nil { + continue + } + fmt.Fprintf(w, "data: %s\n\n", jsonEvent) + flusher.Flush() + } + return nil +} + +// buildToOAPI converts a domain Build to OAPI Build +func buildToOAPI(b *builds.Build) oapi.Build { + oapiBuild := oapi.Build{ + Id: b.ID, + Status: oapi.BuildStatus(b.Status), + QueuePosition: b.QueuePosition, + ImageDigest: b.ImageDigest, + ImageRef: b.ImageRef, + Error: b.Error, + CreatedAt: b.CreatedAt, + StartedAt: b.StartedAt, + CompletedAt: b.CompletedAt, + DurationMs: b.DurationMS, + } + + if b.Provenance != nil { + oapiBuild.Provenance = &oapi.BuildProvenance{ + BaseImageDigest: &b.Provenance.BaseImageDigest, + SourceHash: &b.Provenance.SourceHash, + BuildkitVersion: &b.Provenance.BuildkitVersion, + Timestamp: &b.Provenance.Timestamp, + } + if len(b.Provenance.LockfileHashes) > 0 { + oapiBuild.Provenance.LockfileHashes = &b.Provenance.LockfileHashes + } + } + + return oapiBuild +} + diff --git a/cmd/api/api/images_test.go b/cmd/api/api/images_test.go index 9d5f0590..fc2e0180 100644 --- a/cmd/api/api/images_test.go +++ b/cmd/api/api/images_test.go @@ -225,9 +225,17 @@ func TestCreateImage_Idempotent(t *testing.T) { t.Fatal("Build failed - this is the root cause of test failures") } - require.Equal(t, oapi.ImageStatus(images.StatusPending), img2.Status) - require.NotNil(t, img2.QueuePosition, "should have queue position") - require.Equal(t, 1, *img2.QueuePosition, "should still be at position 1") + // Status can be "pending" (still processing) or "ready" (already completed in fast CI) + // The key idempotency invariant is that the digest is the same (verified above) + require.Contains(t, []oapi.ImageStatus{ + oapi.ImageStatus(images.StatusPending), + 
oapi.ImageStatus(images.StatusReady), + }, img2.Status, "status should be pending or ready") + + // If still pending, should have queue position + if img2.Status == oapi.ImageStatus(images.StatusPending) { + require.NotNil(t, img2.QueuePosition, "should have queue position when pending") + } // Construct digest reference: repository@digest // Extract repository from imageName (strip tag part) diff --git a/cmd/api/api/registry_test.go b/cmd/api/api/registry_test.go index 45e6264b..e88978ef 100644 --- a/cmd/api/api/registry_test.go +++ b/cmd/api/api/registry_test.go @@ -73,7 +73,8 @@ func TestRegistryPushAndConvert(t *testing.T) { t.Log("Push successful!") // Wait for image to be converted - imageName := "test/alpine@" + digest.String() + // Include serverHost since our registry now stores images with the full host + imageName := serverHost + "/test/alpine@" + digest.String() imgResp := waitForImageReady(t, svc, imageName, 60*time.Second) assert.NotNil(t, imgResp.SizeBytes, "ready image should have size") } @@ -124,7 +125,8 @@ func TestRegistryPushAndCreateInstance(t *testing.T) { require.NoError(t, err) // Wait for image to be ready - imageName := "test/alpine@" + digest.String() + // Include serverHost since our registry now stores images with the full host + imageName := serverHost + "/test/alpine@" + digest.String() waitForImageReady(t, svc, imageName, 60*time.Second) // Create instance with pushed image @@ -362,7 +364,8 @@ func TestRegistryTagPush(t *testing.T) { t.Log("Push successful!") // The image should be registered with the computed digest, not the tag - imageName := "tag-test/alpine@" + digest.String() + // Include serverHost since our registry now stores images with the full host + imageName := serverHost + "/tag-test/alpine@" + digest.String() waitForImageReady(t, svc, imageName, 60*time.Second) // Verify image appears in ListImages (GET /images) @@ -375,7 +378,7 @@ func TestRegistryTagPush(t *testing.T) { for _, img := range images { if img.Digest == 
digest.String() { found = true - assert.Equal(t, oapi.Ready, img.Status, "image in list should have Ready status") + assert.Equal(t, oapi.ImageStatusReady, img.Status, "image in list should have Ready status") assert.NotNil(t, img.SizeBytes, "ready image should have size") t.Logf("Image found in ListImages: %s (status=%s, size=%d)", img.Name, img.Status, *img.SizeBytes) break @@ -415,7 +418,8 @@ func TestRegistryDockerV2ManifestConversion(t *testing.T) { // Wait for image to be converted // The server converts Docker v2 to OCI format internally, resulting in a different digest - imgResp := waitForImageReady(t, svc, "dockerv2-test/alpine:v1", 60*time.Second) + // Include serverHost since our registry now stores images with the full host + imgResp := waitForImageReady(t, svc, serverHost+"/dockerv2-test/alpine:v1", 60*time.Second) assert.NotNil(t, imgResp.SizeBytes, "ready image should have size") assert.NotEmpty(t, imgResp.Digest, "image should have digest") } diff --git a/cmd/api/config/config.go b/cmd/api/config/config.go index 438c6012..d56c8024 100644 --- a/cmd/api/config/config.go +++ b/cmd/api/config/config.go @@ -103,6 +103,13 @@ type Config struct { // Cloudflare configuration (if AcmeDnsProvider=cloudflare) CloudflareApiToken string // Cloudflare API token + // Build system configuration + MaxConcurrentSourceBuilds int // Max concurrent source-to-image builds + BuilderImage string // OCI image for builder VMs + RegistryURL string // URL of registry for built images + BuildTimeout int // Default build timeout in seconds + BuildSecretsDir string // Directory containing build secrets (optional) + // Hypervisor configuration DefaultHypervisor string // Default hypervisor type: "cloud-hypervisor" or "qemu" @@ -185,6 +192,13 @@ func Load() *Config { // Cloudflare configuration CloudflareApiToken: getEnv("CLOUDFLARE_API_TOKEN", ""), + // Build system configuration + MaxConcurrentSourceBuilds: getEnvInt("MAX_CONCURRENT_SOURCE_BUILDS", 2), + BuilderImage: 
getEnv("BUILDER_IMAGE", "hypeman/builder:latest"), + RegistryURL: getEnv("REGISTRY_URL", "localhost:8080"), + BuildTimeout: getEnvInt("BUILD_TIMEOUT", 600), + BuildSecretsDir: getEnv("BUILD_SECRETS_DIR", ""), // Optional: path to directory with build secrets + // Hypervisor configuration DefaultHypervisor: getEnv("DEFAULT_HYPERVISOR", "cloud-hypervisor"), diff --git a/cmd/api/main.go b/cmd/api/main.go index 18e66a64..1025a71c 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -355,6 +355,12 @@ func run() error { // Error group for coordinated shutdown grp, gctx := errgroup.WithContext(ctx) + // Start build manager background services (vsock handler for builder VMs) + if err := app.BuildManager.Start(gctx); err != nil { + logger.Error("failed to start build manager", "error", err) + return err + } + // Run the server grp.Go(func() error { logger.Info("starting hypeman API", "port", app.Config.Port) diff --git a/cmd/api/wire.go b/cmd/api/wire.go index d9e734ac..4a08138b 100644 --- a/cmd/api/wire.go +++ b/cmd/api/wire.go @@ -9,6 +9,7 @@ import ( "github.com/google/wire" "github.com/onkernel/hypeman/cmd/api/api" "github.com/onkernel/hypeman/cmd/api/config" + "github.com/onkernel/hypeman/lib/builds" "github.com/onkernel/hypeman/lib/devices" "github.com/onkernel/hypeman/lib/images" "github.com/onkernel/hypeman/lib/ingress" @@ -33,6 +34,7 @@ type application struct { InstanceManager instances.Manager VolumeManager volumes.Manager IngressManager ingress.Manager + BuildManager builds.Manager ResourceManager *resources.Manager Registry *registry.Registry ApiService *api.ApiService @@ -52,6 +54,7 @@ func initializeApp() (*application, func(), error) { providers.ProvideInstanceManager, providers.ProvideVolumeManager, providers.ProvideIngressManager, + providers.ProvideBuildManager, providers.ProvideResourceManager, providers.ProvideRegistry, api.New, diff --git a/cmd/api/wire_gen.go b/cmd/api/wire_gen.go index 0ba6a773..fc72e1a2 100644 --- a/cmd/api/wire_gen.go +++ 
b/cmd/api/wire_gen.go @@ -10,6 +10,7 @@ import ( "context" "github.com/onkernel/hypeman/cmd/api/api" "github.com/onkernel/hypeman/cmd/api/config" + "github.com/onkernel/hypeman/lib/builds" "github.com/onkernel/hypeman/lib/devices" "github.com/onkernel/hypeman/lib/images" "github.com/onkernel/hypeman/lib/ingress" @@ -54,6 +55,10 @@ func initializeApp() (*application, func(), error) { if err != nil { return nil, nil, err } + buildsManager, err := providers.ProvideBuildManager(paths, config, instancesManager, volumesManager, logger) + if err != nil { + return nil, nil, err + } resourcesManager, err := providers.ProvideResourceManager(context, config, paths, manager, instancesManager, volumesManager) if err != nil { return nil, nil, err @@ -62,7 +67,7 @@ func initializeApp() (*application, func(), error) { if err != nil { return nil, nil, err } - apiService := api.New(config, manager, instancesManager, volumesManager, networkManager, devicesManager, ingressManager, resourcesManager) + apiService := api.New(config, manager, instancesManager, volumesManager, networkManager, devicesManager, ingressManager, buildsManager, resourcesManager) mainApplication := &application{ Ctx: context, Logger: logger, @@ -74,6 +79,7 @@ func initializeApp() (*application, func(), error) { InstanceManager: instancesManager, VolumeManager: volumesManager, IngressManager: ingressManager, + BuildManager: buildsManager, ResourceManager: resourcesManager, Registry: registry, ApiService: apiService, @@ -96,6 +102,7 @@ type application struct { InstanceManager instances.Manager VolumeManager volumes.Manager IngressManager ingress.Manager + BuildManager builds.Manager ResourceManager *resources.Manager Registry *registry.Registry ApiService *api.ApiService diff --git a/lib/builds/README.md b/lib/builds/README.md new file mode 100644 index 00000000..4aaa7def --- /dev/null +++ b/lib/builds/README.md @@ -0,0 +1,463 @@ +# Build System + +The build system provides source-to-image builds inside ephemeral 
Cloud Hypervisor microVMs, enabling secure multi-tenant isolation with rootless BuildKit. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Hypeman API │ +│ POST /builds → BuildManager → BuildQueue │ +│ │ │ +│ Start() → VsockHandler (port 5001) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Builder MicroVM │ +│ ┌─────────────────────────────────────────────────────────────┐│ +│ │ Volumes Mounted: ││ +│ │ - /src (source code, read-write) ││ +│ │ - /config/build.json (build configuration, read-only) ││ +│ ├─────────────────────────────────────────────────────────────┤│ +│ │ Builder Agent ││ +│ │ ┌─────────────┐ ┌──────────────┐ ┌────────────────────┐ ││ +│ │ │ Load Config │→ │ Read User's │→ │ Run BuildKit │ ││ +│ │ │ /config/ │ │ Dockerfile │ │ (buildctl) │ ││ +│ │ └─────────────┘ └──────────────┘ └────────────────────┘ ││ +│ │ │ ││ +│ │ ▼ ││ +│ │ Push to Registry ││ +│ │ (JWT token auth) ││ +│ └─────────────────────────────────────────────────────────────┘│ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ OCI Registry │ +│ {REGISTRY_URL}/builds/{build-id} │ +│ (default: 10.102.0.1:8083 from VM) │ +└─────────────────────────────────────────────────────────────────┘ +``` + +## Components + +### Core Types (`types.go`) + +| Type | Description | +|------|-------------| +| `Build` | Build job status and metadata | +| `CreateBuildRequest` | API request to create a build | +| `BuildConfig` | Configuration passed to builder VM | +| `BuildResult` | Result returned by builder agent | +| `BuildProvenance` | Audit trail for reproducibility | +| `BuildPolicy` | Resource limits and network policy | + +### Build Queue (`queue.go`) + +In-memory queue with configurable concurrency: + +```go +queue := NewBuildQueue(maxConcurrent) 
+position := queue.Enqueue(buildID, request, startFunc) +queue.Cancel(buildID) +queue.GetPosition(buildID) +``` + +**Recovery**: On startup, `listPendingBuilds()` scans disk metadata for incomplete builds and re-enqueues them in FIFO order. + +### Storage (`storage.go`) + +Builds are persisted to `$DATA_DIR/builds/{id}/`: + +``` +builds/ +└── {build-id}/ + ├── metadata.json # Build status, provenance + ├── config.json # Config for builder VM + ├── source/ + │ └── source.tar.gz + └── logs/ + └── build.log +``` + +### Build Manager (`manager.go`) + +Orchestrates the build lifecycle: + +1. Validate request and store source +2. Write build config to disk +3. Enqueue build job +4. Create source volume from archive +5. Create config volume with `build.json` +6. Create builder VM with both volumes attached +7. Wait for build completion +8. Update metadata and cleanup + +**Important**: The `Start()` method must be called to start the vsock handler for builder communication. + +### Cache System (`cache.go`) + +Registry-based caching with tenant isolation: + +``` +{registry}/cache/{tenant_scope}/{runtime}/{lockfile_hash} +``` + +```go +gen := NewCacheKeyGenerator("localhost:8080") +key, _ := gen.GenerateCacheKey("my-tenant", "myapp", lockfileHashes) +// key.ImportCacheArg() → "type=registry,ref=localhost:8080/cache/my-tenant/myapp/abc123" +// key.ExportCacheArg() → "type=registry,ref=localhost:8080/cache/my-tenant/myapp/abc123,mode=max" +``` + +### Registry Token System (`registry_token.go`) + +JWT-based authentication for builder VMs to push images: + +```go +generator := NewRegistryTokenGenerator(jwtSecret) +token, _ := generator.GeneratePushToken(buildID, []string{"builds/abc123", "cache/tenant-x"}, 30*time.Minute) +// Token grants push access only to specified repositories +// Validated by middleware on /v2/* registry endpoints +``` + +| Field | Description | +|-------|-------------| +| `BuildID` | Build job identifier for audit | +| `Repositories` | Allowed repository 
paths | +| `Scope` | Access scope: `push` or `pull` | +| `ExpiresAt` | Token expiry (matches build timeout) | + +### Metrics (`metrics.go`) + +OpenTelemetry metrics for monitoring: + +| Metric | Type | Description | +|--------|------|-------------| +| `hypeman_build_duration_seconds` | Histogram | Build duration | +| `hypeman_builds_total` | Counter | Total builds by status/runtime | +| `hypeman_build_queue_length` | Gauge | Pending builds in queue | +| `hypeman_builds_active` | Gauge | Currently running builds | + +### Builder Agent (`builder_agent/main.go`) + +Guest binary that runs inside builder VMs: + +1. Reads config from `/config/build.json` +2. Fetches secrets from host via vsock (if any) +3. Uses user-provided Dockerfile (from source or config) +4. Runs `buildctl-daemonless.sh` with cache and insecure registry flags +5. Computes provenance (lockfile hashes, source hash) +6. Reports result back via vsock + +**Note**: The agent requires a Dockerfile to be provided. It can be included in the source tarball or passed via the `dockerfile` config parameter. 
+ +**Key Details**: +- Config path: `/config/build.json` +- Source path: `/src` +- Uses `registry.insecure=true` for HTTP registries +- Inherits `BUILDKITD_FLAGS` from environment + +## API Endpoints + +| Method | Path | Description | +|--------|------|-------------| +| `POST` | `/builds` | Submit build (multipart form) | +| `GET` | `/builds` | List all builds | +| `GET` | `/builds/{id}` | Get build details | +| `DELETE` | `/builds/{id}` | Cancel build | +| `GET` | `/builds/{id}/logs` | Stream logs (SSE) | + +### Submit Build Example + +```bash +# Option 1: Dockerfile in source tarball +curl -X POST http://localhost:8083/builds \ + -H "Authorization: Bearer $TOKEN" \ + -F "source=@source.tar.gz" \ + -F "cache_scope=tenant-123" \ + -F "timeout_seconds=300" + +# Option 2: Dockerfile as parameter +curl -X POST http://localhost:8083/builds \ + -H "Authorization: Bearer $TOKEN" \ + -F "source=@source.tar.gz" \ + -F "dockerfile=FROM node:20-alpine +WORKDIR /app +COPY . . +RUN npm ci +CMD [\"node\", \"index.js\"]" \ + -F "cache_scope=tenant-123" +``` + +### Response + +```json +{ + "id": "abc123", + "status": "queued", + "created_at": "2025-01-15T10:00:00Z" +} +``` + +## Configuration + +| Environment Variable | Default | Description | +|---------------------|---------|-------------| +| `MAX_CONCURRENT_SOURCE_BUILDS` | `2` | Max parallel builds | +| `BUILDER_IMAGE` | `hypeman/builder:latest` | Builder VM image | +| `REGISTRY_URL` | `localhost:8080` | Registry for built images | +| `BUILD_TIMEOUT` | `600` | Default timeout (seconds) | + +### Registry URL Configuration + +The `REGISTRY_URL` must be accessible from inside builder VMs. Since `localhost` in the VM refers to the VM itself, you need to use the host's gateway IP: + +```bash +# In .env +REGISTRY_URL=10.102.0.1:8083 # Gateway IP accessible from VM network +``` + +### Registry Authentication + +Builder VMs authenticate to the registry using short-lived JWT tokens: + +1. 
**Token Generation**: The build manager generates a scoped token for each build +2. **Token Scope**: Grants push access only to `builds/{build_id}` and `cache/{cache_scope}` +3. **Token TTL**: Matches build timeout (minimum 30 minutes) +4. **Authentication**: Builder agent sends token via Basic auth (`token:` format) + +## Build Status Flow + +``` +queued → building → pushing → ready + ↘ ↗ + failed + ↑ + cancelled +``` + +## Security Model + +1. **Isolation**: Each build runs in a fresh microVM (Cloud Hypervisor) +2. **Rootless**: BuildKit runs without root privileges +3. **Network Control**: `network_mode: isolated` or `egress` with optional domain allowlist +4. **Secret Handling**: Secrets fetched via vsock, never written to disk in guest +5. **Cache Isolation**: Per-tenant cache scopes prevent cross-tenant cache poisoning +6. **Registry Auth**: Short-lived JWT tokens scoped to specific repositories (builds/{id}, cache/{scope}) + +## Builder Images + +The generic builder image is in `images/generic/`: + +- `generic/Dockerfile` - Minimal Alpine + BuildKit + agent (runtime-agnostic) + +The generic builder does not include any runtime (Node.js, Python, etc.). Users provide their own Dockerfile which specifies the runtime. BuildKit pulls the runtime as part of the build process. + +### Required Components + +Builder images must include: + +| Component | Source | Purpose | +|-----------|--------|---------| +| `buildctl` | `moby/buildkit:rootless` | BuildKit CLI | +| `buildctl-daemonless.sh` | `moby/buildkit:rootless` | Daemonless wrapper | +| `buildkitd` | `moby/buildkit:rootless` | BuildKit daemon | +| `buildkit-runc` | `moby/buildkit:rootless` | Container runtime (as `/usr/bin/runc`) | +| `builder-agent` | Built from `builder_agent/main.go` | Hypeman agent | +| `fuse-overlayfs` | apk/apt | Overlay filesystem support | + +### Build and Push + +See [`images/README.md`](./images/README.md) for detailed build instructions. 
+ +```bash +# Build and push the builder image +docker build \ + -t yourregistry/builder:latest \ + -f lib/builds/images/generic/Dockerfile \ + . + +docker push yourregistry/builder:latest +``` + +### Environment Variables + +The builder image should set: + +```dockerfile +# Empty or minimal flags - cgroups are mounted in microVM +ENV BUILDKITD_FLAGS="" +ENV HOME=/home/builder +ENV XDG_RUNTIME_DIR=/home/builder/.local/share +``` + +## MicroVM Requirements + +Builder VMs require specific kernel and init script features: + +### Cgroups + +The init script mounts cgroups for BuildKit/runc: + +```bash +# Cgroup v2 (preferred) +mount -t cgroup2 none /sys/fs/cgroup + +# Or cgroup v1 fallback +mount -t tmpfs cgroup /sys/fs/cgroup +for ctrl in cpu cpuacct memory devices freezer blkio pids; do + mkdir -p /sys/fs/cgroup/$ctrl + mount -t cgroup -o $ctrl cgroup /sys/fs/cgroup/$ctrl +done +``` + +### Volume Mounts + +Two volumes are attached to builder VMs: + +1. **Source volume** (`/src`, read-write): Contains extracted source tarball +2. **Config volume** (`/config`, read-only): Contains `build.json` + +The source is mounted read-write so the generated Dockerfile can be written. + +## Provenance + +Each build records provenance for reproducibility: + +```json +{ + "base_image_digest": "sha256:abc123...", + "source_hash": "sha256:def456...", + "lockfile_hashes": { + "package-lock.json": "sha256:..." + }, + "toolchain_version": "v20.10.0", + "buildkit_version": "v0.12.0", + "timestamp": "2025-01-15T10:05:00Z" +} +``` + +## Testing + +### Unit Tests + +```bash +# Run unit tests +go test ./lib/builds/... -v + +# Test specific components +go test ./lib/builds/queue_test.go ./lib/builds/queue.go ./lib/builds/types.go -v +go test ./lib/builds/cache_test.go ./lib/builds/cache.go ./lib/builds/types.go ./lib/builds/errors.go -v +go test ./lib/builds/registry_token_test.go ./lib/builds/registry_token.go -v +``` + +### E2E Testing + +1. 
**Start the server**: + ```bash + make dev + ``` + +2. **Ensure builder image is available**: + ```bash + TOKEN=$(make gen-jwt | tail -1) + curl -X POST http://localhost:8083/images \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"name": "hirokernel/builder-generic:latest"}' + ``` + +3. **Create test source with Dockerfile**: + ```bash + mkdir -p /tmp/test-app + echo '{"name": "test", "version": "1.0.0", "dependencies": {}}' > /tmp/test-app/package.json + echo 'console.log("Hello!");' > /tmp/test-app/index.js + cat > /tmp/test-app/Dockerfile << 'EOF' + FROM node:20-alpine + WORKDIR /app + COPY package.json index.js ./ + CMD ["node", "index.js"] + EOF + tar -czf /tmp/source.tar.gz -C /tmp/test-app . + ``` + +4. **Submit build**: + ```bash + curl -X POST http://localhost:8083/builds \ + -H "Authorization: Bearer $TOKEN" \ + -F "source=@/tmp/source.tar.gz" + ``` + +5. **Poll for completion**: + ```bash + BUILD_ID="" + curl http://localhost:8083/builds/$BUILD_ID \ + -H "Authorization: Bearer $TOKEN" + ``` + +6. 
**Run the built image**: + ```bash + curl -X POST http://localhost:8083/instances \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "test-app", + "image": "builds/'$BUILD_ID':latest", + "size": "1GB", + "vcpus": 1 + }' + ``` + +## Troubleshooting + +### Common Issues + +| Error | Cause | Solution | +|-------|-------|----------| +| `image not found` | Builder image not imported | Import image using `POST /images` endpoint | +| `no cgroup mount found` | Cgroups not mounted in VM | Update init script to mount cgroups | +| `http: server gave HTTP response to HTTPS client` | BuildKit using HTTPS for HTTP registry | Add `registry.insecure=true` to output flags | +| `connection refused` to localhost:8080 | Registry URL not accessible from VM | Use gateway IP (10.102.0.1) instead of localhost | +| `401 Unauthorized` | Registry auth issue | Check registry_token in config.json; verify middleware handles Basic auth | +| `No space left on device` | Instance memory too small for image | Use at least 1GB RAM for Node.js images | +| `can't enable NoProcessSandbox without Rootless` | Wrong BUILDKITD_FLAGS | Use empty flags or remove the flag | + +### Debug Builder VM + +Check logs of the builder instance: + +```bash +# List instances +curl http://localhost:8083/instances -H "Authorization: Bearer $TOKEN" | jq + +# Get builder instance logs +INSTANCE_ID="" +curl http://localhost:8083/instances/$INSTANCE_ID/logs \ + -H "Authorization: Bearer $TOKEN" +``` + +### Verify Build Config + +Check the config volume contents: + +```bash +cat $DATA_DIR/builds/$BUILD_ID/config.json +``` + +Expected format: +```json +{ + "job_id": "abc123", + "registry_url": "10.102.0.1:8083", + "registry_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", + "cache_scope": "my-tenant", + "source_path": "/src", + "dockerfile": "FROM node:20-alpine\nWORKDIR /app\n...", + "timeout_seconds": 300, + "network_mode": "egress" +} +``` + +Note: `registry_token` is a 
short-lived JWT granting push access to `builds/abc123` and `cache/my-tenant`. diff --git a/lib/builds/TODO.md b/lib/builds/TODO.md new file mode 100644 index 00000000..9cced6c5 --- /dev/null +++ b/lib/builds/TODO.md @@ -0,0 +1,13 @@ +# Build System TODOs + +Outstanding issues and improvements for the build system. + +--- + +## 🟢 Low Priority + +### Builder Image Tooling + +**File:** `lib/builds/images/README.md` + +**Suggestion:** Create a script or tooling for building and publishing new builder images. diff --git a/lib/builds/builder_agent/main.go b/lib/builds/builder_agent/main.go new file mode 100644 index 00000000..102fa877 --- /dev/null +++ b/lib/builds/builder_agent/main.go @@ -0,0 +1,677 @@ +// Package main implements the builder agent that runs inside builder microVMs. +// It reads build configuration from the config disk, runs BuildKit to build +// the image, and reports results back to the host via vsock. +// +// Communication model: +// - Agent LISTENS on vsock port 5001 +// - Host CONNECTS to the agent via the VM's vsock.sock file +// - This follows the Cloud Hypervisor vsock pattern (host initiates) +package main + +import ( + "bufio" + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/mdlayher/vsock" +) + +const ( + configPath = "/config/build.json" + vsockPort = 5001 // Build agent port (different from exec agent) +) + +// BuildConfig matches the BuildConfig type from lib/builds/types.go +type BuildConfig struct { + JobID string `json:"job_id"` + BaseImageDigest string `json:"base_image_digest,omitempty"` + RegistryURL string `json:"registry_url"` + RegistryToken string `json:"registry_token,omitempty"` + CacheScope string `json:"cache_scope,omitempty"` + SourcePath string `json:"source_path"` + Dockerfile string `json:"dockerfile,omitempty"` + BuildArgs map[string]string 
`json:"build_args,omitempty"` + Secrets []SecretRef `json:"secrets,omitempty"` + TimeoutSeconds int `json:"timeout_seconds"` + NetworkMode string `json:"network_mode"` +} + +// SecretRef references a secret to inject during build +type SecretRef struct { + ID string `json:"id"` + EnvVar string `json:"env_var,omitempty"` +} + +// BuildResult is sent back to the host +type BuildResult struct { + Success bool `json:"success"` + ImageDigest string `json:"image_digest,omitempty"` + Error string `json:"error,omitempty"` + Logs string `json:"logs,omitempty"` + Provenance BuildProvenance `json:"provenance"` + DurationMS int64 `json:"duration_ms"` +} + +// BuildProvenance records build inputs +type BuildProvenance struct { + BaseImageDigest string `json:"base_image_digest"` + SourceHash string `json:"source_hash"` + LockfileHashes map[string]string `json:"lockfile_hashes,omitempty"` + BuildkitVersion string `json:"buildkit_version,omitempty"` + Timestamp time.Time `json:"timestamp"` +} + +// VsockMessage is the envelope for vsock communication +type VsockMessage struct { + Type string `json:"type"` + Result *BuildResult `json:"result,omitempty"` + Log string `json:"log,omitempty"` + SecretIDs []string `json:"secret_ids,omitempty"` // For secrets request to host + Secrets map[string]string `json:"secrets,omitempty"` // For secrets response from host +} + +// Global state for the result to send when host connects +var ( + buildResult *BuildResult + buildResultLock sync.Mutex + buildDone = make(chan struct{}) + + // Secrets coordination + buildConfig *BuildConfig + buildConfigLock sync.Mutex + secretsReady = make(chan struct{}) + secretsOnce sync.Once + + // Encoder lock protects concurrent access to json.Encoder + // (the goroutine sending build_result and the main loop handling get_status) + encoderLock sync.Mutex +) + +func main() { + log.Println("=== Builder Agent Starting ===") + + // Start guest-agent for exec/debugging support (runs in background) + startGuestAgent() + 
+ // Start vsock listener first (so host can connect as soon as VM is ready) + listener, err := startVsockListener() + if err != nil { + log.Fatalf("Failed to start vsock listener: %v", err) + } + defer listener.Close() + log.Printf("Listening on vsock port %d", vsockPort) + + // Run the build in background + go runBuildProcess() + + // Accept connections from host + for { + conn, err := listener.Accept() + if err != nil { + log.Printf("Accept error: %v", err) + continue + } + go handleHostConnection(conn) + } +} + +// startVsockListener starts listening on vsock with retries (like exec-agent) +func startVsockListener() (*vsock.Listener, error) { + var l *vsock.Listener + var err error + + for i := 0; i < 10; i++ { + l, err = vsock.Listen(vsockPort, nil) + if err == nil { + return l, nil + } + log.Printf("vsock listen attempt %d/10 failed: %v (retrying in 1s)", i+1, err) + time.Sleep(1 * time.Second) + } + + return nil, fmt.Errorf("failed to listen on vsock port %d after retries: %v", vsockPort, err) +} + +// startGuestAgent starts the guest-agent binary for exec/debugging support. +// The guest-agent listens on vsock port 2222 and provides exec capability +// so operators can debug failed builds. 
+func startGuestAgent() { + guestAgentPath := "/usr/bin/guest-agent" + + // Check if guest-agent exists + if _, err := os.Stat(guestAgentPath); os.IsNotExist(err) { + log.Printf("guest-agent not found at %s (exec disabled)", guestAgentPath) + return + } + + // Start guest-agent in background + cmd := exec.Command(guestAgentPath) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Start(); err != nil { + log.Printf("Failed to start guest-agent: %v", err) + return + } + + log.Printf("Started guest-agent (PID %d) for exec support", cmd.Process.Pid) + + // Let the process run in background - don't wait for it + go func() { + if err := cmd.Wait(); err != nil { + log.Printf("guest-agent exited: %v", err) + } + }() +} + +// handleHostConnection handles a connection from the host +func handleHostConnection(conn net.Conn) { + defer conn.Close() + + reader := bufio.NewReader(conn) + encoder := json.NewEncoder(conn) + decoder := json.NewDecoder(reader) + + for { + var msg VsockMessage + if err := decoder.Decode(&msg); err != nil { + if err == io.EOF { + return + } + log.Printf("Decode error: %v", err) + return + } + + switch msg.Type { + case "host_ready": + // Host is ready to handle requests + // Request secrets if we have any configured + if err := handleSecretsRequest(encoder, decoder); err != nil { + log.Printf("Failed to fetch secrets: %v", err) + } + // Signal that secrets are ready (even if failed, build can proceed) + secretsOnce.Do(func() { + close(secretsReady) + }) + + // Wait for build to complete and send result to host + go func() { + <-buildDone + + buildResultLock.Lock() + result := buildResult + buildResultLock.Unlock() + + log.Printf("Build completed, sending result to host") + encoderLock.Lock() + err := encoder.Encode(VsockMessage{Type: "build_result", Result: result}) + encoderLock.Unlock() + if err != nil { + log.Printf("Failed to send build result: %v", err) + } + }() + + case "get_result": + // Host is asking for the build result + // 
Wait for build to complete if not done yet + <-buildDone + + buildResultLock.Lock() + result := buildResult + buildResultLock.Unlock() + + response := VsockMessage{ + Type: "build_result", + Result: result, + } + encoderLock.Lock() + err := encoder.Encode(response) + encoderLock.Unlock() + if err != nil { + log.Printf("Failed to send result: %v", err) + } + return // Close connection after sending result + + case "get_status": + // Host is checking if build is still running + encoderLock.Lock() + select { + case <-buildDone: + encoder.Encode(VsockMessage{Type: "status", Log: "completed"}) + default: + encoder.Encode(VsockMessage{Type: "status", Log: "building"}) + } + encoderLock.Unlock() + + default: + log.Printf("Unknown message type: %s", msg.Type) + } + } +} + +// handleSecretsRequest requests secrets from the host and writes them to /run/secrets/ +func handleSecretsRequest(encoder *json.Encoder, decoder *json.Decoder) error { + // Wait for config to be loaded + var config *BuildConfig + for i := 0; i < 30; i++ { + buildConfigLock.Lock() + config = buildConfig + buildConfigLock.Unlock() + if config != nil { + break + } + time.Sleep(100 * time.Millisecond) + } + + if config == nil { + log.Printf("Config not loaded yet, skipping secrets") + return nil + } + + if len(config.Secrets) == 0 { + log.Printf("No secrets configured") + return nil + } + + // Extract secret IDs + secretIDs := make([]string, len(config.Secrets)) + for i, s := range config.Secrets { + secretIDs[i] = s.ID + } + + log.Printf("Requesting secrets: %v", secretIDs) + + // Send get_secrets request + req := VsockMessage{ + Type: "get_secrets", + SecretIDs: secretIDs, + } + encoderLock.Lock() + err := encoder.Encode(req) + encoderLock.Unlock() + if err != nil { + return fmt.Errorf("send get_secrets: %w", err) + } + + // Wait for secrets_response + var resp VsockMessage + if err := decoder.Decode(&resp); err != nil { + return fmt.Errorf("receive secrets_response: %w", err) + } + + if resp.Type != 
"secrets_response" { + return fmt.Errorf("unexpected response type: %s", resp.Type) + } + + // Write secrets to /run/secrets/ + if err := os.MkdirAll("/run/secrets", 0700); err != nil { + return fmt.Errorf("create secrets dir: %w", err) + } + + for id, value := range resp.Secrets { + secretPath := fmt.Sprintf("/run/secrets/%s", id) + if err := os.WriteFile(secretPath, []byte(value), 0600); err != nil { + return fmt.Errorf("write secret %s: %w", id, err) + } + log.Printf("Wrote secret: %s", id) + } + + log.Printf("Received %d secrets", len(resp.Secrets)) + return nil +} + +// runBuildProcess runs the actual build and stores the result +func runBuildProcess() { + start := time.Now() + var logs bytes.Buffer + logWriter := io.MultiWriter(os.Stdout, &logs) + + log.SetOutput(logWriter) + + defer func() { + close(buildDone) + }() + + // Load build config + config, err := loadConfig() + if err != nil { + setResult(BuildResult{ + Success: false, + Error: fmt.Sprintf("load config: %v", err), + Logs: logs.String(), + DurationMS: time.Since(start).Milliseconds(), + }) + return + } + log.Printf("Job: %s", config.JobID) + + // Store config globally so handleHostConnection can access it for secrets + buildConfigLock.Lock() + buildConfig = config + buildConfigLock.Unlock() + + // Setup registry authentication before running the build + if err := setupRegistryAuth(config.RegistryURL, config.RegistryToken); err != nil { + setResult(BuildResult{ + Success: false, + Error: fmt.Sprintf("setup registry auth: %v", err), + Logs: logs.String(), + DurationMS: time.Since(start).Milliseconds(), + }) + return + } + + // Setup timeout context + ctx := context.Background() + if config.TimeoutSeconds > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(config.TimeoutSeconds)*time.Second) + defer cancel() + } + + // Wait for secrets if any are configured + if len(config.Secrets) > 0 { + log.Printf("Waiting for secrets from host...") + select { + case 
<-secretsReady: + log.Printf("Secrets ready, proceeding with build") + case <-time.After(30 * time.Second): + log.Printf("Warning: Timeout waiting for secrets, proceeding anyway") + // Signal secrets ready to avoid blocking other goroutines + secretsOnce.Do(func() { + close(secretsReady) + }) + case <-ctx.Done(): + setResult(BuildResult{ + Success: false, + Error: "build timeout while waiting for secrets", + Logs: logs.String(), + DurationMS: time.Since(start).Milliseconds(), + }) + return + } + } + + // Ensure Dockerfile exists (either in source or provided via config) + dockerfilePath := filepath.Join(config.SourcePath, "Dockerfile") + if _, err := os.Stat(dockerfilePath); os.IsNotExist(err) { + // Check if Dockerfile was provided in config + if config.Dockerfile == "" { + setResult(BuildResult{ + Success: false, + Error: "Dockerfile required: provide dockerfile parameter or include Dockerfile in source tarball", + Logs: logs.String(), + DurationMS: time.Since(start).Milliseconds(), + }) + return + } + // Write provided Dockerfile to source directory + if err := os.WriteFile(dockerfilePath, []byte(config.Dockerfile), 0644); err != nil { + setResult(BuildResult{ + Success: false, + Error: fmt.Sprintf("write dockerfile: %v", err), + Logs: logs.String(), + DurationMS: time.Since(start).Milliseconds(), + }) + return + } + log.Println("Using Dockerfile from config") + } else { + log.Println("Using Dockerfile from source") + } + + // Compute provenance + provenance := computeProvenance(config) + + // Run the build + log.Println("=== Starting Build ===") + digest, buildLogs, err := runBuild(ctx, config, logWriter) + logs.WriteString(buildLogs) + + duration := time.Since(start).Milliseconds() + + if err != nil { + setResult(BuildResult{ + Success: false, + Error: err.Error(), + Logs: logs.String(), + Provenance: provenance, + DurationMS: duration, + }) + return + } + + // Success! 
+ log.Printf("=== Build Complete: %s ===", digest) + provenance.Timestamp = time.Now() + + setResult(BuildResult{ + Success: true, + ImageDigest: digest, + Logs: logs.String(), + Provenance: provenance, + DurationMS: duration, + }) +} + +// setResult stores the build result for the host to retrieve +func setResult(result BuildResult) { + buildResultLock.Lock() + defer buildResultLock.Unlock() + buildResult = &result +} + +func loadConfig() (*BuildConfig, error) { + data, err := os.ReadFile(configPath) + if err != nil { + return nil, err + } + var config BuildConfig + if err := json.Unmarshal(data, &config); err != nil { + return nil, err + } + return &config, nil +} + +// setupRegistryAuth creates a Docker config.json with the registry token for authentication. +// BuildKit uses this file to authenticate when pushing images. +func setupRegistryAuth(registryURL, token string) error { + if token == "" { + log.Println("No registry token provided, skipping auth setup") + return nil + } + + // Docker config format expects base64-encoded "username:password" or just the token + // For bearer tokens, we use the token directly as the "auth" value + // Format: base64(token + ":") - empty password + authValue := base64.StdEncoding.EncodeToString([]byte(token + ":")) + + // Create the Docker config structure + dockerConfig := map[string]interface{}{ + "auths": map[string]interface{}{ + registryURL: map[string]string{ + "auth": authValue, + }, + }, + } + + configData, err := json.MarshalIndent(dockerConfig, "", " ") + if err != nil { + return fmt.Errorf("marshal docker config: %w", err) + } + + // Ensure ~/.docker directory exists + dockerDir := "/home/builder/.docker" + if err := os.MkdirAll(dockerDir, 0700); err != nil { + return fmt.Errorf("create docker config dir: %w", err) + } + + // Write config.json + configPath := filepath.Join(dockerDir, "config.json") + if err := os.WriteFile(configPath, configData, 0600); err != nil { + return fmt.Errorf("write docker config: %w", 
err) + } + + log.Printf("Registry auth configured for %s", registryURL) + return nil +} + +func runBuild(ctx context.Context, config *BuildConfig, logWriter io.Writer) (string, string, error) { + var buildLogs bytes.Buffer + + // Build output reference + outputRef := fmt.Sprintf("%s/builds/%s", config.RegistryURL, config.JobID) + + // Build arguments + // Use registry.insecure=true for internal HTTP registries + args := []string{ + "build", + "--frontend", "dockerfile.v0", + "--local", "context=" + config.SourcePath, + "--local", "dockerfile=" + config.SourcePath, + "--output", fmt.Sprintf("type=image,name=%s,push=true,registry.insecure=true,oci-mediatypes=true", outputRef), + "--metadata-file", "/tmp/build-metadata.json", + } + + // Add cache if scope is set + if config.CacheScope != "" { + cacheRef := fmt.Sprintf("%s/cache/%s", config.RegistryURL, config.CacheScope) + args = append(args, "--import-cache", fmt.Sprintf("type=registry,ref=%s,registry.insecure=true", cacheRef)) + args = append(args, "--export-cache", fmt.Sprintf("type=registry,ref=%s,mode=max,registry.insecure=true", cacheRef)) + } + + // Add secret mounts + for _, secret := range config.Secrets { + secretPath := fmt.Sprintf("/run/secrets/%s", secret.ID) + args = append(args, "--secret", fmt.Sprintf("id=%s,src=%s", secret.ID, secretPath)) + } + + // Add build args + for k, v := range config.BuildArgs { + args = append(args, "--opt", fmt.Sprintf("build-arg:%s=%s", k, v)) + } + + log.Printf("Running: buildctl-daemonless.sh %s", strings.Join(args, " ")) + + // Run buildctl-daemonless.sh + cmd := exec.CommandContext(ctx, "buildctl-daemonless.sh", args...) 
+ cmd.Stdout = io.MultiWriter(logWriter, &buildLogs) + cmd.Stderr = io.MultiWriter(logWriter, &buildLogs) + // Use BUILDKITD_FLAGS from environment (set in Dockerfile) or empty for default + cmd.Env = os.Environ() + + if err := cmd.Run(); err != nil { + return "", buildLogs.String(), fmt.Errorf("buildctl failed: %w", err) + } + + // Extract digest from metadata + digest, err := extractDigest("/tmp/build-metadata.json") + if err != nil { + return "", buildLogs.String(), fmt.Errorf("extract digest: %w", err) + } + + return digest, buildLogs.String(), nil +} + +func extractDigest(metadataPath string) (string, error) { + data, err := os.ReadFile(metadataPath) + if err != nil { + return "", err + } + + var metadata struct { + ContainerImageDigest string `json:"containerimage.digest"` + } + if err := json.Unmarshal(data, &metadata); err != nil { + return "", err + } + + if metadata.ContainerImageDigest == "" { + return "", fmt.Errorf("no digest in metadata") + } + + return metadata.ContainerImageDigest, nil +} + +func computeProvenance(config *BuildConfig) BuildProvenance { + prov := BuildProvenance{ + BaseImageDigest: config.BaseImageDigest, + LockfileHashes: make(map[string]string), + BuildkitVersion: getBuildkitVersion(), + } + + // Hash lockfiles + lockfiles := []string{ + "package-lock.json", "yarn.lock", "pnpm-lock.yaml", + "requirements.txt", "poetry.lock", "Pipfile.lock", + } + for _, lf := range lockfiles { + path := filepath.Join(config.SourcePath, lf) + if hash, err := hashFile(path); err == nil { + prov.LockfileHashes[lf] = hash + } + } + + // Hash source directory + prov.SourceHash, _ = hashDirectory(config.SourcePath) + + return prov +} + +func hashFile(path string) (string, error) { + data, err := os.ReadFile(path) + if err != nil { + return "", err + } + sum := sha256.Sum256(data) + return hex.EncodeToString(sum[:]), nil +} + +func hashDirectory(path string) (string, error) { + h := sha256.New() + err := filepath.Walk(path, func(p string, info 
os.FileInfo, err error) error { + if err != nil { + return nil + } + if info.IsDir() { + return nil + } + // Skip Dockerfile (generated) and hidden files + name := filepath.Base(p) + if name == "Dockerfile" || strings.HasPrefix(name, ".") { + return nil + } + data, err := os.ReadFile(p) + if err != nil { + return nil + } + relPath, _ := filepath.Rel(path, p) + h.Write([]byte(relPath)) + h.Write(data) + return nil + }) + if err != nil { + return "", err + } + return hex.EncodeToString(h.Sum(nil)), nil +} + +func getBuildkitVersion() string { + cmd := exec.Command("buildctl", "--version") + out, _ := cmd.Output() + return strings.TrimSpace(string(out)) +} diff --git a/lib/builds/cache.go b/lib/builds/cache.go new file mode 100644 index 00000000..ff3e26a8 --- /dev/null +++ b/lib/builds/cache.go @@ -0,0 +1,179 @@ +package builds + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "regexp" + "strings" +) + +// CacheKeyGenerator generates cache keys for builds with tenant isolation +type CacheKeyGenerator struct { + registryURL string +} + +// NewCacheKeyGenerator creates a new cache key generator +func NewCacheKeyGenerator(registryURL string) *CacheKeyGenerator { + return &CacheKeyGenerator{registryURL: registryURL} +} + +// CacheKey represents a validated cache key +type CacheKey struct { + // Full reference for BuildKit --import-cache / --export-cache + Reference string + + // Components + TenantScope string + Runtime string + LockfileHash string +} + +// GenerateCacheKey generates a cache key for a build. 
+// +// Cache key structure: +// +// {registry}/cache/{tenant_scope}/{runtime}/{lockfile_hash} +// +// This structure provides: +// - Tenant isolation: each tenant's cache is isolated by scope +// - Runtime separation: Node.js and Python caches don't mix +// - Lockfile-based keying: same lockfile = cache hit +func (g *CacheKeyGenerator) GenerateCacheKey(tenantScope, runtime string, lockfileHashes map[string]string) (*CacheKey, error) { + if tenantScope == "" { + return nil, fmt.Errorf("tenant scope is required for caching") + } + + // Note: Runtime is no longer validated as the generic builder accepts any runtime. + // The runtime is still used as part of the cache key for separation. + + // Normalize tenant scope (alphanumeric + hyphen only) + normalizedScope := normalizeCacheScope(tenantScope) + if normalizedScope == "" { + return nil, fmt.Errorf("invalid tenant scope: %s", tenantScope) + } + + // Compute lockfile hash from all lockfile hashes + lockfileHash := computeCombinedHash(lockfileHashes) + + // Build the reference + reference := fmt.Sprintf("%s/cache/%s/%s/%s", + g.registryURL, + normalizedScope, + runtime, + lockfileHash[:16], // Use first 16 chars for brevity + ) + + return &CacheKey{ + Reference: reference, + TenantScope: normalizedScope, + Runtime: runtime, + LockfileHash: lockfileHash, + }, nil +} + +// ValidateCacheScope validates that a cache scope is safe to use +func ValidateCacheScope(scope string) error { + if scope == "" { + return fmt.Errorf("cache scope is required") + } + + normalized := normalizeCacheScope(scope) + if normalized == "" { + return fmt.Errorf("cache scope contains only invalid characters") + } + + if len(normalized) < 3 { + return fmt.Errorf("cache scope must be at least 3 characters") + } + + if len(normalized) > 64 { + return fmt.Errorf("cache scope must be at most 64 characters") + } + + return nil +} + +// ImportCacheArg returns the BuildKit --import-cache argument +func (k *CacheKey) ImportCacheArg() string { + return 
fmt.Sprintf("type=registry,ref=%s", k.Reference) +} + +// ExportCacheArg returns the BuildKit --export-cache argument +func (k *CacheKey) ExportCacheArg() string { + return fmt.Sprintf("type=registry,ref=%s,mode=max", k.Reference) +} + +// normalizeCacheScope normalizes a cache scope to only contain safe characters +// for use in registry paths (alphanumeric and hyphens) +func normalizeCacheScope(scope string) string { + // Convert to lowercase and replace unsafe characters + scope = strings.ToLower(scope) + + // Keep only alphanumeric and hyphens + re := regexp.MustCompile(`[^a-z0-9-]`) + normalized := re.ReplaceAllString(scope, "-") + + // Remove consecutive hyphens + re = regexp.MustCompile(`-+`) + normalized = re.ReplaceAllString(normalized, "-") + + // Trim leading/trailing hyphens + normalized = strings.Trim(normalized, "-") + + return normalized +} + +// computeCombinedHash computes a combined hash from multiple lockfile hashes. +// Returns a 64-character hex string (sha256), even for empty input. 
+func computeCombinedHash(lockfileHashes map[string]string) string { + h := sha256.New() + + if len(lockfileHashes) == 0 { + // Hash "empty" to get a consistent 64-char hex string + h.Write([]byte("empty")) + return hex.EncodeToString(h.Sum(nil)) + } + + // Sort keys for determinism + for _, name := range sortedKeys(lockfileHashes) { + h.Write([]byte(name)) + h.Write([]byte(":")) + h.Write([]byte(lockfileHashes[name])) + h.Write([]byte("\n")) + } + + return hex.EncodeToString(h.Sum(nil)) +} + +// sortedKeys returns the keys of a map in sorted order +func sortedKeys(m map[string]string) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + // Simple bubble sort for small maps (lockfiles are typically 1-3) + for i := 0; i < len(keys)-1; i++ { + for j := i + 1; j < len(keys); j++ { + if keys[i] > keys[j] { + keys[i], keys[j] = keys[j], keys[i] + } + } + } + return keys +} + +// GetCacheKeyFromConfig extracts cache configuration for the builder agent +func GetCacheKeyFromConfig(registryURL, cacheScope, runtime string, lockfileHashes map[string]string) (importArg, exportArg string, err error) { + if cacheScope == "" { + return "", "", nil // Caching disabled + } + + gen := NewCacheKeyGenerator(registryURL) + key, err := gen.GenerateCacheKey(cacheScope, runtime, lockfileHashes) + if err != nil { + return "", "", err + } + + return key.ImportCacheArg(), key.ExportCacheArg(), nil +} diff --git a/lib/builds/cache_test.go b/lib/builds/cache_test.go new file mode 100644 index 00000000..d51fb7cf --- /dev/null +++ b/lib/builds/cache_test.go @@ -0,0 +1,232 @@ +package builds + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCacheKeyGenerator_GenerateCacheKey(t *testing.T) { + gen := NewCacheKeyGenerator("localhost:8080") + + tests := []struct { + name string + tenantScope string + runtime string + lockfileHashes map[string]string + wantErr bool + wantPrefix string + 
}{ + { + name: "valid nodejs build", + tenantScope: "tenant-abc", + runtime: "nodejs", + lockfileHashes: map[string]string{ + "package-lock.json": "abc123", + }, + wantPrefix: "localhost:8080/cache/tenant-abc/nodejs/", + }, + { + name: "valid python build", + tenantScope: "my-team", + runtime: "python", + lockfileHashes: map[string]string{ + "requirements.txt": "def456", + }, + wantPrefix: "localhost:8080/cache/my-team/python/", + }, + { + name: "empty tenant scope", + tenantScope: "", + runtime: "nodejs", + wantErr: true, + }, + { + name: "any runtime is accepted", + tenantScope: "tenant", + runtime: "ruby", + lockfileHashes: map[string]string{ + "Gemfile.lock": "abc123", + }, + wantPrefix: "localhost:8080/cache/tenant/ruby/", + }, + { + name: "scope with special chars", + tenantScope: "My Team!@#$%", + runtime: "nodejs", + lockfileHashes: map[string]string{ + "package-lock.json": "abc", + }, + wantPrefix: "localhost:8080/cache/my-team/nodejs/", + }, + { + name: "empty lockfileHashes does not panic", + tenantScope: "tenant-abc", + runtime: "nodejs", + lockfileHashes: map[string]string{}, + wantPrefix: "localhost:8080/cache/tenant-abc/nodejs/", + }, + { + name: "nil lockfileHashes does not panic", + tenantScope: "tenant-abc", + runtime: "python", + lockfileHashes: nil, + wantPrefix: "localhost:8080/cache/tenant-abc/python/", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key, err := gen.GenerateCacheKey(tt.tenantScope, tt.runtime, tt.lockfileHashes) + + if tt.wantErr { + require.Error(t, err) + return + } + + require.NoError(t, err) + assert.Contains(t, key.Reference, tt.wantPrefix) + }) + } +} + +func TestCacheKey_Args(t *testing.T) { + key := &CacheKey{ + Reference: "localhost:8080/cache/tenant/nodejs/abc123", + TenantScope: "tenant", + Runtime: "nodejs", + LockfileHash: "abc123", + } + + importArg := key.ImportCacheArg() + assert.Equal(t, "type=registry,ref=localhost:8080/cache/tenant/nodejs/abc123", importArg) + + exportArg := 
key.ExportCacheArg() + assert.Equal(t, "type=registry,ref=localhost:8080/cache/tenant/nodejs/abc123,mode=max", exportArg) +} + +func TestValidateCacheScope(t *testing.T) { + tests := []struct { + scope string + wantErr bool + }{ + {"valid-scope", false}, + {"abc", false}, + {"my-team-123", false}, + {"", true}, // Empty + {"ab", true}, // Too short + {"a", true}, // Too short + {string(make([]byte, 65)), true}, // Too long + } + + for _, tt := range tests { + t.Run(tt.scope, func(t *testing.T) { + err := ValidateCacheScope(tt.scope) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestNormalizeCacheScope(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"simple", "simple"}, + {"with-hyphens", "with-hyphens"}, + {"MixedCase", "mixedcase"}, + {"with spaces", "with-spaces"}, + {"special!@#chars", "special-chars"}, + {"multiple---hyphens", "multiple-hyphens"}, + {"-leading-trailing-", "leading-trailing"}, + {"123numbers", "123numbers"}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + result := normalizeCacheScope(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestComputeCombinedHash(t *testing.T) { + // Same inputs should produce same hash + hash1 := computeCombinedHash(map[string]string{ + "package-lock.json": "abc123", + "yarn.lock": "def456", + }) + hash2 := computeCombinedHash(map[string]string{ + "yarn.lock": "def456", + "package-lock.json": "abc123", + }) + assert.Equal(t, hash1, hash2, "hash should be deterministic regardless of map order") + + // Different inputs should produce different hashes + hash3 := computeCombinedHash(map[string]string{ + "package-lock.json": "different", + }) + assert.NotEqual(t, hash1, hash3) + + // Empty map should return a valid hash (64 hex chars), not a short string + emptyHash := computeCombinedHash(map[string]string{}) + assert.Len(t, emptyHash, 64, "empty hash should be 64 hex characters (sha256)") + 
+ // Nil map should also return a valid hash + nilHash := computeCombinedHash(nil) + assert.Len(t, nilHash, 64, "nil hash should be 64 hex characters (sha256)") + assert.Equal(t, emptyHash, nilHash, "empty and nil should produce same hash") +} + +func TestGetCacheKeyFromConfig(t *testing.T) { + // With cache scope + importArg, exportArg, err := GetCacheKeyFromConfig( + "localhost:8080", + "my-tenant", + "nodejs", + map[string]string{"package-lock.json": "abc"}, + ) + require.NoError(t, err) + assert.NotEmpty(t, importArg) + assert.NotEmpty(t, exportArg) + assert.Contains(t, importArg, "type=registry") + assert.Contains(t, exportArg, "mode=max") + + // Without cache scope (caching disabled) + importArg, exportArg, err = GetCacheKeyFromConfig( + "localhost:8080", + "", // Empty = no caching + "nodejs", + nil, + ) + require.NoError(t, err) + assert.Empty(t, importArg) + assert.Empty(t, exportArg) + + // With cache scope but empty lockfileHashes - should not panic (regression test) + importArg, exportArg, err = GetCacheKeyFromConfig( + "localhost:8080", + "my-tenant", + "nodejs", + map[string]string{}, // Empty lockfileHashes + ) + require.NoError(t, err) + assert.NotEmpty(t, importArg, "should generate cache args even with empty lockfileHashes") + assert.NotEmpty(t, exportArg) + + // With cache scope but nil lockfileHashes - should not panic (regression test) + importArg, exportArg, err = GetCacheKeyFromConfig( + "localhost:8080", + "my-tenant", + "python", + nil, // nil lockfileHashes + ) + require.NoError(t, err) + assert.NotEmpty(t, importArg, "should generate cache args even with nil lockfileHashes") + assert.NotEmpty(t, exportArg) +} diff --git a/lib/builds/errors.go b/lib/builds/errors.go new file mode 100644 index 00000000..2e8d888d --- /dev/null +++ b/lib/builds/errors.go @@ -0,0 +1,35 @@ +package builds + +import "errors" + +var ( + // ErrNotFound is returned when a build is not found + ErrNotFound = errors.New("build not found") + + // ErrAlreadyExists is 
returned when a build with the same ID already exists + ErrAlreadyExists = errors.New("build already exists") + + // ErrDockerfileRequired is returned when no Dockerfile is provided + ErrDockerfileRequired = errors.New("dockerfile required: provide dockerfile parameter or include Dockerfile in source tarball") + + // ErrBuildFailed is returned when a build fails + ErrBuildFailed = errors.New("build failed") + + // ErrBuildTimeout is returned when a build exceeds its timeout + ErrBuildTimeout = errors.New("build timeout") + + // ErrBuildCancelled is returned when a build is cancelled + ErrBuildCancelled = errors.New("build cancelled") + + // ErrInvalidSource is returned when the source tarball is invalid + ErrInvalidSource = errors.New("invalid source") + + // ErrSourceHashMismatch is returned when the source hash doesn't match + ErrSourceHashMismatch = errors.New("source hash mismatch") + + // ErrBuilderNotReady is returned when the builder image is not available + ErrBuilderNotReady = errors.New("builder image not ready") + + // ErrBuildInProgress is returned when trying to cancel a build that's already complete + ErrBuildInProgress = errors.New("build in progress") +) diff --git a/lib/builds/file_secret_provider.go b/lib/builds/file_secret_provider.go new file mode 100644 index 00000000..382ba7cf --- /dev/null +++ b/lib/builds/file_secret_provider.go @@ -0,0 +1,66 @@ +package builds + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" +) + +// FileSecretProvider reads secrets from files in a directory. +// Each secret is stored as a file named by its ID, with the secret value as the file content. +// Example: /etc/hypeman/secrets/npm_token contains the npm token value. +type FileSecretProvider struct { + secretsDir string +} + +// NewFileSecretProvider creates a new file-based secret provider. +// secretsDir is the directory containing secret files (e.g., /etc/hypeman/secrets/). 
+func NewFileSecretProvider(secretsDir string) *FileSecretProvider { + return &FileSecretProvider{ + secretsDir: secretsDir, + } +} + +// GetSecrets returns the values for the given secret IDs by reading files from the secrets directory. +// Missing secrets are silently skipped (not an error). +// Returns an error only if a secret file exists but cannot be read. +func (p *FileSecretProvider) GetSecrets(ctx context.Context, secretIDs []string) (map[string]string, error) { + result := make(map[string]string) + + for _, id := range secretIDs { + // Validate secret ID to prevent path traversal + if strings.Contains(id, "/") || strings.Contains(id, "\\") || id == ".." || id == "." { + continue // Skip invalid IDs + } + + path := filepath.Join(p.secretsDir, id) + + // Check context before each file read + select { + case <-ctx.Done(): + return result, ctx.Err() + default: + } + + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + // Secret doesn't exist - skip it (not an error) + continue + } + return nil, fmt.Errorf("read secret %s: %w", id, err) + } + + // Trim whitespace (especially trailing newlines) + result[id] = strings.TrimSpace(string(data)) + } + + return result, nil +} + +// Ensure FileSecretProvider implements SecretProvider +var _ SecretProvider = (*FileSecretProvider)(nil) + + diff --git a/lib/builds/file_secret_provider_test.go b/lib/builds/file_secret_provider_test.go new file mode 100644 index 00000000..784ed96d --- /dev/null +++ b/lib/builds/file_secret_provider_test.go @@ -0,0 +1,103 @@ +package builds + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFileSecretProvider_GetSecrets(t *testing.T) { + // Create temp directory with test secrets + tempDir, err := os.MkdirTemp("", "secrets-test-*") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + // Write test secrets + require.NoError(t, 
os.WriteFile(filepath.Join(tempDir, "npm_token"), []byte("npm-secret-value\n"), 0600)) + require.NoError(t, os.WriteFile(filepath.Join(tempDir, "github_token"), []byte("github-secret-value"), 0600)) + require.NoError(t, os.WriteFile(filepath.Join(tempDir, "with_whitespace"), []byte(" trimmed \n"), 0600)) + + provider := NewFileSecretProvider(tempDir) + ctx := context.Background() + + t.Run("fetch existing secrets", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{"npm_token", "github_token"}) + require.NoError(t, err) + assert.Len(t, secrets, 2) + assert.Equal(t, "npm-secret-value", secrets["npm_token"]) + assert.Equal(t, "github-secret-value", secrets["github_token"]) + }) + + t.Run("missing secrets are skipped", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{"npm_token", "nonexistent"}) + require.NoError(t, err) + assert.Len(t, secrets, 1) + assert.Equal(t, "npm-secret-value", secrets["npm_token"]) + }) + + t.Run("all missing secrets returns empty map", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{"missing1", "missing2"}) + require.NoError(t, err) + assert.Empty(t, secrets) + }) + + t.Run("whitespace is trimmed", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{"with_whitespace"}) + require.NoError(t, err) + assert.Equal(t, "trimmed", secrets["with_whitespace"]) + }) + + t.Run("path traversal is blocked", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{"../etc/passwd", "../../root/.ssh/id_rsa"}) + require.NoError(t, err) + assert.Empty(t, secrets) + }) + + t.Run("special characters in ID are blocked", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{"foo/bar", "baz\\qux", "..", "."}) + require.NoError(t, err) + assert.Empty(t, secrets) + }) + + t.Run("empty request returns empty map", func(t *testing.T) { + secrets, err := provider.GetSecrets(ctx, []string{}) + require.NoError(t, err) + assert.Empty(t, secrets) + 
}) +} + +func TestFileSecretProvider_ContextCancellation(t *testing.T) { + tempDir, err := os.MkdirTemp("", "secrets-test-*") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + // Write many secrets + for i := 0; i < 10; i++ { + require.NoError(t, os.WriteFile(filepath.Join(tempDir, "secret"+string(rune('A'+i))), []byte("value"), 0600)) + } + + provider := NewFileSecretProvider(tempDir) + + // Cancel context immediately + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + secrets, err := provider.GetSecrets(ctx, []string{"secretA", "secretB", "secretC"}) + // May return partial results or context error + assert.True(t, err == context.Canceled || len(secrets) <= 3) +} + +func TestNoOpSecretProvider(t *testing.T) { + provider := &NoOpSecretProvider{} + ctx := context.Background() + + secrets, err := provider.GetSecrets(ctx, []string{"any", "secret", "ids"}) + require.NoError(t, err) + assert.Empty(t, secrets) +} + + diff --git a/lib/builds/images/README.md b/lib/builds/images/README.md new file mode 100644 index 00000000..93045573 --- /dev/null +++ b/lib/builds/images/README.md @@ -0,0 +1,260 @@ +# Generic Builder Image + +The generic builder image runs inside Hypeman microVMs to execute source-to-image builds using BuildKit. It is runtime-agnostic - users provide their own Dockerfile which specifies the runtime. + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Generic Builder Image (~50MB) │ +│ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │ +│ │ BuildKit │ │ builder- │ │ Minimal Alpine │ │ +│ │ (daemonless)│ │ agent │ │ (git, curl, fuse) │ │ +│ └─────────────┘ └─────────────┘ └─────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + │ + ▼ + User's Dockerfile + │ + ▼ + ┌───────────────────────────────┐ + │ FROM node:20-alpine │ + │ FROM python:3.12-slim │ + │ FROM rust:1.75 │ + │ FROM golang:1.22 │ + │ ... 
any base image │ + └───────────────────────────────┘ +``` + +## Key Benefits + +- **One image to maintain** - No more runtime-specific builder images +- **Any Dockerfile works** - Node.js, Python, Rust, Go, Java, Ruby, etc. +- **Smaller footprint** - ~50MB vs 200MB+ for runtime-specific images +- **User-controlled versions** - Users specify their runtime version in their Dockerfile + +## Directory Structure + +``` +images/ +└── generic/ + └── Dockerfile # The generic builder image +``` + +## Building the Generic Builder Image + +Hypeman supports both Docker v2 and OCI image formats. You can use standard `docker build` +or `docker buildx` - both work. + +### Prerequisites + +1. **Docker** installed +2. **Docker Hub login** (or your registry): + ```bash + docker login + ``` + +### 1. Build and Push + +```bash +# From repository root +docker build \ + -t hirokernel/builder-generic:latest \ + -f lib/builds/images/generic/Dockerfile \ + . + +docker push hirokernel/builder-generic:latest +``` + +Or with buildx for multi-platform support: + +```bash +docker buildx build \ + --platform linux/amd64 \ + --push \ + --tag hirokernel/builder-generic:latest \ + -f lib/builds/images/generic/Dockerfile \ + . +``` + +### 2. Import into Hypeman + +```bash +# Generate a token +TOKEN=$(make gen-jwt | tail -1) + +# Import the image +curl -X POST http://localhost:8083/images \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"name": "hirokernel/builder-generic:latest"}' + +# Wait for import to complete +curl http://localhost:8083/images/docker.io%2Fhirokernel%2Fbuilder-generic:latest \ + -H "Authorization: Bearer $TOKEN" +``` + +### 3. Configure Hypeman + +Set the builder image in your `.env`: + +```bash +BUILDER_IMAGE=hirokernel/builder-generic:latest +``` + +### Building for Local Testing (without pushing) + +```bash +# Build locally +docker build \ + -t hypeman/builder:local \ + -f lib/builds/images/generic/Dockerfile \ + . 
+ +# Run locally to test +docker run --rm hypeman/builder:local --help +``` + +## Usage + +### Submitting a Build + +Users must provide a Dockerfile either: +1. **In the source tarball** - Include a `Dockerfile` in the root of the source +2. **As a parameter** - Pass `dockerfile` content in the API request + +```bash +# Option 1: Dockerfile in source tarball +tar -czf source.tar.gz Dockerfile package.json index.js + +curl -X POST http://localhost:8083/builds \ + -H "Authorization: Bearer $TOKEN" \ + -F "source=@source.tar.gz" + +# Option 2: Dockerfile as parameter +curl -X POST http://localhost:8083/builds \ + -H "Authorization: Bearer $TOKEN" \ + -F "source=@source.tar.gz" \ + -F "dockerfile=FROM node:20-alpine +WORKDIR /app +COPY . . +RUN npm ci +CMD [\"node\", \"index.js\"]" +``` + +### Example Dockerfiles + +**Node.js:** +```dockerfile +FROM node:20-alpine +WORKDIR /app +COPY package*.json ./ +RUN npm ci +COPY . . +CMD ["node", "index.js"] +``` + +**Python:** +```dockerfile +FROM python:3.12-slim +WORKDIR /app +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt +COPY . . +CMD ["python", "main.py"] +``` + +**Go:** +```dockerfile +FROM golang:1.22-alpine AS builder +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . . +RUN CGO_ENABLED=0 go build -o main . 
+ +FROM alpine:3.21 +COPY --from=builder /app/main /main +CMD ["/main"] +``` + +**Rust:** +```dockerfile +FROM rust:1.75 AS builder +WORKDIR /app +COPY Cargo.toml Cargo.lock ./ +COPY src ./src +RUN cargo build --release + +FROM debian:bookworm-slim +COPY --from=builder /app/target/release/myapp /myapp +CMD ["/myapp"] +``` + +## Required Components + +The generic builder image contains: + +| Component | Path | Purpose | +|-----------|------|---------| +| `buildctl` | `/usr/bin/buildctl` | BuildKit CLI | +| `buildctl-daemonless.sh` | `/usr/bin/buildctl-daemonless.sh` | Runs buildkitd + buildctl | +| `buildkitd` | `/usr/bin/buildkitd` | BuildKit daemon | +| `runc` | `/usr/bin/runc` | Container runtime | +| `builder-agent` | `/usr/bin/builder-agent` | Hypeman orchestration | +| `fuse-overlayfs` | System package | Rootless overlay filesystem | +| `git` | System package | Git operations (for go mod, etc.) | +| `curl` | System package | Network utilities | + +## Environment Variables + +| Variable | Value | Purpose | +|----------|-------|---------| +| `HOME` | `/home/builder` | User home directory | +| `XDG_RUNTIME_DIR` | `/home/builder/.local/share` | Runtime directory for BuildKit | +| `BUILDKITD_FLAGS` | `""` (empty) | BuildKit daemon flags | + +## MicroVM Runtime Environment + +When the builder runs inside a Hypeman microVM: + +1. **Volumes mounted**: + - `/src` - Source code (read-write) + - `/config/build.json` - Build configuration (read-only) + +2. **Cgroups**: Mounted at `/sys/fs/cgroup` + +3. **Network**: Access to host registry via gateway IP `10.102.0.1` + +4. 
**Registry**: Uses HTTP (insecure) with `registry.insecure=true` + +## Troubleshooting + +| Issue | Cause | Solution | +|-------|-------|----------| +| Image import stuck on `pending`/`failed` | Network or registry issue | Check Hypeman logs, verify registry access | +| `Dockerfile required` | No Dockerfile in source or parameter | Include Dockerfile in tarball or pass as parameter | +| `401 Unauthorized` during push | Registry token issue | Check builder agent logs, verify token generation | +| `runc: not found` | BuildKit binaries missing | Rebuild the builder image | +| `no cgroup mount found` | Cgroups not available | Check VM init script | +| `fuse-overlayfs: not found` | Missing package | Rebuild image with fuse-overlayfs | +| `permission denied` | Wrong user/permissions | Ensure running as `builder` user | + +### Debugging Image Import Issues + +```bash +# Check image status +cat ~/hypeman_data_dir/images/docker.io/hirokernel/builder-generic/*/metadata.json | jq . + +# Check OCI cache index +cat ~/hypeman_data_dir/system/oci-cache/index.json | jq '.manifests[-1]' +``` + +## Using the Generic Builder + +The generic builder accepts any Dockerfile. To use it: + +1. **Include a Dockerfile** in your source tarball (or pass it via the `dockerfile` parameter) +2. **Your Dockerfile specifies the runtime** - e.g., `FROM node:20-alpine` or `FROM python:3.12-slim` +3. 
**Configure `BUILDER_IMAGE`** in your `.env` to point to the generic builder image diff --git a/lib/builds/images/generic/Dockerfile b/lib/builds/images/generic/Dockerfile new file mode 100644 index 00000000..83a080f9 --- /dev/null +++ b/lib/builds/images/generic/Dockerfile @@ -0,0 +1,66 @@ +# Generic Builder Image +# Contains rootless BuildKit + builder agent + guest-agent for debugging +# Builds any Dockerfile provided by the user + +FROM moby/buildkit:rootless AS buildkit + +# Build the builder-agent and guest-agent (multi-stage build from hypeman repo) +FROM golang:1.25-alpine AS agent-builder + +WORKDIR /app + +# Copy go.mod and go.sum first for better layer caching +COPY go.mod go.sum ./ +RUN go mod download + +# Copy the builder_agent and guest_agent sources +COPY lib/builds/builder_agent/ ./lib/builds/builder_agent/ +COPY lib/system/guest_agent/ ./lib/system/guest_agent/ +# Only copy proto files for guest-agent (not client.go which has host-side deps) +COPY lib/guest/guest.pb.go lib/guest/guest_grpc.pb.go ./lib/guest/ + +# Build the builder-agent +RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o /builder-agent ./lib/builds/builder_agent + +# Build the guest-agent (for exec into builder VMs) +RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o /guest-agent ./lib/system/guest_agent + +# Final builder image - minimal alpine base +FROM alpine:3.21 + +# Copy BuildKit binaries from official image +COPY --from=buildkit /usr/bin/buildctl /usr/bin/buildctl +COPY --from=buildkit /usr/bin/buildctl-daemonless.sh /usr/bin/buildctl-daemonless.sh +COPY --from=buildkit /usr/bin/buildkitd /usr/bin/buildkitd +COPY --from=buildkit /usr/bin/buildkit-runc /usr/bin/runc + +# Copy builder agent and guest agent +COPY --from=agent-builder /builder-agent /usr/bin/builder-agent +COPY --from=agent-builder /guest-agent /usr/bin/guest-agent + +# Install minimal dependencies +RUN apk add --no-cache \ + ca-certificates \ + git \ + curl \ + fuse-overlayfs + +# Create unprivileged user for 
rootless BuildKit +RUN adduser -D -u 1000 builder && \ + mkdir -p /home/builder/.local/share/buildkit /config /run/secrets /src && \ + chown -R builder:builder /home/builder /config /run/secrets /src + +# Switch to unprivileged user +USER builder +WORKDIR /src + +# Set environment for buildkit in microVM +ENV BUILDKITD_FLAGS="" +ENV HOME=/home/builder +ENV XDG_RUNTIME_DIR=/home/builder/.local/share + +# Run builder agent as entrypoint +ENTRYPOINT ["/usr/bin/builder-agent"] + + + diff --git a/lib/builds/manager.go b/lib/builds/manager.go new file mode 100644 index 00000000..1bfc40b1 --- /dev/null +++ b/lib/builds/manager.go @@ -0,0 +1,1078 @@ +package builds + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "log/slog" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/nrednav/cuid2" + "github.com/onkernel/hypeman/lib/images" + "github.com/onkernel/hypeman/lib/instances" + "github.com/onkernel/hypeman/lib/paths" + "github.com/onkernel/hypeman/lib/volumes" + "go.opentelemetry.io/otel/metric" +) + +// Manager interface for the build system +type Manager interface { + // Start starts the build manager's background services (vsock handler, etc.) + // This should be called once when the API server starts. 
+ Start(ctx context.Context) error + + // CreateBuild starts a new build job + CreateBuild(ctx context.Context, req CreateBuildRequest, sourceData []byte) (*Build, error) + + // GetBuild returns a build by ID + GetBuild(ctx context.Context, id string) (*Build, error) + + // ListBuilds returns all builds + ListBuilds(ctx context.Context) ([]*Build, error) + + // CancelBuild cancels a pending or running build + CancelBuild(ctx context.Context, id string) error + + // GetBuildLogs returns the logs for a build + GetBuildLogs(ctx context.Context, id string) ([]byte, error) + + // StreamBuildEvents streams build events (logs, status changes, heartbeats) + // With follow=false, returns existing logs then closes + // With follow=true, continues streaming until build completes or context cancels + StreamBuildEvents(ctx context.Context, id string, follow bool) (<-chan BuildEvent, error) + + // RecoverPendingBuilds recovers builds that were interrupted on restart + RecoverPendingBuilds() +} + +// Config holds configuration for the build manager +type Config struct { + // MaxConcurrentBuilds is the maximum number of concurrent builds + MaxConcurrentBuilds int + + // BuilderImage is the OCI image to use for builder VMs + // This should contain rootless BuildKit and the builder agent + BuilderImage string + + // RegistryURL is the URL of the registry to push built images to + RegistryURL string + + // DefaultTimeout is the default build timeout in seconds + DefaultTimeout int + + // RegistrySecret is the secret used to sign registry access tokens + // This should be the same secret used by the registry middleware + RegistrySecret string +} + +// DefaultConfig returns the default build manager configuration +func DefaultConfig() Config { + return Config{ + MaxConcurrentBuilds: 2, + BuilderImage: "hypeman/builder:latest", + RegistryURL: "localhost:8080", + DefaultTimeout: 600, // 10 minutes + } +} + +type manager struct { + config Config + paths *paths.Paths + queue *BuildQueue + 
instanceManager instances.Manager + volumeManager volumes.Manager + secretProvider SecretProvider + tokenGenerator *RegistryTokenGenerator + logger *slog.Logger + metrics *Metrics + createMu sync.Mutex + + // Status subscription system for SSE streaming + statusSubscribers map[string][]chan BuildEvent + subscriberMu sync.RWMutex +} + +// NewManager creates a new build manager +func NewManager( + p *paths.Paths, + config Config, + instanceMgr instances.Manager, + volumeMgr volumes.Manager, + secretProvider SecretProvider, + logger *slog.Logger, + meter metric.Meter, +) (Manager, error) { + if logger == nil { + logger = slog.Default() + } + + m := &manager{ + config: config, + paths: p, + queue: NewBuildQueue(config.MaxConcurrentBuilds), + instanceManager: instanceMgr, + volumeManager: volumeMgr, + secretProvider: secretProvider, + tokenGenerator: NewRegistryTokenGenerator(config.RegistrySecret), + logger: logger, + statusSubscribers: make(map[string][]chan BuildEvent), + } + + // Initialize metrics if meter is provided + if meter != nil { + metrics, err := NewMetrics(meter) + if err != nil { + return nil, fmt.Errorf("create metrics: %w", err) + } + m.metrics = metrics + } + + // Recover any pending builds from disk + m.RecoverPendingBuilds() + + return m, nil +} + +// Start starts the build manager's background services +func (m *manager) Start(ctx context.Context) error { + // Note: We no longer use a global vsock listener. + // Instead, we connect TO each builder VM's vsock socket directly. + // This follows the Cloud Hypervisor vsock pattern where host initiates connections. 
+ m.logger.Info("build manager started") + return nil +} + +// CreateBuild starts a new build job +func (m *manager) CreateBuild(ctx context.Context, req CreateBuildRequest, sourceData []byte) (*Build, error) { + m.logger.Info("creating build") + + // Apply defaults to build policy + policy := req.BuildPolicy + if policy == nil { + defaultPolicy := DefaultBuildPolicy() + policy = &defaultPolicy + } else { + policy.ApplyDefaults() + } + + m.createMu.Lock() + defer m.createMu.Unlock() + + // Generate build ID + id := cuid2.Generate() + + // Create build metadata + meta := &buildMetadata{ + ID: id, + Status: StatusQueued, + Request: &req, + CreatedAt: time.Now(), + } + + // Write initial metadata + if err := writeMetadata(m.paths, meta); err != nil { + return nil, fmt.Errorf("write metadata: %w", err) + } + + // Store source data + if err := m.storeSource(id, sourceData); err != nil { + deleteBuild(m.paths, id) + return nil, fmt.Errorf("store source: %w", err) + } + + // Generate scoped registry token for this build + // Token grants push access to the build output repo and cache repo + allowedRepos := []string{fmt.Sprintf("builds/%s", id)} + if req.CacheScope != "" { + allowedRepos = append(allowedRepos, fmt.Sprintf("cache/%s", req.CacheScope)) + } + tokenTTL := time.Duration(policy.TimeoutSeconds) * time.Second + if tokenTTL < 30*time.Minute { + tokenTTL = 30 * time.Minute // Minimum 30 minutes + } + registryToken, err := m.tokenGenerator.GeneratePushToken(id, allowedRepos, tokenTTL) + if err != nil { + deleteBuild(m.paths, id) + return nil, fmt.Errorf("generate registry token: %w", err) + } + + // Write build config for the builder agent + buildConfig := &BuildConfig{ + JobID: id, + BaseImageDigest: req.BaseImageDigest, + RegistryURL: m.config.RegistryURL, + RegistryToken: registryToken, + CacheScope: req.CacheScope, + SourcePath: "/src", + Dockerfile: req.Dockerfile, + BuildArgs: req.BuildArgs, + Secrets: req.Secrets, + TimeoutSeconds: policy.TimeoutSeconds, + 
NetworkMode: policy.NetworkMode, + } + if err := writeBuildConfig(m.paths, id, buildConfig); err != nil { + deleteBuild(m.paths, id) + return nil, fmt.Errorf("write build config: %w", err) + } + + // Enqueue the build + queuePos := m.queue.Enqueue(id, req, func() { + m.runBuild(context.Background(), id, req, policy) + }) + + build := meta.toBuild() + if queuePos > 0 { + build.QueuePosition = &queuePos + } + + m.logger.Info("build created", "id", id, "queue_position", queuePos) + return build, nil +} + +// storeSource stores the source tarball for a build +func (m *manager) storeSource(buildID string, data []byte) error { + sourceDir := m.paths.BuildSourceDir(buildID) + if err := ensureDir(sourceDir); err != nil { + return err + } + + // Write source tarball + sourcePath := sourceDir + "/source.tar.gz" + return writeFile(sourcePath, data) +} + +// runBuild executes a build in a builder VM +func (m *manager) runBuild(ctx context.Context, id string, req CreateBuildRequest, policy *BuildPolicy) { + start := time.Now() + m.logger.Info("starting build", "id", id) + + // Update status to building + m.updateStatus(id, StatusBuilding, nil) + + // Create timeout context + buildCtx, cancel := context.WithTimeout(ctx, time.Duration(policy.TimeoutSeconds)*time.Second) + defer cancel() + + // Run the build in a builder VM + result, err := m.executeBuild(buildCtx, id, req, policy) + + duration := time.Since(start) + durationMS := duration.Milliseconds() + + if err != nil { + m.logger.Error("build failed", "id", id, "error", err, "duration", duration) + errMsg := err.Error() + m.updateBuildComplete(id, StatusFailed, nil, &errMsg, nil, &durationMS) + if m.metrics != nil { + m.metrics.RecordBuild(ctx, "failed", duration) + } + return + } + + // Save build logs (regardless of success/failure) + if result.Logs != "" { + if err := appendLog(m.paths, id, []byte(result.Logs)); err != nil { + m.logger.Warn("failed to save build logs", "id", id, "error", err) + } + } + + if !result.Success 
{ + m.logger.Error("build failed", "id", id, "error", result.Error, "duration", duration) + m.updateBuildComplete(id, StatusFailed, nil, &result.Error, &result.Provenance, &durationMS) + if m.metrics != nil { + m.metrics.RecordBuild(ctx, "failed", duration) + } + return + } + + m.logger.Info("build succeeded", "id", id, "digest", result.ImageDigest, "duration", duration) + imageRef := fmt.Sprintf("%s/builds/%s", m.config.RegistryURL, id) + m.updateBuildComplete(id, StatusReady, &result.ImageDigest, nil, &result.Provenance, &durationMS) + + // Update with image ref + if meta, err := readMetadata(m.paths, id); err == nil { + meta.ImageRef = &imageRef + writeMetadata(m.paths, meta) + } + + if m.metrics != nil { + m.metrics.RecordBuild(ctx, "success", duration) + } +} + +// executeBuild runs the build in a builder VM +func (m *manager) executeBuild(ctx context.Context, id string, req CreateBuildRequest, policy *BuildPolicy) (*BuildResult, error) { + // Create a volume with the source data + sourceVolID := fmt.Sprintf("build-source-%s", id) + sourcePath := m.paths.BuildSourceDir(id) + "/source.tar.gz" + + // Open source tarball + sourceFile, err := os.Open(sourcePath) + if err != nil { + return nil, fmt.Errorf("open source: %w", err) + } + defer sourceFile.Close() + + // Create volume with source (using the volume manager's archive import) + _, err = m.volumeManager.CreateVolumeFromArchive(ctx, volumes.CreateVolumeFromArchiveRequest{ + Id: &sourceVolID, + Name: sourceVolID, + SizeGb: 10, // 10GB should be enough for most source bundles + }, sourceFile) + if err != nil { + return nil, fmt.Errorf("create source volume: %w", err) + } + defer m.volumeManager.DeleteVolume(context.Background(), sourceVolID) + + // Create config volume with build.json for the builder agent + configVolID := fmt.Sprintf("build-config-%s", id) + configVolPath, err := m.createBuildConfigVolume(id, configVolID) + if err != nil { + return nil, fmt.Errorf("create config volume: %w", err) + } + defer 
os.Remove(configVolPath) // Clean up the config disk file + + // Register the config volume with the volume manager + _, err = m.volumeManager.CreateVolume(ctx, volumes.CreateVolumeRequest{ + Id: &configVolID, + Name: configVolID, + SizeGb: 1, + }) + if err != nil { + // If volume creation fails, try to use the disk file directly + // by copying it to the expected location + volPath := m.paths.VolumeData(configVolID) + if copyErr := copyFile(configVolPath, volPath); copyErr != nil { + return nil, fmt.Errorf("setup config volume: %w", copyErr) + } + } else { + // Copy our config disk over the empty volume + volPath := m.paths.VolumeData(configVolID) + if err := copyFile(configVolPath, volPath); err != nil { + m.volumeManager.DeleteVolume(context.Background(), configVolID) + return nil, fmt.Errorf("write config to volume: %w", err) + } + } + defer m.volumeManager.DeleteVolume(context.Background(), configVolID) + + // Create builder instance + builderName := fmt.Sprintf("builder-%s", id) + networkEnabled := policy.NetworkMode == "egress" + + inst, err := m.instanceManager.CreateInstance(ctx, instances.CreateInstanceRequest{ + Name: builderName, + Image: m.config.BuilderImage, + Size: int64(policy.MemoryMB) * 1024 * 1024, + Vcpus: policy.CPUs, + NetworkEnabled: networkEnabled, + Volumes: []instances.VolumeAttachment{ + { + VolumeID: sourceVolID, + MountPath: "/src", + Readonly: false, // Builder needs to write generated Dockerfile + }, + { + VolumeID: configVolID, + MountPath: "/config", + Readonly: true, + }, + }, + }) + if err != nil { + return nil, fmt.Errorf("create builder instance: %w", err) + } + + // Update metadata with builder instance + if meta, err := readMetadata(m.paths, id); err == nil { + meta.BuilderInstance = &inst.Id + writeMetadata(m.paths, meta) + } + + // Ensure cleanup + defer func() { + m.instanceManager.DeleteInstance(context.Background(), inst.Id) + }() + + // Wait for build result via vsock + // The builder agent will send the result when 
complete + result, err := m.waitForResult(ctx, inst) + if err != nil { + return nil, fmt.Errorf("wait for result: %w", err) + } + + return result, nil +} + +// waitForResult waits for the build result from the builder agent via vsock +func (m *manager) waitForResult(ctx context.Context, inst *instances.Instance) (*BuildResult, error) { + // Wait a bit for the VM to start and the builder agent to listen on vsock + time.Sleep(3 * time.Second) + + // Try to connect to the builder agent with retries + var conn net.Conn + var err error + + for attempt := 0; attempt < 30; attempt++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + conn, err = m.dialBuilderVsock(inst.VsockSocket) + if err == nil { + break + } + + m.logger.Debug("waiting for builder agent", "attempt", attempt+1, "error", err) + time.Sleep(2 * time.Second) + + // Check if instance is still running + current, checkErr := m.instanceManager.GetInstance(ctx, inst.Id) + if checkErr != nil { + return nil, fmt.Errorf("check instance: %w", checkErr) + } + if current.State == instances.StateStopped || current.State == instances.StateShutdown { + return &BuildResult{ + Success: false, + Error: "builder instance stopped unexpectedly", + }, nil + } + } + + if conn == nil { + return nil, fmt.Errorf("failed to connect to builder agent after retries: %w", err) + } + defer conn.Close() + + m.logger.Info("connected to builder agent", "instance", inst.Id) + + encoder := json.NewEncoder(conn) + decoder := json.NewDecoder(conn) + + // Tell the agent we're ready - it may request secrets + m.logger.Info("sending host_ready to agent", "instance", inst.Id) + if err := encoder.Encode(VsockMessage{Type: "host_ready"}); err != nil { + return nil, fmt.Errorf("send host_ready: %w", err) + } + m.logger.Info("host_ready sent, waiting for agent messages", "instance", inst.Id) + + // Handle messages from agent until we get the build result + for { + // Use a goroutine for decoding so we can respect context 
cancellation. + type decodeResult struct { + response VsockMessage + err error + } + resultCh := make(chan decodeResult, 1) + + go func() { + var response VsockMessage + err := decoder.Decode(&response) + resultCh <- decodeResult{response: response, err: err} + }() + + // Wait for either a message or context cancellation + var dr decodeResult + select { + case <-ctx.Done(): + conn.Close() + <-resultCh + return nil, ctx.Err() + case dr = <-resultCh: + if dr.err != nil { + return nil, fmt.Errorf("read message: %w", dr.err) + } + } + + // Handle message based on type + m.logger.Info("received message from agent", "type", dr.response.Type, "instance", inst.Id) + switch dr.response.Type { + case "get_secrets": + // Agent is requesting secrets + m.logger.Info("agent requesting secrets", "instance", inst.Id, "secret_ids", dr.response.SecretIDs) + + // Fetch secrets from provider + secrets, err := m.secretProvider.GetSecrets(ctx, dr.response.SecretIDs) + if err != nil { + m.logger.Error("failed to fetch secrets", "error", err) + secrets = make(map[string]string) + } + + // Send secrets response + if err := encoder.Encode(VsockMessage{Type: "secrets_response", Secrets: secrets}); err != nil { + return nil, fmt.Errorf("send secrets response: %w", err) + } + m.logger.Info("sent secrets to agent", "count", len(secrets), "instance", inst.Id) + + case "build_result": + // Build completed + if dr.response.Result == nil { + return nil, fmt.Errorf("received build_result with nil result") + } + return dr.response.Result, nil + + default: + m.logger.Warn("unexpected message type from agent", "type", dr.response.Type) + } + } +} + +// dialBuilderVsock connects to a builder VM's vsock socket using Cloud Hypervisor's handshake +func (m *manager) dialBuilderVsock(vsockSocketPath string) (net.Conn, error) { + // Connect to the Cloud Hypervisor vsock Unix socket + conn, err := net.DialTimeout("unix", vsockSocketPath, 5*time.Second) + if err != nil { + return nil, fmt.Errorf("dial vsock 
socket %s: %w", vsockSocketPath, err) + } + + // Set deadline for handshake + if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil { + conn.Close() + return nil, fmt.Errorf("set handshake deadline: %w", err) + } + + // Perform Cloud Hypervisor vsock handshake + // Format: "CONNECT \n" -> "OK \n" + handshakeCmd := fmt.Sprintf("CONNECT %d\n", BuildAgentVsockPort) + if _, err := conn.Write([]byte(handshakeCmd)); err != nil { + conn.Close() + return nil, fmt.Errorf("send vsock handshake: %w", err) + } + + // Read handshake response + reader := bufio.NewReader(conn) + response, err := reader.ReadString('\n') + if err != nil { + conn.Close() + return nil, fmt.Errorf("read vsock handshake response: %w", err) + } + + // Clear deadline after successful handshake + if err := conn.SetDeadline(time.Time{}); err != nil { + conn.Close() + return nil, fmt.Errorf("clear deadline: %w", err) + } + + response = strings.TrimSpace(response) + if !strings.HasPrefix(response, "OK ") { + conn.Close() + return nil, fmt.Errorf("vsock handshake failed: %s", response) + } + + return &bufferedConn{Conn: conn, reader: reader}, nil +} + +// bufferedConn wraps a net.Conn with a bufio.Reader to ensure any buffered +// data from the handshake is properly drained before reading from the connection +type bufferedConn struct { + net.Conn + reader *bufio.Reader +} + +func (c *bufferedConn) Read(p []byte) (int, error) { + return c.reader.Read(p) +} + +// updateStatus updates the build status +func (m *manager) updateStatus(id string, status string, err error) { + meta, readErr := readMetadata(m.paths, id) + if readErr != nil { + m.logger.Error("read metadata for status update", "id", id, "error", readErr) + return + } + + meta.Status = status + if status == StatusBuilding && meta.StartedAt == nil { + now := time.Now() + meta.StartedAt = &now + } + if err != nil { + errMsg := err.Error() + meta.Error = &errMsg + } + + if writeErr := writeMetadata(m.paths, meta); writeErr != nil { + 
m.logger.Error("write metadata for status update", "id", id, "error", writeErr) + } + + // Notify subscribers of status change + m.notifyStatusChange(id, status) +} + +// updateBuildComplete updates the build with final results +func (m *manager) updateBuildComplete(id string, status string, digest *string, errMsg *string, provenance *BuildProvenance, durationMS *int64) { + meta, readErr := readMetadata(m.paths, id) + if readErr != nil { + m.logger.Error("read metadata for completion", "id", id, "error", readErr) + return + } + + // Don't overwrite terminal states - this prevents race conditions where + // a cancelled build's runBuild goroutine later fails and tries to set "failed" + if meta.Status == StatusCancelled || meta.Status == StatusReady || meta.Status == StatusFailed { + m.logger.Debug("skipping status update for already-terminal build", + "id", id, "current_status", meta.Status, "attempted_status", status) + return + } + + meta.Status = status + meta.ImageDigest = digest + meta.Error = errMsg + meta.Provenance = provenance + meta.DurationMS = durationMS + + now := time.Now() + meta.CompletedAt = &now + + if writeErr := writeMetadata(m.paths, meta); writeErr != nil { + m.logger.Error("write metadata for completion", "id", id, "error", writeErr) + } + + // Notify subscribers of status change + m.notifyStatusChange(id, status) +} + +// subscribeToStatus adds a subscriber channel for status updates on a build +func (m *manager) subscribeToStatus(buildID string, ch chan BuildEvent) { + m.subscriberMu.Lock() + defer m.subscriberMu.Unlock() + m.statusSubscribers[buildID] = append(m.statusSubscribers[buildID], ch) +} + +// unsubscribeFromStatus removes a subscriber channel +func (m *manager) unsubscribeFromStatus(buildID string, ch chan BuildEvent) { + m.subscriberMu.Lock() + defer m.subscriberMu.Unlock() + + subscribers := m.statusSubscribers[buildID] + for i, sub := range subscribers { + if sub == ch { + m.statusSubscribers[buildID] = append(subscribers[:i], 
subscribers[i+1:]...) + break + } + } + + // Clean up empty subscriber lists + if len(m.statusSubscribers[buildID]) == 0 { + delete(m.statusSubscribers, buildID) + } +} + +// notifyStatusChange broadcasts a status change to all subscribers +func (m *manager) notifyStatusChange(buildID string, status string) { + m.subscriberMu.RLock() + defer m.subscriberMu.RUnlock() + + event := BuildEvent{ + Type: EventTypeStatus, + Timestamp: time.Now(), + Status: status, + } + + for _, ch := range m.statusSubscribers[buildID] { + // Non-blocking send - drop if channel is full + select { + case ch <- event: + default: + } + } +} + +// GetBuild returns a build by ID +func (m *manager) GetBuild(ctx context.Context, id string) (*Build, error) { + meta, err := readMetadata(m.paths, id) + if err != nil { + return nil, err + } + + build := meta.toBuild() + + // Add queue position if queued + if meta.Status == StatusQueued { + build.QueuePosition = m.queue.GetPosition(id) + } + + return build, nil +} + +// ListBuilds returns all builds +func (m *manager) ListBuilds(ctx context.Context) ([]*Build, error) { + metas, err := listAllBuilds(m.paths) + if err != nil { + return nil, err + } + + builds := make([]*Build, 0, len(metas)) + for _, meta := range metas { + build := meta.toBuild() + if meta.Status == StatusQueued { + build.QueuePosition = m.queue.GetPosition(meta.ID) + } + builds = append(builds, build) + } + + return builds, nil +} + +// CancelBuild cancels a pending build +func (m *manager) CancelBuild(ctx context.Context, id string) error { + meta, err := readMetadata(m.paths, id) + if err != nil { + return err + } + + switch meta.Status { + case StatusQueued: + // Remove from queue + if m.queue.Cancel(id) { + m.updateStatus(id, StatusCancelled, nil) + return nil + } + return ErrBuildInProgress // Was already picked up + + case StatusBuilding, StatusPushing: + // Can't cancel a running build easily + // Would need to terminate the builder instance + if meta.BuilderInstance != nil { 
+ m.instanceManager.DeleteInstance(ctx, *meta.BuilderInstance) + } + m.updateStatus(id, StatusCancelled, nil) + return nil + + case StatusReady, StatusFailed, StatusCancelled: + return fmt.Errorf("build already completed with status: %s", meta.Status) + + default: + return fmt.Errorf("unknown build status: %s", meta.Status) + } +} + +// GetBuildLogs returns the logs for a build +func (m *manager) GetBuildLogs(ctx context.Context, id string) ([]byte, error) { + _, err := readMetadata(m.paths, id) + if err != nil { + return nil, err + } + + return readLog(m.paths, id) +} + +// StreamBuildEvents streams build events (logs, status changes, heartbeats) +func (m *manager) StreamBuildEvents(ctx context.Context, id string, follow bool) (<-chan BuildEvent, error) { + meta, err := readMetadata(m.paths, id) + if err != nil { + return nil, err + } + + // Create output channel + out := make(chan BuildEvent, 100) + + // Check if build is already complete + isComplete := meta.Status == StatusReady || meta.Status == StatusFailed || meta.Status == StatusCancelled + + go func() { + defer close(out) + + // Create a channel for status updates + statusChan := make(chan BuildEvent, 10) + if follow && !isComplete { + m.subscribeToStatus(id, statusChan) + defer m.unsubscribeFromStatus(id, statusChan) + } + + // Stream existing logs using tail + logPath := m.paths.BuildLog(id) + + // Check if log file exists + if _, err := os.Stat(logPath); os.IsNotExist(err) { + // No logs yet - if not following, just return + if !follow || isComplete { + return + } + // Wait for log file to appear, or for build to complete + for { + select { + case <-ctx.Done(): + return + case event := <-statusChan: + select { + case out <- event: + case <-ctx.Done(): + return + } + // Check if build completed + if event.Status == StatusReady || event.Status == StatusFailed || event.Status == StatusCancelled { + return + } + // Non-terminal status event - keep waiting for log file + continue + case <-time.After(500 * 
time.Millisecond): + if _, err := os.Stat(logPath); err == nil { + break // Log file appeared + } + continue + } + break + } + } + + // Build tail command args + args := []string{"-n", "+1"} // Start from beginning + if follow && !isComplete { + args = append(args, "-f") + } + args = append(args, logPath) + + cmd := exec.CommandContext(ctx, "tail", args...) + stdout, err := cmd.StdoutPipe() + if err != nil { + m.logger.Error("create stdout pipe for build logs", "id", id, "error", err) + return + } + + if err := cmd.Start(); err != nil { + m.logger.Error("start tail for build logs", "id", id, "error", err) + return + } + + // Ensure tail process is cleaned up on all exit paths to avoid zombie processes. + // Kill() is safe to call even if the process has already exited. + // Wait() reaps the process to prevent zombies. + defer func() { + cmd.Process.Kill() + cmd.Wait() + }() + + // Goroutine to read log lines + logLines := make(chan string, 100) + go func() { + defer close(logLines) + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + select { + case logLines <- scanner.Text(): + case <-ctx.Done(): + return + } + } + }() + + // Heartbeat ticker (30 seconds) + heartbeatTicker := time.NewTicker(30 * time.Second) + defer heartbeatTicker.Stop() + + // Main event loop + for { + select { + case <-ctx.Done(): + return + + case line, ok := <-logLines: + if !ok { + // Log stream ended + return + } + event := BuildEvent{ + Type: EventTypeLog, + Timestamp: time.Now(), + Content: line, + } + select { + case out <- event: + case <-ctx.Done(): + return + } + + case event := <-statusChan: + select { + case out <- event: + case <-ctx.Done(): + return + } + // Check if build completed + if event.Status == StatusReady || event.Status == StatusFailed || event.Status == StatusCancelled { + // Give a moment for final logs to come through + time.Sleep(100 * time.Millisecond) + return + } + + case <-heartbeatTicker.C: + if !follow { + continue + } + event := BuildEvent{ + Type: 
EventTypeHeartbeat, + Timestamp: time.Now(), + } + select { + case out <- event: + case <-ctx.Done(): + return + } + } + } + }() + + return out, nil +} + +// RecoverPendingBuilds recovers builds that were interrupted on restart +func (m *manager) RecoverPendingBuilds() { + pending, err := listPendingBuilds(m.paths) + if err != nil { + m.logger.Error("list pending builds for recovery", "error", err) + return + } + + for _, meta := range pending { + meta := meta // Shadow loop variable for closure capture + m.logger.Info("recovering build", "id", meta.ID, "status", meta.Status) + + // Re-enqueue the build + if meta.Request != nil { + // Regenerate registry token since the original token may have expired + // during server downtime. Token TTL is minimum 30 minutes. + if err := m.refreshBuildToken(meta.ID, meta.Request); err != nil { + m.logger.Error("failed to refresh registry token for recovered build", + "id", meta.ID, "error", err) + // Mark the build as failed since we can't refresh the token + errMsg := fmt.Sprintf("failed to refresh registry token on recovery: %v", err) + m.updateBuildComplete(meta.ID, StatusFailed, nil, &errMsg, nil, nil) + continue + } + + m.queue.Enqueue(meta.ID, *meta.Request, func() { + policy := DefaultBuildPolicy() + if meta.Request.BuildPolicy != nil { + policy = *meta.Request.BuildPolicy + } + m.runBuild(context.Background(), meta.ID, *meta.Request, &policy) + }) + } + } + + if len(pending) > 0 { + m.logger.Info("recovered pending builds", "count", len(pending)) + } +} + +// refreshBuildToken regenerates the registry token for a build and updates the config file +func (m *manager) refreshBuildToken(buildID string, req *CreateBuildRequest) error { + // Read existing build config + config, err := readBuildConfig(m.paths, buildID) + if err != nil { + return fmt.Errorf("read build config: %w", err) + } + + // Determine token TTL from build policy + policy := DefaultBuildPolicy() + if req.BuildPolicy != nil { + policy = *req.BuildPolicy + 
policy.ApplyDefaults() + } + tokenTTL := time.Duration(policy.TimeoutSeconds) * time.Second + if tokenTTL < 30*time.Minute { + tokenTTL = 30 * time.Minute // Minimum 30 minutes + } + + // Generate allowed repos list + allowedRepos := []string{fmt.Sprintf("builds/%s", buildID)} + if req.CacheScope != "" { + allowedRepos = append(allowedRepos, fmt.Sprintf("cache/%s", req.CacheScope)) + } + + // Generate fresh registry token + registryToken, err := m.tokenGenerator.GeneratePushToken(buildID, allowedRepos, tokenTTL) + if err != nil { + return fmt.Errorf("generate registry token: %w", err) + } + + // Update config with new token + config.RegistryToken = registryToken + + // Write updated config back to disk + if err := writeBuildConfig(m.paths, buildID, config); err != nil { + return fmt.Errorf("write build config: %w", err) + } + + m.logger.Debug("refreshed registry token for recovered build", "id", buildID) + return nil +} + +// Helper functions + +func ensureDir(path string) error { + return os.MkdirAll(path, 0755) +} + +func writeFile(path string, data []byte) error { + return os.WriteFile(path, data, 0644) +} + +func readFile(path string) ([]byte, error) { + return os.ReadFile(path) +} + +// createBuildConfigVolume creates an ext4 disk containing the build.json config file +// Returns the path to the disk file +func (m *manager) createBuildConfigVolume(buildID, volID string) (string, error) { + // Read the build config + configPath := m.paths.BuildConfig(buildID) + configData, err := os.ReadFile(configPath) + if err != nil { + return "", fmt.Errorf("read build config: %w", err) + } + + // Create temp directory with config file + tmpDir, err := os.MkdirTemp("", "hypeman-build-config-*") + if err != nil { + return "", fmt.Errorf("create temp dir: %w", err) + } + defer os.RemoveAll(tmpDir) + + // Write build.json to temp directory + buildJSONPath := filepath.Join(tmpDir, "build.json") + if err := os.WriteFile(buildJSONPath, configData, 0644); err != nil { + return "", 
fmt.Errorf("write build.json: %w", err) + } + + // Also write a metadata file for debugging + metadata := map[string]interface{}{ + "build_id": buildID, + "created_at": time.Now().Format(time.RFC3339), + } + metadataData, _ := json.MarshalIndent(metadata, "", " ") + metadataPath := filepath.Join(tmpDir, "metadata.json") + os.WriteFile(metadataPath, metadataData, 0644) + + // Create ext4 disk from the directory + diskPath := filepath.Join(os.TempDir(), fmt.Sprintf("build-config-%s.ext4", buildID)) + _, err = images.ExportRootfs(tmpDir, diskPath, images.FormatExt4) + if err != nil { + return "", fmt.Errorf("create config disk: %w", err) + } + + return diskPath, nil +} + +// copyFile copies a file from src to dst +func copyFile(src, dst string) error { + // Ensure parent directory exists + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + data, err := os.ReadFile(src) + if err != nil { + return err + } + return os.WriteFile(dst, data, 0644) +} diff --git a/lib/builds/manager_test.go b/lib/builds/manager_test.go new file mode 100644 index 00000000..c53ac984 --- /dev/null +++ b/lib/builds/manager_test.go @@ -0,0 +1,889 @@ +package builds + +import ( + "context" + "encoding/json" + "io" + "log/slog" + "os" + "path/filepath" + "testing" + "time" + + "github.com/onkernel/hypeman/lib/instances" + "github.com/onkernel/hypeman/lib/paths" + "github.com/onkernel/hypeman/lib/resources" + "github.com/onkernel/hypeman/lib/volumes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockInstanceManager implements instances.Manager for testing +type mockInstanceManager struct { + instances map[string]*instances.Instance + createFunc func(ctx context.Context, req instances.CreateInstanceRequest) (*instances.Instance, error) + getFunc func(ctx context.Context, id string) (*instances.Instance, error) + deleteFunc func(ctx context.Context, id string) error + stopFunc func(ctx context.Context, id string) 
(*instances.Instance, error) + createCallCount int + deleteCallCount int +} + +func newMockInstanceManager() *mockInstanceManager { + return &mockInstanceManager{ + instances: make(map[string]*instances.Instance), + } +} + +func (m *mockInstanceManager) ListInstances(ctx context.Context) ([]instances.Instance, error) { + var result []instances.Instance + for _, inst := range m.instances { + result = append(result, *inst) + } + return result, nil +} + +func (m *mockInstanceManager) CreateInstance(ctx context.Context, req instances.CreateInstanceRequest) (*instances.Instance, error) { + m.createCallCount++ + if m.createFunc != nil { + return m.createFunc(ctx, req) + } + inst := &instances.Instance{ + StoredMetadata: instances.StoredMetadata{ + Id: "inst-" + req.Name, + Name: req.Name, + }, + State: instances.StateRunning, + } + m.instances[inst.Id] = inst + return inst, nil +} + +func (m *mockInstanceManager) GetInstance(ctx context.Context, id string) (*instances.Instance, error) { + if m.getFunc != nil { + return m.getFunc(ctx, id) + } + if inst, ok := m.instances[id]; ok { + return inst, nil + } + return nil, instances.ErrNotFound +} + +func (m *mockInstanceManager) DeleteInstance(ctx context.Context, id string) error { + m.deleteCallCount++ + if m.deleteFunc != nil { + return m.deleteFunc(ctx, id) + } + delete(m.instances, id) + return nil +} + +func (m *mockInstanceManager) StandbyInstance(ctx context.Context, id string) (*instances.Instance, error) { + return nil, nil +} + +func (m *mockInstanceManager) RestoreInstance(ctx context.Context, id string) (*instances.Instance, error) { + return nil, nil +} + +func (m *mockInstanceManager) StopInstance(ctx context.Context, id string) (*instances.Instance, error) { + if m.stopFunc != nil { + return m.stopFunc(ctx, id) + } + if inst, ok := m.instances[id]; ok { + inst.State = instances.StateStopped + return inst, nil + } + return nil, instances.ErrNotFound +} + +func (m *mockInstanceManager) StartInstance(ctx 
context.Context, id string) (*instances.Instance, error) { + return nil, nil +} + +func (m *mockInstanceManager) StreamInstanceLogs(ctx context.Context, id string, tail int, follow bool, source instances.LogSource) (<-chan string, error) { + return nil, nil +} + +func (m *mockInstanceManager) RotateLogs(ctx context.Context, maxBytes int64, maxFiles int) error { + return nil +} + +func (m *mockInstanceManager) AttachVolume(ctx context.Context, id string, volumeId string, req instances.AttachVolumeRequest) (*instances.Instance, error) { + return nil, nil +} + +func (m *mockInstanceManager) DetachVolume(ctx context.Context, id string, volumeId string) (*instances.Instance, error) { + return nil, nil +} + +func (m *mockInstanceManager) ListInstanceAllocations(ctx context.Context) ([]resources.InstanceAllocation, error) { + return nil, nil +} + +// mockVolumeManager implements volumes.Manager for testing +type mockVolumeManager struct { + volumes map[string]*volumes.Volume + createFunc func(ctx context.Context, req volumes.CreateVolumeRequest) (*volumes.Volume, error) + createFromArchiveFunc func(ctx context.Context, req volumes.CreateVolumeFromArchiveRequest, archive io.Reader) (*volumes.Volume, error) + deleteFunc func(ctx context.Context, id string) error + createCallCount int + deleteCallCount int +} + +func newMockVolumeManager() *mockVolumeManager { + return &mockVolumeManager{ + volumes: make(map[string]*volumes.Volume), + } +} + +func (m *mockVolumeManager) ListVolumes(ctx context.Context) ([]volumes.Volume, error) { + var result []volumes.Volume + for _, vol := range m.volumes { + result = append(result, *vol) + } + return result, nil +} + +func (m *mockVolumeManager) CreateVolume(ctx context.Context, req volumes.CreateVolumeRequest) (*volumes.Volume, error) { + m.createCallCount++ + if m.createFunc != nil { + return m.createFunc(ctx, req) + } + vol := &volumes.Volume{ + Id: "vol-" + req.Name, + Name: req.Name, + } + m.volumes[vol.Id] = vol + return vol, nil +} 
+ +func (m *mockVolumeManager) CreateVolumeFromArchive(ctx context.Context, req volumes.CreateVolumeFromArchiveRequest, archive io.Reader) (*volumes.Volume, error) { + m.createCallCount++ + if m.createFromArchiveFunc != nil { + return m.createFromArchiveFunc(ctx, req, archive) + } + vol := &volumes.Volume{ + Id: "vol-" + req.Name, + Name: req.Name, + } + m.volumes[vol.Id] = vol + return vol, nil +} + +func (m *mockVolumeManager) GetVolume(ctx context.Context, id string) (*volumes.Volume, error) { + if vol, ok := m.volumes[id]; ok { + return vol, nil + } + return nil, volumes.ErrNotFound +} + +func (m *mockVolumeManager) GetVolumeByName(ctx context.Context, name string) (*volumes.Volume, error) { + for _, vol := range m.volumes { + if vol.Name == name { + return vol, nil + } + } + return nil, volumes.ErrNotFound +} + +func (m *mockVolumeManager) DeleteVolume(ctx context.Context, id string) error { + m.deleteCallCount++ + if m.deleteFunc != nil { + return m.deleteFunc(ctx, id) + } + delete(m.volumes, id) + return nil +} + +func (m *mockVolumeManager) AttachVolume(ctx context.Context, id string, req volumes.AttachVolumeRequest) error { + return nil +} + +func (m *mockVolumeManager) DetachVolume(ctx context.Context, volumeID string, instanceID string) error { + return nil +} + +func (m *mockVolumeManager) GetVolumePath(id string) string { + return "/tmp/volumes/" + id +} + +func (m *mockVolumeManager) TotalVolumeBytes(ctx context.Context) (int64, error) { + return 0, nil +} + +// mockSecretProvider implements SecretProvider for testing +type mockSecretProvider struct{} + +func (m *mockSecretProvider) GetSecrets(ctx context.Context, secretIDs []string) (map[string]string, error) { + return make(map[string]string), nil +} + +// Test helper to create a manager with test paths and mocks +func setupTestManager(t *testing.T) (*manager, *mockInstanceManager, *mockVolumeManager, string) { + t.Helper() + + // Create temp directory for test data + tempDir, err := 
os.MkdirTemp("", "builds-test-*") + require.NoError(t, err) + + // Create paths + p := paths.New(tempDir) + + // Create necessary directories + require.NoError(t, os.MkdirAll(filepath.Join(tempDir, "builds"), 0755)) + + // Create mocks + instanceMgr := newMockInstanceManager() + volumeMgr := newMockVolumeManager() + secretProvider := &mockSecretProvider{} + + // Create config + config := Config{ + MaxConcurrentBuilds: 2, + BuilderImage: "test/builder:latest", + RegistryURL: "localhost:5000", + DefaultTimeout: 300, + RegistrySecret: "test-secret-key", + } + + // Create a discard logger for tests + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + + // Create manager (without calling NewManager to avoid RecoverPendingBuilds) + mgr := &manager{ + config: config, + paths: p, + queue: NewBuildQueue(config.MaxConcurrentBuilds), + instanceManager: instanceMgr, + volumeManager: volumeMgr, + secretProvider: secretProvider, + tokenGenerator: NewRegistryTokenGenerator(config.RegistrySecret), + logger: logger, + statusSubscribers: make(map[string][]chan BuildEvent), + } + + return mgr, instanceMgr, volumeMgr, tempDir +} + +func TestCreateBuild_Success(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + req := CreateBuildRequest{ + CacheScope: "test-scope", + Dockerfile: "FROM alpine\nRUN echo hello", + } + sourceData := []byte("fake-tarball-data") + + build, err := mgr.CreateBuild(ctx, req, sourceData) + + require.NoError(t, err) + assert.NotEmpty(t, build.ID) + assert.Equal(t, StatusQueued, build.Status) + assert.NotNil(t, build.CreatedAt) + + // Verify source was stored + sourcePath := filepath.Join(tempDir, "builds", build.ID, "source", "source.tar.gz") + data, err := os.ReadFile(sourcePath) + require.NoError(t, err) + assert.Equal(t, sourceData, data) + + // Verify config was written + configPath := filepath.Join(tempDir, "builds", build.ID, "config.json") + _, err = os.Stat(configPath) + 
assert.NoError(t, err) + + // Verify metadata was written + metaPath := filepath.Join(tempDir, "builds", build.ID, "metadata.json") + _, err = os.Stat(metaPath) + assert.NoError(t, err) +} + +func TestCreateBuild_WithBuildPolicy(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + timeout := 600 + req := CreateBuildRequest{ + Dockerfile: "FROM alpine", + BuildPolicy: &BuildPolicy{ + TimeoutSeconds: timeout, + NetworkMode: "host", + }, + } + + build, err := mgr.CreateBuild(ctx, req, []byte("source")) + + require.NoError(t, err) + assert.NotEmpty(t, build.ID) +} + +func TestGetBuild_Found(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create a build first + req := CreateBuildRequest{ + Dockerfile: "FROM alpine", + } + created, err := mgr.CreateBuild(ctx, req, []byte("source")) + require.NoError(t, err) + + // Get the build + build, err := mgr.GetBuild(ctx, created.ID) + + require.NoError(t, err) + assert.Equal(t, created.ID, build.ID) + assert.Equal(t, StatusQueued, build.Status) +} + +func TestGetBuild_NotFound(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + _, err := mgr.GetBuild(ctx, "nonexistent-id") + + assert.Error(t, err) + assert.ErrorIs(t, err, ErrNotFound) +} + +func TestListBuilds_Empty(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + builds, err := mgr.ListBuilds(ctx) + + require.NoError(t, err) + assert.Empty(t, builds) +} + +func TestListBuilds_WithBuilds(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create multiple builds + for i := 0; i < 3; i++ { + req := CreateBuildRequest{ + Dockerfile: "FROM alpine", + } + _, err := mgr.CreateBuild(ctx, req, []byte("source")) + 
require.NoError(t, err) + } + + builds, err := mgr.ListBuilds(ctx) + + require.NoError(t, err) + assert.Len(t, builds, 3) +} + +func TestCancelBuild_QueuedBuild(t *testing.T) { + // Test the queue cancellation directly to avoid race conditions + queue := NewBuildQueue(1) // Only 1 concurrent + + started := make(chan struct{}) + + // Add a blocking build to fill the single slot + queue.Enqueue("build-1", CreateBuildRequest{}, func() { + started <- struct{}{} + select {} // Block forever + }) + + // Wait for first build to start + <-started + + // Add a second build - this one should be queued + queue.Enqueue("build-2", CreateBuildRequest{}, func() {}) + + // Verify it's pending + assert.Equal(t, 1, queue.PendingCount()) + + // Cancel the queued build + cancelled := queue.Cancel("build-2") + assert.True(t, cancelled) + + // Verify it's removed from pending + assert.Equal(t, 0, queue.PendingCount()) +} + +func TestCancelBuild_NotFound(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + err := mgr.CancelBuild(ctx, "nonexistent-id") + + assert.Error(t, err) + assert.ErrorIs(t, err, ErrNotFound) +} + +func TestCancelBuild_AlreadyCompleted(t *testing.T) { + // Test cancel rejection for completed builds by directly setting up metadata + tempDir, err := os.MkdirTemp("", "builds-test-*") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + p := paths.New(tempDir) + require.NoError(t, os.MkdirAll(filepath.Join(tempDir, "builds", "completed-build"), 0755)) + + // Create metadata with completed status + meta := &buildMetadata{ + ID: "completed-build", + Status: StatusReady, + CreatedAt: time.Now(), + } + require.NoError(t, writeMetadata(p, meta)) + + // Create manager + config := Config{ + MaxConcurrentBuilds: 2, + RegistrySecret: "test-secret", + } + mgr := &manager{ + config: config, + paths: p, + queue: NewBuildQueue(config.MaxConcurrentBuilds), + tokenGenerator: 
NewRegistryTokenGenerator(config.RegistrySecret), + logger: slog.New(slog.NewTextHandler(io.Discard, nil)), + } + + // Try to cancel - should fail because it's already completed + err = mgr.CancelBuild(context.Background(), "completed-build") + + require.Error(t, err, "expected error when cancelling completed build") + assert.Contains(t, err.Error(), "already completed") +} + +func TestGetBuildLogs_Empty(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create a build + req := CreateBuildRequest{ + Dockerfile: "FROM alpine", + } + build, err := mgr.CreateBuild(ctx, req, []byte("source")) + require.NoError(t, err) + + // Get logs (should be empty initially) + logs, err := mgr.GetBuildLogs(ctx, build.ID) + + require.NoError(t, err) + assert.Empty(t, logs) +} + +func TestGetBuildLogs_WithLogs(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create a build + req := CreateBuildRequest{ + Dockerfile: "FROM alpine", + } + build, err := mgr.CreateBuild(ctx, req, []byte("source")) + require.NoError(t, err) + + // Append some logs + logData := []byte("Step 1: FROM alpine\nStep 2: RUN echo hello\n") + err = appendLog(mgr.paths, build.ID, logData) + require.NoError(t, err) + + // Get logs + logs, err := mgr.GetBuildLogs(ctx, build.ID) + + require.NoError(t, err) + assert.Equal(t, logData, logs) +} + +func TestGetBuildLogs_NotFound(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + _, err := mgr.GetBuildLogs(ctx, "nonexistent-id") + + assert.Error(t, err) +} + +func TestBuildQueue_ConcurrencyLimit(t *testing.T) { + // Test the queue directly rather than through the manager + // because the manager's runBuild goroutine completes quickly with mocks + queue := NewBuildQueue(2) // Max 2 concurrent + + started := make(chan string, 5) + + // Enqueue 5 
builds with blocking start functions + for i := 0; i < 5; i++ { + id := string(rune('A' + i)) + queue.Enqueue(id, CreateBuildRequest{}, func() { + started <- id + // Block until test completes - simulates long-running build + select {} + }) + } + + // Give goroutines time to start + for i := 0; i < 2; i++ { + <-started + } + + // First 2 should be active, rest should be pending + active := queue.ActiveCount() + pending := queue.PendingCount() + assert.Equal(t, 2, active, "expected 2 active builds") + assert.Equal(t, 3, pending, "expected 3 pending builds") +} + +func TestUpdateStatus(t *testing.T) { + // Test the updateStatus function directly using storage functions + // This avoids race conditions with the build queue goroutines + tempDir, err := os.MkdirTemp("", "builds-test-*") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + p := paths.New(tempDir) + require.NoError(t, os.MkdirAll(filepath.Join(tempDir, "builds", "test-build-1"), 0755)) + + // Create initial metadata + meta := &buildMetadata{ + ID: "test-build-1", + Status: StatusQueued, + CreatedAt: time.Now(), + } + require.NoError(t, writeMetadata(p, meta)) + + // Update status + meta.Status = StatusBuilding + now := time.Now() + meta.StartedAt = &now + require.NoError(t, writeMetadata(p, meta)) + + // Read back and verify + readMeta, err := readMetadata(p, "test-build-1") + require.NoError(t, err) + assert.Equal(t, StatusBuilding, readMeta.Status) + assert.NotNil(t, readMeta.StartedAt) +} + +func TestUpdateStatus_WithError(t *testing.T) { + // Test status updates with error message directly + tempDir, err := os.MkdirTemp("", "builds-test-*") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + p := paths.New(tempDir) + require.NoError(t, os.MkdirAll(filepath.Join(tempDir, "builds", "test-build-1"), 0755)) + + // Create initial metadata + meta := &buildMetadata{ + ID: "test-build-1", + Status: StatusQueued, + CreatedAt: time.Now(), + } + require.NoError(t, writeMetadata(p, meta)) + + // 
Update status with error + errMsg := "build failed: out of memory" + meta.Status = StatusFailed + meta.Error = &errMsg + require.NoError(t, writeMetadata(p, meta)) + + // Read back and verify + readMeta, err := readMetadata(p, "test-build-1") + require.NoError(t, err) + assert.Equal(t, StatusFailed, readMeta.Status) + require.NotNil(t, readMeta.Error) + assert.Contains(t, *readMeta.Error, "out of memory") +} + +func TestRegistryTokenGeneration(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create a build with cache scope + req := CreateBuildRequest{ + CacheScope: "my-cache", + Dockerfile: "FROM alpine", + } + build, err := mgr.CreateBuild(ctx, req, []byte("source")) + require.NoError(t, err) + + // Read the build config and verify token was generated + configPath := filepath.Join(tempDir, "builds", build.ID, "config.json") + data, err := os.ReadFile(configPath) + require.NoError(t, err) + + var config BuildConfig + err = json.Unmarshal(data, &config) + require.NoError(t, err) + + assert.NotEmpty(t, config.RegistryToken) + assert.Equal(t, "localhost:5000", config.RegistryURL) +} + +func TestStart(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Start should succeed without error + err := mgr.Start(ctx) + + assert.NoError(t, err) +} + +func TestCreateBuild_MultipleConcurrent(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create builds in parallel + done := make(chan *Build, 5) + errs := make(chan error, 5) + + for i := 0; i < 5; i++ { + go func() { + req := CreateBuildRequest{ + Dockerfile: "FROM alpine", + } + build, err := mgr.CreateBuild(ctx, req, []byte("source")) + if err != nil { + errs <- err + } else { + done <- build + } + }() + } + + // Collect results + var builds []*Build + for i := 0; i < 5; i++ { + select { + case 
b := <-done: + builds = append(builds, b) + case err := <-errs: + t.Fatalf("unexpected error: %v", err) + } + } + + assert.Len(t, builds, 5) + + // Verify all IDs are unique + ids := make(map[string]bool) + for _, b := range builds { + assert.False(t, ids[b.ID], "duplicate build ID: %s", b.ID) + ids[b.ID] = true + } +} + +func TestStreamBuildEvents_NotFound(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + _, err := mgr.StreamBuildEvents(ctx, "nonexistent-id", false) + assert.Error(t, err) +} + +func TestStreamBuildEvents_ExistingLogs(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create a build + req := CreateBuildRequest{Dockerfile: "FROM alpine"} + sourceData := []byte("fake-tarball-data") + build, err := mgr.CreateBuild(ctx, req, sourceData) + require.NoError(t, err) + + // Write some logs directly + logDir := filepath.Join(tempDir, "builds", build.ID, "logs") + require.NoError(t, os.MkdirAll(logDir, 0755)) + logPath := filepath.Join(logDir, "build.log") + require.NoError(t, os.WriteFile(logPath, []byte("line1\nline2\nline3\n"), 0644)) + + // Stream events without follow + eventChan, err := mgr.StreamBuildEvents(ctx, build.ID, false) + require.NoError(t, err) + + // Collect events + var events []BuildEvent + for event := range eventChan { + events = append(events, event) + } + + // Should have 3 log events + assert.Len(t, events, 3) + for _, event := range events { + assert.Equal(t, EventTypeLog, event.Type) + } + assert.Equal(t, "line1", events[0].Content) + assert.Equal(t, "line2", events[1].Content) + assert.Equal(t, "line3", events[2].Content) +} + +func TestStreamBuildEvents_NoLogs(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx := context.Background() + + // Create a build + req := CreateBuildRequest{Dockerfile: "FROM alpine"} + sourceData := 
[]byte("fake-tarball-data") + build, err := mgr.CreateBuild(ctx, req, sourceData) + require.NoError(t, err) + + // Stream events without follow (no logs exist) + eventChan, err := mgr.StreamBuildEvents(ctx, build.ID, false) + require.NoError(t, err) + + // Should close immediately with no events + var events []BuildEvent + for event := range eventChan { + events = append(events, event) + } + assert.Empty(t, events) +} + +func TestStreamBuildEvents_WithStatusUpdate(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create a build + req := CreateBuildRequest{Dockerfile: "FROM alpine"} + sourceData := []byte("fake-tarball-data") + build, err := mgr.CreateBuild(ctx, req, sourceData) + require.NoError(t, err) + + // Write some initial logs + logDir := filepath.Join(tempDir, "builds", build.ID, "logs") + require.NoError(t, os.MkdirAll(logDir, 0755)) + logPath := filepath.Join(logDir, "build.log") + require.NoError(t, os.WriteFile(logPath, []byte("initial log\n"), 0644)) + + // Stream events with follow + eventChan, err := mgr.StreamBuildEvents(ctx, build.ID, true) + require.NoError(t, err) + + // Read events until we see the initial log + var foundInitialLog bool + timeout := time.After(2 * time.Second) +eventLoop: + for !foundInitialLog { + select { + case event := <-eventChan: + if event.Type == EventTypeLog && event.Content == "initial log" { + foundInitialLog = true + break eventLoop + } + // Skip status events from queue (e.g. 
"building") + case <-timeout: + t.Fatal("timeout waiting for initial log event") + } + } + + // Trigger a status update to "ready" (should cause stream to close) + mgr.updateStatus(build.ID, StatusReady, nil) + + // Should receive "ready" status event and channel should close + var readyReceived bool + timeout = time.After(2 * time.Second) + for !readyReceived { + select { + case event, ok := <-eventChan: + if !ok { + // Channel closed, this is fine after status update + return + } + if event.Type == EventTypeStatus && event.Status == StatusReady { + readyReceived = true + } + case <-timeout: + t.Fatal("timeout waiting for ready status event") + } + } +} + +func TestStreamBuildEvents_ContextCancellation(t *testing.T) { + mgr, _, _, tempDir := setupTestManager(t) + defer os.RemoveAll(tempDir) + + ctx, cancel := context.WithCancel(context.Background()) + + // Create a build + req := CreateBuildRequest{Dockerfile: "FROM alpine"} + sourceData := []byte("fake-tarball-data") + build, err := mgr.CreateBuild(ctx, req, sourceData) + require.NoError(t, err) + + // Write some logs + logDir := filepath.Join(tempDir, "builds", build.ID, "logs") + require.NoError(t, os.MkdirAll(logDir, 0755)) + logPath := filepath.Join(logDir, "build.log") + require.NoError(t, os.WriteFile(logPath, []byte("log line\n"), 0644)) + + // Stream events with follow + eventChan, err := mgr.StreamBuildEvents(ctx, build.ID, true) + require.NoError(t, err) + + // Read events until we see the log line + var foundLogLine bool + timeout := time.After(2 * time.Second) +eventLoop: + for !foundLogLine { + select { + case event := <-eventChan: + if event.Type == EventTypeLog && event.Content == "log line" { + foundLogLine = true + break eventLoop + } + // Skip status events from queue (e.g. 
"building") + case <-timeout: + t.Fatal("timeout waiting for log event") + } + } + + // Cancel the context + cancel() + + // Channel should close + timeout = time.After(2 * time.Second) + for { + select { + case _, ok := <-eventChan: + if !ok { + // Channel closed as expected + return + } + // May get more events before close, drain them + case <-timeout: + t.Fatal("timeout waiting for channel to close after cancel") + } + } +} diff --git a/lib/builds/metrics.go b/lib/builds/metrics.go new file mode 100644 index 00000000..92d3c029 --- /dev/null +++ b/lib/builds/metrics.go @@ -0,0 +1,85 @@ +package builds + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +// Metrics provides Prometheus metrics for the build system +type Metrics struct { + buildDuration metric.Float64Histogram + buildTotal metric.Int64Counter + queueLength metric.Int64ObservableGauge + activeBuilds metric.Int64ObservableGauge +} + +// NewMetrics creates a new Metrics instance +func NewMetrics(meter metric.Meter) (*Metrics, error) { + buildDuration, err := meter.Float64Histogram( + "hypeman_build_duration_seconds", + metric.WithDescription("Duration of builds in seconds"), + metric.WithUnit("s"), + ) + if err != nil { + return nil, err + } + + buildTotal, err := meter.Int64Counter( + "hypeman_builds_total", + metric.WithDescription("Total number of builds"), + ) + if err != nil { + return nil, err + } + + queueLength, err := meter.Int64ObservableGauge( + "hypeman_build_queue_length", + metric.WithDescription("Number of builds in queue"), + ) + if err != nil { + return nil, err + } + + activeBuilds, err := meter.Int64ObservableGauge( + "hypeman_builds_active", + metric.WithDescription("Number of currently running builds"), + ) + if err != nil { + return nil, err + } + + return &Metrics{ + buildDuration: buildDuration, + buildTotal: buildTotal, + queueLength: queueLength, + activeBuilds: activeBuilds, + }, nil +} + +// RecordBuild records 
metrics for a completed build +func (m *Metrics) RecordBuild(ctx context.Context, status string, duration time.Duration) { + attrs := []attribute.KeyValue{ + attribute.String("status", status), + } + + m.buildDuration.Record(ctx, duration.Seconds(), metric.WithAttributes(attrs...)) + m.buildTotal.Add(ctx, 1, metric.WithAttributes(attrs...)) +} + +// RegisterQueueCallbacks registers callbacks for queue metrics +func (m *Metrics) RegisterQueueCallbacks(queue *BuildQueue, meter metric.Meter) error { + _, err := meter.RegisterCallback( + func(ctx context.Context, observer metric.Observer) error { + observer.ObserveInt64(m.queueLength, int64(queue.PendingCount())) + observer.ObserveInt64(m.activeBuilds, int64(queue.ActiveCount())) + return nil + }, + m.queueLength, + m.activeBuilds, + ) + return err +} + diff --git a/lib/builds/queue.go b/lib/builds/queue.go new file mode 100644 index 00000000..2fee288b --- /dev/null +++ b/lib/builds/queue.go @@ -0,0 +1,171 @@ +package builds + +import "sync" + +// QueuedBuild represents a build waiting to be executed +type QueuedBuild struct { + BuildID string + Request CreateBuildRequest + StartFn func() +} + +// BuildQueue manages concurrent builds with a configurable limit. +// Following the pattern from lib/images/queue.go. 
+// +// Design notes (see plan for full context): +// - Queue state is in-memory (lost on restart) +// - Build metadata is persisted to disk +// - On startup, pending builds are recovered via listPendingBuilds() +// +// Future migration path if needed: +// - Add BuildQueue interface with Enqueue/Dequeue/Ack/Nack +// - Implement adapters: memoryQueue, redisQueue, natsQueue +// - Use BUILD_QUEUE_BACKEND env var to select implementation +type BuildQueue struct { + maxConcurrent int + active map[string]bool + pending []QueuedBuild + mu sync.Mutex +} + +// NewBuildQueue creates a new build queue with the given concurrency limit +func NewBuildQueue(maxConcurrent int) *BuildQueue { + if maxConcurrent < 1 { + maxConcurrent = 1 + } + return &BuildQueue{ + maxConcurrent: maxConcurrent, + active: make(map[string]bool), + pending: make([]QueuedBuild, 0), + } +} + +// Enqueue adds a build to the queue. Returns queue position (0 if started immediately, >0 if queued). +// If the build is already building or queued, returns its current position without re-enqueueing. 
+func (q *BuildQueue) Enqueue(buildID string, req CreateBuildRequest, startFn func()) int { + q.mu.Lock() + defer q.mu.Unlock() + + // Check if already building (position 0, actively running) + if q.active[buildID] { + return 0 + } + + // Check if already in pending queue + for i, build := range q.pending { + if build.BuildID == buildID { + return i + 1 // Return existing queue position + } + } + + // Wrap the function to auto-complete + wrappedFn := func() { + defer q.MarkComplete(buildID) + startFn() + } + + build := QueuedBuild{ + BuildID: buildID, + Request: req, + StartFn: wrappedFn, + } + + // Start immediately if under concurrency limit + if len(q.active) < q.maxConcurrent { + q.active[buildID] = true + go wrappedFn() + return 0 + } + + // Otherwise queue it + q.pending = append(q.pending, build) + return len(q.pending) +} + +// MarkComplete marks a build as complete and starts the next pending build if any +func (q *BuildQueue) MarkComplete(buildID string) { + q.mu.Lock() + defer q.mu.Unlock() + + delete(q.active, buildID) + + // Start next pending build if we have capacity + if len(q.pending) > 0 && len(q.active) < q.maxConcurrent { + next := q.pending[0] + q.pending = q.pending[1:] + q.active[next.BuildID] = true + go next.StartFn() + } +} + +// GetPosition returns the queue position for a build. +// Returns nil if the build is actively running or not in queue. +func (q *BuildQueue) GetPosition(buildID string) *int { + q.mu.Lock() + defer q.mu.Unlock() + + if q.active[buildID] { + return nil // Actively running, not queued + } + + for i, build := range q.pending { + if build.BuildID == buildID { + pos := i + 1 + return &pos + } + } + + return nil // Not in queue +} + +// Cancel removes a build from the pending queue. +// Returns true if the build was cancelled, false if it was not in the queue +// (already running or not found). 
+func (q *BuildQueue) Cancel(buildID string) bool { + q.mu.Lock() + defer q.mu.Unlock() + + // Can't cancel if actively running + if q.active[buildID] { + return false + } + + // Find and remove from pending + for i, build := range q.pending { + if build.BuildID == buildID { + q.pending = append(q.pending[:i], q.pending[i+1:]...) + return true + } + } + + return false +} + +// IsActive returns true if the build is actively running +func (q *BuildQueue) IsActive(buildID string) bool { + q.mu.Lock() + defer q.mu.Unlock() + return q.active[buildID] +} + +// ActiveCount returns the number of actively building builds +func (q *BuildQueue) ActiveCount() int { + q.mu.Lock() + defer q.mu.Unlock() + return len(q.active) +} + +// PendingCount returns the number of queued builds +func (q *BuildQueue) PendingCount() int { + q.mu.Lock() + defer q.mu.Unlock() + return len(q.pending) +} + +// QueueLength returns the total number of builds (active + pending) +func (q *BuildQueue) QueueLength() int { + q.mu.Lock() + defer q.mu.Unlock() + return len(q.active) + len(q.pending) +} + diff --git a/lib/builds/queue_test.go b/lib/builds/queue_test.go new file mode 100644 index 00000000..5f5dd9af --- /dev/null +++ b/lib/builds/queue_test.go @@ -0,0 +1,230 @@ +package builds + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBuildQueue_EnqueueStartsImmediately(t *testing.T) { + queue := NewBuildQueue(2) + + started := make(chan string, 2) + done := make(chan struct{}) + + // Enqueue first build - should start immediately + pos := queue.Enqueue("build-1", CreateBuildRequest{}, func() { + started <- "build-1" + <-done // Wait for signal + }) + + assert.Equal(t, 0, pos, "first build should start immediately (position 0)") + + // Wait for it to start + select { + case id := <-started: + assert.Equal(t, "build-1", id) + case <-time.After(time.Second): + t.Fatal("build-1 did not start") + } + + close(done) +} + 
+func TestBuildQueue_QueueWhenAtCapacity(t *testing.T) { + queue := NewBuildQueue(1) // Max 1 concurrent + + var wg sync.WaitGroup + done := make(chan struct{}) + + // Start first build + wg.Add(1) + pos1 := queue.Enqueue("build-1", CreateBuildRequest{}, func() { + wg.Done() + <-done // Block + }) + assert.Equal(t, 0, pos1) + + // Wait for first to actually start + wg.Wait() + + // Second build should be queued + pos2 := queue.Enqueue("build-2", CreateBuildRequest{}, func() {}) + assert.Equal(t, 1, pos2, "second build should be queued at position 1") + + // Third build should be queued at position 2 + pos3 := queue.Enqueue("build-3", CreateBuildRequest{}, func() {}) + assert.Equal(t, 2, pos3, "third build should be queued at position 2") + + close(done) +} + +func TestBuildQueue_DeduplicationActive(t *testing.T) { + queue := NewBuildQueue(2) + done := make(chan struct{}) + + // Start a build + queue.Enqueue("build-1", CreateBuildRequest{}, func() { + <-done + }) + + // Wait for it to become active + time.Sleep(10 * time.Millisecond) + + // Try to enqueue the same build again - should return position 0 (active) + pos := queue.Enqueue("build-1", CreateBuildRequest{}, func() {}) + assert.Equal(t, 0, pos, "re-enqueueing active build should return position 0") + + close(done) +} + +func TestBuildQueue_DeduplicationPending(t *testing.T) { + queue := NewBuildQueue(1) + done := make(chan struct{}) + + // Fill the queue + queue.Enqueue("build-1", CreateBuildRequest{}, func() { + <-done + }) + + // Add a second build to pending + pos1 := queue.Enqueue("build-2", CreateBuildRequest{}, func() {}) + assert.Equal(t, 1, pos1) + + // Try to enqueue build-2 again - should return same position + pos2 := queue.Enqueue("build-2", CreateBuildRequest{}, func() {}) + assert.Equal(t, 1, pos2, "re-enqueueing pending build should return same position") + + close(done) +} + +func TestBuildQueue_Cancel(t *testing.T) { + queue := NewBuildQueue(1) + done := make(chan struct{}) + + // Fill the 
queue + queue.Enqueue("build-1", CreateBuildRequest{}, func() { + <-done + }) + + // Add to pending + queue.Enqueue("build-2", CreateBuildRequest{}, func() {}) + queue.Enqueue("build-3", CreateBuildRequest{}, func() {}) + + // Cancel build-2 + cancelled := queue.Cancel("build-2") + require.True(t, cancelled, "should be able to cancel pending build") + + // Verify build-3 moved up + pos := queue.GetPosition("build-3") + require.NotNil(t, pos) + assert.Equal(t, 1, *pos, "build-3 should move to position 1") + + // Can't cancel active build + cancelled = queue.Cancel("build-1") + assert.False(t, cancelled, "should not be able to cancel active build") + + close(done) +} + +func TestBuildQueue_GetPosition(t *testing.T) { + queue := NewBuildQueue(1) + done := make(chan struct{}) + + queue.Enqueue("build-1", CreateBuildRequest{}, func() { + <-done + }) + queue.Enqueue("build-2", CreateBuildRequest{}, func() {}) + queue.Enqueue("build-3", CreateBuildRequest{}, func() {}) + + // Active build has no position (returns nil) + pos1 := queue.GetPosition("build-1") + assert.Nil(t, pos1, "active build should have no position") + + // Pending builds have positions + pos2 := queue.GetPosition("build-2") + require.NotNil(t, pos2) + assert.Equal(t, 1, *pos2) + + pos3 := queue.GetPosition("build-3") + require.NotNil(t, pos3) + assert.Equal(t, 2, *pos3) + + // Non-existent build has no position + pos4 := queue.GetPosition("build-4") + assert.Nil(t, pos4) + + close(done) +} + +func TestBuildQueue_AutoStartNextOnComplete(t *testing.T) { + queue := NewBuildQueue(1) + + started := make(chan string, 3) + var mu sync.Mutex + completionOrder := []string{} + + // Add builds + queue.Enqueue("build-1", CreateBuildRequest{}, func() { + started <- "build-1" + time.Sleep(10 * time.Millisecond) + mu.Lock() + completionOrder = append(completionOrder, "build-1") + mu.Unlock() + }) + queue.Enqueue("build-2", CreateBuildRequest{}, func() { + started <- "build-2" + time.Sleep(10 * time.Millisecond) + 
mu.Lock() + completionOrder = append(completionOrder, "build-2") + mu.Unlock() + }) + + // Wait for both to complete + for i := 0; i < 2; i++ { + select { + case <-started: + case <-time.After(2 * time.Second): + t.Fatal("builds did not complete in time") + } + } + + // Give time for completion + time.Sleep(50 * time.Millisecond) + + mu.Lock() + defer mu.Unlock() + assert.Equal(t, []string{"build-1", "build-2"}, completionOrder) +} + +func TestBuildQueue_Counts(t *testing.T) { + queue := NewBuildQueue(2) + + assert.Equal(t, 0, queue.ActiveCount()) + assert.Equal(t, 0, queue.PendingCount()) + assert.Equal(t, 0, queue.QueueLength()) + + done := make(chan struct{}) + queue.Enqueue("build-1", CreateBuildRequest{}, func() { <-done }) + queue.Enqueue("build-2", CreateBuildRequest{}, func() { <-done }) + + // Wait for them to start + time.Sleep(10 * time.Millisecond) + + assert.Equal(t, 2, queue.ActiveCount()) + assert.Equal(t, 0, queue.PendingCount()) + assert.Equal(t, 2, queue.QueueLength()) + + // Add a pending one + queue.Enqueue("build-3", CreateBuildRequest{}, func() {}) + + assert.Equal(t, 2, queue.ActiveCount()) + assert.Equal(t, 1, queue.PendingCount()) + assert.Equal(t, 3, queue.QueueLength()) + + close(done) +} + diff --git a/lib/builds/registry_token.go b/lib/builds/registry_token.go new file mode 100644 index 00000000..6c8cf44b --- /dev/null +++ b/lib/builds/registry_token.go @@ -0,0 +1,106 @@ +// Package builds implements registry token generation for secure builder VM authentication. +package builds + +import ( + "fmt" + "time" + + "github.com/golang-jwt/jwt/v5" +) + +// RegistryTokenClaims contains the claims for a scoped registry access token. +// These tokens are issued to builder VMs to grant limited push access to specific repositories. 
+type RegistryTokenClaims struct { + jwt.RegisteredClaims + + // BuildID is the build job identifier for audit purposes + BuildID string `json:"build_id"` + + // Repositories is the list of allowed repository paths (e.g., ["builds/abc123", "cache/tenant-x"]) + Repositories []string `json:"repos"` + + // Scope is the access scope: "push" for write access, "pull" for read-only + Scope string `json:"scope"` +} + +// RegistryTokenGenerator creates scoped registry access tokens +type RegistryTokenGenerator struct { + secret []byte +} + +// NewRegistryTokenGenerator creates a new token generator with the given secret +func NewRegistryTokenGenerator(secret string) *RegistryTokenGenerator { + return &RegistryTokenGenerator{ + secret: []byte(secret), + } +} + +// GeneratePushToken creates a short-lived token granting push access to specific repositories. +// The token expires after the specified duration (typically matching the build timeout). +func (g *RegistryTokenGenerator) GeneratePushToken(buildID string, repos []string, ttl time.Duration) (string, error) { + if buildID == "" { + return "", fmt.Errorf("build ID is required") + } + if len(repos) == 0 { + return "", fmt.Errorf("at least one repository is required") + } + + now := time.Now() + claims := RegistryTokenClaims{ + RegisteredClaims: jwt.RegisteredClaims{ + Subject: "builder-" + buildID, + IssuedAt: jwt.NewNumericDate(now), + ExpiresAt: jwt.NewNumericDate(now.Add(ttl)), + Issuer: "hypeman", + }, + BuildID: buildID, + Repositories: repos, + Scope: "push", + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString(g.secret) +} + +// ValidateToken parses and validates a registry token, returning the claims if valid. 
+func (g *RegistryTokenGenerator) ValidateToken(tokenString string) (*RegistryTokenClaims, error) { + token, err := jwt.ParseWithClaims(tokenString, &RegistryTokenClaims{}, func(token *jwt.Token) (interface{}, error) { + // Validate signing method + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return g.secret, nil + }) + + if err != nil { + return nil, fmt.Errorf("parse token: %w", err) + } + + claims, ok := token.Claims.(*RegistryTokenClaims) + if !ok || !token.Valid { + return nil, fmt.Errorf("invalid token") + } + + return claims, nil +} + +// IsRepositoryAllowed checks if the given repository path is allowed by the token claims. +func (c *RegistryTokenClaims) IsRepositoryAllowed(repo string) bool { + for _, allowed := range c.Repositories { + if allowed == repo { + return true + } + } + return false +} + +// IsPushAllowed returns true if the token grants push (write) access. +func (c *RegistryTokenClaims) IsPushAllowed() bool { + return c.Scope == "push" +} + +// IsPullAllowed returns true if the token grants pull (read) access. +// Push tokens also implicitly grant pull access. 
+func (c *RegistryTokenClaims) IsPullAllowed() bool { + return c.Scope == "push" || c.Scope == "pull" +} diff --git a/lib/builds/registry_token_test.go b/lib/builds/registry_token_test.go new file mode 100644 index 00000000..1231e07e --- /dev/null +++ b/lib/builds/registry_token_test.go @@ -0,0 +1,111 @@ +package builds + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRegistryTokenGenerator_GeneratePushToken(t *testing.T) { + generator := NewRegistryTokenGenerator("test-secret-key") + + t.Run("valid token generation", func(t *testing.T) { + token, err := generator.GeneratePushToken("build-123", []string{"builds/build-123", "cache/tenant-x"}, 30*time.Minute) + require.NoError(t, err) + assert.NotEmpty(t, token) + + // Validate the token + claims, err := generator.ValidateToken(token) + require.NoError(t, err) + assert.Equal(t, "build-123", claims.BuildID) + assert.Equal(t, []string{"builds/build-123", "cache/tenant-x"}, claims.Repositories) + assert.Equal(t, "push", claims.Scope) + assert.Equal(t, "builder-build-123", claims.Subject) + assert.Equal(t, "hypeman", claims.Issuer) + }) + + t.Run("empty build ID", func(t *testing.T) { + _, err := generator.GeneratePushToken("", []string{"builds/build-123"}, 30*time.Minute) + require.Error(t, err) + assert.Contains(t, err.Error(), "build ID is required") + }) + + t.Run("empty repositories", func(t *testing.T) { + _, err := generator.GeneratePushToken("build-123", []string{}, 30*time.Minute) + require.Error(t, err) + assert.Contains(t, err.Error(), "at least one repository is required") + }) +} + +func TestRegistryTokenGenerator_ValidateToken(t *testing.T) { + generator := NewRegistryTokenGenerator("test-secret-key") + + t.Run("valid token", func(t *testing.T) { + token, err := generator.GeneratePushToken("build-abc", []string{"builds/build-abc"}, time.Hour) + require.NoError(t, err) + + claims, err := generator.ValidateToken(token) + 
require.NoError(t, err) + assert.Equal(t, "build-abc", claims.BuildID) + }) + + t.Run("expired token", func(t *testing.T) { + // Generate a token that expires immediately + token, err := generator.GeneratePushToken("build-expired", []string{"builds/build-expired"}, -time.Hour) + require.NoError(t, err) + + _, err = generator.ValidateToken(token) + require.Error(t, err) + assert.Contains(t, err.Error(), "token is expired") + }) + + t.Run("invalid signature", func(t *testing.T) { + // Generate with one secret + gen1 := NewRegistryTokenGenerator("secret-1") + token, err := gen1.GeneratePushToken("build-123", []string{"builds/build-123"}, time.Hour) + require.NoError(t, err) + + // Validate with different secret + gen2 := NewRegistryTokenGenerator("secret-2") + _, err = gen2.ValidateToken(token) + require.Error(t, err) + assert.Contains(t, err.Error(), "signature is invalid") + }) + + t.Run("malformed token", func(t *testing.T) { + _, err := generator.ValidateToken("not.a.valid.jwt.token") + require.Error(t, err) + }) +} + +func TestRegistryTokenClaims_IsRepositoryAllowed(t *testing.T) { + claims := &RegistryTokenClaims{ + Repositories: []string{"builds/abc123", "cache/tenant-x"}, + } + + t.Run("allowed repo", func(t *testing.T) { + assert.True(t, claims.IsRepositoryAllowed("builds/abc123")) + assert.True(t, claims.IsRepositoryAllowed("cache/tenant-x")) + }) + + t.Run("not allowed repo", func(t *testing.T) { + assert.False(t, claims.IsRepositoryAllowed("builds/other")) + assert.False(t, claims.IsRepositoryAllowed("cache/other-tenant")) + }) +} + +func TestRegistryTokenClaims_IsPushAllowed(t *testing.T) { + t.Run("push scope", func(t *testing.T) { + claims := &RegistryTokenClaims{Scope: "push"} + assert.True(t, claims.IsPushAllowed()) + assert.True(t, claims.IsPullAllowed()) // push implies pull + }) + + t.Run("pull scope", func(t *testing.T) { + claims := &RegistryTokenClaims{Scope: "pull"} + assert.False(t, claims.IsPushAllowed()) + assert.True(t, 
claims.IsPullAllowed()) + }) +} diff --git a/lib/builds/storage.go b/lib/builds/storage.go new file mode 100644 index 00000000..777382bb --- /dev/null +++ b/lib/builds/storage.go @@ -0,0 +1,243 @@ +package builds + +import ( + "encoding/json" + "fmt" + "os" + "sort" + "time" + + "github.com/onkernel/hypeman/lib/paths" +) + +// buildMetadata is the internal representation stored on disk +type buildMetadata struct { + ID string `json:"id"` + Status string `json:"status"` + Request *CreateBuildRequest `json:"request,omitempty"` + ImageDigest *string `json:"image_digest,omitempty"` + ImageRef *string `json:"image_ref,omitempty"` + Error *string `json:"error,omitempty"` + Provenance *BuildProvenance `json:"provenance,omitempty"` + CreatedAt time.Time `json:"created_at"` + StartedAt *time.Time `json:"started_at,omitempty"` + CompletedAt *time.Time `json:"completed_at,omitempty"` + DurationMS *int64 `json:"duration_ms,omitempty"` + BuilderInstance *string `json:"builder_instance,omitempty"` // Instance ID of builder VM +} + +// toBuild converts internal metadata to the public Build type +func (m *buildMetadata) toBuild() *Build { + return &Build{ + ID: m.ID, + Status: m.Status, + ImageDigest: m.ImageDigest, + ImageRef: m.ImageRef, + Error: m.Error, + Provenance: m.Provenance, + CreatedAt: m.CreatedAt, + StartedAt: m.StartedAt, + CompletedAt: m.CompletedAt, + DurationMS: m.DurationMS, + } +} + +// writeMetadata writes build metadata to disk atomically +func writeMetadata(p *paths.Paths, meta *buildMetadata) error { + dir := p.BuildDir(meta.ID) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("create build directory: %w", err) + } + + data, err := json.MarshalIndent(meta, "", " ") + if err != nil { + return fmt.Errorf("marshal metadata: %w", err) + } + + // Write atomically via temp file + tempPath := p.BuildMetadata(meta.ID) + ".tmp" + if err := os.WriteFile(tempPath, data, 0644); err != nil { + return fmt.Errorf("write temp metadata: %w", err) + } + + 
finalPath := p.BuildMetadata(meta.ID) + if err := os.Rename(tempPath, finalPath); err != nil { + os.Remove(tempPath) + return fmt.Errorf("rename metadata: %w", err) + } + + return nil +} + +// readMetadata reads build metadata from disk +func readMetadata(p *paths.Paths, id string) (*buildMetadata, error) { + path := p.BuildMetadata(id) + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrNotFound + } + return nil, fmt.Errorf("read metadata: %w", err) + } + + var meta buildMetadata + if err := json.Unmarshal(data, &meta); err != nil { + return nil, fmt.Errorf("unmarshal metadata: %w", err) + } + + return &meta, nil +} + +// listAllBuilds returns all builds sorted by creation time (newest first) +func listAllBuilds(p *paths.Paths) ([]*buildMetadata, error) { + buildsDir := p.BuildsDir() + + entries, err := os.ReadDir(buildsDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("read builds directory: %w", err) + } + + var metas []*buildMetadata + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + meta, err := readMetadata(p, entry.Name()) + if err != nil { + continue // Skip invalid entries + } + metas = append(metas, meta) + } + + // Sort by created_at descending (newest first) + sort.Slice(metas, func(i, j int) bool { + return metas[i].CreatedAt.After(metas[j].CreatedAt) + }) + + return metas, nil +} + +// listPendingBuilds returns builds that need to be recovered on startup +// Returns builds with status queued/building, sorted by created_at (oldest first for FIFO) +func listPendingBuilds(p *paths.Paths) ([]*buildMetadata, error) { + all, err := listAllBuilds(p) + if err != nil { + return nil, err + } + + var pending []*buildMetadata + for _, meta := range all { + switch meta.Status { + case StatusQueued, StatusBuilding, StatusPushing: + pending = append(pending, meta) + } + } + + // Sort by created_at ascending (oldest first for FIFO recovery) + 
sort.Slice(pending, func(i, j int) bool { + return pending[i].CreatedAt.Before(pending[j].CreatedAt) + }) + + return pending, nil +} + +// deleteBuild removes a build's data from disk +func deleteBuild(p *paths.Paths, id string) error { + dir := p.BuildDir(id) + + // Check if exists + if _, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + return ErrNotFound + } + return fmt.Errorf("stat build directory: %w", err) + } + + if err := os.RemoveAll(dir); err != nil { + return fmt.Errorf("remove build directory: %w", err) + } + + return nil +} + +// ensureLogsDir ensures the logs directory exists for a build +func ensureLogsDir(p *paths.Paths, id string) error { + logsDir := p.BuildLogs(id) + return os.MkdirAll(logsDir, 0755) +} + +// appendLog appends log data to the build log file +func appendLog(p *paths.Paths, id string, data []byte) error { + if err := ensureLogsDir(p, id); err != nil { + return err + } + + logPath := p.BuildLog(id) + f, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("open log file: %w", err) + } + defer f.Close() + + if _, err := f.Write(data); err != nil { + return fmt.Errorf("write log: %w", err) + } + + return nil +} + +// readLog reads the build log file +func readLog(p *paths.Paths, id string) ([]byte, error) { + logPath := p.BuildLog(id) + data, err := os.ReadFile(logPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil // No logs yet + } + return nil, fmt.Errorf("read log: %w", err) + } + return data, nil +} + +// writeBuildConfig writes the build config for the builder VM +func writeBuildConfig(p *paths.Paths, id string, config *BuildConfig) error { + dir := p.BuildDir(id) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("create build directory: %w", err) + } + + data, err := json.MarshalIndent(config, "", " ") + if err != nil { + return fmt.Errorf("marshal build config: %w", err) + } + + configPath := p.BuildConfig(id) + if err := 
os.WriteFile(configPath, data, 0644); err != nil { + return fmt.Errorf("write build config: %w", err) + } + + return nil +} + +// readBuildConfig reads the build config for a build +func readBuildConfig(p *paths.Paths, id string) (*BuildConfig, error) { + configPath := p.BuildConfig(id) + data, err := os.ReadFile(configPath) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrNotFound + } + return nil, fmt.Errorf("read build config: %w", err) + } + + var config BuildConfig + if err := json.Unmarshal(data, &config); err != nil { + return nil, fmt.Errorf("unmarshal build config: %w", err) + } + + return &config, nil +} diff --git a/lib/builds/types.go b/lib/builds/types.go new file mode 100644 index 00000000..08b05881 --- /dev/null +++ b/lib/builds/types.go @@ -0,0 +1,210 @@ +// Package builds implements a secure build system that runs rootless BuildKit +// inside ephemeral Cloud Hypervisor microVMs for multi-tenant isolation. +package builds + +import "time" + +// Build status constants +const ( + StatusQueued = "queued" + StatusBuilding = "building" + StatusPushing = "pushing" + StatusReady = "ready" + StatusFailed = "failed" + StatusCancelled = "cancelled" +) + +// Build represents a source-to-image build job +type Build struct { + ID string `json:"id"` + Status string `json:"status"` + QueuePosition *int `json:"queue_position,omitempty"` + ImageDigest *string `json:"image_digest,omitempty"` + ImageRef *string `json:"image_ref,omitempty"` + Error *string `json:"error,omitempty"` + Provenance *BuildProvenance `json:"provenance,omitempty"` + CreatedAt time.Time `json:"created_at"` + StartedAt *time.Time `json:"started_at,omitempty"` + CompletedAt *time.Time `json:"completed_at,omitempty"` + DurationMS *int64 `json:"duration_ms,omitempty"` +} + +// CreateBuildRequest represents a request to create a new build +type CreateBuildRequest struct { + // Dockerfile content. Required if not included in the source tarball. 
+ // The Dockerfile specifies the runtime (e.g., FROM node:20-alpine). + Dockerfile string `json:"dockerfile,omitempty"` + + // BaseImageDigest optionally pins the base image by digest for reproducibility + BaseImageDigest string `json:"base_image_digest,omitempty"` + + // SourceHash is the SHA256 hash of the source tarball for verification + SourceHash string `json:"source_hash,omitempty"` + + // BuildPolicy contains resource limits and network policy for the build + BuildPolicy *BuildPolicy `json:"build_policy,omitempty"` + + // CacheScope is the tenant-specific cache key prefix for isolation + CacheScope string `json:"cache_scope,omitempty"` + + // BuildArgs are ARG values to pass to the Dockerfile + BuildArgs map[string]string `json:"build_args,omitempty"` + + // Secrets are secret references to inject during build + Secrets []SecretRef `json:"secrets,omitempty"` +} + +// BuildPolicy defines resource limits and network policy for a build +type BuildPolicy struct { + // TimeoutSeconds is the maximum build duration (default: 600) + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + + // MemoryMB is the memory limit for the builder VM (default: 2048) + MemoryMB int `json:"memory_mb,omitempty"` + + // CPUs is the number of vCPUs for the builder VM (default: 2) + CPUs int `json:"cpus,omitempty"` + + // NetworkMode controls network access during build + // "isolated" = no network, "egress" = outbound allowed + NetworkMode string `json:"network_mode,omitempty"` + + // AllowedDomains restricts egress to specific domains (only when NetworkMode="egress") + AllowedDomains []string `json:"allowed_domains,omitempty"` +} + +// SecretRef references a secret to inject during build +type SecretRef struct { + // ID is the secret identifier (used in --mount=type=secret,id=...) 
+ ID string `json:"id"` + + // EnvVar is the environment variable name to expose the secret as + EnvVar string `json:"env_var,omitempty"` +} + +// BuildProvenance records the inputs and toolchain used for a build +// This enables reproducibility verification and audit trails +type BuildProvenance struct { + // BaseImageDigest is the pinned base image used + BaseImageDigest string `json:"base_image_digest"` + + // SourceHash is the SHA256 of the source tarball + SourceHash string `json:"source_hash"` + + // LockfileHashes maps lockfile names to their SHA256 hashes + LockfileHashes map[string]string `json:"lockfile_hashes,omitempty"` + + // BuildkitVersion is the BuildKit version used + BuildkitVersion string `json:"buildkit_version,omitempty"` + + // Timestamp is when the build completed + Timestamp time.Time `json:"timestamp"` +} + +// BuildConfig is the configuration passed to the builder VM via config disk +// This is read by the builder agent inside the guest +type BuildConfig struct { + // JobID is the build job identifier + JobID string `json:"job_id"` + + // Dockerfile content (if not provided in source tarball) + Dockerfile string `json:"dockerfile,omitempty"` + + // BaseImageDigest optionally pins the base image + BaseImageDigest string `json:"base_image_digest,omitempty"` + + // RegistryURL is where to push the built image + RegistryURL string `json:"registry_url"` + + // RegistryToken is a short-lived JWT granting push access to specific repositories. + // The builder agent uses this token to authenticate with the registry. 
+ RegistryToken string `json:"registry_token,omitempty"` + + // CacheScope is the tenant-specific cache key prefix + CacheScope string `json:"cache_scope,omitempty"` + + // SourcePath is the path to source in the guest (typically /src) + SourcePath string `json:"source_path"` + + // BuildArgs are ARG values for the Dockerfile + BuildArgs map[string]string `json:"build_args,omitempty"` + + // Secrets are secret references to fetch from host + Secrets []SecretRef `json:"secrets,omitempty"` + + // TimeoutSeconds is the build timeout + TimeoutSeconds int `json:"timeout_seconds"` + + // NetworkMode is "isolated" or "egress" + NetworkMode string `json:"network_mode"` +} + +// BuildEvent represents a typed SSE event for build streaming +type BuildEvent struct { + // Type is one of "log", "status", or "heartbeat" + Type string `json:"type"` + + // Timestamp is when the event occurred + Timestamp time.Time `json:"timestamp"` + + // Content is the log line content (only for type="log") + Content string `json:"content,omitempty"` + + // Status is the new build status (only for type="status") + Status string `json:"status,omitempty"` +} + +// BuildEvent type constants +const ( + EventTypeLog = "log" + EventTypeStatus = "status" + EventTypeHeartbeat = "heartbeat" +) + +// BuildResult is returned by the builder agent after a build completes +type BuildResult struct { + // Success indicates whether the build succeeded + Success bool `json:"success"` + + // ImageDigest is the digest of the pushed image (only on success) + ImageDigest string `json:"image_digest,omitempty"` + + // Error is the error message (only on failure) + Error string `json:"error,omitempty"` + + // Logs is the full build log output + Logs string `json:"logs,omitempty"` + + // Provenance records build inputs for reproducibility + Provenance BuildProvenance `json:"provenance"` + + // DurationMS is the build duration in milliseconds + DurationMS int64 `json:"duration_ms"` +} + +// DefaultBuildPolicy returns the 
default build policy +func DefaultBuildPolicy() BuildPolicy { + return BuildPolicy{ + TimeoutSeconds: 600, // 10 minutes + MemoryMB: 2048, // 2GB + CPUs: 2, + NetworkMode: "egress", // Allow outbound for dependency downloads + } +} + +// ApplyDefaults fills in default values for a build policy +func (p *BuildPolicy) ApplyDefaults() { + defaults := DefaultBuildPolicy() + if p.TimeoutSeconds == 0 { + p.TimeoutSeconds = defaults.TimeoutSeconds + } + if p.MemoryMB == 0 { + p.MemoryMB = defaults.MemoryMB + } + if p.CPUs == 0 { + p.CPUs = defaults.CPUs + } + if p.NetworkMode == "" { + p.NetworkMode = defaults.NetworkMode + } +} diff --git a/lib/builds/vsock_handler.go b/lib/builds/vsock_handler.go new file mode 100644 index 00000000..f5aebfcd --- /dev/null +++ b/lib/builds/vsock_handler.go @@ -0,0 +1,45 @@ +package builds + +import ( + "context" +) + +const ( + // BuildAgentVsockPort is the port the builder agent listens on inside the guest + BuildAgentVsockPort = 5001 + + // SecretsVsockPort is the port the host listens on for secret requests from builder agents + SecretsVsockPort = 5002 +) + +// VsockMessage is the envelope for vsock communication with builder agents +type VsockMessage struct { + Type string `json:"type"` + Result *BuildResult `json:"result,omitempty"` + Log string `json:"log,omitempty"` + SecretIDs []string `json:"secret_ids,omitempty"` // For secrets request + Secrets map[string]string `json:"secrets,omitempty"` // For secrets response +} + +// SecretsRequest is sent by the builder agent to fetch secrets +type SecretsRequest struct { + SecretIDs []string `json:"secret_ids"` +} + +// SecretsResponse contains the requested secrets +type SecretsResponse struct { + Secrets map[string]string `json:"secrets"` +} + +// SecretProvider provides secrets for builds +type SecretProvider interface { + // GetSecrets returns the values for the given secret IDs + GetSecrets(ctx context.Context, secretIDs []string) (map[string]string, error) +} + +// 
NoOpSecretProvider returns empty secrets (for builds without secrets) +type NoOpSecretProvider struct{} + +func (p *NoOpSecretProvider) GetSecrets(ctx context.Context, secretIDs []string) (map[string]string, error) { + return make(map[string]string), nil +} diff --git a/lib/images/oci.go b/lib/images/oci.go index 959e6c79..f98b89f7 100644 --- a/lib/images/oci.go +++ b/lib/images/oci.go @@ -13,6 +13,8 @@ import ( "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/layout" "github.com/google/go-containerregistry/pkg/v1/remote" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/umoci/oci/cas/dir" @@ -205,61 +207,65 @@ func (c *ociClient) extractDigest(layoutTag string) (string, error) { return digest, nil } -// extractOCIMetadata reads metadata from OCI layout config.json -func (c *ociClient) extractOCIMetadata(layoutTag string) (*containerMetadata, error) { - // Open the shared OCI layout - casEngine, err := dir.Open(c.cacheDir) +// imageByAnnotation finds an image in the OCI layout by its annotation tag. +// This iterates through the index to find the image with matching +// "org.opencontainers.image.ref.name" annotation. 
+func imageByAnnotation(path layout.Path, layoutTag string) (gcr.Image, error) { + index, err := path.ImageIndex() if err != nil { - return nil, fmt.Errorf("open oci layout: %w", err) + return nil, fmt.Errorf("get image index: %w", err) } - defer casEngine.Close() - engine := casext.NewEngine(casEngine) - - // Resolve the layout tag in the shared layout - descriptorPaths, err := engine.ResolveReference(context.Background(), layoutTag) + indexManifest, err := index.IndexManifest() if err != nil { - return nil, fmt.Errorf("resolve reference: %w", err) + return nil, fmt.Errorf("get index manifest: %w", err) } - if len(descriptorPaths) == 0 { - return nil, fmt.Errorf("no image found in oci layout") + // Find the image with matching annotation + for _, desc := range indexManifest.Manifests { + if desc.Annotations != nil { + if refName, ok := desc.Annotations["org.opencontainers.image.ref.name"]; ok { + if refName == layoutTag { + return path.Image(desc.Digest) + } + } + } } - // Get the manifest - manifestBlob, err := engine.FromDescriptor(context.Background(), descriptorPaths[0].Descriptor()) - if err != nil { - return nil, fmt.Errorf("get manifest: %w", err) - } + return nil, fmt.Errorf("no image found with tag %s", layoutTag) +} - // casext automatically parses manifests, so Data is already a v1.Manifest - manifest, ok := manifestBlob.Data.(v1.Manifest) - if !ok { - return nil, fmt.Errorf("manifest data is not v1.Manifest (got %T)", manifestBlob.Data) +// extractOCIMetadata reads metadata from OCI layout config.json +// Uses go-containerregistry which handles both Docker v2 and OCI v1 manifests. 
+func (c *ociClient) extractOCIMetadata(layoutTag string) (*containerMetadata, error) { + // Open OCI layout using go-containerregistry (handles Docker v2 and OCI v1) + path, err := layout.FromPath(c.cacheDir) + if err != nil { + return nil, fmt.Errorf("open oci layout: %w", err) } - // Get the config blob - configBlob, err := engine.FromDescriptor(context.Background(), manifest.Config) + // Get the image by annotation tag from the layout + img, err := imageByAnnotation(path, layoutTag) if err != nil { - return nil, fmt.Errorf("get config: %w", err) + return nil, fmt.Errorf("find image by tag %s: %w", layoutTag, err) } - // casext automatically parses config, so Data is already a v1.Image - config, ok := configBlob.Data.(v1.Image) - if !ok { - return nil, fmt.Errorf("config data is not v1.Image (got %T)", configBlob.Data) + // Get config file (go-containerregistry handles manifest format automatically) + configFile, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("get config file: %w", err) } - // Extract metadata + // Extract metadata from config meta := &containerMetadata{ - Entrypoint: config.Config.Entrypoint, - Cmd: config.Config.Cmd, + Entrypoint: configFile.Config.Entrypoint, + Cmd: configFile.Config.Cmd, Env: make(map[string]string), - WorkingDir: config.Config.WorkingDir, + WorkingDir: configFile.Config.WorkingDir, } // Parse environment variables - for _, env := range config.Config.Env { + for _, env := range configFile.Config.Env { for i := 0; i < len(env); i++ { if env[i] == '=' { key := env[:i] @@ -274,37 +280,36 @@ func (c *ociClient) extractOCIMetadata(layoutTag string) (*containerMetadata, er } // unpackLayers unpacks all OCI layers to a target directory using umoci -func (c *ociClient) unpackLayers(ctx context.Context, imageRef, targetDir string) error { - // Open the shared OCI layout - casEngine, err := dir.Open(c.cacheDir) +// Uses go-containerregistry to get the manifest (handles both Docker v2 and OCI v1) +// then converts 
it to OCI v1 format for umoci's layer unpacker. +func (c *ociClient) unpackLayers(ctx context.Context, layoutTag, targetDir string) error { + // Open OCI layout using go-containerregistry (handles Docker v2 and OCI v1) + path, err := layout.FromPath(c.cacheDir) if err != nil { return fmt.Errorf("open oci layout: %w", err) } - defer casEngine.Close() - engine := casext.NewEngine(casEngine) - - // Resolve the image reference (tag) in the shared layout - descriptorPaths, err := engine.ResolveReference(context.Background(), imageRef) + // Get the image by annotation tag from the layout + img, err := imageByAnnotation(path, layoutTag) if err != nil { - return fmt.Errorf("resolve reference: %w", err) + return fmt.Errorf("find image by tag %s: %w", layoutTag, err) } - if len(descriptorPaths) == 0 { - return fmt.Errorf("no image found") - } - - // Get the manifest blob - manifestBlob, err := engine.FromDescriptor(context.Background(), descriptorPaths[0].Descriptor()) + // Get manifest from go-containerregistry + gcrManifest, err := img.Manifest() if err != nil { return fmt.Errorf("get manifest: %w", err) } - // casext automatically parses manifests - manifest, ok := manifestBlob.Data.(v1.Manifest) - if !ok { - return fmt.Errorf("manifest data is not v1.Manifest (got %T)", manifestBlob.Data) + // Convert go-containerregistry manifest to OCI v1.Manifest for umoci + ociManifest := convertToOCIManifest(gcrManifest) + + // Open the shared OCI layout with umoci for layer unpacking + casEngine, err := dir.Open(c.cacheDir) + if err != nil { + return fmt.Errorf("open oci layout for unpacking: %w", err) } + defer casEngine.Close() // Pre-create target directory (umoci needs it to exist) if err := os.MkdirAll(targetDir, 0755); err != nil { @@ -330,7 +335,7 @@ func (c *ociClient) unpackLayers(ctx context.Context, imageRef, targetDir string }, } - err = layer.UnpackRootfs(context.Background(), casEngine, targetDir, manifest, unpackOpts) + err = layer.UnpackRootfs(context.Background(), 
casEngine, targetDir, ociManifest, unpackOpts) if err != nil { return fmt.Errorf("unpack rootfs: %w", err) } @@ -338,6 +343,45 @@ func (c *ociClient) unpackLayers(ctx context.Context, imageRef, targetDir string return nil } +// convertToOCIManifest converts a go-containerregistry manifest to OCI v1.Manifest +// This allows us to use go-containerregistry (which handles both Docker v2 and OCI v1) +// for manifest parsing, while still using umoci for layer unpacking. +func convertToOCIManifest(gcrManifest *gcr.Manifest) v1.Manifest { + // Convert config descriptor + configDesc := v1.Descriptor{ + MediaType: string(gcrManifest.Config.MediaType), + Digest: gcrDigestToOCI(gcrManifest.Config.Digest), + Size: gcrManifest.Config.Size, + Annotations: gcrManifest.Config.Annotations, + } + + // Convert layer descriptors + layers := make([]v1.Descriptor, len(gcrManifest.Layers)) + for i, layer := range gcrManifest.Layers { + layers[i] = v1.Descriptor{ + MediaType: string(layer.MediaType), + Digest: gcrDigestToOCI(layer.Digest), + Size: layer.Size, + Annotations: layer.Annotations, + } + } + + return v1.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: int(gcrManifest.SchemaVersion), + }, + MediaType: string(gcrManifest.MediaType), + Config: configDesc, + Layers: layers, + Annotations: gcrManifest.Annotations, + } +} + +// gcrDigestToOCI converts a go-containerregistry digest to OCI digest +func gcrDigestToOCI(d gcr.Hash) digest.Digest { + return digest.NewDigestFromEncoded(digest.Algorithm(d.Algorithm), d.Hex) +} + type containerMetadata struct { Entrypoint []string Cmd []string diff --git a/lib/middleware/oapi_auth.go b/lib/middleware/oapi_auth.go index 8a39b2e9..430e822a 100644 --- a/lib/middleware/oapi_auth.go +++ b/lib/middleware/oapi_auth.go @@ -2,8 +2,10 @@ package middleware import ( "context" + "encoding/base64" "fmt" "net/http" + "regexp" "strings" "github.com/getkin/kin-openapi/openapi3filter" @@ -15,6 +17,18 @@ type contextKey string const userIDKey contextKey 
= "user_id" +// registryPathPattern matches /v2/{repository}/... paths +var registryPathPattern = regexp.MustCompile(`^/v2/([^/]+(?:/[^/]+)?)/`) + +// RegistryTokenClaims contains the claims for a scoped registry access token. +// This mirrors the type in lib/builds/registry_token.go to avoid circular imports. +type RegistryTokenClaims struct { + jwt.RegisteredClaims + BuildID string `json:"build_id"` + Repositories []string `json:"repos"` + Scope string `json:"scope"` +} + // OapiAuthenticationFunc creates an AuthenticationFunc compatible with nethttp-middleware // that validates JWT bearer tokens for endpoints with security requirements. func OapiAuthenticationFunc(jwtSecret string) openapi3filter.AuthenticationFunc { @@ -65,6 +79,21 @@ func OapiAuthenticationFunc(jwtSecret string) openapi3filter.AuthenticationFunc return fmt.Errorf("invalid token") } + // Reject registry tokens - they should not be used for API authentication. + // Registry tokens have specific claims (repos, scope, build_id) that user tokens don't have. 
+ if _, hasRepos := claims["repos"]; hasRepos { + log.DebugContext(ctx, "rejected registry token used for API auth") + return fmt.Errorf("invalid token type") + } + if _, hasScope := claims["scope"]; hasScope { + log.DebugContext(ctx, "rejected registry token used for API auth") + return fmt.Errorf("invalid token type") + } + if _, hasBuildID := claims["build_id"]; hasBuildID { + log.DebugContext(ctx, "rejected registry token used for API auth") + return fmt.Errorf("invalid token type") + } + // Extract user ID from claims and add to context var userID string if sub, ok := claims["sub"].(string); ok { @@ -73,7 +102,7 @@ func OapiAuthenticationFunc(jwtSecret string) openapi3filter.AuthenticationFunc // Update the context with user ID newCtx := context.WithValue(ctx, userIDKey, userID) - + // Update the request with the new context *input.RequestValidationInput.Request = *input.RequestValidationInput.Request.WithContext(newCtx) @@ -86,10 +115,10 @@ func OapiAuthenticationFunc(jwtSecret string) openapi3filter.AuthenticationFunc func OapiErrorHandler(w http.ResponseWriter, message string, statusCode int) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(statusCode) - + // Return a simple JSON error response matching our Error schema - fmt.Fprintf(w, `{"code":"%s","message":"%s"}`, - http.StatusText(statusCode), + fmt.Fprintf(w, `{"code":"%s","message":"%s"}`, + http.StatusText(statusCode), message) } @@ -108,6 +137,37 @@ func extractBearerToken(authHeader string) (string, error) { return parts[1], nil } +// extractTokenFromAuth extracts a JWT token from either Bearer or Basic auth headers. 
+// For Bearer: returns the token directly +// For Basic: decodes base64 and returns the username part (BuildKit sends JWT as username) +func extractTokenFromAuth(authHeader string) (string, string, error) { + parts := strings.SplitN(authHeader, " ", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid authorization header format") + } + + scheme := strings.ToLower(parts[0]) + switch scheme { + case "bearer": + return parts[1], "bearer", nil + case "basic": + // Decode base64 + decoded, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", "", fmt.Errorf("invalid basic auth encoding: %w", err) + } + // Split on colon to get username:password + credentials := strings.SplitN(string(decoded), ":", 2) + if len(credentials) == 0 { + return "", "", fmt.Errorf("invalid basic auth format") + } + // The JWT is the username part + return credentials[0], "basic", nil + default: + return "", "", fmt.Errorf("unsupported authorization scheme: %s", scheme) + } +} + // GetUserIDFromContext extracts the user ID from context func GetUserIDFromContext(ctx context.Context) string { if userID, ok := ctx.Value(userIDKey).(string); ok { @@ -116,6 +176,98 @@ func GetUserIDFromContext(ctx context.Context) string { return "" } +// isRegistryPath checks if the request is for the OCI registry endpoints (/v2/...) +func isRegistryPath(path string) bool { + return strings.HasPrefix(path, "/v2/") +} + +// isInternalVMRequest checks if the request is from an internal VM network (10.102.x.x) +// This is used as a fallback for builder VMs that don't have token auth yet. +// +// SECURITY: We only trust RemoteAddr, not X-Real-IP or X-Forwarded-For headers, +// as those can be spoofed by attackers to bypass authentication. 
+func isInternalVMRequest(r *http.Request) bool { + // Use only RemoteAddr - never trust client-supplied headers for auth decisions + ip := r.RemoteAddr + + // RemoteAddr is "IP:port" format, extract just the IP + if idx := strings.LastIndex(ip, ":"); idx != -1 { + ip = ip[:idx] + } + + // Check if it's from the VM network (10.102.x.x) + return strings.HasPrefix(ip, "10.102.") +} + +// extractRepoFromPath extracts the repository name from a registry path. +// e.g., "/v2/builds/abc123/manifests/latest" -> "builds/abc123" +func extractRepoFromPath(path string) string { + matches := registryPathPattern.FindStringSubmatch(path) + if len(matches) >= 2 { + return matches[1] + } + return "" +} + +// isWriteOperation returns true if the HTTP method implies a write operation +func isWriteOperation(method string) bool { + return method == http.MethodPut || method == http.MethodPost || method == http.MethodPatch || method == http.MethodDelete +} + +// validateRegistryToken validates a registry-scoped JWT token and checks repository access. +// Returns the claims if valid, nil otherwise. 
+func validateRegistryToken(tokenString, jwtSecret, requestPath, method string) (*RegistryTokenClaims, error) { + token, err := jwt.ParseWithClaims(tokenString, &RegistryTokenClaims{}, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return []byte(jwtSecret), nil + }) + + if err != nil { + return nil, fmt.Errorf("parse token: %w", err) + } + + claims, ok := token.Claims.(*RegistryTokenClaims) + if !ok || !token.Valid { + return nil, fmt.Errorf("invalid token") + } + + // Check if this is a registry token (has repos claim) + if len(claims.Repositories) == 0 { + return nil, fmt.Errorf("not a registry token") + } + + // Extract repository from request path + repo := extractRepoFromPath(requestPath) + if repo == "" { + // Allow /v2/ (base path check) without repo validation + if requestPath == "/v2/" || requestPath == "/v2" { + return claims, nil + } + return nil, fmt.Errorf("could not extract repository from path") + } + + // Check if the repository is allowed by the token + allowed := false + for _, allowedRepo := range claims.Repositories { + if allowedRepo == repo { + allowed = true + break + } + } + if !allowed { + return nil, fmt.Errorf("repository %s not allowed by token", repo) + } + + // Check scope for write operations + if isWriteOperation(method) && claims.Scope != "push" { + return nil, fmt.Errorf("token does not allow write operations") + } + + return claims, nil +} + // JwtAuth creates a chi middleware that validates JWT bearer tokens func JwtAuth(jwtSecret string) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { @@ -124,6 +276,51 @@ func JwtAuth(jwtSecret string) func(http.Handler) http.Handler { // Extract token from Authorization header authHeader := r.Header.Get("Authorization") + + // For registry paths, handle specially to support both Bearer and Basic auth + if 
isRegistryPath(r.URL.Path) { + if authHeader != "" { + // Try to extract token (supports both Bearer and Basic auth) + token, authType, err := extractTokenFromAuth(authHeader) + if err == nil { + log.DebugContext(r.Context(), "extracted token for registry request", "auth_type", authType) + + // Try to validate as a registry-scoped token + registryClaims, err := validateRegistryToken(token, jwtSecret, r.URL.Path, r.Method) + if err == nil { + // Valid registry token - set build ID as user for audit trail + log.DebugContext(r.Context(), "registry token validated", + "build_id", registryClaims.BuildID, + "repos", registryClaims.Repositories, + "scope", registryClaims.Scope) + ctx := context.WithValue(r.Context(), userIDKey, "builder-"+registryClaims.BuildID) + next.ServeHTTP(w, r.WithContext(ctx)) + return + } + log.DebugContext(r.Context(), "registry token validation failed", "error", err) + } else { + log.DebugContext(r.Context(), "failed to extract token", "error", err) + } + } + + // Fallback: Allow internal VM network (10.102.x.x) for registry pushes + // This is a transitional fallback for older builder images without token auth + if isInternalVMRequest(r) { + log.DebugContext(r.Context(), "allowing internal VM request via IP fallback (deprecated)", + "remote_addr", r.RemoteAddr, + "path", r.URL.Path) + ctx := context.WithValue(r.Context(), userIDKey, "internal-builder-legacy") + next.ServeHTTP(w, r.WithContext(ctx)) + return + } + + // Registry auth failed + log.DebugContext(r.Context(), "registry request unauthorized", "remote_addr", r.RemoteAddr) + OapiErrorHandler(w, "registry authentication required", http.StatusUnauthorized) + return + } + + // For non-registry paths, require Bearer token if authHeader == "" { log.DebugContext(r.Context(), "missing authorization header") OapiErrorHandler(w, "authorization header required", http.StatusUnauthorized) @@ -138,7 +335,7 @@ func JwtAuth(jwtSecret string) func(http.Handler) http.Handler { return } - // Parse and 
validate JWT + // Parse and validate as regular user JWT claims := jwt.MapClaims{} parsedToken, err := jwt.ParseWithClaims(token, claims, func(token *jwt.Token) (interface{}, error) { // Validate signing method @@ -160,6 +357,31 @@ func JwtAuth(jwtSecret string) func(http.Handler) http.Handler { return } + // Reject registry tokens - they should not be used for API authentication. + // Registry tokens have specific claims that user tokens don't have. + // This provides defense-in-depth even though BuildKit isolates build containers. + if _, hasRepos := claims["repos"]; hasRepos { + log.DebugContext(r.Context(), "rejected registry token used for API auth") + OapiErrorHandler(w, "invalid token type", http.StatusUnauthorized) + return + } + if _, hasScope := claims["scope"]; hasScope { + log.DebugContext(r.Context(), "rejected registry token used for API auth") + OapiErrorHandler(w, "invalid token type", http.StatusUnauthorized) + return + } + if _, hasBuildID := claims["build_id"]; hasBuildID { + log.DebugContext(r.Context(), "rejected registry token used for API auth") + OapiErrorHandler(w, "invalid token type", http.StatusUnauthorized) + return + } + // Also reject tokens with "builder-" prefix in subject as an extra safeguard + if sub, ok := claims["sub"].(string); ok && strings.HasPrefix(sub, "builder-") { + log.DebugContext(r.Context(), "rejected builder token used for API auth", "sub", sub) + OapiErrorHandler(w, "invalid token type", http.StatusUnauthorized) + return + } + // Extract user ID from claims and add to context var userID string if sub, ok := claims["sub"].(string); ok { @@ -174,4 +396,3 @@ func JwtAuth(jwtSecret string) func(http.Handler) http.Handler { }) } } - diff --git a/lib/middleware/oapi_auth_test.go b/lib/middleware/oapi_auth_test.go new file mode 100644 index 00000000..dbb5a266 --- /dev/null +++ b/lib/middleware/oapi_auth_test.go @@ -0,0 +1,204 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + 
"github.com/golang-jwt/jwt/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testJWTSecret = "test-secret-key-for-testing" + +// generateUserToken creates a valid user JWT token +func generateUserToken(t *testing.T, userID string) string { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": userID, + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + }) + tokenString, err := token.SignedString([]byte(testJWTSecret)) + require.NoError(t, err) + return tokenString +} + +// generateRegistryToken creates a registry token (like those given to builder VMs) +func generateRegistryToken(t *testing.T, buildID string) string { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": "builder-" + buildID, + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + "iss": "hypeman", + "build_id": buildID, + "repos": []string{"builds/" + buildID}, + "scope": "push", + }) + tokenString, err := token.SignedString([]byte(testJWTSecret)) + require.NoError(t, err) + return tokenString +} + +func TestJwtAuth_RejectsRegistryTokens(t *testing.T) { + // Create a simple handler that returns 200 if auth passes + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + // Wrap with JwtAuth middleware + handler := JwtAuth(testJWTSecret)(nextHandler) + + t.Run("valid user token is accepted", func(t *testing.T) { + userToken := generateUserToken(t, "user-123") + + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Bearer "+userToken) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusOK, rr.Code, "user token should be accepted") + }) + + t.Run("registry token with repos claim is rejected", func(t *testing.T) { + registryToken := generateRegistryToken(t, "build-abc123") + + req := httptest.NewRequest(http.MethodGet, 
"/instances", nil) + req.Header.Set("Authorization", "Bearer "+registryToken) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code, "registry token should be rejected") + assert.Contains(t, rr.Body.String(), "invalid token type") + }) + + t.Run("token with only builder- prefix is rejected", func(t *testing.T) { + // A token that has builder- prefix but no other registry claims + // This could be crafted by an attacker who knows the pattern + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": "builder-malicious-build", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + }) + tokenString, err := token.SignedString([]byte(testJWTSecret)) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Bearer "+tokenString) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code, "builder- prefix token should be rejected") + assert.Contains(t, rr.Body.String(), "invalid token type") + }) + + t.Run("token with scope claim is rejected", func(t *testing.T) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": "some-user", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + "scope": "push", + }) + tokenString, err := token.SignedString([]byte(testJWTSecret)) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Bearer "+tokenString) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code, "token with scope claim should be rejected") + }) + + t.Run("token with build_id claim is rejected", func(t *testing.T) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": "some-user", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + 
"build_id": "some-build", + }) + tokenString, err := token.SignedString([]byte(testJWTSecret)) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Bearer "+tokenString) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code, "token with build_id claim should be rejected") + }) +} + +func TestJwtAuth_RequiresAuthorization(t *testing.T) { + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + handler := JwtAuth(testJWTSecret)(nextHandler) + + t.Run("missing authorization header is rejected", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code) + assert.Contains(t, rr.Body.String(), "authorization header required") + }) + + t.Run("invalid token format is rejected", func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Basic abc123") + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid authorization header format") + }) + + t.Run("expired token is rejected", func(t *testing.T) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": "user-123", + "iat": time.Now().Add(-2 * time.Hour).Unix(), + "exp": time.Now().Add(-1 * time.Hour).Unix(), // Expired + }) + tokenString, err := token.SignedString([]byte(testJWTSecret)) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Bearer "+tokenString) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid token") 
+ }) + + t.Run("wrong secret is rejected", func(t *testing.T) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": "user-123", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + }) + tokenString, err := token.SignedString([]byte("wrong-secret")) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodGet, "/instances", nil) + req.Header.Set("Authorization", "Bearer "+tokenString) + + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + assert.Equal(t, http.StatusUnauthorized, rr.Code) + assert.Contains(t, rr.Body.String(), "invalid token") + }) +} + diff --git a/lib/oapi/oapi.go b/lib/oapi/oapi.go index 450b9030..4171b96e 100644 --- a/lib/oapi/oapi.go +++ b/lib/oapi/oapi.go @@ -29,6 +29,23 @@ const ( BearerAuthScopes = "bearerAuth.Scopes" ) +// Defines values for BuildEventType. +const ( + Heartbeat BuildEventType = "heartbeat" + Log BuildEventType = "log" + Status BuildEventType = "status" +) + +// Defines values for BuildStatus. +const ( + BuildStatusBuilding BuildStatus = "building" + BuildStatusCancelled BuildStatus = "cancelled" + BuildStatusFailed BuildStatus = "failed" + BuildStatusPushing BuildStatus = "pushing" + BuildStatusQueued BuildStatus = "queued" + BuildStatusReady BuildStatus = "ready" +) + // Defines values for CreateInstanceRequestHypervisor. const ( CreateInstanceRequestHypervisorCloudHypervisor CreateInstanceRequestHypervisor = "cloud-hypervisor" @@ -48,11 +65,11 @@ const ( // Defines values for ImageStatus. const ( - Converting ImageStatus = "converting" - Failed ImageStatus = "failed" - Pending ImageStatus = "pending" - Pulling ImageStatus = "pulling" - Ready ImageStatus = "ready" + ImageStatusConverting ImageStatus = "converting" + ImageStatusFailed ImageStatus = "failed" + ImageStatusPending ImageStatus = "pending" + ImageStatusPulling ImageStatus = "pulling" + ImageStatusReady ImageStatus = "ready" ) // Defines values for InstanceHypervisor. 
@@ -112,6 +129,79 @@ type AvailableDevice struct { VendorName *string `json:"vendor_name,omitempty"` } +// Build defines model for Build. +type Build struct { + // CompletedAt Build completion timestamp + CompletedAt *time.Time `json:"completed_at"` + + // CreatedAt Build creation timestamp + CreatedAt time.Time `json:"created_at"` + + // DurationMs Build duration in milliseconds + DurationMs *int64 `json:"duration_ms"` + + // Error Error message (only when status is failed) + Error *string `json:"error"` + + // Id Build job identifier + Id string `json:"id"` + + // ImageDigest Digest of built image (only when status is ready) + ImageDigest *string `json:"image_digest"` + + // ImageRef Full image reference (only when status is ready) + ImageRef *string `json:"image_ref"` + Provenance *BuildProvenance `json:"provenance,omitempty"` + + // QueuePosition Position in build queue (only when status is queued) + QueuePosition *int `json:"queue_position"` + + // StartedAt Build start timestamp + StartedAt *time.Time `json:"started_at"` + + // Status Build job status + Status BuildStatus `json:"status"` +} + +// BuildEvent defines model for BuildEvent. +type BuildEvent struct { + // Content Log line content (only for type=log) + Content *string `json:"content,omitempty"` + + // Status Build job status + Status *BuildStatus `json:"status,omitempty"` + + // Timestamp Event timestamp + Timestamp time.Time `json:"timestamp"` + + // Type Event type + Type BuildEventType `json:"type"` +} + +// BuildEventType Event type +type BuildEventType string + +// BuildProvenance defines model for BuildProvenance. 
+type BuildProvenance struct { + // BaseImageDigest Pinned base image digest used + BaseImageDigest *string `json:"base_image_digest,omitempty"` + + // BuildkitVersion BuildKit version used + BuildkitVersion *string `json:"buildkit_version,omitempty"` + + // LockfileHashes Map of lockfile names to SHA256 hashes + LockfileHashes *map[string]string `json:"lockfile_hashes,omitempty"` + + // SourceHash SHA256 hash of source tarball + SourceHash *string `json:"source_hash,omitempty"` + + // Timestamp Build completion timestamp + Timestamp *time.Time `json:"timestamp,omitempty"` +} + +// BuildStatus Build job status +type BuildStatus string + // CreateDeviceRequest defines model for CreateDeviceRequest. type CreateDeviceRequest struct { // Name Optional globally unique device name. If not provided, a name is auto-generated from the PCI address (e.g., "pci-0000-a2-00-0") @@ -604,6 +694,35 @@ type VolumeMount struct { VolumeId string `json:"volume_id"` } +// CreateBuildMultipartBody defines parameters for CreateBuild. +type CreateBuildMultipartBody struct { + // BaseImageDigest Optional pinned base image digest + BaseImageDigest *string `json:"base_image_digest,omitempty"` + + // CacheScope Tenant-specific cache key prefix + CacheScope *string `json:"cache_scope,omitempty"` + + // Dockerfile Dockerfile content. Required if not included in the source tarball. + Dockerfile *string `json:"dockerfile,omitempty"` + + // Secrets JSON array of secret references to inject during build. + // Each object has "id" (required) for use with --mount=type=secret,id=... + // Example: [{"id": "npm_token"}, {"id": "github_token"}] + Secrets *string `json:"secrets,omitempty"` + + // Source Source tarball (tar.gz) containing application code and optionally a Dockerfile + Source openapi_types.File `json:"source"` + + // TimeoutSeconds Build timeout (default 600) + TimeoutSeconds *int `json:"timeout_seconds,omitempty"` +} + +// GetBuildEventsParams defines parameters for GetBuildEvents. 
+type GetBuildEventsParams struct { + // Follow Continue streaming new events after initial output + Follow *bool `form:"follow,omitempty" json:"follow,omitempty"` +} + // GetInstanceLogsParams defines parameters for GetInstanceLogs. type GetInstanceLogsParams struct { // Tail Number of lines to return from end @@ -646,6 +765,9 @@ type CreateVolumeMultipartBody struct { SizeGb int `json:"size_gb"` } +// CreateBuildMultipartRequestBody defines body for CreateBuild for multipart/form-data ContentType. +type CreateBuildMultipartRequestBody CreateBuildMultipartBody + // CreateDeviceJSONRequestBody defines body for CreateDevice for application/json ContentType. type CreateDeviceJSONRequestBody = CreateDeviceRequest @@ -740,6 +862,21 @@ func WithRequestEditorFn(fn RequestEditorFn) ClientOption { // The interface specification for the client above. type ClientInterface interface { + // ListBuilds request + ListBuilds(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // CreateBuildWithBody request with any body + CreateBuildWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + // CancelBuild request + CancelBuild(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetBuild request + GetBuild(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GetBuildEvents request + GetBuildEvents(ctx context.Context, id string, params *GetBuildEventsParams, reqEditors ...RequestEditorFn) (*http.Response, error) + // ListDevices request ListDevices(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -846,6 +983,66 @@ type ClientInterface interface { GetVolume(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) } +func (c *Client) ListBuilds(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := 
NewListBuildsRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) CreateBuildWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewCreateBuildRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) CancelBuild(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewCancelBuildRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetBuild(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetBuildRequest(c.Server, id) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GetBuildEvents(ctx context.Context, id string, params *GetBuildEventsParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetBuildEventsRequest(c.Server, id, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) ListDevices(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewListDevicesRequest(c.Server) if err != nil { @@ -1290,8 +1487,8 @@ func (c *Client) GetVolume(ctx context.Context, id string, reqEditors ...Request 
return c.Client.Do(req) } -// NewListDevicesRequest generates requests for ListDevices -func NewListDevicesRequest(server string) (*http.Request, error) { +// NewListBuildsRequest generates requests for ListBuilds +func NewListBuildsRequest(server string) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1299,7 +1496,7 @@ func NewListDevicesRequest(server string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/devices") + operationPath := fmt.Sprintf("/builds") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1317,19 +1514,8 @@ func NewListDevicesRequest(server string) (*http.Request, error) { return req, nil } -// NewCreateDeviceRequest calls the generic CreateDevice builder with application/json body -func NewCreateDeviceRequest(server string, body CreateDeviceJSONRequestBody) (*http.Request, error) { - var bodyReader io.Reader - buf, err := json.Marshal(body) - if err != nil { - return nil, err - } - bodyReader = bytes.NewReader(buf) - return NewCreateDeviceRequestWithBody(server, "application/json", bodyReader) -} - -// NewCreateDeviceRequestWithBody generates requests for CreateDevice with any type of body -func NewCreateDeviceRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { +// NewCreateBuildRequestWithBody generates requests for CreateBuild with any type of body +func NewCreateBuildRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1337,7 +1523,7 @@ func NewCreateDeviceRequestWithBody(server string, contentType string, body io.R return nil, err } - operationPath := fmt.Sprintf("/devices") + operationPath := fmt.Sprintf("/builds") if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1357,16 +1543,23 @@ func NewCreateDeviceRequestWithBody(server string, contentType string, body io.R return req, nil } -// NewListAvailableDevicesRequest generates requests for ListAvailableDevices -func NewListAvailableDevicesRequest(server string) (*http.Request, error) { +// NewCancelBuildRequest generates requests for CancelBuild +func NewCancelBuildRequest(server string, id string) (*http.Request, error) { var err error + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) + if err != nil { + return nil, err + } + serverURL, err := url.Parse(server) if err != nil { return nil, err } - operationPath := fmt.Sprintf("/devices/available") + operationPath := fmt.Sprintf("/builds/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1376,7 +1569,7 @@ func NewListAvailableDevicesRequest(server string) (*http.Request, error) { return nil, err } - req, err := http.NewRequest("GET", queryURL.String(), nil) + req, err := http.NewRequest("DELETE", queryURL.String(), nil) if err != nil { return nil, err } @@ -1384,8 +1577,8 @@ func NewListAvailableDevicesRequest(server string) (*http.Request, error) { return req, nil } -// NewDeleteDeviceRequest generates requests for DeleteDevice -func NewDeleteDeviceRequest(server string, id string) (*http.Request, error) { +// NewGetBuildRequest generates requests for GetBuild +func NewGetBuildRequest(server string, id string) (*http.Request, error) { var err error var pathParam0 string @@ -1400,7 +1593,7 @@ func NewDeleteDeviceRequest(server string, id string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/devices/%s", pathParam0) + operationPath := fmt.Sprintf("/builds/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1410,7 +1603,7 @@ func NewDeleteDeviceRequest(server string, id string) (*http.Request, error) { return nil, err } - req, err := http.NewRequest("DELETE", queryURL.String(), nil) + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err } @@ -1418,8 +1611,8 @@ func NewDeleteDeviceRequest(server string, id string) (*http.Request, error) { return req, nil } -// NewGetDeviceRequest generates requests for GetDevice -func NewGetDeviceRequest(server string, id string) (*http.Request, error) { +// NewGetBuildEventsRequest generates requests for GetBuildEvents +func NewGetBuildEventsRequest(server string, id string, params *GetBuildEventsParams) (*http.Request, error) { var err error var pathParam0 string @@ -1434,7 +1627,7 @@ func NewGetDeviceRequest(server string, id string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/devices/%s", pathParam0) + operationPath := fmt.Sprintf("/builds/%s/events", pathParam0) if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1444,6 +1637,28 @@ func NewGetDeviceRequest(server string, id string) (*http.Request, error) { return nil, err } + if params != nil { + queryValues := queryURL.Query() + + if params.Follow != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "follow", runtime.ParamLocationQuery, *params.Follow); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err @@ -1452,8 +1667,8 @@ func NewGetDeviceRequest(server string, id string) (*http.Request, error) { return req, nil } -// NewGetHealthRequest generates requests for GetHealth -func NewGetHealthRequest(server string) (*http.Request, error) { +// NewListDevicesRequest generates requests for ListDevices +func NewListDevicesRequest(server string) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1461,7 +1676,7 @@ func NewGetHealthRequest(server string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/health") + operationPath := fmt.Sprintf("/devices") if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1479,8 +1694,19 @@ func NewGetHealthRequest(server string) (*http.Request, error) { return req, nil } -// NewListImagesRequest generates requests for ListImages -func NewListImagesRequest(server string) (*http.Request, error) { +// NewCreateDeviceRequest calls the generic CreateDevice builder with application/json body +func NewCreateDeviceRequest(server string, body CreateDeviceJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewCreateDeviceRequestWithBody(server, "application/json", bodyReader) +} + +// NewCreateDeviceRequestWithBody generates requests for CreateDevice with any type of body +func NewCreateDeviceRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1488,7 +1714,7 @@ func NewListImagesRequest(server string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/images") + operationPath := fmt.Sprintf("/devices") if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1498,27 +1724,18 @@ func NewListImagesRequest(server string) (*http.Request, error) { return nil, err } - req, err := http.NewRequest("GET", queryURL.String(), nil) + req, err := http.NewRequest("POST", queryURL.String(), body) if err != nil { return nil, err } - return req, nil -} + req.Header.Add("Content-Type", contentType) -// NewCreateImageRequest calls the generic CreateImage builder with application/json body -func NewCreateImageRequest(server string, body CreateImageJSONRequestBody) (*http.Request, error) { - var bodyReader io.Reader - buf, err := json.Marshal(body) - if err != nil { - return nil, err - } - bodyReader = bytes.NewReader(buf) - return NewCreateImageRequestWithBody(server, "application/json", bodyReader) + return req, nil } -// NewCreateImageRequestWithBody generates requests for CreateImage with any type of body -func NewCreateImageRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { +// NewListAvailableDevicesRequest generates requests for ListAvailableDevices +func NewListAvailableDevicesRequest(server string) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1526,7 +1743,7 @@ func NewCreateImageRequestWithBody(server string, contentType string, body io.Re return nil, err } - operationPath := fmt.Sprintf("/images") + operationPath := fmt.Sprintf("/devices/available") if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1536,23 +1753,21 @@ func NewCreateImageRequestWithBody(server string, contentType string, body io.Re return nil, err } - req, err := http.NewRequest("POST", queryURL.String(), body) + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err } - req.Header.Add("Content-Type", contentType) - return req, nil } -// NewDeleteImageRequest generates requests for DeleteImage -func NewDeleteImageRequest(server string, name string) (*http.Request, error) { +// NewDeleteDeviceRequest generates requests for DeleteDevice +func NewDeleteDeviceRequest(server string, id string) (*http.Request, error) { var err error var pathParam0 string - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "name", runtime.ParamLocationPath, name) + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) if err != nil { return nil, err } @@ -1562,7 +1777,7 @@ func NewDeleteImageRequest(server string, name string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/images/%s", pathParam0) + operationPath := fmt.Sprintf("/devices/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1580,13 +1795,13 @@ func NewDeleteImageRequest(server string, name string) (*http.Request, error) { return req, nil } -// NewGetImageRequest generates requests for GetImage -func NewGetImageRequest(server string, name string) (*http.Request, error) { +// NewGetDeviceRequest generates requests for GetDevice +func NewGetDeviceRequest(server string, id string) (*http.Request, error) { var err error var pathParam0 string - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "name", runtime.ParamLocationPath, name) + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) if err != nil { return nil, err } @@ -1596,7 +1811,7 @@ func NewGetImageRequest(server string, name string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/images/%s", pathParam0) + operationPath := fmt.Sprintf("/devices/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1614,8 +1829,8 @@ func NewGetImageRequest(server string, name string) (*http.Request, error) { return req, nil } -// NewListIngressesRequest generates requests for ListIngresses -func NewListIngressesRequest(server string) (*http.Request, error) { +// NewGetHealthRequest generates requests for GetHealth +func NewGetHealthRequest(server string) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1623,7 +1838,7 @@ func NewListIngressesRequest(server string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/ingresses") + operationPath := fmt.Sprintf("/health") if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1641,19 +1856,8 @@ func NewListIngressesRequest(server string) (*http.Request, error) { return req, nil } -// NewCreateIngressRequest calls the generic CreateIngress builder with application/json body -func NewCreateIngressRequest(server string, body CreateIngressJSONRequestBody) (*http.Request, error) { - var bodyReader io.Reader - buf, err := json.Marshal(body) - if err != nil { - return nil, err - } - bodyReader = bytes.NewReader(buf) - return NewCreateIngressRequestWithBody(server, "application/json", bodyReader) -} - -// NewCreateIngressRequestWithBody generates requests for CreateIngress with any type of body -func NewCreateIngressRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { +// NewListImagesRequest generates requests for ListImages +func NewListImagesRequest(server string) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1661,7 +1865,7 @@ func NewCreateIngressRequestWithBody(server string, contentType string, body io. return nil, err } - operationPath := fmt.Sprintf("/ingresses") + operationPath := fmt.Sprintf("/images") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1671,21 +1875,194 @@ func NewCreateIngressRequestWithBody(server string, contentType string, body io. 
return nil, err } - req, err := http.NewRequest("POST", queryURL.String(), body) + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err } - req.Header.Add("Content-Type", contentType) - return req, nil } -// NewDeleteIngressRequest generates requests for DeleteIngress -func NewDeleteIngressRequest(server string, id string) (*http.Request, error) { - var err error - - var pathParam0 string +// NewCreateImageRequest calls the generic CreateImage builder with application/json body +func NewCreateImageRequest(server string, body CreateImageJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewCreateImageRequestWithBody(server, "application/json", bodyReader) +} + +// NewCreateImageRequestWithBody generates requests for CreateImage with any type of body +func NewCreateImageRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/images") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewDeleteImageRequest generates requests for DeleteImage +func NewDeleteImageRequest(server string, name string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "name", runtime.ParamLocationPath, name) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/images/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewGetImageRequest generates requests for GetImage +func NewGetImageRequest(server string, name string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "name", runtime.ParamLocationPath, name) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/images/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewListIngressesRequest generates requests for ListIngresses +func NewListIngressesRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/ingresses") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewCreateIngressRequest calls the generic CreateIngress builder with application/json body +func NewCreateIngressRequest(server string, body CreateIngressJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewCreateIngressRequestWithBody(server, "application/json", bodyReader) +} + +// NewCreateIngressRequestWithBody generates requests for CreateIngress with any type of body +func NewCreateIngressRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/ingresses") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewDeleteIngressRequest generates requests for DeleteIngress +func NewDeleteIngressRequest(server string, id string) (*http.Request, error) { + var err error + + var pathParam0 string pathParam0, err = runtime.StyleParamWithLocation("simple", false, "id", runtime.ParamLocationPath, id) if err != nil { @@ -2476,6 +2853,21 @@ func WithBaseURL(baseURL string) ClientOption { // ClientWithResponsesInterface is the interface specification for the client with responses above. type ClientWithResponsesInterface interface { + // ListBuildsWithResponse request + ListBuildsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListBuildsResponse, error) + + // CreateBuildWithBodyWithResponse request with any body + CreateBuildWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateBuildResponse, error) + + // CancelBuildWithResponse request + CancelBuildWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*CancelBuildResponse, error) + + // GetBuildWithResponse request + GetBuildWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetBuildResponse, error) + + // GetBuildEventsWithResponse request + GetBuildEventsWithResponse(ctx context.Context, id string, params *GetBuildEventsParams, reqEditors ...RequestEditorFn) (*GetBuildEventsResponse, error) + // ListDevicesWithResponse request ListDevicesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListDevicesResponse, error) @@ -2582,6 +2974,126 @@ type ClientWithResponsesInterface interface { GetVolumeWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetVolumeResponse, error) 
} +type ListBuildsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]Build + JSON401 *Error + JSON500 *Error +} + +// Status returns HTTPResponse.Status +func (r ListBuildsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r ListBuildsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type CreateBuildResponse struct { + Body []byte + HTTPResponse *http.Response + JSON202 *Build + JSON400 *Error + JSON401 *Error + JSON500 *Error +} + +// Status returns HTTPResponse.Status +func (r CreateBuildResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r CreateBuildResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type CancelBuildResponse struct { + Body []byte + HTTPResponse *http.Response + JSON404 *Error + JSON409 *Error + JSON500 *Error +} + +// Status returns HTTPResponse.Status +func (r CancelBuildResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r CancelBuildResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GetBuildResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *Build + JSON404 *Error + JSON500 *Error +} + +// Status returns HTTPResponse.Status +func (r GetBuildResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetBuildResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 
+} + +type GetBuildEventsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON404 *Error + JSON500 *Error +} + +// Status returns HTTPResponse.Status +func (r GetBuildEventsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetBuildEventsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type ListDevicesResponse struct { Body []byte HTTPResponse *http.Response @@ -3337,6 +3849,51 @@ func (r GetVolumeResponse) StatusCode() int { return 0 } +// ListBuildsWithResponse request returning *ListBuildsResponse +func (c *ClientWithResponses) ListBuildsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListBuildsResponse, error) { + rsp, err := c.ListBuilds(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseListBuildsResponse(rsp) +} + +// CreateBuildWithBodyWithResponse request with arbitrary body returning *CreateBuildResponse +func (c *ClientWithResponses) CreateBuildWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateBuildResponse, error) { + rsp, err := c.CreateBuildWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseCreateBuildResponse(rsp) +} + +// CancelBuildWithResponse request returning *CancelBuildResponse +func (c *ClientWithResponses) CancelBuildWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*CancelBuildResponse, error) { + rsp, err := c.CancelBuild(ctx, id, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseCancelBuildResponse(rsp) +} + +// GetBuildWithResponse request returning *GetBuildResponse +func (c *ClientWithResponses) GetBuildWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetBuildResponse, error) { + rsp, err := c.GetBuild(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetBuildResponse(rsp) +} + +// GetBuildEventsWithResponse request returning *GetBuildEventsResponse +func (c *ClientWithResponses) GetBuildEventsWithResponse(ctx context.Context, id string, params *GetBuildEventsParams, reqEditors ...RequestEditorFn) (*GetBuildEventsResponse, error) { + rsp, err := c.GetBuildEvents(ctx, id, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetBuildEventsResponse(rsp) +} + // ListDevicesWithResponse request returning *ListDevicesResponse func (c *ClientWithResponses) ListDevicesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListDevicesResponse, error) { rsp, err := c.ListDevices(ctx, reqEditors...) @@ -3576,92 +4133,292 @@ func (c *ClientWithResponses) StatInstancePathWithResponse(ctx context.Context, return ParseStatInstancePathResponse(rsp) } -// StopInstanceWithResponse request returning *StopInstanceResponse -func (c *ClientWithResponses) StopInstanceWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*StopInstanceResponse, error) { - rsp, err := c.StopInstance(ctx, id, reqEditors...) - if err != nil { - return nil, err +// StopInstanceWithResponse request returning *StopInstanceResponse +func (c *ClientWithResponses) StopInstanceWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*StopInstanceResponse, error) { + rsp, err := c.StopInstance(ctx, id, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseStopInstanceResponse(rsp) +} + +// DetachVolumeWithResponse request returning *DetachVolumeResponse +func (c *ClientWithResponses) DetachVolumeWithResponse(ctx context.Context, id string, volumeId string, reqEditors ...RequestEditorFn) (*DetachVolumeResponse, error) { + rsp, err := c.DetachVolume(ctx, id, volumeId, reqEditors...) + if err != nil { + return nil, err + } + return ParseDetachVolumeResponse(rsp) +} + +// AttachVolumeWithBodyWithResponse request with arbitrary body returning *AttachVolumeResponse +func (c *ClientWithResponses) AttachVolumeWithBodyWithResponse(ctx context.Context, id string, volumeId string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*AttachVolumeResponse, error) { + rsp, err := c.AttachVolumeWithBody(ctx, id, volumeId, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseAttachVolumeResponse(rsp) +} + +func (c *ClientWithResponses) AttachVolumeWithResponse(ctx context.Context, id string, volumeId string, body AttachVolumeJSONRequestBody, reqEditors ...RequestEditorFn) (*AttachVolumeResponse, error) { + rsp, err := c.AttachVolume(ctx, id, volumeId, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseAttachVolumeResponse(rsp) +} + +// GetResourcesWithResponse request returning *GetResourcesResponse +func (c *ClientWithResponses) GetResourcesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetResourcesResponse, error) { + rsp, err := c.GetResources(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetResourcesResponse(rsp) +} + +// ListVolumesWithResponse request returning *ListVolumesResponse +func (c *ClientWithResponses) ListVolumesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListVolumesResponse, error) { + rsp, err := c.ListVolumes(ctx, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseListVolumesResponse(rsp) +} + +// CreateVolumeWithBodyWithResponse request with arbitrary body returning *CreateVolumeResponse +func (c *ClientWithResponses) CreateVolumeWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateVolumeResponse, error) { + rsp, err := c.CreateVolumeWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseCreateVolumeResponse(rsp) +} + +func (c *ClientWithResponses) CreateVolumeWithResponse(ctx context.Context, body CreateVolumeJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateVolumeResponse, error) { + rsp, err := c.CreateVolume(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseCreateVolumeResponse(rsp) +} + +// DeleteVolumeWithResponse request returning *DeleteVolumeResponse +func (c *ClientWithResponses) DeleteVolumeWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteVolumeResponse, error) { + rsp, err := c.DeleteVolume(ctx, id, reqEditors...) + if err != nil { + return nil, err + } + return ParseDeleteVolumeResponse(rsp) +} + +// GetVolumeWithResponse request returning *GetVolumeResponse +func (c *ClientWithResponses) GetVolumeWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetVolumeResponse, error) { + rsp, err := c.GetVolume(ctx, id, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseGetVolumeResponse(rsp) +} + +// ParseListBuildsResponse parses an HTTP response from a ListBuildsWithResponse call +func ParseListBuildsResponse(rsp *http.Response) (*ListBuildsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &ListBuildsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []Build + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseCreateBuildResponse parses an HTTP response from a CreateBuildWithResponse call +func ParseCreateBuildResponse(rsp *http.Response) (*CreateBuildResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &CreateBuildResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 202: + var dest Build + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON202 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case 
strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseCancelBuildResponse parses an HTTP response from a CancelBuildWithResponse call +func ParseCancelBuildResponse(rsp *http.Response) (*CancelBuildResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &CancelBuildResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 409: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON409 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + } - return ParseStopInstanceResponse(rsp) -} -// DetachVolumeWithResponse request returning *DetachVolumeResponse -func (c *ClientWithResponses) DetachVolumeWithResponse(ctx context.Context, id string, volumeId string, reqEditors ...RequestEditorFn) (*DetachVolumeResponse, error) { - rsp, err := c.DetachVolume(ctx, id, volumeId, reqEditors...) 
- if err != nil { - return nil, err - } - return ParseDetachVolumeResponse(rsp) + return response, nil } -// AttachVolumeWithBodyWithResponse request with arbitrary body returning *AttachVolumeResponse -func (c *ClientWithResponses) AttachVolumeWithBodyWithResponse(ctx context.Context, id string, volumeId string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*AttachVolumeResponse, error) { - rsp, err := c.AttachVolumeWithBody(ctx, id, volumeId, contentType, body, reqEditors...) +// ParseGetBuildResponse parses an HTTP response from a GetBuildWithResponse call +func ParseGetBuildResponse(rsp *http.Response) (*GetBuildResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - return ParseAttachVolumeResponse(rsp) -} -func (c *ClientWithResponses) AttachVolumeWithResponse(ctx context.Context, id string, volumeId string, body AttachVolumeJSONRequestBody, reqEditors ...RequestEditorFn) (*AttachVolumeResponse, error) { - rsp, err := c.AttachVolume(ctx, id, volumeId, body, reqEditors...) - if err != nil { - return nil, err + response := &GetBuildResponse{ + Body: bodyBytes, + HTTPResponse: rsp, } - return ParseAttachVolumeResponse(rsp) -} -// GetResourcesWithResponse request returning *GetResourcesResponse -func (c *ClientWithResponses) GetResourcesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetResourcesResponse, error) { - rsp, err := c.GetResources(ctx, reqEditors...) 
- if err != nil { - return nil, err - } - return ParseGetResourcesResponse(rsp) -} + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest Build + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest -// ListVolumesWithResponse request returning *ListVolumesResponse -func (c *ClientWithResponses) ListVolumesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*ListVolumesResponse, error) { - rsp, err := c.ListVolumes(ctx, reqEditors...) - if err != nil { - return nil, err - } - return ParseListVolumesResponse(rsp) -} + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest -// CreateVolumeWithBodyWithResponse request with arbitrary body returning *CreateVolumeResponse -func (c *ClientWithResponses) CreateVolumeWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*CreateVolumeResponse, error) { - rsp, err := c.CreateVolumeWithBody(ctx, contentType, body, reqEditors...) - if err != nil { - return nil, err } - return ParseCreateVolumeResponse(rsp) + + return response, nil } -func (c *ClientWithResponses) CreateVolumeWithResponse(ctx context.Context, body CreateVolumeJSONRequestBody, reqEditors ...RequestEditorFn) (*CreateVolumeResponse, error) { - rsp, err := c.CreateVolume(ctx, body, reqEditors...) 
+// ParseGetBuildEventsResponse parses an HTTP response from a GetBuildEventsWithResponse call +func ParseGetBuildEventsResponse(rsp *http.Response) (*GetBuildEventsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() if err != nil { return nil, err } - return ParseCreateVolumeResponse(rsp) -} -// DeleteVolumeWithResponse request returning *DeleteVolumeResponse -func (c *ClientWithResponses) DeleteVolumeWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*DeleteVolumeResponse, error) { - rsp, err := c.DeleteVolume(ctx, id, reqEditors...) - if err != nil { - return nil, err + response := &GetBuildEventsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, } - return ParseDeleteVolumeResponse(rsp) -} -// GetVolumeWithResponse request returning *GetVolumeResponse -func (c *ClientWithResponses) GetVolumeWithResponse(ctx context.Context, id string, reqEditors ...RequestEditorFn) (*GetVolumeResponse, error) { - rsp, err := c.GetVolume(ctx, id, reqEditors...) - if err != nil { - return nil, err + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest Error + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + } - return ParseGetVolumeResponse(rsp) + + return response, nil } // ParseListDevicesResponse parses an HTTP response from a ListDevicesWithResponse call @@ -4983,6 +5740,21 @@ func ParseGetVolumeResponse(rsp *http.Response) (*GetVolumeResponse, error) { // ServerInterface represents all server handlers. 
type ServerInterface interface { + // List builds + // (GET /builds) + ListBuilds(w http.ResponseWriter, r *http.Request) + // Create a new build + // (POST /builds) + CreateBuild(w http.ResponseWriter, r *http.Request) + // Cancel build + // (DELETE /builds/{id}) + CancelBuild(w http.ResponseWriter, r *http.Request, id string) + // Get build details + // (GET /builds/{id}) + GetBuild(w http.ResponseWriter, r *http.Request, id string) + // Stream build events (SSE) + // (GET /builds/{id}/events) + GetBuildEvents(w http.ResponseWriter, r *http.Request, id string, params GetBuildEventsParams) // List registered devices // (GET /devices) ListDevices(w http.ResponseWriter, r *http.Request) @@ -5082,6 +5854,36 @@ type ServerInterface interface { type Unimplemented struct{} +// List builds +// (GET /builds) +func (_ Unimplemented) ListBuilds(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Create a new build +// (POST /builds) +func (_ Unimplemented) CreateBuild(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Cancel build +// (DELETE /builds/{id}) +func (_ Unimplemented) CancelBuild(w http.ResponseWriter, r *http.Request, id string) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Get build details +// (GET /builds/{id}) +func (_ Unimplemented) GetBuild(w http.ResponseWriter, r *http.Request, id string) { + w.WriteHeader(http.StatusNotImplemented) +} + +// Stream build events (SSE) +// (GET /builds/{id}/events) +func (_ Unimplemented) GetBuildEvents(w http.ResponseWriter, r *http.Request, id string, params GetBuildEventsParams) { + w.WriteHeader(http.StatusNotImplemented) +} + // List registered devices // (GET /devices) func (_ Unimplemented) ListDevices(w http.ResponseWriter, r *http.Request) { @@ -5277,6 +6079,150 @@ type ServerInterfaceWrapper struct { type MiddlewareFunc func(http.Handler) http.Handler +// ListBuilds operation middleware +func (siw 
*ServerInterfaceWrapper) ListBuilds(w http.ResponseWriter, r *http.Request) { + + ctx := r.Context() + + ctx = context.WithValue(ctx, BearerAuthScopes, []string{}) + + r = r.WithContext(ctx) + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.ListBuilds(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + +// CreateBuild operation middleware +func (siw *ServerInterfaceWrapper) CreateBuild(w http.ResponseWriter, r *http.Request) { + + ctx := r.Context() + + ctx = context.WithValue(ctx, BearerAuthScopes, []string{}) + + r = r.WithContext(ctx) + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.CreateBuild(w, r) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + +// CancelBuild operation middleware +func (siw *ServerInterfaceWrapper) CancelBuild(w http.ResponseWriter, r *http.Request) { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", chi.URLParam(r, "id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "id", Err: err}) + return + } + + ctx := r.Context() + + ctx = context.WithValue(ctx, BearerAuthScopes, []string{}) + + r = r.WithContext(ctx) + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.CancelBuild(w, r, id) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + +// GetBuild operation middleware +func (siw *ServerInterfaceWrapper) GetBuild(w http.ResponseWriter, r *http.Request) { + + var err error + + // 
------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", chi.URLParam(r, "id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "id", Err: err}) + return + } + + ctx := r.Context() + + ctx = context.WithValue(ctx, BearerAuthScopes, []string{}) + + r = r.WithContext(ctx) + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetBuild(w, r, id) + })) + + for _, middleware := range siw.HandlerMiddlewares { + handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + +// GetBuildEvents operation middleware +func (siw *ServerInterfaceWrapper) GetBuildEvents(w http.ResponseWriter, r *http.Request) { + + var err error + + // ------------- Path parameter "id" ------------- + var id string + + err = runtime.BindStyledParameterWithOptions("simple", "id", chi.URLParam(r, "id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "id", Err: err}) + return + } + + ctx := r.Context() + + ctx = context.WithValue(ctx, BearerAuthScopes, []string{}) + + r = r.WithContext(ctx) + + // Parameter object where we will unmarshal all parameters from the context + var params GetBuildEventsParams + + // ------------- Optional query parameter "follow" ------------- + + err = runtime.BindQueryParameter("form", true, false, "follow", r.URL.Query(), ¶ms.Follow) + if err != nil { + siw.ErrorHandlerFunc(w, r, &InvalidParamFormatError{ParamName: "follow", Err: err}) + return + } + + handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + siw.Handler.GetBuildEvents(w, r, id, params) + })) + + for _, middleware := range siw.HandlerMiddlewares { + 
handler = middleware(handler) + } + + handler.ServeHTTP(w, r) +} + // ListDevices operation middleware func (siw *ServerInterfaceWrapper) ListDevices(w http.ResponseWriter, r *http.Request) { @@ -6273,6 +7219,21 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl ErrorHandlerFunc: options.ErrorHandlerFunc, } + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/builds", wrapper.ListBuilds) + }) + r.Group(func(r chi.Router) { + r.Post(options.BaseURL+"/builds", wrapper.CreateBuild) + }) + r.Group(func(r chi.Router) { + r.Delete(options.BaseURL+"/builds/{id}", wrapper.CancelBuild) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/builds/{id}", wrapper.GetBuild) + }) + r.Group(func(r chi.Router) { + r.Get(options.BaseURL+"/builds/{id}/events", wrapper.GetBuildEvents) + }) r.Group(func(r chi.Router) { r.Get(options.BaseURL+"/devices", wrapper.ListDevices) }) @@ -6370,6 +7331,208 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl return r } +type ListBuildsRequestObject struct { +} + +type ListBuildsResponseObject interface { + VisitListBuildsResponse(w http.ResponseWriter) error +} + +type ListBuilds200JSONResponse []Build + +func (response ListBuilds200JSONResponse) VisitListBuildsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type ListBuilds401JSONResponse Error + +func (response ListBuilds401JSONResponse) VisitListBuildsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type ListBuilds500JSONResponse Error + +func (response ListBuilds500JSONResponse) VisitListBuildsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type 
CreateBuildRequestObject struct { + Body *multipart.Reader +} + +type CreateBuildResponseObject interface { + VisitCreateBuildResponse(w http.ResponseWriter) error +} + +type CreateBuild202JSONResponse Build + +func (response CreateBuild202JSONResponse) VisitCreateBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(202) + + return json.NewEncoder(w).Encode(response) +} + +type CreateBuild400JSONResponse Error + +func (response CreateBuild400JSONResponse) VisitCreateBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(400) + + return json.NewEncoder(w).Encode(response) +} + +type CreateBuild401JSONResponse Error + +func (response CreateBuild401JSONResponse) VisitCreateBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + + return json.NewEncoder(w).Encode(response) +} + +type CreateBuild500JSONResponse Error + +func (response CreateBuild500JSONResponse) VisitCreateBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type CancelBuildRequestObject struct { + Id string `json:"id"` +} + +type CancelBuildResponseObject interface { + VisitCancelBuildResponse(w http.ResponseWriter) error +} + +type CancelBuild204Response struct { +} + +func (response CancelBuild204Response) VisitCancelBuildResponse(w http.ResponseWriter) error { + w.WriteHeader(204) + return nil +} + +type CancelBuild404JSONResponse Error + +func (response CancelBuild404JSONResponse) VisitCancelBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type CancelBuild409JSONResponse Error + +func (response CancelBuild409JSONResponse) VisitCancelBuildResponse(w http.ResponseWriter) error { + 
w.Header().Set("Content-Type", "application/json") + w.WriteHeader(409) + + return json.NewEncoder(w).Encode(response) +} + +type CancelBuild500JSONResponse Error + +func (response CancelBuild500JSONResponse) VisitCancelBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetBuildRequestObject struct { + Id string `json:"id"` +} + +type GetBuildResponseObject interface { + VisitGetBuildResponse(w http.ResponseWriter) error +} + +type GetBuild200JSONResponse Build + +func (response GetBuild200JSONResponse) VisitGetBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + + return json.NewEncoder(w).Encode(response) +} + +type GetBuild404JSONResponse Error + +func (response GetBuild404JSONResponse) VisitGetBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetBuild500JSONResponse Error + +func (response GetBuild500JSONResponse) VisitGetBuildResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + +type GetBuildEventsRequestObject struct { + Id string `json:"id"` + Params GetBuildEventsParams +} + +type GetBuildEventsResponseObject interface { + VisitGetBuildEventsResponse(w http.ResponseWriter) error +} + +type GetBuildEvents200TexteventStreamResponse struct { + Body io.Reader + ContentLength int64 +} + +func (response GetBuildEvents200TexteventStreamResponse) VisitGetBuildEventsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "text/event-stream") + if response.ContentLength != 0 { + w.Header().Set("Content-Length", fmt.Sprint(response.ContentLength)) + } + w.WriteHeader(200) + + if closer, ok := response.Body.(io.ReadCloser); ok 
{ + defer closer.Close() + } + _, err := io.Copy(w, response.Body) + return err +} + +type GetBuildEvents404JSONResponse Error + +func (response GetBuildEvents404JSONResponse) VisitGetBuildEventsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(404) + + return json.NewEncoder(w).Encode(response) +} + +type GetBuildEvents500JSONResponse Error + +func (response GetBuildEvents500JSONResponse) VisitGetBuildEventsResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + + return json.NewEncoder(w).Encode(response) +} + type ListDevicesRequestObject struct { } @@ -7613,6 +8776,21 @@ func (response GetVolume500JSONResponse) VisitGetVolumeResponse(w http.ResponseW // StrictServerInterface represents all server handlers. type StrictServerInterface interface { + // List builds + // (GET /builds) + ListBuilds(ctx context.Context, request ListBuildsRequestObject) (ListBuildsResponseObject, error) + // Create a new build + // (POST /builds) + CreateBuild(ctx context.Context, request CreateBuildRequestObject) (CreateBuildResponseObject, error) + // Cancel build + // (DELETE /builds/{id}) + CancelBuild(ctx context.Context, request CancelBuildRequestObject) (CancelBuildResponseObject, error) + // Get build details + // (GET /builds/{id}) + GetBuild(ctx context.Context, request GetBuildRequestObject) (GetBuildResponseObject, error) + // Stream build events (SSE) + // (GET /builds/{id}/events) + GetBuildEvents(ctx context.Context, request GetBuildEventsRequestObject) (GetBuildEventsResponseObject, error) // List registered devices // (GET /devices) ListDevices(ctx context.Context, request ListDevicesRequestObject) (ListDevicesResponseObject, error) @@ -7737,6 +8915,140 @@ type strictHandler struct { options StrictHTTPServerOptions } +// ListBuilds operation middleware +func (sh *strictHandler) ListBuilds(w http.ResponseWriter, r *http.Request) { + var request 
ListBuildsRequestObject + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.ListBuilds(ctx, request.(ListBuildsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "ListBuilds") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(ListBuildsResponseObject); ok { + if err := validResponse.VisitListBuildsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// CreateBuild operation middleware +func (sh *strictHandler) CreateBuild(w http.ResponseWriter, r *http.Request) { + var request CreateBuildRequestObject + + if reader, err := r.MultipartReader(); err != nil { + sh.options.RequestErrorHandlerFunc(w, r, fmt.Errorf("can't decode multipart body: %w", err)) + return + } else { + request.Body = reader + } + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.CreateBuild(ctx, request.(CreateBuildRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "CreateBuild") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(CreateBuildResponseObject); ok { + if err := validResponse.VisitCreateBuildResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// CancelBuild operation middleware +func (sh *strictHandler) CancelBuild(w http.ResponseWriter, r *http.Request, 
id string) { + var request CancelBuildRequestObject + + request.Id = id + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.CancelBuild(ctx, request.(CancelBuildRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "CancelBuild") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(CancelBuildResponseObject); ok { + if err := validResponse.VisitCancelBuildResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetBuild operation middleware +func (sh *strictHandler) GetBuild(w http.ResponseWriter, r *http.Request, id string) { + var request GetBuildRequestObject + + request.Id = id + + handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetBuild(ctx, request.(GetBuildRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetBuild") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetBuildResponseObject); ok { + if err := validResponse.VisitGetBuildResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + +// GetBuildEvents operation middleware +func (sh *strictHandler) GetBuildEvents(w http.ResponseWriter, r *http.Request, id string, params GetBuildEventsParams) { + var request GetBuildEventsRequestObject + + request.Id = id + request.Params = params + + 
handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, request interface{}) (interface{}, error) { + return sh.ssi.GetBuildEvents(ctx, request.(GetBuildEventsRequestObject)) + } + for _, middleware := range sh.middlewares { + handler = middleware(handler, "GetBuildEvents") + } + + response, err := handler(r.Context(), w, r, request) + + if err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } else if validResponse, ok := response.(GetBuildEventsResponseObject); ok { + if err := validResponse.VisitGetBuildEventsResponse(w); err != nil { + sh.options.ResponseErrorHandlerFunc(w, r, err) + } + } else if response != nil { + sh.options.ResponseErrorHandlerFunc(w, r, fmt.Errorf("unexpected response type: %T", response)) + } +} + // ListDevices operation middleware func (sh *strictHandler) ListDevices(w http.ResponseWriter, r *http.Request) { var request ListDevicesRequestObject @@ -8577,126 +9889,143 @@ func (sh *strictHandler) GetVolume(w http.ResponseWriter, r *http.Request, id st // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9+3LbuNX4q2D4a6dyK8my7GQddTq/cewk606ceOLE+7XrfApEQhI2JMAAoGxtxv/2", - "AfqIfZJvcADwJlCmk1iJm3Q6s4pJAOeGc8PB4ccg5EnKGWFKBqOPgQznJMHw80ApHM7PeZwl5BX5kBGp", - "9J9TwVMiFCXwUsIzpsYpVnP9r4jIUNBUUc6CUXCK1RxdzokgaAGzIDnnWRyhCUEwjkRBNyBXOEljEoyC", - "7YSp7QgrHHQDtUz1n6QSlM2C624gCI44i5dmmSnOYhWMpjiWpFtb9kRPjbBEekgPxuTzTTiPCWbBNcz4", - "IaOCRMHo1zIab/OX+eQ3Eiq9+MEC0xhPYnJEFjQkq2QIMyEIU+NI0AURq6Q4NM/jJZrwjEXIvIc6LItj", - "RKeIcUa2KsRgCxpRTQn9il46GCmREQ9lIoBpTCMPBw6PkXmMjo9QZ06uqosMf5rsB81TMpyQ1Ul/zhLM", - "epq4Giw3P7xbnvv5nm9mypMkG88Ez9LVmY9fnpy8QfAQsSyZEFGecX+Yz0eZIjMi9IRpSMc4igSR0o+/", - "e1iGbTAYDEZ4OBoM+gMflAvCIi4aSWoe+0m6M4jImilbkdTOv0LSF+fHR8cH6JCLlAsMY1dWqgl2mTxl", - "vMpiU+WKT/4PBcHKCn+jKvCj9hJ+4BjNYj7BcbxEGaMfsorc9NGx3gIKpYIvaESiLsLwAFGJcKZ4b0YY", - "EViRCE0FT5CaE1TiLeqQ/qzfRRca3Z5mbg8Pe4NBb3ARVLkT7/VmaRZ0gxQrRYQG8H9/xb3fD3r/HPQe", - 
"vS1+jvu9t3/5g4+RbQUO8SnAafHsOK50kQO2LIV1QNdL6BomN7PvOMGzW3Pv8BhRPQ4JMiWCMI2JgT/i", - "4Xsi+pRvx3QisFhusxllV6MYKyJVFZv1796IH8C2BjE206jfErXangNx68T8kogQS4JiogVEdlFEZ1TJ", - "LsJabWM5JxJpm/JXFGKmZVYqLBTiAhEWoUuq5gjDe1UKJMseTmmPGlCDbpDgq+eEzbTdfLi7Io9aGDv2", - "R+/tn92ftv6/VyRFFhOPML7imaJshuAxmnKB1JxKVMBAFUlg3B8EmQaj4P9tF87AtvUEth11s5jotRLK", - "js2wnRwSLARe+rnmgFvHPakwW6NXzAby4HfkLJtEVltKpDjC4LcAvs9O32zrLZliKdVc8Gw2L3PlV6cP", - "3pZosULdKpLdIKLy/Zjy8ST1wUTle3S8/RJpbYVimlBVaKedweDk8ba8CPQ/Hrh/bPXRkXFoAHyNPBdW", - "aco5FgRNsCQR4gwdnr5BOI55CMpfOw4hZ1M6ywSJ+jUzBLP7pIWwhYYbRxE1q5xWyO1xBsoIPmELKjhL", - "CFNogQXVm6diXD8GL14ePRk/eXEejDQnoyy0lur05avXwSjYHQwGJboW8jDnKo2z2VjS30nFzQt2nz0O", - "6oAc5PCjhCRcLIHjdg7UmVe395SLBCsU0/cEXej5DBN2ntUV7xCWWiHCfJkSsaCSe5y7n/Nnmn+ZJOW9", - "ZoS7ymJJhPb+HO+AmcA+liVaKsOYZ1GvtGQ3+EASENMCUM9Lq96WVt2ttPoN6hrHKWWkUV93vxUde8nF", - "+5jjqLfzhVUsI0rPvYriC/OgykwrACTnv/Y4Krtsgll0SSM1H0f8kmmQPbrEPkH5y7lCudKY4Pg///r3", - "+UnhUOw8m6RWu+wMH3ymdqnpEz21jzAFIlnqR+NN6kfi/OQ///q3w+TrIkGYls+oonRMtFVF5Zc5UXMi", - "SlbGMVj/yXh7MBw5eSktXwnfyoHoiiLkCyJivPQowp2BRxP+IqiC/WXHIW2hkB58gxrUszljtKoIB35N", - "6AHKA9Njvb+tXm4DSQ7IzvDE/hy21c2LMM1kBaRhHZwXEE1ql3xBhcpwrOWkYra8waVJW3jMvMmKlN0N", - "y/9cHrBCofZvtDZQFDyhVu6WmRlyGKvOh9/DMlq+2cO6IYXji3LzqC3MpOIJohFhik4pEahTC8hoNXSr", - "cmzB416EFQZ93NJoGHBXo99kaaYyTGkSzfFssjrlmZZAytCMzvBkqaoOy85glfV+Qrv5faRuygwZ8SDR", - "WHFPwsNJy/GRpqN7t03WB/JIY8XHiyn1zJxrqiICpRKFtTSUFVo9RS8NqU1LddHlnGrdJpEjAhi085Oy", - "I92/YD2kgRuho3yBfNp8Sm3S9aY37lmHixIQlIG3NFluIYzOT/rodQ7tnyRiWNEFcamyOZZoQghDGdhE", - "EsH6kAAsA5BJHfFQVR9ufXCTVduCeIHbZ32kHbgEM3RJ4xjyDQlWNIRkxYTW8LmcE2YZpVfSCoAVbt4F", - "K0uWTU/WVX43AM1AojFWnqiNzKhUotAcUuEkRZ1XTw93d3cf1ZX08EFvsNPbefB6ZzAa6P//M+gGRrlq", - "Tw4r0rPqZxOJQ99cB1V9YdM/ZY1y+Ob4aGgtQnUd9fsefrR/dYXVo4f0Uj76PZmI2W+7eCOpRb96Oiry", - "VqiTSSJ6TvVpqfJlq0pJoYZs1CcnmW6V1TR/WG9+DHav9Zt3kQet6VVIPsIr3U/IVNaVYGVfNevo15YM", - "VXz0X7V/UEh+KSCzOcOQlqYt6Kpj/seC4PfalffYV22e5djYHX/CINPO62SJyJX2a0mEBOdqKk2QVnVT", - 
"dvZ+2tvffbi3PxiU9jll6mEp314SYh7ScaitSisAdGQY46UOTvUY1AHvOkKTmE+qwvtg9+H+T4NHO8O2", - "cBjftB0dci/KjUIdS5G/uKMk96QC1HD408Pd3d3Bw4fDvVZQWQevFVDOGay4Dj/t/rS3sz/ca0UFn6//", - "RAiTU6gdKPHII6QHaRpTE9n0ZEpCOqUhInoGpAegTgJmieRudnVPTnA0FtYN9NoDhWnsIUMp1WIWs2+i", - "jrbpSRYrmsbEPAOGtPJ0AfMjmMmXZqOMETEmjjy3mCkhUnrzHrV0hMMlfwVclIhMstlMk6RMuhMqwbMo", - "HCJK4mhkduiNeg64WQD2tkkOLA4tpeE5vySiF5MFictCYMyRBjbhgqBcTgzTKlhRtsAxjcaUpZlXJBpJ", - "+TQT4F+aSRGe8EyBL2kYVl4Ezh0gRphqde0l1go5fiY4NmfKVUpIhZUJ9pxu5u+reTH+/kZ22El8bDh2", - "GbMaAxKPCTw8OTIGPuRMYcqIQAlR2J5gl7LMcNgRdIOelqkIk4QzxKfTv67POzeEAPkGWedEHpZDz7tz", - "IOnMRpR1F1byeEEilGBGp0QqZN8sryznePjg4QhPwp3hbkSmew8e9vt9f3ZGiWXKKfMs9SR/1o4V2ya3", - "2Svm7Mv55/HhDvLpbXD5GJwevP45GAXbmRTbMQ9xvC0nlI1K/87/WTyAH+afE8q8efhc59YgBRVjNYIO", - "V8020lHXFNO4VsyRZnFs/z7SmDAS5gLJQdncGOL6/e8XWjRj+juJkPd0UuGZdsSNxH3eMWQ3+JCRjIxT", - "LqlZfcULtk90KDvJaBwhGFEu7FDmT9XEyLAR/ZJzAumGdZ6JtGkNeMeumTFFYxNx+/01j5/SApRc7dbS", - "fICzfVr4yylhkbGgWgzMr5Czhd4V8A+AT+sZIzgVBe6erTDjkov3lM3GEfVI5y/mIYqoIKGC46Cb91Cw", - "jdP0ZlH0Z4NynZajf0P4YU9SPdblq2vyT4nbq6u/nP39w//I059+2/nw/Pz8H4tnfz96Qf9xHp++/Kzj", - "ovWH6F/1JHxtahaC1coJeFvxOMEq9Dg+cy5VA9XsE6Q4SvTgPjrEDE3I6IL10HOqiMDxCF0EOKV9S8x+", - "yJOLAHXIFQ6VGYU4Q3oqNCc4ImJLDz41R2Z68EcXk13X54iWDCc0RMISOT+Kkdkk4gmmbOuCXTA7F3KI", - "SMj96V8RCnGqMkE0R1CYiXiJJgKHJC/sKRbvoo84Ta+3LpiaY4XIlRIagxQLlVfcuBWA0RYqk1u0r5MI", - "LXCcEYlCINQFy+1HpEHQkygsZkT180gU/P1afq+BKN6EDheqckSxP+h6+Ij0e5qRMZWKMJQfJVIJwos6", - "7oBpf1DZ/vuD/ZvT2LkMrRE/kO7VMk8nlC32hxFgWNoo4/FcqfTmuk3QN2aPoJ9fvz7VZND/PUNuooIW", - "OYs7nMVLhHVcTKRJzqoYfBJ7prcV+BKwhrstEXptXtbDYnkzHk9gYfT6+RlSRCSUGf3dCTU5pzp8JyZN", - "SKXMtChSjA4OT55s9VvUqQJtc/jX8PF1jmEtG+VOpFczYDCiyL1o+nbR8VFXu1N2hxaOFqTfn3KBYqNg", - "in09Qm8kqR6GAatMptBwMl4WVTFGq18EW27GtK4pRuhV7t/hHJS8ErAQBjdlsS9h2gv2ixYMczawMnu3", - "Ciucetj4xao2OAnACtncCZjiZlWwfvt7KA57nrN6wcDt9na50kAv5heNgvd37oHs3jaWvG1VVfVAuVRA", - "kBdWfd2KqNX6JizHkuFUzrlqPrHDyL2DyBWVSq5WE7U6Y1qtpqoaG1MnteaI/kvWRYmMMTguq6PxxSue", - 
"vuYB1LdXbbW2Pupzi5ysu3VHNU6N29tXH1Td6ebPX7Za6U7AqdQd+ZRB2Sq56oBPLjXqBtRzMnogJZ0x", - "EqHj06KevkhfuOlrOD0a9nce7vd3BoP+zqBNMifB4Zq1Tw4O2y8+GJrwdoQnozAakelnJJOsYBv3AceX", - "eCnRhXPwLgLjUZZcydK2tU5gq3T2akXXpxVw1U3aTSVatynJaqXvoRKzwVE4gyrN23sJDxq9hBu5KhVW", - "5GY33myiM3jZjRrfJs1JUMizOGJ/Umiid55x7Elk4w9JlJEU8y6V6A17z/glq6Jusl16/37IiFii85OT", - "Sm5UkGkm25X/SMXTtJEPPL0VG4Y3OGs3QlOqwNtE1V1dE5Ys0BevsSsnctxhn5G6Fgmdstw1F3/BdJCs", - "MfV60UhLBrKzo0mmUF4ErUXuUPtBqORdmVIniJ9eGUdLzwA2I9RP4mXugK0dfIq1+LmxKfxr/Yizeaa0", - "cYcxcp4ppP8FIGsUrAO7fgojySP0gsMYC2lXq/+aJ2xexyyaLFdfr3vNHZPb0cGc4oJEsJjdliP0NN+K", - "+Wa2m7cjif1pNIQ9FIYD7y0T9Fmn1XIr6AaW6kE3MCQMuoGjjP5pMIRfAHzQDSwg3nqSU6zmx2zKV8O0", - "26gse/jiguJUIymlVjMRYZREW330sqK7LN3gOCeWBEUZseVthg4C24pCbELVFKs5CCYMpGxWLbZeWbCN", - "IjEwrC9nhHXti218Huk/MHgtMqCVCUkkwsXRQav4isrxlMakzcSCzLIYCwTvtwNZLpOYsvdtZpfLZMJj", - "GiI9oG6QpjyO+eVYP5J/A1y2WmGnB4yLLFnNwBjgbI7UMKS2boHC3zSWW7VTl1Bbg20zfhuuXbdxIb1V", - "EE9pTFAC1S9vGL0qCXq1NmhvOGg6ZGuYtHK8Vi37aVVeVLMkVmR9luIVkTwTITnILy54UjRptgrnQhtU", - "d9+hegK758MWsizrjhTzqUrnis4rdbVXVbqWaqBalVw5Y+2tKsxtYsMxk6kW8Abfblq/o39cTkXWo+JF", - "4q990a5yE7VOjCO9Sq9K5u7B/qNHu3sPHg1bkcZGO3m43JAMawqZHQTbkoS1O0JVjg0fDOB/twLKBMx+", - "kBqC5ipAlfs+nwzQ9Zrtc5afUtdq8PP9saYxQ8FJYaersHJvvxW1sOsU4Ql23SPwKErXODtkOiXgqI0N", - "3XoFMLVDnlYwhDjFIVVLzzkovoS8N8pfKc3+sF3JZA1YD0nt3AhPlfb+F0TIbFIU2XXc4ujPCDJJNVnY", - "b11PKrPJGGbwJN3qq8J79qAoqoUgRdDDs0lcSk/bSnFtJkAifHnUy5yY6BLLSmyof4eKRN3SNd16EsG8", - "sa5ce7W2SoOCbNl0KT3qKyWv2SA7qMz+Gju7QdmaFOJcp/g6M9a8BbVVhlOoNmGaxyp66kStXWwzkdUP", - "1g5+2qjxpFzpvbaUvlIWnhuU2y9bSsveZmC9BhXEw8JgKVDM3a1wyMdcEzQ3XXBKXAOhWokqlUrH//bO", - "Dyq9jDokSdXSFUm5mH7rdkH8QT6hVza+8LHW4NGXKKx5s7aS5r/kylw5b+IWuTFjssLTxuNrv/d4VD+T", - "MGGSvTJQzaHXCqGl6jU7l+u6XJl2UxAD2dKRWVavdb1FZ6umqLfYOeaEumhtdVMw13BQbO7TlDArQdLM", - "G5M0+8w2YFS6/l+fSDIbkdxci2FSNDom7NXvlIAXdikohDiWQIawmgR51LoaGq/P5Z/gq3wFCCCxRLWr", - "zwaPUluQZ4/hivsrd7eATt0UAEb9Evvjz+uP5qRqlRnrGqa5tKx341n9s0ajNe2tmnAWa3TX92TTqouE", - 
"maBqeaYNgj1xJFgQcZAZMQRLAUjAn4vFoR7p+hqixqnHeXxGGBE0RAenxyAlCWZ4pll2foJiOiXhMoyJ", - "LSdZSW1Ce4qXh8c9UwfnTlzh/I8qIIi7b3pwegxX3YQ06w76wz60RuEpYTilwSjY7e/AZT5NBkBxu9QO", - "xyZn9EYEU3YcWZN7ZN/RxJUpZ9K8PxwMzG0SpqxyxcWFou3fpEk7GAPb2kuzF59X8+grhRLOGRBww5Vo", - "SXfIXHeDvcHOrYC78UKQD4Q3DGdqzgX9nUR60Qe3pMgnLXrMTMzr+r4Q+2IhwsHo16rw/vr2+m03kFmS", - "YO0xGtL56ZZy6ZGCcse2wOwxItVjHi2/GL6+pnDX1Q2ttdf1ihB+OT472Vulub2lW5DMiNgGuP0YR3mh", - "WMdesMoL4CpXgb+W0O8N9u5+0dIN8vzqF+Km7M4A8ejugTjkbBrTUKGeg8U2CkM4Nk0JqgJyX9TBKws1", - "wg6vKdRNFl3O9HTOVGxXklKNRqPW5HQz1qPeWfUWZiTHqnRn+4cluUl0jqgMtXNZlpZeiNNSL1dZ7NOy", - "FH2k0bXxlWJizrOrMnQEf89NTooFTogiQgJMDa37UNHnlOoH7iwIwlwTRFbNSbdEw7ov+XZFYvca+zdk", - "rG4bNqAUj2oK8SsqwlpBWamJyX2S5jc5F13ThuuuX8M9I+rbEs3B5rwgd7H/a4r5fZGoZ0S5LZKTTWvB", - "eX4hvUm87JX1O2S0XcGD+JmOPs2uNoCaQqYCLTMUhXMSvjcI2d4i6zyCY9d+5O79AHPv/hbW34L/w9y3", - "CBwLWq0LFo9tddvdxYqVDtStQsXhF4PACpiHyFCaP3EXmU2JHZZLFm59jZjxvzsqrPcCuUc76TSLY+hm", - "Zy+yF90Hyvp0+6P2D1r4yW63rfVF3rx63iMs5BGJ7BWSZofEXTb+st6yYZhB5YeYtImvgFROMJqd0c/g", - "vzk5KBrw/3H41N4c+uPwqbk79Mfdg6IP/90Iy2BTqnnT3us9Fj7tvNIq0UA1mSvBN3l7+Vsbcfhs74Xb", - "uHw5gD+8vjZeX5lcax2/vA3GHbp+1W90bPicIBc2H7Xhkbtt8Z25fJtNPVmJNEekUJlRycXbCwjwFQd7", - "o990CL5PW89WHNBc4sr6t2UOtdiQa70DJ7rHR13brMG0WEgFmdKrzWVUHRwb9xLtuptPpx4kEzrLeCbL", - "t8ihNweRRT/MigK+b/5rYZ4bPdhvWEoHmzQdG3dQf8j9HbnOdYYa5W2ORW5ynt1bm3Gei6Oa9t6zg/CH", - "99zKey6Ra733nF8+vkv3ufqRtI37z07efAS3dZXfowd9z7xSzGyOu3TYW9FxrR3UouXLettffN9k4wf9", - "+eKb90vdXcP7mEOC7hHwITbnCRa2ptkV/NbkYbBZ3bd5F/A+i9izcptBv7MFimg75rOy21VvcyIITop2", - "Yki/jbBEZwBY74wwhZ4sNFb9C+Z6Er4zl6TeoVxQzRcZYxIq+92jmMN3fSTMD3053uE0fZf33twaoWdQ", - "3lmirlm8I4mgOEYhZ5LHpr/Fu0WSvBut1omfn5zAIHhnbirC343ybxHle0zqty7YBXtFVCaYBCxiLBV6", - "gWLKiEQdzXDB49h8p+KdpmcJvy1ocKhnNM3t4uUF0yMoy4i0WFI2Q4xc2gnpFL0zjQPg/sE70+uwcdc/", - "11z6Sju/29xzxuCiOBJAONMvksAHCGBd6MFTLGw/jlAsld+i2Bl4rzt9XE10AU29JDV3YSmjSssHz5T5", - "4IIPEEN5PyiNV3xWvw0xQ+6iaEWUcZq2FV8LJkjxIknWyDDqFF35kFQRz9RfpIqIMG2ErXQ3CTfq4ND8", - 
"Q+H3pultpU+g6briI5W9leslVWBae7tmLeZfiyQJTNPCBPuar7SwJIpcqW2i1UrPkLWqU+sTrsZjmjMw", - "EHXOzp5s/bAZLd0SIFlV2VsCeiyH7foDN9W8wdsr88J377m49khfWQw3fxRRgoJC4zYWTZb2w4B5l9Z7", - "dScAGFlgBvbO4uXdI+5Z4x6x7aq++z1SyMd3vktCLqAPu3SdFO9P8VYp4iht9w40uSuax3Vd1Ht+crLV", - "tGlMK+/GLSN+hMO2jvK7tynQ9+/+7RbTyBXnCKxLFuoNoRpjdBezUmbaZehQw3wrDq+2TID+eHIpFUlM", - "wD7NYrjYBlXr8F3fqRtnagW68KVfLf5dSFmVer9dsAmZanuYEqHX1sP1/KXYwxfWnimcb99Tswe/jbgW", - "uihAKIdVE9VWPm3kGij4Yqe858Mng/QUAtVq/0GJOvBJdwBzIVGsf2ytjXRNc8Lbxbt3qeHy9pu+W61G", - "ZnNh/h403HFNrblWsvdOrT0j5c3i9A8w2qfWeLrOzPP0h5W3fW1/+MT30ieGg54cm85M4BAsrrSdi/3+", - "r20Buv3R/Di+6bhQ4XB+7tpPfRum1HaruWkZh+C92JQWp4iYK72b35M8byh0T69taMI5FCB1Uj749FsB", - "06jse5PuL1/jUqbjrSpcNrq33HX5b2ZvbdryWRhcuXaZHvdlmxtJc5goXgttRbmB6dqA1jW0hG66blje", - "CbZbbu9rPg+bB6hFI7q8k2j/guWtUxFlYZxFBB2evunaz7V04YMwZgbbsLOP/B1uJcKCuDa3F0xxFOI4", - "zGKsCMpbvZr2zLLhWPdVqf3xne23YhEPo/MetzLvgXqfYgy/TAD3yk1WQeJK3zpprC21nz3ZSGWpNWa3", - "qCt1GPwowWtRVVoilnMpfA0rJcJQO2Be76OzLE25UBKpSw7fOpBwlv/3s5cv0IRHyxHKxzFk2upaFWf7", - "ocqUhHRKSQQ9KfXYE6jWxgI+qpqUJnAjU0F6KU9BdUTmyo6lsXGPMFJY9Ge/IyzCOV0QjzIxc+b+0d2V", - "x9Zdh26QOPS2NXrQBbc6ae3jCgUsVX5UcTQZRPtNcfM9w7ypqZui1Bl4QhkWy7ZtgV/a+hwUZlLxxM17", - "fIQ6uPoFQ9syORV8QaN6S/FvpH/wCb6iSZbkH9F49hg+SSBMqQd8nAYKjZxMkauQkEhC5cfWLXsNr7YZ", - "trzw9A3daN2006aNPuVXrJkumhNqFmsf0wm54hzFWMzI1ndzM9HuteJi4vFR7VriPaz2XjjpK/yMlvXd", - "7ULalpHmXdR25+mOzVZ2n387UVipf9s9vF64yN3MppLyb0sEB5szCZsuJT+/x1k7HW0tamQzE+gZfQLz", - "nIc4RhFZkJin0E3fvBt0g0zEtjf4aHtbh2mxDuTg6+vB9dvr/wsAAP//kZMzgCqrAAA=", + "H4sIAAAAAAAC/+x97XITubboq6j6nl3HOdt2nA+Y4FNTt0ICTPYhkCKQffeecI3cLduadEs9ktqJofg7", + "DzCPOE9yS0tSf1ltd4AYcmHXrhqT1ufS0vpeSx+CkCcpZ4QpGQw/BDKckQTDz0OlcDi74HGWkFfk94xI", + "pf+cCp4SoSiBRgnPmBqlWM30vyIiQ0FTRTkLhsEZVjN0PSOCoDmMguSMZ3GExgRBPxIF3YDc4CSNSTAM", + "thOmtiOscNAN1CLVf5JKUDYNPnYDQXDEWbww00xwFqtgOMGxJN3atKd6aIQl0l160Ccfb8x5TDALPsKI", + "v2dUkCgY/lrextu8MR//RkKlJz+cYxrjcUyOyZyGZBkMYSYEYWoUCTonYhkUR+Z7vEBjnrEImXaow7I4", + 
"RnSCGGdkqwIMNqcR1ZDQTfTUwVCJjHggE8GaRjTynMDRCTKf0ckx6szITXWS3Z/GB0HzkAwnZHnQX7IE", + "s54Grl6WGx/alsd+vu8bmfIkyUZTwbN0eeSTl6enbxB8RCxLxkSURzzYzcejTJEpEXrANKQjHEWCSOnf", + "v/tYXttgMBgM8e5wMOgPfKucExZx0QhS89kP0p1BRFYM2QqkdvwlkL64ODk+OURHXKRcYOi7NFMNscvg", + "Ke+rjDbVU/Hh/+OMxpEH67lemCLRCKvlTUEnZNtQzpCiCZEKJ2nQDSZcJLpTEGFFevpLG1QPBcFrptMt", + "Wk22jPSZgekokU2juyaIMpTQOKaShJxFsjwHZerhfvNmSqhLhOAeWvFE/xklREo8JaijCZimogxJhVUm", + "EZVogmlMoq02IPPhsNnMb3yMaESYohNavWnBWDfo4XG4s7vnvcUJnpJRRKeWJ1SHP4a/Iz5BehyFoLV/", + "IxrlF+32AVMKMlme7ykQUZhEkAkRhIWfPV0q+JwwzAyx/w+YN/hf2wWz3LacchuAeVY0/9gNfs9IRkYp", + "l9SscImG2C8ajQDUCHr41wyfVp11CaOkwmL1/YAWX+AmmvW1gs25aVqnTEB47DCVm91IgJ7MCVM+KsSU", + "/VDd8XM+RTFlBNkWFr4TLpCe4OeYT7eCL7O3blCAdPlC63V/AkEyf2gYTX/rBoRliQZmzKdlaM4IFmpM", + "KsBsYBB2oGJ1jeA/q1yJ6hmMsSSj1VThjDJGIqRb2stqWqJMghy4tH24GVdUjeZESO89gmX9D1XItmgc", + "Kubh1YTGZDTDcmZWjKMI7iCOzyo78chCFeESp5qwuQGBR0ukODr/5XD3wUNkJ/DAUPJMhGYFyzsp9dbD", + "m7ZIYTHGcezFjWZ0uz3fXcYQPwac5xejiZ/kGOgQ01CvwJ6mHr4bpJmcmV9Aj/WqgJ9pMqDRK9a/33o2", + "fQREwsjgjRqJX8J6mZrDRtOYa5guUMbo71lFfO2jEy2JK6SJP41I1EUYPmgyjDPFe1PCiNB0Ck0ET5Ca", + "EVQSMVGH9Kf9LrrUUldPy5g9vNsbDHqDy6AqJMb7vWmaaVBgpYjQC/y/v+Le+8Pevwe9R2+Ln6N+7+3f", + "/8OHAG3lXo1Oep12nx1397vILbYsDNcXulpQXiFr+qiIOb4Tffdve3pHJ8sM3qw/4uEVEX3Kt2M6Flgs", + "ttmUspthjBWRqrqb1W3X7g/WtmJjbKq3fsut1UR/QLdOzK+JCDWljIlGENnVxJIq2UVYa49AZJDmZv+N", + "Qsw0zhrGzgUiLELXVM0QhnZVCCSLHk5pj5qlBt0gwTfPCZtq9f3h3hI+amTs2B+9t//l/rT1v70oKbKY", + "eJDxFc8UZVMEnw33nVGJijVQRZK17NZBN4tBxEooOzHddvKVYCHwwn9qbnGrTk8qTXwaj89cIM/+jp2C", + "LZFV2oAhYDCfwH6fnb3Z1lcyxVKqmeDZdFY+lV8dPXhbgkWDNOA22Q0iKq9GlI/GqW9NVF6hk+2XSFMr", + "FNOEqoI67QwGp4+35WWg//HA/WOrj46NXQWWrzfPhSWacoYFAdYdIc7Q0dkbhOOYh1YZmmgJa0KnmSBR", + "v6YNw+g+bCFs/hl8+AmbU8FZomWhORZUX56Kjv8hePHy+MnoyYuLYKhPMspCqzCfvXz1OhgGe4PBIPCx", + "uhlXaZxNR5K+JxVrU7D37HFQX8hhvn6UkIQLI1/aMVBnVr3ehv2imF4RdKnHM4ew86xOeHdhqiUgzBYp", + "EXMqfXrjL/k3fX6ZJOW7ZpC7esSSiDkR+dnBYfZLvDuMeRb1SlN2g99JAmhaLNTTyK+7taLqa8g1jlPK", + 
"SCO97n4rNPaai6uY46i384VJLCNKj728xRfmQ/UwLQKQ/PyD7pLczqJrGqnZKOLXTC/ZQ0vsF5Q3zgnK", + "jd4Jjv/648+L00Kg2Hk2Ti112dl98JnUpUZP9NBeZSHfSJb6t/Em9W/i4vSvP/50O/m6myBM42dUITpG", + "/65u5Z8zomZElLiMO2D9JyPtQXfk8KU0fUWhL9vDlwghnxMR44WHEO4MPJTwn4IquF+2H9IcCunOa8ig", + "Hs0xo2VCOPBTQs+iPGt6rO+3pcttVpIvZGf31P7cbUub52HqlCO7pN36cl6AUVuL5HMqVIZjjScVtuW1", + "cRvviYfNG+dMWdyw55/jA1ZVk2hbccuMDK6UZeHDL2EZKt8sYa3xJPkMlbnWFmZS8aRkrkSdmkJGq6pb", + "9cTmPO5FWGGgxy2ZhlnushE+WZihzKE0oeZoOvZo+RoDKUNTOsXjhaoKLDuD5aP3A9qN7wN1k4PKoAeJ", + "Rop7/C4OW06ONRxd2zZ2QHBnjRQfzSfUM3JOqQoNlEoU1rxhFmn1EL00pNY71kXXM6ppm0QOCMDQLk7L", + "gnT/kvWQXtwQHecT5MPmQ2qWDtYGGKLDRWkRFAxHaLzYQhhdnPbR63y1/ykRw4rOifPYzbBEY0IYyoAn", + "kgjmBz9keQGZ1BoPVfXuVgY3zr0t0Be4/dZHWoBLMEPXNI7B3pBgRUMwVoxpbT9gJDYHpWfSBIAVYt4l", + "K2OW9ZLWSf5qd8orMqVSiZozBXVePT3a29t7VCfSuw96g53ezoPXO4PhQP//3+39Ll/ef+kb67BKL6z5", + "p0xRjt6cHO9ajlCdR73fx48Obm6wevSQXstH75OxmP62hzfi4fSTp+PCboU6mSSi50ifxiqftapkFGqw", + "Rn2ykelWzlVn1l7FfszuXuuWd+GO9bkirCH89g7TOhFc68wobW5pP/qvWj4oML+kkFmbYUi91lGt8z8W", + "BF9pUd7DXzV7liPDd/wGg0wLr+MFIjdariUREpyriTRKWlVM2dn/af9g7+H+wWDg8X0uIzEP6SjUXKXV", + "ArRmGOOFVk51H9QB6TpC45iPq8j7YO/hwU+DRzu7bddhZNN2cMilKNcLdSxE/u4iWtyXyqJ2d396uLe3", + "N3j4cHe/1aqsgNdqUU4YrIgOP+39tL9zsLvfCgo+Wf+J80XXfWuRB0kP0zSmRrPpyZSEdEJDBN5spDug", + "TgJsieRidvVOjnE0ElYM9PIDhWnsAUPJ1GImsy1RR/P0JIsVTWNivsGBtJJ0YefHMJLPzEYZI2KUu+pv", + "MZL14K81R7i95E1ARInIOJtOjZukAN0plSBZFAIRJXE0NDd0LZ2D0ywW9rYJD+weWmLDc35NRC8mcxKX", + "kcCwI73YhAuCcjwxh1bZFWVzHNNoRFmaeVGiEZRPMwHypRkU4THPFMiS5sDKk4DfAXSEiSbX7dxevxAc", + "m9C2KiQKF7GjzfyqahfjV2uPww7iO4YTZzGrHUDiYYFHp8eGwYecKUwZESghCttAupKVGZwdQTfoaZyK", + "MEk4Q3wy+e/VducGFSC/IKuEyKOlaJw7ESAbPM6viOTxnEQowYxOiFTW41yZWc7w7oOHQxPrEpHJ/oOH", + "/X7fb51RYpFy6gs1eJJ/a3cU28a22SvG7MvZ553DHdjT2+zlQ3B2+PqXYBhsZ1JsxzzE8bYcUzYs/Tv/", + "Z/EBfph/jinz2uFbhUfRyVJYVOV40yyO7d+HeieMhDlCciA2a1Vcv/z9QqNmTN+TCHm9kwpPtSBuMO7z", + "3JCfEVBUxJeqUiBR2drUIqiIvl8ttUlr1oA2ds6MKRoX8VbL8tonRczJlQEIS8EHKWF5yEEcm18hZ3N9", + 
"K3zxBxUC7r4tHcY1F1eUTUcR9WDnP81HFFFBQgXuoPV3KNjGaboeFf3WoJymtY2lsp5UD3f56pT8U/T2", + "6uwvp//4/f/Is59+2/n9+cXFv+bP/nH8gv7rIj57+VnuotVO9K/qCV9pmgVlteIBb4sep1iFHsFnxqVq", + "gJr9ghRHie7cR0eYoTEZXrIeek4VETgeossAp7RvgdkPeXIZoA65waEyvRBnSA+FZgRHRGzpzmfGZaY7", + "f3A62cf6GNGC4YSGSFgg564YmY0jnmDKti7ZJbNjIbcRCbY//StCIU5VJog+ERRmIl6gscAhyQN7ism7", + "6ANO049bl0zNsELkRgm9gxQLlUfcuBngoO2qjG3RNicRmuM4IxKFAKhLlvOPSC9BD6KwmBLVzzVRkPdr", + "9r0GoHgNOlyoioviYND1nCPS7fRBxlQqwlDuSqQSkBd1nIPpYFC5/geDg/Vm7ByHVqAfYPdytolDyhb3", + "wyAwTG2I8WimVLo+fQTojbkj6JfXr880GPR/z5EbqIBFfsQmshRrvZhIY5xVMcgk1qe3FfgMsOZ0W27o", + "tWmsu8Vy/T6ewMTo9fNzpIhIKDP0uxNqcE60+k6MmZBKmWlUpBgdHp0+2eq3SJcB2ObrX3GOr/Md1qxR", + "ziO9bAGDHoXtRcO3i06Ou1qcsje0ELTA/P6UCxQbAlPc6yF6I0nVGQZHZSyF5iTjRREVY6j6ZbDlRkzr", + "lGKIXuXyHc6XkkcCFsjghizuJQx7yf6pEcP4BpZG71bXCl4Pq79Y0gaeAKyQtZ0AK24mBauvvwficOc5", + "qwcM3O5ulyMN9GR+1CjO/s4lkL3b6pK3jaqqOpRLAQR5YNXXjYhajm/CciQZTuWMq2aPHUauDSI3VCq5", + "HE3Uyse0HE1VZTYmTmqFi/5LxkWJjDFwl9W38cUjnr6mA+rbi7ZaGR/1uUFOVty6oxinxuvtiw+q3nTz", + "5y8brXQny6nEHfmIQZkrueiATw416gbU4xk9lJJOGYnQyVkRT1+YL9zwtT092u3vPDzo7wwG/Z1BG2NO", + "gsMVc58eHrWffLBr1NshHg/DaEgmn2FMsohtxAccX+OFRJdOwLsMjERZEiVL19YKga3M2csRXZ8WwFVn", + "aetCtG4TktWK3q9KdDuvpri1lhIe/PuzsuHIejHeXKJzaOx6jW5j5iQo5Fkcsf9UaKxvnhHsSWT1D0lU", + "kT0Il/UNu2L8mlW3bqxd+v7+nhGxQBenpxXbqCATm0jVYuM8TRvPgae3OobdNcLa2tWUIvA2EXVXp4Ql", + "DvTFY+zKhhzn7DNY18KgU8a75uAvGA6MNSZeLxpqzEB2dDTOFMqDoDXKHWk5CJWkKxPqBPrTKyNo6RGA", + "Z4T6S7zIBbCVnc+wRj/XN4V/re5xPsuUZu7QR84yhfS/YMl6C1aAXT2EweQhesGhj11pV5P/miRsmmMW", + "jRfLzetSc8fYdrQyp7ggEUxmr+UQPc2vYn6Z7eXtSGJ/GgphncLg8N4ySp8VWu1pBd3AQj3oBgaEQTdw", + "kNE/zQ7hFyw+6AZ2Id54kjOsZidswpfVtNuQLOt8cUpxqjcpIVkzIoySaKuPXlZol4UbuHNiSVCUERve", + "ZuAgsI0oxEZVTbGaAWJCR8qm1WDrpQnbEBKzhtXhjDCvbdhG5pF+h8FrkQGsjEoiES5cB630KypHExqT", + "NgMLMs1iLBC0b7dkuUhiyq7ajC4XyZjHNES6Q50hTXgc8+uR/iR/hr1stdqd7jAqrGQ1BmMWZ22k5kBq", + "8xZb+FnvcqvmdQk1N9g2/beh+ksbEdIbBfGUxgQlEP3yhtGbEqJXY4P2dwdNTraGQSvutWrYT6vwohon", + 
"sSjr4xSviEk8PswTFzwmmjRbXudcM1SX71D1wO77dgtWllUuxXyokl/RSaUu9qoK11IMVKuQK8esvVGF", + "OU9scDOtqIzhhvUL+idlU2RdK54n/tgXLSo3QevUCNLL8KpY7h4cPHq0t//g0W4r0FhtJ1eXG4xhTSqz", + "W8G2JGEtR6h6YrsPBvC/Wy3KKMz+JTUozdUFVfJ9PnlBH1dcnyJNvhaDn9+PFfWhipMUdrjKUe4ftIIW", + "dgWrPMqu+wQSRSmNs0MmEwKC2sjArVcspubkabWGEKc4pGrh8YPia7B7o7xJafSH7UIma4v1gNSOjfBE", + "ael/ToTMxkWQXcdNjv4LgSWphgsHreNJZTYewQgeo1t9VmhnHUVRTQUplB6ejeOSedpGiudFJHx21Osc", + "mOgay4puqH+HikTdUppu3YhgWrSvQuJwPS9EUphHfaHk/qIj5eOvHWc3KHOTAp3rEF/FxpqvoObK4IVq", + "o6Z5uKInTtTyxTYDFUVjNB/8tF6jcTnSe2UofSUsPGcot5+2ZJa9Tcd6DCqgh12DhUAxdrdyQr7DNUpz", + "U4JT4uoY1kJUqalHZXN+UKkx6pAkVQsXJOV0+q3bKfGH+YBe3PjCbq3Boy8RWPNmZSTN/ycpc2W7iZtk", + "rcVk6Uwb3dd+6fG47pMwapJNGaja0GuB0FKtKLu2qtimqXoJOpANHZlm9VjXWxTYbNJ6i5vj6qm5Cpvr", + "lLkGR7HJpyntrLSS5rMxRrPPrEZKpStD+okgsxrJ+lgMY6LROmGvnlMCUti1oKDiWAAZwGoQ5Frrsmq8", + "2pZ/im/yGUCBxBLVUp/NPkplQZ49hhT3Vy63gE7cELCMehL7488r0+qwavkwVtVtdWZZ78Wz9GcFRWu6", + "WzXkLObori4Nq0kXCTNB1eJcMwTrcSRYEHGYGTQETgGbgD8Xk0M80sePoDVOPMLjM8KIoCE6PDsBLEkw", + "w1N9ZBenKKYTEi7CmNhwkiXTJpSneHl00jNxcM7jCv4/qgAgLt/08OwEUt1slbVg0N/tQ2kUnhKGUxoM", + "g73+DiTzaTDAFrchzBh+WtuMvofAyU4iy3EfmyYatDLlTBrg7A4Gtap9uEgn2v5NGqODYa+tZTRToHTZ", + "iL4UJeEkAbv8j91gf7Bzq/WszQDyTfuG4UzNuKDvCSzzwS2B8EmTnjCj5LpCL8Q2LHA2GP5axdZf3358", + "2w1kliRYi4gGXAWsUi6bRBgiEUaMXNv489/4uI/OjYoA6UBF6WejwZNIkySMFBb96XuERTijc3LJLCU2", + "2VxYQLBdgjQFNqFOVTQzU5vTN1eYSPWYR4sadPPhtvVwII1UAXzrcoZ5aYK0oa6hjzqaDEgZcm/qJ2GY", + "qSKhzqQ+XpEFSgWZ0BtvuBIEbfgNwMf5N1cAs0rbtbhLWRhnUcEAq4UHvWkwkoSC+ITsf5y/fIHg4kEJ", + "Q2hWxJpAkQrKNNlEUQacBzClf8me4HCGDEWF/PrLgEaXQVGqbguoXyaJIWq9HpDkn6GGp5mmS6Of+309", + "lKH2Q/TrBzPKEF0GLE1Gil8Rdhl87KLShylVs2ycf3t7ybwbbtC5zyuwQh2DyVsu1l/vsHSpzS3ALELc", + "Yk68QBgVh1SW5ceUYbFoqvvIMzVyhYcbUiFssyJO9+FgsLXeNmy36uFzlYZaGvi4RNZ3vxhFs9R8maKV", + "ajxr+sFsnktk6PgGSOpjHLnwyx+8Yw3vsEJviStAfys5bH+g0UeDvjExfukaaYdSoI60p1jghCgiJMzr", + "Q4uTY62z6387Tw4oqUYFrCJvtwSeuiT4dgmx9xtrrObVSgEX9jeAfzBvkcQK8z7a1Lw4NiVU8rrv9wod", + 
"4bAcInb9Yuszor4FjBtsipS6XPuviL/3BX+eESsJF0CrUbNtMnfmR7+/WgmCE2lHMY21EHwOa+qdE6YQ", + "VPeWfftfJ59BVM67mE/fDZEBYWxrm0sjExXGQ80ULSyhk8kKzPvZZNlwhtmUSNQx/POvP/509Zn/+uNP", + "W5/5rz/+hOu+bV8bgOHyyuLvhuh/CEl7OKZz4jYj9RbInIgF2hvYGnnwyZN6Ky/ZJXtFVCaYzGM39L4A", + "JmZAyIlgsB/KMiKRBBBCbaWJDSowtgmPbuDusgHlRm90d0lFsjsobUBzRYcD4KGijCqKY8QzZcogwDog", + "RK9YiNlzUJ68bmZZMrytpy+K3CiDvT2zwFsSGFOZ33PvTLF6MybqnJ8/2eojEPcNVkDgCOgNxTBWE+j/", + "oEnraZKhKFWCAlA2tKlUsrjRSHNs22zCSmOL093CTCOgChnRGqvbzA+xu4XJxg83Z77x2VCOXbGpZiPK", + "p+/XV7i/lU755c7Z4d4yzG0ltQJkX0ObRB1bBCdPUqyUa/taSL8RAlyq8pdTYcRNauTGNJwjziYxDRXq", + "ubXYYu651lNFkPtCDl7ZVSPs9jWB3NaiEn2ZVWxXAocamUbtPbzNcI/6I3y3YCP5rkp19X5wknWoc0xl", + "yHXfErb0QpyWnv2TxT0tY9E6284x/D1nOSsF8/x5BVQ8ibchK4+dOmN13rABonhcI4hfkRDWkv5KhWbv", + "Eza/yU/RFdZcYQT6tlBzsDkpaNMGIR+a3yeLUFQDm6aCs7xoYBN62bKCd3jQdgbPxs+JcLfaLNQkmxXb", + "Ml1ROCPhldmQrf+6SiI4cSVi714OMLURb8H97fJ/sPsWimMBq1XK4onNQLw7XbHyStiG3Y8WwTxABvf+", + "uHhIk0Sog+WChVvflQdyI5yhXq/1Ht2ksyyOnSF+ToQqKkSW6en2By0ftJCT3W1bKYu8efW8R1jIIZjD", + "gK5RIHEF4b6stGwOzGzlB5q00a8AVA4xmoXRzzh/E91ZPJL4t92ntrrL33afmvouf9s7LN5KvBtkGWyK", + "NG9aer3HyKeFV1oFGpAmU7ZtnbSXt9qIwGfrY95G5MsX+EPqayP1lcG1UvDLS5XeoehXfUd1w36CHNl8", + "0IZPLv7sOxP5Nmt6shhpoxtmVFZt8bZIBLy0aasumlec7mGAHM0xrkx/W9pQiwu5UjpwqHty3LUFNU0Z", + "zDyyeEMWVbeOjUuJdt7Nm1MPkzGdZjyT5Up/UD+VyOLNkgoBvm/ya8GeGyXYbxhLB5tkHRsXUH/g/R2J", + "zvUDNcTbuEXWCc+u1WaE58JV0156div8IT23kp5L4FotPecF4u5SfK4+ZL9x+dnhmw/gNvf1e5Sg71va", + "BrM27pKzt0LjWguoRVne1by/eIN2447+fPLNy6WuHtT9DD/lJuA8cpJgwWuaRcFvDR8Gm6V9mxcB7zOK", + "PSs/BeEXtkzuRcyn6zMv8pFcmoEn9eKSuXcj3pl0yHcoR1SkOJIkJqGyb1PHHN5eNgHuJksDp+m7PO9y", + "a4ieQXhnORMUJu9IIiiOUciZ5LGpQfpuniTvhsu5/Benp9DJZGCYrP13w/y96PyOSd2qnFahdxFjqdAL", + "myzS0QcueBybt0TfaXiW9rdlEy6KFNVL5ku+YOTaDkgn6F0pD+NdQyKGQ8Ln+pS+0s3vNtcFNntRHAkA", + "nHnTg8Ajkb4kDPuApScFY2fgLUnTMh3ELOOOs0G6y+93TvO87woq4zRti752mYDF8yRZgcOoU7ycgKSK", + "eKb+LlVEhHnqyWJ3E3KjDg7NPxS+Mg8TVd5yMJVxfaCyqc1eUAXm+TVXUNf8a54kgXlYIsG+Armfn1ZT", + 
"H3BZH9MnU8qd+cEzbpMVUyX2pbSYGuewlZmhCoRXeXtlGnz3kosrYf2V0XDzrojSKigU12fReAFnW9QG", + "v185AXCQxc6A39l9ee+I+9Z4R2xJ8e/+jhT48Z3fkpALeCtPutcu7k/wVknjKF33DjxEUBT47zqt9+L0", + "dKvp0pjn1hqvjPihDts4yu+ep8DbDPfvtpjHdnC+gVXGQn0hVKOO7nRWykwZJK1qmPf88XJZS3jDQC6k", + "IolR2CdZDIltELVu6wPg8hsNXUSVhGrFXTBZlerzX7IxmWh+mBKh59bdoWxWoXv41NpzhfPre2bu4Leh", + "10KlS1DlsGqC2tLz067IpU93yutyfvKSnoKiWn0jQqJOTK/MIydoLlGsf2yt1HTNAxJfuvrBp9+s/IkU", + "X1arwdkcmb8HCndSI2vuuZ97R9aekfJlcfQHDtpH1ni6is3z9AeXt28P/ZCJ76VMDI6efDedqcAhcFxp", + "X5fyy7/2mZbtD+bHyTp3ocLh7MKVCP82WKmtKLxuGrfBe3Ep7Z4iYlJ6N38neV70+Z6mbWjAuS2A6aTs", + "+PRzAVNM/nvD7i8f41KG460iXDZ6t1y6/DdztzbN+ewaXLh2GR735ZobTHM7geLFZdVWlB+ZWanQukdH", + "4MUj1y1/radbfoLJVOfLFdTisYD8tZf+Jcuft3HVAdHR2ZuufVK3C4/2mhHsoyp95H+FSCIsiHuK6JIp", + "jkIch1mMFUH5czzmCS3Z4NZ9VXqi6s7uWzGJ56Dzd4hk/k7NfdIx/DgBp1d+CAcwrvQebWNsqX2adiOR", + "pZaZ3SKu1O3gRwhei6jSErDalN03zfvoPEtTLpRE6prDe5QSfPlQZHHMo8UQ5f0YMk8fWRJn36yx9edJ", + "BO+G6L6nlVr8pQFcz1SQXspTIB2RSdmxMDbi0XKV/4ZC/rl8dHfhsXXRoXvbtwFKa6meR3WPKC+8b2vB", + "a9haeLkhWlV89z10kj8+EGZS8cSNe3KMOjhTvDclTAO3qPOfCj6nUf3Zt2/kjadTfEOTLMkfOn32GJ6N", + "FCbUAx4QhkAjh1PkJiQkkhD5sXXL96CWn4KyZ/FpNe+/HBFz1LRRpvyKMdNFcUJ9xFrGdEiuOEcxFlOy", + "9d1kJtq7ViQmnhzX0hLvYbT33GFfIWe0jO9up9K21DTvIrY7N3dsNrL74tvRwkr12+5heuE8FzObQsq/", + "LRQcbI4lbDqU/OIeW+20tjWvgc0MoEf0IcxzHuIYRWROYp7Ci4embdANMhHb99uG29taTYu1Ijc8GBwM", + "go9vP/6/AAAA///Bm92RVcUAAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/lib/paths/paths.go b/lib/paths/paths.go index 30097731..06aaf3e9 100644 --- a/lib/paths/paths.go +++ b/lib/paths/paths.go @@ -284,3 +284,40 @@ func (p *Paths) IngressesDir() string { func (p *Paths) IngressMetadata(id string) string { return filepath.Join(p.IngressesDir(), id+".json") } + +// Build path methods + +// BuildsDir returns the root builds directory. 
+func (p *Paths) BuildsDir() string { + return filepath.Join(p.dataDir, "builds") +} + +// BuildDir returns the directory for a specific build. +func (p *Paths) BuildDir(id string) string { + return filepath.Join(p.BuildsDir(), id) +} + +// BuildMetadata returns the path to build metadata.json. +func (p *Paths) BuildMetadata(id string) string { + return filepath.Join(p.BuildDir(id), "metadata.json") +} + +// BuildLogs returns the path to build logs directory. +func (p *Paths) BuildLogs(id string) string { + return filepath.Join(p.BuildDir(id), "logs") +} + +// BuildLog returns the path to the main build log file. +func (p *Paths) BuildLog(id string) string { + return filepath.Join(p.BuildLogs(id), "build.log") +} + +// BuildSourceDir returns the path to the source directory for a build. +func (p *Paths) BuildSourceDir(id string) string { + return filepath.Join(p.BuildDir(id), "source") +} + +// BuildConfig returns the path to the build config file (passed to builder VM). +func (p *Paths) BuildConfig(id string) string { + return filepath.Join(p.BuildDir(id), "config.json") +} diff --git a/lib/providers/providers.go b/lib/providers/providers.go index 49a35185..c2a46f7a 100644 --- a/lib/providers/providers.go +++ b/lib/providers/providers.go @@ -8,6 +8,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/onkernel/hypeman/cmd/api/config" + "github.com/onkernel/hypeman/lib/builds" "github.com/onkernel/hypeman/lib/devices" "github.com/onkernel/hypeman/lib/hypervisor" "github.com/onkernel/hypeman/lib/images" @@ -212,3 +213,40 @@ func ProvideIngressManager(p *paths.Paths, cfg *config.Config, instanceManager i resolver := instances.NewIngressResolver(instanceManager) return ingress.NewManager(p, ingressConfig, resolver, otelLogger), nil } + +// ProvideBuildManager provides the build manager +func ProvideBuildManager(p *paths.Paths, cfg *config.Config, instanceManager instances.Manager, volumeManager volumes.Manager, log *slog.Logger) (builds.Manager, error) { + 
buildConfig := builds.Config{ + MaxConcurrentBuilds: cfg.MaxConcurrentSourceBuilds, + BuilderImage: cfg.BuilderImage, + RegistryURL: cfg.RegistryURL, + DefaultTimeout: cfg.BuildTimeout, + RegistrySecret: cfg.JwtSecret, // Use same secret for registry tokens + } + + // Apply defaults if not set + if buildConfig.MaxConcurrentBuilds == 0 { + buildConfig.MaxConcurrentBuilds = 2 + } + if buildConfig.BuilderImage == "" { + buildConfig.BuilderImage = "hypeman/builder:latest" + } + if buildConfig.RegistryURL == "" { + buildConfig.RegistryURL = "localhost:8080" + } + if buildConfig.DefaultTimeout == 0 { + buildConfig.DefaultTimeout = 600 + } + + // Configure secret provider (use NoOpSecretProvider as fallback to avoid nil panics) + var secretProvider builds.SecretProvider + if cfg.BuildSecretsDir != "" { + secretProvider = builds.NewFileSecretProvider(cfg.BuildSecretsDir) + log.Info("build secrets enabled", "dir", cfg.BuildSecretsDir) + } else { + secretProvider = &builds.NoOpSecretProvider{} + } + + meter := otel.GetMeterProvider().Meter("hypeman") + return builds.NewManager(p, buildConfig, instanceManager, volumeManager, secretProvider, log, meter) +} diff --git a/lib/registry/registry.go b/lib/registry/registry.go index 9f43309b..d3ef7002 100644 --- a/lib/registry/registry.go +++ b/lib/registry/registry.go @@ -67,9 +67,17 @@ func (r *Registry) Handler() http.Handler { if req.Method == http.MethodPut { matches := manifestPutPattern.FindStringSubmatch(req.URL.Path) if matches != nil { - repo := matches[1] + pathRepo := matches[1] reference := matches[2] + // Include the host to form the full repository path + // This preserves the registry host (e.g., "10.102.0.1:8083/builds/xxx") + // instead of normalizing to docker.io + fullRepo := pathRepo + if req.Host != "" { + fullRepo = req.Host + "/" + pathRepo + } + body, err := io.ReadAll(req.Body) req.Body.Close() if err != nil { @@ -94,7 +102,7 @@ func (r *Registry) Handler() http.Handler { r.handler.ServeHTTP(wrapper, req) if 
wrapper.statusCode == http.StatusCreated { - go r.triggerConversion(repo, reference, digest) + go r.triggerConversion(fullRepo, reference, digest) } return } diff --git a/lib/system/init/mount.go b/lib/system/init/mount.go index 3dcee32a..fa1fc89d 100644 --- a/lib/system/init/mount.go +++ b/lib/system/init/mount.go @@ -14,6 +14,7 @@ import ( // This function mounts: // - /dev/pts (pseudo-terminals) // - /dev/shm (shared memory) +// - /sys/fs/cgroup (cgroup2 for container runtimes like runc) func mountEssentials(log *Logger) error { // Create mount points for pts and shm (proc/sys/dev already exist from wrapper) for _, dir := range []string{"/dev/pts", "/dev/shm"} { @@ -32,6 +33,19 @@ func mountEssentials(log *Logger) error { return fmt.Errorf("chmod /dev/shm: %w", err) } + // Mount cgroup2 for container runtimes (runc/BuildKit require cgroups) + // This is safe because VMs are already isolated by the hypervisor, and + // cgroup v2 has better security than v1 (no release_agent escape vector) + if err := os.MkdirAll("/sys/fs/cgroup", 0755); err != nil { + return fmt.Errorf("mkdir /sys/fs/cgroup: %w", err) + } + if err := syscall.Mount("cgroup2", "/sys/fs/cgroup", "cgroup2", 0, ""); err != nil { + // Non-fatal: some kernels may not have cgroup2 support + log.Info("mount", "cgroup2 mount failed (non-fatal): "+err.Error()) + } else { + log.Info("mount", "mounted cgroup2") + } + log.Info("mount", "mounted devpts/shm") // Set up serial console now that /dev is mounted @@ -99,7 +113,7 @@ func bindMountsToNewRoot(log *Logger) error { newroot := "/overlay/newroot" // Create mount points in new root - for _, dir := range []string{"proc", "sys", "dev", "dev/pts"} { + for _, dir := range []string{"proc", "sys", "sys/fs/cgroup", "dev", "dev/pts"} { if err := os.MkdirAll(newroot+"/"+dir, 0755); err != nil { return fmt.Errorf("mkdir %s: %w", dir, err) } @@ -109,6 +123,7 @@ func bindMountsToNewRoot(log *Logger) error { mounts := []struct{ src, dst string }{ {"/proc", newroot + 
"/proc"}, {"/sys", newroot + "/sys"}, + {"/sys/fs/cgroup", newroot + "/sys/fs/cgroup"}, {"/dev", newroot + "/dev"}, {"/dev/pts", newroot + "/dev/pts"}, } diff --git a/openapi.yaml b/openapi.yaml index fac72d3a..bc20590f 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -683,6 +683,121 @@ components: nullable: true example: "nvidia" + BuildStatus: + type: string + enum: [queued, building, pushing, ready, failed, cancelled] + description: Build job status + + BuildPolicy: + type: object + properties: + timeout_seconds: + type: integer + description: Maximum build duration (default 600) + default: 600 + memory_mb: + type: integer + description: Memory limit for builder VM (default 2048) + default: 2048 + cpus: + type: integer + description: Number of vCPUs for builder VM (default 2) + default: 2 + network_mode: + type: string + enum: [isolated, egress] + description: Network access during build + default: egress + + BuildProvenance: + type: object + properties: + base_image_digest: + type: string + description: Pinned base image digest used + source_hash: + type: string + description: SHA256 hash of source tarball + lockfile_hashes: + type: object + additionalProperties: + type: string + description: Map of lockfile names to SHA256 hashes + buildkit_version: + type: string + description: BuildKit version used + timestamp: + type: string + format: date-time + description: Build completion timestamp + + BuildEvent: + type: object + required: [type, timestamp] + properties: + type: + type: string + enum: [log, status, heartbeat] + description: Event type + timestamp: + type: string + format: date-time + description: Event timestamp + content: + type: string + description: Log line content (only for type=log) + status: + $ref: "#/components/schemas/BuildStatus" + description: New build status (only for type=status) + + Build: + type: object + required: [id, status, created_at] + properties: + id: + type: string + description: Build job identifier + example: "build-abc123" + 
status: + $ref: "#/components/schemas/BuildStatus" + queue_position: + type: integer + description: Position in build queue (only when status is queued) + nullable: true + image_digest: + type: string + description: Digest of built image (only when status is ready) + nullable: true + image_ref: + type: string + description: Full image reference (only when status is ready) + nullable: true + error: + type: string + description: Error message (only when status is failed) + nullable: true + provenance: + $ref: "#/components/schemas/BuildProvenance" + created_at: + type: string + format: date-time + description: Build creation timestamp + started_at: + type: string + format: date-time + description: Build start timestamp + nullable: true + completed_at: + type: string + format: date-time + description: Build completion timestamp + nullable: true + duration_ms: + type: integer + format: int64 + description: Build duration in milliseconds + nullable: true + ResourceStatus: type: object required: [type, capacity, effective_limit, allocated, available, oversub_ratio] @@ -1936,4 +2051,207 @@ paths: schema: $ref: "#/components/schemas/Error" + /builds: + get: + summary: List builds + operationId: listBuilds + security: + - bearerAuth: [] + responses: + 200: + description: List of builds + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Build" + 401: + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + 500: + description: Internal server error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + post: + summary: Create a new build + description: | + Creates a new build job. Source code should be uploaded as a tar.gz archive + in the multipart form data. 
+ operationId: createBuild + security: + - bearerAuth: [] + requestBody: + required: true + content: + multipart/form-data: + schema: + type: object + required: + - source + properties: + source: + type: string + format: binary + description: Source tarball (tar.gz) containing application code and optionally a Dockerfile + dockerfile: + type: string + description: Dockerfile content. Required if not included in the source tarball. + base_image_digest: + type: string + description: Optional pinned base image digest + cache_scope: + type: string + description: Tenant-specific cache key prefix + timeout_seconds: + type: integer + description: Build timeout (default 600) + secrets: + type: string + description: | + JSON array of secret references to inject during build. + Each object has "id" (required) for use with --mount=type=secret,id=... + Example: [{"id": "npm_token"}, {"id": "github_token"}] + responses: + 202: + description: Build created and queued + content: + application/json: + schema: + $ref: "#/components/schemas/Build" + 400: + description: Bad request + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + 401: + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + 500: + description: Internal server error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + + /builds/{id}: + get: + summary: Get build details + operationId: getBuild + security: + - bearerAuth: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + description: Build ID + responses: + 200: + description: Build details + content: + application/json: + schema: + $ref: "#/components/schemas/Build" + 404: + description: Build not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + 500: + description: Internal server error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + delete: + summary: Cancel 
build + operationId: cancelBuild + security: + - bearerAuth: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + description: Build ID + responses: + 204: + description: Build cancelled + 404: + description: Build not found + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + 409: + description: Build already completed + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + 500: + description: Internal server error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /builds/{id}/events: + get: + summary: Stream build events (SSE) + description: | + Streams build events as Server-Sent Events. Events include: + - `log`: Build log lines with timestamp and content + - `status`: Build status changes (queued→building→pushing→ready/failed) + - `heartbeat`: Keep-alive events sent every 30s to prevent connection timeouts + + Returns existing logs as events, then continues streaming if follow=true. + operationId: getBuildEvents + security: + - bearerAuth: [] + parameters: + - name: id + in: path + required: true + schema: + type: string + description: Build ID + - name: follow + in: query + required: false + schema: + type: boolean + default: false + description: Continue streaming new events after initial output + responses: + 200: + description: Event stream (SSE). Each event is a JSON BuildEvent object. 
+      content:
+        text/event-stream:
+          schema:
+            $ref: "#/components/schemas/BuildEvent"
+      404:
+        description: Build not found
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/Error"
+      500:
+        description: Internal server error
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/Error"
diff --git a/scripts/e2e-build-test.sh b/scripts/e2e-build-test.sh
new file mode 100755
index 00000000..f454063b
--- /dev/null
+++ b/scripts/e2e-build-test.sh
@@ -0,0 +1,516 @@
+#!/bin/bash
+# E2E Build System Test
+# Usage: ./scripts/e2e-build-test.sh [--skip-run]
+#
+# Prerequisites:
+#   - API server running (make dev)
+#   - Generic builder image imported into Hypeman registry
+#   - .env file configured
+#   - curl and jq available on PATH
+#
+# Options:
+#   --skip-run   Skip running the built image (only test build)
+#
+# Environment variables:
+#   API_URL       - API endpoint (default: http://localhost:8083)
+#   BUILDER_IMAGE - Builder image to check (default: hypeman/builder:latest)
+
+set -e
+
+# Configuration
+API_URL="${API_URL:-http://localhost:8083}"
+TIMEOUT_POLLS=60
+POLL_INTERVAL=5
+SKIP_RUN=false
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+  case $1 in
+    --skip-run)
+      SKIP_RUN=true
+      shift
+      ;;
+    *)
+      echo "Unknown option: $1"
+      exit 1
+      ;;
+  esac
+done
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Log helpers write to stderr so functions can return values via stdout.
+log()   { echo -e "${GREEN}[INFO]${NC} $1" >&2; }
+warn()  { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
+error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
+
+# Check prerequisites: required tools, API reachability, builder image.
+check_prerequisites() {
+  local tool
+  log "Checking prerequisites..."
+
+  # curl and jq are used by every step below; fail fast if either is missing.
+  for tool in curl jq; do
+    if ! command -v "$tool" &> /dev/null; then
+      error "Required tool not found: $tool"
+      exit 1
+    fi
+  done
+
+  # Check if API is reachable
+  if ! curl -s "$API_URL/health" | grep -q "ok"; then
+    error "API server not reachable at $API_URL"
+    error "Start the server with: make dev"
+    exit 1
+  fi
+  log "✓ API server is running"
+
+  # Check if generic builder image exists locally (warning only: the
+  # server may already have it imported even if it is absent here)
+  BUILDER_IMAGE="${BUILDER_IMAGE:-hypeman/builder:latest}"
+  if ! docker images "$BUILDER_IMAGE" --format "{{.Repository}}" | grep -q .; then
+    warn "Builder image not found locally"
+    warn "Build it with: docker build -t hypeman/builder:latest -f lib/builds/images/generic/Dockerfile ."
+  else
+    log "✓ Builder image available locally"
+  fi
+}
+
+# Generate JWT token. Prints the token on stdout (empty string on failure).
+generate_token() {
+  local TOKEN
+  cd "$(dirname "$0")/.."
+
+  # Try using make gen-jwt
+  if command -v make &> /dev/null && [ -f Makefile ]; then
+    TOKEN=$(make gen-jwt 2>/dev/null | tail -1)
+    if [ -n "$TOKEN" ] && [ "$TOKEN" != "make:" ]; then
+      echo "$TOKEN"
+      return
+    fi
+  fi
+
+  # Fallback: run directly
+  if [ -f ./bin/godotenv ]; then
+    TOKEN=$(./bin/godotenv -f .env go run ./cmd/gen-jwt -user-id e2e-test 2>/dev/null | tail -1)
+    echo "$TOKEN"
+    return
+  fi
+
+  echo ""
+}
+
+# Create test source with Dockerfile.
+# The generic builder requires a Dockerfile to be provided.
+# Prints the path of the created tarball on stdout.
+create_test_source() {
+  local TEST_DIR TARBALL
+  TEST_DIR=$(mktemp -d)
+
+  # Application code
+  cat > "$TEST_DIR/package.json" << 'EOF'
+{
+  "name": "e2e-test-app",
+  "version": "1.0.0",
+  "main": "index.js",
+  "scripts": {
+    "start": "node index.js"
+  },
+  "dependencies": {}
+}
+EOF
+
+  cat > "$TEST_DIR/index.js" << 'EOF'
+console.log("E2E Build Test - Success!");
+console.log("Built at:", new Date().toISOString());
+EOF
+
+  # Dockerfile is REQUIRED for the generic builder
+  # Users control their runtime version here
+  cat > "$TEST_DIR/Dockerfile" << 'EOF'
+FROM node:20-alpine
+WORKDIR /app
+COPY package.json index.js ./
+CMD ["node", "index.js"]
+EOF
+
+  # Create tarball (no -v: the verbose listing was discarded anyway)
+  TARBALL=$(mktemp --suffix=.tar.gz)
+  tar -czf "$TARBALL" -C "$TEST_DIR" .
+
+  rm -rf "$TEST_DIR"
+  echo "$TARBALL"
+}
+
+# Submit build. Prints the new build ID on stdout; exits the script on failure.
+submit_build() {
+  local token="$1"
+  local source="$2"
+  local DOCKERFILE_CONTENT RESPONSE BUILD_ID
+
+  log "Submitting build..."
+
+  # Extract Dockerfile from source tarball
+  DOCKERFILE_CONTENT=$(tar -xzf "$source" -O ./Dockerfile 2>/dev/null || echo "")
+
+  if [ -n "$DOCKERFILE_CONTENT" ]; then
+    # Dockerfile found in source - pass it explicitly for reliability
+    RESPONSE=$(curl -s -X POST "$API_URL/builds" \
+      -H "Authorization: Bearer $token" \
+      -F "source=@$source" \
+      -F "dockerfile=$DOCKERFILE_CONTENT" \
+      -F "cache_scope=e2e-test" \
+      -F "timeout_seconds=300")
+  else
+    # No Dockerfile in source - will fail if not provided
+    error "No Dockerfile found in source tarball"
+    exit 1
+  fi
+
+  BUILD_ID=$(echo "$RESPONSE" | jq -r '.id // empty')
+
+  if [ -z "$BUILD_ID" ]; then
+    error "Failed to submit build"
+    # Diagnostics go to stderr: stdout is captured by the caller as the ID
+    echo "$RESPONSE" | jq . >&2
+    exit 1
+  fi
+
+  log "Build submitted: $BUILD_ID"
+  echo "$BUILD_ID"
+}
+
+# Get build events/logs (one-shot fetch of the SSE endpoint, no follow).
+get_logs() {
+  local token="$1"
+  local build_id="$2"
+
+  log "Fetching build events..."
+  curl -s "$API_URL/builds/$build_id/events" \
+    -H "Authorization: Bearer $token"
+}
+
+# Import an image into Hypeman's image store and wait for it to be ready.
+# Prints the imported (possibly normalized) image name on stdout.
+# Returns non-zero on failure or timeout.
+import_image() {
+  local token="$1"
+  local image_ref="$2"
+  local RESPONSE IMAGE_NAME IMAGE_STATUS IMPORT_BUILD_ID
+  local STATUS IMAGE_ERROR FOUND_NAME i
+
+  log "Importing image into Hypeman..."
+  log "  Image: $image_ref"
+
+  # Request image import
+  RESPONSE=$(curl -s -X POST "$API_URL/images" \
+    -H "Authorization: Bearer $token" \
+    -H "Content-Type: application/json" \
+    -d "{\"name\": \"$image_ref\"}")
+
+  IMAGE_NAME=$(echo "$RESPONSE" | jq -r '.name // empty')
+  IMAGE_STATUS=$(echo "$RESPONSE" | jq -r '.status // empty')
+
+  if [ -z "$IMAGE_NAME" ]; then
+    error "Failed to import image"
+    # stderr: this function's stdout is command-substituted by the caller
+    echo "$RESPONSE" | jq . >&2
+    return 1
+  fi
+
+  log "Image import started: $IMAGE_NAME (status: $IMAGE_STATUS)"
+
+  # Extract the build ID for filtering (last path component before the tag)
+  # e.g., "10.102.0.1:8083/builds/abc123:latest" -> "abc123"
+  # Kept local so it cannot clobber main's BUILD_ID.
+  IMPORT_BUILD_ID=$(echo "$IMAGE_NAME" | sed -E 's|.*/([^/:]+)(:[^/]*)?$|\1|')
+
+  # Wait for image to be ready.
+  # The API may normalize image names (e.g., 10.102.0.1:8083/builds/xxx ->
+  # docker.io/builds/xxx), so filter the list by build ID rather than by
+  # the exact name we submitted.
+  log "Waiting for image conversion..."
+  for i in $(seq 1 60); do
+    RESPONSE=$(curl -s "$API_URL/images" \
+      -H "Authorization: Bearer $token" | \
+      jq --arg buildid "$IMPORT_BUILD_ID" '[.[] | select(.name | contains($buildid))] | .[0] // empty')
+
+    if [ -z "$RESPONSE" ] || [ "$RESPONSE" = "null" ]; then
+      echo -ne "\r  Waiting for image... (poll $i/60)..." >&2
+      sleep 2
+      continue
+    fi
+
+    STATUS=$(echo "$RESPONSE" | jq -r '.status // empty')
+    IMAGE_ERROR=$(echo "$RESPONSE" | jq -r '.error // empty')
+    FOUND_NAME=$(echo "$RESPONSE" | jq -r '.name // empty')
+
+    case "$STATUS" in
+      "ready")
+        echo "" >&2 # Clear the progress line
+        log "✓ Image is ready: $FOUND_NAME"
+        # Export the actual image name for use in instance creation (stdout)
+        echo "$FOUND_NAME"
+        return 0
+        ;;
+      "failed")
+        echo "" >&2 # Clear the progress line
+        error "Image import failed: $IMAGE_ERROR"
+        if echo "$IMAGE_ERROR" | grep -q "mediatype"; then
+          error "  Hint: The builder may be pushing Docker-format images instead of OCI format."
+          error "  Ensure the builder image has been updated with oci-mediatypes=true"
+        fi
+        return 1
+        ;;
+      "pending"|"pulling"|"converting")
+        echo -ne "\r  Status: $STATUS (poll $i/60)..." >&2
+        ;;
+      *)
+        warn "Unknown status: $STATUS"
+        ;;
+    esac
+
+    sleep 2
+  done
+  echo "" >&2
+
+  error "Image import timed out"
+  return 1
+}
+
+# Create and run an instance from the built image, then exec a smoke test
+# inside it. Returns non-zero on any failure; always cleans up the instance.
+run_built_image() {
+  local token="$1"
+  local image_ref="$2"
+  local IMPORTED_NAME RESPONSE INSTANCE_ID STATE STATE_LOWER i
+  local EXEC_RESPONSE EXEC_EXIT_CODE EXEC_STDOUT
+
+  log "Running built image as VM..."
+  log "  Image: $image_ref"
+
+  # First, import the image into Hypeman's image store.
+  # `if ! VAR=$(...)` checks the function's status directly instead of
+  # relying on $? after the assignment.
+  if ! IMPORTED_NAME=$(import_image "$token" "$image_ref"); then
+    error "Failed to import image"
+    error ""
+    error "  This typically happens when the builder outputs Docker-format images"
+    error "  instead of OCI format. The builder agent needs oci-mediatypes=true"
+    error "  in the BuildKit output configuration."
+    error ""
+    error "  To fix: rebuild the builder image and deploy it:"
+    error "    make build-builder"
+    error "    docker push /builder:latest"
+    return 1
+  fi
+
+  # Use the imported image name (may differ from the original reference)
+  if [ -n "$IMPORTED_NAME" ]; then
+    log "Using imported image: $IMPORTED_NAME"
+    image_ref="$IMPORTED_NAME"
+  fi
+
+  # Create instance
+  log "Creating instance..."
+  RESPONSE=$(curl -s -X POST "$API_URL/instances" \
+    -H "Authorization: Bearer $token" \
+    -H "Content-Type: application/json" \
+    -d "{
+      \"image\": \"$image_ref\",
+      \"name\": \"e2e-test-instance\",
+      \"vcpus\": 1,
+      \"memory\": \"256M\"
+    }")
+
+  INSTANCE_ID=$(echo "$RESPONSE" | jq -r '.id // empty')
+
+  if [ -z "$INSTANCE_ID" ]; then
+    error "Failed to create instance"
+    echo "$RESPONSE" | jq . >&2
+    return 1
+  fi
+
+  log "Instance created: $INSTANCE_ID"
+
+  # Wait for instance to be running
+  log "Waiting for instance to start..."
+  STATE=""
+  STATE_LOWER=""
+  for i in $(seq 1 30); do
+    RESPONSE=$(curl -s "$API_URL/instances/$INSTANCE_ID" \
+      -H "Authorization: Bearer $token")
+
+    STATE=$(echo "$RESPONSE" | jq -r '.state')
+
+    # Compare case-insensitively (API may return "Running" or "running")
+    STATE_LOWER=$(echo "$STATE" | tr '[:upper:]' '[:lower:]')
+
+    case "$STATE_LOWER" in
+      "running")
+        log "✓ Instance is running"
+        break
+        ;;
+      "stopped"|"shutdown"|"failed")
+        error "Instance failed to start (state: $STATE)"
+        echo "$RESPONSE" | jq . >&2
+        cleanup_instance "$token" "$INSTANCE_ID"
+        return 1
+        ;;
+      *)
+        echo -ne "\r  State: $STATE (poll $i/30)..." >&2
+        ;;
+    esac
+
+    sleep 2
+  done
+  echo "" >&2
+
+  if [ "$STATE_LOWER" != "running" ]; then
+    error "Instance did not start in time (final state: $STATE)"
+    cleanup_instance "$token" "$INSTANCE_ID"
+    return 1
+  fi
+
+  # Give the container a moment to run
+  sleep 2
+
+  # Try to exec into the instance and run a simple command
+  log "Executing test command in instance..."
+  EXEC_RESPONSE=$(curl -s -X POST "$API_URL/instances/$INSTANCE_ID/exec" \
+    -H "Authorization: Bearer $token" \
+    -H "Content-Type: application/json" \
+    -d '{
+      "command": ["node", "-e", "console.log(\"E2E VM test passed!\")"],
+      "timeout_seconds": 30
+    }')
+
+  EXEC_EXIT_CODE=$(echo "$EXEC_RESPONSE" | jq -r '.exit_code // -1')
+  EXEC_STDOUT=$(echo "$EXEC_RESPONSE" | jq -r '.stdout // ""')
+
+  if [ "$EXEC_EXIT_CODE" = "0" ]; then
+    log "✅ Instance exec succeeded!"
+    log "  Output: $EXEC_STDOUT"
+  else
+    warn "Instance exec returned exit code: $EXEC_EXIT_CODE"
+    echo "$EXEC_RESPONSE" | jq . >&2
+  fi
+
+  # Cleanup
+  cleanup_instance "$token" "$INSTANCE_ID"
+
+  return 0
+}
+
+# Cleanup instance: best-effort stop and delete (errors ignored).
+cleanup_instance() {
+  local token="$1"
+  local instance_id="$2"
+
+  log "Cleaning up instance: $instance_id"
+
+  # Stop the instance
+  curl -s -X POST "$API_URL/instances/$instance_id/stop" \
+    -H "Authorization: Bearer $token" > /dev/null 2>&1 || true
+
+  # Wait a bit for it to stop
+  sleep 2
+
+  # Delete the instance
+  curl -s -X DELETE "$API_URL/instances/$instance_id" \
+    -H "Authorization: Bearer $token" > /dev/null 2>&1 || true
+
+  log "✓ Instance cleaned up"
+}
+
+# Main entry point: submit a build, wait for it, stream logs, and
+# optionally boot a VM from the result.
+main() {
+  local TOKEN BUILD_ID BUILD_RESPONSE STATUS IMAGE_REF i
+
+  log "=== E2E Build System Test ==="
+  echo ""
+
+  # Check prerequisites
+  check_prerequisites
+  echo ""
+
+  # Generate token
+  log "Generating JWT token..."
+  TOKEN=$(generate_token)
+  if [ -z "$TOKEN" ]; then
+    error "Failed to generate token"
+    error "Run: make gen-jwt"
+    exit 1
+  fi
+  log "✓ Token generated"
+  echo ""
+
+  # Create test source. SOURCE is intentionally global: the EXIT trap
+  # removes the tarball on every exit path, including unexpected ones.
+  log "Creating test Node.js source..."
+  SOURCE=$(create_test_source)
+  trap 'rm -f "$SOURCE"' EXIT
+  log "✓ Test source created: $SOURCE"
+  echo ""
+
+  # Submit build
+  BUILD_ID=$(submit_build "$TOKEN" "$SOURCE")
+  echo ""
+
+  # Wait for completion and capture the response
+  BUILD_RESPONSE=""
+  STATUS=""
+  log "Waiting for build to complete..."
+
+  for i in $(seq 1 $TIMEOUT_POLLS); do
+    BUILD_RESPONSE=$(curl -s "$API_URL/builds/$BUILD_ID" \
+      -H "Authorization: Bearer $TOKEN")
+
+    STATUS=$(echo "$BUILD_RESPONSE" | jq -r '.status')
+
+    case "$STATUS" in
+      "ready")
+        log "✅ Build succeeded!"
+        echo "$BUILD_RESPONSE" | jq .
+        break
+        ;;
+      "failed")
+        error "❌ Build failed!"
+        echo "$BUILD_RESPONSE" | jq .
+        echo ""
+        log "=== Build Logs ==="
+        get_logs "$TOKEN" "$BUILD_ID"
+        echo ""
+        error "=== E2E Test FAILED ==="
+        exit 1
+        ;;
+      "cancelled")
+        warn "Build was cancelled"
+        exit 1
+        ;;
+      "queued"|"building"|"pushing")
+        echo -ne "\r  Status: $STATUS (poll $i/$TIMEOUT_POLLS)..."
+        ;;
+      *)
+        warn "Unknown status: $STATUS"
+        ;;
+    esac
+
+    sleep $POLL_INTERVAL
+  done
+  echo ""
+
+  if [ "$STATUS" != "ready" ]; then
+    error "Build timed out after $((TIMEOUT_POLLS * POLL_INTERVAL)) seconds"
+    exit 1
+  fi
+
+  echo ""
+  log "=== Build Logs ==="
+  get_logs "$TOKEN" "$BUILD_ID"
+  echo ""
+
+  # Run the built image (unless skipped)
+  if [ "$SKIP_RUN" = "false" ]; then
+    IMAGE_REF=$(echo "$BUILD_RESPONSE" | jq -r '.image_ref // empty')
+
+    if [ -n "$IMAGE_REF" ]; then
+      echo ""
+      log "=== Running Built Image ==="
+      if run_built_image "$TOKEN" "$IMAGE_REF"; then
+        log "✅ VM run test passed!"
+      else
+        error "❌ VM run test failed!"
+        exit 1
+      fi
+    else
+      warn "No image_ref in build response, skipping VM test"
+    fi
+  else
+    log "Skipping VM run test (--skip-run)"
+  fi
+
+  echo ""
+  log "=== E2E Test PASSED ==="
+  exit 0
+}
+
+main