diff --git a/api/v1alpha1/step_types.go b/api/v1alpha1/step_types.go
index 67708927..23df33c9 100644
--- a/api/v1alpha1/step_types.go
+++ b/api/v1alpha1/step_types.go
@@ -60,18 +60,6 @@ type StepSpec struct {
 	// and decisions made by it.
 	// +kubebuilder:validation:Optional
 	Description string `json:"description,omitempty"`
-
-	// If needed, database credentials for fetching data from the database.
-	// The secret should contain the following keys:
-	// - "username": The database username.
-	// - "password": The database password.
-	// - "host": The database host.
-	// - "port": The database port.
-	// - "database": The database name.
-	// Note: this field will be removed in the future when db access in scheduler
-	// steps is no longer needed.
-	// +kubebuilder:validation:Optional
-	DatabaseSecretRef *corev1.SecretReference `json:"databaseSecretRef"`
 }
 
 const (
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index b9ca7124..fa1602f4 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1189,11 +1189,6 @@ func (in *StepSpec) DeepCopyInto(out *StepSpec) {
 		*out = make([]v1.ObjectReference, len(*in))
 		copy(*out, *in)
 	}
-	if in.DatabaseSecretRef != nil {
-		in, out := &in.DatabaseSecretRef, &out.DatabaseSecretRef
-		*out = new(v1.SecretReference)
-		**out = **in
-	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepSpec.
diff --git a/cmd/main.go b/cmd/main.go
index d5de841e..35a7f271 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -59,6 +59,7 @@ import (
 	"github.com/cobaltcore-dev/cortex/pkg/monitoring"
 	"github.com/cobaltcore-dev/cortex/pkg/multicluster"
 	"github.com/cobaltcore-dev/cortex/pkg/task"
+	hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
 	"github.com/sapcc/go-bits/httpext"
 	"github.com/sapcc/go-bits/must"
 	corev1 "k8s.io/api/core/v1"
@@ -76,6 +77,7 @@ func init() {
 	utilruntime.Must(v1alpha1.AddToScheme(scheme))
 	utilruntime.Must(ironcorev1alpha1.AddToScheme(scheme))
 	utilruntime.Must(corev1.AddToScheme(scheme))
+	utilruntime.Must(hv1.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }
 
diff --git a/config/crd/bases/cortex.cloud_steps.yaml b/config/crd/bases/cortex.cloud_steps.yaml
index e243633e..b66e46ad 100644
--- a/config/crd/bases/cortex.cloud_steps.yaml
+++ b/config/crd/bases/cortex.cloud_steps.yaml
@@ -55,28 +55,6 @@ spec:
           spec:
             description: spec defines the desired state of Step
             properties:
-              databaseSecretRef:
-                description: |-
-                  If needed, database credentials for fetching data from the database.
-                  The secret should contain the following keys:
-                  - "username": The database username.
-                  - "password": The database password.
-                  - "host": The database host.
-                  - "port": The database port.
-                  - "database": The database name.
-                  Note: this field will be removed in the future when db access in scheduler
-                  steps is no longer needed.
-                properties:
-                  name:
-                    description: name is unique within a namespace to reference a
-                      secret resource.
-                    type: string
-                  namespace:
-                    description: namespace defines the space within which the secret
-                      name must be unique.
-                    type: string
-                type: object
-                x-kubernetes-map-type: atomic
               description:
                 description: |-
                   Additional description of the step which helps understand its purpose
diff --git a/config/crd/cortex.cloud_steps.yaml b/config/crd/cortex.cloud_steps.yaml
index e243633e..b66e46ad 100644
--- a/config/crd/cortex.cloud_steps.yaml
+++ b/config/crd/cortex.cloud_steps.yaml
@@ -55,28 +55,6 @@ spec:
           spec:
             description: spec defines the desired state of Step
             properties:
-              databaseSecretRef:
-                description: |-
-                  If needed, database credentials for fetching data from the database.
-                  The secret should contain the following keys:
-                  - "username": The database username.
-                  - "password": The database password.
-                  - "host": The database host.
-                  - "port": The database port.
-                  - "database": The database name.
-                  Note: this field will be removed in the future when db access in scheduler
-                  steps is no longer needed.
-                properties:
-                  name:
-                    description: name is unique within a namespace to reference a
-                      secret resource.
-                    type: string
-                  namespace:
-                    description: namespace defines the space within which the secret
-                      name must be unique.
-                    type: string
-                type: object
-                x-kubernetes-map-type: atomic
               description:
                 description: |-
                   Additional description of the step which helps understand its purpose
diff --git a/dist/chart/templates/crd/cortex.cloud_steps.yaml b/dist/chart/templates/crd/cortex.cloud_steps.yaml
index 404db52a..3d3379fd 100644
--- a/dist/chart/templates/crd/cortex.cloud_steps.yaml
+++ b/dist/chart/templates/crd/cortex.cloud_steps.yaml
@@ -61,28 +61,6 @@ spec:
           spec:
             description: spec defines the desired state of Step
             properties:
-              databaseSecretRef:
-                description: |-
-                  If needed, database credentials for fetching data from the database.
-                  The secret should contain the following keys:
-                  - "username": The database username.
-                  - "password": The database password.
-                  - "host": The database host.
-                  - "port": The database port.
-                  - "database": The database name.
-                  Note: this field will be removed in the future when db access in scheduler
-                  steps is no longer needed.
-                properties:
-                  name:
-                    description: name is unique within a namespace to reference a
-                      secret resource.
-                    type: string
-                  namespace:
-                    description: namespace defines the space within which the secret
-                      name must be unique.
-                    type: string
-                type: object
-                x-kubernetes-map-type: atomic
               description:
                 description: |-
                   Additional description of the step which helps understand its purpose
diff --git a/dist/chart/templates/rbac/hypervisor_role.yaml b/dist/chart/templates/rbac/hypervisor_role.yaml
new file mode 100644
index 00000000..14b61e5d
--- /dev/null
+++ b/dist/chart/templates/rbac/hypervisor_role.yaml
@@ -0,0 +1,24 @@
+{{- if .Values.rbac.hypervisor.enable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    {{- include "chart.labels" . | nindent 4 }}
+  name: {{ .Values.namePrefix }}-manager-role-hypervisor
+rules:
+- apiGroups:
+  - kvm.cloud.sap
+  resources:
+  - hypervisors
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - kvm.cloud.sap
+  resources:
+  - hypervisors/status
+  verbs:
+  - get
+{{- end -}}
\ No newline at end of file
diff --git a/dist/chart/templates/rbac/hypervisor_role_binding.yaml b/dist/chart/templates/rbac/hypervisor_role_binding.yaml
new file mode 100644
index 00000000..7c41c451
--- /dev/null
+++ b/dist/chart/templates/rbac/hypervisor_role_binding.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.rbac.hypervisor.enable }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    {{- include "chart.labels" . | nindent 4 }}
+  name: {{ .Values.namePrefix }}-manager-rolebinding-hypervisor
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ .Values.namePrefix }}-manager-role-hypervisor
+subjects:
+- kind: ServiceAccount
+  name: {{ .Values.namePrefix }}-{{ .Values.controllerManager.serviceAccountName }}
+  namespace: {{ .Release.Namespace }}
+{{- end -}}
\ No newline at end of file
diff --git a/dist/chart/values.yaml b/dist/chart/values.yaml
index 09025fe7..4ced4388 100644
--- a/dist/chart/values.yaml
+++ b/dist/chart/values.yaml
@@ -55,6 +55,10 @@ rbac:
     enable: false
   pods:
     enable: false
+  # Whether hypervisor operator/crd related roles should be deployed.
+  # See: https://github.com/cobaltcore-dev/openstack-hypervisor-operator
+  hypervisor:
+    enable: false
 
 # [CRDs]: To enable the CRDs
 crd:
diff --git a/go.mod b/go.mod
index 8b37887a..f8c02ee0 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/cobaltcore-dev/cortex
 go 1.25.0
 
 require (
+	github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20251230105055-37950dd7ff29
 	github.com/go-gorp/gorp v2.2.0+incompatible
 	github.com/gophercloud/gophercloud/v2 v2.9.0
 	github.com/ironcore-dev/ironcore v0.2.4
@@ -20,17 +21,17 @@
 	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
-	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff v2.2.1+incompatible // indirect
-	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/containerd/continuity v0.4.5 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/docker/go-connections v0.6.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/emicklei/go-restful/v3 v3.13.0 // indirect
 	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.9.0 // indirect
@@ -38,9 +39,10 @@
 	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
-	github.com/go-openapi/jsonpointer v0.21.1 // indirect
-	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.22.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.2 // indirect
 	github.com/go-openapi/swag v0.23.1 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-migrate/migrate/v4 v4.19.1 // indirect
 	github.com/google/btree v1.1.3 // indirect
@@ -49,7 +51,7 @@
 	github.com/google/go-cmp v0.7.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -73,21 +75,21 @@
 	github.com/sapcc/go-api-declarations v1.18.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/spf13/cobra v1.10.1 // indirect
-	github.com/spf13/pflag v1.0.9 // indirect
-	github.com/stoewer/go-strcase v1.3.0 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
+	github.com/stoewer/go-strcase v1.3.1 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/ziutek/mymysql v1.5.4 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
 	go.opentelemetry.io/otel v1.37.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
 	go.opentelemetry.io/otel/metric v1.37.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
 	go.opentelemetry.io/otel/trace v1.37.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.27.0 // indirect
+	go.uber.org/zap v1.27.1 // indirect
 	go.yaml.in/yaml/v2 v2.4.3 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
@@ -101,8 +103,8 @@
 	golang.org/x/time v0.14.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect
-	google.golang.org/grpc v1.75.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect
+	google.golang.org/grpc v1.75.1 // indirect
 	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -117,6 +119,6 @@
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect
 	sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 732acbd4..c1031397 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,7 @@
 cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
 cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
+cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
+cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
@@ -8,18 +10,22 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
 github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
-github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
-github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
-github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20251219152336-768f63171244 h1:HedVhcR2smWlJqthYHYT5kL3Hhqjvg3lETz3pWiDprc=
+github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20251219152336-768f63171244/go.mod h1:i/YQm59sAvilkgTFpKc+elMIf/KzkdimnXMd13P3V9s=
+github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20251230105055-37950dd7ff29 h1:2tPhnOy0tPv49xLuk1i/0mvPwOneWE+oK/yP8s4GKZY=
+github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20251230105055-37950dd7ff29/go.mod h1:i/YQm59sAvilkgTFpKc+elMIf/KzkdimnXMd13P3V9s=
 github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
 github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
 github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
@@ -41,8 +47,8 @@ github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pM
 github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
-github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
+github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
@@ -62,12 +68,42 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
 github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
-github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
+github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
+github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU=
+github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ=
+github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
+github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
 github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
 github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
+github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
 github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -82,8 +118,12 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
 github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
 github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
+github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ=
+github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
 github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
 github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
+github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=
+github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -97,8 +137,10 @@ github.com/gophercloud/gophercloud/v2 v2.9.0 h1:Y9OMrwKF9EDERcHFSOTpf/6XGoAI0yOx
 github.com/gophercloud/gophercloud/v2 v2.9.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/ironcore-dev/ironcore v0.2.4 h1:i/RqiMIdzaptuDR6EKSX9hbeolj7AfTuT+4v1ZC4Jeg=
@@ -121,6 +163,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
+github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
 github.com/majewsky/gg v1.5.0 h1:b4LNLhfbjHgMEjIrBgMiLz2BO73yDsVC84O7h31K+R4=
 github.com/majewsky/gg v1.5.0/go.mod h1:KC7qUlln1VBY90OE0jXMNjXW2b9B4jJ1heYQ08OzeAg=
 github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
@@ -142,10 +186,10 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
-github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
-github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
-github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
-github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
+github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
+github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
+github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
@@ -169,6 +213,8 @@ github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+L
 github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
 github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
 github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
+github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
 github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
 github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -182,10 +228,13 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
 github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
-github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
 github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
-github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs=
+github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -206,30 +255,49 @@ github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
 github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
 go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
 go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
+go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
+go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
 go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
 go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
+go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
 go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
 go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
+go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
 go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
 go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
 go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
 go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
 go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
 go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
 go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -241,23 +309,32 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
 golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
+golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 h1:fQsdNF2N+/YewlRZiricy4P1iimyPKZ/xwniHj8Q2a0=
+golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
 golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
 golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
 golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
 golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
 golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -265,12 +342,18 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
 golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
 golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
 golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
 golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
 golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -279,6 +362,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
 golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -289,12 +373,20 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
 gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
 google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ=
 google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:qXWI/sQtv5UKboZ/zUk7h+mrf/lXORyI+n9DKDAusdg=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 h1:7LRqPCEdE4TP4/9psdaB7F2nhZFfBiGJomA5sojLWdU=
+google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
+google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
+google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
+google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
 google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
 google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -311,29 +403,41 @@ k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
 k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
 k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
 k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
+k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4=
+k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU=
 k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
 k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
 k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA=
 k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0=
+k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4=
+k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds=
 k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
 k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
 k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A=
 k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0=
+k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94=
+k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
 k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ=
+k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20251220205832-9d40a56c1308 h1:rk+D2uTO79bbNsICltOdVoA6mcJb0NpvBcts+ACymBQ=
+k8s.io/utils v0.0.0-20251220205832-9d40a56c1308/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2Fol2CS0QHMNs/WI1MOSGzCm1KhM5ec=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
 sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
 sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
 sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
 sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
 sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/helm/bundles/cortex-manila/templates/steps.yaml b/helm/bundles/cortex-manila/templates/steps.yaml
index a8e46300..5197bdf6 100644
--- a/helm/bundles/cortex-manila/templates/steps.yaml
+++ b/helm/bundles/cortex-manila/templates/steps.yaml
@@ -5,11 +5,6 @@ metadata:
   name: netapp-cpu-usage-balancing-manila
 spec:
   operator: cortex-manila
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-manila-postgres
-    namespace: {{ .Release.Namespace }}
   type: weigher
   impl: netapp_cpu_usage_balancing
   description: |
@@ -17,8 +12,6 @@ spec:
     to balance manila share placements across available storage pools. Its
     main purpose is to avoid cpu overutilization on a storage pool which may
     lead to performance degradation for shares placed on that pool.
-  knowledges:
-  - name: netapp-storage-pool-cpu-usage-manila
   opts:
     # Min-max scaling for gap-fitting based on CPU usage (pct)
     avgCPUUsageLowerBound: 0 # pct
diff --git a/helm/bundles/cortex-nova/templates/pipelines.yaml b/helm/bundles/cortex-nova/templates/pipelines.yaml
index d4bbbccc..86665690 100644
--- a/helm/bundles/cortex-nova/templates/pipelines.yaml
+++ b/helm/bundles/cortex-nova/templates/pipelines.yaml
@@ -45,7 +45,7 @@ spec:
 apiVersion: cortex.cloud/v1alpha1
 kind: Pipeline
 metadata:
-  name: nova-external-scheduler-kvm-reservations
+  name: nova-external-scheduler-kvm-all-filters-enabled
 spec:
   operator: cortex-nova
   description: |
@@ -64,11 +64,12 @@ spec:
   - ref: {name: filter-has-requested-traits}
   - ref: {name: filter-has-accelerators}
   - ref: {name: filter-correct-az}
-  - ref: {name: filter-disabled}
+  - ref: {name: filter-status-conditions}
+  - ref: {name: filter-maintenance}
   - ref: {name: filter-external-customer}
   - ref: {name: filter-packed-virtqueue}
-  - ref: {name: filter-project-aggregates}
-  - ref: {name: filter-compute-capabilities}
+  - ref: {name: filter-allowed-projects}
+  - ref: {name: filter-capabilities}
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Pipeline
diff --git a/helm/bundles/cortex-nova/templates/steps.yaml b/helm/bundles/cortex-nova/templates/steps.yaml
index 01c63d1f..3c4c118b 100644
--- a/helm/bundles/cortex-nova/templates/steps.yaml
+++ b/helm/bundles/cortex-nova/templates/steps.yaml
@@ -5,11 +5,6 @@ metadata:
   name: vmware-hana-binpacking
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: weigher
   impl: vmware_hana_binpacking
   description: |
@@ -31,11 +26,6 @@ metadata:
   name: vmware-general-purpose-balancing
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: weigher
   impl: vmware_general_purpose_balancing
   description: |
@@ -57,11 +47,6 @@ metadata:
   name: vmware-avoid-long-term-contended-hosts
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: weigher
   impl: vmware_avoid_long_term_contended_hosts
   description: |
@@ -87,11 +72,6 @@ metadata:
   name: vmware-avoid-short-term-contended-hosts
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: weigher
   impl: vmware_avoid_short_term_contended_hosts
   description: |
@@ -117,11 +97,6 @@ metadata:
   name: filter-host-instructions
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_host_instructions
   description: |
@@ -136,22 +111,12 @@ metadata:
   name: filter-has-enough-capacity
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_has_enough_capacity
   description: |
     This step will filter out hosts that do not have enough available capacity
     to host the requested flavor. If enabled, this step will subtract the
     current reservations residing on this host from the available capacity.
-  knowledges:
-  # TODO: Remove this dependency since this is a filter which should
-  # not depend on a potentially non-recent knowledge.
-  # This should be changed to use the hypervisor CRD.
-  - name: host-utilization
   opts:
     # If reserved space should be locked even for matching requests.
     # For the reservations pipeline, we don't want to unlock
@@ -165,11 +130,6 @@ metadata:
   name: filter-has-requested-traits
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_has_requested_traits
   description: |
@@ -177,11 +137,6 @@ spec:
     nova flavor extra spec:
     "trait:": "forbidden" means the host must not have the specified trait.
     "trait:": "required" means the host must have the specified trait.
-  knowledges:
-  # TODO: Remove this dependency since this is a filter which should
-  # not depend on a potentially non-recent knowledge.
-  # This should be changed to use the hypervisor CRD.
-  - name: host-capabilities
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
@@ -189,19 +144,11 @@ metadata:
   name: filter-has-accelerators
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_has_accelerators
   description: |
     This step will filter out hosts without the trait `COMPUTE_ACCELERATORS` if
     the nova flavor extra specs request accelerators via "accel:device_profile".
-  # TODO: This step currently depends on traits directly from the datasources.
-  # This should be changed to use the hypervisor CRD.
-  knowledges: []
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
@@ -209,42 +156,36 @@ metadata:
   name: filter-correct-az
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_correct_az
   description: |
     This step will filter out hosts whose aggregate information indicates
     they are not placed in the requested availability zone.
-  knowledges:
-  # TODO: Remove this dependency since this is a filter which should
-  # not depend on a potentially non-recent knowledge.
-  # This should be changed to use the hypervisor CRD.
-  - name: host-az
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
 metadata:
-  name: filter-disabled
+  name: filter-status-conditions
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
-  impl: filter_disabled
+  impl: filter_status_conditions
   description: |
-    This step will filter out hosts for which the hypervisor status is
-    `disabled`, the hypervisor state is `down`, or the trait
-    `COMPUTE_STATUS_DISABLED` is assigned.
-  # TODO: This step currently depends on traits directly from the datasources.
-  # This should be changed to use the hypervisor CRD.
-  knowledges: []
+    This step will filter out hosts for which the hypervisor status conditions
+    do not meet the expected values, for example, that the hypervisor is ready
+    and not disabled.
+---
+apiVersion: cortex.cloud/v1alpha1
+kind: Step
+metadata:
+  name: filter-maintenance
+spec:
+  operator: cortex-nova
+  type: filter
+  impl: filter_maintenance
+  description: |
+    This step will filter out hosts that are currently in maintenance mode that
+    prevents scheduling, for example, manual maintenance or termination.
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
@@ -252,11 +193,6 @@ metadata:
   name: filter-external-customer
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_external_customer
   description: |
@@ -264,9 +200,6 @@ spec:
     filters out hosts that are not intended for external customers. It
     considers the `CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED` trait on hosts as well
     as the `domain_name` scheduler hint from the nova request spec.
-  # TODO: This step currently depends on traits directly from the datasources.
-  # This should be changed to use the hypervisor CRD.
-  knowledges: []
   opts:
     domainNamePrefixes: ["iaas-"]
 ---
@@ -276,57 +209,35 @@ metadata:
   name: filter-packed-virtqueue
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
   impl: filter_packed_virtqueue
   description: |
     If the flavor extra specs contain the `hw:virtio_packed_ring` key, or the
     image properties contain the `hw_virtio_packed_ring` key, this step will
     filter out hosts that do not have the `COMPUTE_NET_VIRTIO_PACKED` trait.
-  # TODO: This step currently depends on traits directly from the datasources.
-  # This should be changed to use the hypervisor CRD.
-  knowledges: []
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
 metadata:
-  name: filter-project-aggregates
+  name: filter-allowed-projects
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
-  impl: filter_project_aggregates
+  impl: filter_allowed_projects
   description: |
-    This step locks certain hosts for certain projects, based on the aggregate
-    metadata. Note that hosts without aggregate tenant filter are still
-    accessible and will not be filtered out.
-  knowledges:
-  # TODO: Remove this dependency since this is a filter which should
-  # not depend on a potentially non-recent knowledge.
-  # This should be changed to use the hypervisor CRD.
-  - name: host-pinned-projects
+    This step filters hosts based on allowed projects defined in the
+    hypervisor resource. Note that hosts allowing all projects are still
+    accessible and will not be filtered out. In this way some hypervisors
+    are made accessible to some projects only.
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
 metadata:
-  name: filter-compute-capabilities
+  name: filter-capabilities
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: filter
-  impl: filter_compute_capabilities
+  impl: filter_capabilities
   description: |
     This step will filter out hosts that do not meet the compute capabilities
     requested by the nova flavor extra specs, like `{"arch": "x86_64",
@@ -335,9 +246,6 @@
     Note: currently, advanced boolean/numeric operators for the capabilities
     like `>`, `!`, ... are not supported because they are not used by any of
     our flavors in production.
-  # TODO: This step currently depends on hypervisor capabilities directly from
-  # the datasources. This should be changed to use the hypervisor CRD.
-  knowledges: []
 ---
 apiVersion: cortex.cloud/v1alpha1
 kind: Step
@@ -345,11 +253,6 @@ metadata:
   name: avoid-high-steal-pct
 spec:
   operator: cortex-nova
-  # TODO: Remove this database reference once the scheduler
-  # step doesn't need it anymore.
-  databaseSecretRef:
-    name: cortex-nova-postgres
-    namespace: {{ .Release.Namespace }}
   type: descheduler
   impl: avoid_high_steal_pct
   description: |
diff --git a/helm/bundles/cortex-nova/values.yaml b/helm/bundles/cortex-nova/values.yaml
index 020f96a1..7fbfbc90 100644
--- a/helm/bundles/cortex-nova/values.yaml
+++ b/helm/bundles/cortex-nova/values.yaml
@@ -90,6 +90,9 @@ cortex: &cortex
 cortex-scheduling-controllers:
   <<: *cortex
   namePrefix: cortex-nova-scheduling
+  rbac:
+    # The cortex nova scheduling controllers need hypervisor crd access.
+ hypervisor: {enable: true} conf: <<: *cortexConf leaderElectionID: cortex-nova-scheduling diff --git a/internal/scheduling/decisions/nova/pipeline_controller.go b/internal/scheduling/decisions/nova/pipeline_controller.go index caf448cf..4b35f2e1 100644 --- a/internal/scheduling/decisions/nova/pipeline_controller.go +++ b/internal/scheduling/decisions/nova/pipeline_controller.go @@ -18,6 +18,7 @@ import ( "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" "github.com/cobaltcore-dev/cortex/pkg/conf" "github.com/cobaltcore-dev/cortex/pkg/multicluster" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -194,6 +195,8 @@ func (c *DecisionPipelineController) SetupWithManager(mgr manager.Manager, mcl * return knowledge.Spec.Operator == c.Conf.Operator }), ). + // Watch hypervisor changes so the cache gets updated. + WatchesMulticluster(&hv1.Hypervisor{}, handler.Funcs{}). Named("cortex-nova-decisions"). For( &v1alpha1.Decision{}, diff --git a/internal/scheduling/decisions/nova/pipeline_controller_test.go b/internal/scheduling/decisions/nova/pipeline_controller_test.go index b9965ba4..aba391f5 100644 --- a/internal/scheduling/decisions/nova/pipeline_controller_test.go +++ b/internal/scheduling/decisions/nova/pipeline_controller_test.go @@ -285,11 +285,11 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) { steps: []v1alpha1.Step{ { ObjectMeta: metav1.ObjectMeta{ - Name: "filter_disabled", + Name: "filter_status_conditions", }, Spec: v1alpha1.StepSpec{ Type: v1alpha1.StepTypeFilter, - Impl: "filter_disabled", + Impl: "filter_status_conditions", }, }, }, @@ -319,7 +319,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) { }, Spec: v1alpha1.StepSpec{ Type: v1alpha1.StepTypeFilter, - Impl: "filter_disabled", + Impl: "filter_status_conditions", Opts: runtime.RawExtension{ Raw: []byte(`{"scope":{"host_capabilities":{"any_of_trait_infixes":["TEST_TRAIT"]}}}`), }, @@ -337,7 +337,7 @@ func TestDecisionPipelineController_InitPipeline(t *testing.T) { }, Spec: v1alpha1.StepSpec{ Type: v1alpha1.StepTypeFilter, - Impl: "filter_disabled", + Impl: "filter_status_conditions", Opts: runtime.RawExtension{ Raw: []byte(`invalid json`), }, diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go new file mode 100644 index 00000000..215a0f6b --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects.go @@ -0,0 +1,47 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "context" + "log/slog" + "slices" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" +) + +type FilterAllowedProjectsStep struct { + lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] +} + +// Lock certain hosts for certain projects, based on the hypervisor spec. +// Note that hosts without specified projects are still accessible. 
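+// An empty AllowedProjects list means the hypervisor accepts any project;
+// matching is by exact project ID, and requests without a project ID skip
+// this filter entirely.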
+func (s *FilterAllowedProjectsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { + result := s.PrepareResult(request) + if request.Spec.Data.ProjectID == "" { + traceLog.Info("no project ID in request, skipping filter") + return result, nil + } + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) + return nil, err + } + + for _, hv := range hvs.Items { + if len(hv.Spec.AllowedProjects) == 0 { + // Hypervisor is available for all projects. + continue + } + if !slices.Contains(hv.Spec.AllowedProjects, request.Spec.Data.ProjectID) { + // Project is not allowed on this hypervisor, filter it out. + delete(result.Activations, hv.Name) + traceLog.Info("filtering host not allowing project", "host", hv.Name) + } + } + return result, nil +} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_project_aggregates_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects_test.go similarity index 50% rename from internal/scheduling/decisions/nova/plugins/filters/filter_project_aggregates_test.go rename to internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects_test.go index 8c33ea39..5957536f 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_project_aggregates_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_allowed_projects_test.go @@ -8,79 +8,59 @@ import ( "testing" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" - testlib "github.com/cobaltcore-dev/cortex/pkg/testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func TestFilterProjectAggregatesStep_Run(t *testing.T) { - scheme, err := v1alpha1.SchemeBuilder.Build() +func TestFilterAllowedProjectsStep_Run(t *testing.T) { + scheme, err := hv1.SchemeBuilder.Build() if err != nil { t.Fatalf("expected no error, got %v", err) } - hostPinnedProjects, err := v1alpha1.BoxFeatureList([]any{ - // Host1 has no assigned filter_tenant_id - should always be included - &compute.HostPinnedProjects{ - AggregateName: nil, - AggregateUUID: nil, - ComputeHost: testlib.Ptr("host1"), - ProjectID: nil, - }, - // Aggregate 2 maps to project-123 to host2 - &compute.HostPinnedProjects{ - AggregateName: testlib.Ptr("agg2"), - AggregateUUID: testlib.Ptr("aggregate2"), - ComputeHost: testlib.Ptr("host2"), - ProjectID: testlib.Ptr("project-123"), - }, - // Aggregate 3 maps to project-456 to host3 - &compute.HostPinnedProjects{ - AggregateName: testlib.Ptr("agg3"), - AggregateUUID: testlib.Ptr("aggregate3"), - ComputeHost: testlib.Ptr("host3"), - ProjectID: testlib.Ptr("project-456"), - }, - // Aggregate 4 maps to project-123 and project-789 to host4 - &compute.HostPinnedProjects{ - AggregateName: testlib.Ptr("agg4"), - AggregateUUID: testlib.Ptr("agg4"), - ComputeHost: testlib.Ptr("host4"), - ProjectID: testlib.Ptr("project-123"), + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Spec: hv1.HypervisorSpec{ + AllowedProjects: []string{}, + }, }, - &compute.HostPinnedProjects{ - AggregateName: testlib.Ptr("agg4"), - 
AggregateUUID: testlib.Ptr("agg4"), - ComputeHost: testlib.Ptr("host4"), - ProjectID: testlib.Ptr("project-789"), + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Spec: hv1.HypervisorSpec{ + AllowedProjects: []string{"project-a", "project-b"}, + }, }, - // Host5 has no assigned filter_tenant_id - should always be included - &compute.HostPinnedProjects{ - AggregateName: nil, - AggregateUUID: nil, - ComputeHost: testlib.Ptr("host5"), - ProjectID: nil, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Spec: hv1.HypervisorSpec{ + AllowedProjects: []string{"project-a"}, + }, }, - // Aggregate 6 has no hosts assigned but a tenant filter - // This should not have any effect on the filter - &compute.HostPinnedProjects{ - AggregateName: testlib.Ptr("agg6"), - AggregateUUID: testlib.Ptr("aggregate6"), - ComputeHost: nil, - ProjectID: testlib.Ptr("project-123"), + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Spec: hv1.HypervisorSpec{ + AllowedProjects: []string{"project-c"}, + }, }, - // Maps project-123 to host2 a second time to test DISTINCT in SQL - &compute.HostPinnedProjects{ - AggregateName: testlib.Ptr("agg7"), - AggregateUUID: testlib.Ptr("aggregate7"), - ComputeHost: testlib.Ptr("host2"), - ProjectID: testlib.Ptr("project-123"), + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + }, + Spec: hv1.HypervisorSpec{ + AllowedProjects: []string{}, + }, }, - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) } tests := []struct { @@ -90,7 +70,7 @@ func TestFilterProjectAggregatesStep_Run(t *testing.T) { filteredHosts []string }{ { - name: "No project ID - no filtering", + name: "No project ID in request - all hosts pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -101,57 +81,84 @@ func TestFilterProjectAggregatesStep_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1", "host2", "host3", "host4", "host5"}, + expectedHosts: []string{"host1", "host2", "host3"}, filteredHosts: []string{}, }, { - name: "Project matches aggregate filter", + name: "Project matches allowed projects on host2", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-123", + ProjectID: "project-a", }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host3"}, + filteredHosts: []string{}, + }, + { + name: "Project matches allowed projects on host3 only", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + ProjectID: "project-a", + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, {ComputeHost: "host4"}, - {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1", "host2", "host4", "host5"}, // host1 (no filter), host2 (matches), host4 (matches), host5 (no filter) - filteredHosts: []string{"host3"}, // host3 has filter for different project + expectedHosts: []string{"host2", "host3"}, + filteredHosts: []string{"host4"}, }, { - name: "Project matches different aggregate filter", + name: "Project does not match any allowed projects", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-456", + ProjectID: 
"project-d", }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host2", "host3", "host4"}, + }, + { + name: "Hosts without allowed projects are accessible", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + ProjectID: "project-x", + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1", "host3", "host5"}, // host1 (no filter), host3 (matches), host5 (no filter) - filteredHosts: []string{"host2", "host4"}, // host2 and host4 have filters for different projects + expectedHosts: []string{"host1", "host5"}, + filteredHosts: []string{}, }, { - name: "Project matches multiple project filter", + name: "Mixed hosts - some with empty allowed projects, some with matching project", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-789", + ProjectID: "project-b", }, }, Hosts: []api.ExternalSchedulerHost{ @@ -159,14 +166,13 @@ func TestFilterProjectAggregatesStep_Run(t *testing.T) { {ComputeHost: "host2"}, {ComputeHost: "host3"}, {ComputeHost: "host4"}, - {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1", "host4", "host5"}, // host1 (no filter), host4 (matches), host5 (no filter) - filteredHosts: []string{"host2", "host3"}, // host2 and host3 have filters for different projects + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Project doesn't match any filter", + name: "All hosts filtered out", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -174,105 +180,120 @@ func TestFilterProjectAggregatesStep_Run(t *testing.T) { }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, {ComputeHost: "host4"}, - {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1", "host5"}, // Only hosts without tenant filters + expectedHosts: []string{}, filteredHosts: []string{"host2", "host3", "host4"}, }, { - name: "Only hosts without filters", + name: "Empty host list", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + ProjectID: "project-a", + }, + }, + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Project matches on multiple hosts with allowed projects", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-123", + ProjectID: "project-a", }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1", "host5"}, - filteredHosts: []string{}, + expectedHosts: []string{"host1", "host2", "host3", "host5"}, + filteredHosts: []string{"host4"}, }, { - name: "Only hosts with matching filters", + name: "Project matches second item in allowed projects list", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-123", + ProjectID: "project-b", }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host2"}, - {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host2", "host4"}, + expectedHosts: 
[]string{"host2"}, filteredHosts: []string{}, }, { - name: "Only hosts with non-matching filters", + name: "All hosts have empty allowed projects", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-123", + ProjectID: "any-project", }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host3"}, + {ComputeHost: "host1"}, + {ComputeHost: "host5"}, }, }, - expectedHosts: []string{}, - filteredHosts: []string{"host3"}, + expectedHosts: []string{"host1", "host5"}, + filteredHosts: []string{}, }, { - name: "Host not in database", + name: "Mixed allowed projects configuration", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-123", + ProjectID: "project-c", }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host-unknown"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + {ComputeHost: "host5"}, }, }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host-unknown"}, + expectedHosts: []string{"host1", "host4", "host5"}, + filteredHosts: []string{"host2", "host3"}, }, { - name: "Empty host list", + name: "Case sensitive project matching", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - ProjectID: "project-123", + ProjectID: "Project-A", }, }, - Hosts: []api.ExternalSchedulerHost{}, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, }, expectedHosts: []string{}, - filteredHosts: []string{}, + filteredHosts: []string{"host2", "host3"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - step := &FilterProjectAggregatesStep{} + step := &FilterAllowedProjectsStep{} step.Client = fake.NewClientBuilder(). WithScheme(scheme). - WithRuntimeObjects( - &v1alpha1.Knowledge{ - ObjectMeta: metav1.ObjectMeta{Name: "host-pinned-projects"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostPinnedProjects}, - }, - ). + WithObjects(hvs...). Build() result, err := step.Run(slog.Default(), tt.request) if err != nil { diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go new file mode 100644 index 00000000..ea0c86b7 --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities.go @@ -0,0 +1,114 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "context" + "fmt" + "log/slog" + "strings" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" +) + +type FilterCapabilitiesStep struct { + lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] +} + +// Get the provided capabilities of a hypervisor resource in the format Nova expects. +// The resulting map has keys like "capabilities:key" to match flavor extra specs. +// For example, if the hypervisor provides a cpu architecture "x86_64", +// the resulting map will have an entry "capabilities:cpu_info": "x86_64". 
+func hvToNovaCapabilities(hv hv1.Hypervisor) (map[string]string, error) {
+	caps := make(map[string]string)
+
+	// Nova example: capabilities:hypervisor_type='CH'
+	// Value provided by libvirt domain capabilities: 'ch'
+	switch hv.Status.DomainCapabilities.HypervisorType {
+	case "ch":
+		caps["capabilities:hypervisor_type"] = "CH"
+	case "qemu":
+		caps["capabilities:hypervisor_type"] = "QEMU"
+	default:
+		return nil, fmt.Errorf("unknown autodiscovered hypervisor type: %s", hv.Status.DomainCapabilities.HypervisorType)
+	}
+
+	// Nova example: capabilities:cpu_arch='x86_64'
+	caps["capabilities:cpu_arch"] = hv.Status.Capabilities.HostCpuArch
+
+	return caps, nil
+}
+
+// Check the capabilities of each host and if they match the extra spec provided
+// in the request spec flavor.
+func (s *FilterCapabilitiesStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
+	result := s.PrepareResult(request)
+	requestedCapabilities := request.Spec.Data.Flavor.Data.ExtraSpecs
+	if len(requestedCapabilities) == 0 {
+		traceLog.Debug("no flavor extra spec capabilities in request, skipping filter")
+		return result, nil
+	}
+
+	// Note: currently none of the advanced operators for capabilities are
+	// supported because they are not used by any of our flavors in production.
+	// Ops: https://github.com/sapcc/nova/blob/3ebf80/nova/scheduler/filters/extra_specs_ops.py#L23
+	unsupportedOps := []string{
+		"=", "<in>", "<all-in>", "==", "!=", ">=", "<=",
+		"s==", "s!=", "s<", "s<=", "s>", "s>=", "<or>", // <or> is special
+	}
+	for key, expr := range requestedCapabilities {
+		if !strings.HasPrefix(key, "capabilities:") {
+			delete(requestedCapabilities, key) // Remove non-capability keys.
+			continue
+		}
+		for _, op := range unsupportedOps {
+			if strings.Contains(expr, op) {
+				traceLog.Warn(
+					"unsupported extra spec operator in capabilities filter, skipping filter",
+					"key", key, "expr", expr, "flavor", request.Spec.Data.Flavor,
+				)
+				return result, nil
+			}
+		}
+	}
+
+	hvs := &hv1.HypervisorList{}
+	if err := s.Client.List(context.Background(), hvs); err != nil {
+		traceLog.Error("failed to list hypervisors", "error", err)
+		return nil, err
+	}
+
+	hvCaps := make(map[string]map[string]string)
+	for _, hv := range hvs.Items {
+		var err error
+		if hvCaps[hv.Name], err = hvToNovaCapabilities(hv); err != nil {
+			traceLog.Error("failed to get nova capabilities from hypervisor", "host", hv.Name, "error", err)
+			return nil, err
+		}
+	}
+
+	// Check which hosts match the requested capabilities.
+	for host := range result.Activations {
+		provided, ok := hvCaps[host]
+		if !ok {
+			delete(result.Activations, host)
+			traceLog.Info("filtering host without provided capabilities", "host", host)
+			continue
+		}
+		// Check if the provided capabilities match the requested ones.
+ for keyRequested, valueRequested := range requestedCapabilities { + if providedValue, ok := provided[keyRequested]; !ok || providedValue != valueRequested { + traceLog.Info( + "filtering host with mismatched capabilities", "host", host, + "wantKey", keyRequested, "wantValue", valueRequested, + "haveKey?", ok, "haveValue", providedValue, + ) + delete(result.Activations, host) + break + } + } + } + return result, nil +} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities_test.go new file mode 100644 index 00000000..564b68fa --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_capabilities_test.go @@ -0,0 +1,581 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "log/slog" + "testing" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestHvToNovaCapabilities(t *testing.T) { + tests := []struct { + name string + hv hv1.Hypervisor + expected map[string]string + expectError bool + }{ + { + name: "CH hypervisor with x86_64 architecture", + hv: hv1.Hypervisor{ + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "ch", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "x86_64", + }, + }, + }, + expected: map[string]string{ + "capabilities:hypervisor_type": "CH", + "capabilities:cpu_arch": "x86_64", + }, + expectError: false, + }, + { + name: "QEMU hypervisor with x86_64 architecture", + hv: hv1.Hypervisor{ + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "qemu", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "x86_64", + }, + }, + }, + expected: map[string]string{ + "capabilities:hypervisor_type": "QEMU", + "capabilities:cpu_arch": "x86_64", + }, + expectError: false, + }, + { + name: "CH hypervisor with aarch64 architecture", + hv: hv1.Hypervisor{ + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "ch", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "aarch64", + }, + }, + }, + expected: map[string]string{ + "capabilities:hypervisor_type": "CH", + "capabilities:cpu_arch": "aarch64", + }, + expectError: false, + }, + { + name: "Unknown hypervisor type", + hv: hv1.Hypervisor{ + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "kvm", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "x86_64", + }, + }, + }, + expected: nil, + expectError: true, + }, + { + name: "Empty hypervisor type", + hv: hv1.Hypervisor{ + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "x86_64", + }, + }, + }, + expected: nil, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := hvToNovaCapabilities(tt.hv) + if tt.expectError { + if err == nil { + t.Errorf("expected error, got nil") + } + return + } + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if len(result) != len(tt.expected) { + t.Errorf("expected %d capabilities, got %d", len(tt.expected), len(result)) + } + for key, expectedValue := range tt.expected { + if actualValue, ok := 
result[key]; !ok { + t.Errorf("expected key %s not found in result", key) + } else if actualValue != expectedValue { + t.Errorf("for key %s, expected %s, got %s", key, expectedValue, actualValue) + } + } + }) + } +} + +func TestFilterCapabilitiesStep_Run(t *testing.T) { + scheme, err := hv1.SchemeBuilder.Build() + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "ch", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "x86_64", + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "qemu", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "x86_64", + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "ch", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "aarch64", + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Status: hv1.HypervisorStatus{ + DomainCapabilities: hv1.DomainCapabilities{ + HypervisorType: "qemu", + }, + Capabilities: hv1.Capabilities{ + HostCpuArch: "aarch64", + }, + }, + }, + } + + tests := []struct { + name string + request api.ExternalSchedulerRequest + expectedHosts []string + filteredHosts []string + }{ + { + name: "No extra specs in request - all hosts pass", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{}, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host3"}, + filteredHosts: []string{}, + }, + { + name: "Non-capability extra specs - all hosts pass", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "hw:mem_page_size": "large", + "hw:cpu_policy": "dedicated", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + }, + }, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{}, + }, + { + name: "Match CH hypervisor type", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "CH", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host3"}, + filteredHosts: []string{"host2"}, + }, + { + name: "Match QEMU hypervisor type", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "QEMU", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + 
expectedHosts: []string{"host2", "host4"}, + filteredHosts: []string{"host1", "host3"}, + }, + { + name: "Match x86_64 CPU architecture", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:cpu_arch": "x86_64", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, + }, + { + name: "Match aarch64 CPU architecture", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:cpu_arch": "aarch64", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host3", "host4"}, + filteredHosts: []string{"host1", "host2"}, + }, + { + name: "Match both hypervisor type and CPU architecture", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "CH", + "capabilities:cpu_arch": "x86_64", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host2", "host3", "host4"}, + }, + { + name: "Match QEMU with aarch64", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "QEMU", + "capabilities:cpu_arch": "aarch64", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host4"}, + filteredHosts: []string{"host1", "host2", "host3"}, + }, + { + name: "No matching hosts", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "KVM", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host1", "host2"}, + }, + { + name: "Mixed capability and non-capability extra specs", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "capabilities:hypervisor_type": "CH", + "hw:mem_page_size": "large", + "capabilities:cpu_arch": "x86_64", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host2", "host3"}, + }, + { + name: "Empty host list", + request: 
api.ExternalSchedulerRequest{
+				Spec: api.NovaObject[api.NovaSpec]{
+					Data: api.NovaSpec{
+						Flavor: api.NovaObject[api.NovaFlavor]{
+							Data: api.NovaFlavor{
+								ExtraSpecs: map[string]string{
+									"capabilities:hypervisor_type": "CH",
+								},
+							},
+						},
+					},
+				},
+				Hosts: []api.ExternalSchedulerHost{},
+			},
+			expectedHosts: []string{},
+			filteredHosts: []string{},
+		},
+		{
+			name: "Case sensitive capability matching",
+			request: api.ExternalSchedulerRequest{
+				Spec: api.NovaObject[api.NovaSpec]{
+					Data: api.NovaSpec{
+						Flavor: api.NovaObject[api.NovaFlavor]{
+							Data: api.NovaFlavor{
+								ExtraSpecs: map[string]string{
+									"capabilities:hypervisor_type": "ch", // lowercase should not match
+								},
+							},
+						},
+					},
+				},
+				Hosts: []api.ExternalSchedulerHost{
+					{ComputeHost: "host1"},
+					{ComputeHost: "host2"},
+				},
+			},
+			expectedHosts: []string{},
+			filteredHosts: []string{"host1", "host2"},
+		},
+		{
+			name: "Unsupported operator in extra specs - skip filter",
+			request: api.ExternalSchedulerRequest{
+				Spec: api.NovaObject[api.NovaSpec]{
+					Data: api.NovaSpec{
+						Flavor: api.NovaObject[api.NovaFlavor]{
+							Data: api.NovaFlavor{
+								ExtraSpecs: map[string]string{
+									"capabilities:cpu_arch": "s>=x86_64",
+								},
+							},
+						},
+					},
+				},
+				Hosts: []api.ExternalSchedulerHost{
+					{ComputeHost: "host1"},
+					{ComputeHost: "host2"},
+					{ComputeHost: "host3"},
+				},
+			},
+			expectedHosts: []string{"host1", "host2", "host3"},
+			filteredHosts: []string{},
+		},
+		{
+			name: "Unsupported operator - skip filter",
+			request: api.ExternalSchedulerRequest{
+				Spec: api.NovaObject[api.NovaSpec]{
+					Data: api.NovaSpec{
+						Flavor: api.NovaObject[api.NovaFlavor]{
+							Data: api.NovaFlavor{
+								ExtraSpecs: map[string]string{
+									"capabilities:hypervisor_type": "<or> CH <or> QEMU",
+								},
+							},
+						},
+					},
+				},
+				Hosts: []api.ExternalSchedulerHost{
+					{ComputeHost: "host1"},
+					{ComputeHost: "host2"},
+				},
+			},
+			expectedHosts: []string{"host1", "host2"},
+			filteredHosts: []string{},
+		},
+		{
+			name: "All hosts match when no capabilities requested",
+			request: api.ExternalSchedulerRequest{
+				Spec: api.NovaObject[api.NovaSpec]{
+					Data: api.NovaSpec{
+						Flavor: api.NovaObject[api.NovaFlavor]{
+							Data: api.NovaFlavor{
+								ExtraSpecs: map[string]string{},
+							},
+						},
+					},
+				},
+				Hosts: []api.ExternalSchedulerHost{
+					{ComputeHost: "host1"},
+					{ComputeHost: "host2"},
+					{ComputeHost: "host3"},
+					{ComputeHost: "host4"},
+				},
+			},
+			expectedHosts: []string{"host1", "host2", "host3", "host4"},
+			filteredHosts: []string{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			step := &FilterCapabilitiesStep{}
+			step.Client = fake.NewClientBuilder().
+				WithScheme(scheme).
+				WithObjects(hvs...).
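+				// The fake client is seeded with the hypervisor fixtures
+				// defined at the top of this test.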
+ Build() + result, err := step.Run(slog.Default(), tt.request) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Check expected hosts are present + for _, host := range tt.expectedHosts { + if _, ok := result.Activations[host]; !ok { + t.Errorf("expected host %s to be present in activations", host) + } + } + + // Check filtered hosts are not present + for _, host := range tt.filteredHosts { + if _, ok := result.Activations[host]; ok { + t.Errorf("expected host %s to be filtered out", host) + } + } + + // Check total count + if len(result.Activations) != len(tt.expectedHosts) { + t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) + } + }) + } +} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_compute_capabilities.go b/internal/scheduling/decisions/nova/plugins/filters/filter_compute_capabilities.go deleted file mode 100644 index 558937ab..00000000 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_compute_capabilities.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright SAP SE -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "encoding/json" - "log/slog" - "maps" - "strings" - - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" -) - -type FilterComputeCapabilitiesStep struct { - lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] -} - -// Convert a nested dictionary into a list of capabilities. -// -// The input is something like this: -// -// { -// "arch": "x86_64", -// "maxphysaddr": {"bits": 46}, -// ... -// } -// -// Which then outputs a list of capabilities like: -// {"arch": "x86_64", "maxphysaddr:bits": 46, ...} -func convertToCapabilities(prefix string, obj map[string]any) map[string]any { - capabilities := make(map[string]any) - for key, value := range obj { - if subObj, ok := value.(map[string]any); ok { - // Nested object. - subCapabilities := convertToCapabilities(prefix+key+":", subObj) - maps.Copy(capabilities, subCapabilities) - } else { - // Flat value. - capabilities[prefix+key] = value - } - } - return capabilities -} - -// Check the capabilities of each host and if they match the extra spec provided -// in the request spec flavor. -func (s *FilterComputeCapabilitiesStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { - result := s.PrepareResult(request) - requestedCapabilities := request.Spec.Data.Flavor.Data.ExtraSpecs - // Note: currently advanced operators for the capabilities are not supported - // because they are not used by any of our flavors in production. - for key := range requestedCapabilities { - if !strings.HasPrefix(key, "capabilities:") { - delete(requestedCapabilities, key) // Remove non-capability keys. - } - } - if len(requestedCapabilities) == 0 { - traceLog.Debug("no flavor extra spec capabilities in request, skipping filter") - return result, nil - } - var hypervisors []nova.Hypervisor - if _, err := s.DB.SelectTimed( - "scheduler-nova", &hypervisors, "SELECT * FROM "+nova.Hypervisor{}.TableName(), - ); err != nil { - return result, err - } - // Serialize the hypervisor fields that are interesting for the filter. - providedCapabilities := make(map[string]map[string]any) - for _, h := range hypervisors { - // It is assumed that multiple hypervisors have the same capabilities - // when they are nested in the same compute host. 
- if _, ok := providedCapabilities[h.ServiceHost]; ok { - continue // Already processed this compute host. - } - // Uwrap the cpu capabilities. - var cpuInfo map[string]any - if h.CPUInfo != "" { - if err := json.Unmarshal([]byte(h.CPUInfo), &cpuInfo); err != nil { - traceLog.Warn("failed to unmarshal CPU info", "hv", h.ID, "error", err) - return result, err - } - } else { - cpuInfo = make(map[string]any) - } - // Note that Nova flavors directly map the cpu_info fields to extra - // specs, without a nested `capabilities:cpu_info` prefix. - cs := convertToCapabilities("capabilities:", cpuInfo) - cs["capabilities:hypervisor_type"] = h.HypervisorType - cs["capabilities:hypervisor_version"] = h.HypervisorVersion - providedCapabilities[h.ServiceHost] = cs - } - // Check which hosts match the requested capabilities. - for host := range result.Activations { - provided, ok := providedCapabilities[host] - if !ok { - delete(result.Activations, host) - traceLog.Debug("filtering host without provided capabilities", "host", host) - continue - } - // Check if the provided capabilities match the requested ones. - for keyRequested, valueRequested := range requestedCapabilities { - if providedValue, ok := provided[keyRequested]; !ok || providedValue != valueRequested { - traceLog.Debug( - "filtering host with mismatched capabilities", "host", host, - "wantKey", keyRequested, "wantValue", valueRequested, - "haveKey?", ok, "haveValue", providedValue, - ) - delete(result.Activations, host) - break - } - } - } - return result, nil -} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_compute_capabilities_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_compute_capabilities_test.go deleted file mode 100644 index 006d9dfc..00000000 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_compute_capabilities_test.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright SAP SE -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "log/slog" - "testing" - - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/pkg/db" - - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - - testlibDB "github.com/cobaltcore-dev/cortex/pkg/db/testing" -) - -func TestFilterComputeCapabilitiesStep_Run(t *testing.T) { - dbEnv := testlibDB.SetupDBEnv(t) - testDB := db.DB{DbMap: dbEnv.DbMap} - defer dbEnv.Close() - // Create dependency tables - err := testDB.CreateTable( - testDB.AddTable(nova.Hypervisor{}), - ) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock hypervisor data - hypervisors := []any{ - &nova.Hypervisor{ID: "hv1", Hostname: "hypervisor1", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.1", ServiceID: "svc1", ServiceHost: "host1", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: `{"arch": "x86_64", "model": "Haswell", "features": ["sse", "avx"]}`}, - &nova.Hypervisor{ID: "hv2", Hostname: "hypervisor2", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.2", ServiceID: "svc2", ServiceHost: "host2", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], 
CPUInfo: `{"arch": "aarch64", "model": "Cortex-A72", "features": ["neon"]}`}, - &nova.Hypervisor{ID: "hv3", Hostname: "hypervisor3", State: "up", Status: "enabled", HypervisorType: "VMware", HypervisorVersion: 6007000, HostIP: "192.168.1.3", ServiceID: "svc3", ServiceHost: "host3", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - } - if err := testDB.Insert(hypervisors...); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - tests := []struct { - name string - request api.ExternalSchedulerRequest - expectedHosts []string - filteredHosts []string - }{ - { - name: "No capabilities requested", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "hw:cpu_policy": "dedicated", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - }, - }, - expectedHosts: []string{"host1", "host2", "host3"}, - filteredHosts: []string{}, - }, - { - name: "Match x86_64 architecture", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "capabilities:arch": "x86_64", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - }, - }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host2", "host3"}, - }, - { - name: "Match hypervisor type", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "capabilities:hypervisor_type": "VMware", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - }, - }, - expectedHosts: []string{"host3"}, - filteredHosts: []string{"host1", "host2"}, - }, - { - name: "Match multiple capabilities", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "capabilities:arch": "x86_64", - "capabilities:hypervisor_type": "QEMU", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - }, - }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host2", "host3"}, - }, - { - name: "No matching capabilities", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "capabilities:arch": "s390x", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - }, - }, - expectedHosts: []string{}, - filteredHosts: []string{"host1", "host2", "host3"}, - }, - { - name: "Host without hypervisor data", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: 
api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "capabilities:arch": "x86_64", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host4"}, // Non-existent host - }, - }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host4"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - step := &FilterComputeCapabilitiesStep{} - step.DB = &testDB - result, err := step.Run(slog.Default(), tt.request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Check expected hosts are present - for _, host := range tt.expectedHosts { - if _, ok := result.Activations[host]; !ok { - t.Errorf("expected host %s to be present in activations", host) - } - } - - // Check filtered hosts are not present - for _, host := range tt.filteredHosts { - if _, ok := result.Activations[host]; ok { - t.Errorf("expected host %s to be filtered out", host) - } - } - - // Check total count - if len(result.Activations) != len(tt.expectedHosts) { - t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) - } - }) - } -} - -func TestConvertToCapabilities(t *testing.T) { - tests := []struct { - name string - prefix string - input map[string]any - expected map[string]any - }{ - { - name: "Flat values", - prefix: "capabilities:", - input: map[string]any{ - "arch": "x86_64", - "model": "Haswell", - }, - expected: map[string]any{ - "capabilities:arch": "x86_64", - "capabilities:model": "Haswell", - }, - }, - { - name: "Nested values", - prefix: "capabilities:", - input: map[string]any{ - "arch": "x86_64", - "maxphysaddr": map[string]any{ - "bits": 46, - }, - }, - expected: map[string]any{ - "capabilities:arch": "x86_64", - "capabilities:maxphysaddr:bits": 46, - }, - }, - { - name: "Deep nesting", - prefix: "capabilities:", - input: map[string]any{ - "topology": map[string]any{ - "sockets": 2, - "cores": map[string]any{ - "per_socket": 8, - }, - }, - }, - expected: map[string]any{ - "capabilities:topology:sockets": 2, - "capabilities:topology:cores:per_socket": 8, - }, - }, - { - name: "Empty input", - prefix: "capabilities:", - input: map[string]any{}, - expected: map[string]any{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := convertToCapabilities(tt.prefix, tt.input) - - if len(result) != len(tt.expected) { - t.Errorf("expected %d capabilities, got %d", len(tt.expected), len(result)) - } - - for key, expectedValue := range tt.expected { - if actualValue, ok := result[key]; !ok { - t.Errorf("expected capability %s not found, got result %v", key, result) - } else if actualValue != expectedValue { - t.Errorf("expected capability %s to be %v, got %v", key, expectedValue, actualValue) - } - } - }) - } -} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go index a79adf17..744edfb6 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az.go @@ -6,13 +6,10 @@ package filters import ( "context" "log/slog" - "strings" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" - "sigs.k8s.io/controller-runtime/pkg/client" + hv1 
"github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) type FilterCorrectAZStep struct { @@ -23,39 +20,42 @@ type FilterCorrectAZStep struct { func (s *FilterCorrectAZStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { result := s.PrepareResult(request) if request.Spec.Data.AvailabilityZone == "" { - traceLog.Debug("no availability zone requested, skipping filter_correct_az step") + traceLog.Info("no availability zone requested, skipping filter_correct_az step") return result, nil } - knowledge := &v1alpha1.Knowledge{} - if err := s.Client.Get( - context.Background(), - client.ObjectKey{Name: "host-az"}, - knowledge, - ); err != nil { - return nil, err - } - hostAZs, err := v1alpha1. - UnboxFeatureList[compute.HostAZ](knowledge.Status.Raw) - if err != nil { + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) return nil, err } - var computeHostsInAZ []string - for _, hostAZ := range hostAZs { - if hostAZ.AvailabilityZone == nil { - traceLog.Warn("host az knowledge has nil availability zone", "host", hostAZ.ComputeHost) + // The availability zone is provided by the label + // "topology.kubernetes.io/zone" on the hv crd. + var computeHostsInAZ = make(map[string]struct{}) + for _, hv := range hvs.Items { + az, ok := hv.Labels["topology.kubernetes.io/zone"] + if !ok { + traceLog.Warn("hypervisor missing topology.kubernetes.io/zone label", "host", hv.Name) continue } - if *hostAZ.AvailabilityZone == request.Spec.Data.AvailabilityZone { - computeHostsInAZ = append(computeHostsInAZ, hostAZ.ComputeHost) + if az == request.Spec.Data.AvailabilityZone { + // We always assume the name of the resource corresponds + // to the compute host name. 
+ computeHostsInAZ[hv.Name] = struct{}{} } } - lookupStr := strings.Join(computeHostsInAZ, ",") + + traceLog.Info( + "hosts inside requested az", + "availabilityZone", request.Spec.Data.AvailabilityZone, + "hosts", computeHostsInAZ, + ) for host := range result.Activations { - if strings.Contains(lookupStr, host) { + if _, ok := computeHostsInAZ[host]; ok { continue } delete(result.Activations, host) - traceLog.Debug("filtering host outside requested az", "host", host) + traceLog.Info("filtering host outside requested az", "host", host) } return result, nil } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az_test.go index 0fede0f1..4a7b633e 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_correct_az_test.go @@ -8,30 +8,49 @@ import ( "testing" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - - testlib "github.com/cobaltcore-dev/cortex/pkg/testing" ) func TestFilterCorrectAZStep_Run(t *testing.T) { - scheme, err := v1alpha1.SchemeBuilder.Build() + scheme, err := hv1.SchemeBuilder.Build() if err != nil { t.Fatalf("expected no error, got %v", err) } - // Insert mock data into the feature_host_az table - hostAZs, err := v1alpha1.BoxFeatureList([]any{ - &compute.HostAZ{ComputeHost: "host1", AvailabilityZone: testlib.Ptr("az-1")}, - &compute.HostAZ{ComputeHost: "host2", AvailabilityZone: testlib.Ptr("az-1")}, - &compute.HostAZ{ComputeHost: "host3", AvailabilityZone: testlib.Ptr("az-2")}, - &compute.HostAZ{ComputeHost: "host4", AvailabilityZone: testlib.Ptr("az-3")}, - &compute.HostAZ{ComputeHost: "host5", AvailabilityZone: nil}, - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + Labels: map[string]string{"topology.kubernetes.io/zone": "az-1"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + Labels: map[string]string{"topology.kubernetes.io/zone": "az-1"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + Labels: map[string]string{"topology.kubernetes.io/zone": "az-2"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + Labels: map[string]string{"topology.kubernetes.io/zone": "az-3"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + Labels: map[string]string{}, + }, + }, } tests := []struct { @@ -148,10 +167,7 @@ func TestFilterCorrectAZStep_Run(t *testing.T) { step := &FilterCorrectAZStep{} step.Client = fake.NewClientBuilder(). WithScheme(scheme). - WithObjects(&v1alpha1.Knowledge{ - ObjectMeta: v1.ObjectMeta{Name: "host-az"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostAZs}, - }). + WithObjects(hvs...). 
Build() result, err := step.Run(slog.Default(), tt.request) if err != nil { diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_disabled.go b/internal/scheduling/decisions/nova/plugins/filters/filter_disabled.go deleted file mode 100644 index 7d778f6f..00000000 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_disabled.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright SAP SE -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "log/slog" - "strings" - - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" - - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" -) - -type FilterDisabledStep struct { - lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] -} - -// Only get hosts that are not disabled or down. -func (s *FilterDisabledStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { - result := s.PrepareResult(request) - var computeHostsActive []string - if _, err := s.DB.SelectTimed("scheduler-nova", &computeHostsActive, ` - SELECT h.service_host - FROM `+placement.Trait{}.TableName()+` rpt - JOIN `+nova.Hypervisor{}.TableName()+` h - ON h.id = rpt.resource_provider_uuid - WHERE - name != 'COMPUTE_STATUS_DISABLED' AND - h.status != 'disabled' AND - h.state != 'down'`, - map[string]any{"az": request.Spec.Data.AvailabilityZone}, - ); err != nil { - return nil, err - } - lookupStr := strings.Join(computeHostsActive, ",") - for host := range result.Activations { - if strings.Contains(lookupStr, host) { - continue - } - delete(result.Activations, host) - traceLog.Debug("filtering disabled/down host", "host", host) - } - return result, nil -} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_disabled_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_disabled_test.go deleted file mode 100644 index 1f065423..00000000 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_disabled_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright SAP SE -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "log/slog" - "testing" - - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" - "github.com/cobaltcore-dev/cortex/pkg/db" - - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - - testlibDB "github.com/cobaltcore-dev/cortex/pkg/db/testing" - testlib "github.com/cobaltcore-dev/cortex/pkg/testing" -) - -func TestFilterDisabledStep_Run(t *testing.T) { - dbEnv := testlibDB.SetupDBEnv(t) - testDB := db.DB{DbMap: dbEnv.DbMap} - defer dbEnv.Close() - // Create dependency tables - err := testDB.CreateTable( - testDB.AddTable(nova.Hypervisor{}), - testDB.AddTable(placement.Trait{}), - ) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock hypervisor data - hypervisors := []any{ - &nova.Hypervisor{ID: "hv1", Hostname: "hypervisor1", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.1", ServiceID: "svc1", ServiceHost: "host1", ServiceDisabledReason: nil, VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: 
"{}"}, - &nova.Hypervisor{ID: "hv2", Hostname: "hypervisor2", State: "up", Status: "disabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.2", ServiceID: "svc2", ServiceHost: "host2", ServiceDisabledReason: testlib.Ptr("maintenance"), VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv3", Hostname: "hypervisor3", State: "down", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.3", ServiceID: "svc3", ServiceHost: "host3", ServiceDisabledReason: nil, VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv4", Hostname: "hypervisor4", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.4", ServiceID: "svc4", ServiceHost: "host4", ServiceDisabledReason: nil, VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv5", Hostname: "hypervisor5", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.5", ServiceID: "svc5", ServiceHost: "host5", ServiceDisabledReason: nil, VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - } - if err := testDB.Insert(hypervisors...); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock trait data - traits := []any{ - &placement.Trait{ResourceProviderUUID: "hv1", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv2", Name: "COMPUTE_STATUS_DISABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv3", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv4", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv5", Name: "COMPUTE_STATUS_DISABLED", ResourceProviderGeneration: 1}, - } - if err := testDB.Insert(traits...); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - tests := []struct { - name string - request api.ExternalSchedulerRequest - expectedHosts []string - filteredHosts []string - }{ - { - name: "Filter enabled hosts only", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - AvailabilityZone: "az-1", - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - }, - }, - expectedHosts: []string{"host1", "host4"}, // Only enabled, up hosts without COMPUTE_STATUS_DISABLED trait - filteredHosts: []string{"host2", "host3", "host5"}, - }, - { - name: "All hosts disabled or down", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - AvailabilityZone: "az-1", - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host2"}, // 
disabled - {ComputeHost: "host3"}, // down - {ComputeHost: "host5"}, // has COMPUTE_STATUS_DISABLED trait - }, - }, - expectedHosts: []string{}, - filteredHosts: []string{"host2", "host3", "host5"}, - }, - { - name: "Only enabled hosts", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - AvailabilityZone: "az-1", - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host4"}, - }, - }, - expectedHosts: []string{"host1", "host4"}, - filteredHosts: []string{}, - }, - { - name: "Empty host list", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - AvailabilityZone: "az-1", - }, - }, - Hosts: []api.ExternalSchedulerHost{}, - }, - expectedHosts: []string{}, - filteredHosts: []string{}, - }, - { - name: "Host not in database", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - AvailabilityZone: "az-1", - }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host-unknown"}, - }, - }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host-unknown"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - step := &FilterDisabledStep{} - step.DB = &testDB - result, err := step.Run(slog.Default(), tt.request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Check expected hosts are present - for _, host := range tt.expectedHosts { - if _, ok := result.Activations[host]; !ok { - t.Errorf("expected host %s to be present in activations", host) - } - } - - // Check filtered hosts are not present - for _, host := range tt.filteredHosts { - if _, ok := result.Activations[host]; ok { - t.Errorf("expected host %s to be filtered out", host) - } - } - - // Check total count - if len(result.Activations) != len(tt.expectedHosts) { - t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) - } - }) - } -} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go index 2cc5b716..7385063d 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer.go @@ -4,16 +4,15 @@ package filters import ( + "context" "errors" "log/slog" "slices" "strings" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) type FilterExternalCustomerStepOpts struct { @@ -55,23 +54,29 @@ func (s *FilterExternalCustomerStep) Run(traceLog *slog.Logger, request api.Exte traceLog.Debug("domain does not match any external customer prefix", "domain", domainName) return result, nil } - var externalCustomerComputeHosts []string - if _, err := s.DB.SelectTimed("scheduler-nova", &externalCustomerComputeHosts, ` - SELECT h.service_host - FROM `+nova.Hypervisor{}.TableName()+` h - JOIN `+placement.Trait{}.TableName()+` rpt - ON h.id = rpt.resource_provider_uuid - WHERE rpt.name = 'CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED'`, - ); err != nil { + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), 
hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) return nil, err } - lookupStr := strings.Join(externalCustomerComputeHosts, ",") + hvsWithTrait := make(map[string]struct{}) + for _, hv := range hvs.Items { + traits := hv.Status.Traits + traits = append(traits, hv.Spec.CustomTraits...) + if !slices.Contains(traits, "CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED") { + continue + } + hvsWithTrait[hv.Name] = struct{}{} + } + + traceLog.Info("hosts supporting external customers", "hosts", hvsWithTrait) for host := range result.Activations { - if !strings.Contains(lookupStr, host) { + if _, ok := hvsWithTrait[host]; ok { continue } delete(result.Activations, host) - traceLog.Debug("filtering host not intended for external customers", "host", host) + traceLog.Info("filtering host not supporting external customers", "host", host) } return result, nil } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer_test.go index d179adc9..0dafa1a7 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_external_customer_test.go @@ -7,64 +7,80 @@ import ( "log/slog" "testing" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" - "github.com/cobaltcore-dev/cortex/pkg/db" - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - - testlibDB "github.com/cobaltcore-dev/cortex/pkg/db/testing" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestFilterExternalCustomerStep_Run(t *testing.T) { - dbEnv := testlibDB.SetupDBEnv(t) - testDB := db.DB{DbMap: dbEnv.DbMap} - defer dbEnv.Close() - // Create dependency tables - err := testDB.CreateTable( - testDB.AddTable(nova.Hypervisor{}), - testDB.AddTable(placement.Trait{}), - ) + scheme, err := hv1.SchemeBuilder.Build() if err != nil { t.Fatalf("expected no error, got %v", err) } - // Insert mock hypervisor data - hypervisors := []any{ - &nova.Hypervisor{ID: "hv1", Hostname: "hypervisor1", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.1", ServiceID: "svc1", ServiceHost: "host1", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv2", Hostname: "hypervisor2", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.2", ServiceID: "svc2", ServiceHost: "host2", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv3", Hostname: "hypervisor3", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.3", ServiceID: "svc3", ServiceHost: "host3", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: 
&[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv4", Hostname: "hypervisor4", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.4", ServiceID: "svc4", ServiceHost: "host4", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - } - if err := testDB.Insert(hypervisors...); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock trait data - host1 and host3 support external customers - traits := []any{ - &placement.Trait{ResourceProviderUUID: "hv1", Name: "CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv2", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv3", Name: "CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv4", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - } - if err := testDB.Insert(traits...); err != nil { - t.Fatalf("expected no error, got %v", err) + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED", "SOME_OTHER_TRAIT"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"SOME_OTHER_TRAIT"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_EXTERNAL_CUSTOMER_SUPPORTED"}, + }, + }, } tests := []struct { name string - request api.ExternalSchedulerRequest opts FilterExternalCustomerStepOpts + request api.ExternalSchedulerRequest expectedHosts []string filteredHosts []string + expectError bool }{ { - name: "External customer domain - filter out external customer hosts", + name: "External customer domain matches prefix - filter to supported hosts", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ SchedulerHints: map[string]any{ - "domain_name": "external-customer-corp.com", + "domain_name": "ext-customer1", }, }, }, @@ -75,20 +91,19 @@ func TestFilterExternalCustomerStep_Run(t *testing.T) { {ComputeHost: "host4"}, }, }, - opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-"}, - CustomerIgnoredDomainNames: []string{}, - }, - expectedHosts: []string{"host2", "host4"}, // Hosts without external customer support - filteredHosts: []string{"host1", "host3"}, // Hosts with external customer support are filtered out + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Internal domain - no filtering", + name: "Domain does not match external customer prefix - all hosts pass", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, request: api.ExternalSchedulerRequest{ Spec: 
api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ SchedulerHints: map[string]any{ - "domain_name": "internal.company.com", + "domain_name": "internal-customer", }, }, }, @@ -99,122 +114,253 @@ func TestFilterExternalCustomerStep_Run(t *testing.T) { {ComputeHost: "host4"}, }, }, - opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-"}, - CustomerIgnoredDomainNames: []string{}, - }, expectedHosts: []string{"host1", "host2", "host3", "host4"}, filteredHosts: []string{}, }, { - name: "Ignored external customer domain - no filtering", + name: "Multiple domain prefixes - matches second prefix", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"external-", "ext-", "customer-"}, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ SchedulerHints: map[string]any{ - "domain_name": "external-customer-ignored.com", + "domain_name": "customer-abc", }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, {ComputeHost: "host3"}, {ComputeHost: "host4"}, }, }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3", "host4"}, + }, + { + name: "Domain in ignored list - all hosts pass", opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-"}, - CustomerIgnoredDomainNames: []string{"external-customer-ignored.com"}, + CustomerDomainNamePrefixes: []string{"ext-"}, + CustomerIgnoredDomainNames: []string{"ext-special-domain"}, }, - expectedHosts: []string{"host1", "host2", "host3", "host4"}, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{ + "domain_name": "ext-special-domain", + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host3"}, filteredHosts: []string{}, }, { - name: "Multiple domain prefixes", + name: "Only hosts with trait should remain for external customer", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ SchedulerHints: map[string]any{ - "domain_name": "partner-company.com", + "domain_name": "ext-customer2", }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, {ComputeHost: "host2"}, + {ComputeHost: "host5"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host5"}, + filteredHosts: []string{}, + }, + { + name: "No hosts with trait - all filtered", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{ + "domain_name": "ext-customer3", + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host3"}, {ComputeHost: "host4"}, }, }, + expectedHosts: []string{}, + filteredHosts: []string{"host3", "host4"}, + }, + { + name: "Empty host list", opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-", "partner-"}, - CustomerIgnoredDomainNames: []string{}, + CustomerDomainNamePrefixes: []string{"ext-"}, + }, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{ + "domain_name": "ext-customer", + }, + }, + }, + Hosts: 
[]api.ExternalSchedulerHost{}, }, - expectedHosts: []string{"host2", "host4"}, - filteredHosts: []string{"host1", "host3"}, + expectedHosts: []string{}, + filteredHosts: []string{}, }, { - name: "Domain hint as array", + name: "Domain name as list - uses first element", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ SchedulerHints: map[string]any{ - "domain_name": []any{"external-customer-test.com"}, + "domain_name": []any{"ext-customer", "other"}, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, }, }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3"}, + }, + { + name: "Missing domain_name in scheduler hints - error", opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-"}, - CustomerIgnoredDomainNames: []string{}, + CustomerDomainNamePrefixes: []string{"ext-"}, + }, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{}, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + }, }, - expectedHosts: []string{"host2", "host4"}, - filteredHosts: []string{"host1", "host3"}, + expectError: true, }, { - name: "No domain hint", + name: "Nil scheduler hints - error", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - SchedulerHints: map[string]any{}, + SchedulerHints: nil, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + }, + }, + expectError: true, + }, + { + name: "Case sensitive prefix matching", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{ + "domain_name": "EXT-customer", + }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host3"}, + filteredHosts: []string{}, + }, + { + name: "Exact prefix match", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext"}, + }, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{ + "domain_name": "ext", + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host3"}, }, }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3"}, + }, + { + name: "Multiple ignored domains", opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-"}, - CustomerIgnoredDomainNames: []string{}, + CustomerDomainNamePrefixes: []string{"ext-"}, + CustomerIgnoredDomainNames: []string{"ext-test", "ext-dev", "ext-staging"}, }, - expectedHosts: []string{}, // Should return error, but we expect empty result - filteredHosts: []string{"host1", "host2"}, + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + SchedulerHints: map[string]any{ + "domain_name": "ext-dev", + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host3"}, + }, + }, + 
expectedHosts: []string{"host1", "host3"}, + filteredHosts: []string{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { step := &FilterExternalCustomerStep{} - step.DB = &testDB + step.Client = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(hvs...). + Build() step.Options = tt.opts - result, err := step.Run(slog.Default(), tt.request) - // For the "No domain hint" test case, we expect an error - if tt.name == "No domain hint" { + result, err := step.Run(slog.Default(), tt.request) + if tt.expectError { if err == nil { - t.Errorf("expected error for missing domain hint, got nil") + t.Errorf("expected error but got none") } return } - if err != nil { t.Fatalf("expected no error, got %v", err) } @@ -248,34 +394,38 @@ func TestFilterExternalCustomerStepOpts_Validate(t *testing.T) { expectError bool }{ { - name: "Valid options", + name: "Valid options with single prefix", + opts: FilterExternalCustomerStepOpts{ + CustomerDomainNamePrefixes: []string{"ext-"}, + }, + expectError: false, + }, + { + name: "Valid options with multiple prefixes", opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-"}, - CustomerIgnoredDomainNames: []string{}, + CustomerDomainNamePrefixes: []string{"ext-", "external-", "customer-"}, }, expectError: false, }, { - name: "Multiple prefixes", + name: "Valid options with prefixes and ignored domains", opts: FilterExternalCustomerStepOpts{ - CustomerDomainNamePrefixes: []string{"external-customer-", "partner-"}, - CustomerIgnoredDomainNames: []string{"ignored.com"}, + CustomerDomainNamePrefixes: []string{"ext-"}, + CustomerIgnoredDomainNames: []string{"ext-test"}, }, expectError: false, }, { - name: "Empty prefixes", + name: "Invalid - empty domain name prefixes", opts: FilterExternalCustomerStepOpts{ CustomerDomainNamePrefixes: []string{}, - CustomerIgnoredDomainNames: []string{}, }, expectError: true, }, { - name: "Nil prefixes", + name: "Invalid - nil domain name prefixes", opts: FilterExternalCustomerStepOpts{ CustomerDomainNamePrefixes: nil, - CustomerIgnoredDomainNames: []string{}, }, expectError: true, }, @@ -285,10 +435,10 @@ func TestFilterExternalCustomerStepOpts_Validate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { err := tt.opts.Validate() if tt.expectError && err == nil { - t.Errorf("expected error, got nil") + t.Errorf("expected validation error but got none") } if !tt.expectError && err != nil { - t.Errorf("expected no error, got %v", err) + t.Errorf("expected no validation error but got: %v", err) } }) } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go index 9a709de9..04918542 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators.go @@ -4,14 +4,13 @@ package filters import ( + "context" "log/slog" - "strings" - - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" + "slices" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) type FilterHasAcceleratorsStep struct { @@ -26,24 +25,29 @@ func (s *FilterHasAcceleratorsStep) Run(traceLog *slog.Logger, request api.Exter traceLog.Debug("no 
accelerators requested") return result, nil } - var computeHostsWithAccelerators []string - if _, err := s.DB.SelectTimed("scheduler-nova", &computeHostsWithAccelerators, ` - SELECT h.service_host - FROM `+placement.Trait{}.TableName()+` rpt - JOIN `+nova.Hypervisor{}.TableName()+` h - ON h.id = rpt.resource_provider_uuid - WHERE name = 'COMPUTE_ACCELERATORS'`, - map[string]any{"az": request.Spec.Data.AvailabilityZone}, - ); err != nil { + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) return nil, err } - lookupStr := strings.Join(computeHostsWithAccelerators, ",") + hvsWithTrait := make(map[string]struct{}) + for _, hv := range hvs.Items { + traits := hv.Status.Traits + traits = append(traits, hv.Spec.CustomTraits...) + if !slices.Contains(traits, "COMPUTE_ACCELERATORS") { + continue + } + hvsWithTrait[hv.Name] = struct{}{} + } + + traceLog.Info("hosts with accelerators", "hosts", hvsWithTrait) for host := range result.Activations { - if strings.Contains(lookupStr, host) { + if _, ok := hvsWithTrait[host]; ok { continue } delete(result.Activations, host) - traceLog.Debug("filtering host which has no accelerators", "host", host) + traceLog.Info("filtering host without accelerators", "host", host) } return result, nil } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators_test.go index 64e64f6b..e88142bc 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_accelerators_test.go @@ -7,48 +7,60 @@ import ( "log/slog" "testing" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" - "github.com/cobaltcore-dev/cortex/pkg/db" - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - - testlibDB "github.com/cobaltcore-dev/cortex/pkg/db/testing" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestFilterHasAcceleratorsStep_Run(t *testing.T) { - dbEnv := testlibDB.SetupDBEnv(t) - testDB := db.DB{DbMap: dbEnv.DbMap} - defer dbEnv.Close() - // Create dependency tables - err := testDB.CreateTable( - testDB.AddTable(nova.Hypervisor{}), - testDB.AddTable(placement.Trait{}), - ) + scheme, err := hv1.SchemeBuilder.Build() if err != nil { t.Fatalf("expected no error, got %v", err) } - // Insert mock hypervisor data - hypervisors := []any{ - &nova.Hypervisor{ID: "hv1", Hostname: "hypervisor1", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.1", ServiceID: "svc1", ServiceHost: "host1", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv2", Hostname: "hypervisor2", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.2", ServiceID: "svc2", ServiceHost: "host2", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, 
CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv3", Hostname: "hypervisor3", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.3", ServiceID: "svc3", ServiceHost: "host3", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv4", Hostname: "hypervisor4", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.4", ServiceID: "svc4", ServiceHost: "host4", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - } - if err := testDB.Insert(hypervisors...); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock trait data - host1 and host3 have accelerators - traits := []any{ - &placement.Trait{ResourceProviderUUID: "hv1", Name: "COMPUTE_ACCELERATORS", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv2", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv3", Name: "COMPUTE_ACCELERATORS", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv4", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - } - if err := testDB.Insert(traits...); err != nil { - t.Fatalf("expected no error, got %v", err) + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"COMPUTE_ACCELERATORS"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"COMPUTE_ACCELERATORS", "SOME_OTHER_TRAIT"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"SOME_OTHER_TRAIT"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"COMPUTE_ACCELERATORS", "CUSTOM_GPU"}, + }, + }, } tests := []struct { @@ -58,15 +70,13 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { filteredHosts []string }{ { - name: "No accelerators requested - no filtering", + name: "No accelerators requested - all hosts pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "hw:cpu_policy": "dedicated", - }, + ExtraSpecs: map[string]string{}, }, }, }, @@ -82,7 +92,7 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { filteredHosts: []string{}, }, { - name: "Accelerators requested - filter hosts without accelerators", + name: "Accelerators requested - filter to hosts with COMPUTE_ACCELERATORS trait", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -102,19 +112,41 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host3"}, // Only hosts with COMPUTE_ACCELERATORS trait - filteredHosts: 
[]string{"host2", "host4"}, // Hosts without accelerators are filtered out + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Accelerators requested with specific device profile", + name: "Accelerators requested with different device profile value", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "accel:device_profile": "nvidia-v100", - "hw:cpu_policy": "dedicated", + "accel:device_profile": "fpga-profile", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host3"}, + {ComputeHost: "host5"}, + }, + }, + expectedHosts: []string{"host1", "host5"}, + filteredHosts: []string{"host3"}, + }, + { + name: "Accelerators requested - all hosts have the trait", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "accel:device_profile": "gpu-profile", }, }, }, @@ -123,37 +155,83 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, {ComputeHost: "host2"}, + {ComputeHost: "host5"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host5"}, + filteredHosts: []string{}, + }, + { + name: "Accelerators requested - no hosts have the trait", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "accel:device_profile": "gpu-profile", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host3"}, {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host3"}, - filteredHosts: []string{"host2", "host4"}, + expectedHosts: []string{}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Empty extra specs - no filtering", + name: "Other extra specs present but no accelerator request - all hosts pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{}, + ExtraSpecs: map[string]string{ + "hw:cpu_policy": "dedicated", + "quota:vif_inbound": "100000", + }, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, {ComputeHost: "host3"}, {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host2", "host3", "host4"}, + expectedHosts: []string{"host1", "host3", "host4"}, filteredHosts: []string{}, }, { - name: "All hosts without accelerators", + name: "Accelerators requested with empty device profile value", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "accel:device_profile": "", + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3"}, + }, + { + name: "Empty host list with accelerators requested", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -166,16 +244,55 @@ func TestFilterHasAcceleratorsStep_Run(t 
*testing.T) { }, }, }, + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Empty host list without accelerators requested", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{}, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Mixed hosts with and without accelerators trait", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "accel:device_profile": "custom-accelerator", + }, + }, + }, + }, + }, Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, {ComputeHost: "host2"}, + {ComputeHost: "host3"}, {ComputeHost: "host4"}, + {ComputeHost: "host5"}, }, }, - expectedHosts: []string{}, - filteredHosts: []string{"host2", "host4"}, + expectedHosts: []string{"host1", "host2", "host5"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "All hosts with accelerators", + name: "Accelerators with additional extra specs", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -183,6 +300,8 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ "accel:device_profile": "gpu-profile", + "hw:cpu_policy": "dedicated", + "hw:mem_page_size": "large", }, }, }, @@ -193,11 +312,11 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { {ComputeHost: "host3"}, }, }, - expectedHosts: []string{"host1", "host3"}, - filteredHosts: []string{}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3"}, }, { - name: "Host not in database", + name: "Host not in database with accelerators requested", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -223,7 +342,11 @@ func TestFilterHasAcceleratorsStep_Run(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { step := &FilterHasAcceleratorsStep{} - step.DB = &testDB + step.Client = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(hvs...). + Build() + result, err := step.Run(slog.Default(), tt.request) if err != nil { t.Fatalf("expected no error, got %v", err) diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go index b1fb7ac9..7956709a 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity.go @@ -10,9 +10,9 @@ import ( api "github.com/cobaltcore-dev/cortex/api/delegation/nova" "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" - "sigs.k8s.io/controller-runtime/pkg/client" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + "k8s.io/apimachinery/pkg/api/resource" ) type FilterHasEnoughCapacityOpts struct { @@ -42,27 +42,56 @@ type FilterHasEnoughCapacity struct { // Please also note that disk space is currently not considered by this filter. 
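
The rewritten Run below replaces the db-backed host-utilization lookup with arithmetic on `resource.Quantity` values kept in nested maps. One Go subtlety explains the repeated copy-out/write-back dance in the new code: `Quantity.Sub` mutates its receiver, and Go map values are not addressable, so each quantity has to be read into a local variable, modified, and stored back. A minimal sketch of that pattern, with illustrative values only, assuming `k8s.io/apimachinery` is on the module path:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Free resources for one host, keyed by resource name.
	free := map[string]resource.Quantity{
		"cpu":    resource.MustParse("16"),
		"memory": resource.MustParse("32Gi"),
	}

	// Subtract a 4-vCPU allocation. Writing free["cpu"].Sub(alloc)
	// would not compile, because map values are not addressable and
	// Sub is a pointer-receiver method: copy, mutate, store back.
	alloc := resource.MustParse("4")
	freeCPU := free["cpu"]
	freeCPU.Sub(alloc)
	free["cpu"] = freeCPU

	remaining := free["cpu"]
	fmt.Println(remaining.String()) // prints "12"
}
```
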
 func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) {
 	result := s.PrepareResult(request)
-	knowledge := &v1alpha1.Knowledge{}
-	if err := s.Client.Get(
-		context.Background(),
-		client.ObjectKey{Name: "host-utilization"},
-		knowledge,
-	); err != nil {
+
+	// This map holds the free resources per host.
+	freeResourcesByHost := make(map[string]map[string]resource.Quantity)
+
+	// The hypervisor resource auto-discovers its current utilization.
+	// We can use the hypervisor status to calculate the total capacity
+	// and then subtract the actual resource allocation from virtual machines.
+	hvs := &hv1.HypervisorList{}
+	if err := s.Client.List(context.Background(), hvs); err != nil {
+		traceLog.Error("failed to list hypervisors", "error", err)
 		return nil, err
 	}
-	hostUtilizations, err := v1alpha1.
-		UnboxFeatureList[compute.HostUtilization](knowledge.Status.Raw)
-	if err != nil {
-		return nil, err
+	for _, hv := range hvs.Items {
+		// Start with the total capacity.
+		freeResourcesByHost[hv.Name] = map[string]resource.Quantity{
+			"cpu":    hv.Status.Capabilities.HostCpus,
+			"memory": hv.Status.Capabilities.HostMemory,
+		}
+
+		// Subtract allocated resources by VMs.
+		for _, dom := range hv.Status.DomainInfos {
+			if cpuAlloc, ok := dom.Allocation["cpu"]; ok {
+				freeCPU := freeResourcesByHost[hv.Name]["cpu"]
+				freeCPU.Sub(cpuAlloc)
+				freeResourcesByHost[hv.Name]["cpu"] = freeCPU
+			} else {
+				traceLog.Error(
+					"libvirt domain without cpu allocation info",
+					"host", hv.Name, "domain", dom.Name,
+				)
+			}
+
+			if memoryAlloc, ok := dom.Allocation["memory"]; ok {
+				freeMemory := freeResourcesByHost[hv.Name]["memory"]
+				freeMemory.Sub(memoryAlloc)
+				freeResourcesByHost[hv.Name]["memory"] = freeMemory
+			} else {
+				traceLog.Error(
+					"libvirt domain without memory allocation info",
+					"host", hv.Name, "domain", dom.Name,
+				)
+			}
+		}
 	}
+
+	// Subtract reserved resources by Reservations.
 	var reservations v1alpha1.ReservationList
-	ctx := context.Background()
-	if err := s.Client.List(ctx, &reservations); err != nil {
+	if err := s.Client.List(context.Background(), &reservations); err != nil {
 		return nil, err
 	}
-	// Resources reserved by hosts.
-	vcpusReserved := make(map[string]uint64)  // in vCPUs
-	memoryReserved := make(map[string]uint64) // in MB
 	for _, reservation := range reservations.Items {
 		if reservation.Status.Phase != v1alpha1.ReservationStatusPhaseActive {
 			continue // Only consider active reservations.
@@ -79,61 +108,76 @@ func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.Externa
 		}
 		host := reservation.Status.Host
 		if cpu, ok := reservation.Spec.Requests["cpu"]; ok {
-			vcpusReserved[host] += cpu.AsDec().UnscaledBig().Uint64()
+			freeCPU := freeResourcesByHost[host]["cpu"]
+			freeCPU.Sub(cpu)
+			freeResourcesByHost[host]["cpu"] = freeCPU
 		}
 		if memory, ok := reservation.Spec.Requests["memory"]; ok {
-			memoryReserved[host] += memory.AsDec().UnscaledBig().Uint64() / 1000000 // MB
+			freeMemory := freeResourcesByHost[host]["memory"]
+			freeMemory.Sub(memory)
+			freeResourcesByHost[host]["memory"] = freeMemory
 		}
-		// Disk is currently not considered.
 	}
-	traceLog.Debug(
-		"reserved resources",
-		"vcpus", vcpusReserved,
-		"memory", memoryReserved,
-	)
-	hostsEncountered := map[string]struct{}{}
-	for _, utilization := range hostUtilizations {
-		hostsEncountered[utilization.ComputeHost] = struct{}{}
-		vCPUsAllocatable := uint64(utilization.TotalVCPUsAllocatable)
-		if reserved, ok := vcpusReserved[utilization.ComputeHost]; ok {
-			vCPUsAllocatable -= reserved
-		}
+
+	hostsEncountered := make(map[string]struct{})
+	for host, free := range freeResourcesByHost {
+		hostsEncountered[host] = struct{}{}
+
+		// Check cpu capacity.
 		if request.Spec.Data.Flavor.Data.VCPUs == 0 {
 			return nil, errors.New("flavor has 0 vcpus")
 		}
-		vcpuSlots := vCPUsAllocatable / request.Spec.Data.Flavor.Data.VCPUs // floored.
-		if vcpuSlots < request.Spec.Data.NumInstances {
-			traceLog.Debug(
-				"Filtering host due to insufficient VCPU capacity",
-				slog.String("host", utilization.ComputeHost),
-				slog.Uint64("requested_vcpus", request.Spec.Data.Flavor.Data.VCPUs),
-				slog.Uint64("requested_instances", request.Spec.Data.NumInstances),
-				slog.Float64("available_vcpus", utilization.TotalVCPUsAllocatable),
+		freeCPU, ok := free["cpu"]
+		if !ok || freeCPU.Value() < 0 {
+			traceLog.Error(
+				"host with invalid CPU capacity",
+				"host", host, "freeCPU", freeCPU.String(),
 			)
-			delete(result.Activations, utilization.ComputeHost)
 			continue
 		}
-		memoryAllocatableMB := uint64(utilization.TotalRAMAllocatableMB)
-		if reserved, ok := memoryReserved[utilization.ComputeHost]; ok {
-			memoryAllocatableMB -= reserved
+		// Calculate how many instances can fit on this host, based on cpu.
+		//nolint:gosec // We're checking for underflows above (< 0).
+		vcpuSlots := uint64(freeCPU.Value()) /
+			request.Spec.Data.Flavor.Data.VCPUs
+		if vcpuSlots < request.Spec.Data.NumInstances {
+			traceLog.Info(
+				"filtering host due to insufficient CPU capacity",
+				"host", host, "requested", request.Spec.Data.Flavor.Data.VCPUs,
+				"available", freeCPU.String(),
+			)
+			delete(result.Activations, host)
+			continue
 		}
+
+		// Check memory capacity.
 		if request.Spec.Data.Flavor.Data.MemoryMB == 0 {
 			return nil, errors.New("flavor has 0 memory")
 		}
-		memorySlots := memoryAllocatableMB / request.Spec.Data.Flavor.Data.MemoryMB // floored.
+		freeMemory, ok := free["memory"]
+		if !ok || freeMemory.Value() < 0 {
+			traceLog.Error(
+				"host with invalid memory capacity",
+				"host", host, "freeMemory", freeMemory.String(),
+			)
+			continue
+		}
+		// Calculate how many instances can fit on this host, based on memory.
+		// Note: according to the OpenStack docs, the memory is in MB, not MiB.
+		// See: https://docs.openstack.org/nova/latest/user/flavors.html
+		//nolint:gosec // We're checking for underflows above (< 0).
+		memorySlots := uint64(freeMemory.Value()/1_000_000 /* MB */) /
			request.Spec.Data.Flavor.Data.MemoryMB
 		if memorySlots < request.Spec.Data.NumInstances {
-			traceLog.Debug(
-				"Filtering host due to insufficient RAM capacity",
-				slog.String("host", utilization.ComputeHost),
-				slog.Uint64("requested_mb", request.Spec.Data.Flavor.Data.MemoryMB),
-				slog.Uint64("requested_instances", request.Spec.Data.NumInstances),
-				slog.Float64("available_mb", utilization.TotalRAMAllocatableMB),
+			traceLog.Info(
+				"filtering host due to insufficient RAM capacity",
+				"host", host, "requested_mb", request.Spec.Data.Flavor.Data.MemoryMB,
+				"available_mb", freeMemory.String(),
 			)
-			delete(result.Activations, utilization.ComputeHost)
+			delete(result.Activations, host)
 			continue
 		}
-		// Disk is currently not considered.
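		// For illustration (numbers taken from the test file below): a host
		// with HostCpus = 16 and HostMemory = 32Gi and no running domains has
		// freeCPU.Value() = 16 and freeMemory.Value() = 34_359_738_368 bytes.
		// For a flavor with VCPUs = 4 and MemoryMB = 8000 this gives
		//   vcpuSlots   = 16 / 4 = 4
		//   memorySlots = (34_359_738_368 / 1_000_000) / 8000 = 34_359 / 8000 = 4
		// so the host passes for NumInstances <= 4 and is filtered at 5, which
		// is exactly what the "Multiple instances" test cases assert.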
} + // Remove all hosts that weren't encountered. for host := range result.Activations { if _, ok := hostsEncountered[host]; !ok { diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity_test.go index 0c780145..8ff30858 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_enough_capacity_test.go @@ -9,68 +9,98 @@ import ( api "github.com/cobaltcore-dev/cortex/api/delegation/nova" "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" - + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestFilterHasEnoughCapacity_Run(t *testing.T) { - scheme, err := v1alpha1.SchemeBuilder.Build() - if err != nil { - t.Fatalf("expected no error, got %v", err) + // Build schemes for both Hypervisor and Reservation types + scheme := runtime.NewScheme() + if err := hv1.SchemeBuilder.AddToScheme(scheme); err != nil { + t.Fatalf("failed to add hypervisor scheme: %v", err) } - - // Insert mock data into the feature_host_utilization table - hostUtilizations, err := v1alpha1.BoxFeatureList([]any{ - &compute.HostUtilization{ComputeHost: "host1", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalRAMAllocatableMB: 32768, TotalVCPUsAllocatable: 16, TotalDiskAllocatableGB: 1000}, // High capacity host - &compute.HostUtilization{ComputeHost: "host2", RAMUtilizedPct: 80.0, VCPUsUtilizedPct: 70.0, DiskUtilizedPct: 60.0, TotalRAMAllocatableMB: 16384, TotalVCPUsAllocatable: 8, TotalDiskAllocatableGB: 500}, // Medium capacity host - &compute.HostUtilization{ComputeHost: "host3", RAMUtilizedPct: 90.0, VCPUsUtilizedPct: 85.0, DiskUtilizedPct: 75.0, TotalRAMAllocatableMB: 8192, TotalVCPUsAllocatable: 4, TotalDiskAllocatableGB: 250}, // Low capacity host - &compute.HostUtilization{ComputeHost: "host4", RAMUtilizedPct: 20.0, VCPUsUtilizedPct: 15.0, DiskUtilizedPct: 10.0, TotalRAMAllocatableMB: 65536, TotalVCPUsAllocatable: 32, TotalDiskAllocatableGB: 2000}, // Very high capacity host - &compute.HostUtilization{ComputeHost: "host5", RAMUtilizedPct: 95.0, VCPUsUtilizedPct: 90.0, DiskUtilizedPct: 85.0, TotalRAMAllocatableMB: 4096, TotalVCPUsAllocatable: 2, TotalDiskAllocatableGB: 100}, // Very low capacity host - &compute.HostUtilization{ComputeHost: "host6", RAMUtilizedPct: 0.0, VCPUsUtilizedPct: 0.0, DiskUtilizedPct: 0.0, TotalRAMAllocatableMB: 0, TotalVCPUsAllocatable: 0, TotalDiskAllocatableGB: 0}, // Zero capacity host (edge case) - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) + if err := v1alpha1.AddToScheme(scheme); err != nil { + t.Fatalf("failed to add cortex scheme: %v", err) } tests := []struct { name string + hypervisors []client.Object + reservations []client.Object request api.ExternalSchedulerRequest + options FilterHasEnoughCapacityOpts expectedHosts []string filteredHosts []string + expectError bool }{ { - name: "Small flavor - most hosts have capacity", + name: "Single instance with sufficient capacity", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: 
v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{ + { + Name: "instance-1", + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("8Gi"), + }, + }, + }, + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 2, - MemoryMB: 4096, - RootGB: 50, + VCPUs: 4, + MemoryMB: 4000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host2", "host3", "host4", "host5"}, // All except host6 (0 capacity) - host5 has exactly 2 vCPUs - filteredHosts: []string{"host6"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + expectError: false, }, { - name: "Medium flavor - some hosts filtered", + name: "Single instance with insufficient CPU", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{ + { + Name: "instance-1", + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("12"), + "memory": resource.MustParse("8Gi"), + }, + }, + }, + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -78,116 +108,171 @@ func TestFilterHasEnoughCapacity_Run(t *testing.T) { Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ VCPUs: 8, - MemoryMB: 16384, - RootGB: 200, + MemoryMB: 4000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host2", "host4"}, // Only hosts with >= 8 vCPUs, >= 16384 MB RAM, >= 200 GB disk - filteredHosts: []string{"host3", "host5", "host6"}, // host3 has only 4 vCPUs, host5 has only 2 vCPUs, host6 has 0 + expectedHosts: []string{}, + filteredHosts: []string{"host1"}, + expectError: false, }, { - name: "Large flavor - only high capacity hosts", + name: "Single instance with insufficient memory", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{ + { + Name: "instance-1", + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("28Gi"), + }, + }, + }, + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 16, - MemoryMB: 32768, - RootGB: 500, + VCPUs: 4, + MemoryMB: 8000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host4"}, // Only 
hosts with >= 16 vCPUs, >= 32768 MB RAM, >= 500 GB disk - filteredHosts: []string{"host2", "host3", "host5", "host6"}, + expectedHosts: []string{}, + filteredHosts: []string{"host1"}, + expectError: false, }, { - name: "Very large flavor - only very high capacity host", + name: "Multiple instances on single host - sufficient capacity", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - NumInstances: 1, + NumInstances: 4, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 32, - MemoryMB: 65536, - RootGB: 1000, + VCPUs: 4, + MemoryMB: 8000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host4"}, // Only host4 has enough capacity - filteredHosts: []string{"host1", "host2", "host3", "host5", "host6"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + expectError: false, }, { - name: "Impossible flavor - no hosts have capacity", + name: "Multiple instances - insufficient capacity for all", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - NumInstances: 1, + NumInstances: 5, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 64, - MemoryMB: 131072, - RootGB: 5000, + VCPUs: 4, + MemoryMB: 8000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{}, // No hosts have enough capacity - filteredHosts: []string{"host1", "host2", "host3", "host4", "host5", "host6"}, + expectedHosts: []string{}, + filteredHosts: []string{"host1"}, + expectError: false, }, { - name: "CPU constraint only", + name: "Multiple hosts - mixed capacity", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host2"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("8"), + HostMemory: resource.MustParse("16Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host3"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("32"), + HostMemory: resource.MustParse("64Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 10, // 
More than host3 (4) and host5 (2) - MemoryMB: 1024, - RootGB: 10, + VCPUs: 12, + MemoryMB: 24000, }, }, }, @@ -196,473 +281,372 @@ func TestFilterHasEnoughCapacity_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host4"}, // Only hosts with >= 10 vCPUs - filteredHosts: []string{"host2", "host3", "host5", "host6"}, + expectedHosts: []string{"host1", "host3"}, + filteredHosts: []string{"host2"}, + expectError: false, }, { - name: "Memory constraint only", + name: "Active reservation - subtract reserved resources", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, + reservations: []client.Object{ + &v1alpha1.Reservation{ + ObjectMeta: v1.ObjectMeta{Name: "reservation-1"}, + Spec: v1alpha1.ReservationSpec{ + Scheduler: v1alpha1.ReservationSchedulerSpec{ + CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ + ProjectID: "different-project", + FlavorName: "different-flavor", + }, + }, + Requests: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + Status: v1alpha1.ReservationStatus{ + Phase: v1alpha1.ReservationStatusPhaseActive, + Host: "host1", + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, + ProjectID: "test-project", Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 1, - MemoryMB: 20000, // More than host3 (8192) and host5 (4096) - RootGB: 10, + Name: "test-flavor", + VCPUs: 8, + MemoryMB: 16000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host4"}, // Only hosts with >= 20000 MB RAM - filteredHosts: []string{"host2", "host3", "host5", "host6"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + expectError: false, }, { - name: "Very small flavor", + name: "Matching reservation - unlock reserved resources", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, + reservations: []client.Object{ + &v1alpha1.Reservation{ + ObjectMeta: v1.ObjectMeta{Name: "reservation-1"}, + Spec: v1alpha1.ReservationSpec{ + Scheduler: v1alpha1.ReservationSchedulerSpec{ + CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ + ProjectID: "test-project", + FlavorName: "test-flavor", + }, + }, + Requests: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + Status: v1alpha1.ReservationStatus{ + Phase: v1alpha1.ReservationStatusPhaseActive, + Host: "host1", + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, + ProjectID: "test-project", Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 1, - MemoryMB: 512, - RootGB: 10, + Name: 
"test-flavor", + VCPUs: 8, + MemoryMB: 16000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host2", "host3", "host4", "host5"}, // All except host6 (0 capacity) - filteredHosts: []string{"host6"}, + options: FilterHasEnoughCapacityOpts{LockReserved: false}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + expectError: false, }, { - name: "Host not in database", + name: "Matching reservation with LockReserved option", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, + reservations: []client.Object{ + &v1alpha1.Reservation{ + ObjectMeta: v1.ObjectMeta{Name: "reservation-1"}, + Spec: v1alpha1.ReservationSpec{ + Scheduler: v1alpha1.ReservationSchedulerSpec{ + CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ + ProjectID: "test-project", + FlavorName: "test-flavor", + }, + }, + Requests: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + Status: v1alpha1.ReservationStatus{ + Phase: v1alpha1.ReservationStatusPhaseActive, + Host: "host1", + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, + ProjectID: "test-project", Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 2, - MemoryMB: 4096, - RootGB: 50, + Name: "test-flavor", + VCPUs: 8, + MemoryMB: 16000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host-unknown"}, }, }, + options: FilterHasEnoughCapacityOpts{LockReserved: true}, expectedHosts: []string{"host1"}, - filteredHosts: []string{"host-unknown"}, // Host not in database gets filtered out + filteredHosts: []string{}, + expectError: false, }, { - name: "Empty host list", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - NumInstances: 1, - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - VCPUs: 2, - MemoryMB: 4096, - RootGB: 50, + name: "Inactive reservation - do not subtract", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, + reservations: []client.Object{ + &v1alpha1.Reservation{ + ObjectMeta: v1.ObjectMeta{Name: "reservation-1"}, + Spec: v1alpha1.ReservationSpec{ + Scheduler: v1alpha1.ReservationSchedulerSpec{ + CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ + ProjectID: "test-project", + FlavorName: "test-flavor", }, }, + Requests: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + Status: v1alpha1.ReservationStatus{ + Phase: v1alpha1.ReservationStatusPhaseFailed, + Host: "host1", }, }, - Hosts: []api.ExternalSchedulerHost{}, }, - expectedHosts: []string{}, - filteredHosts: []string{}, - }, - { - name: "Exact capacity match", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, + 
ProjectID: "test-project", Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 8, // Exactly matches host2 - MemoryMB: 16384, - RootGB: 500, + Name: "test-flavor", + VCPUs: 8, + MemoryMB: 16000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host2", "host4"}, // host2 exactly matches, host1 and host4 exceed - filteredHosts: []string{"host3"}, // host3 has insufficient capacity + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + expectError: false, }, { - name: "Boundary test - just over capacity", + name: "Reservation for different scheduler - do not consider", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, + }, + }, + }, + reservations: []client.Object{ + &v1alpha1.Reservation{ + ObjectMeta: v1.ObjectMeta{Name: "reservation-1"}, + Spec: v1alpha1.ReservationSpec{ + Scheduler: v1alpha1.ReservationSchedulerSpec{ + CortexNova: nil, + }, + Requests: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + Status: v1alpha1.ReservationStatus{ + Phase: v1alpha1.ReservationStatusPhaseActive, + Host: "host1", + }, + }, + }, request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 9, // Just over host2's 8 vCPUs - MemoryMB: 16385, // Just over host2's 16384 MB - RootGB: 501, // Just over host2's 500 GB + VCPUs: 8, + MemoryMB: 16000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host4"}, // Only hosts that exceed the requirements - filteredHosts: []string{"host2", "host3"}, // host2 is just under, host3 is well under + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + expectError: false, }, { - name: "Edge case - exactly enough total slots", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - NumInstances: 8, - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - VCPUs: 1, - MemoryMB: 4096, - RootGB: 20, - }, + name: "Host not in hypervisor list - filtered out", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), }, + DomainInfos: []hv1.DomainInfo{}, }, }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, // 32768/4096 = 8 memory slots, 16/1 = 16 vcpu slots - {ComputeHost: "host5"}, // 4096/4096 = 1 memory slot, 2/1 = 2 vcpu slots - }, }, - expectedHosts: []string{"host1"}, // Should pass as memorySlotsTotal (8+1=9) == numInstances (9) - filteredHosts: []string{"host5"}, - }, - { - name: "Edge case - 1 vm more than available slots", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ - NumInstances: 9, // 1 more than available. 
+ NumInstances: 1, Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - VCPUs: 1, - MemoryMB: 4096, - RootGB: 20, + VCPUs: 4, + MemoryMB: 8000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, // 32768/4096 = 8 memory slots, 16/1 = 16 vcpu slots - {ComputeHost: "host5"}, // 4096/4096 = 1 memory slot, 2/1 = 2 vcpu slots - }, - }, - expectedHosts: []string{}, // Should fail as memorySlotsTotal (8+1=9) < numInstances (10) - filteredHosts: []string{"host1", "host5"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Logf("Running test case: %s", tt.name) - step := &FilterHasEnoughCapacity{} - step.Client = fake.NewClientBuilder(). - WithScheme(scheme). - WithRuntimeObjects(&v1alpha1.Knowledge{ - ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostUtilizations}, - }). - Build() - // Override the real client with our fake client after Init() - result, err := step.Run(slog.Default(), tt.request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Check expected hosts are present - for _, host := range tt.expectedHosts { - if _, ok := result.Activations[host]; !ok { - t.Errorf("expected host %s to be present in activations", host) - } - } - - // Check filtered hosts are not present - for _, host := range tt.filteredHosts { - if _, ok := result.Activations[host]; ok { - t.Errorf("expected host %s to be filtered out", host) - } - } - - // Check total count - if len(result.Activations) != len(tt.expectedHosts) { - t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) - } - }) - } -} - -func TestFilterHasEnoughCapacity_WithReservations(t *testing.T) { - scheme, err := v1alpha1.SchemeBuilder.Build() - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock data into the feature_host_utilization table - hostUtilizations, err := v1alpha1.BoxFeatureList([]any{ - &compute.HostUtilization{ComputeHost: "host1", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalRAMAllocatableMB: 32768, TotalVCPUsAllocatable: 16, TotalDiskAllocatableGB: 1000}, // High capacity host - &compute.HostUtilization{ComputeHost: "host2", RAMUtilizedPct: 80.0, VCPUsUtilizedPct: 70.0, DiskUtilizedPct: 60.0, TotalRAMAllocatableMB: 16384, TotalVCPUsAllocatable: 8, TotalDiskAllocatableGB: 500}, // Medium capacity host - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Create active reservations that consume resources on hosts - reservations := []v1alpha1.Reservation{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "reservation-host1-1", - Namespace: "test-namespace", - }, - Spec: v1alpha1.ReservationSpec{ - Scheduler: v1alpha1.ReservationSchedulerSpec{ - CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ - FlavorName: "test-flavor", - ProjectID: "test-project", - DomainID: "test-domain", - }, - }, - Requests: map[string]resource.Quantity{ - "memory": *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), // 4GB - "cpu": *resource.NewQuantity(4, resource.DecimalSI), - }, - }, - Status: v1alpha1.ReservationStatus{ - Phase: v1alpha1.ReservationStatusPhaseActive, - Host: "host1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "reservation-host2-1", - Namespace: "test-namespace", - }, - Spec: v1alpha1.ReservationSpec{ - Scheduler: v1alpha1.ReservationSchedulerSpec{ - CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ - FlavorName: "test-flavor", - ProjectID: "test-project", 
- DomainID: "test-domain", - }, - }, - Requests: map[string]resource.Quantity{ - "memory": *resource.NewQuantity(4*1024*1024*1024, resource.BinarySI), // 4GB - "cpu": *resource.NewQuantity(4, resource.DecimalSI), + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, }, }, - Status: v1alpha1.ReservationStatus{ - Phase: v1alpha1.ReservationStatusPhaseActive, - Host: "host2", - }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host2"}, + expectError: false, }, { - ObjectMeta: metav1.ObjectMeta{ - Name: "reservation-inactive", - Namespace: "test-namespace", - }, - Spec: v1alpha1.ReservationSpec{ - Scheduler: v1alpha1.ReservationSchedulerSpec{ - CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ - FlavorName: "test-flavor", - ProjectID: "test-project", - DomainID: "test-domain", - }, - }, - Requests: map[string]resource.Quantity{ - "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), // 16GB - "cpu": *resource.NewQuantity(8, resource.DecimalSI), - }, - }, - Status: v1alpha1.ReservationStatus{ - Phase: v1alpha1.ReservationStatusPhaseFailed, // Not active, should be ignored - Host: "host1", - }, - }, - } - - step := &FilterHasEnoughCapacity{} - step.Client = fake.NewClientBuilder(). - WithScheme(scheme). - WithRuntimeObjects( - &v1alpha1.Knowledge{ - ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostUtilizations}, - }, - ). - WithRuntimeObjects(func() []runtime.Object { - objs := []runtime.Object{} - for i := range reservations { - objs = append(objs, &reservations[i]) - } - return objs - }()...). - Build() - - // Test case: Request that would fit on host1 without reservations, but not with reservations - request := api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - NumInstances: 1, - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - VCPUs: 14, // host1 has 16 total, 4 reserved = 12 available, so this should fail - MemoryMB: 16384, // host1 has 32768 total, 4000 reserved = 28768 available, so this should pass - RootGB: 500, // host1 has 1000 total, 100 reserved = 900 available, so this should pass + name: "Empty host list", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), + }, + DomainInfos: []hv1.DomainInfo{}, }, }, }, - }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - }, - } - - result, err := step.Run(slog.Default(), request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Debug: Print the result to see what's happening - t.Logf("Result activations: %v", result.Activations) - - // host1 should be filtered out due to insufficient vCPUs after reservations (16 - 4 = 12 < 14) - if _, ok := result.Activations["host1"]; ok { - t.Error("expected host1 to be filtered out due to reservations consuming vCPUs") - } - - // host2 should be filtered out due to insufficient vCPUs (8 - 4 = 4 < 14) - if _, ok := result.Activations["host2"]; ok { - t.Error("expected host2 to be filtered out due to insufficient vCPUs") - } - - // Test case: Request that fits after accounting for reservations - request2 := api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - NumInstances: 1, - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - VCPUs: 10, // host1 has 16 - 4 
= 12 available, so this should pass - MemoryMB: 20480, // host1 has 32768 - 4096 = 28672 available, so this should pass - RootGB: 800, // host1 has 1000 - 100 = 900 available, so this should pass + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + NumInstances: 1, + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + VCPUs: 4, + MemoryMB: 8000, + }, + }, }, }, + Hosts: []api.ExternalSchedulerHost{}, }, + expectedHosts: []string{}, + filteredHosts: []string{}, + expectError: false, }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - }, - } - - result2, err := step.Run(slog.Default(), request2) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // host1 should pass (16-4=12 vCPUs >= 10, 32768-4096=28672 MB >= 20480, 1000-100=900 GB >= 800) - if _, ok := result2.Activations["host1"]; !ok { - t.Error("expected host1 to be available after accounting for reservations") - } - - // host2 should be filtered out (8-4=4 vCPUs < 10) - if _, ok := result2.Activations["host2"]; ok { - t.Error("expected host2 to be filtered out due to insufficient vCPUs after reservations") - } -} - -func TestFilterHasEnoughCapacity_ReservationMatching(t *testing.T) { - scheme, err := v1alpha1.SchemeBuilder.Build() - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock data into the feature_host_utilization table - hostUtilizations, err := v1alpha1.BoxFeatureList([]any{ - &compute.HostUtilization{ComputeHost: "host1", RAMUtilizedPct: 50.0, VCPUsUtilizedPct: 40.0, DiskUtilizedPct: 30.0, TotalRAMAllocatableMB: 16384, TotalVCPUsAllocatable: 8, TotalDiskAllocatableGB: 500}, // Limited capacity host - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - tests := []struct { - name string - reservations []v1alpha1.Reservation - request api.ExternalSchedulerRequest - expectedHostPresent bool - description string - }{ { - name: "Reservation matches request - resources should be unlocked", - reservations: []v1alpha1.Reservation{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "matching-reservation", - Namespace: "test-namespace", - }, - Spec: v1alpha1.ReservationSpec{ - Scheduler: v1alpha1.ReservationSchedulerSpec{ - CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ - FlavorName: "test-flavor", // Matches request - ProjectID: "test-project", // Matches request - DomainID: "test-domain", - }, + name: "Flavor with zero vCPUs - error", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), }, - Requests: map[string]resource.Quantity{ - "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8GB - consumes all memory - "cpu": *resource.NewQuantity(4, resource.DecimalSI), // 4 vCPUs - consumes half vCPUs - }, - }, - Status: v1alpha1.ReservationStatus{ - Phase: v1alpha1.ReservationStatusPhaseActive, - Host: "host1", + DomainInfos: []hv1.DomainInfo{}, }, }, }, @@ -670,13 +654,10 @@ func TestFilterHasEnoughCapacity_ReservationMatching(t *testing.T) { Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, - ProjectID: "test-project", // Matches reservation Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - Name: "test-flavor", // Matches reservation - VCPUs: 6, // Would normally fail (8 - 4 = 4 < 6), but reservation should be 
unlocked - MemoryMB: 12288, // Would normally fail (16384 - 8192 = 8192 < 12288), but reservation should be unlocked - RootGB: 200, + VCPUs: 0, + MemoryMB: 8000, }, }, }, @@ -685,33 +666,21 @@ func TestFilterHasEnoughCapacity_ReservationMatching(t *testing.T) { {ComputeHost: "host1"}, }, }, - expectedHostPresent: true, - description: "When ProjectID and FlavorName match, reservation resources should be unlocked allowing the request to succeed", + expectedHosts: []string{}, + filteredHosts: []string{}, + expectError: true, }, { - name: "Reservation does not match ProjectID - resources remain reserved", - reservations: []v1alpha1.Reservation{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "non-matching-project-reservation", - Namespace: "test-namespace", - }, - Spec: v1alpha1.ReservationSpec{ - Scheduler: v1alpha1.ReservationSchedulerSpec{ - CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ - FlavorName: "test-flavor", // Matches request - ProjectID: "different-project", // Does NOT match request - DomainID: "test-domain", - }, - }, - Requests: map[string]resource.Quantity{ - "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8GB - "cpu": *resource.NewQuantity(4, resource.DecimalSI), // 4 vCPUs + name: "Flavor with zero memory - error", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("16"), + HostMemory: resource.MustParse("32Gi"), }, - }, - Status: v1alpha1.ReservationStatus{ - Phase: v1alpha1.ReservationStatusPhaseActive, - Host: "host1", + DomainInfos: []hv1.DomainInfo{}, }, }, }, @@ -719,13 +688,10 @@ func TestFilterHasEnoughCapacity_ReservationMatching(t *testing.T) { Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, - ProjectID: "test-project", // Does NOT match reservation Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - Name: "test-flavor", // Matches reservation - VCPUs: 6, // Should fail (8 - 4 = 4 < 6) - MemoryMB: 12288, // Should fail (16384 - 8192 = 8192 < 12288) - RootGB: 200, + VCPUs: 4, + MemoryMB: 0, }, }, }, @@ -734,28 +700,70 @@ func TestFilterHasEnoughCapacity_ReservationMatching(t *testing.T) { {ComputeHost: "host1"}, }, }, - expectedHostPresent: false, - description: "When ProjectID does not match, reservation resources should remain reserved and request should fail", + expectedHosts: []string{}, + filteredHosts: []string{}, + expectError: true, }, { - name: "Reservation does not match FlavorName - resources remain reserved", - reservations: []v1alpha1.Reservation{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "non-matching-flavor-reservation", - Namespace: "test-namespace", + name: "Complex scenario - multiple hosts, VMs, and reservations", + hypervisors: []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + HostCpus: resource.MustParse("32"), + HostMemory: resource.MustParse("64Gi"), + }, + DomainInfos: []hv1.DomainInfo{ + { + Name: "instance-1", + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), + }, + }, + { + Name: "instance-2", + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("4"), + "memory": resource.MustParse("8Gi"), + }, + }, + }, }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{Name: "host2"}, + Status: hv1.HypervisorStatus{ + Capabilities: hv1.Capabilities{ + 
HostCpus: resource.MustParse("32"), + HostMemory: resource.MustParse("64Gi"), + }, + DomainInfos: []hv1.DomainInfo{ + { + Name: "instance-3", + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("16"), + "memory": resource.MustParse("32Gi"), + }, + }, + }, + }, + }, + }, + reservations: []client.Object{ + &v1alpha1.Reservation{ + ObjectMeta: v1.ObjectMeta{Name: "reservation-1"}, Spec: v1alpha1.ReservationSpec{ Scheduler: v1alpha1.ReservationSchedulerSpec{ CortexNova: &v1alpha1.ReservationSchedulerSpecCortexNova{ - FlavorName: "different-flavor", // Does NOT match request - ProjectID: "test-project", // Matches request - DomainID: "test-domain", + ProjectID: "other-project", + FlavorName: "other-flavor", }, }, Requests: map[string]resource.Quantity{ - "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8GB - "cpu": *resource.NewQuantity(4, resource.DecimalSI), // 4 vCPUs + "cpu": resource.MustParse("8"), + "memory": resource.MustParse("16Gi"), }, }, Status: v1alpha1.ReservationStatus{ @@ -768,60 +776,69 @@ func TestFilterHasEnoughCapacity_ReservationMatching(t *testing.T) { Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ NumInstances: 1, - ProjectID: "test-project", // Matches reservation + ProjectID: "test-project", Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - Name: "test-flavor", // Does NOT match reservation - VCPUs: 6, // Should fail (8 - 4 = 4 < 6) - MemoryMB: 12288, // Should fail (16384 - 8192 = 8192 < 12288) - RootGB: 200, + Name: "test-flavor", + VCPUs: 8, + MemoryMB: 16000, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, + {ComputeHost: "host2"}, }, }, - expectedHostPresent: false, - description: "When FlavorName does not match, reservation resources should remain reserved and request should fail", + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{}, + expectError: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + //nolint:gocritic + objects := append(tt.hypervisors, tt.reservations...) step := &FilterHasEnoughCapacity{} step.Client = fake.NewClientBuilder(). WithScheme(scheme). - WithRuntimeObjects( - &v1alpha1.Knowledge{ - ObjectMeta: metav1.ObjectMeta{Name: "host-utilization"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostUtilizations}, - }, - ). - WithRuntimeObjects(func() []runtime.Object { - objs := []runtime.Object{} - for i := range tt.reservations { - objs = append(objs, &tt.reservations[i]) - } - return objs - }()...). + WithObjects(objects...). 
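+ // Seed the fake client with both the hypervisor and reservation fixtures for this case.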
Build() + step.Options = tt.options result, err := step.Run(slog.Default(), tt.request) + + if tt.expectError { + if err == nil { + t.Fatalf("expected error, got nil") + } + return + } + if err != nil { t.Fatalf("expected no error, got %v", err) } - // Check if host is present or absent as expected - _, hostPresent := result.Activations["host1"] - if hostPresent != tt.expectedHostPresent { - t.Errorf("Test case: %s\nExpected host1 present: %v, got: %v\nDescription: %s", - tt.name, tt.expectedHostPresent, hostPresent, tt.description) + // Check expected hosts are present + for _, host := range tt.expectedHosts { + if _, ok := result.Activations[host]; !ok { + t.Errorf("expected host %s to be present in activations", host) + } + } + + // Check filtered hosts are not present + for _, host := range tt.filteredHosts { + if _, ok := result.Activations[host]; ok { + t.Errorf("expected host %s to be filtered out", host) + } } - // Debug information - t.Logf("Test: %s, Host present: %v, Activations: %v", tt.name, hostPresent, result.Activations) + // Check total count + if len(result.Activations) != len(tt.expectedHosts) { + t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) + } }) } } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go index e499c4fa..14cf927a 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits.go @@ -6,13 +6,12 @@ package filters import ( "context" "log/slog" + "slices" "strings" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" - "sigs.k8s.io/controller-runtime/pkg/client" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) type FilterHasRequestedTraits struct { @@ -41,55 +40,43 @@ func (s *FilterHasRequestedTraits) Run(traceLog *slog.Logger, request api.Extern traceLog.Debug("no traits requested, skipping filter") return result, nil } - knowledge := &v1alpha1.Knowledge{} - if err := s.Client.Get( - context.Background(), - client.ObjectKey{Name: "host-capabilities"}, - knowledge, - ); err != nil { - return nil, err - } - hostCapabilities, err := v1alpha1. - UnboxFeatureList[compute.HostCapabilities](knowledge.Status.Raw) - if err != nil { + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) return nil, err } - hostsEncountered := map[string]struct{}{} - for _, cap := range hostCapabilities { - hostsEncountered[cap.ComputeHost] = struct{}{} - providedTraits := cap.Traits // Comma-separated list. + + hostsMatchingAllTraits := map[string]struct{}{} + for _, hv := range hvs.Items { allRequiredPresent := true + traits := hv.Status.Traits + traits = append(traits, hv.Spec.CustomTraits...) 
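+ // Require every requested trait to be present in the combined trait set.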
for _, required := range requiredTraits { - if !strings.Contains(providedTraits, required) { + if !slices.Contains(traits, required) { allRequiredPresent = false break } } allForbiddenAbsent := true for _, forbidden := range forbiddenTraits { - if strings.Contains(providedTraits, forbidden) { + if slices.Contains(traits, forbidden) { allForbiddenAbsent = false break } } - if !allRequiredPresent || !allForbiddenAbsent { - delete(result.Activations, cap.ComputeHost) - traceLog.Debug( - "filtering host which does not match trait check", - "host", cap.ComputeHost, "want", requiredTraits, - "forbid", forbiddenTraits, "have", providedTraits, - ) + if allRequiredPresent && allForbiddenAbsent { + hostsMatchingAllTraits[hv.Name] = struct{}{} } } - // Remove all hosts that weren't encountered. + + traceLog.Info("hosts matching requested traits", "hosts", hostsMatchingAllTraits) for host := range result.Activations { - if _, ok := hostsEncountered[host]; !ok { - delete(result.Activations, host) - traceLog.Debug( - "removing host with unknown capabilities", - "host", host, - ) + if _, ok := hostsMatchingAllTraits[host]; ok { + continue } + delete(result.Activations, host) + traceLog.Info("filtering host not matching requested traits", "host", host) } return result, nil } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits_test.go index ab5c9cd8..1d53bb2b 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_has_requested_traits_test.go @@ -8,29 +8,67 @@ import ( "testing" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestFilterHasRequestedTraits_Run(t *testing.T) { - scheme, err := v1alpha1.SchemeBuilder.Build() + scheme, err := hv1.SchemeBuilder.Build() if err != nil { t.Fatalf("expected no error, got %v", err) } - // Insert mock data into the feature_host_capabilities table - hostCapabilities, err := v1alpha1.BoxFeatureList([]any{ - &compute.HostCapabilities{ComputeHost: "host1", Traits: "COMPUTE_ACCELERATORS,COMPUTE_NET_VIRTIO_PACKED,CUSTOM_GPU_NVIDIA", HypervisorType: "QEMU"}, - &compute.HostCapabilities{ComputeHost: "host2", Traits: "COMPUTE_STATUS_ENABLED,COMPUTE_NET_VIRTIO", HypervisorType: "QEMU"}, - &compute.HostCapabilities{ComputeHost: "host3", Traits: "COMPUTE_ACCELERATORS,COMPUTE_STATUS_ENABLED,CUSTOM_STORAGE_SSD", HypervisorType: "VMware"}, - &compute.HostCapabilities{ComputeHost: "host4", Traits: "COMPUTE_NET_VIRTIO_PACKED,CUSTOM_CPU_AVX512", HypervisorType: "QEMU"}, - &compute.HostCapabilities{ComputeHost: "host5", Traits: "", HypervisorType: "QEMU"}, - &compute.HostCapabilities{ComputeHost: "host6", Traits: "COMPUTE_ACCELERATORS,CUSTOM_GPU_AMD,CUSTOM_STORAGE_NVME", HypervisorType: "QEMU"}, - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_TRAIT_A", "CUSTOM_TRAIT_B"}, + }, + }, + 
&hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_TRAIT_A", "CUSTOM_TRAIT_C"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_TRAIT_B", "CUSTOM_TRAIT_C"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_TRAIT_D"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host6", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"CUSTOM_TRAIT_A", "CUSTOM_TRAIT_B", "CUSTOM_TRAIT_C", "CUSTOM_TRAIT_D"}, + }, + }, } tests := []struct { @@ -40,15 +78,13 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { filteredHosts []string }{ { - name: "No traits requested - no filtering", + name: "No traits requested - all hosts pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "hw:cpu_policy": "dedicated", - }, + ExtraSpecs: map[string]string{}, }, }, }, @@ -57,23 +93,20 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host2", "host3", "host4", "host5", "host6"}, + expectedHosts: []string{"host1", "host2", "host3"}, filteredHosts: []string{}, }, { - name: "Single required trait - filter hosts without it", + name: "Single required trait - hosts without it are filtered", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "required", + "trait:CUSTOM_TRAIT_A": "required", }, }, }, @@ -84,22 +117,21 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host2"}, {ComputeHost: "host3"}, {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host3", "host6"}, // Only hosts with COMPUTE_ACCELERATORS - filteredHosts: []string{"host2", "host4", "host5"}, // Hosts without COMPUTE_ACCELERATORS + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Single forbidden trait - filter hosts with it", + name: "Multiple required traits - only hosts with all of them pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "forbidden", + "trait:CUSTOM_TRAIT_A": "required", + "trait:CUSTOM_TRAIT_B": "required", }, }, }, @@ -109,24 +141,21 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host2", "host4", "host5"}, // Hosts without COMPUTE_ACCELERATORS - filteredHosts: []string{"host1", "host3", "host6"}, // Hosts with COMPUTE_ACCELERATORS + expectedHosts: []string{"host1", "host6"}, + filteredHosts: []string{"host2", "host3"}, }, { - name:
"Multiple required traits - filter hosts missing any", + name: "Single forbidden trait - filter hosts without trait", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "required", - "trait:COMPUTE_NET_VIRTIO_PACKED": "required", + "trait:CUSTOM_TRAIT_A": "forbidden", }, }, }, @@ -138,22 +167,21 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host3"}, {ComputeHost: "host4"}, {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1"}, // Only host1 has both traits - filteredHosts: []string{"host2", "host3", "host4", "host5", "host6"}, + expectedHosts: []string{"host3", "host4", "host5"}, + filteredHosts: []string{"host1", "host2"}, }, { - name: "Multiple forbidden traits - filter hosts with any", + name: "Multiple forbidden traits - filter hosts without any of them", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "forbidden", - "trait:CUSTOM_CPU_AVX512": "forbidden", + "trait:CUSTOM_TRAIT_A": "forbidden", + "trait:CUSTOM_TRAIT_B": "forbidden", }, }, }, @@ -165,11 +193,10 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host3"}, {ComputeHost: "host4"}, {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host2", "host5"}, // Hosts without any forbidden traits - filteredHosts: []string{"host1", "host3", "host4", "host6"}, + expectedHosts: []string{"host4", "host5"}, + filteredHosts: []string{"host1", "host2", "host3"}, }, { name: "Mixed required and forbidden traits", @@ -179,8 +206,8 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_STATUS_ENABLED": "required", - "trait:COMPUTE_ACCELERATORS": "forbidden", + "trait:CUSTOM_TRAIT_A": "required", + "trait:CUSTOM_TRAIT_D": "forbidden", }, }, }, @@ -189,24 +216,22 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, {ComputeHost: "host2"}, - {ComputeHost: "host3"}, {ComputeHost: "host4"}, - {ComputeHost: "host5"}, {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host2"}, // Only host2 has required trait and not forbidden trait - filteredHosts: []string{"host1", "host3", "host4", "host5", "host6"}, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host4", "host6"}, }, { - name: "Custom traits - required", + name: "Required trait that no host has", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:CUSTOM_GPU_NVIDIA": "required", + "trait:CUSTOM_TRAIT_NONEXISTENT": "required", }, }, }, @@ -216,23 +241,20 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1"}, // Only host1 has CUSTOM_GPU_NVIDIA - filteredHosts: []string{"host2", "host3", "host4", "host5", "host6"}, + expectedHosts: []string{}, + filteredHosts: []string{"host1", "host2", "host3"}, }, { - name: "Custom 
traits - forbidden", + name: "Forbidden trait that no host has - all pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:CUSTOM_STORAGE_SSD": "forbidden", + "trait:CUSTOM_TRAIT_NONEXISTENT": "forbidden", }, }, }, @@ -242,24 +264,20 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host2", "host4", "host5", "host6"}, // All except host3 - filteredHosts: []string{"host3"}, // host3 has CUSTOM_STORAGE_SSD + expectedHosts: []string{"host1", "host2", "host3"}, + filteredHosts: []string{}, }, { - name: "Invalid trait value - ignored", + name: "Host with no traits - required trait filters it out", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "invalid_value", - "trait:COMPUTE_STATUS_ENABLED": "required", + "trait:CUSTOM_TRAIT_A": "required", }, }, }, @@ -267,28 +285,21 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host2", "host3"}, // Only hosts with COMPUTE_STATUS_ENABLED (invalid value ignored) - filteredHosts: []string{"host1", "host4", "host5", "host6"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host5"}, }, { - name: "Non-trait extra specs - ignored", + name: "Host with no traits - forbidden trait lets it pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "hw:cpu_policy": "dedicated", - "accel:device_profile": "gpu-profile", - "trait:COMPUTE_ACCELERATORS": "required", - "capabilities:hypervisor_type": "QEMU", + "trait:CUSTOM_TRAIT_A": "forbidden", }, }, }, @@ -296,83 +307,76 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, {ComputeHost: "host5"}, - {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host1", "host3", "host6"}, // Only trait: prefixed specs are processed - filteredHosts: []string{"host2", "host4", "host5"}, + expectedHosts: []string{"host5"}, + filteredHosts: []string{"host1"}, }, { - name: "Host with empty traits", + name: "Non-trait extra specs are ignored", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_STATUS_ENABLED": "required", + "hw:cpu_policy": "dedicated", + "trait:CUSTOM_TRAIT_A": "required", }, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host5"}, // host5 has empty traits + {ComputeHost: "host1"}, + {ComputeHost: "host3"}, }, }, - expectedHosts: []string{}, - filteredHosts: []string{"host5"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3"}, }, { - name: "Host with empty traits - forbidden trait", + name: 
"Invalid trait value (not required or forbidden) - ignored", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "forbidden", + "trait:CUSTOM_TRAIT_A": "invalid", }, }, }, }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host5"}, // host5 has empty traits + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, }, }, - expectedHosts: []string{"host5"}, // Empty traits means no forbidden traits present + expectedHosts: []string{"host1", "host2", "host3"}, filteredHosts: []string{}, }, { - name: "No matching hosts", + name: "Empty host list", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:NONEXISTENT_TRAIT": "required", + "trait:CUSTOM_TRAIT_A": "required", }, }, }, }, }, - Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host1"}, - {ComputeHost: "host2"}, - {ComputeHost: "host3"}, - {ComputeHost: "host4"}, - {ComputeHost: "host5"}, - {ComputeHost: "host6"}, - }, + Hosts: []api.ExternalSchedulerHost{}, }, expectedHosts: []string{}, - filteredHosts: []string{"host1", "host2", "host3", "host4", "host5", "host6"}, + filteredHosts: []string{}, }, { name: "Host not in database", @@ -382,7 +386,7 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "required", + "trait:CUSTOM_TRAIT_A": "required", }, }, }, @@ -394,39 +398,19 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { }, }, expectedHosts: []string{"host1"}, - filteredHosts: []string{"host-unknown"}, // Host not in database gets filtered out - }, - { - name: "Empty host list", - request: api.ExternalSchedulerRequest{ - Spec: api.NovaObject[api.NovaSpec]{ - Data: api.NovaSpec{ - Flavor: api.NovaObject[api.NovaFlavor]{ - Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "required", - }, - }, - }, - }, - }, - Hosts: []api.ExternalSchedulerHost{}, - }, - expectedHosts: []string{}, - filteredHosts: []string{}, + filteredHosts: []string{"host-unknown"}, }, { - name: "Complex scenario with multiple requirements and restrictions", + name: "Complex scenario with many traits", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ ExtraSpecs: map[string]string{ - "trait:COMPUTE_ACCELERATORS": "required", - "trait:CUSTOM_GPU_AMD": "forbidden", - "trait:COMPUTE_NET_VIRTIO_PACKED": "forbidden", - "hw:cpu_policy": "dedicated", // Should be ignored + "trait:CUSTOM_TRAIT_A": "required", + "trait:CUSTOM_TRAIT_B": "required", + "trait:CUSTOM_TRAIT_D": "forbidden", }, }, }, @@ -441,134 +425,20 @@ func TestFilterHasRequestedTraits_Run(t *testing.T) { {ComputeHost: "host6"}, }, }, - expectedHosts: []string{"host3"}, // Only host3 has COMPUTE_ACCELERATORS but not the forbidden traits - filteredHosts: []string{"host1", "host2", "host4", "host5", "host6"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - step := &FilterHasRequestedTraits{} - step.Client = fake.NewClientBuilder(). - WithScheme(scheme). 
- WithRuntimeObjects( - &v1alpha1.Knowledge{ - ObjectMeta: metav1.ObjectMeta{Name: "host-capabilities"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostCapabilities}, - }, - ). - Build() - result, err := step.Run(slog.Default(), tt.request) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Check expected hosts are present - for _, host := range tt.expectedHosts { - if _, ok := result.Activations[host]; !ok { - t.Errorf("expected host %s to be present in activations", host) - } - } - - // Check filtered hosts are not present - for _, host := range tt.filteredHosts { - if _, ok := result.Activations[host]; ok { - t.Errorf("expected host %s to be filtered out", host) - } - } - - // Check total count - if len(result.Activations) != len(tt.expectedHosts) { - t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) - } - }) - } -} - -func TestFilterHasRequestedTraits_TraitParsing(t *testing.T) { - // Set log level debug - slog.SetLogLoggerLevel(slog.LevelDebug) - - scheme, err := v1alpha1.SchemeBuilder.Build() - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert test data with edge cases in trait names - hostCapabilitiesEdgeCases, err := v1alpha1.BoxFeatureList([]any{ - &compute.HostCapabilities{ComputeHost: "host1", Traits: "TRAIT_WITH_UNDERSCORES,TRAIT-WITH-DASHES,TRAIT.WITH.DOTS", HypervisorType: "QEMU"}, - &compute.HostCapabilities{ComputeHost: "host2", Traits: "VERY_LONG_TRAIT_NAME_WITH_MANY_CHARACTERS_AND_NUMBERS_123", HypervisorType: "QEMU"}, - &compute.HostCapabilities{ComputeHost: "host3", Traits: "SHORT,A,B,C", HypervisorType: "QEMU"}, - }) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - tests := []struct { - name string - extraSpecs map[string]string - expectedHosts []string - filteredHosts []string - }{ - { - name: "Trait with underscores", - extraSpecs: map[string]string{ - "trait:TRAIT_WITH_UNDERSCORES": "required", - }, expectedHosts: []string{"host1"}, - filteredHosts: []string{"host2", "host3"}, - }, - { - name: "Trait with dashes", - extraSpecs: map[string]string{ - "trait:TRAIT-WITH-DASHES": "required", - }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host2", "host3"}, - }, - { - name: "Trait with dots", - extraSpecs: map[string]string{ - "trait:TRAIT.WITH.DOTS": "required", - }, - expectedHosts: []string{"host1"}, - filteredHosts: []string{"host2", "host3"}, - }, - { - name: "Very long trait name", - extraSpecs: map[string]string{ - "trait:VERY_LONG_TRAIT_NAME_WITH_MANY_CHARACTERS_AND_NUMBERS_123": "required", - }, - expectedHosts: []string{"host2"}, - filteredHosts: []string{"host1", "host3"}, - }, - { - name: "Short trait names", - extraSpecs: map[string]string{ - "trait:A": "required", - "trait:B": "required", - }, - expectedHosts: []string{"host2", "host3"}, // host2's long trait contains both "A" and "B", host3 has both traits - filteredHosts: []string{"host1"}, // host1 doesn't have "A" or "B" in its traits + filteredHosts: []string{"host2", "host3", "host4", "host5", "host6"}, }, { - name: "Case sensitivity test", - extraSpecs: map[string]string{ - "trait:short": "required", // lowercase, should not match "SHORT" - }, - expectedHosts: []string{}, - filteredHosts: []string{"host1", "host2", "host3"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - request := api.ExternalSchedulerRequest{ + name: "All hosts match required and forbidden traits", + request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ 
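+ // Neither fixture host carries CUSTOM_TRAIT_E, so the forbidden check passes for both.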
Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - ExtraSpecs: tt.extraSpecs, + ExtraSpecs: map[string]string{ + "trait:CUSTOM_TRAIT_A": "required", + "trait:CUSTOM_TRAIT_E": "forbidden", + }, }, }, }, @@ -576,21 +446,22 @@ func TestFilterHasRequestedTraits_TraitParsing(t *testing.T) { Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, {ComputeHost: "host2"}, - {ComputeHost: "host3"}, }, - } + }, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { step := &FilterHasRequestedTraits{} step.Client = fake.NewClientBuilder(). WithScheme(scheme). - WithRuntimeObjects( - &v1alpha1.Knowledge{ - ObjectMeta: metav1.ObjectMeta{Name: "host-capabilities"}, - Status: v1alpha1.KnowledgeStatus{Raw: hostCapabilitiesEdgeCases}, - }, - ). + WithObjects(hvs...). Build() - result, err := step.Run(slog.Default(), request) + + result, err := step.Run(slog.Default(), tt.request) if err != nil { t.Fatalf("expected no error, got %v", err) } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go new file mode 100644 index 00000000..15dc4eaf --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance.go @@ -0,0 +1,65 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "context" + "log/slog" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" +) + +type FilterMaintenanceStep struct { + lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] +} + +// Check that the maintenance spec of the hypervisor doesn't prevent scheduling. 
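+// Hosts in manual maintenance or pending termination are filtered out, +// while unset, automatic, and HA maintenance still allow placements. +// Hosts reporting an unknown maintenance flag are filtered conservatively.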
+func (s *FilterMaintenanceStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { + result := s.PrepareResult(request) + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) + return nil, err + } + + flagsPreventingScheduling := map[string]bool{ + hv1.MaintenanceUnset: false, + hv1.MaintenanceManual: true, + hv1.MaintenanceAuto: false, + hv1.MaintenanceHA: false, + hv1.MaintenanceTermination: true, + } + + var hostsReady = make(map[string]struct{}) + for _, hv := range hvs.Items { + preventScheduling, ok := flagsPreventingScheduling[hv.Spec.Maintenance] + if !ok { + traceLog.Info( + "hypervisor has unknown maintenance flag, filtering host", + "host", hv.Name, "maintenance", hv.Spec.Maintenance, + ) + continue + } + if preventScheduling { + traceLog.Info( + "hypervisor maintenance flag prevents scheduling, filtering host", + "host", hv.Name, "maintenance", hv.Spec.Maintenance, + ) + continue + } + hostsReady[hv.Name] = struct{}{} + } + + traceLog.Info("hosts passing maintenance filter", "hosts", hostsReady) + for host := range result.Activations { + if _, ok := hostsReady[host]; ok { + continue + } + delete(result.Activations, host) + } + return result, nil +} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance_test.go new file mode 100644 index 00000000..c7f144f4 --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_maintenance_test.go @@ -0,0 +1,213 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "log/slog" + "testing" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFilterMaintenanceStep_Run(t *testing.T) { + scheme, err := hv1.SchemeBuilder.Build() + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Spec: hv1.HypervisorSpec{ + Maintenance: hv1.MaintenanceUnset, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Spec: hv1.HypervisorSpec{ + Maintenance: hv1.MaintenanceAuto, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Spec: hv1.HypervisorSpec{ + Maintenance: hv1.MaintenanceManual, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Spec: hv1.HypervisorSpec{ + Maintenance: hv1.MaintenanceHA, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + }, + Spec: hv1.HypervisorSpec{ + Maintenance: hv1.MaintenanceTermination, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host6", + }, + Spec: hv1.HypervisorSpec{ + Maintenance: "unknown-flag", + }, + }, + } + + tests := []struct { + name string + request api.ExternalSchedulerRequest + expectedHosts []string + filteredHosts []string + }{ + { + name: "Filter hosts with maintenance preventing scheduling", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + {ComputeHost: "host5"}, + }, + }, + expectedHosts: 
[]string{"host1", "host2", "host4"}, + filteredHosts: []string{"host3", "host5"}, + }, + { + name: "Only unset maintenance hosts", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{}, + }, + { + name: "Only manual maintenance hosts", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host3"}, + }, + { + name: "Only termination maintenance hosts", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host5"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host5"}, + }, + { + name: "Auto and HA maintenance hosts should pass", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host2"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host2", "host4"}, + filteredHosts: []string{}, + }, + { + name: "Unknown maintenance flag should be filtered", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host6"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host6"}, + }, + { + name: "Empty host list", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Host not in database", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host-unknown"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host-unknown"}, + }, + { + name: "Mixed maintenance states", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + {ComputeHost: "host5"}, + {ComputeHost: "host6"}, + }, + }, + expectedHosts: []string{"host1", "host2", "host4"}, + filteredHosts: []string{"host3", "host5", "host6"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + step := &FilterMaintenanceStep{} + step.Client = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(hvs...). 
+ Build() + result, err := step.Run(slog.Default(), tt.request) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Check expected hosts are present + for _, host := range tt.expectedHosts { + if _, ok := result.Activations[host]; !ok { + t.Errorf("expected host %s to be present in activations", host) + } + } + + // Check filtered hosts are not present + for _, host := range tt.filteredHosts { + if _, ok := result.Activations[host]; ok { + t.Errorf("expected host %s to be filtered out", host) + } + } + + // Check total count + if len(result.Activations) != len(tt.expectedHosts) { + t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) + } + }) + } +} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go index c8efc389..836ffd05 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue.go @@ -4,13 +4,13 @@ package filters import ( + "context" "log/slog" - "strings" + "slices" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" ) type FilterPackedVirtqueueStep struct { @@ -26,24 +26,29 @@ func (s *FilterPackedVirtqueueStep) Run(traceLog *slog.Logger, request api.Exter if !reqInSpecs && !reqInProps { return result, nil // No packed virtqueue requested, nothing to filter. } - var computeHostsWithPackedVirtqueues []string - if _, err := s.DB.SelectTimed("scheduler-nova", &computeHostsWithPackedVirtqueues, ` - SELECT h.service_host - FROM `+placement.Trait{}.TableName()+` rpt - JOIN `+nova.Hypervisor{}.TableName()+` h - ON h.id = rpt.resource_provider_uuid - WHERE name = 'COMPUTE_NET_VIRTIO_PACKED'`, - map[string]any{"az": request.Spec.Data.AvailabilityZone}, - ); err != nil { + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) return nil, err } - lookupStr := strings.Join(computeHostsWithPackedVirtqueues, ",") + hvsWithTrait := make(map[string]struct{}) + for _, hv := range hvs.Items { + traits := hv.Status.Traits + traits = append(traits, hv.Spec.CustomTraits...) 
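+ // The trait may come from the reported status traits or from spec.CustomTraits.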
+ if !slices.Contains(traits, "COMPUTE_NET_VIRTIO_PACKED") { + continue + } + hvsWithTrait[hv.Name] = struct{}{} + } + + traceLog.Info("hosts with packed virtqueues", "hosts", hvsWithTrait) for host := range result.Activations { - if strings.Contains(lookupStr, host) { + if _, ok := hvsWithTrait[host]; ok { continue } delete(result.Activations, host) - traceLog.Debug("filtering host which has no packed virtqueues", "host", host) + traceLog.Info("filtering host without packed virtqueues", "host", host) } return result, nil } diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue_test.go index 8441d0f5..30e77a26 100644 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue_test.go +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_packed_virtqueue_test.go @@ -8,46 +8,51 @@ import ( "testing" api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/nova" - "github.com/cobaltcore-dev/cortex/internal/knowledge/datasources/openstack/placement" - "github.com/cobaltcore-dev/cortex/pkg/db" - - testlibDB "github.com/cobaltcore-dev/cortex/pkg/db/testing" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestFilterPackedVirtqueueStep_Run(t *testing.T) { - dbEnv := testlibDB.SetupDBEnv(t) - testDB := db.DB{DbMap: dbEnv.DbMap} - defer dbEnv.Close() - // Create dependency tables - err := testDB.CreateTable( - testDB.AddTable(nova.Hypervisor{}), - testDB.AddTable(placement.Trait{}), - ) + scheme, err := hv1.SchemeBuilder.Build() if err != nil { t.Fatalf("expected no error, got %v", err) } - // Insert mock hypervisor data - hypervisors := []any{ - &nova.Hypervisor{ID: "hv1", Hostname: "hypervisor1", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.1", ServiceID: "svc1", ServiceHost: "host1", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv2", Hostname: "hypervisor2", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.2", ServiceID: "svc2", ServiceHost: "host2", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv3", Hostname: "hypervisor3", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.3", ServiceID: "svc3", ServiceHost: "host3", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - &nova.Hypervisor{ID: "hv4", Hostname: "hypervisor4", State: "up", Status: "enabled", HypervisorType: "QEMU", HypervisorVersion: 2008000, HostIP: "192.168.1.4", ServiceID: "svc4", ServiceHost: "host4", VCPUs: 16, MemoryMB: 32768, LocalGB: 1000, VCPUsUsed: 4, MemoryMBUsed: 8192, LocalGBUsed: 100, FreeRAMMB: 24576, FreeDiskGB: 900, 
CurrentWorkload: 0, RunningVMs: 2, DiskAvailableLeast: &[]int{900}[0], CPUInfo: "{}"}, - } - if err := testDB.Insert(hypervisors...); err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Insert mock trait data - host1 and host3 support packed virtqueues - traits := []any{ - &placement.Trait{ResourceProviderUUID: "hv1", Name: "COMPUTE_NET_VIRTIO_PACKED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv2", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv3", Name: "COMPUTE_NET_VIRTIO_PACKED", ResourceProviderGeneration: 1}, - &placement.Trait{ResourceProviderUUID: "hv4", Name: "COMPUTE_STATUS_ENABLED", ResourceProviderGeneration: 1}, - } - if err := testDB.Insert(traits...); err != nil { - t.Fatalf("expected no error, got %v", err) + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"COMPUTE_NET_VIRTIO_PACKED"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"COMPUTE_NET_VIRTIO_PACKED", "SOME_OTHER_TRAIT"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{"SOME_OTHER_TRAIT"}, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Status: hv1.HypervisorStatus{ + Traits: []string{}, + }, + }, } tests := []struct { @@ -57,23 +62,19 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { filteredHosts []string }{ { - name: "No packed virtqueue requested - no filtering", + name: "No packed virtqueue requested - all hosts pass", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ Flavor: api.NovaObject[api.NovaFlavor]{ Data: api.NovaFlavor{ - ExtraSpecs: map[string]string{ - "hw:cpu_policy": "dedicated", - }, + ExtraSpecs: map[string]string{}, }, }, Image: api.NovaObject[api.NovaImageMeta]{ Data: api.NovaImageMeta{ Properties: api.NovaObject[map[string]any]{ - Data: map[string]any{ - "hw_disk_bus": "virtio", - }, + Data: map[string]any{}, }, }, }, @@ -90,7 +91,7 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { filteredHosts: []string{}, }, { - name: "Packed virtqueue requested in flavor - filter hosts without support", + name: "Packed virtqueue requested in flavor extra specs", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -117,11 +118,11 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host3"}, // Only hosts with COMPUTE_NET_VIRTIO_PACKED trait - filteredHosts: []string{"host2", "host4"}, // Hosts without packed virtqueue support are filtered out + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Packed virtqueue requested in image properties - filter hosts without support", + name: "Packed virtqueue requested in image properties", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -148,11 +149,11 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host3"}, - filteredHosts: []string{"host2", "host4"}, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "Packed virtqueue requested in both flavor and image - filter 
hosts without support", + name: "Packed virtqueue requested in both flavor and image", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -176,16 +177,14 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host2"}, {ComputeHost: "host3"}, - {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host3"}, - filteredHosts: []string{"host2", "host4"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3"}, }, { - name: "Packed virtqueue set to false - no filtering", + name: "Packed virtqueue with false value in flavor - still triggers filter", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -209,14 +208,42 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { {ComputeHost: "host1"}, {ComputeHost: "host2"}, {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3"}, + }, + { + name: "Packed virtqueue with empty value in image - still triggers filter", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{}, + }, + }, + Image: api.NovaObject[api.NovaImageMeta]{ + Data: api.NovaImageMeta{ + Properties: api.NovaObject[map[string]any]{ + Data: map[string]any{ + "hw_virtio_packed_ring": "", + }, + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, {ComputeHost: "host4"}, }, }, - expectedHosts: []string{"host1", "host3"}, // Still filters because the key exists - filteredHosts: []string{"host2", "host4"}, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host4"}, }, { - name: "All hosts without packed virtqueue support", + name: "No hosts with trait - all filtered", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -237,15 +264,15 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { }, }, Hosts: []api.ExternalSchedulerHost{ - {ComputeHost: "host2"}, + {ComputeHost: "host3"}, {ComputeHost: "host4"}, }, }, expectedHosts: []string{}, - filteredHosts: []string{"host2", "host4"}, + filteredHosts: []string{"host3", "host4"}, }, { - name: "All hosts with packed virtqueue support", + name: "All hosts have trait", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -267,14 +294,64 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { }, Hosts: []api.ExternalSchedulerHost{ {ComputeHost: "host1"}, - {ComputeHost: "host3"}, + {ComputeHost: "host2"}, }, }, - expectedHosts: []string{"host1", "host3"}, + expectedHosts: []string{"host1", "host2"}, filteredHosts: []string{}, }, { - name: "Host not in database", + name: "Empty host list with packed virtqueue requested", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "hw:virtio_packed_ring": "true", + }, + }, + }, + Image: api.NovaObject[api.NovaImageMeta]{ + Data: api.NovaImageMeta{ + Properties: api.NovaObject[map[string]any]{ + Data: map[string]any{}, + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Empty host list without packed virtqueue requested", + request: 
api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{}, + }, + }, + Image: api.NovaObject[api.NovaImageMeta]{ + Data: api.NovaImageMeta{ + Properties: api.NovaObject[map[string]any]{ + Data: map[string]any{}, + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Host not in database with packed virtqueue requested", request: api.ExternalSchedulerRequest{ Spec: api.NovaObject[api.NovaSpec]{ Data: api.NovaSpec{ @@ -302,12 +379,109 @@ func TestFilterPackedVirtqueueStep_Run(t *testing.T) { expectedHosts: []string{"host1"}, filteredHosts: []string{"host-unknown"}, }, + { + name: "Packed virtqueue with additional extra specs", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "hw:virtio_packed_ring": "true", + "hw:cpu_policy": "dedicated", + "hw:mem_page_size": "large", + }, + }, + }, + Image: api.NovaObject[api.NovaImageMeta]{ + Data: api.NovaImageMeta{ + Properties: api.NovaObject[map[string]any]{ + Data: map[string]any{}, + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host3"}, + }, + { + name: "Mixed hosts with and without trait", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{ + "hw:virtio_packed_ring": "true", + }, + }, + }, + Image: api.NovaObject[api.NovaImageMeta]{ + Data: api.NovaImageMeta{ + Properties: api.NovaObject[map[string]any]{ + Data: map[string]any{}, + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host1", "host2"}, + filteredHosts: []string{"host3", "host4"}, + }, + { + name: "Image property with additional properties", + request: api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{ + Data: api.NovaSpec{ + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + ExtraSpecs: map[string]string{}, + }, + }, + Image: api.NovaObject[api.NovaImageMeta]{ + Data: api.NovaImageMeta{ + Properties: api.NovaObject[map[string]any]{ + Data: map[string]any{ + "hw_virtio_packed_ring": "true", + "hw_disk_bus": "virtio", + "hw_vif_model": "virtio", + }, + }, + }, + }, + }, + }, + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host2"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host2"}, + filteredHosts: []string{"host4"}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { step := &FilterPackedVirtqueueStep{} - step.DB = &testDB + step.Client = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(hvs...). 
+ Build() + result, err := step.Run(slog.Default(), tt.request) if err != nil { t.Fatalf("expected no error, got %v", err) diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_project_aggregates.go b/internal/scheduling/decisions/nova/plugins/filters/filter_project_aggregates.go deleted file mode 100644 index 51781418..00000000 --- a/internal/scheduling/decisions/nova/plugins/filters/filter_project_aggregates.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright SAP SE -// SPDX-License-Identifier: Apache-2.0 - -package filters - -import ( - "context" - "log/slog" - "strings" - - api "github.com/cobaltcore-dev/cortex/api/delegation/nova" - "github.com/cobaltcore-dev/cortex/api/v1alpha1" - "github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute" - "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -type FilterProjectAggregatesStep struct { - lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] -} - -// Lock certain hosts for certain projects, based on the aggregate metadata. -// Note that hosts without aggregate tenant filter are still accessible. -func (s *FilterProjectAggregatesStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { - result := s.PrepareResult(request) - if request.Spec.Data.ProjectID == "" { - traceLog.Debug("no project ID in request, skipping filter") - return result, nil - } - knowledge := &v1alpha1.Knowledge{} - if err := s.Client.Get( - context.Background(), - client.ObjectKey{Name: "host-pinned-projects"}, - knowledge, - ); err != nil { - return nil, err - } - hostPinnedProjects, err := v1alpha1. - UnboxFeatureList[compute.HostPinnedProjects](knowledge.Status.Raw) - if err != nil { - return nil, err - } - var computeHostsMatchingProject []string - for _, hostProj := range hostPinnedProjects { - if hostProj.ComputeHost == nil { - traceLog.Warn("host pinned projects knowledge has nil compute host", "entry", hostProj) - continue - } - if hostProj.ProjectID == nil { - // Host is available for all projects. 
- computeHostsMatchingProject = append(computeHostsMatchingProject, *hostProj.ComputeHost) - continue - } - if *hostProj.ProjectID == request.Spec.Data.ProjectID { - computeHostsMatchingProject = append(computeHostsMatchingProject, *hostProj.ComputeHost) - } - } - lookupStr := strings.Join(computeHostsMatchingProject, ",") - for host := range result.Activations { - if strings.Contains(lookupStr, host) { - continue - } - delete(result.Activations, host) - traceLog.Debug("filtering host not matching project aggregates", "host", host) - } - return result, nil -} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go new file mode 100644 index 00000000..0ea1f037 --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions.go @@ -0,0 +1,82 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "context" + "log/slog" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type FilterStatusConditionsStep struct { + lib.BaseStep[api.ExternalSchedulerRequest, lib.EmptyStepOpts] +} + +// Check that all status conditions meet the expected values, for example, +// that the hypervisor is ready and not disabled. +func (s *FilterStatusConditionsStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.StepResult, error) { + result := s.PrepareResult(request) + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) + return nil, err + } + + expected := map[string]metav1.ConditionStatus{ + hv1.ConditionTypeOnboarding: "", // Don't care + hv1.ConditionTypeReady: metav1.ConditionTrue, + hv1.ConditionTypeTerminating: metav1.ConditionFalse, + hv1.ConditionTypeTainted: metav1.ConditionFalse, + hv1.ConditionTypeTraitsUpdated: "", // Don't care + hv1.ConditionTypeAggregatesUpdated: "", // Don't care + } + + var hostsReady = make(map[string]struct{}) + for _, hv := range hvs.Items { + allMet := true + for conditionType, expectedStatus := range expected { + cd := meta.FindStatusCondition(hv.Status.Conditions, conditionType) + if cd == nil { + traceLog.Info( + "hypervisor missing condition, keeping", + "host", hv.Name, "condition", conditionType, + ) + // TODO: Decide if we want to filter hosts missing conditions + // or not. For now we keep them. 
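+				// Keeping hosts with missing conditions means a hypervisor the
+				// operator has not fully reconciled yet stays schedulable; only
+				// an explicit unexpected status (not Ready, Terminating, or
+				// Tainted) removes a host from the candidate set.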
+ continue + } + if expectedStatus == "" { + continue // Don't care about this condition + } + if cd.Status != expectedStatus { + traceLog.Info( + "hypervisor condition not met, filtering host", + "host", hv.Name, + "condition", conditionType, + "status", cd.Status, + ) + allMet = false + break + } + } + if allMet { + hostsReady[hv.Name] = struct{}{} + } + } + + traceLog.Info("hosts passing status conditions filter", "hosts", hostsReady) + for host := range result.Activations { + if _, ok := hostsReady[host]; ok { + continue + } + delete(result.Activations, host) + } + return result, nil +} diff --git a/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions_test.go b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions_test.go new file mode 100644 index 00000000..9a51b7fa --- /dev/null +++ b/internal/scheduling/decisions/nova/plugins/filters/filter_status_conditions_test.go @@ -0,0 +1,314 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package filters + +import ( + "log/slog" + "testing" + + api "github.com/cobaltcore-dev/cortex/api/delegation/nova" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFilterStatusConditionsStep_Run(t *testing.T) { + scheme, err := hv1.SchemeBuilder.Build() + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + hvs := []client.Object{ + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host1", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{ + { + Type: hv1.ConditionTypeReady, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeTerminating, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTainted, + Status: v1.ConditionFalse, + }, + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host2", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{ + { + Type: hv1.ConditionTypeReady, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTerminating, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTainted, + Status: v1.ConditionFalse, + }, + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host3", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{ + { + Type: hv1.ConditionTypeReady, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeTerminating, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeTainted, + Status: v1.ConditionFalse, + }, + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host4", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{ + { + Type: hv1.ConditionTypeReady, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeTerminating, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTainted, + Status: v1.ConditionTrue, + }, + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host5", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{ + { + Type: hv1.ConditionTypeOnboarding, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeReady, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeTerminating, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTainted, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTraitsUpdated, + Status: v1.ConditionTrue, + }, + { + Type: hv1.ConditionTypeAggregatesUpdated, + Status: 
v1.ConditionFalse, + }, + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host6", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{ + { + Type: hv1.ConditionTypeTerminating, + Status: v1.ConditionFalse, + }, + { + Type: hv1.ConditionTypeTainted, + Status: v1.ConditionFalse, + }, + }, + }, + }, + &hv1.Hypervisor{ + ObjectMeta: v1.ObjectMeta{ + Name: "host7", + }, + Status: hv1.HypervisorStatus{ + Conditions: []v1.Condition{}, + }, + }, + } + + tests := []struct { + name string + request api.ExternalSchedulerRequest + expectedHosts []string + filteredHosts []string + }{ + { + name: "Filter hosts with all conditions met", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host2", "host3", "host4"}, + }, + { + name: "Host not ready should be filtered", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host2"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host2"}, + }, + { + name: "Terminating host should be filtered", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host3"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host3"}, + }, + { + name: "Tainted host should be filtered", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host4"}, + }, + }, + expectedHosts: []string{}, + filteredHosts: []string{"host4"}, + }, + { + name: "Host with optional conditions in any state should pass", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host5"}, + }, + }, + expectedHosts: []string{"host5"}, + filteredHosts: []string{}, + }, + { + name: "Host missing Ready condition should be kept", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host6"}, + }, + }, + expectedHosts: []string{"host6"}, + filteredHosts: []string{}, + }, + { + name: "Host with no conditions should be kept", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host7"}, + }, + }, + expectedHosts: []string{"host7"}, + filteredHosts: []string{}, + }, + { + name: "Empty host list", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{}, + }, + expectedHosts: []string{}, + filteredHosts: []string{}, + }, + { + name: "Host not in database", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host-unknown"}, + }, + }, + expectedHosts: []string{"host1"}, + filteredHosts: []string{"host-unknown"}, + }, + { + name: "Mixed condition states", + request: api.ExternalSchedulerRequest{ + Hosts: []api.ExternalSchedulerHost{ + {ComputeHost: "host1"}, + {ComputeHost: "host2"}, + {ComputeHost: "host3"}, + {ComputeHost: "host4"}, + {ComputeHost: "host5"}, + }, + }, + expectedHosts: []string{"host1", "host5"}, + filteredHosts: []string{"host2", "host3", "host4"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + step := &FilterStatusConditionsStep{} + step.Client = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(hvs...). 
+ Build() + result, err := step.Run(slog.Default(), tt.request) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Check expected hosts are present + for _, host := range tt.expectedHosts { + if _, ok := result.Activations[host]; !ok { + t.Errorf("expected host %s to be present in activations", host) + } + } + + // Check filtered hosts are not present + for _, host := range tt.filteredHosts { + if _, ok := result.Activations[host]; ok { + t.Errorf("expected host %s to be filtered out", host) + } + } + + // Check total count + if len(result.Activations) != len(tt.expectedHosts) { + t.Errorf("expected %d hosts, got %d", len(tt.expectedHosts), len(result.Activations)) + } + }) + } +} diff --git a/internal/scheduling/decisions/nova/supported_steps.go b/internal/scheduling/decisions/nova/supported_steps.go index faba3a67..7caa253a 100644 --- a/internal/scheduling/decisions/nova/supported_steps.go +++ b/internal/scheduling/decisions/nova/supported_steps.go @@ -22,11 +22,12 @@ var supportedSteps = map[string]func() NovaStep{ "vmware_general_purpose_balancing": func() NovaStep { return &weighers.VMwareGeneralPurposeBalancingStep{} }, "filter_has_accelerators": func() NovaStep { return &filters.FilterHasAcceleratorsStep{} }, "filter_correct_az": func() NovaStep { return &filters.FilterCorrectAZStep{} }, - "filter_disabled": func() NovaStep { return &filters.FilterDisabledStep{} }, + "filter_status_conditions": func() NovaStep { return &filters.FilterStatusConditionsStep{} }, + "filter_maintenance": func() NovaStep { return &filters.FilterMaintenanceStep{} }, "filter_packed_virtqueue": func() NovaStep { return &filters.FilterPackedVirtqueueStep{} }, "filter_external_customer": func() NovaStep { return &filters.FilterExternalCustomerStep{} }, - "filter_project_aggregates": func() NovaStep { return &filters.FilterProjectAggregatesStep{} }, - "filter_compute_capabilities": func() NovaStep { return &filters.FilterComputeCapabilitiesStep{} }, + "filter_allowed_projects": func() NovaStep { return &filters.FilterAllowedProjectsStep{} }, + "filter_capabilities": func() NovaStep { return &filters.FilterCapabilitiesStep{} }, "filter_has_requested_traits": func() NovaStep { return &filters.FilterHasRequestedTraits{} }, "filter_has_enough_capacity": func() NovaStep { return &filters.FilterHasEnoughCapacity{} }, "filter_host_instructions": func() NovaStep { return &filters.FilterHostInstructionsStep{} }, diff --git a/internal/scheduling/descheduling/nova/plugins/base.go b/internal/scheduling/descheduling/nova/plugins/base.go index 3e039703..1ba37d25 100644 --- a/internal/scheduling/descheduling/nova/plugins/base.go +++ b/internal/scheduling/descheduling/nova/plugins/base.go @@ -8,7 +8,6 @@ import ( "github.com/cobaltcore-dev/cortex/api/v1alpha1" "github.com/cobaltcore-dev/cortex/pkg/conf" - "github.com/cobaltcore-dev/cortex/pkg/db" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -19,8 +18,6 @@ type BaseStep[Opts any] struct { conf.JsonOpts[Opts] // The kubernetes client to use. Client client.Client - // Initialized database connection, if configured through the step spec. - DB *db.DB } // Init the step with the database and options. @@ -30,15 +27,6 @@ func (s *BaseStep[Opts]) Init(ctx context.Context, client client.Client, step v1 return err } - if step.Spec.DatabaseSecretRef != nil { - authenticatedDB, err := db.Connector{Client: client}. 
- FromSecretRef(ctx, *step.Spec.DatabaseSecretRef) - if err != nil { - return err - } - s.DB = authenticatedDB - } - s.Client = client return nil } diff --git a/internal/scheduling/e2e/nova/checks.go b/internal/scheduling/e2e/nova/checks.go index 34659363..641087d4 100644 --- a/internal/scheduling/e2e/nova/checks.go +++ b/internal/scheduling/e2e/nova/checks.go @@ -290,9 +290,10 @@ func randomRequest(dc datacenter, seed int) api.ExternalSchedulerRequest { panic(err) } // Check if the flavor is for vmware. - vmware := false + vmware, kvm := false, false if val, ok := extraSpecs["capabilities:hypervisor_type"]; ok { vmware = strings.EqualFold(val, "VMware vCenter Server") + kvm = strings.EqualFold(val, "qemu") || strings.EqualFold(val, "ch") } slog.Info("using flavor extra specs", "extraSpecs", extraSpecs) request := api.ExternalSchedulerRequest{ @@ -315,6 +316,10 @@ func randomRequest(dc datacenter, seed int) api.ExternalSchedulerRequest { Weights: weights, VMware: vmware, } + // Force to use the pipeline with all filters enabled for kvm flavors. + if kvm { + request.Pipeline = "nova-external-scheduler-kvm-all-filters-enabled" + } return request } diff --git a/internal/scheduling/external/nova/api.go b/internal/scheduling/external/nova/api.go index 5d5f6a29..1b728005 100644 --- a/internal/scheduling/external/nova/api.go +++ b/internal/scheduling/external/nova/api.go @@ -89,7 +89,7 @@ func (httpAPI *httpAPI) inferPipelineName(requestData api.ExternalSchedulerReque switch strings.ToLower(hvType) { case "qemu", "ch": if requestData.Reservation { - return "nova-external-scheduler-kvm-reservations", nil + return "nova-external-scheduler-kvm-all-filters-enabled", nil } else { return "nova-external-scheduler-kvm", nil } diff --git a/internal/scheduling/lib/step.go b/internal/scheduling/lib/step.go index f7ce4623..845b3306 100644 --- a/internal/scheduling/lib/step.go +++ b/internal/scheduling/lib/step.go @@ -10,7 +10,6 @@ import ( "github.com/cobaltcore-dev/cortex/api/v1alpha1" "github.com/cobaltcore-dev/cortex/pkg/conf" - "github.com/cobaltcore-dev/cortex/pkg/db" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -51,8 +50,6 @@ type BaseStep[RequestType PipelineRequest, Opts StepOpts] struct { ActivationFunction // The kubernetes client to use. Client client.Client - // Initialized database connection, if configured through the step spec. - DB *db.DB } // Init the step with the database and options. @@ -65,15 +62,6 @@ func (s *BaseStep[RequestType, Opts]) Init(ctx context.Context, client client.Cl return err } - if step.Spec.DatabaseSecretRef != nil { - authenticatedDB, err := db.Connector{Client: client}. - FromSecretRef(ctx, *step.Spec.DatabaseSecretRef) - if err != nil { - return err - } - s.DB = authenticatedDB - } - s.Client = client return nil }
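Note on the pattern above: every migrated step now reads hypervisor state from Hypervisor custom resources through the controller-runtime client on BaseStep, and the tests exercise that path with a fake client instead of a seeded database. A minimal, self-contained sketch of that test setup follows (module paths as imported in the diff; the test name and assertions are illustrative, not taken from the repository):

package filters

import (
	"context"
	"testing"

	hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// Minimal sketch of the fake-client test pattern used throughout this
// change: register the hypervisor scheme, seed objects, and let the code
// under test (here, a plain List) read them without an API server or
// a database.
func TestFakeClientPattern(t *testing.T) {
	scheme, err := hv1.SchemeBuilder.Build()
	if err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	c := fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(&hv1.Hypervisor{
			ObjectMeta: metav1.ObjectMeta{Name: "host1"},
			Status: hv1.HypervisorStatus{
				Traits: []string{"COMPUTE_NET_VIRTIO_PACKED"},
			},
		}).
		Build()

	// The fake client serves reads from the objects registered above.
	hvs := &hv1.HypervisorList{}
	if err := c.List(context.Background(), hvs); err != nil {
		t.Fatalf("expected no error, got %v", err)
	}
	if len(hvs.Items) != 1 || hvs.Items[0].Name != "host1" {
		t.Errorf("unexpected hypervisors: %+v", hvs.Items)
	}
}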