chore: fix vulnerabilities and CI failures #1179
Changes from 25 commits
golangci-lint configuration:

@@ -1,40 +1,42 @@
 run:
   timeout: 5m

-linters-settings:
-  gocritic:
-    enabled-tags:
-      - performance
-  gosec:
-    excludes:
-      - G108
-  lll:
-    line-length: 200
+version: "2"

-  misspell:
-    locale: US
-  staticcheck:
-    go: "1.21"
+run:
+  go: "1.23"

 linters:
-  disable-all: true
+  default: none
   enable:
     - errcheck
-    - exportloopref
+    - copyloopvar # replacement for exportloopref
     - forcetypeassert
     - gocritic
     - goconst
     - godot
-    - gofmt
-    - gofumpt
-    - goimports
     - gosec
-    - gosimple
     - govet
     - ineffassign
     - misspell
-    - revive # replacement for golint
-    - staticcheck
-    - typecheck
+    # - revive # replacement for golint
+    - staticcheck # includes gosimple and staticcheck
     - unused
     - whitespace
+  settings:
+    gocritic:
+      enabled-tags:
+        - performance
+    gosec:
+      excludes:
+        - G108
+    lll:
+      line-length: 200
+    misspell:
+      locale: US
+  exclusions:
+    paths:
+      - "docs/build/assets/files/.*\\.go"

+formatters:
+  enable:
+    - gofmt
+    - gofumpt
+    - goimports
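One note on the linter swap above: replacing exportloopref with copyloopvar tracks the Go toolchain bump. Since Go 1.22, each loop iteration gets its own copy of the loop variable, so the pointer-capture bug exportloopref hunted for no longer exists, and copyloopvar instead flags the now-redundant manual copies. A minimal illustrative sketch of the pattern involved (not code from this repository):

```go
package main

import "fmt"

func main() {
	nums := []int{1, 2, 3}
	ptrs := make([]*int, 0, len(nums))
	for _, n := range nums {
		n := n // copyloopvar flags this copy as redundant when targeting Go 1.22+
		ptrs = append(ptrs, &n)
	}
	// Before Go 1.22, omitting the copy made every pointer alias one shared
	// variable; with per-iteration variables the copy is no longer needed.
	for _, p := range ptrs {
		fmt.Println(*p) // 1, 2, 3 each on its own line
	}
}
```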
v1alpha1 API package doc comment:

@@ -1,2 +1,3 @@
+// Package v1alpha1 contains API Schema definitions for the eraser v1alpha1 API version.
 // +k8s:conversion-gen=github.com/eraser-dev/eraser/api/unversioned
 package v1alpha1
v1alpha2 API package doc comment:

@@ -1,2 +1,3 @@
+// Package v1alpha2 contains API Schema definitions for the eraser v1alpha2 API version.
 // +k8s:conversion-gen=github.com/eraser-dev/eraser/api/unversioned
 package v1alpha2
controllers package doc comment:

@@ -1,3 +1,4 @@
+// Package controllers implements Kubernetes controllers for eraser resources.
 package controllers

 import (
ImageJob controller:

@@ -23,11 +23,10 @@ import (
 	"golang.org/x/exp/slices"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/scheduler/framework"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"

 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -102,7 +101,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	}

 	// Watch for changes to ImageJob
-	err = c.Watch(&source.Kind{Type: &eraserv1.ImageJob{}}, &handler.EnqueueRequestForObject{}, predicate.Funcs{
+	err = c.Watch(source.Kind(mgr.GetCache(), &eraserv1.ImageJob{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
 		UpdateFunc: func(e event.UpdateEvent) bool {
 			if job, ok := e.ObjectNew.(*eraserv1.ImageJob); ok && controllerUtils.IsCompletedOrFailed(job.Status.Phase) {
 				return false // handled by Owning controller
@@ -120,13 +119,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {

 	// Watch for changes to pods created by ImageJob (eraser pods)
 	err = c.Watch(
-		&source.Kind{
-			Type: &corev1.Pod{},
-		},
-		&handler.EnqueueRequestForOwner{
-			OwnerType:    &corev1.PodTemplate{},
-			IsController: true,
-		},
+		source.Kind(mgr.GetCache(), &corev1.Pod{}),
+		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &corev1.PodTemplate{}),
 		predicate.Funcs{
 			CreateFunc: func(e event.CreateEvent) bool {
 				return e.Object.GetNamespace() == eraserUtils.GetNamespace()
@@ -145,13 +139,8 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {

 	// watch for changes to imagejob podTemplate (owned by controller manager pod)
 	err = c.Watch(
-		&source.Kind{
-			Type: &corev1.PodTemplate{},
-		},
-		&handler.EnqueueRequestForOwner{
-			OwnerType:    &corev1.Pod{},
-			IsController: true,
-		},
+		source.Kind(mgr.GetCache(), &corev1.PodTemplate{}),
+		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &corev1.Pod{}),
 		predicate.Funcs{
 			CreateFunc: func(e event.CreateEvent) bool {
 				ownerLabels, ok := e.Object.GetLabels()[managerLabelKey]
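For context on the two Watch rewrites above and the ImageJob watch before them: controller-runtime (around v0.15) replaced the struct literals &source.Kind{...} and &handler.EnqueueRequestForOwner{...} with constructor functions, which is the style this diff adopts. Below is a minimal sketch of the new wiring; the names mgr and c mirror the surrounding code, and handler.OnlyControllerOwner() is shown only to note the option that corresponds to the old IsController: true field, not as something this PR adds.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchOwnedPods wires a watch on Pods owned by a PodTemplate using the
// function-style constructors from controller-runtime v0.15+.
func watchOwnedPods(mgr manager.Manager, c controller.Controller) error {
	return c.Watch(
		// source.Kind now takes the manager's cache plus the object type.
		source.Kind(mgr.GetCache(), &corev1.Pod{}),
		// EnqueueRequestForOwner is now a constructor taking the scheme, the
		// RESTMapper, and the owner type; OnlyControllerOwner() is the option
		// that replaces the former IsController: true field.
		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(),
			&corev1.PodTemplate{}, handler.OnlyControllerOwner()),
	)
}
```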
@@ -175,14 +164,43 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 }

 func checkNodeFitness(pod *corev1.Pod, node *corev1.Node) bool {
-	nodeInfo := framework.NewNodeInfo()
-	nodeInfo.SetNode(node)
+	// Check if node has sufficient resources for the pod
+	nodeAllocatable := node.Status.Allocatable
+	if nodeAllocatable == nil {
+		log.Error(fmt.Errorf("node %v has no allocatable resources", node.Name), "node resource check")
+		return false
+	}

-	insufficientResource := noderesources.Fits(pod, nodeInfo)
+	// Calculate total resource requests from all containers in the pod
+	totalCPU := resource.NewQuantity(0, resource.DecimalSI)
+	totalMemory := resource.NewQuantity(0, resource.BinarySI)

-	if len(insufficientResource) != 0 {
-		log.Error(fmt.Errorf("pod %v in namespace %v does not fit in node %v", pod.Name, pod.Namespace, node.Name), "insufficient resource")
-		return false
+	for i := range pod.Spec.Containers {
+		container := &pod.Spec.Containers[i]
+		if container.Resources.Requests != nil {
+			if cpu, exists := container.Resources.Requests[corev1.ResourceCPU]; exists {
+				totalCPU.Add(cpu)
+			}
+			if memory, exists := container.Resources.Requests[corev1.ResourceMemory]; exists {
+				totalMemory.Add(memory)
+			}
+		}
 	}

+	// Check if node has sufficient CPU
+	if allocatableCPU, exists := nodeAllocatable[corev1.ResourceCPU]; exists {
+		if totalCPU.Cmp(allocatableCPU) > 0 {
+			log.Error(fmt.Errorf("pod %v in namespace %v requires more CPU than available on node %v", pod.Name, pod.Namespace, node.Name), "insufficient CPU")
+			return false
+		}
+	}

+	// Check if node has sufficient memory
+	if allocatableMemory, exists := nodeAllocatable[corev1.ResourceMemory]; exists {
+		if totalMemory.Cmp(allocatableMemory) > 0 {
+			log.Error(fmt.Errorf("pod %v in namespace %v requires more memory than available on node %v", pod.Name, pod.Namespace, node.Name), "insufficient memory")
+			return false
+		}
+	}

 	return true
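The checkNodeFitness rewrite above removes the k8s.io/kubernetes scheduler-framework import, a heavyweight dependency that tends to drag in vulnerable transitive modules, and instead sums container requests and compares them against node allocatable with resource.Quantity arithmetic. A self-contained sketch of that comparison, using made-up values purely for illustration:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical pod requests and node allocatable, for illustration only.
	requests := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("128Mi"),
	}
	allocatable := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("2"),
		corev1.ResourceMemory: resource.MustParse("64Mi"),
	}

	// Sum the requests the same way the new checkNodeFitness does.
	totalCPU := resource.NewQuantity(0, resource.DecimalSI)
	totalMemory := resource.NewQuantity(0, resource.BinarySI)
	if cpu, ok := requests[corev1.ResourceCPU]; ok {
		totalCPU.Add(cpu)
	}
	if mem, ok := requests[corev1.ResourceMemory]; ok {
		totalMemory.Add(mem)
	}

	// Quantity.Cmp returns 1 when the receiver is larger, so > 0 means "does not fit".
	cpuFits := totalCPU.Cmp(allocatable[corev1.ResourceCPU]) <= 0
	memFits := totalMemory.Cmp(allocatable[corev1.ResourceMemory]) <= 0
	fmt.Println("cpu fits:", cpuFits, "memory fits:", memFits) // cpu fits: true memory fits: false
}
```

The sketch mirrors the shape of the new fit check; the real function additionally handles a nil Allocatable map and logs which resource is short.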
@@ -437,6 +455,7 @@ func (r *Reconciler) handleNewJob(ctx context.Context, imageJob *eraserv1.ImageJ
 	}

 	for _, namespacedName := range namespacedNames {
+		//nolint:staticcheck // SA1019: TODO: Replace with PollUntilContextTimeout in future refactor
 		if err := wait.PollImmediate(time.Nanosecond, time.Minute*5, r.isPodReady(ctx, namespacedName)); err != nil {
 			log.Error(err, "timed out waiting for pod to leave pending state", "pod NamespacedName", namespacedName)
 		}
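The nolint directive above silences staticcheck's SA1019 deprecation warning for wait.PollImmediate rather than migrating the call. For reference, a hedged sketch of what the replacement named in the TODO could look like, assuming isPodReady keeps returning a wait.ConditionFunc as the current call implies (illustrative only, not part of this PR):

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForPodReady polls with the non-deprecated API referenced by the TODO.
// PollUntilContextTimeout takes a context, a poll interval, a timeout, an
// "immediate" flag (poll once before the first interval, like PollImmediate),
// and a context-aware condition function.
func waitForPodReady(ctx context.Context, isReady wait.ConditionFunc) error {
	return wait.PollUntilContextTimeout(ctx, time.Nanosecond, 5*time.Minute, true,
		// WithContext adapts a legacy ConditionFunc to a ConditionWithContextFunc.
		isReady.WithContext())
}
```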
@@ -509,11 +528,11 @@ nodes:
 		}

 		log.V(1).Info("includedLabels", "includedLabels", includedLabels)
-		log.V(1).Info("nodeLabels", "nodeLabels", nodes.Items[i].ObjectMeta.Labels)
-		if includedLabels.Matches(labels.Set(nodes.Items[i].ObjectMeta.Labels)) {
+		log.V(1).Info("nodeLabels", "nodeLabels", nodes.Items[i].Labels)
+		if includedLabels.Matches(labels.Set(nodes.Items[i].Labels)) {
 			log.Info("node is included because it matched the specified labels",
 				"nodeName", nodeName,
-				"labels", nodes.Items[i].ObjectMeta.Labels,
+				"labels", nodes.Items[i].Labels,
 				"specifiedSelectors", includeNodesSelectors,
 			)
@@ -543,11 +562,11 @@ nodes:
 		}

 		log.V(1).Info("skipLabels", "skipLabels", skipLabels)
-		log.V(1).Info("nodeLabels", "nodeLabels", nodes.Items[i].ObjectMeta.Labels)
-		if skipLabels.Matches(labels.Set(nodes.Items[i].ObjectMeta.Labels)) {
+		log.V(1).Info("nodeLabels", "nodeLabels", nodes.Items[i].Labels)
+		if skipLabels.Matches(labels.Set(nodes.Items[i].Labels)) {
 			log.Info("node will be skipped because it matched the specified labels",
 				"nodeName", nodeName,
-				"labels", nodes.Items[i].ObjectMeta.Labels,
+				"labels", nodes.Items[i].Labels,
 				"specifiedSelectors", skipNodesSelectors,
 			)
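The nodes.Items[i].ObjectMeta.Labels to nodes.Items[i].Labels changes in the last two hunks are cosmetic: metav1.ObjectMeta is embedded in corev1.Node (and every other API object type), so its fields are promoted and the explicit ObjectMeta selector is redundant, the kind of redundancy linters commonly flag. A tiny sketch of the field promotion, using a hypothetical label key:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	node := corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "worker-1",
			Labels: map[string]string{"example.com/role": "worker"}, // hypothetical label
		},
	}

	// ObjectMeta is an embedded field, so both selectors read the same map;
	// the promoted form is the shorter, preferred spelling.
	fmt.Println(node.ObjectMeta.Labels["example.com/role"]) // worker
	fmt.Println(node.Labels["example.com/role"])            // worker
}
```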