package main

import (
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"syscall"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

// This is a test utility that injects a very specific error condition into
// workspace pods, so that we can test how ws-manager and ws-daemon handle
// such cases: a pod that is rejected by its node after having been scheduled.
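//
// Usage (a sketch; the kubeconfig path is illustrative):
//
//	go run . -kubeconfig $HOME/.kube/config
//
// The tool watches pods labeled component=workspace in the "default" namespace
// and, for each marked pod that is Pending but already scheduled, patches its
// status to Failed with reason NodeAffinity, mimicking a kubelet rejection.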

// patchStringValue describes a single JSON Patch (RFC 6902) operation with a
// string value; it is marshalled into the patch applied to the pod's status
// subresource.
type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}
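
// For reference, the rejection patch built in main marshals to a JSON Patch
// document like this (values taken from the code below):
//
//	[{"op":"replace","path":"/status/phase","value":"Failed"},
//	 {"op":"replace","path":"/status/reason","value":"NodeAffinity"},
//	 {"op":"replace","path":"/status/message","value":"Pod was rejected"}]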

func main() {
	// Get a Kubernetes client.
	clientset, err := getClient()
	if err != nil {
		fmt.Printf("Error creating Kubernetes client: %v\n", err)
		os.Exit(1)
	}

	namespace := "default"
	ctx := context.Background()

	// Listen for workspace pod events.
	podWatcher, err := clientset.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{
		LabelSelector: "component=workspace",
	})
	if err != nil {
		fmt.Printf("Error watching pods: %v\n", err)
		os.Exit(1)
	}

	// Handle pod events until interrupted.
	ch := podWatcher.ResultChan()
	stopChan := make(chan os.Signal, 1)
	signal.Notify(stopChan, syscall.SIGINT, syscall.SIGTERM)

	fmt.Println("Starting rejector...")

	// Track the last seen ResourceVersion and the number of rejections per pod,
	// so that each pod is rejected at most once.
	knownPodVersions := map[string]string{}
	podRejectedCount := map[string]int{}

	for {
		select {
		case event, ok := <-ch:
			if !ok {
				fmt.Println("Watch channel closed, shutting down rejector...")
				return
			}
			pod, ok := event.Object.(*corev1.Pod)
			if !ok {
				fmt.Println("Unexpected type")
				continue
			}

			// Treat all workspace pods as marked; the commented-out check below can
			// be re-enabled to target only specific workspaces (it additionally
			// requires the "slices" and "strings" imports).
			marked := true
			// marked := slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool {
			// 	return e.Name == "GITPOD_WORKSPACE_CONTEXT_URL" && strings.Contains(e.Value, "geropl")
			// })

			// Skip events that are older than what we have already seen. Note that
			// this compares ResourceVersions lexicographically, which is a heuristic:
			// Kubernetes treats them as opaque strings.
			knownVersion, known := knownPodVersions[pod.Name]
			if known && knownVersion >= pod.ResourceVersion {
				fmt.Printf("Skipping pod %s because of outdated version...\n", pod.Name)
				continue
			}

			if count := podRejectedCount[pod.Name]; count > 0 || !marked {
				fmt.Printf("Skipping pod %s...\n", pod.Name)
				continue
			}
			fmt.Printf("Found marked pod %s\n", pod.Name)

			// A pod that is still Pending but already has a node assigned has been
			// scheduled: patch its status so it looks as if the node rejected it.
			if pod.Status.Phase == corev1.PodPending && pod.Spec.NodeName != "" {
				fmt.Printf("found marked pending & scheduled pod: %s\n", pod.Name)
				patch := []patchStringValue{
					{
						Op:    "replace",
						Path:  "/status/phase",
						Value: string(corev1.PodFailed),
					},
					{
						Op:    "replace",
						Path:  "/status/reason",
						Value: "NodeAffinity",
					},
					{
						Op:    "replace",
						Path:  "/status/message",
						Value: "Pod was rejected",
					},
				}
				patchBytes, err := json.Marshal(patch)
				if err != nil {
					fmt.Printf("error marshalling patch for pod %s: %v\n", pod.Name, err)
					continue
				}
				pUpdated, err := clientset.CoreV1().Pods(namespace).Patch(ctx, pod.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status")
				if err != nil {
					fmt.Printf("error patching pod %s: %v\n", pod.Name, err)
					continue
				}
				podRejectedCount[pod.Name]++
				knownPodVersions[pUpdated.Name] = pUpdated.ResourceVersion
				fmt.Printf("Applied status: %s\n", pUpdated.Status.Phase)
			}

		case <-stopChan:
			fmt.Println("Shutting down rejector...")
			return
		}
	}
}

// getClient returns a Kubernetes client, preferring the in-cluster config and
// falling back to a kubeconfig file when not running inside a cluster.
func getClient() (*kubernetes.Clientset, error) {
	var config *rest.Config
	var err error

	// Try to get the in-cluster config first.
	config, err = rest.InClusterConfig()
	if err != nil {
		// Fall back to using a kubeconfig file. Note that "~" is not expanded by
		// filepath.Abs, so the default is built from the detected home directory.
		kubeconfigFlag := flag.String("kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "location of your kubeconfig file")
		flag.Parse()
		kubeconfig, err := filepath.Abs(*kubeconfigFlag)
		if err != nil {
			return nil, fmt.Errorf("cannot resolve kubeconfig path %s: %w", *kubeconfigFlag, err)
		}
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return nil, err
		}
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return clientset, nil
}
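
// To observe the injected failure, one can watch the patched pod's status,
// e.g. (an illustrative check, not part of this tool):
//
//	kubectl get pod <workspace-pod> -o jsonpath='{.status.phase} {.status.reason}'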