From 07e13fe60b5c18fa4d46d8b9260207c3a83446e9 Mon Sep 17 00:00:00 2001 From: Imran Pochi Date: Tue, 29 Apr 2025 17:26:11 +0000 Subject: [PATCH 1/3] chore: bump github action runs-on to ubuntu-24.04 There is an error in the github actions runner that runs on 22.04. This issue hasn't been rectified yet, but it is not reported on 24.04. Issue details: https://github.com/actions/runner-images/issues/11985 So this bumps the action to run on ubuntu 24.04 Signed-off-by: Imran Pochi --- .github/workflows/e2e.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index ba299c5e2..3e34320e1 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -20,7 +20,7 @@ env: jobs: build: name: build - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -51,7 +51,7 @@ jobs: path: _output/konnectivity-agent.tar kind-e2e: name: kind-e2e - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 100 needs: - build @@ -98,7 +98,7 @@ jobs: run: make test-e2e-ci e2e: name: e2e - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 100 needs: - build From d76b597f0700d619e417e31265da78ed763984c3 Mon Sep 17 00:00:00 2001 From: Imran Pochi Date: Tue, 29 Apr 2025 17:42:01 +0000 Subject: [PATCH 2/3] fix: chmod only the required binaries Switching to the 24.04 github actions runner introduces this error: chmod: cannot operate on dangling symlink '/usr/local/bin/now' Hence, instead of running chmod on all binaries present in /usr/local/bin, we only chmod the binaries that we copy. 
Signed-off-by: Imran Pochi --- .github/workflows/e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 3e34320e1..98e122ad8 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -136,7 +136,7 @@ jobs: sudo cp ${TMP_DIR}/e2e.test /usr/local/bin/e2e.test sudo cp ${TMP_DIR}/kubectl /usr/local/bin/kubectl sudo cp ${TMP_DIR}/kind /usr/local/bin/kind - sudo chmod +x /usr/local/bin/* + sudo chmod +x /usr/local/bin/ginkgo /usr/local/bin/e2e.test /usr/local/bin/kubectl /usr/local/bin/kind - name: Create multi node cluster run: | From 0b63e334262c7e8c1141c16acee5bda2b4258a22 Mon Sep 17 00:00:00 2001 From: Imran Pochi Date: Tue, 15 Apr 2025 00:52:31 +0000 Subject: [PATCH 3/3] fix: k8s client setup Currently, setting up the k8s client is broken if service account authentication is not used between server and agent. The condition `if o.AgentNamespace != "" {` acts as a gatekeeper for setting up the k8s client, which worked fine previously as the server never needed to talk to the apiserver apart from authenticating agents using the service account token. However, when the lease controller logic was added, setting up the k8s client became required whenever the lease controller was enabled, even if authentication was done using mTLS instead of service account authentication. This fixes that. Closing #728 in favour of this. 
Signed-off-by: Imran Pochi --- cmd/server/app/options/options.go | 28 +++++++++++++++------------- cmd/server/app/server.go | 2 +- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/cmd/server/app/options/options.go b/cmd/server/app/options/options.go index f5a26621c..a5dbce407 100644 --- a/cmd/server/app/options/options.go +++ b/cmd/server/app/options/options.go @@ -112,6 +112,8 @@ type ProxyRunOptions struct { LeaseNamespace string // Lease Labels LeaseLabel string + // Needs kubernetes client + NeedsKubernetesClient bool } func (o *ProxyRunOptions) Flags() *pflag.FlagSet { @@ -287,29 +289,27 @@ func (o *ProxyRunOptions) Validate() error { if o.EnableContentionProfiling && !o.EnableProfiling { return fmt.Errorf("if --enable-contention-profiling is set, --enable-profiling must also be set") } - - // validate agent authentication params - // all 4 parameters must be empty or must have value (except KubeconfigPath that might be empty) - if o.AgentNamespace != "" || o.AgentServiceAccount != "" || o.AuthenticationAudience != "" || o.KubeconfigPath != "" { + usingServiceAccountAuth := o.AgentNamespace != "" || o.AgentServiceAccount != "" || o.AuthenticationAudience != "" + if usingServiceAccountAuth { if o.ClusterCaCert != "" { - return fmt.Errorf("ClusterCaCert can not be used when service account authentication is enabled") + return fmt.Errorf("--cluster-ca-cert can not be used when agent authentication is enabled") } if o.AgentNamespace == "" { - return fmt.Errorf("AgentNamespace cannot be empty when agent authentication is enabled") + return fmt.Errorf("--agent-namespace cannot be empty when agent authentication is enabled") } if o.AgentServiceAccount == "" { - return fmt.Errorf("AgentServiceAccount cannot be empty when agent authentication is enabled") + return fmt.Errorf("--agent-service-account cannot be empty when agent authentication is enabled") } if o.AuthenticationAudience == "" { - return fmt.Errorf("AuthenticationAudience cannot be empty when 
agent authentication is enabled") + return fmt.Errorf("--authentication-audience cannot be empty when agent authentication is enabled") } - if o.KubeconfigPath != "" { - if _, err := os.Stat(o.KubeconfigPath); os.IsNotExist(err) { - return fmt.Errorf("error checking KubeconfigPath %q, got %v", o.KubeconfigPath, err) - } + } + // Validate kubeconfig path if provided + if o.KubeconfigPath != "" { + if _, err := os.Stat(o.KubeconfigPath); os.IsNotExist(err) { + return fmt.Errorf("checking KubeconfigPath %q, got %v", o.KubeconfigPath, err) } } - // validate the proxy strategies if len(o.ProxyStrategies) == 0 { return fmt.Errorf("ProxyStrategies cannot be empty") @@ -338,6 +338,8 @@ func (o *ProxyRunOptions) Validate() error { } } + o.NeedsKubernetesClient = usingServiceAccountAuth || o.EnableLeaseController + return nil } diff --git a/cmd/server/app/server.go b/cmd/server/app/server.go index 8f8ddfa36..479d838f7 100644 --- a/cmd/server/app/server.go +++ b/cmd/server/app/server.go @@ -105,7 +105,7 @@ func (p *Proxy) Run(o *options.ProxyRunOptions, stopCh <-chan struct{}) error { defer cancel() var k8sClient *kubernetes.Clientset - if o.AgentNamespace != "" { + if o.NeedsKubernetesClient { config, err := clientcmd.BuildConfigFromFlags("", o.KubeconfigPath) if err != nil { return fmt.Errorf("failed to load kubernetes client config: %v", err)