Skip to content

bug: kube-router and KubeSpan don't play nice together #9814

@smira

Description

@smira

The root cause is not clear: there is no conflict on fwmark, but the interaction might be caused by routing rules (to be confirmed).

The patches:

controlplane:

# Talos machine configuration patch (control plane nodes).
machine:
  network:
    # Enable KubeSpan (Talos' WireGuard-based node-to-node mesh) — this is the
    # feature that reportedly conflicts with kube-router in this issue.
    kubespan:
      enabled: true
cluster:
  controllerManager:
    extraArgs:
      # Have kube-controller-manager allocate per-node pod CIDRs from
      # podSubnets (presumably consumed by kube-router's bridge/host-local
      # CNI setup below — confirm against the kube-router docs).
      allocate-node-cidrs: "true"
  network:
    podSubnets:
      - 192.168.0.0/17
    serviceSubnets:
      - 192.168.128.0/17
    cni:
      # Disable the Talos-managed default CNI; kube-router (inline manifest
      # below) is installed as the CNI instead.
      name: none
  proxy:
    # Using kube-router: --run-service-proxy=true
    disabled: true
  # The entire kube-router deployment is injected as an inline manifest;
  # everything under `contents` is a multi-document Kubernetes YAML string.
  inlineManifests:
    - name: kube-router
      contents: |
        # ServiceAccount used by the kube-router DaemonSet pods.
        apiVersion: v1
        kind: ServiceAccount
        metadata:
          name: kube-router
          namespace: kube-system
        ---
        # Cluster-wide read access to the resources kube-router watches, plus
        # lease handling for leader election and service status updates.
        apiVersion: rbac.authorization.k8s.io/v1
        kind: ClusterRole
        metadata:
          name: kube-router
          # NOTE(review): dropped `namespace: kube-system` here — ClusterRole is
          # a cluster-scoped resource, so a namespace field is meaningless and
          # was silently ignored by the API server.
        rules:
        - apiGroups:
          - ""
          resources:
          - namespaces
          - pods
          - services
          - nodes
          - endpoints
          verbs:
          - list
          - get
          - watch
        - apiGroups:
          - networking.k8s.io
          resources:
          - networkpolicies
          verbs:
          - list
          - get
          - watch
        - apiGroups:
          - extensions
          resources:
          - networkpolicies
          verbs:
          - get
          - list
          - watch
        - apiGroups:
          - coordination.k8s.io
          resources:
          - leases
          verbs:
          - get
          - create
          - update
        - apiGroups:
          - ""
          resources:
          - services/status
          verbs:
          - update
        - apiGroups:
          - discovery.k8s.io
          resources:
          - endpointslices
          verbs:
          - get
          - list
          - watch
        ---
        # Bind the ClusterRole to the kube-router ServiceAccount.
        apiVersion: rbac.authorization.k8s.io/v1
        kind: ClusterRoleBinding
        metadata:
          name: kube-router
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: ClusterRole
          name: kube-router
        subjects:
        - kind: ServiceAccount
          name: kube-router
          namespace: kube-system
        ---
        # ConfigMap consumed by the install-cni initContainer: the CNI conflist
        # and a kubeconfig for kube-router to reach the local API endpoint.
        apiVersion: v1
        data:
          cni-conf.json: |
            {
              "cniVersion":"0.3.0",
              "name":"mynet",
              "plugins":[
                  {
                    "name":"kubernetes",
                    "type":"bridge",
                    "bridge":"kube-bridge",
                    "isDefaultGateway":true,
                    "ipam":{
                        "type":"host-local"
                    }
                  }
              ]
            }
          # NOTE(review): removed a stray `clusterCIDR: "192.168.0.0/17"` key
          # that sat inside this kubeconfig — it is not a field of a kubeconfig
          # (kind: Config) and was silently ignored.
          # 127.0.0.1:7445 is presumably the Talos KubePrism local API-server
          # endpoint — verify against the Talos configuration.
          kubeconfig: |
            apiVersion: v1
            kind: Config
            clusters:
            - name: cluster
              cluster:
                certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
                server: https://127.0.0.1:7445
            users:
            - name: kube-router
              user:
                tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
            contexts:
            - context:
                cluster: cluster
                user: kube-router
              name: kube-router-context
            current-context: kube-router-context
        kind: ConfigMap
        metadata:
          labels:
            k8s-app: kube-router
            tier: node
          name: kube-router-cfg
          namespace: kube-system
        ---
        # kube-router DaemonSet: runs on every node as router + network-policy
        # firewall + service proxy (see container args below).
        apiVersion: apps/v1
        kind: DaemonSet
        metadata:
          labels:
            k8s-app: kube-router
            tier: node
          name: kube-router
          namespace: kube-system
        spec:
          selector:
            matchLabels:
              k8s-app: kube-router
              tier: node
          template:
            metadata:
              labels:
                k8s-app: kube-router
                tier: node
            spec:
              containers:
              # Main kube-router container; the kubeconfig path is populated by
              # the install-cni initContainer below.
              - args:
                - --run-router=true
                - --run-firewall=true
                - --run-service-proxy=true
                - --bgp-graceful-restart=true
                - --kubeconfig=/var/lib/kube-router/kubeconfig
                env:
                - name: NODE_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: spec.nodeName
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: KUBE_ROUTER_CNI_CONF_FILE
                  value: /etc/cni/net.d/10-kuberouter.conflist
                image: docker.io/cloudnativelabs/kube-router:v2.3.0
                # NOTE(review): Always re-pulls the image on every pod start;
                # consider IfNotPresent for air-gapped/flaky-registry setups.
                imagePullPolicy: Always
                livenessProbe:
                  httpGet:
                    path: /healthz
                    port: 20244
                  initialDelaySeconds: 10
                  periodSeconds: 3
                name: kube-router
                resources:
                  requests:
                    cpu: 250m
                    memory: 250Mi
                securityContext:
                  # Privileged + hostNetwork/hostPID below: kube-router
                  # manipulates the node's routing tables and iptables.
                  privileged: true
                volumeMounts:
                - mountPath: /lib/modules
                  name: lib-modules
                  readOnly: true
                - mountPath: /etc/cni/net.d
                  name: cni-conf-dir
                - mountPath: /var/lib/kube-router
                  name: kubeconfig
                  readOnly: true
                # Shared iptables lock so kube-router and other tools serialize
                # xtables access; must be writable.
                - mountPath: /run/xtables.lock
                  name: xtables-lock
                  readOnly: false
              hostNetwork: true
              hostPID: true
              initContainers:
              # Copies the CNI conflist and kubeconfig from the ConfigMap onto
              # the host (via temp-file + mv for atomicity), only if not already
              # present; then runs cni-install if the image provides it.
              # NOTE(review): `[ -f /etc/cni/net.d/*.conf ]` relies on the glob
              # expanding to at most one file — errors with multiple matches.
              - command:
                - /bin/sh
                - -c
                - set -e -x; if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then if [
                  -f /etc/cni/net.d/*.conf ]; then rm -f /etc/cni/net.d/*.conf; fi; TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
                  cp /etc/kube-router/cni-conf.json ${TMP}; mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
                  fi; if [ ! -f /var/lib/kube-router/kubeconfig ]; then TMP=/var/lib/kube-router/.tmp-kubeconfig;
                  cp /etc/kube-router/kubeconfig ${TMP}; mv ${TMP} /var/lib/kube-router/kubeconfig;
                  fi; if [ -x /usr/local/bin/cni-install ]; then /usr/local/bin/cni-install;
                  fi;
                image: docker.io/cloudnativelabs/kube-router:v2.3.0
                imagePullPolicy: Always
                name: install-cni
                volumeMounts:
                - mountPath: /etc/cni/net.d
                  name: cni-conf-dir
                - mountPath: /etc/kube-router
                  name: kube-router-cfg
                - mountPath: /var/lib/kube-router
                  name: kubeconfig
                - mountPath: /opt
                  name: host-opt
              priorityClassName: system-node-critical
              serviceAccountName: kube-router
              # Tolerate every taint so the CNI runs on all nodes, including
              # control-plane and not-ready ones.
              tolerations:
              - effect: NoSchedule
                operator: Exists
              - key: CriticalAddonsOnly
                operator: Exists
              - effect: NoExecute
                operator: Exists
              volumes:
              - hostPath:
                  path: /lib/modules
                name: lib-modules
              - hostPath:
                  path: /etc/cni/net.d
                name: cni-conf-dir
              - configMap:
                  name: kube-router-cfg
                name: kube-router-cfg
              - hostPath:
                  path: /var/lib/kube-router
                name: kubeconfig
              - hostPath:
                  path: /run/xtables.lock
                  type: FileOrCreate
                name: xtables-lock
              - hostPath:
                  path: /opt
                name: host-opt

worker:

# Talos machine configuration patch (worker nodes).
machine:
  network:
    # Workers join the same KubeSpan WireGuard mesh as the control plane.
    kubespan:
      enabled: true
cluster:
  network:
    # Must match the subnets in the control-plane patch above.
    podSubnets:
      - 192.168.0.0/17
    serviceSubnets:
      - 192.168.128.0/17

Metadata

Metadata

Assignees

Labels

Type

No type

Projects

Status

Done

Milestone

No milestone

Relationships

None yet

Development

No branches or pull requests

Issue actions