Skip to content

Commit d0a50a6

Browse files
rbtr and jpayne3506 authored
fix: test v1.5 train on k8s 1.28 (#2564)
* fix: test main/v1.5 branch on k8s 1.28

Signed-off-by: Evan Baker <[email protected]>

* 1.28 ds changes

* ci: specify vars for envsubst

---------

Signed-off-by: Evan Baker <[email protected]>
Co-authored-by: jpayne3506 <[email protected]>
1 parent 8d68e75 commit d0a50a6

File tree

6 files changed

+46
-23
lines changed

6 files changed

+46
-23
lines changed

.pipelines/cni/singletenancy/cniv1-template.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -114,17 +114,17 @@ stages:
114114
if [ "${{parameters.os}}" == "windows" ]; then
115115
export CNI_IMAGE=$(make cni-image-name-and-tag OS='linux' ARCH=${{ parameters.arch }} CNI_VERSION=$(make cni-version))
116116
echo "CNI image: $CNI_IMAGE"
117-
envsubst < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
117+
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
118118
kubectl rollout status daemonset/azure-cni -n kube-system
119119
echo "Deploying on windows nodes"
120120
export CNI_IMAGE=$(make cni-image-name-and-tag OS='windows' ARCH=${{ parameters.arch }} OS_VERSION=${{ parameters.os_version }} CNI_VERSION=$(make cni-version))
121121
echo "CNI image: $CNI_IMAGE"
122-
envsubst < ./test/integration/manifests/cni/cni-installer-v1-windows.yaml | kubectl apply -f -
122+
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1-windows.yaml | kubectl apply -f -
123123
kubectl rollout status daemonset/azure-cni-windows -n kube-system
124124
else
125125
export CNI_IMAGE=$(make cni-image-name-and-tag OS=${{ parameters.os }} ARCH=${{ parameters.arch }} CNI_VERSION=$(make cni-version))
126126
echo "CNI image: $CNI_IMAGE"
127-
envsubst < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
127+
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
128128
kubectl rollout status daemonset/azure-cni -n kube-system
129129
fi
130130
kubectl get pods -A -owide

.pipelines/singletenancy/aks/e2e-step-template.yaml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -34,17 +34,17 @@ steps:
3434
if [ "${{parameters.os}}" == "windows" ]; then
3535
export CNI_IMAGE=$(make cni-image-name-and-tag OS='linux' ARCH=${{ parameters.arch }})
3636
echo "CNI image: $CNI_IMAGE"
37-
envsubst < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
37+
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
3838
kubectl rollout status daemonset/azure-cni -n kube-system
3939
echo "Deploying on windows nodes"
4040
export CNI_IMAGE=$( make cni-image-name-and-tag OS='windows' ARCH=${{ parameters.arch }} OS_VERSION=${{ parameters.os_version }})
4141
echo "CNI image: $CNI_IMAGE"
42-
envsubst < ./test/integration/manifests/cni/cni-installer-v1-windows.yaml | kubectl apply -f -
42+
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1-windows.yaml | kubectl apply -f -
4343
kubectl rollout status daemonset/azure-cni-windows -n kube-system
4444
else
4545
export CNI_IMAGE=$(make cni-image-name-and-tag OS=${{ parameters.os }} ARCH=${{ parameters.arch }})
4646
echo "CNI image: $CNI_IMAGE"
47-
envsubst < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
47+
envsubst '${CNI_IMAGE}' < ./test/integration/manifests/cni/cni-installer-v1.yaml | kubectl apply -f -
4848
kubectl rollout status daemonset/azure-cni -n kube-system
4949
fi
5050
name: "deployCNI"

.pipelines/templates/create-cluster.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ jobs:
3636
3737
make -C ./hack/aks ${{ parameters.clusterType }} \
3838
AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) \
39-
CLUSTER=${{ parameters.clusterName }} K8S_VER=${{ parameters.k8sVersion }} \
39+
CLUSTER=${{ parameters.clusterName }} \
4040
VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
4141
OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
4242
WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}

hack/aks/Makefile

Lines changed: 22 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ AZCLI ?= docker run --rm -v $(AZCFG):/root/.azure -v $(KUBECFG):/root/.kube -v
99

1010
# overrideable defaults
1111
AUTOUPGRADE ?= patch
12-
K8S_VER ?= 1.27 # Designated for Long Term Support, July 2025 | Only Ubuntu 22.04 is supported
12+
K8S_VER ?= 1.28
1313
NODE_COUNT ?= 2
1414
NODE_COUNT_WIN ?= $(NODE_COUNT)
1515
NODEUPGRADE ?= NodeImage
@@ -99,6 +99,7 @@ overlay-byocni-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster
9999
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
100100
--auto-upgrade-channel $(AUTOUPGRADE) \
101101
--node-os-upgrade-channel $(NODEUPGRADE) \
102+
--kubernetes-version $(K8S_VER) \
102103
--node-count $(NODE_COUNT) \
103104
--node-vm-size $(VM_SIZE) \
104105
--load-balancer-sku standard \
@@ -111,14 +112,14 @@ overlay-byocni-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster
111112
--yes
112113
ifeq ($(OS),windows)
113114
@$(MAKE) windows-nodepool-up
114-
else
115-
@$(MAKE) set-kubeconf
116115
endif
116+
@$(MAKE) set-kubeconf
117117

118118
overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy
119119
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
120120
--auto-upgrade-channel $(AUTOUPGRADE) \
121121
--node-os-upgrade-channel $(NODEUPGRADE) \
122+
--kubernetes-version $(K8S_VER) \
122123
--node-count $(NODE_COUNT) \
123124
--node-vm-size $(VM_SIZE) \
124125
--load-balancer-sku basic \
@@ -135,6 +136,7 @@ overlay-cilium-up: rg-up overlay-net-up ## Brings up an Overlay Cilium cluster
135136
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
136137
--auto-upgrade-channel $(AUTOUPGRADE) \
137138
--node-os-upgrade-channel $(NODEUPGRADE) \
139+
--kubernetes-version $(K8S_VER) \
138140
--node-count $(NODE_COUNT) \
139141
--node-vm-size $(VM_SIZE) \
140142
--load-balancer-sku basic \
@@ -151,6 +153,7 @@ overlay-up: rg-up overlay-net-up ## Brings up an Overlay AzCNI cluster
151153
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
152154
--auto-upgrade-channel $(AUTOUPGRADE) \
153155
--node-os-upgrade-channel $(NODEUPGRADE) \
156+
--kubernetes-version $(K8S_VER) \
154157
--node-count $(NODE_COUNT) \
155158
--node-vm-size $(VM_SIZE) \
156159
--load-balancer-sku basic \
@@ -166,6 +169,7 @@ swift-byocni-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster
166169
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
167170
--auto-upgrade-channel $(AUTOUPGRADE) \
168171
--node-os-upgrade-channel $(NODEUPGRADE) \
172+
--kubernetes-version $(K8S_VER) \
169173
--node-count $(NODE_COUNT) \
170174
--node-vm-size $(VM_SIZE) \
171175
--load-balancer-sku standard \
@@ -177,14 +181,14 @@ swift-byocni-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster
177181
--yes
178182
ifeq ($(OS),windows)
179183
@$(MAKE) windows-swift-nodepool-up
180-
else
181-
@$(MAKE) set-kubeconf
182184
endif
185+
@$(MAKE) set-kubeconf
183186

184187
swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI cluster without kube-proxy
185188
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
186189
--auto-upgrade-channel $(AUTOUPGRADE) \
187190
--node-os-upgrade-channel $(NODEUPGRADE) \
191+
--kubernetes-version $(K8S_VER) \
188192
--node-count $(NODE_COUNT) \
189193
--node-vm-size $(VM_SIZE) \
190194
--load-balancer-sku basic \
@@ -201,6 +205,7 @@ swift-cilium-up: rg-up swift-net-up ## Bring up a SWIFT Cilium cluster
201205
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
202206
--auto-upgrade-channel $(AUTOUPGRADE) \
203207
--node-os-upgrade-channel $(NODEUPGRADE) \
208+
--kubernetes-version $(K8S_VER) \
204209
--node-count $(NODE_COUNT) \
205210
--node-vm-size $(VM_SIZE) \
206211
--load-balancer-sku basic \
@@ -217,6 +222,7 @@ swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
217222
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
218223
--auto-upgrade-channel $(AUTOUPGRADE) \
219224
--node-os-upgrade-channel $(NODEUPGRADE) \
225+
--kubernetes-version $(K8S_VER) \
220226
--node-count $(NODE_COUNT) \
221227
--node-vm-size $(VM_SIZE) \
222228
--load-balancer-sku basic \
@@ -228,17 +234,17 @@ swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
228234
@$(MAKE) set-kubeconf
229235

230236
# The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28
231-
# These AKS clusters can only be created in a limited subscription listed here:
237+
# These AKS clusters can only be created in a limited subscription listed here:
232238
# https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents
233239
vnetscale-swift-byocni-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster
234240
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
235241
--auto-upgrade-channel $(AUTOUPGRADE) \
236242
--node-os-upgrade-channel $(NODEUPGRADE) \
243+
--kubernetes-version $(K8S_VER) \
237244
--node-count $(NODE_COUNT) \
238245
--node-vm-size $(VM_SIZE) \
239246
--load-balancer-sku basic \
240247
--network-plugin none \
241-
--kubernetes-version $(K8S_VER) \
242248
--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
243249
--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
244250
--no-ssh-key \
@@ -250,11 +256,11 @@ vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up
250256
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
251257
--auto-upgrade-channel $(AUTOUPGRADE) \
252258
--node-os-upgrade-channel $(NODEUPGRADE) \
259+
--kubernetes-version $(K8S_VER) \
253260
--node-count $(NODE_COUNT) \
254261
--node-vm-size $(VM_SIZE) \
255262
--load-balancer-sku basic \
256263
--network-plugin none \
257-
--kubernetes-version $(K8S_VER) \
258264
--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
259265
--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
260266
--no-ssh-key \
@@ -267,13 +273,13 @@ vnetscale-swift-cilium-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale
267273
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
268274
--auto-upgrade-channel $(AUTOUPGRADE) \
269275
--node-os-upgrade-channel $(NODEUPGRADE) \
276+
--kubernetes-version $(K8S_VER) \
270277
--node-count $(NODE_COUNT) \
271278
--node-vm-size $(VM_SIZE) \
272279
--load-balancer-sku basic \
273280
--network-plugin azure \
274281
--network-dataplane cilium \
275282
--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \
276-
--kubernetes-version $(K8S_VER) \
277283
--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
278284
--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
279285
--no-ssh-key \
@@ -284,11 +290,11 @@ vnetscale-swift-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT
284290
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
285291
--auto-upgrade-channel $(AUTOUPGRADE) \
286292
--node-os-upgrade-channel $(NODEUPGRADE) \
293+
--kubernetes-version $(K8S_VER) \
287294
--node-count $(NODE_COUNT) \
288295
--node-vm-size $(VM_SIZE) \
289296
--load-balancer-sku basic \
290297
--network-plugin azure \
291-
--kubernetes-version $(K8S_VER) \
292298
--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
293299
--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
294300
--no-ssh-key \
@@ -299,6 +305,7 @@ windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster
299305
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
300306
--auto-upgrade-channel $(AUTOUPGRADE) \
301307
--node-os-upgrade-channel $(NODEUPGRADE) \
308+
--kubernetes-version $(K8S_VER) \
302309
--node-count $(NODE_COUNT) \
303310
--node-vm-size $(VM_SIZE) \
304311
--network-plugin azure \
@@ -307,8 +314,8 @@ windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster
307314
--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
308315
--no-ssh-key \
309316
--yes
310-
311317
@$(MAKE) windows-nodepool-up
318+
@$(MAKE) set-kubeconf
312319

313320
linux-cniv1-up: rg-up overlay-net-up ## Bring up a Linux CNIv1 cluster
314321
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
@@ -322,13 +329,13 @@ linux-cniv1-up: rg-up overlay-net-up ## Bring up a Linux CNIv1 cluster
322329
--os-sku $(OS_SKU) \
323330
--no-ssh-key \
324331
--yes
325-
326332
@$(MAKE) set-kubeconf
327333

328334
dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay cluster with Linux node only
329335
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
330336
--auto-upgrade-channel $(AUTOUPGRADE) \
331337
--node-os-upgrade-channel $(NODEUPGRADE) \
338+
--kubernetes-version $(K8S_VER) \
332339
--node-count $(NODE_COUNT) \
333340
--node-vm-size $(VM_SIZE) \
334341
--network-plugin azure \
@@ -344,6 +351,7 @@ dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Over
344351
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
345352
--auto-upgrade-channel $(AUTOUPGRADE) \
346353
--node-os-upgrade-channel $(NODEUPGRADE) \
354+
--kubernetes-version $(K8S_VER) \
347355
--node-count $(NODE_COUNT) \
348356
--node-vm-size $(VM_SIZE) \
349357
--network-plugin none \
@@ -359,6 +367,7 @@ cilium-dualstack-up: rg-up overlay-net-up ## Brings up a Cilium Dualstack Overla
359367
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
360368
--auto-upgrade-channel $(AUTOUPGRADE) \
361369
--node-os-upgrade-channel $(NODEUPGRADE) \
370+
--kubernetes-version $(K8S_VER) \
362371
--node-count $(NODE_COUNT) \
363372
--node-vm-size $(VM_SIZE) \
364373
--network-plugin azure \
@@ -375,6 +384,7 @@ dualstack-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up a Dualstack o
375384
$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
376385
--auto-upgrade-channel $(AUTOUPGRADE) \
377386
--node-os-upgrade-channel $(NODEUPGRADE) \
387+
--kubernetes-version $(K8S_VER) \
378388
--node-count $(NODE_COUNT) \
379389
--node-vm-size $(VM_SIZE) \
380390
--network-plugin none \
@@ -396,7 +406,6 @@ windows-nodepool-up: ## Add windows node pool
396406
--os-sku $(OS_SKU_WIN) \
397407
--max-pods 250 \
398408
--subscription $(SUB)
399-
@$(MAKE) set-kubeconf
400409

401410
windows-swift-nodepool-up: ## Add windows node pool
402411
$(AZCLI) aks nodepool add -g $(GROUP) -n npwin \
@@ -408,7 +417,6 @@ windows-swift-nodepool-up: ## Add windows node pool
408417
--max-pods 250 \
409418
--subscription $(SUB) \
410419
--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet
411-
@$(MAKE) set-kubeconf
412420

413421
down: ## Delete the cluster
414422
$(AZCLI) aks delete -g $(GROUP) -n $(CLUSTER) --yes

test/integration/manifests/cni/cni-installer-v1-windows.yaml

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,10 +47,15 @@ spec:
4747
image: mcr.microsoft.com/powershell:lts-nanoserver-ltsc2022
4848
command: ["powershell.exe", "-command"]
4949
args: ["if (Get-Process -Name 'azure-vnet-telemetry' -ErrorAction SilentlyContinue) { Stop-Process -Name 'azure-vnet-telemetry' -Force }"]
50+
env:
51+
- name: PATHEXT
52+
value: .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.CPL;;
53+
workingDir: $env:CONTAINER_SANDBOX_MOUNT_POINT
5054
- name: cni-installer
5155
image: ${CNI_IMAGE}
5256
imagePullPolicy: Always
53-
command: ["%CONTAINER_SANDBOX_MOUNT_POINT%/dropgz"]
57+
command:
58+
- powershell.exe; $env:CONTAINER_SANDBOX_MOUNT_POINT/dropgz
5459
args:
5560
- deploy
5661
- azure-vnet
@@ -65,6 +70,9 @@ spec:
6570
- azure-vnet-telemetry.config
6671
- -o
6772
- /k/azurecni/bin/azure-vnet-telemetry.config
73+
env:
74+
- name: PATHEXT
75+
value: .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.CPL;;
6876
volumeMounts:
6977
- name: cni-bin
7078
mountPath: /k/azurecni/bin/

test/integration/manifests/cns/daemonset-windows.yaml

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ spec:
4848
imagePullPolicy: IfNotPresent
4949
securityContext:
5050
privileged: true
51+
workingDir: $env:CONTAINER_SANDBOX_MOUNT_POINT
5152
command: ["powershell.exe"]
5253
args:
5354
[
@@ -86,6 +87,8 @@ spec:
8687
env:
8788
- name: PATH
8889
value: '%CONTAINER_SANDBOX_MOUNT_POINT%\Windows\System32\WindowsPowershell\v1.0\'
90+
- name: PATHEXT
91+
value: .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.CPL;;
8992
- name: CNSIpAddress
9093
value: "127.0.0.1"
9194
- name: CNSPort
@@ -107,12 +110,16 @@ spec:
107110
- name: cni-installer
108111
image: acnpublic.azurecr.io/cni-dropgz:latest
109112
imagePullPolicy: Always
110-
command: ["%CONTAINER_SANDBOX_MOUNT_POINT%/dropgz"]
113+
command:
114+
- powershell.exe; $env:CONTAINER_SANDBOX_MOUNT_POINT/dropgz
111115
args:
112116
- deploy
113117
- azure-vnet
114118
- -o
115119
- /k/azurecni/bin/azure-vnet.exe # // TODO: add windows cni conflist when ready
120+
env:
121+
- name: PATHEXT
122+
value: .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC;.CPL;;
116123
volumeMounts:
117124
- name: cni-bin
118125
mountPath: /k/azurecni/bin/ # TODO: add cni conflist when ready

0 commit comments

Comments (0)