diff --git a/hack/aks/Makefile b/hack/aks/Makefile
index cad91ba967..3e662f2593 100644
--- a/hack/aks/Makefile
+++ b/hack/aks/Makefile
@@ -19,6 +19,11 @@ OS_SKU_WIN ?= Windows2022
 REGION ?= westus2
 VM_SIZE ?= Standard_B2s
 VM_SIZE_WIN ?= Standard_B2s
+IP_TAG ?= FirstPartyUsage=/DelegatedNetworkControllerTest
+IP_PREFIX ?= serviceTaggedIp
+PUBLIC_IP_ID ?= /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/publicIPAddresses
+PUBLIC_IPv4 ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v4
+PUBLIC_IPv6 ?= $(PUBLIC_IP_ID)/$(IP_PREFIX)-$(CLUSTER)-v6
 KUBE_PROXY_JSON_PATH ?= ./kube-proxy.json
 
 # overrideable variables
@@ -43,6 +48,23 @@ azcfg: ## Set the $AZCLI to use aks-preview
 	@$(AZCLI) extension add --name aks-preview --yes
 	@$(AZCLI) extension update --name aks-preview
 
+ip: ## Create a static, tagged public IP (requires IPVERSION=v4 or v6)
+	$(AZCLI) network public-ip create --name $(IP_PREFIX)-$(CLUSTER)-$(IPVERSION) \
+		--resource-group $(GROUP) \
+		--allocation-method Static \
+		--ip-tags $(IP_TAG) \
+		--location $(REGION) \
+		--sku Standard \
+		--tier Regional \
+		--version IP$(IPVERSION)
+
+ipv4: ## Create the tagged IPv4 public IP for $(CLUSTER)
+	@$(MAKE) ip IPVERSION=v4
+
+ipv6: ## Create the tagged IPv6 public IP for $(CLUSTER)
+	@$(MAKE) ip IPVERSION=v6
+
+
 set-kubeconf: ## Adds the kubeconf for $CLUSTER
 	$(AZCLI) aks get-credentials -n $(CLUSTER) -g $(GROUP)
 
@@ -89,7 +111,6 @@ overlay-net-up: ## Create vnet, nodenet subnets
 	$(AZCLI) network vnet create -g $(GROUP) -l $(REGION) --name $(VNET) --address-prefixes 10.0.0.0/8 -o none
 	$(AZCLI) network vnet subnet create -g $(GROUP) --vnet-name $(VNET) --name nodenet --address-prefix 10.10.0.0/16 -o none
 
-
 ##@ AKS Clusters
 
 byocni-up: swift-byocni-up ## Alias to swift-byocni-up
@@ -97,15 +118,15 @@ cilium-up: swift-cilium-up ## Alias to swift-cilium-up
 
 up: swift-up ## Alias to swift-up
 
-nodesubnet-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an NodeSubnet BYO CNI cluster without kube-proxy
+nodesubnet-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up a NodeSubnet BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku standard \
 		--max-pods 250 \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--os-sku $(OS_SKU) \
@@ -114,14 +135,14 @@ nodesubnet-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an NodeSubne
 		--yes
 	@$(MAKE) set-kubeconf
 
-overlay-byocni-up: rg-up overlay-net-up ## Brings up a Linux Overlay BYO CNI cluster
+overlay-byocni-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku standard \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--pod-cidr 192.168.0.0/16 \
@@ -134,13 +155,14 @@ ifeq ($(OS),windows)
 	$(MAKE) windows-nodepool-up
 endif
 
-overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy
+overlay-byocni-nokubeproxy-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--pod-cidr 192.168.0.0/16 \
@@ -150,13 +172,14 @@ overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO
 		--yes
 	@$(MAKE) set-kubeconf
 
-overlay-cilium-up: rg-up overlay-net-up ## Brings up an Overlay Cilium cluster
+overlay-cilium-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay Cilium cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-dataplane cilium \
 		--network-plugin-mode overlay \
@@ -166,13 +189,14 @@ overlay-cilium-up: rg-up overlay-net-up ## Brings up an Overlay Cilium cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-overlay-up: rg-up overlay-net-up ## Brings up an Overlay AzCNI cluster
+overlay-up: rg-up ipv4 overlay-net-up ## Brings up an Overlay AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
 		--pod-cidr 192.168.0.0/16 \
@@ -181,14 +205,14 @@ overlay-up: rg-up overlay-net-up ## Brings up an Overlay AzCNI cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-swift-byocni-up: rg-up swift-net-up ## Bring up a SWIFT (Podsubnet) BYO CNI cluster
+swift-byocni-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
-		--load-balancer-sku standard \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -200,13 +224,14 @@ ifeq ($(OS),windows)
 endif
 	@$(MAKE) set-kubeconf
 
-swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT (Podsubnet) BYO CNI cluster without kube-proxy
+swift-byocni-nokubeproxy-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT BYO CNI cluster without kube-proxy, with a tagged outbound public IP
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -216,13 +241,14 @@ swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT (Podsubnet)
 		--yes
 	@$(MAKE) set-kubeconf
 
-swift-cilium-up: rg-up swift-net-up ## Bring up a SWIFT Cilium cluster
+swift-cilium-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT Cilium cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-dataplane cilium \
 		--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \
@@ -232,13 +258,14 @@ swift-cilium-up: rg-up swift-net-up ## Bring up a SWIFT Cilium cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
+swift-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -246,7 +273,7 @@ swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
 		--yes
 	@$(MAKE) set-kubeconf
 
-swiftv2-multitenancy-cluster-up: rg-up
+swiftv2-multitenancy-cluster-up: rg-up ipv4
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
@@ -254,16 +281,18 @@ swiftv2-multitenancy-cluster-up: rg-up
 		--nodepool-name "mtapool" \
 		--node-vm-size $(VM_SIZE) \
 		--node-count 2 \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--nodepool-tags fastpathenabled=true \
 		--no-ssh-key \
 		--yes
 	@$(MAKE) set-kubeconf
 
-swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
+swiftv2-dummy-cluster-up: rg-up ipv4 swift-net-up ## Bring up a SWIFT AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--no-ssh-key \
 		--yes
 	@$(MAKE) set-kubeconf
@@ -271,13 +300,14 @@ swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster
 # The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28
 # These AKS clusters can only be created in a limited subscription listed here:
 # https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents
-vnetscale-swift-byocni-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster
+vnetscale-swift-byocni-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -287,13 +317,14 @@ vnetscale-swift-byocni-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale
 		--yes
 	@$(MAKE) set-kubeconf
 
-vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT (Podsubnet) BYO CNI cluster without kube-proxy
+vnetscale-swift-byocni-nokubeproxy-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT BYO CNI cluster without kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin none \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -304,13 +335,14 @@ vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up
 		--yes
 	@$(MAKE) set-kubeconf
 
-vnetscale-swift-cilium-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT Cilium cluster
+vnetscale-swift-cilium-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT Cilium cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--network-dataplane cilium \
 		--aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/CiliumDataplanePreview \
@@ -321,13 +353,14 @@ vnetscale-swift-cilium-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale
 		--yes
 	@$(MAKE) set-kubeconf
 
-vnetscale-swift-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT AzCNI cluster
+vnetscale-swift-up: rg-up ipv4 vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT AzCNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
 		--pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
@@ -336,13 +369,14 @@ vnetscale-swift-up: rg-up vnetscale-swift-net-up ## Bring up a Vnet Scale SWIFT
 		--yes
 	@$(MAKE) set-kubeconf
 
-cniv1-up: rg-up overlay-net-up ## Bring up a CNIv1 cluster
+cniv1-up: rg-up ipv4 overlay-net-up ## Bring up a CNIv1 cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4) \
 		--max-pods 250 \
 		--network-plugin azure \
 		--vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
@@ -354,13 +388,14 @@ ifeq ($(OS),windows)
 	$(MAKE) windows-nodepool-up
 endif
 
-dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay cluster with Linux node only
+dualstack-overlay-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a dualstack Overlay cluster with Linux node only
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
 		--subscription $(SUB) \
@@ -370,13 +405,14 @@ dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay clu
 		--yes
 	@$(MAKE) set-kubeconf
 
-dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Overlay BYO CNI cluster
+dualstack-overlay-byocni-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a dualstack Overlay BYO CNI cluster
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--subscription $(SUB) \
@@ -389,13 +425,14 @@ ifeq ($(OS),windows)
 	$(MAKE) windows-nodepool-up
 endif
 
-cilium-dualstack-up: rg-up overlay-net-up ## Brings up a Cilium Dualstack Overlay cluster with Linux node only
+cilium-dualstack-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a Cilium Dualstack Overlay cluster with Linux node only
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin azure \
 		--network-plugin-mode overlay \
 		--network-dataplane cilium \
@@ -406,13 +443,14 @@ cilium-dualstack-up: rg-up overlay-net-up ## Brings up a Cilium Dualstack Overla
 		--yes
 	@$(MAKE) set-kubeconf
 
-dualstack-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy
+dualstack-byocni-nokubeproxy-up: rg-up ipv4 ipv6 overlay-net-up ## Brings up a Dualstack overlay BYOCNI cluster with Linux node only and no kube-proxy
 	$(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \
 		--auto-upgrade-channel $(AUTOUPGRADE) \
 		--node-os-upgrade-channel $(NODEUPGRADE) \
 		--kubernetes-version $(K8S_VER) \
 		--node-count $(NODE_COUNT) \
 		--node-vm-size $(VM_SIZE) \
+		--load-balancer-outbound-ips $(PUBLIC_IPv4),$(PUBLIC_IPv6) \
 		--network-plugin none \
 		--network-plugin-mode overlay \
 		--subscription $(SUB) \
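
Usage sketch (illustrative, not part of the patch): with this change, every cluster target depends on rg-up and ipv4 (the dualstack targets also on ipv6), so the tagged public IP exists before aks create references it via --load-balancer-outbound-ips. The IP targets can also be invoked on their own against an existing resource group; all variable values below are hypothetical placeholders.

    # Create only the tagged IPv4 public IP (the resource group must already exist):
    make -C hack/aks ipv4 CLUSTER=mycluster GROUP=myrg REGION=westus2

    # Bring up a dualstack cluster; rg-up, ipv4, and ipv6 run first as prerequisites:
    make -C hack/aks dualstack-overlay-up CLUSTER=mycluster GROUP=myrg SUB=<subscription-id>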