27 changes: 27 additions & 0 deletions .pipelines/multitenancy/scripts/akse2e.sh
@@ -0,0 +1,27 @@
#!/bin/bash

set -ex

az storage file download --account-name acnshared -s aksdev -p ./azureconfig-$SCENARIO.yaml --auth-mode login --enable-file-backup-request-intent
az storage file download --account-name acnshared -s aksdev -p ./aksdev --auth-mode login --enable-file-backup-request-intent

chmod +x ./aksdev
az aks get-credentials -g $RG --name $1 --file ~/.kube/config

cat azureconfig-$SCENARIO.yaml

# run e2e with vars
./aksdev e2e run -n "CNI Swift v2" --azureconfig azureconfig-$SCENARIO.yaml \
--var resource_group=$RG \
--var aks_cluster=$PODSUBNET_CLUSTER_NAME \
--var vnet_name=$VNET \
--var vnet_nodesubnet_name=$NODE_SUBNET_NAME \
--var vnet_podsubnet_name=$POD_SUBNET_NAME \
--var subnet_token=$SUBNET_TOKEN \
--var storage_account_name=$STORAGE_ACC \
--var nat_gateway_name=$NAT_GW_NAME \
--var public_ip_name=$PODSUBNET_CLUSTER_NAME-ip \
--var aks_multitenant_cluster=$1 \
--var service_ip=$SERVICE_IP \
--var client_id=$USER_ASSIGNED_CLIENT_ID \
--var keep_env=$KEEP_ENV
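
For context, a minimal sketch of running this script outside the pipeline. The variable names come from the script above; the concrete values are placeholders (assumptions), and an authenticated `az` session with access to the `acnshared` file share is a prerequisite.

```bash
# Hypothetical local invocation of akse2e.sh; all values are placeholders.
export SCENARIO="defaultscenario"            # selects azureconfig-$SCENARIO.yaml
export RG="my-resource-group"
export PODSUBNET_CLUSTER_NAME="podsubnet-cluster"
export VNET="my-vnet"
export NODE_SUBNET_NAME="nodesubnet"
export POD_SUBNET_NAME="podsubnet"
export SUBNET_TOKEN="<delegated-subnet-token>"
export STORAGE_ACC="mystorageaccount"
export NAT_GW_NAME="my-natgw"
export SERVICE_IP="10.0.0.10"
export USER_ASSIGNED_CLIENT_ID="<identity-client-id>"
export KEEP_ENV="false"

./akse2e.sh "my-multitenant-cluster"         # $1 = multitenant AKS cluster name
```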
60 changes: 60 additions & 0 deletions .pipelines/multitenancy/scripts/az-login.sh
@@ -0,0 +1,60 @@
#!/bin/bash

source ./.pipelines/multitenancy/scripts/utils.sh

function main() {
# parse the arguments
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
--service-principal)
shift
service_principal="$1"
;;
--id-token)
shift
idtoken="$1"
;;
--tenant)
shift
tenant="$1"
;;
*)
echo 1>&2 "unknown argument: $1"
return 1
;;
esac
shift
done
export AZURE_CLIENT_ID="$service_principal"
export AZURE_TENANT_ID="$tenant"
if [[ -z "$tokenid" ]]; then
echo >&2 "Password Auth Disabled. Please convert to workload identity."
else
workload_login "$service_principal" "$tenantid" "$idtoken"
fi
}

# gets the service principal information for an app id
function get_sp_info() {
local sp_appid
sp_appid="${1}"
utils::log az ad sp show --id "$sp_appid" --query id -otsv
sp_oid="$cmd_out"
utils::log az ad sp show --id "$sp_appid" --query displayName -otsv
sp_name="$cmd_out"
}

function workload_login() {
#export AZURE_AUTHORITY_HOST="$2"
utils::setsecret AZURE_RESOURCE_BOOTSTRAP_CLIENT_ID "$1"
utils::setvar AZURE_RESOURCE_BOOTSTRAP_CLIENT_TENANT_ID "$2"

get_sp_info "$1"
utils::setvar AZURE_RESOURCE_BOOTSTRAP_CLIENT_NAME "$sp_name"

echo "$3" > wfi-token-file
local wfi_filepath
wfi_filepath=$(realpath wfi-token-file)
export AZURE_FEDERATED_TOKEN_FILE="$wfi_filepath"
}
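
For reference, a sketch of how the values exported by `workload_login` are commonly consumed. The `az login --federated-token` flag is real, but whether the e2e tooling performs this login itself is an assumption; treat this as illustrative only.

```bash
# Sketch only: one common consumer of the exported workload-identity values.
az login --service-principal \
  --username "$AZURE_CLIENT_ID" \
  --tenant "$AZURE_TENANT_ID" \
  --federated-token "$(cat "$AZURE_FEDERATED_TOKEN_FILE")"
```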
81 changes: 81 additions & 0 deletions .pipelines/multitenancy/scripts/run-tests.sh
@@ -0,0 +1,81 @@
#!/bin/bash

set -ex
source ./.pipelines/multitenancy/scripts/utils.sh

function main() {
# parse the arguments
while [[ $# -gt 0 ]]; do
key="$1"
case "$key" in
--test-dir)
shift
test_dir="$1"
;;
--mt-test-cluster)
shift
mt_test_cluster="$1"
;;
--scenario)
shift
scenario="$1"
;;
*)
echo 1>&2 "unknown argument: $1"
return 1
;;
esac
shift
done
run_tests "$test_dir" "$mt_test_cluster" "$scenario"
}

function run_tests() {
local test_dir
test_dir="${1}"
local mt_test_cluster
mt_test_cluster="${2}"
local scenario
scenario="${3}"

STEP="runAKSe2e"
cd "$test_dir"

# get config vars
export_envVars "$scenario"

if [[ "$ENABLED" == "false" ]]; then
echo "scenario: $scenario skipped"
return 0
fi

# Get the OIDC Issuer URL
export AKS_OIDC_ISSUER="$(az aks show -n "$mt_test_cluster" -g "$RG" --query "oidcIssuerProfile.issuerUrl" -otsv)"

# Federate the identity
az identity federated-credential create \
--name "$FEDERATED_IDENTITY_CREDENTIAL_PREFIX-$mt_test_cluster" \
--identity-name "$USER_ASSIGNED_IDENTITY_NAME" \
--resource-group "$RG" \
--issuer "$AKS_OIDC_ISSUER" \
--subject system:serviceaccount:mtpod-to-service-endpoint:workload-identity-sa

# Get identity client ID
export USER_ASSIGNED_CLIENT_ID=$(az identity show --resource-group "$RG" --name "$USER_ASSIGNED_IDENTITY_NAME" --query 'clientId' -o tsv)

# Run aks e2e test suite
chmod +x ./akse2e.sh
./akse2e.sh "$mt_test_cluster" && passed="true" || passed="false"

# Clean up the federated identity credential
az identity federated-credential delete --name "$FEDERATED_IDENTITY_CREDENTIAL_PREFIX-$mt_test_cluster" --identity-name "$USER_ASSIGNED_IDENTITY_NAME" --resource-group "$RG" --yes

if [[ "$passed" == "true" ]]; then
echo "Tests passed"
else
echo "Tests failed"
return 1
fi
}

main "$@"
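
The federated credential created above uses the subject `system:serviceaccount:mtpod-to-service-endpoint:workload-identity-sa`. For that federation to resolve, the cluster side conventionally carries a service account annotated with the identity's client ID. The namespace and service account names below are taken from the `--subject` string; the annotation is the standard Azure Workload Identity convention, and whether the test suite creates this itself is an assumption.

```bash
# Cluster-side counterpart of the federated credential (sketch; the e2e suite
# may create these objects itself). Names come from the --subject value above.
kubectl create namespace mtpod-to-service-endpoint --dry-run=client -o yaml | kubectl apply -f -
kubectl create serviceaccount workload-identity-sa \
  -n mtpod-to-service-endpoint --dry-run=client -o yaml | kubectl apply -f -
kubectl annotate serviceaccount workload-identity-sa \
  -n mtpod-to-service-endpoint \
  azure.workload.identity/client-id="$USER_ASSIGNED_CLIENT_ID" \
  --overwrite
```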
58 changes: 58 additions & 0 deletions .pipelines/multitenancy/scripts/utils.sh
@@ -0,0 +1,58 @@
#!/bin/bash

export_envVars() {
az storage file download --account-name acnshared -s aksdev -p ./swiftv2runnerconfigvars-$1.env --auth-mode login --enable-file-backup-request-intent
export $(xargs < ./swiftv2runnerconfigvars-$1.env)
}

utils::gen_pass() {
local pass_len
pass_len="${1:-48}"

if [ -o xtrace ]; then
set +x
trap 'set -x' RETURN ERR
fi
base64_pass=$(openssl rand -base64 "${pass_len}")
return 0
}

utils::setvar() {
local var_name
var_name="${1}"
local value
value="${@:2}"

local hide="#"
local taskns="vso"
echo >&2 "${hide}${hide}${taskns}[task.setvariable name=${var_name};isoutput=true;]$value"
eval "export "$var_name"="$value""
}

utils::setsecret() {
local var_name
var_name="${1}"
local value
value="${@:2}"

local hide="#"
local taskns="vso"
echo >&2 "${hide}${hide}${taskns}[task.setvariable name=${var_name};isoutput=true;issecret=true;]$value"
eval "export "$var_name"="$value""
}

utils::log() {
local cmd
cmd=("${@}")
echo "${@}"
local outreader
outreader=$(touch out.log && echo "out.log")
local errreader
errreader=$(touch err.log && echo "err.log")

"${cmd[@]}" > >(tee ${outreader}) 2> >(tee ${errreader} >&2)
cmd_code="${PIPESTATUS[0]}"
cmd_out=$(cat $outreader)
cmd_err=$(cat $errreader)
return $cmd_code
}
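
A short usage sketch of the helpers above: `utils::setvar`/`utils::setsecret` emit Azure DevOps `##vso[task.setvariable]` logging commands (the `hide`/`taskns` split keeps the literal marker out of the log parser), and `utils::log` runs a command while capturing its output and exit status into `cmd_out`, `cmd_err`, and `cmd_code`. The example command and values are placeholders.

```bash
# Example usage of the utils.sh helpers (placeholder values).
source ./.pipelines/multitenancy/scripts/utils.sh

# Publishes MY_OUTPUT as a pipeline output variable and exports it locally.
utils::setvar MY_OUTPUT "some-value"

# Runs a command, tees its output, and leaves results in cmd_out / cmd_err / cmd_code.
utils::log az account show --query name -otsv
echo "subscription: $cmd_out (exit code $cmd_code)"
```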
8 changes: 7 additions & 1 deletion .pipelines/multitenancy/swiftv2-e2e-job-template.yaml
@@ -4,7 +4,9 @@ parameters:
clusterType: ""
clusterName: ""
vmSize: ""
arch: ""
k8sVersion: ""
scenario: ""
dependsOn: ""
nodePoolName: ""
continueOnError: true
@@ -30,7 +32,7 @@
k8sVersion: ${{ parameters.k8sVersion }}
dependsOn: ${{ parameters.dependsOn }}
continueOnError: ${{ parameters.continueOnError }}
region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has specific region requirements
scenario: ${{ parameters.scenario }}

- stage: ${{ parameters.name }}
condition: and( succeeded(), not(eq(dependencies.mtacluster.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies
@@ -60,3 +62,7 @@
name: ${{ parameters.name }}
clusterName: ${{ parameters.clusterName }}-$(commitID)
os: linux
arch: ${{ parameters.arch }}
region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has specific region requirements
scenario: ${{ parameters.scenario }}

102 changes: 56 additions & 46 deletions .pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -2,6 +2,10 @@ parameters:
name: ""
clusterName: ""
continueOnError: true
arch: ""
os: ""
scenario: ""
region: ""

steps:
- bash: |
@@ -26,51 +30,57 @@
scriptType: "bash"
addSpnToEnvironment: true
inlineScript: |
set -e
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
ls -lah
pwd
kubectl cluster-info
kubectl get po -owide -A
echo "Apply the pod network yaml to start the delegation"
less test/integration/manifests/swiftv2/podnetwork.yaml
envsubst '${SUBNET_TOKEN},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
echo "Check the podnetwork yaml file"
less test/integration/manifests/swiftv2/podnetwork.yaml
kubectl get pn
kubectl describe pn
echo "Apply the pod network instance yaml to reserve IP"
kubectl apply -f test/integration/manifests/swiftv2/pni.yaml
kubectl get pni
kubectl describe pni
export NODE_NAME_0="$(kubectl get nodes -o json | jq -r .items[0].metadata.name)"
echo $NODE_NAME_0
echo "Start the first pod using the reserved IP"
envsubst '$NODE_NAME_0' < test/integration/manifests/swiftv2/mtpod0.yaml | kubectl apply -f -
export NODE_NAME_1="$(kubectl get nodes -o json | jq -r .items[1].metadata.name)"
echo $NODE_NAME_1
echo "Start another pod using the reserved IP"
envsubst '$NODE_NAME_1' < test/integration/manifests/swiftv2/mtpod1.yaml | kubectl apply -f -
sleep 2m
kubectl get pod -o wide -A
sleep 2m
echo "Check pods after 4 minutes"
kubectl get po -owide -A
kubectl describe pni
name: "start_swiftv2_pods"
displayName: "Start Swiftv2 Pods"
continueOnError: ${{ parameters.continueOnError }}
env:
SUBNET_TOKEN: $(SUBNET_TOKEN)
set -ex
source ./.pipelines/multitenancy/scripts/utils.sh
export_envVars ${{ parameters.scenario }}
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} GROUP=$RG
name: "kubeconfig"
displayName: "Set Kubeconfig"

- script: |
set -e
kubectl get po -owide -A
cd test/integration/swiftv2
echo "TestSwiftv2PodToPod and will run it after migration from scripts."
go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration -v
retryCountOnTaskFailure: 3
name: "Swiftv2_Tests_future_version"
displayName: "Swiftv2 Tests through code"
continueOnError: ${{ parameters.continueOnError }}
- task: AzureCLI@2
inputs:
azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
scriptLocation: "inlineScript"
scriptType: "bash"
addSpnToEnvironment: true
inlineScript: |
set -ex
source ./.pipelines/multitenancy/scripts/utils.sh
export_envVars ${{ parameters.scenario }}
echo "Deploying on Linux nodes"
export CNI_IMAGE=$(make cni-image-name-and-tag-multiarch CNI_VERSION=$(make cni-version))
make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} GROUP=$RG
echo "Keep CNS version up to date, grabbing pipeline parameter"
CNS_IMAGE=$(make cns-image-name-and-tag-multiarch)
sed -i '/containers:/{n;n;s/\(image\).*/\1: '"${CNS_IMAGE//\//\\/}"'/}' ./test/integration/manifests/cns/daemonset-linux.yaml
sed -i '/initContainers:/{n;n;s/\(image\).*/\1: '"${CNI_IMAGE//\//\\/}"'/}' ./test/integration/manifests/cns/daemonset-linux.yaml
sed -i '/- name: cni-installer/,/volumeMounts:/ {
/command: \["\/dropgz"\]/!b; a \ \ \ \ \ \ \ \ \ \ args:\n\ \ \ \ \ \ \ \ \ \ \ \ - deploy\n\ \ \ \ \ \ \ \ \ \ \ \ - --skip-verify\n\ \ \ \ \ \ \ \ \ \ \ \ - azure-vnet\n\ \ \ \ \ \ \ \ \ \ \ \ - -o\n\ \ \ \ \ \ \ \ \ \ \ \ - /opt/cni/bin/azure-vnet\n\ \ \ \ \ \ \ \ \ \ \ \ - azure-vnet-telemetry\n\ \ \ \ \ \ \ \ \ \ \ \ - -o\n\ \ \ \ \ \ \ \ \ \ \ \ - /opt/cni/bin/azure-vnet-telemetry
[Review thread on the sed block above]
Contributor: I thought dropgz is deprecated already from the repo?
Author (@jc2543, Oct 3, 2024): This is just referring to the command string in the daemonset-linux file for matching purposes; the sed edits only make image tag replacements in the azure-cns daemonset yaml that is already being used. The image we are using is azure-cni. We are able to swap both the CNI and CNS images by deploying this daemonset.
Contributor: Can we do "kubectl apply daemonset" instead?
Author: We apply the daemonset in the next couple of lines, but the yaml first has to be updated with the image and args we want so that the cni image gets deployed.
}' ./test/integration/manifests/cns/daemonset-linux.yaml
kubectl apply -f ./test/integration/manifests/cns/daemonset-linux.yaml
kubectl rollout status daemonset/azure-cns -n kube-system
kubectl get pod -A
name: "UpdateCNIandCNSVersion"
displayName: "Update CNI and CNS Version"

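Relating to the review thread above: the sed edits rewrite the image fields in `daemonset-linux.yaml` before it is applied. An alternative, arguably more readable rewrite could use yq (mikefarah v4) if it were available on the agent. This is a sketch, not what the pipeline does, and it assumes the CNS container is the first entry under `containers` and the `cni-installer` initContainer is the first entry under `initContainers`.

```bash
# Alternative sketch (not used by this pipeline): the same image swap via yq v4,
# assuming the container ordering in daemonset-linux.yaml described above.
CNS_IMAGE="$CNS_IMAGE" CNI_IMAGE="$CNI_IMAGE" yq -i '
  .spec.template.spec.containers[0].image = strenv(CNS_IMAGE) |
  .spec.template.spec.initContainers[0].image = strenv(CNI_IMAGE)
' ./test/integration/manifests/cns/daemonset-linux.yaml
```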
- task: AzureCLI@2
name: runSwiftv2Tests
displayName: "Run Tests"
inputs:
azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
addSpnToEnvironment: true
scriptType: "bash"
scriptLocation: "inlineScript"
inlineScript: |
set -xe

bash .pipelines/multitenancy/scripts/az-login.sh \
--service-principal "$servicePrincipalId" \
--id-token "$idToken" \
--tenant "$tenantId"

bash .pipelines/multitenancy/scripts/run-tests.sh \
--test-dir "$(System.DefaultWorkingDirectory)/.pipelines/multitenancy/scripts" \
--mt-test-cluster ${{ parameters.clusterName }} \
--scenario ${{ parameters.scenario }}
