Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 9 additions & 8 deletions pkg/cloud/azure/actuators/machine/actuator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,13 +89,13 @@ func newMachine(t *testing.T, machineConfig machinev1.AzureMachineProviderSpec,
}
}

func newFakeScope(t *testing.T, label string) *actuators.MachineScope {
func newFakeMachineScope(t *testing.T, label string, cloudEnv string) *actuators.MachineScope {
labels := make(map[string]string)
labels[actuators.MachineRoleLabel] = label
labels[machinev1.MachineClusterIDLabel] = "clusterID"
machineConfig := machinev1.AzureMachineProviderSpec{}
m := newMachine(t, machineConfig, labels)
return &actuators.MachineScope{
return actuators.NewFakeMachineScope(actuators.FakeMachineScopeParams{
Context: context.Background(),
Machine: m,
MachineConfig: &machinev1.AzureMachineProviderSpec{
Expand All @@ -106,7 +106,8 @@ func newFakeScope(t *testing.T, label string) *actuators.MachineScope {
ManagedIdentity: "dummyIdentity",
},
MachineStatus: &machinev1.AzureMachineProviderStatus{},
}
CloudEnv: cloudEnv,
})
}

func newFakeReconciler(t *testing.T) *Reconciler {
Expand Down Expand Up @@ -137,7 +138,7 @@ func newFakeReconciler(t *testing.T) *Reconciler {
networkInterfacesSvc.EXPECT().Delete(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

return &Reconciler{
scope: newFakeScope(t, actuators.ControlPlane),
scope: newFakeMachineScope(t, actuators.ControlPlane, string(configv1.AzurePublicCloud)),
networkInterfacesSvc: networkInterfacesSvc,
virtualMachinesSvc: fakeVMSuccessSvc,
virtualMachinesExtSvc: fakeSuccessSvc,
Expand Down Expand Up @@ -447,7 +448,7 @@ func (s *FakeAvailabilityZonesService) Delete(ctx context.Context, spec azure.Sp
}

func TestAvailabilityZones(t *testing.T) {
fakeScope := newFakeScope(t, actuators.ControlPlane)
fakeScope := newFakeMachineScope(t, actuators.ControlPlane, string(configv1.AzurePublicCloud))
fakeReconciler := newFakeReconcilerWithScope(t, fakeScope)

fakeReconciler.scope.MachineConfig.Zone = "2"
Expand Down Expand Up @@ -491,7 +492,7 @@ func TestGetZone(t *testing.T) {
}

for _, tc := range testCases {
fakeScope := newFakeScope(t, actuators.ControlPlane)
fakeScope := newFakeMachineScope(t, actuators.ControlPlane, string(configv1.AzurePublicCloud))
fakeReconciler := newFakeReconcilerWithScope(t, fakeScope)
fakeReconciler.scope.MachineConfig.Zone = tc.inputZone

Expand All @@ -512,7 +513,7 @@ func TestGetZone(t *testing.T) {
}

func TestCustomUserData(t *testing.T) {
fakeScope := newFakeScope(t, actuators.Node)
fakeScope := newFakeMachineScope(t, actuators.Node, string(configv1.AzurePublicCloud))
userDataSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "testCustomUserData",
Expand Down Expand Up @@ -541,7 +542,7 @@ func TestCustomUserData(t *testing.T) {
}

func TestCustomDataFailures(t *testing.T) {
fakeScope := newFakeScope(t, actuators.Node)
fakeScope := newFakeMachineScope(t, actuators.Node, string(configv1.AzurePublicCloud))
userDataSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "testCustomUserData",
Expand Down
23 changes: 23 additions & 0 deletions pkg/cloud/azure/actuators/machine/reconciler.go
Original file line number Diff line number Diff line change
Expand Up @@ -529,6 +529,29 @@ func (s *Reconciler) Delete(ctx context.Context) error {
return fmt.Errorf("failed to delete OS disk: %w", err)
}

// On Azure Stack Hub, DeleteOption is not supported on the VM API.
// We need to manually delete data disks that have deletionPolicy set to Delete.
if s.scope.IsStackHub() && len(s.scope.MachineConfig.DataDisks) > 0 {
for _, disk := range s.scope.MachineConfig.DataDisks {
if disk.DeletionPolicy == machinev1.DiskDeletionPolicyTypeDelete {
dataDiskName := azure.GenerateDataDiskName(s.scope.Machine.Name, disk.NameSuffix)

dataDiskSpec := &disks.Spec{
Name: dataDiskName,
}
if err := s.disksSvc.Delete(ctx, dataDiskSpec); err != nil {
metrics.RegisterFailedInstanceDelete(&metrics.MachineLabels{
Name: s.scope.Machine.Name,
Namespace: s.scope.Machine.Namespace,
Reason: "failed to delete data disk",
})

return fmt.Errorf("failed to delete data disk %s: %w", dataDiskName, err)
}
}
}
}

if s.scope.MachineConfig.Vnet == "" {
return fmt.Errorf("MachineConfig vnet is missing on machine %s", s.scope.Machine.Name)
}
Expand Down
204 changes: 189 additions & 15 deletions pkg/cloud/azure/actuators/machine/reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,13 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
configv1 "github.com/openshift/api/config/v1"
machinev1 "github.com/openshift/api/machine/v1beta1"
machinecontroller "github.com/openshift/machine-api-operator/pkg/controller/machine"
"github.com/openshift/machine-api-provider-azure/pkg/cloud/azure"
"github.com/openshift/machine-api-provider-azure/pkg/cloud/azure/actuators"
mock_azure "github.com/openshift/machine-api-provider-azure/pkg/cloud/azure/mock"
"github.com/openshift/machine-api-provider-azure/pkg/cloud/azure/services/disks"
"github.com/openshift/machine-api-provider-azure/pkg/cloud/azure/services/networkinterfaces"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -205,9 +207,11 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
expectedSpecLabels map[string]string
}{
{
name: "with a blank vm",
scope: func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "worker") },
vm: armcompute.VirtualMachine{},
name: "with a blank vm",
scope: func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "worker", string(configv1.AzurePublicCloud))
},
vm: armcompute.VirtualMachine{},
expectedLabels: map[string]string{
actuators.MachineRoleLabel: "worker",
machinev1.MachineClusterIDLabel: "clusterID",
Expand All @@ -218,8 +222,10 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
expectedSpecLabels: nil,
},
{
name: "with a running vm",
scope: func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "good-worker") },
name: "with a running vm",
scope: func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "good-worker", string(configv1.AzurePublicCloud))
},
vm: armcompute.VirtualMachine{
Properties: &armcompute.VirtualMachineProperties{
ProvisioningState: ptr.To[string]("Running"),
Expand All @@ -235,8 +241,10 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
expectedSpecLabels: nil,
},
{
name: "with a VMSize set vm",
scope: func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "sized-worker") },
name: "with a VMSize set vm",
scope: func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "sized-worker", string(configv1.AzurePublicCloud))
},
vm: armcompute.VirtualMachine{
Properties: &armcompute.VirtualMachineProperties{
HardwareProfile: &armcompute.HardwareProfile{
Expand All @@ -255,8 +263,10 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
expectedSpecLabels: nil,
},
{
name: "with a vm location",
scope: func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "located-worker") },
name: "with a vm location",
scope: func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "located-worker", string(configv1.AzurePublicCloud))
},
vm: armcompute.VirtualMachine{
Location: ptr.To[string]("nowhere"),
},
Expand All @@ -271,8 +281,10 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
expectedSpecLabels: nil,
},
{
name: "with a vm with zones",
scope: func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "zoned-worker") },
name: "with a vm with zones",
scope: func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "zoned-worker", string(configv1.AzurePublicCloud))
},
vm: armcompute.VirtualMachine{
Zones: abcZones,
},
Expand All @@ -287,8 +299,10 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
expectedSpecLabels: nil,
},
{
name: "with a vm with a faultdomain set",
scope: func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "availabilityset-worker") },
name: "with a vm with a faultdomain set",
scope: func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "availabilityset-worker", string(configv1.AzurePublicCloud))
},
vm: armcompute.VirtualMachine{
Properties: &armcompute.VirtualMachineProperties{
InstanceView: &armcompute.VirtualMachineInstanceView{
Expand All @@ -309,7 +323,7 @@ func TestSetMachineCloudProviderSpecificsTable(t *testing.T) {
{
name: "with a vm on spot",
scope: func(t *testing.T) *actuators.MachineScope {
scope := newFakeScope(t, "spot-worker")
scope := newFakeMachineScope(t, "spot-worker", string(configv1.AzurePublicCloud))
scope.MachineConfig.SpotVMOptions = &machinev1.SpotVMOptions{}
return scope
},
Expand Down Expand Up @@ -708,7 +722,9 @@ func TestMachineUpdateWithProvisionsngFailedNic(t *testing.T) {

testCtx := context.TODO()

scope := func(t *testing.T) *actuators.MachineScope { return newFakeScope(t, "worker") }
scope := func(t *testing.T) *actuators.MachineScope {
return newFakeMachineScope(t, "worker", string(configv1.AzurePublicCloud))
}
r := newFakeReconcilerWithScope(t, scope(t))
r.networkInterfacesSvc = networkSvc

Expand Down Expand Up @@ -744,3 +760,161 @@ func TestMachineUpdateWithProvisionsngFailedNic(t *testing.T) {
})
}
}

// TestStackHubDataDiskDeletion tests that data disks with DeletionPolicy=Delete
// are manually deleted on Azure Stack Hub. On Stack Hub the VM API does not
// support DeleteOption, so the reconciler must issue an explicit disk Delete
// for each such data disk; on public Azure no extra disk deletions are expected.
func TestStackHubDataDiskDeletion(t *testing.T) {
	// Table of scenarios varying the cloud environment (Stack Hub vs. public
	// Azure) and the attached data disks with their deletion policies.
	testCases := []struct {
		name       string
		isStackHub bool // true → scope built with the AzureStackCloud environment
		dataDisks  []machinev1.DataDisk
	}{
		{
			name:       "Stack Hub with single data disk with Delete policy",
			isStackHub: true,
			dataDisks: []machinev1.DataDisk{
				{
					NameSuffix:     "disk1",
					DiskSizeGB:     128,
					Lun:            0,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDelete,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
			},
		},
		{
			name:       "Stack Hub with multiple data disks with Delete policy",
			isStackHub: true,
			dataDisks: []machinev1.DataDisk{
				{
					NameSuffix:     "disk1",
					DiskSizeGB:     128,
					Lun:            0,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDelete,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
				{
					NameSuffix:     "disk2",
					DiskSizeGB:     256,
					Lun:            1,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDelete,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
			},
		},
		{
			// Detach policy must NOT trigger a manual deletion, even on Stack Hub.
			name:       "Stack Hub with data disk with Detach policy",
			isStackHub: true,
			dataDisks: []machinev1.DataDisk{
				{
					NameSuffix:     "disk1",
					DiskSizeGB:     128,
					Lun:            0,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDetach,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
			},
		},
		{
			// Mixed policies: only the Delete-policy disk gets a manual deletion.
			name:       "Stack Hub with mixed deletion policies",
			isStackHub: true,
			dataDisks: []machinev1.DataDisk{
				{
					NameSuffix:     "disk1",
					DiskSizeGB:     128,
					Lun:            0,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDelete,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
				{
					NameSuffix:     "disk2",
					DiskSizeGB:     256,
					Lun:            1,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDetach,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
			},
		},
		{
			// Public Azure: no manual data disk deletions regardless of policy.
			name:       "Non-Stack Hub with data disks",
			isStackHub: false,
			dataDisks: []machinev1.DataDisk{
				{
					NameSuffix:     "disk1",
					DiskSizeGB:     128,
					Lun:            0,
					DeletionPolicy: machinev1.DiskDeletionPolicyTypeDelete,
					ManagedDisk:    machinev1.DataDiskManagedDiskParameters{StorageAccountType: machinev1.StorageAccountPremiumLRS},
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)
			mockCtrl := gomock.NewController(t)

			// Gomock services for every Azure service the Delete path touches.
			vmSvc := mock_azure.NewMockService(mockCtrl)
			disksSvc := mock_azure.NewMockService(mockCtrl)
			networkSvc := mock_azure.NewMockService(mockCtrl)
			availabilitySetsSvc := mock_azure.NewMockService(mockCtrl)

			testCtx := context.TODO()

			// Pick the cloud environment that drives scope.IsStackHub().
			platformType := string(configv1.AzureStackCloud)
			if !tc.isStackHub {
				platformType = string(configv1.AzurePublicCloud)
			}
			scope := newFakeMachineScope(t, "worker", platformType)
			scope.MachineConfig.DataDisks = tc.dataDisks

			r := &Reconciler{
				scope:                scope,
				virtualMachinesSvc:   vmSvc,
				disksSvc:             disksSvc,
				networkInterfacesSvc: networkSvc,
				availabilitySetsSvc:  availabilitySetsSvc,
			}

			// Expect VM deletion
			vmSvc.EXPECT().Delete(testCtx, gomock.Any()).Return(nil).Times(1)

			// Expect OS disk deletion - always happens
			// (declared first so it consumes the first disks Delete call;
			// the reconciler deletes the OS disk before any data disks).
			expectedOSDiskName := azure.GenerateOSDiskName(scope.Machine.Name)
			disksSvc.EXPECT().Delete(testCtx, gomock.Any()).DoAndReturn(
				func(ctx context.Context, spec azure.Spec) error {
					diskSpec, ok := spec.(*disks.Spec)
					g.Expect(ok).To(BeTrue(), "spec should be a disk spec")
					g.Expect(diskSpec.Name).To(Equal(expectedOSDiskName), "OS disk name should match")
					return nil
				},
			).Times(1)

			// Expect data disk deletions only for disks with Delete policy on Stack Hub.
			// Expectations are registered in DataDisks order, matching the order in
			// which the reconciler iterates them. Each closure captures its own
			// expectedDataDiskName (fresh variable per loop iteration).
			if tc.isStackHub {
				for _, disk := range tc.dataDisks {
					if disk.DeletionPolicy == machinev1.DiskDeletionPolicyTypeDelete {
						expectedDataDiskName := azure.GenerateDataDiskName(scope.Machine.Name, disk.NameSuffix)
						disksSvc.EXPECT().Delete(testCtx, gomock.Any()).DoAndReturn(
							func(ctx context.Context, spec azure.Spec) error {
								diskSpec, ok := spec.(*disks.Spec)
								g.Expect(ok).To(BeTrue(), "spec should be a disk spec")
								g.Expect(diskSpec.Name).To(Equal(expectedDataDiskName), "data disk name should match")
								return nil
							},
						).Times(1)
					}
				}
			}

			// Expect network interface deletion
			networkSvc.EXPECT().Delete(testCtx, gomock.Any()).Return(nil).Times(1)

			// Expect availability set deletion
			availabilitySetsSvc.EXPECT().Delete(testCtx, gomock.Any()).Return(nil).Times(1)

			// Exercise the full Delete flow; mockCtrl (via t.Cleanup) verifies
			// that exactly the expected service calls were made.
			err := r.Delete(testCtx)
			g.Expect(err).To(BeNil())
		})
	}
}
Loading