Skip to content

Commit a43272b

Browse files
committed
[Compute] migrate vm disk attach/detach to aaz
1 parent 2a6319e commit a43272b

File tree

2 files changed

+205
-127
lines changed

2 files changed

+205
-127
lines changed

src/azure-cli/azure/cli/command_modules/vm/_vm_utils.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -757,3 +757,24 @@ def _open(filename, mode):
757757
f.write(public_bytes)
758758

759759
return public_bytes.decode()
760+
761+
762+
def safe_get(d: dict, path: str, default=None):
    """
    Safely fetch nested keys from a dict.

    Path segments are separated by '.'; list elements are addressed by an
    integer index, e.g. 'storageProfile.dataDisks.0.managedDisk.id'.
    Returns `default` when any segment is missing or the current value is
    neither a dict nor a list.
    """
    # Sentinel instead of `default` for missing dict keys: if `default` were
    # itself a dict, traversal must not continue *inside* the default value.
    _missing = object()
    cur = d
    for key in path.split('.'):
        if isinstance(cur, list):
            try:
                cur = cur[int(key)]
            except (ValueError, IndexError):
                # Non-numeric segment or index out of range.
                return default
        elif isinstance(cur, dict):
            cur = cur.get(key, _missing)
            if cur is _missing:
                return default
        else:
            # Scalar/None encountered before the path was exhausted.
            return default
    return cur

src/azure-cli/azure/cli/command_modules/vm/custom.py

Lines changed: 184 additions & 127 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
from azure.cli.core.profiles import ResourceType
3939
from azure.cli.core.util import sdk_no_wait
4040

41-
from ._vm_utils import read_content_if_is_file, import_aaz_by_profile
41+
from ._vm_utils import read_content_if_is_file, import_aaz_by_profile, safe_get
4242
from ._vm_diagnostics_templates import get_default_diag_config
4343

4444
from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services,
@@ -2233,121 +2233,150 @@ def show_default_diagnostics_configuration(is_windows_os=False):
22332233

22342234

22352235
# region VirtualMachines Disks (Managed)
2236-
def attach_managed_data_disk(cmd, resource_group_name, vm_name,
                             disk=None, ids=None, disks=None, new=False, sku=None,
                             size_gb=None, lun=None, caching=None,
                             enable_write_accelerator=False, disk_ids=None,
                             source_snapshots_or_disks=None,
                             source_disk_restore_point=None,
                             new_names_of_source_snapshots_or_disks=None,
                             new_names_of_source_disk_restore_point=None,
                             no_wait=False):
    """Attach managed data disks to a VM.

    Fast path: when only pre-existing disks are attached (no --new/--sku/
    --size-gb) and --disk-ids is given, the dedicated attachDetachDataDisks
    API is used so the full VM model need not be round-tripped. Otherwise the
    VM is patched and the new/copied/restored disks are appended to
    storageProfile.dataDisks.

    `disk` and `ids` remain in the signature for CLI backward compatibility;
    this implementation reads `disks` / `disk_ids`.
    """
    if caching is None:
        caching = 'None'

    # ---- fast path: attach already-existing managed disks -------------------
    if not new and not sku and not size_gb and disk_ids is not None:
        from .operations.vm import VMShow
        vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
            'resource_group': resource_group_name,
            'vm_name': vm_name
        })
        # AAZ commands normally return a dict; tolerate wrapped results.
        vm_dict = vm if isinstance(vm, dict) else getattr(vm, 'result', vm)
        existing = vm_dict.get('storageProfile', {}).get('dataDisks', []) or []
        used_luns = {d.get('lun') for d in existing
                     if isinstance(d, dict) and d.get('lun') is not None}

        def _next_lun(start=0):
            # Smallest unused LUN; reserved immediately so each call is unique.
            i = start
            while i in used_luns:
                i += 1
            used_luns.add(i)
            return i

        attach_payload = []
        explicit_lun = lun
        for disk_id in disk_ids:
            if explicit_lun is not None:
                # Consecutive LUNs starting at the user-supplied one.
                disk_lun = explicit_lun
                used_luns.add(disk_lun)
                explicit_lun += 1
            else:
                disk_lun = _next_lun()
            attach_payload.append({
                'diskId': disk_id,
                'lun': disk_lun,
                'caching': caching,
                'writeAcceleratorEnabled': enable_write_accelerator
            })
        return AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={
            'vm_name': vm_name,
            'resource_group': resource_group_name,
            'data_disks_to_attach': attach_payload,
            'no_wait': no_wait
        })

    # ---- slow path: new disks / copy from snapshot or disk / restore point --
    from azure.mgmt.core.tools import parse_resource_id
    from .operations.vm import VMUpdate as _VMUpdate

    class VMUpdate(_VMUpdate):

        def pre_instance_update(self, instance):
            storage_profile = instance.properties.storage_profile
            data_disks = storage_profile.data_disks
            # AAZ list objects expose to_serialized_data(); plain lists don't.
            serialized = (data_disks.to_serialized_data()
                          if hasattr(data_disks, 'to_serialized_data') else data_disks)
            used_luns = set()
            if isinstance(serialized, list):
                for d in serialized:
                    if isinstance(d, dict) and d.get('lun') is not None:
                        used_luns.add(d['lun'])

            def _next_lun(start=0):
                i = start
                while i in used_luns:
                    i += 1
                used_luns.add(i)
                return i

            default_size_gb = 1023  # legacy default when --size-gb is omitted
            disks_to_process = disk_ids if disk_ids is not None else disks

            if disks_to_process:
                explicit_lun = lun
                for disk_item in disks_to_process:
                    if explicit_lun is not None:
                        # BUGFIX: advance the LUN per disk and record it as
                        # used. Previously every disk in the batch reused the
                        # same explicit LUN, and the Copy/Restore loops below
                        # could hand out the same LUN again.
                        disk_lun = explicit_lun
                        used_luns.add(disk_lun)
                        explicit_lun += 1
                    else:
                        disk_lun = _next_lun()
                    if new:
                        disk_obj = {
                            'name': parse_resource_id(disk_item)['name'],
                            'lun': disk_lun,
                            'createOption': 'Empty',
                            'diskSizeGb': size_gb if size_gb else default_size_gb,
                            'caching': caching
                        }
                        if sku:
                            disk_obj['managedDisk'] = {'storageAccountType': sku}
                    else:
                        disk_obj = {
                            'lun': disk_lun,
                            'createOption': 'Attach',
                            'caching': caching,
                            'managedDisk': {'id': disk_item}
                        }
                        if sku:
                            disk_obj['managedDisk']['storageAccountType'] = sku
                    if enable_write_accelerator:
                        disk_obj['writeAcceleratorEnabled'] = True
                    data_disks.append(disk_obj)

            def _append_source_disks(source_ids, new_names, create_option):
                # Shared builder for 'Copy' (snapshot/disk) and 'Restore'
                # (disk restore point) sources.
                names = new_names or [None] * len(source_ids)
                for src_id, name in zip(source_ids, names):
                    disk_obj = {
                        'name': name,
                        'lun': _next_lun(),
                        'createOption': create_option,
                        'sourceResource': {'id': src_id},
                        'caching': caching,
                        'writeAcceleratorEnabled': enable_write_accelerator
                    }
                    if size_gb is not None:
                        disk_obj['diskSizeGb'] = size_gb
                    if sku is not None:
                        disk_obj['managedDisk'] = {'storageAccountType': sku}
                    data_disks.append(disk_obj)

            if source_snapshots_or_disks:
                _append_source_disks(source_snapshots_or_disks,
                                     new_names_of_source_snapshots_or_disks, 'Copy')
            if source_disk_restore_point:
                _append_source_disks(source_disk_restore_point,
                                     new_names_of_source_disk_restore_point, 'Restore')

    return VMUpdate(cli_ctx=cmd.cli_ctx)(command_args={
        'resource_group': resource_group_name,
        'vm_name': vm_name,
        'no_wait': no_wait
    })
23512380

23522381
def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name):
23532382
# here we handle unmanaged disk
@@ -2361,7 +2390,9 @@ def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name):
23612390
# endregion
23622391

23632392

2364-
def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None):
2393+
def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None,
2394+
force_detach=None, disk_ids=None,
2395+
no_wait=False):
23652396
if disk_ids is not None:
23662397
data_disks = []
23672398
for disk_item in disk_ids:
@@ -2375,27 +2406,53 @@ def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None,
23752406
return result
23762407
else:
23772408
# here we handle managed disk
2378-
vm = get_vm_to_update(cmd, resource_group_name, vm_name)
2379-
if not force_detach:
2380-
# pylint: disable=no-member
2381-
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk_name.lower()]
2382-
if len(vm.storage_profile.data_disks) == len(leftovers):
2383-
raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name))
2384-
else:
2385-
DiskDetachOptionTypes = cmd.get_models('DiskDetachOptionTypes', resource_type=ResourceType.MGMT_COMPUTE,
2386-
operation_group='virtual_machines')
2387-
leftovers = vm.storage_profile.data_disks
2388-
is_contains = False
2389-
for d in leftovers:
2390-
if d.name.lower() == disk_name.lower():
2391-
d.to_be_detached = True
2392-
d.detach_option = DiskDetachOptionTypes.FORCE_DETACH
2393-
is_contains = True
2394-
break
2395-
if not is_contains:
2396-
raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name))
2397-
vm.storage_profile.data_disks = leftovers
2398-
set_vm(cmd, vm)
2409+
from .operations.vm import VMShow
2410+
2411+
vm = VMShow(cli_ctx=cmd.cli_ctx)(command_args={
2412+
'resource_group': resource_group_name,
2413+
"vm_name": vm_name
2414+
})
2415+
2416+
        vm_dict = vm if isinstance(vm, dict) else getattr(vm, 'result', vm)

        # To avoid an unnecessary permission check on the image, strip the
        # image reference from the profile that is used below.
        storage_profile = vm_dict.get('storageProfile', {})
        storage_profile["imageReference"] = None
2421+
2422+
target_disk = None
2423+
data_disks = safe_get(vm_dict, 'storageProfile.dataDisks', default=[]) or []
2424+
for d in data_disks:
2425+
# Use dict-style access; AAZ returns dicts.
2426+
name = (d.get('name') or '').lower()
2427+
if name == (disk_name or '').lower():
2428+
target_disk = d
2429+
break
2430+
2431+
if not target_disk:
2432+
            attached_names = [d.get('name') for d in (safe_get(vm_dict, 'storageProfile.dataDisks', []) or [])]
2433+
raise ResourceNotFoundError(
2434+
"No disk with the name '{}' was found. Attached: {}".format(disk_name, attached_names)
2435+
)
2436+
2437+
disk_id = safe_get(target_disk, 'managedDisk.id')
2438+
if not disk_id:
2439+
raise CLIError(
2440+
"Disk '{}' is not a managed disk (no managedDisk.id). Only managed disks are supported by AAZ detach."
2441+
.format(disk_name)
2442+
)
2443+
2444+
args = {
2445+
'vm_name': vm_name,
2446+
'resource_group': resource_group_name,
2447+
'data_disks_to_detach': [{
2448+
'diskId': disk_id,
2449+
'detachOption': 'ForceDetach' if force_detach else None
2450+
}],
2451+
'no_wait': no_wait
2452+
}
2453+
2454+
result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args=args)
2455+
return result
23992456
# endregion
24002457

24012458

0 commit comments

Comments
 (0)