diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000..410cd04 --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,5 @@ +--- +- project: + check: + jobs: + - ansible-tox-linters diff --git a/plugins/connection/vmware_tools.py b/plugins/connection/vmware_tools.py index 71460f9..813aa86 100644 --- a/plugins/connection/vmware_tools.py +++ b/plugins/connection/vmware_tools.py @@ -3,6 +3,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type import re @@ -15,6 +16,7 @@ REQUESTS_IMP_ERR = None try: import requests + HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -22,15 +24,21 @@ try: from requests.packages import urllib3 + HAS_URLLIB3 = True except ImportError: try: import urllib3 + HAS_URLLIB3 = True except ImportError: HAS_URLLIB3 = False -from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure +from ansible.errors import ( + AnsibleError, + AnsibleFileNotFound, + AnsibleConnectionFailure, +) from ansible.module_utils._text import to_bytes, to_native from ansible.plugins.connection import ConnectionBase from ansible.module_utils.basic import missing_required_lib @@ -45,7 +53,7 @@ PYVMOMI_IMP_ERR = traceback.format_exc() -DOCUMENTATION = ''' +DOCUMENTATION = """ author: Deric Crago connection: vmware_tools short_description: Execute tasks inside a VM via VMware Tools @@ -163,9 +171,9 @@ vars: - name: ansible_executable - name: ansible_vmware_tools_executable -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # example vars.yml --- ansible_connection: vmware_tools @@ -231,13 +239,13 @@ - win_file: path: C:\Users\user\foo state: absent -''' +""" class Connection(ConnectionBase): """VMware Tools Connection.""" - transport = 'vmware.general.vmware_tools' + transport = "vmware.general.vmware_tools" @property def vmware_host(self): @@ -272,7 +280,10 @@ def windowsGuest(self): def __init__(self, *args, **kwargs): """init.""" super(Connection, self).__init__(*args, **kwargs) - if hasattr(self, "_shell") and self._shell.SHELL_FAMILY == "powershell": + if ( + hasattr(self, "_shell") + and self._shell.SHELL_FAMILY == "powershell" + ): self.module_implementation_preferences = (".ps1", ".exe", "") self.become_methods = ["runas"] self.allow_executable = False @@ -292,7 +303,9 @@ def _establish_connection(self): connect = SmartConnect else: if HAS_URLLIB3: - urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + urllib3.disable_warnings( + urllib3.exceptions.InsecureRequestWarning + ) connect = SmartConnectNoSSL try: @@ -300,7 +313,10 @@ def _establish_connection(self): except SSLError: raise AnsibleError("SSL Error: Certificate verification failed.") except (gaierror): - raise AnsibleError("Connection Error: Unable to connect to '%s'." % to_native(connection_kwargs["host"])) + raise AnsibleError( + "Connection Error: Unable to connect to '%s'." 
+ % to_native(connection_kwargs["host"]) + ) except vim.fault.InvalidLogin as e: raise AnsibleError("Connection Login Error: %s" % to_native(e.msg)) @@ -309,43 +325,66 @@ def _establish_vm(self, check_vm_credentials=True): self.vm = searchIndex.FindByInventoryPath(self.get_option("vm_path")) if self.vm is None: - raise AnsibleError("Unable to find VM by path '%s'" % to_native(self.get_option("vm_path"))) + raise AnsibleError( + "Unable to find VM by path '%s'" + % to_native(self.get_option("vm_path")) + ) self.vm_auth = vim.NamePasswordAuthentication( - username=self.get_option("vm_user"), password=self.get_option("vm_password"), interactiveSession=False + username=self.get_option("vm_user"), + password=self.get_option("vm_password"), + interactiveSession=False, ) try: if check_vm_credentials: - self.authManager.ValidateCredentialsInGuest(vm=self.vm, auth=self.vm_auth) + self.authManager.ValidateCredentialsInGuest( + vm=self.vm, auth=self.vm_auth + ) except vim.fault.InvalidPowerState as e: raise AnsibleError("VM Power State Error: %s" % to_native(e.msg)) except vim.fault.RestrictedVersion as e: - raise AnsibleError("Restricted Version Error: %s" % to_native(e.msg)) + raise AnsibleError( + "Restricted Version Error: %s" % to_native(e.msg) + ) except vim.fault.GuestOperationsUnavailable as e: - raise AnsibleError("VM Guest Operations (VMware Tools) Error: %s" % to_native(e.msg)) + raise AnsibleError( + "VM Guest Operations (VMware Tools) Error: %s" + % to_native(e.msg) + ) except vim.fault.InvalidGuestLogin as e: raise AnsibleError("VM Login Error: %s" % to_native(e.msg)) except vim.fault.NoPermission as e: - raise AnsibleConnectionFailure("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleConnectionFailure( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vmodl.fault.SystemError as e: - if e.reason == 'vix error codes = (3016, 0).\n': + if e.reason == "vix error codes = (3016, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, is the vm currently rebooting? Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, is the vm currently rebooting? Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection failed. Reason %s" % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. 
Native error: GuestOperationsUnavailable" + ) def _connect(self, check_vm_credentials=True): if not HAS_REQUESTS: - raise AnsibleError("%s : %s" % (missing_required_lib('requests'), REQUESTS_IMP_ERR)) + raise AnsibleError( + "%s : %s" + % (missing_required_lib("requests"), REQUESTS_IMP_ERR) + ) if not HAS_PYVMOMI: - raise AnsibleError("%s : %s" % (missing_required_lib('PyVmomi'), PYVMOMI_IMP_ERR)) + raise AnsibleError( + "%s : %s" % (missing_required_lib("PyVmomi"), PYVMOMI_IMP_ERR) + ) super(Connection, self)._connect() @@ -372,28 +411,36 @@ def reset(self): def create_temporary_file_in_guest(self, prefix="", suffix=""): """Create a temporary file in the VM.""" try: - return self.fileManager.CreateTemporaryFileInGuest(vm=self.vm, auth=self.vm_auth, prefix=prefix, suffix=suffix) + return self.fileManager.CreateTemporaryFileInGuest( + vm=self.vm, auth=self.vm_auth, prefix=prefix, suffix=suffix + ) except vim.fault.NoPermission as e: - raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleError( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vmodl.fault.SystemError as e: - if e.reason == 'vix error codes = (3016, 0).\n': + if e.reason == "vix error codes = (3016, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, is the vm currently rebooting? Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, is the vm currently rebooting? Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection failed. Reason %s" % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. 
Native error: GuestOperationsUnavailable" + ) def _get_program_spec_program_path_and_arguments(self, cmd): if self.windowsGuest: - ''' + """ we need to warp the execution of powershell into a cmd /c because the call otherwise fails with "Authentication or permission failure" #FIXME: Fix the unecessary invocation of cmd and run the command directly - ''' + """ program_path = "cmd.exe" arguments = "/c %s" % cmd else: @@ -405,7 +452,9 @@ def _get_program_spec_program_path_and_arguments(self, cmd): def _get_guest_program_spec(self, cmd, stdout, stderr): guest_program_spec = vim.GuestProgramSpec() - program_path, arguments = self._get_program_spec_program_path_and_arguments(cmd) + program_path, arguments = self._get_program_spec_program_path_and_arguments( + cmd + ) arguments += " 1> %s 2> %s" % (stdout, stderr) @@ -416,24 +465,35 @@ def _get_guest_program_spec(self, cmd, stdout, stderr): def _get_pid_info(self, pid): try: - processes = self.processManager.ListProcessesInGuest(vm=self.vm, auth=self.vm_auth, pids=[pid]) + processes = self.processManager.ListProcessesInGuest( + vm=self.vm, auth=self.vm_auth, pids=[pid] + ) except vim.fault.NoPermission as e: - raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleError( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vmodl.fault.SystemError as e: # https://pubs.vmware.com/vsphere-6-5/index.jsp?topic=%2Fcom.vmware.wssdk.smssdk.doc%2Fvmodl.fault.SystemError.html # https://github.com/ansible/ansible/issues/57607 - if e.reason == 'vix error codes = (1, 0).\n': + if e.reason == "vix error codes = (1, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, Netlogon service stopped or dcpromo in progress. Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, Netlogon service stopped or dcpromo in progress. Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection plugin failed. Reason: %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection plugin failed. Reason: %s" + % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. Native error: GuestOperationsUnavailable" + ) except vim.fault.InvalidGuestLogin: - raise AnsibleConnectionFailure("Guest login failed. Native error: InvalidGuestLogin") + raise AnsibleConnectionFailure( + "Guest login failed. Native error: InvalidGuestLogin" + ) return processes[0] @@ -451,20 +511,28 @@ def _fix_url_for_hosts(self, url): def _fetch_file_from_vm(self, guestFilePath): try: - fileTransferInformation = self.fileManager.InitiateFileTransferFromGuest(vm=self.vm, auth=self.vm_auth, guestFilePath=guestFilePath) + fileTransferInformation = self.fileManager.InitiateFileTransferFromGuest( + vm=self.vm, auth=self.vm_auth, guestFilePath=guestFilePath + ) except vim.fault.NoPermission as e: - raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleError( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vmodl.fault.SystemError as e: - if e.reason == 'vix error codes = (3016, 0).\n': + if e.reason == "vix error codes = (3016, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, is the vm currently rebooting? 
Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, is the vm currently rebooting? Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection failed. Reason %s" % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. Native error: GuestOperationsUnavailable" + ) url = self._fix_url_for_hosts(fileTransferInformation.url) response = requests.get(url, verify=self.validate_certs, stream=True) @@ -477,24 +545,34 @@ def _fetch_file_from_vm(self, guestFilePath): def delete_file_in_guest(self, filePath): """Delete file from VM.""" try: - self.fileManager.DeleteFileInGuest(vm=self.vm, auth=self.vm_auth, filePath=filePath) + self.fileManager.DeleteFileInGuest( + vm=self.vm, auth=self.vm_auth, filePath=filePath + ) except vim.fault.NoPermission as e: - raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleError( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vmodl.fault.SystemError as e: - if e.reason == 'vix error codes = (3016, 0).\n': + if e.reason == "vix error codes = (3016, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, is the vm currently rebooting? Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, is the vm currently rebooting? Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection failed. Reason %s" % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. Native error: GuestOperationsUnavailable" + ) def exec_command(self, cmd, in_data=None, sudoable=True): """Execute command.""" - super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + super(Connection, self).exec_command( + cmd, in_data=in_data, sudoable=sudoable + ) stdout = self.create_temporary_file_in_guest(suffix=".stdout") stderr = self.create_temporary_file_in_guest(suffix=".stderr") @@ -502,22 +580,32 @@ def exec_command(self, cmd, in_data=None, sudoable=True): guest_program_spec = self._get_guest_program_spec(cmd, stdout, stderr) try: - pid = self.processManager.StartProgramInGuest(vm=self.vm, auth=self.vm_auth, spec=guest_program_spec) + pid = self.processManager.StartProgramInGuest( + vm=self.vm, auth=self.vm_auth, spec=guest_program_spec + ) except vim.fault.NoPermission as e: - raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleError( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vim.fault.FileNotFound as e: - raise AnsibleError("StartProgramInGuest Error: %s" % to_native(e.msg)) + raise AnsibleError( + "StartProgramInGuest Error: %s" % to_native(e.msg) + ) except vmodl.fault.SystemError as e: - if e.reason == 'vix error codes = (3016, 0).\n': + if e.reason == "vix error codes = (3016, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, is the vm currently rebooting? 
Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, is the vm currently rebooting? Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection failed. Reason %s" % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. Native error: GuestOperationsUnavailable" + ) pid_info = self._get_pid_info(pid) @@ -540,7 +628,9 @@ def fetch_file(self, in_path, out_path): in_path_response = self._fetch_file_from_vm(in_path) with open(out_path, "wb") as fd: - for chunk in in_path_response.iter_content(chunk_size=self.get_option("file_chunk_size")): + for chunk in in_path_response.iter_content( + chunk_size=self.get_option("file_chunk_size") + ): fd.write(chunk) def put_file(self, in_path, out_path): @@ -548,25 +638,38 @@ def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) if not exists(to_bytes(in_path, errors="surrogate_or_strict")): - raise AnsibleFileNotFound("file or module does not exist: '%s'" % to_native(in_path)) + raise AnsibleFileNotFound( + "file or module does not exist: '%s'" % to_native(in_path) + ) try: put_url = self.fileManager.InitiateFileTransferToGuest( - vm=self.vm, auth=self.vm_auth, guestFilePath=out_path, fileAttributes=vim.GuestFileAttributes(), fileSize=getsize(in_path), overwrite=True + vm=self.vm, + auth=self.vm_auth, + guestFilePath=out_path, + fileAttributes=vim.GuestFileAttributes(), + fileSize=getsize(in_path), + overwrite=True, ) except vim.fault.NoPermission as e: - raise AnsibleError("No Permission Error: %s %s" % (to_native(e.msg), to_native(e.privilegeId))) + raise AnsibleError( + "No Permission Error: %s %s" + % (to_native(e.msg), to_native(e.privilegeId)) + ) except vmodl.fault.SystemError as e: - if e.reason == 'vix error codes = (3016, 0).\n': + if e.reason == "vix error codes = (3016, 0).\n": raise AnsibleConnectionFailure( - "Connection failed, is the vm currently rebooting? Reason: %s" % ( - to_native(e.reason) - ) + "Connection failed, is the vm currently rebooting? Reason: %s" + % (to_native(e.reason)) ) else: - raise AnsibleConnectionFailure("Connection failed. Reason %s" % (to_native(e.reason))) + raise AnsibleConnectionFailure( + "Connection failed. Reason %s" % (to_native(e.reason)) + ) except vim.fault.GuestOperationsUnavailable: - raise AnsibleConnectionFailure("Cannot connect to guest. Native error: GuestOperationsUnavailable") + raise AnsibleConnectionFailure( + "Cannot connect to guest. 
Native error: GuestOperationsUnavailable" + ) url = self._fix_url_for_hosts(put_url) diff --git a/plugins/doc_fragments/VmwareRestModule.py b/plugins/doc_fragments/VmwareRestModule.py index 5e83d04..2f0c6f1 100644 --- a/plugins/doc_fragments/VmwareRestModule.py +++ b/plugins/doc_fragments/VmwareRestModule.py @@ -3,12 +3,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type class ModuleDocFragment(object): # Parameters for VMware ReST HTTPAPI modules omits filters and state - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: allow_multiples: description: @@ -29,4 +30,4 @@ class ModuleDocFragment(object): required: true type: list default: [200] -''' +""" diff --git a/plugins/doc_fragments/VmwareRestModule_filters.py b/plugins/doc_fragments/VmwareRestModule_filters.py index ca32f9c..48a471e 100644 --- a/plugins/doc_fragments/VmwareRestModule_filters.py +++ b/plugins/doc_fragments/VmwareRestModule_filters.py @@ -3,12 +3,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type class ModuleDocFragment(object): # Parameters for VMware ReST HTTPAPI modules includes filters - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: allow_multiples: description: @@ -34,4 +35,4 @@ class ModuleDocFragment(object): required: true type: list default: [200] -''' +""" diff --git a/plugins/doc_fragments/VmwareRestModule_full.py b/plugins/doc_fragments/VmwareRestModule_full.py index a5a32c9..6709e5f 100644 --- a/plugins/doc_fragments/VmwareRestModule_full.py +++ b/plugins/doc_fragments/VmwareRestModule_full.py @@ -3,12 +3,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type class ModuleDocFragment(object): # Parameters for VMware ReST HTTPAPI modules includes filters and state - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: allow_multiples: description: @@ -41,4 +42,4 @@ class ModuleDocFragment(object): required: true type: list default: [200] -''' +""" diff --git a/plugins/doc_fragments/VmwareRestModule_state.py b/plugins/doc_fragments/VmwareRestModule_state.py index 5f4dbab..ff39915 100644 --- a/plugins/doc_fragments/VmwareRestModule_state.py +++ b/plugins/doc_fragments/VmwareRestModule_state.py @@ -3,12 +3,13 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type class ModuleDocFragment(object): # Parameters for VMware ReST HTTPAPI modules includes filters and state - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: allow_multiples: description: @@ -36,4 +37,4 @@ class ModuleDocFragment(object): required: true type: list default: [200] -''' +""" diff --git a/plugins/doc_fragments/vmware.py b/plugins/doc_fragments/vmware.py index 755a218..01a0463 100644 --- a/plugins/doc_fragments/vmware.py +++ b/plugins/doc_fragments/vmware.py @@ -8,7 +8,7 @@ class ModuleDocFragment(object): # Parameters for VMware modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: @@ -59,10 +59,10 @@ class ModuleDocFragment(object): - If the value is not specified in the task, the value of environment variable C(VMWARE_PROXY_PORT) will be used instead. 
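# Reviewer note (illustrative sketch, not part of the patch):
# The proxy_host/proxy_port options documented in this fragment are backed in
# plugins/module_utils/vmware.py (later in this patch) by env_fallback entries,
# so a module argument falls back to VMWARE_PROXY_HOST / VMWARE_PROXY_PORT when
# it is not set in the task. A minimal stand-alone argument spec showing just
# that pattern, lifted from the vmware_argument_spec() hunk further down:

from ansible.module_utils.basic import env_fallback

proxy_argument_spec = dict(
    proxy_host=dict(
        type="str",
        required=False,
        default=None,
        fallback=(env_fallback, ["VMWARE_PROXY_HOST"]),
    ),
    proxy_port=dict(
        type="int",
        required=False,
        default=None,
        fallback=(env_fallback, ["VMWARE_PROXY_PORT"]),
    ),
)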
type: int required: False -''' +""" # This doc fragment is specific to vcenter modules like vcenter_license - VCENTER_DOCUMENTATION = r''' + VCENTER_DOCUMENTATION = r""" options: hostname: description: @@ -115,4 +115,4 @@ class ModuleDocFragment(object): type: int version_added: '2.9' required: False - ''' + """ diff --git a/plugins/doc_fragments/vmware_rest_client.py b/plugins/doc_fragments/vmware_rest_client.py index aaa1312..e1f4443 100644 --- a/plugins/doc_fragments/vmware_rest_client.py +++ b/plugins/doc_fragments/vmware_rest_client.py @@ -7,7 +7,7 @@ class ModuleDocFragment(object): # Parameters for VMware REST Client based modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: @@ -45,4 +45,4 @@ class ModuleDocFragment(object): type: str choices: [ http, https ] default: https -''' +""" diff --git a/plugins/httpapi/vmware.py b/plugins/httpapi/vmware.py index 1a6d816..34232b6 100644 --- a/plugins/httpapi/vmware.py +++ b/plugins/httpapi/vmware.py @@ -3,17 +3,18 @@ # Copyright: (c) 2019, Abhijeet Kasurde # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ --- author: Abhijeet Kasurde (Akasurde) httpapi : vmware short_description: HttpApi Plugin for VMware REST API description: - This HttpApi plugin provides methods to connect to VMware vCenter over a HTTP(S)-based APIs. -''' +""" import json @@ -24,8 +25,8 @@ from ansible.module_utils.connection import ConnectionError BASE_HEADERS = { - 'Content-Type': 'application/json', - 'Accept': 'application/json', + "Content-Type": "application/json", + "Accept": "application/json", } @@ -33,22 +34,31 @@ class HttpApi(HttpApiBase): def login(self, username, password): if username and password: payload = {} - url = '/rest/com/vmware/cis/session' + url = "/rest/com/vmware/cis/session" response, response_data = self.send_request(url, payload) else: - raise AnsibleConnectionFailure('Username and password are required for login') + raise AnsibleConnectionFailure( + "Username and password are required for login" + ) if response == 404: raise ConnectionError(response_data) - if not response_data.get('value'): - raise ConnectionError('Server returned response without token info during connection authentication: %s' % response) + if not response_data.get("value"): + raise ConnectionError( + "Server returned response without token info during connection authentication: %s" + % response + ) - self.connection._session_uid = "vmware-api-session-id:%s" % response_data['value'] - self.connection._token = response_data['value'] + self.connection._session_uid = ( + "vmware-api-session-id:%s" % response_data["value"] + ) + self.connection._token = response_data["value"] def logout(self): - response, dummy = self.send_request('/rest/com/vmware/cis/session', None, method='DELETE') + response, dummy = self.send_request( + "/rest/com/vmware/cis/session", None, method="DELETE" + ) def get_session_uid(self): return self.connection._session_uid @@ -56,22 +66,30 @@ def get_session_uid(self): def get_session_token(self): return self.connection._token - def send_request(self, path, body_params, method='POST'): - data = json.dumps(body_params) if body_params else '{}' + def send_request(self, path, body_params, method="POST"): + data = json.dumps(body_params) if body_params else "{}" try: 
self._display_request(method=method) - response, response_data = self.connection.send(path, data, method=method, headers=BASE_HEADERS, force_basic_auth=True) + response, response_data = self.connection.send( + path, + data, + method=method, + headers=BASE_HEADERS, + force_basic_auth=True, + ) response_value = self._get_response_value(response_data) return response.getcode(), self._response_to_json(response_value) except AnsibleConnectionFailure as e: - return 404, 'Object not found' + return 404, "Object not found" except HTTPError as e: return e.code, json.loads(e.read()) - def _display_request(self, method='POST'): - self.connection.queue_message('vvvv', 'Web Services: %s %s' % (method, self.connection._url)) + def _display_request(self, method="POST"): + self.connection.queue_message( + "vvvv", "Web Services: %s %s" % (method, self.connection._url) + ) def _get_response_value(self, response_data): return to_text(response_data.getvalue()) @@ -81,4 +99,4 @@ def _response_to_json(self, response_text): return json.loads(response_text) if response_text else {} # JSONDecodeError only available on Python 3.5+ except ValueError: - raise ConnectionError('Invalid JSON response: %s' % response_text) + raise ConnectionError("Invalid JSON response: %s" % response_text) diff --git a/plugins/inventory/vmware_vm_inventory.py b/plugins/inventory/vmware_vm_inventory.py index b52eb77..ec5badc 100644 --- a/plugins/inventory/vmware_vm_inventory.py +++ b/plugins/inventory/vmware_vm_inventory.py @@ -4,10 +4,11 @@ # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = """ name: vmware_vm_inventory plugin_type: inventory short_description: VMware Guest inventory source @@ -76,9 +77,9 @@ extends_documentation_fragment: - inventory_cache -''' +""" -EXAMPLES = ''' +EXAMPLES = """ # Sample configuration file for VMware Guest dynamic inventory plugin: vmware_vm_inventory strict: False @@ -99,7 +100,7 @@ properties: - 'name' - 'guest.ipAddress' -''' +""" import ssl import atexit @@ -108,6 +109,7 @@ try: # requests is required for exception handling of the ConnectionError import requests + HAS_REQUESTS = True except ImportError: HAS_REQUESTS = False @@ -115,6 +117,7 @@ try: from pyVim import connect from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False @@ -122,6 +125,7 @@ try: from com.vmware.vapi.std_client import DynamicID from vmware.vapi.vsphere.client import create_vsphere_client + HAS_VSPHERE = True except ImportError: HAS_VSPHERE = False @@ -131,7 +135,9 @@ class BaseVMwareInventory: - def __init__(self, hostname, username, password, port, validate_certs, with_tags): + def __init__( + self, hostname, username, password, port, validate_certs, with_tags + ): self.hostname = hostname self.username = username self.password = password @@ -165,12 +171,16 @@ def _login_vapi(self): server = self.hostname if self.port: server += ":" + str(self.port) - client = create_vsphere_client(server=server, - username=self.username, - password=self.password, - session=session) + client = create_vsphere_client( + server=server, + username=self.username, + password=self.password, + session=session, + ) if client is None: - raise AnsibleError("Failed to login to %s using %s" % (server, self.username)) + raise AnsibleError( + "Failed to login to %s using %s" % 
(server, self.username) + ) return client def _login(self): @@ -179,36 +189,59 @@ def _login(self): Returns: connection object """ - if self.validate_certs and not hasattr(ssl, 'SSLContext'): - raise AnsibleError('pyVim does not support changing verification mode with python < 2.7.9. Either update ' - 'python or set validate_certs to false in configuration YAML file.') + if self.validate_certs and not hasattr(ssl, "SSLContext"): + raise AnsibleError( + "pyVim does not support changing verification mode with python < 2.7.9. Either update " + "python or set validate_certs to false in configuration YAML file." + ) ssl_context = None - if not self.validate_certs and hasattr(ssl, 'SSLContext'): + if not self.validate_certs and hasattr(ssl, "SSLContext"): ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ssl_context.verify_mode = ssl.CERT_NONE service_instance = None try: - service_instance = connect.SmartConnect(host=self.hostname, user=self.username, - pwd=self.password, sslContext=ssl_context, - port=self.port) + service_instance = connect.SmartConnect( + host=self.hostname, + user=self.username, + pwd=self.password, + sslContext=ssl_context, + port=self.port, + ) except vim.fault.InvalidLogin as e: - raise AnsibleParserError("Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" % (self.hostname, self.port, self.username, e.msg)) + raise AnsibleParserError( + "Unable to log on to vCenter or ESXi API at %s:%s as %s: %s" + % (self.hostname, self.port, self.username, e.msg) + ) except vim.fault.NoPermission as e: - raise AnsibleParserError("User %s does not have required permission" - " to log on to vCenter or ESXi API at %s:%s : %s" % (self.username, self.hostname, self.port, e.msg)) + raise AnsibleParserError( + "User %s does not have required permission" + " to log on to vCenter or ESXi API at %s:%s : %s" + % (self.username, self.hostname, self.port, e.msg) + ) except (requests.ConnectionError, ssl.SSLError) as e: - raise AnsibleParserError("Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (self.hostname, self.port, e)) + raise AnsibleParserError( + "Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" + % (self.hostname, self.port, e) + ) except vmodl.fault.InvalidRequest as e: # Request is malformed - raise AnsibleParserError("Failed to get a response from server %s:%s as " - "request is malformed: %s" % (self.hostname, self.port, e.msg)) + raise AnsibleParserError( + "Failed to get a response from server %s:%s as " + "request is malformed: %s" % (self.hostname, self.port, e.msg) + ) except Exception as e: - raise AnsibleParserError("Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" % (self.hostname, self.port, e)) + raise AnsibleParserError( + "Unknown error while connecting to vCenter or ESXi API at %s:%s : %s" + % (self.hostname, self.port, e) + ) if service_instance is None: - raise AnsibleParserError("Unknown error while connecting to vCenter or ESXi API at %s:%s" % (self.hostname, self.port)) + raise AnsibleParserError( + "Unknown error while connecting to vCenter or ESXi API at %s:%s" + % (self.hostname, self.port) + ) atexit.register(connect.Disconnect, service_instance) return service_instance.RetrieveContent() @@ -216,11 +249,15 @@ def _login(self): def check_requirements(self): """ Check all requirements for this inventory are satisified""" if not HAS_REQUESTS: - raise AnsibleParserError('Please install "requests" Python module as this is required' - ' for VMware Guest dynamic inventory plugin.') + raise AnsibleParserError( + 
'Please install "requests" Python module as this is required' + " for VMware Guest dynamic inventory plugin." + ) elif not HAS_PYVMOMI: - raise AnsibleParserError('Please install "PyVmomi" Python module as this is required' - ' for VMware Guest dynamic inventory plugin.') + raise AnsibleParserError( + 'Please install "PyVmomi" Python module as this is required' + " for VMware Guest dynamic inventory plugin." + ) if HAS_REQUESTS: # Pyvmomi 5.5 and onwards requires requests 2.3 # https://github.com/vmware/pyvmomi/blob/master/requirements.txt @@ -229,21 +266,32 @@ def check_requirements(self): try: requests_major_minor = tuple(map(int, requests_version)) except ValueError: - raise AnsibleParserError("Failed to parse 'requests' library version.") + raise AnsibleParserError( + "Failed to parse 'requests' library version." + ) if requests_major_minor < required_version: - raise AnsibleParserError("'requests' library version should" - " be >= %s, found: %s." % (".".join([str(w) for w in required_version]), - requests.__version__)) + raise AnsibleParserError( + "'requests' library version should" + " be >= %s, found: %s." + % ( + ".".join([str(w) for w in required_version]), + requests.__version__, + ) + ) if not HAS_VSPHERE and self.with_tags: - raise AnsibleError("Unable to find 'vSphere Automation SDK' Python library which is required." - " Please refer this URL for installation steps" - " - https://code.vmware.com/web/sdk/65/vsphere-automation-python") + raise AnsibleError( + "Unable to find 'vSphere Automation SDK' Python library which is required." + " Please refer this URL for installation steps" + " - https://code.vmware.com/web/sdk/65/vsphere-automation-python" + ) if not all([self.hostname, self.username, self.password]): - raise AnsibleError("Missing one of the following : hostname, username, password. Please read " - "the documentation for more information.") + raise AnsibleError( + "Missing one of the following : hostname, username, password. Please read " + "the documentation for more information." 
+ ) def _get_managed_objects_properties(self, vim_type, properties=None): """ @@ -256,38 +304,38 @@ def _get_managed_objects_properties(self, vim_type, properties=None): root_folder = self.content.rootFolder if properties is None: - properties = ['name'] + properties = ["name"] # Create Container View with default root folder - mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True) + mor = self.content.viewManager.CreateContainerView( + root_folder, [vim_type], True + ) # Create Traversal spec traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name="traversal_spec", - path='view', + path="view", skip=False, - type=vim.view.ContainerView + type=vim.view.ContainerView, ) # Create Property Spec property_spec = vmodl.query.PropertyCollector.PropertySpec( type=vim_type, # Type of object to retrieved all=False, - pathSet=properties + pathSet=properties, ) # Create Object Spec object_spec = vmodl.query.PropertyCollector.ObjectSpec( - obj=mor, - skip=True, - selectSet=[traversal_spec] + obj=mor, skip=True, selectSet=[traversal_spec] ) # Create Filter Spec filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[object_spec], propSet=[property_spec], - reportMissingObjectsInResults=False + reportMissingObjectsInResults=False, ) return self.content.propertyCollector.RetrieveContents([filter_spec]) @@ -306,7 +354,7 @@ def _get_object_prop(vm, attributes): class InventoryModule(BaseInventoryPlugin, Cacheable): - NAME = 'vmware.general.vmware_vm_inventory' + NAME = "vmware.general.vmware_vm_inventory" def verify_file(self, path): """ @@ -317,7 +365,14 @@ def verify_file(self, path): """ valid = False if super(InventoryModule, self).verify_file(path): - if path.endswith(('vmware.yaml', 'vmware.yml', 'vmware_vm_inventory.yaml', 'vmware_vm_inventory.yml')): + if path.endswith( + ( + "vmware.yaml", + "vmware.yml", + "vmware_vm_inventory.yaml", + "vmware_vm_inventory.yml", + ) + ): valid = True return valid @@ -326,7 +381,9 @@ def parse(self, inventory, loader, path, cache=True): """ Parses the inventory file """ - super(InventoryModule, self).parse(inventory, loader, path, cache=cache) + super(InventoryModule, self).parse( + inventory, loader, path, cache=cache + ) cache_key = self.get_cache_key(path) @@ -336,12 +393,12 @@ def parse(self, inventory, loader, path, cache=True): self._consume_options(config_data) self.pyv = BaseVMwareInventory( - hostname=self.get_option('hostname'), - username=self.get_option('username'), - password=self.get_option('password'), - port=self.get_option('port'), - with_tags=self.get_option('with_tags'), - validate_certs=self.get_option('validate_certs') + hostname=self.get_option("hostname"), + username=self.get_option("username"), + password=self.get_option("password"), + port=self.get_option("port"), + with_tags=self.get_option("with_tags"), + validate_certs=self.get_option("validate_certs"), ) self.pyv.do_login() @@ -350,7 +407,7 @@ def parse(self, inventory, loader, path, cache=True): source_data = None if cache: - cache = self.get_option('cache') + cache = self.get_option("cache") update_cache = False if cache: @@ -360,23 +417,27 @@ def parse(self, inventory, loader, path, cache=True): update_cache = True using_current_cache = cache and not update_cache - cacheable_results = self._populate_from_source(source_data, using_current_cache) + cacheable_results = self._populate_from_source( + source_data, using_current_cache + ) if update_cache: self._cache[cache_key] = cacheable_results def _populate_from_cache(self, source_data): 
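# Reviewer note (illustrative sketch, not part of the patch):
# _populate_from_cache() consumes the same structure that
# _populate_from_source() builds and stores under the cache key: a
# '_meta'/'hostvars' mapping plus one {'hosts': [...]} entry per group
# (power state, guest id, and optionally tag names). The host and group
# names below are made up purely to show the shape of that data:

example_cacheable_results = {
    "_meta": {
        "hostvars": {
            "vm01_4201fa42-0000-0000-0000-000000000000": {
                "ansible_host": "192.0.2.10",
                "name": "vm01",
            },
        },
    },
    "poweredOn": {
        "hosts": ["vm01_4201fa42-0000-0000-0000-000000000000"],
    },
    "rhel7_64Guest": {
        "hosts": ["vm01_4201fa42-0000-0000-0000-000000000000"],
    },
}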
""" Populate cache using source data """ - hostvars = source_data.pop('_meta', {}).get('hostvars', {}) + hostvars = source_data.pop("_meta", {}).get("hostvars", {}) for group in source_data: - if group == 'all': + if group == "all": continue else: self.inventory.add_group(group) - hosts = source_data[group].get('hosts', []) + hosts = source_data[group].get("hosts", []) for host in hosts: - self._populate_host_vars([host], hostvars.get(host, {}), group) - self.inventory.add_child('all', group) + self._populate_host_vars( + [host], hostvars.get(host, {}), group + ) + self.inventory.add_child("all", group) def _populate_from_source(self, source_data, using_current_cache): """ @@ -387,10 +448,11 @@ def _populate_from_source(self, source_data, using_current_cache): self._populate_from_cache(source_data) return source_data - cacheable_results = {'_meta': {'hostvars': {}}} + cacheable_results = {"_meta": {"hostvars": {}}} hostvars = {} - objects = self.pyv._get_managed_objects_properties(vim_type=vim.VirtualMachine, - properties=['name']) + objects = self.pyv._get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) if self.pyv.with_tags: tag_svc = self.pyv.rest_content.tagging.Tag @@ -402,7 +464,7 @@ def _populate_from_source(self, source_data, using_current_cache): tag_obj = tag_svc.get(tag) tags_info[tag_obj.id] = tag_obj.name if tag_obj.name not in cacheable_results: - cacheable_results[tag_obj.name] = {'hosts': []} + cacheable_results[tag_obj.name] = {"hosts": []} self.inventory.add_group(tag_obj.name) for vm_obj in objects: @@ -414,7 +476,9 @@ def _populate_from_source(self, source_data, using_current_cache): # Sometime orphaned VMs return no configurations continue - current_host = vm_obj_property.val + "_" + vm_obj.obj.config.uuid + current_host = ( + vm_obj_property.val + "_" + vm_obj.obj.config.uuid + ) if current_host not in hostvars: hostvars[current_host] = {} @@ -422,7 +486,9 @@ def _populate_from_source(self, source_data, using_current_cache): host_ip = vm_obj.obj.guest.ipAddress if host_ip: - self.inventory.set_variable(current_host, 'ansible_host', host_ip) + self.inventory.set_variable( + current_host, "ansible_host", host_ip + ) self._populate_host_properties(vm_obj, current_host) @@ -430,47 +496,63 @@ def _populate_from_source(self, source_data, using_current_cache): if HAS_VSPHERE and self.pyv.with_tags: # Add virtual machine to appropriate tag group vm_mo_id = vm_obj.obj._GetMoId() - vm_dynamic_id = DynamicID(type='VirtualMachine', id=vm_mo_id) - attached_tags = tag_association.list_attached_tags(vm_dynamic_id) + vm_dynamic_id = DynamicID( + type="VirtualMachine", id=vm_mo_id + ) + attached_tags = tag_association.list_attached_tags( + vm_dynamic_id + ) for tag_id in attached_tags: - self.inventory.add_child(tags_info[tag_id], current_host) - cacheable_results[tags_info[tag_id]]['hosts'].append(current_host) + self.inventory.add_child( + tags_info[tag_id], current_host + ) + cacheable_results[tags_info[tag_id]][ + "hosts" + ].append(current_host) # Based on power state of virtual machine vm_power = str(vm_obj.obj.summary.runtime.powerState) if vm_power not in cacheable_results: - cacheable_results[vm_power] = {'hosts': []} + cacheable_results[vm_power] = {"hosts": []} self.inventory.add_group(vm_power) - cacheable_results[vm_power]['hosts'].append(current_host) + cacheable_results[vm_power]["hosts"].append(current_host) self.inventory.add_child(vm_power, current_host) # Based on guest id vm_guest_id = vm_obj.obj.config.guestId if vm_guest_id and 
vm_guest_id not in cacheable_results: - cacheable_results[vm_guest_id] = {'hosts': []} + cacheable_results[vm_guest_id] = {"hosts": []} self.inventory.add_group(vm_guest_id) - cacheable_results[vm_guest_id]['hosts'].append(current_host) + cacheable_results[vm_guest_id]["hosts"].append( + current_host + ) self.inventory.add_child(vm_guest_id, current_host) for host in hostvars: h = self.inventory.get_host(host) - cacheable_results['_meta']['hostvars'][h.name] = h.vars + cacheable_results["_meta"]["hostvars"][h.name] = h.vars return cacheable_results def _populate_host_properties(self, vm_obj, current_host): # Load VM properties in host_vars - vm_properties = self.get_option('properties') or [] + vm_properties = self.get_option("properties") or [] field_mgr = self.pyv.content.customFieldsManager.field for vm_prop in vm_properties: - if vm_prop == 'customValue': + if vm_prop == "customValue": for cust_value in vm_obj.obj.customValue: - self.inventory.set_variable(current_host, - [y.name for y in field_mgr if y.key == cust_value.key][0], - cust_value.value) + self.inventory.set_variable( + current_host, + [y.name for y in field_mgr if y.key == cust_value.key][ + 0 + ], + cust_value.value, + ) else: - vm_value = self.pyv._get_object_prop(vm_obj.obj, vm_prop.split(".")) + vm_value = self.pyv._get_object_prop( + vm_obj.obj, vm_prop.split(".") + ) self.inventory.set_variable(current_host, vm_prop, vm_value) diff --git a/plugins/module_utils/vmware.py b/plugins/module_utils/vmware.py index 765e8c1..3fb39e0 100644 --- a/plugins/module_utils/vmware.py +++ b/plugins/module_utils/vmware.py @@ -5,6 +5,7 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type import atexit @@ -22,6 +23,7 @@ try: # requests is required for exception handling of the ConnectionError import requests + HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -31,15 +33,21 @@ try: from pyVim import connect from pyVmomi import vim, vmodl, VmomiSupport + HAS_PYVMOMI = True - HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder') + HAS_PYVMOMIJSON = hasattr(VmomiSupport, "VmomiJSONEncoder") except ImportError: PYVMOMI_IMP_ERR = traceback.format_exc() HAS_PYVMOMI = False HAS_PYVMOMIJSON = False from ansible.module_utils._text import to_text, to_native -from ansible.module_utils.six import integer_types, iteritems, string_types, raise_from +from ansible.module_utils.six import ( + integer_types, + iteritems, + string_types, + raise_from, +) from ansible.module_utils.basic import env_fallback, missing_required_lib @@ -72,14 +80,21 @@ def wait_for_task(task, max_backoff=64, timeout=3600): host_thumbprint = None try: error_msg = error_msg.msg - if hasattr(task.info.error, 'thumbprint'): + if hasattr(task.info.error, "thumbprint"): host_thumbprint = task.info.error.thumbprint except AttributeError: pass finally: - raise_from(TaskError(error_msg, host_thumbprint), task.info.error) - if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]: - sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000, max_backoff) + raise_from( + TaskError(error_msg, host_thumbprint), task.info.error + ) + if task.info.state in [ + vim.TaskInfo.State.running, + vim.TaskInfo.State.queued, + ]: + sleep_time = min( + 2 ** failure_counter + randint(1, 1000) / 1000, max_backoff + ) time.sleep(sleep_time) failure_counter += 1 @@ -89,7 +104,7 @@ def wait_for_vm_ip(content, vm, 
timeout=300): interval = 15 while timeout > 0: _facts = gather_vm_facts(content, vm) - if _facts['ipv4'] or _facts['ipv6']: + if _facts["ipv4"] or _facts["ipv6"]: facts = _facts break time.sleep(interval) @@ -99,9 +114,15 @@ def wait_for_vm_ip(content, vm, timeout=300): def find_obj(content, vimtype, name, first=True, folder=None): - container = content.viewManager.CreateContainerView(folder or content.rootFolder, recursive=True, type=vimtype) + container = content.viewManager.CreateContainerView( + folder or content.rootFolder, recursive=True, type=vimtype + ) # Get all objects matching type (and name if given) - obj_list = [obj for obj in container.view if not name or to_text(obj.name) == to_text(name)] + obj_list = [ + obj + for obj in container.view + if not name or to_text(obj.name) == to_text(name) + ] container.Destroy() # Return first match or None @@ -139,12 +160,14 @@ def find_object_by_name(content, name, obj_type, folder=None, recurse=True): def find_cluster_by_name(content, cluster_name, datacenter=None): - if datacenter and hasattr(datacenter, 'hostFolder'): + if datacenter and hasattr(datacenter, "hostFolder"): folder = datacenter.hostFolder else: folder = content.rootFolder - return find_object_by_name(content, cluster_name, [vim.ClusterComputeResource], folder=folder) + return find_object_by_name( + content, cluster_name, [vim.ClusterComputeResource], folder=folder + ) def find_datacenter_by_name(content, datacenter_name): @@ -157,7 +180,7 @@ def get_parent_datacenter(obj): return obj datacenter = None while True: - if not hasattr(obj, 'parent'): + if not hasattr(obj, "parent"): break obj = obj.parent if isinstance(obj, vim.Datacenter): @@ -167,7 +190,9 @@ def get_parent_datacenter(obj): def find_datastore_by_name(content, datastore_name, datacenter_name=None): - return find_object_by_name(content, datastore_name, [vim.Datastore], datacenter_name) + return find_object_by_name( + content, datastore_name, [vim.Datastore], datacenter_name + ) def find_folder_by_name(content, folder_name): @@ -175,7 +200,9 @@ def find_folder_by_name(content, folder_name): def find_dvs_by_name(content, switch_name, folder=None): - return find_object_by_name(content, switch_name, [vim.DistributedVirtualSwitch], folder=folder) + return find_object_by_name( + content, switch_name, [vim.DistributedVirtualSwitch], folder=folder + ) def find_hostsystem_by_name(content, hostname): @@ -186,37 +213,59 @@ def find_resource_pool_by_name(content, resource_pool_name): return find_object_by_name(content, resource_pool_name, [vim.ResourcePool]) -def find_resource_pool_by_cluster(content, resource_pool_name='Resources', cluster=None): - return find_object_by_name(content, resource_pool_name, [vim.ResourcePool], folder=cluster) +def find_resource_pool_by_cluster( + content, resource_pool_name="Resources", cluster=None +): + return find_object_by_name( + content, resource_pool_name, [vim.ResourcePool], folder=cluster + ) def find_network_by_name(content, network_name): - return find_object_by_name(content, quote_obj_name(network_name), [vim.Network]) + return find_object_by_name( + content, quote_obj_name(network_name), [vim.Network] + ) -def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, - cluster=None, folder=None, match_first=False): +def find_vm_by_id( + content, + vm_id, + vm_id_type="vm_name", + datacenter=None, + cluster=None, + folder=None, + match_first=False, +): """ UUID is unique to a VM, every other id returns the first match. 
""" si = content.searchIndex vm = None - if vm_id_type == 'dns_name': - vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True) - elif vm_id_type == 'uuid': + if vm_id_type == "dns_name": + vm = si.FindByDnsName( + datacenter=datacenter, dnsName=vm_id, vmSearch=True + ) + elif vm_id_type == "uuid": # Search By BIOS UUID rather than instance UUID - vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True) - elif vm_id_type == 'instance_uuid': - vm = si.FindByUuid(datacenter=datacenter, instanceUuid=True, uuid=vm_id, vmSearch=True) - elif vm_id_type == 'ip': + vm = si.FindByUuid( + datacenter=datacenter, + instanceUuid=False, + uuid=vm_id, + vmSearch=True, + ) + elif vm_id_type == "instance_uuid": + vm = si.FindByUuid( + datacenter=datacenter, instanceUuid=True, uuid=vm_id, vmSearch=True + ) + elif vm_id_type == "ip": vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True) - elif vm_id_type == 'vm_name': + elif vm_id_type == "vm_name": folder = None if cluster: folder = cluster elif datacenter: folder = datacenter.hostFolder vm = find_vm_by_name(content, vm_id, folder) - elif vm_id_type == 'inventory_path': + elif vm_id_type == "inventory_path": searchpath = folder # get all objects for this path f_obj = si.FindByInventoryPath(searchpath) @@ -234,7 +283,9 @@ def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, def find_vm_by_name(content, vm_name, folder=None, recurse=True): - return find_object_by_name(content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse) + return find_object_by_name( + content, vm_name, [vim.VirtualMachine], folder=folder, recurse=recurse + ) def find_host_portgroup_by_name(host, portgroup_name): @@ -253,18 +304,18 @@ def compile_folder_path_for_object(vobj): paths.append(vobj.name) thisobj = vobj - while hasattr(thisobj, 'parent'): + while hasattr(thisobj, "parent"): thisobj = thisobj.parent try: moid = thisobj._moId except AttributeError: moid = None - if moid in ['group-d1', 'ha-folder-root']: + if moid in ["group-d1", "ha-folder-root"]: break if isinstance(thisobj, vim.Folder): paths.append(thisobj.name) paths.reverse() - return '/' + '/'.join(paths) + return "/" + "/".join(paths) def _get_vm_prop(vm, attributes): @@ -281,45 +332,52 @@ def _get_vm_prop(vm, attributes): def gather_vm_facts(content, vm): """ Gather facts from vim.VirtualMachine object. 
""" facts = { - 'module_hw': True, - 'hw_name': vm.config.name, - 'hw_power_status': vm.summary.runtime.powerState, - 'hw_guest_full_name': vm.summary.guest.guestFullName, - 'hw_guest_id': vm.summary.guest.guestId, - 'hw_product_uuid': vm.config.uuid, - 'hw_processor_count': vm.config.hardware.numCPU, - 'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket, - 'hw_memtotal_mb': vm.config.hardware.memoryMB, - 'hw_interfaces': [], - 'hw_datastores': [], - 'hw_files': [], - 'hw_esxi_host': None, - 'hw_guest_ha_state': None, - 'hw_is_template': vm.config.template, - 'hw_folder': None, - 'hw_version': vm.config.version, - 'instance_uuid': vm.config.instanceUuid, - 'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')), - 'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')), - 'guest_question': vm.summary.runtime.question, - 'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded, - 'ipv4': None, - 'ipv6': None, - 'annotation': vm.config.annotation, - 'customvalues': {}, - 'snapshots': [], - 'current_snapshot': None, - 'vnc': {}, - 'moid': vm._moId, - 'vimref': "vim.VirtualMachine:%s" % vm._moId, + "module_hw": True, + "hw_name": vm.config.name, + "hw_power_status": vm.summary.runtime.powerState, + "hw_guest_full_name": vm.summary.guest.guestFullName, + "hw_guest_id": vm.summary.guest.guestId, + "hw_product_uuid": vm.config.uuid, + "hw_processor_count": vm.config.hardware.numCPU, + "hw_cores_per_socket": vm.config.hardware.numCoresPerSocket, + "hw_memtotal_mb": vm.config.hardware.memoryMB, + "hw_interfaces": [], + "hw_datastores": [], + "hw_files": [], + "hw_esxi_host": None, + "hw_guest_ha_state": None, + "hw_is_template": vm.config.template, + "hw_folder": None, + "hw_version": vm.config.version, + "instance_uuid": vm.config.instanceUuid, + "guest_tools_status": _get_vm_prop( + vm, ("guest", "toolsRunningStatus") + ), + "guest_tools_version": _get_vm_prop(vm, ("guest", "toolsVersion")), + "guest_question": vm.summary.runtime.question, + "guest_consolidation_needed": vm.summary.runtime.consolidationNeeded, + "ipv4": None, + "ipv6": None, + "annotation": vm.config.annotation, + "customvalues": {}, + "snapshots": [], + "current_snapshot": None, + "vnc": {}, + "moid": vm._moId, + "vimref": "vim.VirtualMachine:%s" % vm._moId, } # facts that may or may not exist if vm.summary.runtime.host: try: host = vm.summary.runtime.host - facts['hw_esxi_host'] = host.summary.config.name - facts['hw_cluster'] = host.parent.name if host.parent and isinstance(host.parent, vim.ClusterComputeResource) else None + facts["hw_esxi_host"] = host.summary.config.name + facts["hw_cluster"] = ( + host.parent.name + if host.parent + and isinstance(host.parent, vim.ClusterComputeResource) + else None + ) except vim.fault.NoPermission: # User does not have read permission for the host system, @@ -327,32 +385,38 @@ def gather_vm_facts(content, vm): # provisioning or power management operations. 
pass if vm.summary.runtime.dasVmProtection: - facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected + facts[ + "hw_guest_ha_state" + ] = vm.summary.runtime.dasVmProtection.dasProtected datastores = vm.datastore for ds in datastores: - facts['hw_datastores'].append(ds.info.name) + facts["hw_datastores"].append(ds.info.name) try: files = vm.config.files layout = vm.layout if files: - facts['hw_files'] = [files.vmPathName] + facts["hw_files"] = [files.vmPathName] for item in layout.snapshot: for snap in item.snapshotFile: - if 'vmsn' in snap: - facts['hw_files'].append(snap) + if "vmsn" in snap: + facts["hw_files"].append(snap) for item in layout.configFile: - facts['hw_files'].append(os.path.join(os.path.dirname(files.vmPathName), item)) + facts["hw_files"].append( + os.path.join(os.path.dirname(files.vmPathName), item) + ) for item in vm.layout.logFile: - facts['hw_files'].append(os.path.join(files.logDirectory, item)) + facts["hw_files"].append( + os.path.join(files.logDirectory, item) + ) for item in vm.layout.disk: for disk in item.diskFile: - facts['hw_files'].append(disk) + facts["hw_files"].append(disk) except Exception: pass - facts['hw_folder'] = PyVmomi.get_vm_path(content, vm) + facts["hw_folder"] = PyVmomi.get_vm_path(content, vm) cfm = content.customFieldsManager # Resolve custom values @@ -365,76 +429,84 @@ def gather_vm_facts(content, vm): # Exit the loop immediately, we found it break - facts['customvalues'][kn] = value_obj.value + facts["customvalues"][kn] = value_obj.value net_dict = {} - vmnet = _get_vm_prop(vm, ('guest', 'net')) + vmnet = _get_vm_prop(vm, ("guest", "net")) if vmnet: for device in vmnet: if device.deviceConfigId > 0: net_dict[device.macAddress] = list(device.ipAddress) if vm.guest.ipAddress: - if ':' in vm.guest.ipAddress: - facts['ipv6'] = vm.guest.ipAddress + if ":" in vm.guest.ipAddress: + facts["ipv6"] = vm.guest.ipAddress else: - facts['ipv4'] = vm.guest.ipAddress + facts["ipv4"] = vm.guest.ipAddress ethernet_idx = 0 for entry in vm.config.hardware.device: - if not hasattr(entry, 'macAddress'): + if not hasattr(entry, "macAddress"): continue if entry.macAddress: mac_addr = entry.macAddress - mac_addr_dash = mac_addr.replace(':', '-') + mac_addr_dash = mac_addr.replace(":", "-") else: mac_addr = mac_addr_dash = None - if (hasattr(entry, 'backing') and hasattr(entry.backing, 'port') and - hasattr(entry.backing.port, 'portKey') and hasattr(entry.backing.port, 'portgroupKey')): + if ( + hasattr(entry, "backing") + and hasattr(entry.backing, "port") + and hasattr(entry.backing.port, "portKey") + and hasattr(entry.backing.port, "portgroupKey") + ): port_group_key = entry.backing.port.portgroupKey port_key = entry.backing.port.portKey else: port_group_key = None port_key = None - factname = 'hw_eth' + str(ethernet_idx) + factname = "hw_eth" + str(ethernet_idx) facts[factname] = { - 'addresstype': entry.addressType, - 'label': entry.deviceInfo.label, - 'macaddress': mac_addr, - 'ipaddresses': net_dict.get(entry.macAddress, None), - 'macaddress_dash': mac_addr_dash, - 'summary': entry.deviceInfo.summary, - 'portgroup_portkey': port_key, - 'portgroup_key': port_group_key, + "addresstype": entry.addressType, + "label": entry.deviceInfo.label, + "macaddress": mac_addr, + "ipaddresses": net_dict.get(entry.macAddress, None), + "macaddress_dash": mac_addr_dash, + "summary": entry.deviceInfo.summary, + "portgroup_portkey": port_key, + "portgroup_key": port_group_key, } - facts['hw_interfaces'].append('eth' + str(ethernet_idx)) + 
facts["hw_interfaces"].append("eth" + str(ethernet_idx)) ethernet_idx += 1 snapshot_facts = list_snapshots(vm) - if 'snapshots' in snapshot_facts: - facts['snapshots'] = snapshot_facts['snapshots'] - facts['current_snapshot'] = snapshot_facts['current_snapshot'] + if "snapshots" in snapshot_facts: + facts["snapshots"] = snapshot_facts["snapshots"] + facts["current_snapshot"] = snapshot_facts["current_snapshot"] - facts['vnc'] = get_vnc_extraconfig(vm) + facts["vnc"] = get_vnc_extraconfig(vm) return facts def deserialize_snapshot_obj(obj): - return {'id': obj.id, - 'name': obj.name, - 'description': obj.description, - 'creation_time': obj.createTime, - 'state': obj.state} + return { + "id": obj.id, + "name": obj.name, + "description": obj.description, + "creation_time": obj.createTime, + "state": obj.state, + } def list_snapshots_recursively(snapshots): snapshot_data = [] for snapshot in snapshots: snapshot_data.append(deserialize_snapshot_obj(snapshot)) - snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList) + snapshot_data = snapshot_data + list_snapshots_recursively( + snapshot.childSnapshotList + ) return snapshot_data @@ -443,32 +515,40 @@ def get_current_snap_obj(snapshots, snapob): for snapshot in snapshots: if snapshot.snapshot == snapob: snap_obj.append(snapshot) - snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob) + snap_obj = snap_obj + get_current_snap_obj( + snapshot.childSnapshotList, snapob + ) return snap_obj def list_snapshots(vm): result = {} - snapshot = _get_vm_prop(vm, ('snapshot',)) + snapshot = _get_vm_prop(vm, ("snapshot",)) if not snapshot: return result if vm.snapshot is None: return result - result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList) + result["snapshots"] = list_snapshots_recursively( + vm.snapshot.rootSnapshotList + ) current_snapref = vm.snapshot.currentSnapshot - current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref) + current_snap_obj = get_current_snap_obj( + vm.snapshot.rootSnapshotList, current_snapref + ) if current_snap_obj: - result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0]) + result["current_snapshot"] = deserialize_snapshot_obj( + current_snap_obj[0] + ) else: - result['current_snapshot'] = dict() + result["current_snapshot"] = dict() return result def get_vnc_extraconfig(vm): result = {} for opts in vm.config.extraConfig: - for optkeyname in ['enabled', 'ip', 'port', 'password']: + for optkeyname in ["enabled", "ip", "port", "password"]: if opts.key.lower() == "remotedisplay.vnc." 
+ optkeyname: result[optkeyname] = opts.value return result @@ -476,69 +556,98 @@ def get_vnc_extraconfig(vm): def vmware_argument_spec(): return dict( - hostname=dict(type='str', - required=False, - fallback=(env_fallback, ['VMWARE_HOST']), - ), - username=dict(type='str', - aliases=['user', 'admin'], - required=False, - fallback=(env_fallback, ['VMWARE_USER'])), - password=dict(type='str', - aliases=['pass', 'pwd'], - required=False, - no_log=True, - fallback=(env_fallback, ['VMWARE_PASSWORD'])), - port=dict(type='int', - default=443, - fallback=(env_fallback, ['VMWARE_PORT'])), - validate_certs=dict(type='bool', - required=False, - default=True, - fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']) - ), - proxy_host=dict(type='str', - required=False, - default=None, - fallback=(env_fallback, ['VMWARE_PROXY_HOST'])), - proxy_port=dict(type='int', - required=False, - default=None, - fallback=(env_fallback, ['VMWARE_PROXY_PORT'])), + hostname=dict( + type="str", + required=False, + fallback=(env_fallback, ["VMWARE_HOST"]), + ), + username=dict( + type="str", + aliases=["user", "admin"], + required=False, + fallback=(env_fallback, ["VMWARE_USER"]), + ), + password=dict( + type="str", + aliases=["pass", "pwd"], + required=False, + no_log=True, + fallback=(env_fallback, ["VMWARE_PASSWORD"]), + ), + port=dict( + type="int", default=443, fallback=(env_fallback, ["VMWARE_PORT"]) + ), + validate_certs=dict( + type="bool", + required=False, + default=True, + fallback=(env_fallback, ["VMWARE_VALIDATE_CERTS"]), + ), + proxy_host=dict( + type="str", + required=False, + default=None, + fallback=(env_fallback, ["VMWARE_PROXY_HOST"]), + ), + proxy_port=dict( + type="int", + required=False, + default=None, + fallback=(env_fallback, ["VMWARE_PROXY_PORT"]), + ), ) -def connect_to_api(module, disconnect_atexit=True, return_si=False, hostname=None, username=None, password=None, port=None, validate_certs=None): - hostname = hostname if hostname else module.params['hostname'] - username = username if username else module.params['username'] - password = password if password else module.params['password'] - port = port if port else module.params.get('port', 443) - validate_certs = validate_certs if validate_certs else module.params['validate_certs'] +def connect_to_api( + module, + disconnect_atexit=True, + return_si=False, + hostname=None, + username=None, + password=None, + port=None, + validate_certs=None, +): + hostname = hostname if hostname else module.params["hostname"] + username = username if username else module.params["username"] + password = password if password else module.params["password"] + port = port if port else module.params.get("port", 443) + validate_certs = ( + validate_certs if validate_certs else module.params["validate_certs"] + ) if not hostname: - module.fail_json(msg="Hostname parameter is missing." - " Please specify this parameter in task or" - " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'") + module.fail_json( + msg="Hostname parameter is missing." + " Please specify this parameter in task or" + " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'" + ) if not username: - module.fail_json(msg="Username parameter is missing." - " Please specify this parameter in task or" - " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'") + module.fail_json( + msg="Username parameter is missing." 
+ " Please specify this parameter in task or" + " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'" + ) if not password: - module.fail_json(msg="Password parameter is missing." - " Please specify this parameter in task or" - " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'") + module.fail_json( + msg="Password parameter is missing." + " Please specify this parameter in task or" + " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'" + ) - if validate_certs and not hasattr(ssl, 'SSLContext'): - module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update ' - 'python or use validate_certs=false.') + if validate_certs and not hasattr(ssl, "SSLContext"): + module.fail_json( + msg="pyVim does not support changing verification mode with python < 2.7.9. Either update " + "python or use validate_certs=false." + ) elif validate_certs: ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ssl_context.verify_mode = ssl.CERT_REQUIRED ssl_context.check_hostname = True ssl_context.load_default_certs() - elif hasattr(ssl, 'SSLContext'): + elif hasattr(ssl, "SSLContext"): ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ssl_context.verify_mode = ssl.CERT_NONE ssl_context.check_hostname = False @@ -546,45 +655,73 @@ def connect_to_api(module, disconnect_atexit=True, return_si=False, hostname=Non ssl_context = None service_instance = None - proxy_host = module.params.get('proxy_host') - proxy_port = module.params.get('proxy_port') + proxy_host = module.params.get("proxy_host") + proxy_port = module.params.get("proxy_port") - connect_args = dict( - host=hostname, - port=port, - ) + connect_args = dict(host=hostname, port=port) if ssl_context: connect_args.update(sslContext=ssl_context) - msg_suffix = '' + msg_suffix = "" try: if proxy_host: msg_suffix = " [proxy: %s:%d]" % (proxy_host, proxy_port) - connect_args.update(httpProxyHost=proxy_host, httpProxyPort=proxy_port) + connect_args.update( + httpProxyHost=proxy_host, httpProxyPort=proxy_port + ) smart_stub = connect.SmartStubAdapter(**connect_args) - session_stub = connect.VimSessionOrientedStub(smart_stub, connect.VimSessionOrientedStub.makeUserLoginMethod(username, password)) - service_instance = vim.ServiceInstance('ServiceInstance', session_stub) + session_stub = connect.VimSessionOrientedStub( + smart_stub, + connect.VimSessionOrientedStub.makeUserLoginMethod( + username, password + ), + ) + service_instance = vim.ServiceInstance( + "ServiceInstance", session_stub + ) else: connect_args.update(user=username, pwd=password) service_instance = connect.SmartConnect(**connect_args) except vim.fault.InvalidLogin as invalid_login: - msg = "Unable to log on to vCenter or ESXi API at %s:%s " % (hostname, port) - module.fail_json(msg="%s as %s: %s" % (msg, username, invalid_login.msg) + msg_suffix) + msg = "Unable to log on to vCenter or ESXi API at %s:%s " % ( + hostname, + port, + ) + module.fail_json( + msg="%s as %s: %s" % (msg, username, invalid_login.msg) + + msg_suffix + ) except vim.fault.NoPermission as no_permission: - module.fail_json(msg="User %s does not have required permission" - " to log on to vCenter or ESXi API at %s:%s : %s" % (username, hostname, port, no_permission.msg)) + module.fail_json( + msg="User %s does not have required permission" + " to log on to vCenter or ESXi API at %s:%s : %s" + % (username, hostname, port, no_permission.msg) + ) except (requests.ConnectionError, ssl.SSLError) as generic_req_exc: - 
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" % (hostname, port, generic_req_exc)) + module.fail_json( + msg="Unable to connect to vCenter or ESXi API at %s on TCP/%s: %s" + % (hostname, port, generic_req_exc) + ) except vmodl.fault.InvalidRequest as invalid_request: # Request is malformed msg = "Failed to get a response from server %s:%s " % (hostname, port) - module.fail_json(msg="%s as request is malformed: %s" % (msg, invalid_request.msg) + msg_suffix) + module.fail_json( + msg="%s as request is malformed: %s" % (msg, invalid_request.msg) + + msg_suffix + ) except Exception as generic_exc: - msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port) + msg_suffix + msg = ( + "Unknown error while connecting to vCenter or ESXi API at %s:%s" + % (hostname, port) + + msg_suffix + ) module.fail_json(msg="%s : %s" % (msg, generic_exc)) if service_instance is None: - msg = "Unknown error while connecting to vCenter or ESXi API at %s:%s" % (hostname, port) + msg = ( + "Unknown error while connecting to vCenter or ESXi API at %s:%s" + % (hostname, port) + ) module.fail_json(msg=msg + msg_suffix) # Disabling atexit should be used in special cases only. @@ -602,21 +739,36 @@ def get_all_objs(content, vimtype, folder=None, recurse=True): folder = content.rootFolder obj = {} - container = content.viewManager.CreateContainerView(folder, vimtype, recurse) + container = content.viewManager.CreateContainerView( + folder, vimtype, recurse + ) for managed_object_ref in container.view: obj.update({managed_object_ref: managed_object_ref.name}) return obj -def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env): +def run_command_in_guest( + content, + vm, + username, + password, + program_path, + program_args, + program_cwd, + program_env, +): - result = {'failed': False} + result = {"failed": False} tools_status = vm.guest.toolsStatus - if (tools_status == 'toolsNotInstalled' or - tools_status == 'toolsNotRunning'): - result['failed'] = True - result['msg'] = "VMwareTools is not installed or is not running in the guest" + if ( + tools_status == "toolsNotInstalled" + or tools_status == "toolsNotRunning" + ): + result["failed"] = True + result[ + "msg" + ] = "VMwareTools is not installed or is not running in the guest" return result # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst @@ -637,7 +789,7 @@ def run_command_in_guest(content, vm, username, password, program_path, program_ ) res = pm.StartProgramInGuest(vm, creds, ps) - result['pid'] = res + result["pid"] = res pdata = pm.ListProcessesInGuest(vm, creds, [res]) # wait for pid to finish @@ -645,19 +797,19 @@ def run_command_in_guest(content, vm, username, password, program_path, program_ time.sleep(1) pdata = pm.ListProcessesInGuest(vm, creds, [res]) - result['owner'] = pdata[0].owner - result['startTime'] = pdata[0].startTime.isoformat() - result['endTime'] = pdata[0].endTime.isoformat() - result['exitCode'] = pdata[0].exitCode - if result['exitCode'] != 0: - result['failed'] = True - result['msg'] = "program exited non-zero" + result["owner"] = pdata[0].owner + result["startTime"] = pdata[0].startTime.isoformat() + result["endTime"] = pdata[0].endTime.isoformat() + result["exitCode"] = pdata[0].exitCode + if result["exitCode"] != 0: + result["failed"] = True + result["msg"] = "program exited non-zero" else: - result['msg'] = "program completed successfully" + result["msg"] = 
"program completed successfully" except Exception as e: - result['msg'] = str(e) - result['failed'] = True + result["msg"] = str(e) + result["failed"] = True return result @@ -666,7 +818,7 @@ def serialize_spec(clonespec): """Serialize a clonespec or a relocation spec""" data = {} attrs = dir(clonespec) - attrs = [x for x in attrs if not x.startswith('_')] + attrs = [x for x in attrs if not x.startswith("_")] for x in attrs: xo = getattr(clonespec, x) if callable(xo): @@ -684,13 +836,13 @@ def serialize_spec(clonespec): data[x] = to_text(xo) elif isinstance(xo, vim.Description): data[x] = { - 'dynamicProperty': serialize_spec(xo.dynamicProperty), - 'dynamicType': serialize_spec(xo.dynamicType), - 'label': serialize_spec(xo.label), - 'summary': serialize_spec(xo.summary), + "dynamicProperty": serialize_spec(xo.dynamicProperty), + "dynamicType": serialize_spec(xo.dynamicType), + "label": serialize_spec(xo.label), + "summary": serialize_spec(xo.summary), } - elif hasattr(xo, 'name'): - data[x] = to_text(xo) + ':' + to_text(xo.name) + elif hasattr(xo, "name"): + data[x] = to_text(xo) + ":" + to_text(xo.name) elif isinstance(xo, vim.vm.ProfileSpec): pass elif issubclass(xt, list): @@ -715,13 +867,19 @@ def serialize_spec(clonespec): return data -def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name): +def find_host_by_cluster_datacenter( + module, content, datacenter_name, cluster_name, host_name +): dc = find_datacenter_by_name(content, datacenter_name) if dc is None: - module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name) + module.fail_json( + msg="Unable to find datacenter with name %s" % datacenter_name + ) cluster = find_cluster_by_name(content, cluster_name, datacenter=dc) if cluster is None: - module.fail_json(msg="Unable to find cluster with name %s" % cluster_name) + module.fail_json( + msg="Unable to find cluster with name %s" % cluster_name + ) for host in cluster.host: if host.name == host_name: @@ -736,48 +894,59 @@ def set_vm_power_state(content, vm, state, force, timeout=0): requested states. force is forceful """ facts = gather_vm_facts(content, vm) - expected_state = state.replace('_', '').replace('-', '').lower() - current_state = facts['hw_power_status'].lower() - result = dict( - changed=False, - failed=False, - ) + expected_state = state.replace("_", "").replace("-", "").lower() + current_state = facts["hw_power_status"].lower() + result = dict(changed=False, failed=False) # Need Force - if not force and current_state not in ['poweredon', 'poweredoff']: - result['failed'] = True - result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state - result['instance'] = gather_vm_facts(content, vm) + if not force and current_state not in ["poweredon", "poweredoff"]: + result["failed"] = True + result["msg"] = ( + "Virtual Machine is in %s power state. Force is required!" 
+ % current_state + ) + result["instance"] = gather_vm_facts(content, vm) return result # State is not already true if current_state != expected_state: task = None try: - if expected_state == 'poweredoff': + if expected_state == "poweredoff": task = vm.PowerOff() - elif expected_state == 'poweredon': + elif expected_state == "poweredon": task = vm.PowerOn() - elif expected_state == 'restarted': - if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'): + elif expected_state == "restarted": + if current_state in ( + "poweredon", + "poweringon", + "resetting", + "poweredoff", + ): task = vm.Reset() else: - result['failed'] = True - result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state - - elif expected_state == 'suspended': - if current_state in ('poweredon', 'poweringon'): + result["failed"] = True + result["msg"] = ( + "Cannot restart virtual machine in the current state %s" + % current_state + ) + + elif expected_state == "suspended": + if current_state in ("poweredon", "poweringon"): task = vm.Suspend() else: - result['failed'] = True - result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state - - elif expected_state in ['shutdownguest', 'rebootguest']: - if current_state == 'poweredon': - if vm.guest.toolsRunningStatus == 'guestToolsRunning': - if expected_state == 'shutdownguest': + result["failed"] = True + result["msg"] = ( + "Cannot suspend virtual machine in the current state %s" + % current_state + ) + + elif expected_state in ["shutdownguest", "rebootguest"]: + if current_state == "poweredon": + if vm.guest.toolsRunningStatus == "guestToolsRunning": + if expected_state == "shutdownguest": task = vm.ShutdownGuest() if timeout > 0: result.update(wait_for_poweroff(vm, timeout)) @@ -785,32 +954,39 @@ def set_vm_power_state(content, vm, state, force, timeout=0): task = vm.RebootGuest() # Set result['changed'] immediately because # shutdown and reboot return None. 
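# (Editorial aside, hedged: unlike PowerOff()/PowerOn(), the ShutdownGuest()/
#  RebootGuest() calls above are guest-OS requests that return no Task object
#  to wait on, so 'changed' is asserted here and, for shutdown, completion is
#  only confirmed by the wait_for_poweroff() poll when a timeout was requested.)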
- result['changed'] = True + result["changed"] = True else: - result['failed'] = True - result['msg'] = "VMware tools should be installed for guest shutdown/reboot" + result["failed"] = True + result[ + "msg" + ] = "VMware tools should be installed for guest shutdown/reboot" else: - result['failed'] = True - result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name + result["failed"] = True + result["msg"] = ( + "Virtual machine %s must be in poweredon state for guest shutdown/reboot" + % vm.name + ) else: - result['failed'] = True - result['msg'] = "Unsupported expected state provided: %s" % expected_state + result["failed"] = True + result["msg"] = ( + "Unsupported expected state provided: %s" % expected_state + ) except Exception as e: - result['failed'] = True - result['msg'] = to_text(e) + result["failed"] = True + result["msg"] = to_text(e) if task: wait_for_task(task) - if task.info.state == 'error': - result['failed'] = True - result['msg'] = task.info.error.msg + if task.info.state == "error": + result["failed"] = True + result["msg"] = task.info.error.msg else: - result['changed'] = True + result["changed"] = True # need to get new metadata if changed - result['instance'] = gather_vm_facts(content, vm) + result["instance"] = gather_vm_facts(content, vm) return result @@ -819,17 +995,17 @@ def wait_for_poweroff(vm, timeout=300): result = dict() interval = 15 while timeout > 0: - if vm.runtime.powerState.lower() == 'poweredoff': + if vm.runtime.powerState.lower() == "poweredoff": break time.sleep(interval) timeout -= interval else: - result['failed'] = True - result['msg'] = 'Timeout while waiting for VM power off.' + result["failed"] = True + result["msg"] = "Timeout while waiting for VM power off." 
return result -def is_integer(value, type_of='int'): +def is_integer(value, type_of="int"): try: VmomiSupport.vmodlTypes[type_of](value) return True @@ -838,13 +1014,13 @@ def is_integer(value, type_of='int'): def is_boolean(value): - if str(value).lower() in ['true', 'on', 'yes', 'false', 'off', 'no']: + if str(value).lower() in ["true", "on", "yes", "false", "off", "no"]: return True return False def is_truthy(value): - if str(value).lower() in ['true', 'on', 'yes']: + if str(value).lower() in ["true", "on", "yes"]: return True return False @@ -859,16 +1035,23 @@ def option_diff(options, current_options): change_option_list = [] for option_key, option_value in options.items(): if is_boolean(option_value): - option_value = VmomiSupport.vmodlTypes['bool'](is_truthy(option_value)) + option_value = VmomiSupport.vmodlTypes["bool"]( + is_truthy(option_value) + ) elif isinstance(option_value, int): - option_value = VmomiSupport.vmodlTypes['int'](option_value) + option_value = VmomiSupport.vmodlTypes["int"](option_value) elif isinstance(option_value, float): - option_value = VmomiSupport.vmodlTypes['float'](option_value) + option_value = VmomiSupport.vmodlTypes["float"](option_value) elif isinstance(option_value, str): - option_value = VmomiSupport.vmodlTypes['string'](option_value) + option_value = VmomiSupport.vmodlTypes["string"](option_value) - if option_key not in current_options_dict or current_options_dict[option_key] != option_value: - change_option_list.append(vim.option.OptionValue(key=option_key, value=option_value)) + if ( + option_key not in current_options_dict + or current_options_dict[option_key] != option_value + ): + change_option_list.append( + vim.option.OptionValue(key=option_key, value=option_value) + ) return change_option_list @@ -883,11 +1066,8 @@ def quote_obj_name(object_name=None): return None from collections import OrderedDict - SPECIAL_CHARS = OrderedDict({ - '%': '%25', - '/': '%2f', - '\\': '%5c' - }) + + SPECIAL_CHARS = OrderedDict({"%": "%25", "/": "%2f", "\\": "%5c"}) for key in SPECIAL_CHARS.keys(): if key in object_name: object_name = object_name.replace(key, SPECIAL_CHARS[key]) @@ -901,12 +1081,15 @@ def __init__(self, module): Constructor """ if not HAS_REQUESTS: - module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) + module.fail_json( + msg=missing_required_lib("requests"), + exception=REQUESTS_IMP_ERR, + ) if not HAS_PYVMOMI: - module.fail_json(msg=missing_required_lib('PyVmomi'), - exception=PYVMOMI_IMP_ERR) + module.fail_json( + msg=missing_required_lib("PyVmomi"), exception=PYVMOMI_IMP_ERR + ) self.module = module self.params = module.params @@ -927,11 +1110,13 @@ def is_vcenter(self): try: api_type = self.content.about.apiType except (vmodl.RuntimeFault, vim.fault.VimFault) as exc: - self.module.fail_json(msg="Failed to get status of vCenter server : %s" % exc.msg) + self.module.fail_json( + msg="Failed to get status of vCenter server : %s" % exc.msg + ) - if api_type == 'VirtualCenter': + if api_type == "VirtualCenter": return True - elif api_type == 'HostAgent': + elif api_type == "HostAgent": return False def get_managed_objects_properties(self, vim_type, properties=None): @@ -945,38 +1130,38 @@ def get_managed_objects_properties(self, vim_type, properties=None): root_folder = self.content.rootFolder if properties is None: - properties = ['name'] + properties = ["name"] # Create Container View with default root folder - mor = self.content.viewManager.CreateContainerView(root_folder, [vim_type], True) + mor = 
self.content.viewManager.CreateContainerView( + root_folder, [vim_type], True + ) # Create Traversal spec traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( name="traversal_spec", - path='view', + path="view", skip=False, - type=vim.view.ContainerView + type=vim.view.ContainerView, ) # Create Property Spec property_spec = vmodl.query.PropertyCollector.PropertySpec( type=vim_type, # Type of object to retrieved all=False, - pathSet=properties + pathSet=properties, ) # Create Object Spec object_spec = vmodl.query.PropertyCollector.ObjectSpec( - obj=mor, - skip=True, - selectSet=[traversal_spec] + obj=mor, skip=True, selectSet=[traversal_spec] ) # Create Filter Spec filter_spec = vmodl.query.PropertyCollector.FilterSpec( objectSet=[object_spec], propSet=[property_spec], - reportMissingObjectsInResults=False + reportMissingObjectsInResults=False, ) return self.content.propertyCollector.RetrieveContents([filter_spec]) @@ -990,58 +1175,75 @@ def get_vm(self): """ vm_obj = None user_desired_path = None - use_instance_uuid = self.params.get('use_instance_uuid') or False - if 'uuid' in self.params and self.params['uuid']: + use_instance_uuid = self.params.get("use_instance_uuid") or False + if "uuid" in self.params and self.params["uuid"]: if not use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.params["uuid"], vm_id_type="uuid" + ) elif use_instance_uuid: - vm_obj = find_vm_by_id(self.content, - vm_id=self.params['uuid'], - vm_id_type="instance_uuid") - elif 'name' in self.params and self.params['name']: - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + vm_obj = find_vm_by_id( + self.content, + vm_id=self.params["uuid"], + vm_id_type="instance_uuid", + ) + elif "name" in self.params and self.params["name"]: + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) vms = [] for temp_vm_object in objects: if ( - len(temp_vm_object.propSet) == 1 and - temp_vm_object.propSet[0].val == self.params['name']): + len(temp_vm_object.propSet) == 1 + and temp_vm_object.propSet[0].val == self.params["name"] + ): vms.append(temp_vm_object.obj) # get_managed_objects_properties may return multiple virtual machine, # following code tries to find user desired one depending upon the folder specified. 
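# Editorial illustration (hedged, not part of the change): with made-up values,
# the disambiguation below builds the "desired" inventory path like this before
# comparing it against each candidate VM's folder path.
dcpath = "/"               # assumed result of compile_folder_path_for_object()
user_defined_dc = "DC0"    # assumed self.params['datacenter']
user_folder = "/vm/prod"   # assumed self.params['folder'] under the default vm folder
user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder)
# -> '/DC0/vm/prod'; a VM whose get_vm_path() folder path contains this string is selected.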
if len(vms) > 1: # We have found multiple virtual machines, decide depending upon folder value - if self.params['folder'] is None: - self.module.fail_json(msg="Multiple virtual machines with same name [%s] found, " - "Folder value is a required parameter to find uniqueness " - "of the virtual machine" % self.params['name'], - details="Please see documentation of the vmware_guest module " - "for folder parameter.") + if self.params["folder"] is None: + self.module.fail_json( + msg="Multiple virtual machines with same name [%s] found, " + "Folder value is a required parameter to find uniqueness " + "of the virtual machine" % self.params["name"], + details="Please see documentation of the vmware_guest module " + "for folder parameter.", + ) # Get folder path where virtual machine is located # User provided folder where user thinks virtual machine is present - user_folder = self.params['folder'] + user_folder = self.params["folder"] # User defined datacenter - user_defined_dc = self.params['datacenter'] + user_defined_dc = self.params["datacenter"] # User defined datacenter's object - datacenter_obj = find_datacenter_by_name(self.content, self.params['datacenter']) + datacenter_obj = find_datacenter_by_name( + self.content, self.params["datacenter"] + ) # Get Path for Datacenter dcpath = compile_folder_path_for_object(vobj=datacenter_obj) # Nested folder does not return trailing / - if not dcpath.endswith('/'): - dcpath += '/' + if not dcpath.endswith("/"): + dcpath += "/" - if user_folder in [None, '', '/']: + if user_folder in [None, "", "/"]: # User provided blank value or # User provided only root value, we fail - self.module.fail_json(msg="vmware_guest found multiple virtual machines with same " - "name [%s], please specify folder path other than blank " - "or '/'" % self.params['name']) - elif user_folder.startswith('/vm/'): + self.module.fail_json( + msg="vmware_guest found multiple virtual machines with same " + "name [%s], please specify folder path other than blank " + "or '/'" % self.params["name"] + ) + elif user_folder.startswith("/vm/"): # User provided nested folder under VMware default vm folder i.e. folder = /vm/india/finance - user_desired_path = "%s%s%s" % (dcpath, user_defined_dc, user_folder) + user_desired_path = "%s%s%s" % ( + dcpath, + user_defined_dc, + user_folder, + ) else: # User defined datacenter is not nested i.e. dcpath = '/' , or # User defined datacenter is nested i.e. dcpath = '/F0/DC0' or @@ -1053,21 +1255,29 @@ def get_vm(self): for vm in vms: # Check if user has provided same path as virtual machine - actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vm) - if not actual_vm_folder_path.startswith("%s%s" % (dcpath, user_defined_dc)): + actual_vm_folder_path = self.get_vm_path( + content=self.content, vm_name=vm + ) + if not actual_vm_folder_path.startswith( + "%s%s" % (dcpath, user_defined_dc) + ): continue if user_desired_path in actual_vm_folder_path: vm_obj = vm break elif vms: # Unique virtual machine found. 
- actual_vm_folder_path = self.get_vm_path(content=self.content, vm_name=vms[0]) - if self.params.get('folder') is None: + actual_vm_folder_path = self.get_vm_path( + content=self.content, vm_name=vms[0] + ) + if self.params.get("folder") is None: vm_obj = vms[0] - elif self.params['folder'] in actual_vm_folder_path: + elif self.params["folder"] in actual_vm_folder_path: vm_obj = vms[0] - elif 'moid' in self.params and self.params['moid']: - vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.params['moid'], self.si._stub) + elif "moid" in self.params and self.params["moid"]: + vm_obj = VmomiSupport.templateOf("VirtualMachine")( + self.params["moid"], self.si._stub + ) if vm_obj: self.current_vm_obj = vm_obj @@ -1102,13 +1312,17 @@ def get_vm_path(content, vm_name): folder_name = folder.name fp = folder.parent # climb back up the tree to find our path, stop before the root folder - while fp is not None and fp.name is not None and fp != content.rootFolder: - folder_name = fp.name + '/' + folder_name + while ( + fp is not None + and fp.name is not None + and fp != content.rootFolder + ): + folder_name = fp.name + "/" + folder_name try: fp = fp.parent except Exception: break - folder_name = '/' + folder_name + folder_name = "/" + folder_name return folder_name def get_vm_or_template(self, template_name=None): @@ -1128,15 +1342,24 @@ def get_vm_or_template(self, template_name=None): if "/" in template_name: vm_obj_path = os.path.dirname(template_name) vm_obj_name = os.path.basename(template_name) - template_obj = find_vm_by_id(self.content, vm_obj_name, vm_id_type="inventory_path", folder=vm_obj_path) + template_obj = find_vm_by_id( + self.content, + vm_obj_name, + vm_id_type="inventory_path", + folder=vm_obj_path, + ) if template_obj: return template_obj else: - template_obj = find_vm_by_id(self.content, vm_id=template_name, vm_id_type="uuid") + template_obj = find_vm_by_id( + self.content, vm_id=template_name, vm_id_type="uuid" + ) if template_obj: return template_obj - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) templates = [] for temp_vm_object in objects: @@ -1149,7 +1372,10 @@ def get_vm_or_template(self, template_name=None): if len(templates) > 1: # We have found multiple virtual machine templates - self.module.fail_json(msg="Multiple virtual machines or templates with same name [%s] found." % template_name) + self.module.fail_json( + msg="Multiple virtual machines or templates with same name [%s] found." 
+ % template_name + ) elif templates: template_obj = templates[0] @@ -1166,7 +1392,9 @@ def find_cluster_by_name(self, cluster_name, datacenter_name=None): Returns: True if found """ - return find_cluster_by_name(self.content, cluster_name, datacenter=datacenter_name) + return find_cluster_by_name( + self.content, cluster_name, datacenter=datacenter_name + ) def get_all_hosts_by_cluster(self, cluster_name): """ @@ -1213,21 +1441,30 @@ def get_all_host_objs(self, cluster_name=None, esxi_host_name=None): host_obj_list.append(list(hosts)[0]) else: if cluster_name: - cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name) + cluster_obj = self.find_cluster_by_name( + cluster_name=cluster_name + ) if cluster_obj: host_obj_list = [host for host in cluster_obj.host] else: - self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name) + self.module.fail_json( + changed=False, + msg="Cluster '%s' not found" % cluster_name, + ) elif esxi_host_name: if isinstance(esxi_host_name, str): esxi_host_name = [esxi_host_name] for host in esxi_host_name: - esxi_host_obj = self.find_hostsystem_by_name(host_name=host) + esxi_host_obj = self.find_hostsystem_by_name( + host_name=host + ) if esxi_host_obj: host_obj_list.append(esxi_host_obj) else: - self.module.fail_json(changed=False, msg="ESXi '%s' not found" % host) + self.module.fail_json( + changed=False, msg="ESXi '%s' not found" % host + ) return host_obj_list @@ -1245,13 +1482,20 @@ def host_version_at_least(self, version=None, vm_obj=None, host_name=None): elif host_name: host_system = self.find_hostsystem_by_name(host_name=host_name) else: - self.module.fail_json(msg='VM object or ESXi host name must be set one.') + self.module.fail_json( + msg="VM object or ESXi host name must be set one." + ) if host_system and version: host_version = host_system.summary.config.product.version - return StrictVersion(host_version) >= StrictVersion('.'.join(map(str, version))) + return StrictVersion(host_version) >= StrictVersion( + ".".join(map(str, version)) + ) else: - self.module.fail_json(msg='Unable to get the ESXi host from vm: %s, or hostname %s,' - 'or the passed ESXi version: %s is None.' % (vm_obj, host_name, version)) + self.module.fail_json( + msg="Unable to get the ESXi host from vm: %s, or hostname %s," + "or the passed ESXi version: %s is None." 
+ % (vm_obj, host_name, version) + ) # Network related functions @staticmethod @@ -1296,7 +1540,9 @@ def find_network_by_name(self, network_name=None): if not network_name: return networks - objects = self.get_managed_objects_properties(vim_type=vim.Network, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.Network, properties=["name"] + ) for temp_vm_object in objects: if len(temp_vm_object.propSet) != 1: @@ -1318,7 +1564,11 @@ def network_exists_by_name(self, network_name=None): ret = False if not network_name: return ret - ret = True if self.find_network_by_name(network_name=network_name) else False + ret = ( + True + if self.find_network_by_name(network_name=network_name) + else False + ) return ret # Datacenter @@ -1332,7 +1582,9 @@ def find_datacenter_by_name(self, datacenter_name): Returns: datacenter managed object if found else None """ - return find_datacenter_by_name(self.content, datacenter_name=datacenter_name) + return find_datacenter_by_name( + self.content, datacenter_name=datacenter_name + ) def is_datastore_valid(self, datastore_obj=None): """ @@ -1342,9 +1594,11 @@ def is_datastore_valid(self, datastore_obj=None): Returns: True if datastore is valid, False if not """ - if not datastore_obj \ - or datastore_obj.summary.maintenanceMode != 'normal' \ - or not datastore_obj.summary.accessible: + if ( + not datastore_obj + or datastore_obj.summary.maintenanceMode != "normal" + or not datastore_obj.summary.accessible + ): return False return True @@ -1359,7 +1613,11 @@ def find_datastore_by_name(self, datastore_name, datacenter_name=None): Returns: datastore managed object if found else None """ - return find_datastore_by_name(self.content, datastore_name=datastore_name, datacenter_name=datacenter_name) + return find_datastore_by_name( + self.content, + datastore_name=datastore_name, + datacenter_name=datacenter_name, + ) def find_folder_by_name(self, folder_name): """ @@ -1401,13 +1659,17 @@ def find_resource_pool_by_name(self, resource_pool_name, folder=None): if not folder: folder = self.content.rootFolder - resource_pools = get_all_objs(self.content, [vim.ResourcePool], folder=folder) + resource_pools = get_all_objs( + self.content, [vim.ResourcePool], folder=folder + ) for rp in resource_pools: if rp.name == resource_pool_name: return rp return None - def find_resource_pool_by_cluster(self, resource_pool_name='Resources', cluster=None): + def find_resource_pool_by_cluster( + self, resource_pool_name="Resources", cluster=None + ): """ Get resource pool managed object by cluster object Args: @@ -1421,7 +1683,7 @@ def find_resource_pool_by_cluster(self, resource_pool_name='Resources', cluster= if not cluster: return desired_rp - if resource_pool_name != 'Resources': + if resource_pool_name != "Resources": # Resource pool name is different than default 'Resources' resource_pools = cluster.resourcePool.resourcePool if resource_pools: @@ -1449,15 +1711,22 @@ def vmdk_disk_path_split(self, vmdk_path): 4. 
vmdk_folder: The "path/to/" portion of the string (os.path.dirname equivalent) """ try: - datastore_name = re.match(r'^\[(.*?)\]', vmdk_path, re.DOTALL).groups()[0] - vmdk_fullpath = re.match(r'\[.*?\] (.*)$', vmdk_path).groups()[0] + datastore_name = re.match( + r"^\[(.*?)\]", vmdk_path, re.DOTALL + ).groups()[0] + vmdk_fullpath = re.match(r"\[.*?\] (.*)$", vmdk_path).groups()[0] vmdk_filename = os.path.basename(vmdk_fullpath) vmdk_folder = os.path.dirname(vmdk_fullpath) return datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder except (IndexError, AttributeError) as e: - self.module.fail_json(msg="Bad path '%s' for filename disk vmdk image: %s" % (vmdk_path, to_native(e))) - - def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder): + self.module.fail_json( + msg="Bad path '%s' for filename disk vmdk image: %s" + % (vmdk_path, to_native(e)) + ) + + def find_vmdk_file( + self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folder + ): """ Return vSphere file object or fail_json Args: @@ -1472,13 +1741,13 @@ def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folde datastore_name = datastore_obj.name datastore_name_sq = "[" + datastore_name + "]" if browser is None: - self.module.fail_json(msg="Unable to access browser for datastore %s" % datastore_name) + self.module.fail_json( + msg="Unable to access browser for datastore %s" + % datastore_name + ) detail_query = vim.host.DatastoreBrowser.FileInfo.Details( - fileOwner=True, - fileSize=True, - fileType=True, - modification=True + fileOwner=True, fileSize=True, fileType=True, modification=True ) search_spec = vim.host.DatastoreBrowser.SearchSpec( details=detail_query, @@ -1486,8 +1755,7 @@ def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folde searchCaseInsensitive=True, ) search_res = browser.SearchSubFolders( - datastorePath=datastore_name_sq, - searchSpec=search_spec + datastorePath=datastore_name_sq, searchSpec=search_spec ) changed = False @@ -1498,19 +1766,26 @@ def find_vmdk_file(self, datastore_obj, vmdk_fullpath, vmdk_filename, vmdk_folde self.module.fail_json(msg=to_native(task_e)) if not changed: - self.module.fail_json(msg="No valid disk vmdk image found for path %s" % vmdk_path) + self.module.fail_json( + msg="No valid disk vmdk image found for path %s" % vmdk_path + ) target_folder_paths = [ - datastore_name_sq + " " + vmdk_folder + '/', + datastore_name_sq + " " + vmdk_folder + "/", datastore_name_sq + " " + vmdk_folder, ] for file_result in search_res.info.result: - for f in getattr(file_result, 'file'): - if f.path == vmdk_filename and file_result.folderPath in target_folder_paths: + for f in getattr(file_result, "file"): + if ( + f.path == vmdk_filename + and file_result.folderPath in target_folder_paths + ): return f - self.module.fail_json(msg="No vmdk file found for path specified [%s]" % vmdk_path) + self.module.fail_json( + msg="No vmdk file found for path specified [%s]" % vmdk_path + ) # # Conversion to JSON @@ -1553,10 +1828,10 @@ def _extract(self, data, remainder): dict """ result = dict() - if '.' not in remainder: + if "." 
not in remainder: result[remainder] = data[remainder] return result - key, remainder = remainder.split('.', 1) + key, remainder = remainder.split(".", 1) result[key] = self._extract(data[key], remainder) return result @@ -1570,8 +1845,14 @@ def _jsonify(self, obj): Return: dict """ - return json.loads(json.dumps(obj, cls=VmomiSupport.VmomiJSONEncoder, - sort_keys=True, strip_dynamic=True)) + return json.loads( + json.dumps( + obj, + cls=VmomiSupport.VmomiJSONEncoder, + sort_keys=True, + strip_dynamic=True, + ) + ) def to_json(self, obj, properties=None): """ @@ -1594,37 +1875,43 @@ def to_json(self, obj, properties=None): dict """ if not HAS_PYVMOMIJSON: - self.module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1') + self.module.fail_json( + msg="The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1" + ) result = dict() if properties: for prop in properties: try: - if '.' in prop: - key, remainder = prop.split('.', 1) + if "." in prop: + key, remainder = prop.split(".", 1) tmp = dict() - tmp[key] = self._extract(self._jsonify(getattr(obj, key)), remainder) + tmp[key] = self._extract( + self._jsonify(getattr(obj, key)), remainder + ) self._deepmerge(result, tmp) else: result[prop] = self._jsonify(getattr(obj, prop)) # To match gather_vm_facts output prop_name = prop - if prop.lower() == '_moid': - prop_name = 'moid' - elif prop.lower() == '_vimref': - prop_name = 'vimref' + if prop.lower() == "_moid": + prop_name = "moid" + elif prop.lower() == "_vimref": + prop_name = "vimref" result[prop_name] = result[prop] except (AttributeError, KeyError): - self.module.fail_json(msg="Property '{0}' not found.".format(prop)) + self.module.fail_json( + msg="Property '{0}' not found.".format(prop) + ) else: result = self._jsonify(obj) return result def get_folder_path(self, cur): - full_path = '/' + cur.name - while hasattr(cur, 'parent') and cur.parent: + full_path = "/" + cur.name + while hasattr(cur, "parent") and cur.parent: if cur.parent == self.content.rootFolder: break cur = cur.parent - full_path = '/' + cur.name + full_path + full_path = "/" + cur.name + full_path return full_path diff --git a/plugins/module_utils/vmware_httpapi/VmwareRestModule.py b/plugins/module_utils/vmware_httpapi/VmwareRestModule.py index 1bb9ba9..08a47b5 100644 --- a/plugins/module_utils/vmware_httpapi/VmwareRestModule.py +++ b/plugins/module_utils/vmware_httpapi/VmwareRestModule.py @@ -3,6 +3,7 @@ # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function + __metaclass__ = type import re @@ -17,12 +18,12 @@ # Describes each supported VMware ReST APIs and lists its base URL. All # vSphere ReST APIs begin with '/rest'. API = dict( - appliance=dict(base='/rest/appliance'), - cis=dict(base='/rest/com/vmware/cis'), - content=dict(base='/rest/com/vmware/content'), - vapi=dict(base='/rest'), - vcenter=dict(base='/rest/vcenter'), - vrops=dict(base='/suiteapi') + appliance=dict(base="/rest/appliance"), + cis=dict(base="/rest/com/vmware/cis"), + content=dict(base="/rest/com/vmware/content"), + vapi=dict(base="/rest"), + vcenter=dict(base="/rest/vcenter"), + vrops=dict(base="/suiteapi"), ) # Query Filters @@ -34,153 +35,118 @@ # format of the expected values is provided as a regex. 
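# Editorial illustration (hedged, values made up): these filter definitions feed
# _build_filter() further below, which renders a module's `filters` option into
# vSphere query parameters and lets get_url_with_filter() append them to the
# object's base URL.
filters = [{"names": "web01"}, {"power_states": "POWERED_ON"}]
# is rendered roughly as
#   ?filter.names=web01&filter.power_states=POWERED_ON
# so a filtered 'vm' lookup would target something like
#   /rest/vcenter/vm?filter.names=web01&filter.power_states=POWERED_ON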
FILTER = dict( clusters=dict( - name='clusters', - id='id', - type='str', - format=r'domain\-[0-9a-fA-F]+', + name="clusters", id="id", type="str", format=r"domain\-[0-9a-fA-F]+" ), connection_states=dict( - name='connection_states', - id='connection state', - type='list', - choices=[ - 'CONNECTED', - 'DISCONNECTED', - 'NOT_RESPONDING', - ], + name="connection_states", + id="connection state", + type="list", + choices=["CONNECTED", "DISCONNECTED", "NOT_RESPONDING"], ), datacenters=dict( - name='datacenters', - id='id', - type='str', - format=r'datacenter\-[0-9a-fA-F]+', + name="datacenters", + id="id", + type="str", + format=r"datacenter\-[0-9a-fA-F]+", ), datastore_types=dict( - name='types', - id='type', - type='list', - choices=[ - '', - 'CIFS', - 'NFS', - 'NFS41', - 'VFFS', - 'VMFS', - 'VSAN', - 'VVOL', - ] + name="types", + id="type", + type="list", + choices=["", "CIFS", "NFS", "NFS41", "VFFS", "VMFS", "VSAN", "VVOL"], ), datastores=dict( - name='datastores', - id='id', - type='str', - format=r'datastore\-[0-9a-fA-F]+', + name="datastores", + id="id", + type="str", + format=r"datastore\-[0-9a-fA-F]+", ), folder_types=dict( - name='type', - id='type', - type='list', + name="type", + id="type", + type="list", choices=[ - '', - 'DATACENTER', - 'DATASTORE', - 'HOST', - 'NETWORK', - 'VIRTUAL_MACHINE', - ] + "", + "DATACENTER", + "DATASTORE", + "HOST", + "NETWORK", + "VIRTUAL_MACHINE", + ], ), folders=dict( - name='folders', - id='id', - type='str', - format=r'group\-[hnv][0-9a-fA-F]+', + name="folders", id="id", type="str", format=r"group\-[hnv][0-9a-fA-F]+" ), hosts=dict( - name='hosts', - id='id', - type='str', - format=r'host\-[0-9a-fA-F]+', - ), - names=dict( - name='names', - id='name', - type='str', - format=r'.+', + name="hosts", id="id", type="str", format=r"host\-[0-9a-fA-F]+" ), + names=dict(name="names", id="name", type="str", format=r".+"), network_types=dict( - name='types', - id='type', - type='list', + name="types", + id="type", + type="list", choices=[ - 'DISTRIBUTED_PORTGROUP', - 'OPAQUE_NETWORK', - 'STANDARD_PORTGROUP', + "DISTRIBUTED_PORTGROUP", + "OPAQUE_NETWORK", + "STANDARD_PORTGROUP", ], ), networks=dict( - name='networks', - id='id', - type='str', - format=r'[dvportgroup|network]\-[0-9a-fA-F]+', + name="networks", + id="id", + type="str", + format=r"[dvportgroup|network]\-[0-9a-fA-F]+", ), parent_folders=dict( - name='parent_folders', - id='id', - type='str', - format=r'group\-[hnv][0-9a-fA-F]+', + name="parent_folders", + id="id", + type="str", + format=r"group\-[hnv][0-9a-fA-F]+", ), parent_resource_pools=dict( - name='parent_resource_pools', - id='id', - type='str', - format=r'resgroup\-[0-9a-fA-F]+', + name="parent_resource_pools", + id="id", + type="str", + format=r"resgroup\-[0-9a-fA-F]+", ), policies=dict( - name='policies', - id='GUID', - type='str', - format=(r'[0-9a-fA-F]{8}' - r'\-[0-9a-fA-F]{4}' - r'\-[0-9a-fA-F]{4}' - r'\-[0-9a-fA-F]{4}' - r'\-[0-9a-fA-F]{12}'), + name="policies", + id="GUID", + type="str", + format=( + r"[0-9a-fA-F]{8}" + r"\-[0-9a-fA-F]{4}" + r"\-[0-9a-fA-F]{4}" + r"\-[0-9a-fA-F]{4}" + r"\-[0-9a-fA-F]{12}" + ), ), power_states=dict( - name='power_states', - id='power state', - type='list', - choices=[ - '', - 'POWERED_OFF', - 'POWERED_ON', - 'SUSPENDED', - ], + name="power_states", + id="power state", + type="list", + choices=["", "POWERED_OFF", "POWERED_ON", "SUSPENDED"], ), resource_pools=dict( - name='resource_pools', - id='id', - type='str', - format=r'resgroup\-[0-9a-fA-F]+', + name="resource_pools", + id="id", + type="str", + 
format=r"resgroup\-[0-9a-fA-F]+", ), status=dict( - name='status', - id='status', - type='list', + name="status", + id="status", + type="list", choices=[ - 'COMPLIANT', - 'NON_COMPLIANT', - 'NOT_APPLICABLE', - 'UNKNOWN', - 'UNKNOWN_COMPLIANCE', - 'OUT_OF_DATE', + "COMPLIANT", + "NON_COMPLIANT", + "NOT_APPLICABLE", + "UNKNOWN", + "UNKNOWN_COMPLIANCE", + "OUT_OF_DATE", ], ), - vms=dict( - name='vms', - id='id', - type='str', - format=r'vm\-[0-9a-fA-F]+', - ), + vms=dict(name="vms", id="id", type="str", format=r"vm\-[0-9a-fA-F]+"), ) # vSphere Inventory Objects @@ -193,147 +159,109 @@ # NOTE: This will be replaced with a class factory pattern as get_id() # and the get_url() family are tied to this structure. INVENTORY = dict( - category=dict( - api='cis', - url='/tagging/category', - filters=[], - ), + category=dict(api="cis", url="/tagging/category", filters=[]), cluster=dict( - api='vcenter', - url='/cluster', - filters=[ - 'clusters', - 'datacenters', - 'folders', - 'names', - ], - ), - content_library=dict( - api='content', - url='/library', - filters=[], - ), - content_type=dict( - api='content', - url='/type', - filters=[], + api="vcenter", + url="/cluster", + filters=["clusters", "datacenters", "folders", "names"], ), + content_library=dict(api="content", url="/library", filters=[]), + content_type=dict(api="content", url="/type", filters=[]), datacenter=dict( - api='vcenter', - url='/datacenter', - filters=[ - 'datacenters', - 'folders', - 'names', - ], + api="vcenter", + url="/datacenter", + filters=["datacenters", "folders", "names"], ), datastore=dict( - api='vcenter', - url='/datastore', + api="vcenter", + url="/datastore", filters=[ - 'datacenters', - 'datastore_types', - 'datastores', - 'folders', - 'names', + "datacenters", + "datastore_types", + "datastores", + "folders", + "names", ], ), folder=dict( - api='vcenter', - url='/folder', + api="vcenter", + url="/folder", filters=[ - 'datacenters', - 'folder_types', - 'folders', - 'names', - 'parent_folders', + "datacenters", + "folder_types", + "folders", + "names", + "parent_folders", ], ), host=dict( - api='vcenter', - url='/host', + api="vcenter", + url="/host", filters=[ - 'clusters', - 'connection_states', - 'datacenters', - 'folders', - 'hosts', - 'names', + "clusters", + "connection_states", + "datacenters", + "folders", + "hosts", + "names", ], ), - local_library=dict( - api='content', - url='/local-library', - filters=[], - ), + local_library=dict(api="content", url="/local-library", filters=[]), network=dict( - api='vcenter', - url='/network', + api="vcenter", + url="/network", filters=[ - 'datacenters', - 'folders', - 'names', - 'network_types', - 'networks', + "datacenters", + "folders", + "names", + "network_types", + "networks", ], ), resource_pool=dict( - api='vcenter', - url='/resource-pool', + api="vcenter", + url="/resource-pool", filters=[ - 'clusters', - 'datacenters', - 'hosts', - 'names', - 'parent_resource_pools', - 'resource_pools', - ] + "clusters", + "datacenters", + "hosts", + "names", + "parent_resource_pools", + "resource_pools", + ], ), storage_policy=dict( - api='vcenter', - url='/storage/policies', - filters=[ - 'policies', - 'status', - 'vms', - ], + api="vcenter", + url="/storage/policies", + filters=["policies", "status", "vms"], ), subscribed_library=dict( - api='content', - url='/subscribed-library', - filters=[], - ), - tag=dict( - api='cis', - url='/tagging/tag', - filters=[], - ), - session=dict( - api='cis', - url='/session', - filters=[], + api="content", url="/subscribed-library", 
filters=[] ), + tag=dict(api="cis", url="/tagging/tag", filters=[]), + session=dict(api="cis", url="/session", filters=[]), vm=dict( - api='vcenter', - url='/vm', + api="vcenter", + url="/vm", filters=[ - 'clusters', - 'datacenters', - 'folders', - 'hosts', - 'names', - 'power_states', - 'resource_pools', - 'vms', + "clusters", + "datacenters", + "folders", + "hosts", + "names", + "power_states", + "resource_pools", + "vms", ], ), ) class VmwareRestModule(AnsibleModule): - - def __init__(self, is_multipart=False, use_object_handler=False, *args, **kwargs): - '''Constructor - This module mediates interactions with the + def __init__( + self, is_multipart=False, use_object_handler=False, *args, **kwargs + ): + """Constructor - This module mediates interactions with the VMware httpapi connector plugin, implementing VMware's ReST API. :module: VmwareRestModule extended from AnsibleModule. @@ -341,7 +269,7 @@ def __init__(self, is_multipart=False, use_object_handler=False, *args, **kwargs Default False :kw use_object_handler: Indicates whether module supports multiple object types. Default False - ''' + """ # Initialize instance arguments self.is_multipart = is_multipart self.use_object_handler = use_object_handler @@ -353,18 +281,10 @@ def __init__(self, is_multipart=False, use_object_handler=False, *args, **kwargs self.key = None # Current information going to httpapi - self.request = dict( - url=None, - filter=None, - data={}, - method=None, - ) + self.request = dict(url=None, filter=None, data={}, method=None) # Last response from httpapi - self.response = dict( - status=None, - data={}, - ) + self.response = dict(status=None, data={}) # Initialize AnsibleModule superclass before params super(VmwareRestModule, self).__init__(*args, **kwargs) @@ -372,18 +292,18 @@ def __init__(self, is_multipart=False, use_object_handler=False, *args, **kwargs # Turn on debug if not specified, but ANSIBLE_DEBUG is set self.module_debug = {} if self._debug: - self.warn('Enable debug output because ANSIBLE_DEBUG was set.') - self.params['log_level'] = 'debug' - self.log_level = self.params['log_level'] + self.warn("Enable debug output because ANSIBLE_DEBUG was set.") + self.params["log_level"] = "debug" + self.log_level = self.params["log_level"] # Params # # REQUIRED: Their absence will chuck a rod - self.allow_multiples = self.params['allow_multiples'] - self.status_code = self.params['status_code'] + self.allow_multiples = self.params["allow_multiples"] + self.status_code = self.params["status_code"] # OPTIONAL: Use params.get() to gracefully fail - self.filters = self.params.get('filters') - self.state = self.params.get('state') + self.filters = self.params.get("filters") + self.state = self.params.get("state") # Initialize connection via httpapi connector. See "REST API Calls" try: @@ -393,36 +313,36 @@ def __init__(self, is_multipart=False, use_object_handler=False, *args, **kwargs # Register default status handlers. See "Dynamic Status Handlers" self._status_handlers = { - 'success': self.handle_default_success, - '401': self.handle_default_401, - '404': self.handle_default_404, - 'default': self.handle_default_generic, + "success": self.handle_default_success, + "401": self.handle_default_401, + "404": self.handle_default_404, + "default": self.handle_default_generic, } if self.use_object_handler: - self._status_handlers['default'] = self.handle_default_object + self._status_handlers["default"] = self.handle_default_object # Debugging # # Tools to handle debugging output from the APIs. 
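# Editorial aside (hedged usage sketch, not part of the change): the
# _status_handlers registry initialised in __init__ above lets a calling module
# swap in its own status handling before issuing a request via the helpers that
# follow the debugging utilities below. `module` is an assumed, already
# constructed VmwareRestModule instance and the handler name is made up.
def handle_missing_vm():
    module.exit_json(changed=False, msg="no matching VM, nothing to do")

module.set_handler("404", handle_missing_vm)
module.get(url=module.get_url("vm"), key="vms")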
def _mod_debug(self, key, **kwargs): self.module_debug[key] = kwargs - if 'module_debug' not in self.module_debug: + if "module_debug" not in self.module_debug: self.module_debug = dict(key=kwargs) else: self.module_debug.update(key=kwargs) def _api_debug(self): - '''Route debugging output to the module output. + """Route debugging output to the module output. NOTE: Adding self.path to result['path'] causes an absent in output. Adding response['data'] causes infinite loop. - ''' + """ return dict( - url=self.request['url'], - filter=self.request['filter'], - data=self.request['data'], - method=self.request['method'], - status=self.response['status'], + url=self.request["url"], + filter=self.request["filter"], + data=self.request["data"], + method=self.request["method"], + status=self.response["status"], state=self.state, ) @@ -450,55 +370,62 @@ def _api_debug(self): # method. User handlers can also chain to the default handlers if # desired. def set_handler(self, status_key, handler): - '''Registers the handler to the status_key''' + """Registers the handler to the status_key""" self._status_handlers[status_key] = handler def _use_handler(self): - '''Invokes the appropriate handler based on status_code''' - if self.response['status'] in self.status_code: - status_key = 'success' + """Invokes the appropriate handler based on status_code""" + if self.response["status"] in self.status_code: + status_key = "success" else: - status_key = str(self.response['status']) + status_key = str(self.response["status"]) if status_key in self._status_handlers.keys(): self._status_handlers[status_key]() else: - self._status_handlers['default']() + self._status_handlers["default"]() def handle_default_success(self): - '''Default handler for all successful status codes''' - self.result[self.key] = self.response['data'] - if self.log_level == 'debug': - self.result[self.key].update( - debug=self._api_debug() - ) + """Default handler for all successful status codes""" + self.result[self.key] = self.response["data"] + if self.log_level == "debug": + self.result[self.key].update(debug=self._api_debug()) if not self.is_multipart: self.exit() def handle_default_401(self): - '''Default handler for Unauthorized (401) errors''' - self.fail(msg="Unable to authenticate. Provided credentials are not valid.") + """Default handler for Unauthorized (401) errors""" + self.fail( + msg="Unable to authenticate. Provided credentials are not valid." 
+ ) def handle_default_404(self): - '''Default handler for Not-Found (404) errors''' + """Default handler for Not-Found (404) errors""" self.fail(msg="Requested object was not found.") def handle_default_generic(self): - '''Catch-all handler for all other status codes''' - msg = self.response['data']['value']['messages'][0]['default_message'] + """Catch-all handler for all other status codes""" + msg = self.response["data"]["value"]["messages"][0]["default_message"] self.fail(msg=msg) def handle_default_object(self): - '''Catch-all handler capable of distinguishing multiple objects''' + """Catch-all handler capable of distinguishing multiple objects""" try: - msg = self.response['data']['value']['messages'][0]['default_message'] + msg = self.response["data"]["value"]["messages"][0][ + "default_message" + ] except (KeyError, TypeError): - msg = 'Unable to find the %s object specified due to %s' % (self.key, self.response) + msg = "Unable to find the %s object specified due to %s" % ( + self.key, + self.response, + ) self.fail(msg=msg) def handle_object_key_error(self): - '''Lazy exception handler''' - msg = ('Please specify correct object type to get information, ' - 'choices are [%s].' % ", ".join(list(INVENTORY.keys()))) + """Lazy exception handler""" + msg = ( + "Please specify correct object type to get information, " + "choices are [%s]." % ", ".join(list(INVENTORY.keys())) + ) self.fail(msg=msg) # REST API Calls @@ -506,98 +433,96 @@ def handle_object_key_error(self): # VMware's REST API uses GET, POST, PUT, PATCH and DELETE http # calls to read, create, update and delete objects and their # attributes. These calls are implemented as functions here. - def get(self, url='/rest', key='result'): - '''Sends a GET request to the httpapi plugin connection to the + def get(self, url="/rest", key="result"): + """Sends a GET request to the httpapi plugin connection to the specified URL. If successful, the returned data will be placed in the output under the specified key. - ''' - self.request.update( - url=url, - data={}, - method='GET', - ) + """ + self.request.update(url=url, data={}, method="GET") self.key = key - self.response['status'], self.response['data'] = self._connection.send_request(url, {}, method='GET') + self.response["status"], self.response[ + "data" + ] = self._connection.send_request(url, {}, method="GET") self._use_handler() - def post(self, url='/rest', data=None, key='result'): - '''Sends a POST request to the httpapi plugin connection to the + def post(self, url="/rest", data=None, key="result"): + """Sends a POST request to the httpapi plugin connection to the specified URL, with the supplied data. If successful, any returned data will be placed in the output under the specified key. - ''' - self.request.update( - url=url, - data=data, - method='POST', - ) + """ + self.request.update(url=url, data=data, method="POST") self.key = key try: - self.response['status'], self.response['data'] = self._connection.send_request(url, data, method='POST') + self.response["status"], self.response[ + "data" + ] = self._connection.send_request(url, data, method="POST") except Exception as e: self.fail(msg=to_native(e)) self._use_handler() - def put(self, url='/rest', data=None, key='result'): - '''Sends a PUT request to the httpapi plugin connection to the + def put(self, url="/rest", data=None, key="result"): + """Sends a PUT request to the httpapi plugin connection to the specified URL, with the supplied data. 
If successful, any returned data will be placed in the output under the specified key. - ''' - self.request.update( - url=url, - data=data, - method='PUT', - ) + """ + self.request.update(url=url, data=data, method="PUT") self.key = key - self.response['status'], self.response['data'] = self._connection.send_request(url, data, method='PUT') + self.response["status"], self.response[ + "data" + ] = self._connection.send_request(url, data, method="PUT") self._use_handler() - def delete(self, url='/rest', data='result', key='result'): - '''Sends a DELETE request to the httpapi plugin connection to + def delete(self, url="/rest", data="result", key="result"): + """Sends a DELETE request to the httpapi plugin connection to the specified URL, with the supplied data. If successful, any returned data will be placed in the output under the specified key. - ''' - self.request.update( - url=url, - data=data, - method='DELETE', - ) + """ + self.request.update(url=url, data=data, method="DELETE") self.key = key - self.response['status'], self.response['data'] = self._connection.send_request(url, data, method='DELETE') + self.response["status"], self.response[ + "data" + ] = self._connection.send_request(url, data, method="DELETE") self._use_handler() def get_id(self, object_type, name): - '''Find id(s) of object(s) with given name. allow_multiples + """Find id(s) of object(s) with given name. allow_multiples determines whether multiple IDs are returned or not. :kw object_type: The inventory object type whose id is desired. :kw name: The name of the object(s) to be retrieved. :returns: a list of strings representing the IDs of the objects. - ''' + """ try: - url = (API[INVENTORY[object_type]['api']]['base'] - + INVENTORY[object_type]['url']) - if '/' in name: - name.replace('/', '%2F') - url += '&filter.names=' + name + url = ( + API[INVENTORY[object_type]["api"]]["base"] + + INVENTORY[object_type]["url"] + ) + if "/" in name: + name.replace("/", "%2F") + url += "&filter.names=" + name except KeyError: - self.fail(msg='object_type must be one of [%s].' - % ", ".join(list(INVENTORY.keys()))) + self.fail( + msg="object_type must be one of [%s]." + % ", ".join(list(INVENTORY.keys())) + ) - status, data = self._connection.send_request(url, {}, method='GET') + status, data = self._connection.send_request(url, {}, method="GET") if status != 200: - self.request.update(url=url, data={}, method='GET') + self.request.update(url=url, data={}, method="GET") self.response.update(status=status, data=data) self.handle_default_generic() - num_items = len(data['value']) + num_items = len(data["value"]) if not self.allow_multiples and num_items > 1: - msg = ('Found %d objects of type %s with name %s. ' - 'Set allow_multiples to True if this is expected.' - % (num_items, object_type, name)) + msg = ( + "Found %d objects of type %s with name %s. " + "Set allow_multiples to True if this is expected." + % (num_items, object_type, name) + ) self.fail(msg=msg) ids = [] @@ -606,7 +531,7 @@ def get_id(self, object_type, name): return ids def _build_filter(self, object_type): - '''Builds a filter from the optionally supplied params''' + """Builds a filter from the optionally supplied params""" if self.filters: try: first = True @@ -614,36 +539,60 @@ def _build_filter(self, object_type): for key in list(filter.keys()): filter_key = key.lower() # Check if filter is valid for current object type or not - if filter_key not in INVENTORY[object_type]['filters']: - msg = ('%s is not a valid %s filter, choices are [%s].' 
- % (key, object_type, ", ".join(INVENTORY[object_type]['filters']))) + if filter_key not in INVENTORY[object_type]["filters"]: + msg = ( + "%s is not a valid %s filter, choices are [%s]." + % ( + key, + object_type, + ", ".join( + INVENTORY[object_type]["filters"] + ), + ) + ) self.fail(msg=msg) # Check if value is valid for the current filter - if ((FILTER[filter_key]['type'] == 'str' and not re.match(FILTER[filter_key]['format'], filter[key])) or - (FILTER[filter_key]['type'] == 'list' and filter[key] not in FILTER[filter_key]['choices'])): - msg = ('%s is not a valid %s %s' % (filter[key], object_type, FILTER[filter_key]['name'])) + if ( + FILTER[filter_key]["type"] == "str" + and not re.match( + FILTER[filter_key]["format"], filter[key] + ) + ) or ( + FILTER[filter_key]["type"] == "list" + and filter[key] + not in FILTER[filter_key]["choices"] + ): + msg = "%s is not a valid %s %s" % ( + filter[key], + object_type, + FILTER[filter_key]["name"], + ) self.fail(msg=msg) if first: - self.request['filter'] = '?' + self.request["filter"] = "?" first = False else: - self.request['filter'] += '&' + self.request["filter"] += "&" # Escape characters - if '/' in filter[key]: - filter[key].replace('/', '%2F') - self.request['filter'] += ('filter.%s=%s' - % (FILTER[filter_key]['name'], filter[key])) + if "/" in filter[key]: + filter[key].replace("/", "%2F") + self.request["filter"] += "filter.%s=%s" % ( + FILTER[filter_key]["name"], + filter[key], + ) except KeyError: self.handle_object_key_error() else: - self.request['filter'] = None - return self.request['filter'] + self.request["filter"] = None + return self.request["filter"] def get_url(self, object_type, with_filter=False): - '''Retrieves the URL of a particular inventory object with or without filter''' + """Retrieves the URL of a particular inventory object with or without filter""" try: - self.url = (API[INVENTORY[object_type]['api']]['base'] - + INVENTORY[object_type]['url']) + self.url = ( + API[INVENTORY[object_type]["api"]]["base"] + + INVENTORY[object_type]["url"] + ) if with_filter: self.url += self._build_filter(object_type) except KeyError: @@ -651,42 +600,37 @@ def get_url(self, object_type, with_filter=False): return self.url def get_url_with_filter(self, object_type): - '''Same as get_url, only with_filter is explicitly set''' + """Same as get_url, only with_filter is explicitly set""" return self.get_url(object_type, with_filter=True) def reset(self): - '''Clears the decks for next request''' - self.request.update( - url=None, - data={}, - method=None, - ) - self.response.update( - status=None, - data={}, - ) + """Clears the decks for next request""" + self.request.update(url=None, data={}, method=None) + self.response.update(status=None, data={}) def fail(self, msg): - if self.log_level == 'debug': - if self.request['url'] is not None: - self.result['debug'] = self._api_debug() + if self.log_level == "debug": + if self.request["url"] is not None: + self.result["debug"] = self._api_debug() AnsibleModule.fail_json(self, msg=msg, **self.result) def exit(self): - '''Called to end client interaction''' - if 'invocation' not in self.result: - self.result['invocation'] = { - 'module_args': self.params, - 'module_kwargs': { - 'is_multipart': self.is_multipart, - 'use_object_handler': self.use_object_handler, - } + """Called to end client interaction""" + if "invocation" not in self.result: + self.result["invocation"] = { + "module_args": self.params, + "module_kwargs": { + "is_multipart": self.is_multipart, + "use_object_handler": 
self.use_object_handler, + }, } - if self.log_level == 'debug': + if self.log_level == "debug": if not self.is_multipart: - self.result['invocation'].update(debug=self._api_debug()) + self.result["invocation"].update(debug=self._api_debug()) if self.module_debug: - self.result['invocation'].update(module_debug=self.module_debug) + self.result["invocation"].update( + module_debug=self.module_debug + ) AnsibleModule.exit_json(self, **self.result) @@ -697,24 +641,30 @@ def _merge_dictionaries(self, a, b): @staticmethod def create_argument_spec(use_filters=False, use_state=False): - '''Provide a default argument spec for this module. Filters and + """Provide a default argument spec for this module. Filters and state are optional parameters depending on the module's needs. Additional parameters can be added. The supplied parameters can have defaults changed or choices pared down, but should not be removed. - ''' + """ argument_spec = dict( - allow_multiples=dict(type='bool', default=False), - log_level=dict(type='str', - choices=['debug', 'info', 'normal'], - default='normal'), - status_code=dict(type='list', default=[200]), + allow_multiples=dict(type="bool", default=False), + log_level=dict( + type="str", + choices=["debug", "info", "normal"], + default="normal", + ), + status_code=dict(type="list", default=[200]), ) if use_filters: - argument_spec.update(filters=dict(type='list', default=[])) + argument_spec.update(filters=dict(type="list", default=[])) if use_state: - argument_spec.update(state=dict(type='list', - choices=['absent', 'present', 'query'], - default='query')) + argument_spec.update( + state=dict( + type="list", + choices=["absent", "present", "query"], + default="query", + ) + ) return argument_spec diff --git a/plugins/module_utils/vmware_rest_client.py b/plugins/module_utils/vmware_rest_client.py index fe89ade..9211a1a 100644 --- a/plugins/module_utils/vmware_rest_client.py +++ b/plugins/module_utils/vmware_rest_client.py @@ -4,6 +4,7 @@ # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function + __metaclass__ = type import traceback @@ -11,6 +12,7 @@ REQUESTS_IMP_ERR = None try: import requests + HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -20,6 +22,7 @@ try: from pyVim import connect from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: PYVMOMI_IMP_ERR = traceback.format_exc() @@ -31,12 +34,15 @@ from vmware.vapi.vsphere.client import create_vsphere_client from com.vmware.vapi.std.errors_client import Unauthorized from com.vmware.content.library_client import Item - from com.vmware.vcenter_client import (Folder, - Datacenter, - ResourcePool, - Datastore, - Cluster, - Host) + from com.vmware.vcenter_client import ( + Folder, + Datacenter, + ResourcePool, + Datastore, + Cluster, + Host, + ) + HAS_VSPHERE = True except ImportError: VSPHERE_IMP_ERR = traceback.format_exc() @@ -78,38 +84,53 @@ def check_required_library(self): """ if not HAS_REQUESTS: - self.module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) + self.module.fail_json( + msg=missing_required_lib("requests"), + exception=REQUESTS_IMP_ERR, + ) if not HAS_PYVMOMI: - self.module.fail_json(msg=missing_required_lib('PyVmomi'), - exception=PYVMOMI_IMP_ERR) + self.module.fail_json( + msg=missing_required_lib("PyVmomi"), exception=PYVMOMI_IMP_ERR + ) if not HAS_VSPHERE: self.module.fail_json( -
msg=missing_required_lib('vSphere Automation SDK', - url='https://code.vmware.com/web/sdk/65/vsphere-automation-python'), - exception=VSPHERE_IMP_ERR) + msg=missing_required_lib( + "vSphere Automation SDK", + url="https://code.vmware.com/web/sdk/65/vsphere-automation-python", + ), + exception=VSPHERE_IMP_ERR, + ) @staticmethod def vmware_client_argument_spec(): return dict( - hostname=dict(type='str', - fallback=(env_fallback, ['VMWARE_HOST'])), - username=dict(type='str', - fallback=(env_fallback, ['VMWARE_USER']), - aliases=['user', 'admin']), - password=dict(type='str', - fallback=(env_fallback, ['VMWARE_PASSWORD']), - aliases=['pass', 'pwd'], - no_log=True), - port=dict(type='int', - default=443, - fallback=(env_fallback, ['VMWARE_PORT'])), - protocol=dict(type='str', - default='https', - choices=['https', 'http']), - validate_certs=dict(type='bool', - fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']), - default=True), + hostname=dict( + type="str", fallback=(env_fallback, ["VMWARE_HOST"]) + ), + username=dict( + type="str", + fallback=(env_fallback, ["VMWARE_USER"]), + aliases=["user", "admin"], + ), + password=dict( + type="str", + fallback=(env_fallback, ["VMWARE_PASSWORD"]), + aliases=["pass", "pwd"], + no_log=True, + ), + port=dict( + type="int", + default=443, + fallback=(env_fallback, ["VMWARE_PORT"]), + ), + protocol=dict( + type="str", default="https", choices=["https", "http"] + ), + validate_certs=dict( + type="bool", + fallback=(env_fallback, ["VMWARE_VALIDATE_CERTS"]), + default=True, + ), ) def connect_to_vsphere_client(self): @@ -117,28 +138,33 @@ def connect_to_vsphere_client(self): Connect to vSphere API Client with Username and Password """ - username = self.params.get('username') - password = self.params.get('password') - hostname = self.params.get('hostname') - port = self.params.get('port') + username = self.params.get("username") + password = self.params.get("password") + hostname = self.params.get("hostname") + port = self.params.get("port") session = requests.Session() - session.verify = self.params.get('validate_certs') + session.verify = self.params.get("validate_certs") if not all([hostname, username, password]): - self.module.fail_json(msg="Missing one of the following : hostname, username, password." - " Please read the documentation for more information.") + self.module.fail_json( + msg="Missing one of the following : hostname, username, password." + " Please read the documentation for more information." 
+ ) client = create_vsphere_client( server="%s:%s" % (hostname, port), username=username, password=password, - session=session) + session=session, + ) if client is None: self.module.fail_json(msg="Failed to login to %s" % hostname) return client - def get_tags_for_object(self, tag_service=None, tag_assoc_svc=None, dobj=None): + def get_tags_for_object( + self, tag_service=None, tag_assoc_svc=None, dobj=None + ): """ Return list of tag objects associated with an object Args: @@ -184,13 +210,17 @@ def get_tags_for_dynamic_obj(self, dobj=None): category_service = self.api_client.tagging.Category for tag_obj in temp_tags_model: - tags.append({ - 'id': tag_obj.id, - 'category_name': category_service.get(tag_obj.category_id).name, - 'name': tag_obj.name, - 'description': tag_obj.description, - 'category_id': tag_obj.category_id, - }) + tags.append( + { + "id": tag_obj.id, + "category_name": category_service.get( + tag_obj.category_id + ).name, + "name": tag_obj.name, + "description": tag_obj.description, + "category_id": tag_obj.category_id, + } + ) return tags @@ -203,7 +233,7 @@ def get_tags_for_cluster(self, cluster_mid=None): Returns: List of tag object associated with the given cluster """ - dobj = DynamicID(type='cluster', id=cluster_mid) + dobj = DynamicID(type="cluster", id=cluster_mid) return self.get_tags_for_dynamic_obj(dobj) def get_tags_for_hostsystem(self, hostsystem_mid=None): @@ -215,7 +245,7 @@ def get_tags_for_hostsystem(self, hostsystem_mid=None): Returns: List of tag object associated with the given host system """ - dobj = DynamicID(type='HostSystem', id=hostsystem_mid) + dobj = DynamicID(type="HostSystem", id=hostsystem_mid) return self.get_tags_for_dynamic_obj(dobj) def get_tags_for_vm(self, vm_mid=None): @@ -227,10 +257,12 @@ def get_tags_for_vm(self, vm_mid=None): Returns: List of tag object associated with the given virtual machine """ - dobj = DynamicID(type='VirtualMachine', id=vm_mid) + dobj = DynamicID(type="VirtualMachine", id=vm_mid) return self.get_tags_for_dynamic_obj(dobj) - def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None): + def get_vm_tags( + self, tag_service=None, tag_association_svc=None, vm_mid=None + ): """ Return list of tag name associated with virtual machine Args: @@ -250,7 +282,7 @@ def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None): temp_tags_model = self.get_tags_for_object( tag_service=tag_service, tag_assoc_svc=tag_association_svc, - dobj=vm_mid + dobj=vm_mid, ) for tag_obj in temp_tags_model: @@ -279,8 +311,14 @@ def get_datacenter_by_name(self, datacenter_name): Note: The method assumes only one datacenter with the mentioned name. 
""" filter_spec = Datacenter.FilterSpec(names=set([datacenter_name])) - datacenter_summaries = self.api_client.vcenter.Datacenter.list(filter_spec) - datacenter = datacenter_summaries[0].datacenter if len(datacenter_summaries) > 0 else None + datacenter_summaries = self.api_client.vcenter.Datacenter.list( + filter_spec + ) + datacenter = ( + datacenter_summaries[0].datacenter + if len(datacenter_summaries) > 0 + else None + ) return datacenter def get_folder_by_name(self, datacenter_name, folder_name): @@ -291,11 +329,15 @@ def get_folder_by_name(self, datacenter_name, folder_name): datacenter = self.get_datacenter_by_name(datacenter_name) if not datacenter: return None - filter_spec = Folder.FilterSpec(type=Folder.Type.VIRTUAL_MACHINE, - names=set([folder_name]), - datacenters=set([datacenter])) + filter_spec = Folder.FilterSpec( + type=Folder.Type.VIRTUAL_MACHINE, + names=set([folder_name]), + datacenters=set([datacenter]), + ) folder_summaries = self.api_client.vcenter.Folder.list(filter_spec) - folder = folder_summaries[0].folder if len(folder_summaries) > 0 else None + folder = ( + folder_summaries[0].folder if len(folder_summaries) > 0 else None + ) return folder def get_resource_pool_by_name(self, datacenter_name, resourcepool_name): @@ -307,10 +349,17 @@ def get_resource_pool_by_name(self, datacenter_name, resourcepool_name): if not datacenter: return None names = set([resourcepool_name]) if resourcepool_name else None - filter_spec = ResourcePool.FilterSpec(datacenters=set([datacenter]), - names=names) - resource_pool_summaries = self.api_client.vcenter.ResourcePool.list(filter_spec) - resource_pool = resource_pool_summaries[0].resource_pool if len(resource_pool_summaries) > 0 else None + filter_spec = ResourcePool.FilterSpec( + datacenters=set([datacenter]), names=names + ) + resource_pool_summaries = self.api_client.vcenter.ResourcePool.list( + filter_spec + ) + resource_pool = ( + resource_pool_summaries[0].resource_pool + if len(resource_pool_summaries) > 0 + else None + ) return resource_pool def get_datastore_by_name(self, datacenter_name, datastore_name): @@ -322,10 +371,17 @@ def get_datastore_by_name(self, datacenter_name, datastore_name): if not datacenter: return None names = set([datastore_name]) if datastore_name else None - filter_spec = Datastore.FilterSpec(datacenters=set([datacenter]), - names=names) - datastore_summaries = self.api_client.vcenter.Datastore.list(filter_spec) - datastore = datastore_summaries[0].datastore if len(datastore_summaries) > 0 else None + filter_spec = Datastore.FilterSpec( + datacenters=set([datacenter]), names=names + ) + datastore_summaries = self.api_client.vcenter.Datastore.list( + filter_spec + ) + datastore = ( + datastore_summaries[0].datastore + if len(datastore_summaries) > 0 + else None + ) return datastore def get_cluster_by_name(self, datacenter_name, cluster_name): @@ -337,10 +393,15 @@ def get_cluster_by_name(self, datacenter_name, cluster_name): if not datacenter: return None names = set([cluster_name]) if cluster_name else None - filter_spec = Cluster.FilterSpec(datacenters=set([datacenter]), - names=names) + filter_spec = Cluster.FilterSpec( + datacenters=set([datacenter]), names=names + ) cluster_summaries = self.api_client.vcenter.Cluster.list(filter_spec) - cluster = cluster_summaries[0].cluster if len(cluster_summaries) > 0 else None + cluster = ( + cluster_summaries[0].cluster + if len(cluster_summaries) > 0 + else None + ) return cluster def get_host_by_name(self, datacenter_name, host_name): @@ -352,8 +413,9 @@ 
def get_host_by_name(self, datacenter_name, host_name): if not datacenter: return None names = set([host_name]) if host_name else None - filter_spec = Host.FilterSpec(datacenters=set([datacenter]), - names=names) + filter_spec = Host.FilterSpec( + datacenters=set([datacenter]), names=names + ) host_summaries = self.api_client.vcenter.Host.list(filter_spec) host = host_summaries[0].host if len(host_summaries) > 0 else None return host @@ -389,7 +451,9 @@ def get_tag_by_name(self, tag_name=None): if not tag_name: return None - return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name) + return self.search_svc_object_by_name( + service=self.api_client.tagging.Tag, svc_obj_name=tag_name + ) def get_category_by_name(self, category_name=None): """ @@ -402,7 +466,10 @@ def get_category_by_name(self, category_name=None): if not category_name: return None - return self.search_svc_object_by_name(service=self.api_client.tagging.Category, svc_obj_name=category_name) + return self.search_svc_object_by_name( + service=self.api_client.tagging.Category, + svc_obj_name=category_name, + ) def get_tag_by_category(self, tag_name=None, category_name=None): """ @@ -418,7 +485,9 @@ def get_tag_by_category(self, tag_name=None, category_name=None): return None if category_name: - category_obj = self.get_category_by_name(category_name=category_name) + category_obj = self.get_category_by_name( + category_name=category_name + ) if not category_obj: return None @@ -426,7 +495,12 @@ def get_tag_by_category(self, tag_name=None, category_name=None): for tag_object in self.api_client.tagging.Tag.list(): tag_obj = self.api_client.tagging.Tag.get(tag_object) - if tag_obj.name == tag_name and tag_obj.category_id == category_obj.id: + if ( + tag_obj.name == tag_name + and tag_obj.category_id == category_obj.id + ): return tag_obj else: - return self.search_svc_object_by_name(service=self.api_client.tagging.Tag, svc_obj_name=tag_name) + return self.search_svc_object_by_name( + service=self.api_client.tagging.Tag, svc_obj_name=tag_name + ) diff --git a/plugins/module_utils/vmware_spbm.py b/plugins/module_utils/vmware_spbm.py index 0b01666..f7cad2a 100644 --- a/plugins/module_utils/vmware_spbm.py +++ b/plugins/module_utils/vmware_spbm.py @@ -4,6 +4,7 @@ # Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) from __future__ import absolute_import, division, print_function + __metaclass__ = type try: @@ -12,7 +13,9 @@ except ImportError: pass -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, +) class SPBM(PyVmomi): @@ -31,13 +34,20 @@ def get_spbm_connection(self): session_cookie = client_stub.cookie.split('"')[1] except IndexError: self.module.fail_json(msg="Failed to get session cookie") - ssl_context = client_stub.schemeArgs.get('context') - additional_headers = {'vcSessionCookie': session_cookie} - hostname = self.module.params['hostname'] + ssl_context = client_stub.schemeArgs.get("context") + additional_headers = {"vcSessionCookie": session_cookie} + hostname = self.module.params["hostname"] if not hostname: - self.module.fail_json(msg="Please specify required parameter - hostname") - stub = SoapStubAdapter(host=hostname, path="/pbm/sdk", version=self.version, - sslContext=ssl_context, requestContext=additional_headers) + self.module.fail_json( + msg="Please specify required parameter - hostname" + ) + stub = 
SoapStubAdapter( + host=hostname, + path="/pbm/sdk", + version=self.version, + sslContext=ssl_context, + requestContext=additional_headers, + ) self.spbm_si = pbm.ServiceInstance("ServiceInstance", stub) self.spbm_content = self.spbm_si.PbmRetrieveServiceContent() diff --git a/plugins/modules/_vcenter_extension_facts.py b/plugins/modules/_vcenter_extension_facts.py index a4a7093..ee88369 100644 --- a/plugins/modules/_vcenter_extension_facts.py +++ b/plugins/modules/_vcenter_extension_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vcenter_extension_facts deprecated: @@ -33,9 +34,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about vCenter Extensions vcenter_extension_facts: hostname: '{{ vcenter_hostname }}' @@ -43,9 +44,9 @@ password: '{{ vcenter_password }}' register: ext_facts delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" extension_facts: description: List of extensions returned: success @@ -72,10 +73,13 @@ "extension_version": "5.5" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareExtManager(PyVmomi): @@ -95,11 +99,13 @@ def gather_plugin_facts(self): extension_key=ext.key, extension_company=ext.company, extension_version=ext.version, - extension_type=ext.type if ext.type else '', - extension_subject_name=ext.subjectName if ext.subjectName else '', + extension_type=ext.type if ext.type else "", + extension_subject_name=ext.subjectName + if ext.subjectName + else "", extension_last_heartbeat_time=ext.lastHeartbeatTime, ) - result['extension_facts'].append(ext_info) + result["extension_facts"].append(ext_info) self.module.exit_json(**result) @@ -108,8 +114,7 @@ def main(): argument_spec = vmware_argument_spec() module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vcenter_extension_facts_mgr = VmwareExtManager(module) diff --git a/plugins/modules/_vmware_about_facts.py b/plugins/modules/_vmware_about_facts.py index 117887f..8f91ff3 100644 --- a/plugins/modules/_vmware_about_facts.py +++ b/plugins/modules/_vmware_about_facts.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_about_facts deprecated: @@ -34,9 +35,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Provide information about vCenter vmware_about_facts: hostname: '{{ vcenter_hostname }}' @@ -52,9 +53,9 
@@ password: '{{ esxi_password }}' delegate_to: localhost register: esxi_about_info -''' +""" -RETURN = r''' +RETURN = r""" about_facts: description: - dict about VMware server @@ -77,10 +78,13 @@ "vendor": "VMware, Inc.", "version": "6.5.0" } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareAboutManager(PyVmomi): @@ -111,7 +115,7 @@ def gather_about_facts(self): instance_uuid=about.instanceUuid, license_product_name=about.licenseProductName, license_product_version=about.licenseProductVersion, - ) + ), ) @@ -119,8 +123,7 @@ def main(): argument_spec = vmware_argument_spec() module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_about_facts_mgr = VmwareAboutManager(module) diff --git a/plugins/modules/_vmware_category_facts.py b/plugins/modules/_vmware_category_facts.py index 1ca4f26..4d08a19 100644 --- a/plugins/modules/_vmware_category_facts.py +++ b/plugins/modules/_vmware_category_facts.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_category_facts deprecated: @@ -38,9 +39,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about tag categories vmware_category_facts: hostname: "{{ vcenter_hostname }}" @@ -64,9 +65,9 @@ query: "[?category_name==`Category0001`]" - debug: var=category_id -''' +""" -RETURN = r''' +RETURN = r""" tag_category_facts: description: metadata of tag categories returned: always @@ -91,10 +92,12 @@ "category_used_by": [] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VmwareCategoryFactsManager(VmwareRestClient): @@ -118,16 +121,20 @@ def get_all_tag_categories(self): ) ) - self.module.exit_json(changed=False, tag_category_facts=global_tag_categories) + self.module.exit_json( + changed=False, tag_category_facts=global_tag_categories + ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_category_facts = VmwareCategoryFactsManager(module) vmware_category_facts.get_all_tag_categories() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_dns_config.py b/plugins/modules/_vmware_dns_config.py index aab0f68..719402d 100644 --- a/plugins/modules/_vmware_dns_config.py +++ b/plugins/modules/_vmware_dns_config.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import 
absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dns_config short_description: Manage VMware ESXi DNS Configuration @@ -47,9 +50,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Configure ESXi hostname and DNS servers vmware_dns_config: hostname: '{{ esxi_hostname }}' @@ -61,15 +64,21 @@ - 8.8.8.8 - 8.8.4.4 delegate_to: localhost -''' +""" try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + HAS_PYVMOMI, + connect_to_api, + get_all_objs, + vmware_argument_spec, +) def configure_dns(host_system, hostname, domainname, dns_servers): @@ -99,25 +108,33 @@ def configure_dns(host_system, hostname, domainname, dns_servers): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'), - domainname=dict(required=True, type='str'), - dns_servers=dict(required=True, type='list'))) - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + argument_spec.update( + dict( + change_hostname_to=dict(required=True, type="str"), + domainname=dict(required=True, type="str"), + dns_servers=dict(required=True, type="list"), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi is required for this module') + module.fail_json(msg="pyvmomi is required for this module") - change_hostname_to = module.params['change_hostname_to'] - domainname = module.params['domainname'] - dns_servers = module.params['dns_servers'] + change_hostname_to = module.params["change_hostname_to"] + domainname = module.params["domainname"] + dns_servers = module.params["dns_servers"] try: content = connect_to_api(module) host = get_all_objs(content, [vim.HostSystem]) if not host: module.fail_json(msg="Unable to locate Physical Host.") host_system = list(host)[0] - changed = configure_dns(host_system, change_hostname_to, domainname, dns_servers) + changed = configure_dns( + host_system, change_hostname_to, domainname, dns_servers + ) module.exit_json(changed=changed) except vmodl.RuntimeFault as runtime_fault: module.fail_json(msg=runtime_fault.msg) @@ -127,5 +144,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_drs_group_facts.py b/plugins/modules/_vmware_drs_group_facts.py index 1d4e45c..a45beed 100644 --- a/plugins/modules/_vmware_drs_group_facts.py +++ b/plugins/modules/_vmware_drs_group_facts.py @@ -9,12 +9,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- deprecated: removed_in: '2.13' @@ -49,9 +49,9 @@ extends_documentation_fragment: - 
vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: "Gather DRS facts about given Cluster" register: cluster_drs_group_facts @@ -71,9 +71,9 @@ username: "{{ vcenter_username }}" datacenter: "{{ datacenter }}" delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" drs_group_facts: description: Metadata about DRS group from given cluster / datacenter returned: always @@ -114,7 +114,7 @@ ], "DC0_C1": [] } -''' +""" try: from pyVmomi import vim @@ -122,11 +122,15 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_datacenter_by_name, + get_all_objs, +) class VmwareDrsGroupFactManager(PyVmomi): - def __init__(self, module, datacenter_name, cluster_name=None): """ Doctring: Init @@ -138,24 +142,32 @@ def __init__(self, module, datacenter_name, cluster_name=None): self.__datacenter_obj = None self.__cluster_name = cluster_name self.__cluster_obj = None - self.__msg = 'Nothing to see here...' + self.__msg = "Nothing to see here..." self.__result = dict() self.__changed = False if datacenter_name: - datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) + datacenter_obj = find_datacenter_by_name( + self.content, datacenter_name=datacenter_name + ) self.cluster_obj_list = [] if datacenter_obj: folder = datacenter_obj.hostFolder - self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder) + self.cluster_obj_list = get_all_objs( + self.content, [vim.ClusterComputeResource], folder + ) else: - raise Exception("Datacenter '%s' not found" % self.__datacenter_name) + raise Exception( + "Datacenter '%s' not found" % self.__datacenter_name + ) if cluster_name: - cluster_obj = self.find_cluster_by_name(cluster_name=self.__cluster_name) + cluster_obj = self.find_cluster_by_name( + cluster_name=self.__cluster_name + ) if cluster_obj is None: raise Exception("Cluster '%s' not found" % self.__cluster_name) @@ -214,17 +226,19 @@ def __normalize_group_data(self, group_obj): return {} # Check if group is a host group - if hasattr(group_obj, 'host'): + if hasattr(group_obj, "host"): return dict( group_name=group_obj.name, - hosts=self.__get_all_from_group(group_obj=group_obj, host_group=True), - type="host" + hosts=self.__get_all_from_group( + group_obj=group_obj, host_group=True + ), + type="host", ) else: return dict( group_name=group_obj.name, vms=self.__get_all_from_group(group_obj=group_obj), - type="vm" + type="vm", ) def gather_facts(self): @@ -240,7 +254,9 @@ def gather_facts(self): cluster_group_facts[cluster_obj.name] = [] for drs_group in cluster_obj.configurationEx.group: - cluster_group_facts[cluster_obj.name].append(self.__normalize_group_data(drs_group)) + cluster_group_facts[cluster_obj.name].append( + self.__normalize_group_data(drs_group) + ) self.__set_result(cluster_group_facts) @@ -250,33 +266,38 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=False, aliases=['datacenter_name']), - cluster_name=dict(type='str', required=False), + datacenter=dict( + type="str", required=False, aliases=["datacenter_name"] + ), + cluster_name=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - 
required_one_of=[['cluster_name', 'datacenter']], - mutually_exclusive=[['cluster_name', 'datacenter']], + required_one_of=[["cluster_name", "datacenter"]], + mutually_exclusive=[["cluster_name", "datacenter"]], ) try: # Create instance of VmwareDrsGroupManager - vmware_drs_group_facts = VmwareDrsGroupFactManager(module=module, - datacenter_name=module.params.get('datacenter'), - cluster_name=module.params.get('cluster_name', None)) + vmware_drs_group_facts = VmwareDrsGroupFactManager( + module=module, + datacenter_name=module.params.get("datacenter"), + cluster_name=module.params.get("cluster_name", None), + ) vmware_drs_group_facts.gather_facts() # Set results - results = dict(failed=False, - drs_group_facts=vmware_drs_group_facts.get_result()) + results = dict( + failed=False, drs_group_facts=vmware_drs_group_facts.get_result() + ) except Exception as error: results = dict(failed=True, msg="Error: %s" % error) - if results['failed']: + if results["failed"]: module.fail_json(**results) else: module.exit_json(**results) diff --git a/plugins/modules/_vmware_drs_rule_facts.py b/plugins/modules/_vmware_drs_rule_facts.py index 5b17bf4..520b08d 100644 --- a/plugins/modules/_vmware_drs_rule_facts.py +++ b/plugins/modules/_vmware_drs_rule_facts.py @@ -9,12 +9,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_drs_rule_facts deprecated: @@ -47,9 +47,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather DRS facts about given Cluster vmware_drs_rule_facts: hostname: '{{ vcenter_hostname }}' @@ -67,9 +67,9 @@ datacenter: '{{ datacenter_name }}' delegate_to: localhost register: datacenter_drs_facts -''' +""" -RETURN = r''' +RETURN = r""" drs_rule_facts: description: metadata about DRS rule from given cluster / datacenter returned: always @@ -112,7 +112,7 @@ } ], } -''' +""" try: from pyVmomi import vim @@ -120,32 +120,48 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_datacenter_by_name, + get_all_objs, +) class VmwareDrsFactManager(PyVmomi): def __init__(self, module): super(VmwareDrsFactManager, self).__init__(module) - datacenter_name = self.params.get('datacenter', None) + datacenter_name = self.params.get("datacenter", None) if datacenter_name: - datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) + datacenter_obj = find_datacenter_by_name( + self.content, datacenter_name=datacenter_name + ) self.cluster_obj_list = [] if datacenter_obj: folder = datacenter_obj.hostFolder - self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder) + self.cluster_obj_list = get_all_objs( + self.content, [vim.ClusterComputeResource], folder + ) else: - self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name) + self.module.fail_json( + changed=False, + msg="Datacenter '%s' not found" % datacenter_name, + ) - cluster_name = self.params.get('cluster_name', None) + cluster_name = self.params.get("cluster_name", None) if cluster_name: 
cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name) if cluster_obj is None: - self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name) + self.module.fail_json( + changed=False, msg="Cluster '%s' not found" % cluster_name + ) else: self.cluster_obj_list = [cluster_obj] - def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False): + def get_all_from_group( + self, group_name=None, cluster_obj=None, hostgroup=False + ): """ Return all VM / Host names using given group name Args: @@ -183,15 +199,18 @@ def normalize_vm_vm_rule_spec(rule_obj=None): """ if rule_obj is None: return {} - return dict(rule_key=rule_obj.key, - rule_enabled=rule_obj.enabled, - rule_name=rule_obj.name, - rule_mandatory=rule_obj.mandatory, - rule_uuid=rule_obj.ruleUuid, - rule_vms=[vm.name for vm in rule_obj.vm], - rule_type="vm_vm_rule", - rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False, - ) + return dict( + rule_key=rule_obj.key, + rule_enabled=rule_obj.enabled, + rule_name=rule_obj.name, + rule_mandatory=rule_obj.mandatory, + rule_uuid=rule_obj.ruleUuid, + rule_vms=[vm.name for vm in rule_obj.vm], + rule_type="vm_vm_rule", + rule_affinity=True + if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) + else False, + ) def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None): """ @@ -205,24 +224,30 @@ def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None): """ if not all([rule_obj, cluster_obj]): return {} - return dict(rule_key=rule_obj.key, - rule_enabled=rule_obj.enabled, - rule_name=rule_obj.name, - rule_mandatory=rule_obj.mandatory, - rule_uuid=rule_obj.ruleUuid, - rule_vm_group_name=rule_obj.vmGroupName, - rule_affine_host_group_name=rule_obj.affineHostGroupName, - rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName, - rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName, - cluster_obj=cluster_obj), - rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName, - cluster_obj=cluster_obj, - hostgroup=True), - rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName, - cluster_obj=cluster_obj, - hostgroup=True), - rule_type="vm_host_rule", - ) + return dict( + rule_key=rule_obj.key, + rule_enabled=rule_obj.enabled, + rule_name=rule_obj.name, + rule_mandatory=rule_obj.mandatory, + rule_uuid=rule_obj.ruleUuid, + rule_vm_group_name=rule_obj.vmGroupName, + rule_affine_host_group_name=rule_obj.affineHostGroupName, + rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName, + rule_vms=self.get_all_from_group( + group_name=rule_obj.vmGroupName, cluster_obj=cluster_obj + ), + rule_affine_hosts=self.get_all_from_group( + group_name=rule_obj.affineHostGroupName, + cluster_obj=cluster_obj, + hostgroup=True, + ), + rule_anti_affine_hosts=self.get_all_from_group( + group_name=rule_obj.antiAffineHostGroupName, + cluster_obj=cluster_obj, + hostgroup=True, + ), + rule_type="vm_host_rule", + ) def gather_drs_rule_facts(self): """ @@ -235,10 +260,15 @@ def gather_drs_rule_facts(self): cluster_rule_facts[cluster_obj.name] = [] for drs_rule in cluster_obj.configuration.rule: if isinstance(drs_rule, vim.cluster.VmHostRuleInfo): - cluster_rule_facts[cluster_obj.name].append(self.normalize_vm_host_rule_spec(rule_obj=drs_rule, - cluster_obj=cluster_obj)) + cluster_rule_facts[cluster_obj.name].append( + self.normalize_vm_host_rule_spec( + rule_obj=drs_rule, cluster_obj=cluster_obj + ) + ) else: - 
cluster_rule_facts[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule)) + cluster_rule_facts[cluster_obj.name].append( + self.normalize_vm_vm_rule_spec(rule_obj=drs_rule) + ) return cluster_rule_facts @@ -246,20 +276,20 @@ def gather_drs_rule_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), + datacenter=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'datacenter'], - ], + required_one_of=[["cluster_name", "datacenter"]], supports_check_mode=True, ) vmware_drs_facts = VmwareDrsFactManager(module) - module.exit_json(changed=False, drs_rule_facts=vmware_drs_facts.gather_drs_rule_facts()) + module.exit_json( + changed=False, drs_rule_facts=vmware_drs_facts.gather_drs_rule_facts() + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_dvs_portgroup_facts.py b/plugins/modules/_vmware_dvs_portgroup_facts.py index ff1a8d1..173da0f 100644 --- a/plugins/modules/_vmware_dvs_portgroup_facts.py +++ b/plugins/modules/_vmware_dvs_portgroup_facts.py @@ -6,16 +6,17 @@ from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_dvs_portgroup_facts deprecated: @@ -66,9 +67,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get facts about DVPG vmware_dvs_portgroup_facts: hostname: "{{ vcenter_server }}" @@ -84,9 +85,9 @@ - "{{ dvpg_facts.dvs_portgroup_facts['dvs_001'] | json_query(query) }}" vars: query: "[?portgroup_name=='dvpg_001']" -''' +""" -RETURN = r''' +RETURN = r""" dvs_portgroup_facts: description: metadata about DVS portgroup configuration returned: on success @@ -131,7 +132,7 @@ }, ] } -''' +""" try: from pyVmomi import vim @@ -139,28 +140,41 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs, find_dvs_by_name +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + get_all_objs, + find_dvs_by_name, +) class DVSPortgroupFactsManager(PyVmomi): def __init__(self, module): super(DVSPortgroupFactsManager, self).__init__(module) - self.dc_name = self.params['datacenter'] - self.dvs_name = self.params['dvswitch'] + self.dc_name = self.params["datacenter"] + self.dvs_name = self.params["dvswitch"] datacenter = self.find_datacenter_by_name(self.dc_name) if datacenter is None: - self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name) + self.module.fail_json( + msg="Failed to find the datacenter %s" % self.dc_name + ) if self.dvs_name: # User specified specific dvswitch name to gather information dvsn = find_dvs_by_name(self.content, self.dvs_name) if dvsn is None: - self.module.fail_json(msg="Failed to find the dvswitch %s" % self.dvs_name) + self.module.fail_json( + msg="Failed to find the dvswitch %s" % self.dvs_name + ) self.dvsls = [dvsn] else: # default behaviour, gather information about all dvswitches - self.dvsls = get_all_objs(self.content, 
[vim.DistributedVirtualSwitch], folder=datacenter.networkFolder) + self.dvsls = get_all_objs( + self.content, + [vim.DistributedVirtualSwitch], + folder=datacenter.networkFolder, + ) def get_vlan_info(self, vlan_obj=None): """ @@ -174,7 +188,9 @@ def get_vlan_info(self, vlan_obj=None): if not vlan_obj: return vdret - if isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec): + if isinstance( + vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec + ): vlan_id_list = [] for vli in vlan_obj.vlanId: if vli.start == vli.end: @@ -182,10 +198,16 @@ def get_vlan_info(self, vlan_obj=None): else: vlan_id_list.append(str(vli.start) + "-" + str(vli.end)) vdret = dict(trunk=True, pvlan=False, vlan_id=vlan_id_list) - elif isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec): - vdret = dict(trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId)) + elif isinstance( + vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec + ): + vdret = dict( + trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId) + ) else: - vdret = dict(trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId)) + vdret = dict( + trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId) + ) return vdret @@ -200,13 +222,16 @@ def gather_dvs_portgroup_facts(self): port_policy = dict() vlan_info = dict() - if self.module.params['show_network_policy'] and dvs_pg.config.defaultPortConfig.securityPolicy: + if ( + self.module.params["show_network_policy"] + and dvs_pg.config.defaultPortConfig.securityPolicy + ): network_policy = dict( forged_transmits=dvs_pg.config.defaultPortConfig.securityPolicy.forgedTransmits.value, promiscuous=dvs_pg.config.defaultPortConfig.securityPolicy.allowPromiscuous.value, - mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value + mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value, ) - if self.module.params['show_teaming_policy']: + if self.module.params["show_teaming_policy"]: # govcsim does not have uplinkTeamingPolicy, remove this check once # PR https://github.com/vmware/govmomi/pull/1524 merged. 
if dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy: @@ -217,7 +242,7 @@ def gather_dvs_portgroup_facts(self): rolling_order=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.rollingOrder.value, ) - if self.params['show_port_policy']: + if self.params["show_port_policy"]: # govcsim does not have port policy if dvs_pg.config.policy: port_policy = dict( @@ -231,11 +256,13 @@ def gather_dvs_portgroup_facts(self): traffic_filter_override=dvs_pg.config.policy.trafficFilterOverrideAllowed, uplink_teaming_override=dvs_pg.config.policy.uplinkTeamingOverrideAllowed, vendor_config_override=dvs_pg.config.policy.vendorConfigOverrideAllowed, - vlan_override=dvs_pg.config.policy.vlanOverrideAllowed + vlan_override=dvs_pg.config.policy.vlanOverrideAllowed, ) - if self.params['show_vlan_info']: - vlan_info = self.get_vlan_info(dvs_pg.config.defaultPortConfig.vlan) + if self.params["show_vlan_info"]: + vlan_info = self.get_vlan_info( + dvs_pg.config.defaultPortConfig.vlan + ) dvpg_details = dict( portgroup_name=dvs_pg.name, @@ -256,21 +283,22 @@ def gather_dvs_portgroup_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=True), - show_network_policy=dict(type='bool', default=True), - show_teaming_policy=dict(type='bool', default=True), - show_port_policy=dict(type='bool', default=True), + datacenter=dict(type="str", required=True), + show_network_policy=dict(type="bool", default=True), + show_teaming_policy=dict(type="bool", default=True), + show_port_policy=dict(type="bool", default=True), dvswitch=dict(), - show_vlan_info=dict(type='bool', default=False), + show_vlan_info=dict(type="bool", default=False), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) dvs_pg_mgr = DVSPortgroupFactsManager(module) - module.exit_json(changed=False, - dvs_portgroup_facts=dvs_pg_mgr.gather_dvs_portgroup_facts()) + module.exit_json( + changed=False, + dvs_portgroup_facts=dvs_pg_mgr.gather_dvs_portgroup_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_guest_boot_facts.py b/plugins/modules/_vmware_guest_boot_facts.py index 8d735c4..c05a636 100644 --- a/plugins/modules/_vmware_guest_boot_facts.py +++ b/plugins/modules/_vmware_guest_boot_facts.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_boot_facts deprecated: @@ -62,9 +63,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about virtual machine's boot order and related parameters vmware_guest_boot_facts: hostname: "{{ vcenter_hostname }}" @@ -82,7 +83,7 @@ validate_certs: no moid: "vm-42" register: vm_moid_boot_order_facts -''' +""" RETURN = r""" vm_boot_facts: @@ -107,7 +108,11 @@ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_vm_by_id, 
+) try: from pyVmomi import vim, VmomiSupport @@ -118,10 +123,10 @@ class VmBootFactsManager(PyVmomi): def __init__(self, module): super(VmBootFactsManager, self).__init__(module) - self.name = self.params['name'] - self.uuid = self.params['uuid'] - self.moid = self.params['moid'] - self.use_instance_uuid = self.params['use_instance_uuid'] + self.name = self.params["name"] + self.uuid = self.params["uuid"] + self.moid = self.params["moid"] + self.use_instance_uuid = self.params["use_instance_uuid"] self.vm = None def _get_vm(self): @@ -129,44 +134,60 @@ def _get_vm(self): if self.uuid: if self.use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="use_instance_uuid") + vm_obj = find_vm_by_id( + self.content, + vm_id=self.uuid, + vm_id_type="use_instance_uuid", + ) else: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.uuid, vm_id_type="uuid" + ) if vm_obj is None: - self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid) + self.module.fail_json( + msg="Failed to find the virtual machine with UUID : %s" + % self.uuid + ) vms = [vm_obj] elif self.name: - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) for temp_vm_object in objects: if temp_vm_object.obj.name == self.name: vms.append(temp_vm_object.obj) elif self.moid: - vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub) + vm_obj = VmomiSupport.templateOf("VirtualMachine")( + self.module.params["moid"], self.si._stub + ) if vm_obj: vms.append(vm_obj) if vms: - if self.params.get('name_match') == 'first': + if self.params.get("name_match") == "first": self.vm = vms[0] - elif self.params.get('name_match') == 'last': + elif self.params.get("name_match") == "last": self.vm = vms[-1] else: - self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid or self.moid)) + self.module.fail_json( + msg="Failed to find virtual machine using %s" + % (self.name or self.uuid or self.moid) + ) @staticmethod def humanize_boot_order(boot_order): results = [] for device in boot_order: if isinstance(device, vim.vm.BootOptions.BootableCdromDevice): - results.append('cdrom') + results.append("cdrom") elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice): - results.append('disk') + results.append("disk") elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice): - results.append('ethernet') + results.append("ethernet") elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice): - results.append('floppy') + results.append("floppy") return results def ensure(self): @@ -175,13 +196,15 @@ def ensure(self): results = dict() if self.vm and self.vm.config: results = dict( - current_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder), + current_boot_order=self.humanize_boot_order( + self.vm.config.bootOptions.bootOrder + ), current_boot_delay=self.vm.config.bootOptions.bootDelay, current_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup, current_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled, current_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay, current_boot_firmware=self.vm.config.firmware, - current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled + 
current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled, ) self.module.exit_json(changed=False, vm_boot_facts=results) @@ -190,24 +213,17 @@ def ensure(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - name_match=dict( - choices=['first', 'last'], - default='first' - ), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + name_match=dict(choices=["first", "last"], default="first"), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], + mutually_exclusive=[["name", "uuid", "moid"]], supports_check_mode=True, ) @@ -215,5 +231,5 @@ def main(): pyv.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_guest_customization_facts.py b/plugins/modules/_vmware_guest_customization_facts.py index a8173e9..017970f 100644 --- a/plugins/modules/_vmware_guest_customization_facts.py +++ b/plugins/modules/_vmware_guest_customization_facts.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_customization_facts deprecated: @@ -41,9 +42,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather facts about all customization specification vmware_guest_customization_facts: hostname: "{{ vcenter_hostname }}" @@ -62,7 +63,7 @@ spec_name: custom_linux_spec delegate_to: localhost register: custom_spec_facts -''' +""" RETURN = """ custom_spec_facts: @@ -105,7 +106,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VmwareCustomSpecManger(PyVmomi): @@ -113,20 +117,25 @@ def __init__(self, module): super(VmwareCustomSpecManger, self).__init__(module) self.cc_mgr = self.content.customizationSpecManager if self.cc_mgr is None: - self.module.fail_json(msg="Failed to get customization spec manager.") + self.module.fail_json( + msg="Failed to get customization spec manager." 
+ ) def gather_custom_spec_facts(self): """ Gather facts about customization specifications """ - spec_name = self.params.get('spec_name', None) + spec_name = self.params.get("spec_name", None) specs_list = [] if spec_name: if self.cc_mgr.DoesCustomizationSpecExist(name=spec_name): specs_list.append(spec_name) else: - self.module.fail_json(msg="Unable to find customization specification named '%s'" % spec_name) + self.module.fail_json( + msg="Unable to find customization specification named '%s'" + % spec_name + ) else: available_specs = self.cc_mgr.info for spec_info in available_specs: @@ -142,7 +151,9 @@ def gather_custom_spec_facts(self): ip_address=nic.adapter.ip.ipAddress, subnet_mask=nic.adapter.subnetMask, gateway=[gw for gw in nic.adapter.gateway], - nic_dns_server_list=[ndsl for ndsl in nic.adapter.dnsServerList], + nic_dns_server_list=[ + ndsl for ndsl in nic.adapter.dnsServerList + ], dns_domain=nic.adapter.dnsDomain, primary_wins=nic.adapter.primaryWINS, secondry_wins=nic.adapter.secondaryWINS, @@ -151,9 +162,15 @@ def gather_custom_spec_facts(self): adapter_mapping_list.append(temp_data) current_hostname = None - if isinstance(current_spec.spec.identity.hostName, vim.vm.customization.PrefixNameGenerator): + if isinstance( + current_spec.spec.identity.hostName, + vim.vm.customization.PrefixNameGenerator, + ): current_hostname = current_spec.spec.identity.hostName.base - elif isinstance(current_spec.spec.identity.hostName, vim.vm.customization.FixedName): + elif isinstance( + current_spec.spec.identity.hostName, + vim.vm.customization.FixedName, + ): current_hostname = current_spec.spec.identity.hostName.name spec_facts[spec] = dict( @@ -169,8 +186,12 @@ def gather_custom_spec_facts(self): time_zone=current_spec.spec.identity.timeZone, hw_clock_utc=current_spec.spec.identity.hwClockUTC, # global IP Settings - dns_suffix_list=[i for i in current_spec.spec.globalIPSettings.dnsSuffixList], - dns_server_list=[i for i in current_spec.spec.globalIPSettings.dnsServerList], + dns_suffix_list=[ + i for i in current_spec.spec.globalIPSettings.dnsSuffixList + ], + dns_server_list=[ + i for i in current_spec.spec.globalIPSettings.dnsServerList + ], # NIC setting map nic_setting_map=adapter_mapping_list, ) @@ -179,20 +200,19 @@ def gather_custom_spec_facts(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update( - spec_name=dict(type='str'), - ) + argument_spec.update(spec_name=dict(type="str")) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + argument_spec=argument_spec, supports_check_mode=True ) pyv = VmwareCustomSpecManger(module) try: module.exit_json(custom_spec_facts=pyv.gather_custom_spec_facts()) except Exception as exc: - module.fail_json(msg="Failed to gather facts with exception : %s" % to_text(exc)) + module.fail_json( + msg="Failed to gather facts with exception : %s" % to_text(exc) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_guest_disk_facts.py b/plugins/modules/_vmware_guest_disk_facts.py index adba247..399f3cb 100644 --- a/plugins/modules/_vmware_guest_disk_facts.py +++ b/plugins/modules/_vmware_guest_disk_facts.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": 
["deprecated"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_disk_facts deprecated: @@ -80,9 +81,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather disk facts from virtual machine using UUID vmware_guest_disk_facts: hostname: "{{ vcenter_hostname }}" @@ -115,7 +116,7 @@ moid: vm-42 delegate_to: localhost register: disk_facts -''' +""" RETURN = """ guest_disk_facts: @@ -170,7 +171,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): @@ -192,11 +196,11 @@ def gather_disk_facts(self, vm_obj): return disks_facts controller_types = { - vim.vm.device.VirtualLsiLogicController: 'lsilogic', - vim.vm.device.ParaVirtualSCSIController: 'paravirtual', - vim.vm.device.VirtualBusLogicController: 'buslogic', - vim.vm.device.VirtualLsiLogicSASController: 'lsilogicsas', - vim.vm.device.VirtualIDEController: 'ide' + vim.vm.device.VirtualLsiLogicController: "lsilogic", + vim.vm.device.ParaVirtualSCSIController: "paravirtual", + vim.vm.device.VirtualBusLogicController: "buslogic", + vim.vm.device.VirtualLsiLogicSASController: "lsilogicsas", + vim.vm.device.VirtualIDEController: "ide", } controller_index = 0 @@ -206,7 +210,7 @@ def gather_disk_facts(self, vm_obj): key=controller.key, controller_type=controller_types[type(controller)], bus_number=controller.busNumber, - devices=controller.device + devices=controller.device, ) controller_index += 1 @@ -224,64 +228,141 @@ def gather_disk_facts(self, vm_obj): capacity_in_kb=disk.capacityInKB, capacity_in_bytes=disk.capacityInBytes, ) - if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo): - disks_facts[disk_index]['backing_type'] = 'FlatVer1' - disks_facts[disk_index]['backing_writethrough'] = disk.backing.writeThrough - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo): - disks_facts[disk_index]['backing_type'] = 'FlatVer2' - disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - disks_facts[disk_index]['backing_thinprovisioned'] = bool(disk.backing.thinProvisioned) - disks_facts[disk_index]['backing_eagerlyscrub'] = bool(disk.backing.eagerlyScrub) - disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo): - disks_facts[disk_index]['backing_type'] = 'LocalPMem' - disks_facts[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID - disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo): - disks_facts[disk_index]['backing_type'] = 'PartitionedRawDiskVer2' - disks_facts[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName - disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo): - disks_facts[disk_index]['backing_type'] = 'RawDiskMappingVer1' - disks_facts[disk_index]['backing_devicename'] = disk.backing.deviceName - disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_facts[disk_index]['backing_lunuuid'] = disk.backing.lunUuid 
- disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo): - disks_facts[disk_index]['backing_type'] = 'RawDiskVer2' - disks_facts[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName - disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo): - disks_facts[disk_index]['backing_type'] = 'SeSparse' - disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo): - disks_facts[disk_index]['backing_type'] = 'SparseVer1' - disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_facts[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB - disks_facts[disk_index]['backing_split'] = bool(disk.backing.split) - disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo): - disks_facts[disk_index]['backing_type'] = 'SparseVer2' - disks_facts[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_facts[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB - disks_facts[disk_index]['backing_split'] = bool(disk.backing.split) - disks_facts[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - disks_facts[disk_index]['backing_uuid'] = disk.backing.uuid + if isinstance( + disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo + ): + disks_facts[disk_index]["backing_type"] = "FlatVer1" + disks_facts[disk_index][ + "backing_writethrough" + ] = disk.backing.writeThrough + + elif isinstance( + disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo + ): + disks_facts[disk_index]["backing_type"] = "FlatVer2" + disks_facts[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + disks_facts[disk_index]["backing_thinprovisioned"] = bool( + disk.backing.thinProvisioned + ) + disks_facts[disk_index]["backing_eagerlyscrub"] = bool( + disk.backing.eagerlyScrub + ) + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.LocalPMemBackingInfo, + ): + disks_facts[disk_index]["backing_type"] = "LocalPMem" + disks_facts[disk_index][ + "backing_volumeuuid" + ] = disk.backing.volumeUUID + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo, + ): + disks_facts[disk_index][ + "backing_type" + ] = "PartitionedRawDiskVer2" + disks_facts[disk_index][ + "backing_descriptorfilename" + ] = disk.backing.descriptorFileName + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo, + ): + disks_facts[disk_index][ + "backing_type" + ] = "RawDiskMappingVer1" + disks_facts[disk_index][ + "backing_devicename" + ] = disk.backing.deviceName + disks_facts[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_facts[disk_index][ + "backing_lunuuid" + ] = disk.backing.lunUuid + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo, + ): + 
disks_facts[disk_index]["backing_type"] = "RawDiskVer2" + disks_facts[disk_index][ + "backing_descriptorfilename" + ] = disk.backing.descriptorFileName + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo + ): + disks_facts[disk_index]["backing_type"] = "SeSparse" + disks_facts[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_facts[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.SparseVer1BackingInfo, + ): + disks_facts[disk_index]["backing_type"] = "SparseVer1" + disks_facts[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_facts[disk_index][ + "backing_spaceusedinkb" + ] = disk.backing.spaceUsedInKB + disks_facts[disk_index]["backing_split"] = bool( + disk.backing.split + ) + disks_facts[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.SparseVer2BackingInfo, + ): + disks_facts[disk_index]["backing_type"] = "SparseVer2" + disks_facts[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_facts[disk_index][ + "backing_spaceusedinkb" + ] = disk.backing.spaceUsedInKB + disks_facts[disk_index]["backing_split"] = bool( + disk.backing.split + ) + disks_facts[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + disks_facts[disk_index]["backing_uuid"] = disk.backing.uuid for controller_index in range(len(controller_facts)): - if controller_facts[controller_index]['key'] == disks_facts[disk_index]['controller_key']: - disks_facts[disk_index]['controller_bus_number'] = controller_facts[controller_index]['bus_number'] - disks_facts[disk_index]['controller_type'] = controller_facts[controller_index]['controller_type'] + if ( + controller_facts[controller_index]["key"] + == disks_facts[disk_index]["controller_key"] + ): + disks_facts[disk_index][ + "controller_bus_number" + ] = controller_facts[controller_index]["bus_number"] + disks_facts[disk_index][ + "controller_type" + ] = controller_facts[controller_index][ + "controller_type" + ] disk_index += 1 return disks_facts @@ -290,25 +371,23 @@ def gather_disk_facts(self, vm_obj): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - folder=dict(type='str'), - datacenter=dict(type='str', required=True), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + folder=dict(type="str"), + datacenter=dict(type="str", required=True), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], supports_check_mode=True, ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -319,13 +398,21 @@ def main(): try: module.exit_json(guest_disk_facts=pyv.gather_disk_facts(vm)) except Exception as exc: - 
module.fail_json(msg="Failed to gather facts with exception : %s" % to_text(exc)) + module.fail_json( + msg="Failed to gather facts with exception : %s" % to_text(exc) + ) else: # We unable to find the virtual machine user specified # Bail out - vm_id = (module.params.get('uuid') or module.params.get('moid') or module.params.get('name')) - module.fail_json(msg="Unable to gather disk facts for non-existing VM %s" % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("moid") + or module.params.get("name") + ) + module.fail_json( + msg="Unable to gather disk facts for non-existing VM %s" % vm_id + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_host_capability_facts.py b/plugins/modules/_vmware_host_capability_facts.py index 8939ac2..34be98e 100644 --- a/plugins/modules/_vmware_host_capability_facts.py +++ b/plugins/modules/_vmware_host_capability_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_capability_facts deprecated: @@ -44,9 +45,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather capability facts about all ESXi Host in given Cluster vmware_host_capability_facts: hostname: '{{ vcenter_hostname }}' @@ -64,9 +65,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: hosts_facts -''' +""" -RETURN = r''' +RETURN = r""" hosts_capability_facts: description: metadata about host's capability information returned: always @@ -81,18 +82,23 @@ "cpuHwMmuSupported": true, } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class CapabilityFactsManager(PyVmomi): def __init__(self, module): super(CapabilityFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_capability_facts(self): hosts_capability_facts = dict() @@ -193,12 +199,24 @@ def gather_host_capability_facts(self): encryptionVFlashSupported=hc.encryptionVFlashSupported, encryptionCBRCSupported=hc.encryptionCBRCSupported, encryptionHBRSupported=hc.encryptionHBRSupported, - supportedVmfsMajorVersion=[version for version in hc.supportedVmfsMajorVersion], - vmDirectPathGen2UnsupportedReason=[reason for reason in hc.vmDirectPathGen2UnsupportedReason], - ftCompatibilityIssues=[issue for issue in hc.ftCompatibilityIssues], - checkpointFtCompatibilityIssues=[issue for issue in hc.checkpointFtCompatibilityIssues], - smpFtCompatibilityIssues=[issue for issue in hc.smpFtCompatibilityIssues], - 
replayCompatibilityIssues=[issue for issue in hc.replayCompatibilityIssues], + supportedVmfsMajorVersion=[ + version for version in hc.supportedVmfsMajorVersion + ], + vmDirectPathGen2UnsupportedReason=[ + reason for reason in hc.vmDirectPathGen2UnsupportedReason + ], + ftCompatibilityIssues=[ + issue for issue in hc.ftCompatibilityIssues + ], + checkpointFtCompatibilityIssues=[ + issue for issue in hc.checkpointFtCompatibilityIssues + ], + smpFtCompatibilityIssues=[ + issue for issue in hc.smpFtCompatibilityIssues + ], + replayCompatibilityIssues=[ + issue for issue in hc.replayCompatibilityIssues + ], ) return hosts_capability_facts @@ -206,21 +224,21 @@ def gather_host_capability_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_capability_manager = CapabilityFactsManager(module) - module.exit_json(changed=False, - hosts_capability_facts=host_capability_manager.gather_host_capability_facts()) + module.exit_json( + changed=False, + hosts_capability_facts=host_capability_manager.gather_host_capability_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_config_facts.py b/plugins/modules/_vmware_host_config_facts.py index 73f79d7..2963f4b 100644 --- a/plugins/modules/_vmware_host_config_facts.py +++ b/plugins/modules/_vmware_host_config_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_config_facts deprecated: @@ -44,9 +45,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about all ESXi Host in given Cluster vmware_host_config_facts: hostname: '{{ vcenter_hostname }}' @@ -62,9 +63,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_facts: description: - dict with hostname as key and dict with host config facts @@ -79,18 +80,23 @@ "BufferCache.SoftMaxDirty": 15, } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareConfigFactsManager(PyVmomi): def __init__(self, module): super(VmwareConfigFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + 
cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_facts(self): hosts_facts = {} @@ -105,20 +111,20 @@ def gather_host_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_host_config = VmwareConfigFactsManager(module) - module.exit_json(changed=False, hosts_facts=vmware_host_config.gather_host_facts()) + module.exit_json( + changed=False, hosts_facts=vmware_host_config.gather_host_facts() + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_dns_facts.py b/plugins/modules/_vmware_host_dns_facts.py index 4feb799..e4441ec 100644 --- a/plugins/modules/_vmware_host_dns_facts.py +++ b/plugins/modules/_vmware_host_dns_facts.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_dns_facts deprecated: @@ -46,9 +47,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather DNS facts about all ESXi Hosts in given Cluster vmware_host_dns_facts: hostname: '{{ vcenter_hostname }}' @@ -64,9 +65,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_dns_facts: description: metadata about DNS config from given cluster / host system returned: always @@ -85,30 +86,37 @@ "virtual_nic_device": "vmk0" } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareDnsFactsManager(PyVmomi): def __init__(self, module): super(VmwareDnsFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_dns_facts(self): hosts_facts = {} for host in self.hosts: host_facts = {} dns_config = host.config.network.dnsConfig - host_facts['dhcp'] = dns_config.dhcp - host_facts['virtual_nic_device'] = dns_config.virtualNicDevice - host_facts['host_name'] = dns_config.hostName - host_facts['domain_name'] = dns_config.domainName - host_facts['ip_address'] = [ip for ip in dns_config.address] - host_facts['search_domain'] = [domain for domain in dns_config.searchDomain] + host_facts["dhcp"] = dns_config.dhcp + 
host_facts["virtual_nic_device"] = dns_config.virtualNicDevice + host_facts["host_name"] = dns_config.hostName + host_facts["domain_name"] = dns_config.domainName + host_facts["ip_address"] = [ip for ip in dns_config.address] + host_facts["search_domain"] = [ + domain for domain in dns_config.searchDomain + ] hosts_facts[host.name] = host_facts return hosts_facts @@ -116,20 +124,20 @@ def gather_dns_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_dns_config = VmwareDnsFactsManager(module) - module.exit_json(changed=False, hosts_dns_facts=vmware_dns_config.gather_dns_facts()) + module.exit_json( + changed=False, hosts_dns_facts=vmware_dns_config.gather_dns_facts() + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_feature_facts.py b/plugins/modules/_vmware_host_feature_facts.py index c75fcac..4d44c79 100644 --- a/plugins/modules/_vmware_host_feature_facts.py +++ b/plugins/modules/_vmware_host_feature_facts.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_feature_facts deprecated: @@ -43,9 +44,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather feature capability facts about all ESXi Hosts in given Cluster vmware_host_feature_facts: hostname: '{{ vcenter_hostname }}' @@ -72,9 +73,9 @@ that: - ssbd|int == 1 when: ssbd is defined -''' +""" -RETURN = r''' +RETURN = r""" hosts_feature_facts: description: metadata about host's feature capability information returned: always @@ -93,18 +94,23 @@ }, ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class FeatureCapabilityFactsManager(PyVmomi): def __init__(self, module): super(FeatureCapabilityFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_feature_facts(self): host_feature_facts = dict() @@ -113,9 +119,9 @@ def gather_host_feature_facts(self): capability = [] for fc in host_feature_capabilities: temp_dict = { - 'key': fc.key, - 'feature_name': fc.featureName, - 'value': fc.value, + "key": fc.key, + "feature_name": 
fc.featureName, + "value": fc.value, } capability.append(temp_dict) @@ -127,21 +133,21 @@ def gather_host_feature_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_capability_manager = FeatureCapabilityFactsManager(module) - module.exit_json(changed=False, - hosts_feature_facts=host_capability_manager.gather_host_feature_facts()) + module.exit_json( + changed=False, + hosts_feature_facts=host_capability_manager.gather_host_feature_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_firewall_facts.py b/plugins/modules/_vmware_host_firewall_facts.py index 276f9ed..a29db14 100644 --- a/plugins/modules/_vmware_host_firewall_facts.py +++ b/plugins/modules/_vmware_host_firewall_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_firewall_facts deprecated: @@ -44,9 +45,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather firewall facts about all ESXi Host in given Cluster vmware_host_firewall_facts: hostname: '{{ vcenter_hostname }}' @@ -62,9 +63,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_firewall_facts: description: metadata about host's firewall configuration returned: on success @@ -97,42 +98,50 @@ }, ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class FirewallFactsManager(PyVmomi): def __init__(self, module): super(FirewallFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) @staticmethod def normalize_rule_set(rule_obj): rule_dict = dict() - rule_dict['key'] = rule_obj.key - rule_dict['service'] = rule_obj.service - rule_dict['enabled'] = rule_obj.enabled - rule_dict['rule'] = [] + rule_dict["key"] = rule_obj.key + rule_dict["service"] = rule_obj.service + rule_dict["enabled"] = rule_obj.enabled + rule_dict["rule"] = [] for rule in rule_obj.rule: rule_set_dict = dict() - rule_set_dict['port'] = rule.port - rule_set_dict['end_port'] = rule.endPort - rule_set_dict['direction'] = rule.direction - 
rule_set_dict['port_type'] = rule.portType - rule_set_dict['protocol'] = rule.protocol - rule_dict['rule'].append(rule_set_dict) + rule_set_dict["port"] = rule.port + rule_set_dict["end_port"] = rule.endPort + rule_set_dict["direction"] = rule.direction + rule_set_dict["port_type"] = rule.portType + rule_set_dict["protocol"] = rule.protocol + rule_dict["rule"].append(rule_set_dict) allowed_host = rule_obj.allowedHosts rule_allow_host = dict() - rule_allow_host['ip_address'] = [ip for ip in allowed_host.ipAddress] - rule_allow_host['ip_network'] = [ip.network + "/" + str(ip.prefixLength) for ip in allowed_host.ipNetwork] - rule_allow_host['all_ip'] = allowed_host.allIp - rule_dict['allowed_hosts'] = rule_allow_host + rule_allow_host["ip_address"] = [ip for ip in allowed_host.ipAddress] + rule_allow_host["ip_network"] = [ + ip.network + "/" + str(ip.prefixLength) + for ip in allowed_host.ipNetwork + ] + rule_allow_host["all_ip"] = allowed_host.allIp + rule_dict["allowed_hosts"] = rule_allow_host return rule_dict def gather_host_firewall_facts(self): @@ -142,27 +151,30 @@ def gather_host_firewall_facts(self): if firewall_system: hosts_firewall_facts[host.name] = [] for rule_set_obj in firewall_system.firewallInfo.ruleset: - hosts_firewall_facts[host.name].append(self.normalize_rule_set(rule_obj=rule_set_obj)) + hosts_firewall_facts[host.name].append( + self.normalize_rule_set(rule_obj=rule_set_obj) + ) return hosts_firewall_facts def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_host_firewall = FirewallFactsManager(module) - module.exit_json(changed=False, hosts_firewall_facts=vmware_host_firewall.gather_host_firewall_facts()) + module.exit_json( + changed=False, + hosts_firewall_facts=vmware_host_firewall.gather_host_firewall_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_ntp_facts.py b/plugins/modules/_vmware_host_ntp_facts.py index 3e4b573..6944ceb 100644 --- a/plugins/modules/_vmware_host_ntp_facts.py +++ b/plugins/modules/_vmware_host_ntp_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_ntp_facts deprecated: @@ -46,9 +47,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather NTP facts about all ESXi Host in the given Cluster vmware_host_ntp_facts: hostname: '{{ vcenter_hostname }}' @@ -66,9 +67,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_ntp -''' +""" -RETURN = r''' +RETURN = r""" hosts_ntp_facts: description: - dict with hostname as key and dict with NTP facts as value @@ -85,18 +86,23 @@ } ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from 
ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareNtpFactManager(PyVmomi): def __init__(self, module): super(VmwareNtpFactManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_ntp_facts(self): hosts_facts = {} @@ -110,7 +116,10 @@ def gather_ntp_facts(self): time_zone_name=host_date_time_manager.dateTimeInfo.timeZone.name, time_zone_description=host_date_time_manager.dateTimeInfo.timeZone.description, time_zone_gmt_offset=host_date_time_manager.dateTimeInfo.timeZone.gmtOffset, - ntp_servers=[ntp_server for ntp_server in host_date_time_manager.dateTimeInfo.ntpConfig.server] + ntp_servers=[ + ntp_server + for ntp_server in host_date_time_manager.dateTimeInfo.ntpConfig.server + ], ) ) hosts_facts[host.name] = host_ntp_facts @@ -120,20 +129,21 @@ def gather_ntp_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) vmware_host_ntp_config = VmwareNtpFactManager(module) - module.exit_json(changed=False, hosts_ntp_facts=vmware_host_ntp_config.gather_ntp_facts()) + module.exit_json( + changed=False, + hosts_ntp_facts=vmware_host_ntp_config.gather_ntp_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_package_facts.py b/plugins/modules/_vmware_host_package_facts.py index 7605471..6940066 100644 --- a/plugins/modules/_vmware_host_package_facts.py +++ b/plugins/modules/_vmware_host_package_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_package_facts deprecated: @@ -46,9 +47,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about all ESXi Host in given Cluster vmware_host_package_facts: hostname: '{{ vcenter_hostname }}' @@ -66,27 +67,32 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_packages -''' +""" -RETURN = r''' +RETURN = r""" hosts_package_facts: description: - dict with hostname as key and dict with package facts as value returned: hosts_package_facts type: dict sample: { "hosts_package_facts": { "localhost.localdomain": []}} -''' +""" from ansible.module_utils.basic import AnsibleModule -from 
ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwarePackageManager(PyVmomi): def __init__(self, module): super(VmwarePackageManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_package_facts(self): hosts_facts = {} @@ -96,16 +102,18 @@ def gather_package_facts(self): if host_pkg_mgr: pkgs = host_pkg_mgr.FetchSoftwarePackages() for pkg in pkgs: - host_package_facts.append(dict(name=pkg.name, - version=pkg.version, - vendor=pkg.vendor, - summary=pkg.summary, - description=pkg.description, - acceptance_level=pkg.acceptanceLevel, - maintenance_mode_required=pkg.maintenanceModeRequired, - creation_date=pkg.creationDate, - ) - ) + host_package_facts.append( + dict( + name=pkg.name, + version=pkg.version, + vendor=pkg.vendor, + summary=pkg.summary, + description=pkg.description, + acceptance_level=pkg.acceptanceLevel, + maintenance_mode_required=pkg.maintenanceModeRequired, + creation_date=pkg.creationDate, + ) + ) hosts_facts[host.name] = host_package_facts return hosts_facts @@ -113,20 +121,21 @@ def gather_package_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) vmware_host_package_config = VmwarePackageManager(module) - module.exit_json(changed=False, hosts_package_facts=vmware_host_package_config.gather_package_facts()) + module.exit_json( + changed=False, + hosts_package_facts=vmware_host_package_config.gather_package_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_service_facts.py b/plugins/modules/_vmware_host_service_facts.py index 1d9ce05..30906ff 100644 --- a/plugins/modules/_vmware_host_service_facts.py +++ b/plugins/modules/_vmware_host_service_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_service_facts deprecated: @@ -47,9 +48,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about all ESXi Host in given Cluster vmware_host_service_facts: hostname: '{{ vcenter_hostname }}' @@ -67,9 +68,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_services -''' +""" -RETURN = r''' +RETURN = r""" host_service_facts: 
description: - dict with hostname as key and dict with host service config facts @@ -99,18 +100,23 @@ }, ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareServiceManager(PyVmomi): def __init__(self, module): super(VmwareServiceManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_facts(self): hosts_facts = {} @@ -128,8 +134,12 @@ def gather_host_facts(self): uninstallable=service.uninstallable, running=service.running, policy=service.policy, - source_package_name=service.sourcePackage.sourcePackageName if service.sourcePackage else None, - source_package_desc=service.sourcePackage.description if service.sourcePackage else None, + source_package_name=service.sourcePackage.sourcePackageName + if service.sourcePackage + else None, + source_package_desc=service.sourcePackage.description + if service.sourcePackage + else None, ) ) hosts_facts[host.name] = host_service_facts @@ -139,20 +149,21 @@ def gather_host_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) vmware_host_service_config = VmwareServiceManager(module) - module.exit_json(changed=False, host_service_facts=vmware_host_service_config.gather_host_facts()) + module.exit_json( + changed=False, + host_service_facts=vmware_host_service_config.gather_host_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_ssl_facts.py b/plugins/modules/_vmware_host_ssl_facts.py index eb0180c..00ac323 100644 --- a/plugins/modules/_vmware_host_ssl_facts.py +++ b/plugins/modules/_vmware_host_ssl_facts.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_ssl_facts deprecated: @@ -47,9 +48,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster vmware_host_ssl_facts: hostname: '{{ vcenter_hostname }}' @@ -82,9 +83,9 @@ esxi_password: '{{ esxi_password }}' esxi_ssl_thumbprint: '{{ ssl_thumbprint }}' state: present -''' +""" -RETURN = r''' +RETURN = r""" host_ssl_facts: description: - dict with hostname 
as key and dict with SSL thumbprint related facts @@ -101,31 +102,42 @@ ] } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VMwareHostSslManager(PyVmomi): def __init__(self, module): super(VMwareHostSslManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) self.hosts_facts = {} def gather_ssl_facts(self): for host in self.hosts: - self.hosts_facts[host.name] = dict(principal='', - owner_tag='', - ssl_thumbprints=[]) + self.hosts_facts[host.name] = dict( + principal="", owner_tag="", ssl_thumbprints=[] + ) host_ssl_info_mgr = host.config.sslThumbprintInfo if host_ssl_info_mgr: - self.hosts_facts[host.name]['principal'] = host_ssl_info_mgr.principal - self.hosts_facts[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag - self.hosts_facts[host.name]['ssl_thumbprints'] = [i for i in host_ssl_info_mgr.sslThumbprints] + self.hosts_facts[host.name][ + "principal" + ] = host_ssl_info_mgr.principal + self.hosts_facts[host.name][ + "owner_tag" + ] = host_ssl_info_mgr.ownerTag + self.hosts_facts[host.name]["ssl_thumbprints"] = [ + i for i in host_ssl_info_mgr.sslThumbprints + ] self.module.exit_json(changed=False, host_ssl_facts=self.hosts_facts) @@ -133,15 +145,12 @@ def gather_ssl_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str'), - esxi_hostname=dict(type='str'), + cluster_name=dict(type="str"), esxi_hostname=dict(type="str") ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) diff --git a/plugins/modules/_vmware_host_vmhba_facts.py b/plugins/modules/_vmware_host_vmhba_facts.py index e2ef093..b1e1951 100644 --- a/plugins/modules/_vmware_host_vmhba_facts.py +++ b/plugins/modules/_vmware_host_vmhba_facts.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_vmhba_facts deprecated: @@ -49,9 +50,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about vmhbas of all ESXi Host in the given Cluster vmware_host_vmhba_facts: hostname: '{{ vcenter_hostname }}' @@ -69,9 +70,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmhbas -''' +""" -RETURN = r''' +RETURN = r""" hosts_vmhbas_facts: description: - dict with hostname as key and dict with vmhbas facts as value. 
@@ -123,19 +124,25 @@ ], } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class HostVmhbaMgr(PyVmomi): """Class to manage vmhba facts""" + def __init__(self, module): super(HostVmhbaMgr, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") @@ -147,47 +154,55 @@ def gather_host_vmhba_facts(self): host_st_system = host.configManager.storageSystem if host_st_system: device_info = host_st_system.storageDeviceInfo - host_vmhba_facts['vmhba_details'] = [] + host_vmhba_facts["vmhba_details"] = [] for hba in device_info.hostBusAdapter: hba_facts = dict() if hba.pci: - hba_facts['location'] = hba.pci + hba_facts["location"] = hba.pci for pci_device in host.hardware.pciDevice: if pci_device.id == hba.pci: - hba_facts['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName + hba_facts["adapter"] = ( + pci_device.vendorName + + " " + + pci_device.deviceName + ) break else: - hba_facts['location'] = 'PCI' - hba_facts['device'] = hba.device + hba_facts["location"] = "PCI" + hba_facts["device"] = hba.device # contains type as string in format of 'key-vim.host.FibreChannelHba-vmhba1' hba_type = hba.key.split(".")[-1].split("-")[0] - if hba_type == 'SerialAttachedHba': - hba_facts['type'] = 'SAS' - elif hba_type == 'FibreChannelHba': - hba_facts['type'] = 'Fibre Channel' + if hba_type == "SerialAttachedHba": + hba_facts["type"] = "SAS" + elif hba_type == "FibreChannelHba": + hba_facts["type"] = "Fibre Channel" else: - hba_facts['type'] = hba_type - hba_facts['bus'] = hba.bus - hba_facts['status'] = hba.status - hba_facts['model'] = hba.model - hba_facts['driver'] = hba.driver + hba_facts["type"] = hba_type + hba_facts["bus"] = hba.bus + hba_facts["status"] = hba.status + hba_facts["model"] = hba.model + hba_facts["driver"] = hba.driver try: - hba_facts['node_wwn'] = self.format_number(hba.nodeWorldWideName) + hba_facts["node_wwn"] = self.format_number( + hba.nodeWorldWideName + ) except AttributeError: pass try: - hba_facts['port_wwn'] = self.format_number(hba.portWorldWideName) + hba_facts["port_wwn"] = self.format_number( + hba.portWorldWideName + ) except AttributeError: pass try: - hba_facts['port_type'] = hba.portType + hba_facts["port_type"] = hba.portType except AttributeError: pass try: - hba_facts['speed'] = hba.speed + hba_facts["speed"] = hba.speed except AttributeError: pass - host_vmhba_facts['vmhba_details'].append(hba_facts) + host_vmhba_facts["vmhba_details"].append(hba_facts) hosts_vmhba_facts[host.name] = host_vmhba_facts return hosts_vmhba_facts @@ -196,27 +211,28 @@ def gather_host_vmhba_facts(self): def format_number(number): """Format number""" string = str(number) - return ':'.join(a + b for a, b in zip(string[::2], string[1::2])) + return ":".join(a + b for a, b in zip(string[::2], string[1::2])) def main(): """Main""" argument_spec = vmware_argument_spec() 
argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_vmhba_mgr = HostVmhbaMgr(module) - module.exit_json(changed=False, hosts_vmhbas_facts=host_vmhba_mgr.gather_host_vmhba_facts()) + module.exit_json( + changed=False, + hosts_vmhbas_facts=host_vmhba_mgr.gather_host_vmhba_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_host_vmnic_facts.py b/plugins/modules/_vmware_host_vmnic_facts.py index 01db493..6a40341 100644 --- a/plugins/modules/_vmware_host_vmnic_facts.py +++ b/plugins/modules/_vmware_host_vmnic_facts.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_vmnic_facts deprecated: @@ -67,9 +68,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about vmnics of all ESXi Host in the given Cluster vmware_host_vmnic_facts: hostname: '{{ vcenter_hostname }}' @@ -87,9 +88,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmnics -''' +""" -RETURN = r''' +RETURN = r""" hosts_vmnics_facts: description: - dict with hostname as key and dict with vmnics facts as value. 
@@ -149,7 +150,7 @@ } } } -''' +""" try: from pyVmomi import vim @@ -157,19 +158,26 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + get_all_objs, +) class HostVmnicMgr(PyVmomi): """Class to manage vmnic facts""" + def __init__(self, module): super(HostVmnicMgr, self).__init__(module) - self.capabilities = self.params.get('capabilities') - self.directpath_io = self.params.get('directpath_io') - self.sriov = self.params.get('sriov') - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + self.capabilities = self.params.get("capabilities") + self.directpath_io = self.params.get("directpath_io") + self.sriov = self.params.get("sriov") + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") @@ -191,89 +199,151 @@ def gather_host_vmnic_facts(self): """Gather vmnic facts""" hosts_vmnic_facts = {} for host in self.hosts: - host_vmnic_facts = dict(all=[], available=[], used=[], vswitch=dict(), dvswitch=dict()) + host_vmnic_facts = dict( + all=[], available=[], used=[], vswitch=dict(), dvswitch=dict() + ) host_nw_system = host.configManager.networkSystem if host_nw_system: nw_config = host_nw_system.networkConfig - vmnics = [pnic.device for pnic in nw_config.pnic if pnic.device.startswith('vmnic')] - host_vmnic_facts['all'] = [pnic.device for pnic in nw_config.pnic] - host_vmnic_facts['num_vmnics'] = len(vmnics) - host_vmnic_facts['vmnic_details'] = [] + vmnics = [ + pnic.device + for pnic in nw_config.pnic + if pnic.device.startswith("vmnic") + ] + host_vmnic_facts["all"] = [ + pnic.device for pnic in nw_config.pnic + ] + host_vmnic_facts["num_vmnics"] = len(vmnics) + host_vmnic_facts["vmnic_details"] = [] for pnic in host.config.network.pnic: pnic_facts = dict() - if pnic.device.startswith('vmnic'): + if pnic.device.startswith("vmnic"): if pnic.pci: - pnic_facts['location'] = pnic.pci + pnic_facts["location"] = pnic.pci for pci_device in host.hardware.pciDevice: if pci_device.id == pnic.pci: - pnic_facts['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName + pnic_facts["adapter"] = ( + pci_device.vendorName + + " " + + pci_device.deviceName + ) break else: - pnic_facts['location'] = 'PCI' - pnic_facts['device'] = pnic.device - pnic_facts['driver'] = pnic.driver + pnic_facts["location"] = "PCI" + pnic_facts["device"] = pnic.device + pnic_facts["driver"] = pnic.driver if pnic.linkSpeed: - pnic_facts['status'] = 'Connected' - pnic_facts['actual_speed'] = pnic.linkSpeed.speedMb - pnic_facts['actual_duplex'] = 'Full Duplex' if pnic.linkSpeed.duplex else 'Half Duplex' + pnic_facts["status"] = "Connected" + pnic_facts["actual_speed"] = pnic.linkSpeed.speedMb + pnic_facts["actual_duplex"] = ( + "Full Duplex" + if pnic.linkSpeed.duplex + else "Half Duplex" + ) else: - pnic_facts['status'] = 'Disconnected' - pnic_facts['actual_speed'] = 'N/A' - pnic_facts['actual_duplex'] = 'N/A' + pnic_facts["status"] = "Disconnected" + pnic_facts["actual_speed"] = "N/A" + 
pnic_facts["actual_duplex"] = "N/A" if pnic.spec.linkSpeed: - pnic_facts['configured_speed'] = pnic.spec.linkSpeed.speedMb - pnic_facts['configured_duplex'] = 'Full Duplex' if pnic.spec.linkSpeed.duplex else 'Half Duplex' + pnic_facts[ + "configured_speed" + ] = pnic.spec.linkSpeed.speedMb + pnic_facts["configured_duplex"] = ( + "Full Duplex" + if pnic.spec.linkSpeed.duplex + else "Half Duplex" + ) else: - pnic_facts['configured_speed'] = 'Auto negotiate' - pnic_facts['configured_duplex'] = 'Auto negotiate' - pnic_facts['mac'] = pnic.mac + pnic_facts["configured_speed"] = "Auto negotiate" + pnic_facts["configured_duplex"] = "Auto negotiate" + pnic_facts["mac"] = pnic.mac # General NIC capabilities if self.capabilities: - pnic_facts['nioc_status'] = 'Allowed' if pnic.resourcePoolSchedulerAllowed else 'Not allowed' - pnic_facts['auto_negotiation_supported'] = pnic.autoNegotiateSupported - pnic_facts['wake_on_lan_supported'] = pnic.wakeOnLanSupported + pnic_facts["nioc_status"] = ( + "Allowed" + if pnic.resourcePoolSchedulerAllowed + else "Not allowed" + ) + pnic_facts[ + "auto_negotiation_supported" + ] = pnic.autoNegotiateSupported + pnic_facts[ + "wake_on_lan_supported" + ] = pnic.wakeOnLanSupported # DirectPath I/O and SR-IOV capabilities and configuration if self.directpath_io: - pnic_facts['directpath_io_supported'] = pnic.vmDirectPathGen2Supported + pnic_facts[ + "directpath_io_supported" + ] = pnic.vmDirectPathGen2Supported if self.directpath_io or self.sriov: if pnic.pci: - for pci_device in host.configManager.pciPassthruSystem.pciPassthruInfo: + for ( + pci_device + ) in ( + host.configManager.pciPassthruSystem.pciPassthruInfo + ): if pci_device.id == pnic.pci: if self.directpath_io: - pnic_facts['passthru_enabled'] = pci_device.passthruEnabled - pnic_facts['passthru_capable'] = pci_device.passthruCapable - pnic_facts['passthru_active'] = pci_device.passthruActive + pnic_facts[ + "passthru_enabled" + ] = pci_device.passthruEnabled + pnic_facts[ + "passthru_capable" + ] = pci_device.passthruCapable + pnic_facts[ + "passthru_active" + ] = pci_device.passthruActive if self.sriov: try: if pci_device.sriovCapable: - pnic_facts['sriov_status'] = ( - 'Enabled' if pci_device.sriovEnabled else 'Disabled' + pnic_facts[ + "sriov_status" + ] = ( + "Enabled" + if pci_device.sriovEnabled + else "Disabled" ) - pnic_facts['sriov_active'] = \ - pci_device.sriovActive - pnic_facts['sriov_virt_functions'] = \ + pnic_facts[ + "sriov_active" + ] = pci_device.sriovActive + pnic_facts[ + "sriov_virt_functions" + ] = ( pci_device.numVirtualFunction - pnic_facts['sriov_virt_functions_requested'] = \ + ) + pnic_facts[ + "sriov_virt_functions_requested" + ] = ( pci_device.numVirtualFunctionRequested - pnic_facts['sriov_virt_functions_supported'] = \ + ) + pnic_facts[ + "sriov_virt_functions_supported" + ] = ( pci_device.maxVirtualFunctionSupported + ) else: - pnic_facts['sriov_status'] = 'Not supported' + pnic_facts[ + "sriov_status" + ] = "Not supported" except AttributeError: - pnic_facts['sriov_status'] = 'Not supported' - host_vmnic_facts['vmnic_details'].append(pnic_facts) + pnic_facts[ + "sriov_status" + ] = "Not supported" + host_vmnic_facts["vmnic_details"].append(pnic_facts) vswitch_vmnics = [] proxy_switch_vmnics = [] if nw_config.vswitch: for vswitch in nw_config.vswitch: - host_vmnic_facts['vswitch'][vswitch.name] = [] + host_vmnic_facts["vswitch"][vswitch.name] = [] # Workaround for "AttributeError: 'NoneType' object has no attribute 'nicDevice'" # this issue doesn't happen every time; 
vswitch.spec.bridge.nicDevice exists! try: for vnic in vswitch.spec.bridge.nicDevice: vswitch_vmnics.append(vnic) - host_vmnic_facts['vswitch'][vswitch.name].append(vnic) + host_vmnic_facts["vswitch"][ + vswitch.name + ].append(vnic) except AttributeError: pass @@ -281,15 +351,21 @@ def gather_host_vmnic_facts(self): for proxy_config in nw_config.proxySwitch: dvs_obj = self.find_dvs_by_uuid(uuid=proxy_config.uuid) if dvs_obj: - host_vmnic_facts['dvswitch'][dvs_obj.name] = [] + host_vmnic_facts["dvswitch"][dvs_obj.name] = [] for proxy_nic in proxy_config.spec.backing.pnicSpec: proxy_switch_vmnics.append(proxy_nic.pnicDevice) if dvs_obj: - host_vmnic_facts['dvswitch'][dvs_obj.name].append(proxy_nic.pnicDevice) + host_vmnic_facts["dvswitch"][ + dvs_obj.name + ].append(proxy_nic.pnicDevice) used_vmics = proxy_switch_vmnics + vswitch_vmnics - host_vmnic_facts['used'] = used_vmics - host_vmnic_facts['available'] = [pnic.device for pnic in nw_config.pnic if pnic.device not in used_vmics] + host_vmnic_facts["used"] = used_vmics + host_vmnic_facts["available"] = [ + pnic.device + for pnic in nw_config.pnic + if pnic.device not in used_vmics + ] hosts_vmnic_facts[host.name] = host_vmnic_facts return hosts_vmnic_facts @@ -299,23 +375,24 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - capabilities=dict(type='bool', required=False, default=False), - directpath_io=dict(type='bool', required=False, default=False), - sriov=dict(type='bool', required=False, default=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + capabilities=dict(type="bool", required=False, default=False), + directpath_io=dict(type="bool", required=False, default=False), + sriov=dict(type="bool", required=False, default=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_vmnic_mgr = HostVmnicMgr(module) - module.exit_json(changed=False, hosts_vmnics_facts=host_vmnic_mgr.gather_host_vmnic_facts()) + module.exit_json( + changed=False, + hosts_vmnics_facts=host_vmnic_mgr.gather_host_vmnic_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_local_role_facts.py b/plugins/modules/_vmware_local_role_facts.py index 19c3bd7..815fc29 100644 --- a/plugins/modules/_vmware_local_role_facts.py +++ b/plugins/modules/_vmware_local_role_facts.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_local_role_facts deprecated: @@ -37,9 +38,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather facts about local role from an ESXi vmware_local_role_facts: hostname: '{{ esxi_hostname }}' @@ -52,9 +53,9 @@ admin_priv: "{{ fact_details.local_role_facts['Admin']['privileges'] }}" - debug: msg: "{{ admin_priv }}" -''' +""" -RETURN = r''' +RETURN = r""" local_role_facts: description: Facts about role present on 
ESXi host returned: always @@ -93,14 +94,18 @@ "role_system": true } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VMwareLocalRoleFacts(PyVmomi): """Class to manage local role facts""" + def __init__(self, module): super(VMwareLocalRoleFacts, self).__init__(module) self.module = module @@ -109,7 +114,8 @@ def __init__(self, module): if self.content.authorizationManager is None: self.module.fail_json( msg="Failed to get local authorization manager settings.", - details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.params['hostname'] + details="It seems that '%s' is a vCenter server instead of an ESXi server" + % self.params["hostname"], ) def gather_local_role_facts(self): @@ -133,12 +139,13 @@ def gather_local_role_facts(self): def main(): """Main""" argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_local_role_facts = VMwareLocalRoleFacts(module) vmware_local_role_facts.gather_local_role_facts() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_local_user_facts.py b/plugins/modules/_vmware_local_user_facts.py index 3daf57c..139d461 100644 --- a/plugins/modules/_vmware_local_user_facts.py +++ b/plugins/modules/_vmware_local_user_facts.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_local_user_facts deprecated: @@ -39,9 +40,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts about all Users on given ESXi host system vmware_local_user_facts: hostname: '{{ esxi_hostname }}' @@ -49,9 +50,9 @@ password: '{{ esxi_password }}' delegate_to: localhost register: all_user_facts -''' +""" -RETURN = r''' +RETURN = r""" local_user_facts: description: metadata about all local users returned: always @@ -80,7 +81,7 @@ "shell_access": false }, ] -''' +""" try: from pyVmomi import vmodl @@ -88,48 +89,58 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native class VMwareUserFactsManager(PyVmomi): """Class to manage local user facts""" + def __init__(self, module): super(VMwareUserFactsManager, self).__init__(module) if self.is_vcenter(): self.module.fail_json( msg="Failed to get local account manager settings.", - details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.module.params['hostname'] + details="It seems that '%s' is a vCenter server instead of an ESXi server" + % self.module.params["hostname"], ) def gather_user_facts(self): 
"""Gather facts about local users""" results = dict(changed=False, local_user_facts=[]) - search_string = '' + search_string = "" exact_match = False find_users = True find_groups = False user_accounts = self.content.userDirectory.RetrieveUserGroups( - None, search_string, None, None, exact_match, find_users, find_groups + None, + search_string, + None, + None, + exact_match, + find_users, + find_groups, ) if user_accounts: for user in user_accounts: temp_user = dict() # NOTE: the properties full_name, principal, and user_group are deprecated starting from Ansible v2.12 - temp_user['full_name'] = user.fullName - temp_user['principal'] = user.principal - temp_user['user_group'] = user.group - temp_user['user_name'] = user.principal - temp_user['description'] = user.fullName - temp_user['group'] = user.group - temp_user['user_id'] = user.id - temp_user['shell_access'] = user.shellAccess - temp_user['role'] = None + temp_user["full_name"] = user.fullName + temp_user["principal"] = user.principal + temp_user["user_group"] = user.group + temp_user["user_name"] = user.principal + temp_user["description"] = user.fullName + temp_user["group"] = user.group + temp_user["user_id"] = user.id + temp_user["shell_access"] = user.shellAccess + temp_user["role"] = None try: permissions = self.content.authorizationManager.RetrieveEntityPermissions( - entity=self.content.rootFolder, - inherited=False + entity=self.content.rootFolder, inherited=False ) except vmodl.fault.ManagedObjectNotFound as not_found: self.module.fail_json( @@ -137,10 +148,13 @@ def gather_user_facts(self): ) for permission in permissions: if permission.principal == user.principal: - temp_user['role'] = self.get_role_name(permission.roleId, self.content.authorizationManager.roleList) + temp_user["role"] = self.get_role_name( + permission.roleId, + self.content.authorizationManager.roleList, + ) break - results['local_user_facts'].append(temp_user) + results["local_user_facts"].append(temp_user) self.module.exit_json(**results) @staticmethod @@ -149,13 +163,13 @@ def get_role_name(role_id, role_list): role_name = None # Default role: No access if role_id == -5: - role_name = 'no-access' + role_name = "no-access" # Default role: Read-only elif role_id == -2: - role_name = 'read-only' + role_name = "read-only" # Default role: Administrator elif role_id == -1: - role_name = 'admin' + role_name = "admin" # Custom roles else: for role in role_list: @@ -168,11 +182,12 @@ def get_role_name(role_id, role_list): def main(): """Main""" argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_local_user_facts = VMwareUserFactsManager(module) vmware_local_user_facts.gather_user_facts() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_portgroup_facts.py b/plugins/modules/_vmware_portgroup_facts.py index a0df22d..57def9f 100644 --- a/plugins/modules/_vmware_portgroup_facts.py +++ b/plugins/modules/_vmware_portgroup_facts.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' 
+DOCUMENTATION = r""" --- module: vmware_portgroup_facts deprecated: @@ -54,9 +55,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather portgroup facts about all ESXi Host in given Cluster vmware_portgroup_facts: hostname: '{{ vcenter_hostname }}' @@ -72,9 +73,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_portgroup_facts: description: metadata about host's portgroup configuration returned: on success @@ -111,33 +112,39 @@ } ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class PortgroupFactsManager(PyVmomi): """Class to manage Port Group facts""" + def __init__(self, module): super(PortgroupFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") - self.policies = self.params.get('policies') + self.policies = self.params.get("policies") @staticmethod def normalize_pg_info(portgroup_obj, policy_facts): """Create Port Group information""" pg_info_dict = dict() spec = portgroup_obj.spec - pg_info_dict['portgroup'] = spec.name - pg_info_dict['vlan_id'] = spec.vlanId + pg_info_dict["portgroup"] = spec.name + pg_info_dict["vlan_id"] = spec.vlanId # NOTE: the property vswitch_name is deprecated starting from Ansible v2.12 - pg_info_dict['vswitch_name'] = spec.vswitchName - pg_info_dict['vswitch'] = spec.vswitchName + pg_info_dict["vswitch_name"] = spec.vswitchName + pg_info_dict["vswitch"] = spec.vswitchName if policy_facts: # Security facts @@ -145,54 +152,79 @@ def normalize_pg_info(portgroup_obj, policy_facts): promiscuous_mode = spec.policy.security.allowPromiscuous mac_changes = spec.policy.security.macChanges forged_transmits = spec.policy.security.forgedTransmits - pg_info_dict['security'] = ( - ["No override" if promiscuous_mode is None else promiscuous_mode, - "No override" if mac_changes is None else mac_changes, - "No override" if forged_transmits is None else forged_transmits] - ) + pg_info_dict["security"] = [ + "No override" + if promiscuous_mode is None + else promiscuous_mode, + "No override" if mac_changes is None else mac_changes, + "No override" + if forged_transmits is None + else forged_transmits, + ] else: - pg_info_dict['security'] = ["No override", "No override", "No override"] + pg_info_dict["security"] = [ + "No override", + "No override", + "No override", + ] # Traffic Shaping facts - if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None: - pg_info_dict['ts'] = portgroup_obj.spec.policy.shapingPolicy.enabled + if ( + spec.policy.shapingPolicy + and spec.policy.shapingPolicy.enabled is not None + ): + pg_info_dict[ + "ts" + ] = portgroup_obj.spec.policy.shapingPolicy.enabled else: - pg_info_dict['ts'] = "No override" + pg_info_dict["ts"] = "No override" # Teaming 
and failover facts if spec.policy.nicTeaming: if spec.policy.nicTeaming.policy is None: - pg_info_dict['lb'] = "No override" + pg_info_dict["lb"] = "No override" else: - pg_info_dict['lb'] = spec.policy.nicTeaming.policy + pg_info_dict["lb"] = spec.policy.nicTeaming.policy if spec.policy.nicTeaming.notifySwitches is None: - pg_info_dict['notify'] = "No override" + pg_info_dict["notify"] = "No override" else: - pg_info_dict['notify'] = spec.policy.nicTeaming.notifySwitches + pg_info_dict[ + "notify" + ] = spec.policy.nicTeaming.notifySwitches if spec.policy.nicTeaming.rollingOrder is None: - pg_info_dict['failback'] = "No override" + pg_info_dict["failback"] = "No override" else: - pg_info_dict['failback'] = not spec.policy.nicTeaming.rollingOrder + pg_info_dict[ + "failback" + ] = not spec.policy.nicTeaming.rollingOrder if spec.policy.nicTeaming.nicOrder is None: - pg_info_dict['failover_active'] = "No override" - pg_info_dict['failover_standby'] = "No override" + pg_info_dict["failover_active"] = "No override" + pg_info_dict["failover_standby"] = "No override" else: - pg_info_dict['failover_active'] = spec.policy.nicTeaming.nicOrder.activeNic - pg_info_dict['failover_standby'] = spec.policy.nicTeaming.nicOrder.standbyNic - if spec.policy.nicTeaming.failureCriteria and spec.policy.nicTeaming.failureCriteria.checkBeacon is None: - pg_info_dict['failure_detection'] = "No override" + pg_info_dict[ + "failover_active" + ] = spec.policy.nicTeaming.nicOrder.activeNic + pg_info_dict[ + "failover_standby" + ] = spec.policy.nicTeaming.nicOrder.standbyNic + if ( + spec.policy.nicTeaming.failureCriteria + and spec.policy.nicTeaming.failureCriteria.checkBeacon + is None + ): + pg_info_dict["failure_detection"] = "No override" else: if spec.policy.nicTeaming.failureCriteria.checkBeacon: - pg_info_dict['failure_detection'] = "beacon_probing" + pg_info_dict["failure_detection"] = "beacon_probing" else: - pg_info_dict['failure_detection'] = "link_status_only" + pg_info_dict["failure_detection"] = "link_status_only" else: - pg_info_dict['lb'] = "No override" - pg_info_dict['notify'] = "No override" - pg_info_dict['failback'] = "No override" - pg_info_dict['failover_active'] = "No override" - pg_info_dict['failover_standby'] = "No override" - pg_info_dict['failure_detection'] = "No override" + pg_info_dict["lb"] = "No override" + pg_info_dict["notify"] = "No override" + pg_info_dict["failback"] = "No override" + pg_info_dict["failover_active"] = "No override" + pg_info_dict["failover_standby"] = "No override" + pg_info_dict["failure_detection"] = "No override" return pg_info_dict @@ -204,7 +236,9 @@ def gather_host_portgroup_facts(self): hosts_pg_facts[host.name] = [] for portgroup in pgs: hosts_pg_facts[host.name].append( - self.normalize_pg_info(portgroup_obj=portgroup, policy_facts=self.policies) + self.normalize_pg_info( + portgroup_obj=portgroup, policy_facts=self.policies + ) ) return hosts_pg_facts @@ -213,21 +247,22 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - policies=dict(type='bool', required=False, default=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + policies=dict(type="bool", required=False, default=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", 
"esxi_hostname"]], + supports_check_mode=True, ) host_pg_mgr = PortgroupFactsManager(module) - module.exit_json(changed=False, hosts_portgroup_facts=host_pg_mgr.gather_host_portgroup_facts()) + module.exit_json( + changed=False, + hosts_portgroup_facts=host_pg_mgr.gather_host_portgroup_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_resource_pool_facts.py b/plugins/modules/_vmware_resource_pool_facts.py index 8262faa..e1e956f 100644 --- a/plugins/modules/_vmware_resource_pool_facts.py +++ b/plugins/modules/_vmware_resource_pool_facts.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_resource_pool_facts deprecated: @@ -32,9 +33,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather resource pool facts about all resource pools available vmware_resource_pool_facts: hostname: '{{ vcenter_hostname }}' @@ -42,9 +43,9 @@ password: '{{ vcenter_password }}' register: rp_facts delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" resource_pool_facts: description: metadata about resource pool configuration returned: on success @@ -80,7 +81,7 @@ "runtime_memory_unreserved_for_vm": 1007681536 }, ] -''' +""" try: from pyVmomi import vim @@ -88,7 +89,11 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + get_all_objs, +) class ResourcePoolFactsManager(PyVmomi): @@ -135,10 +140,14 @@ def gather_rp_facts(self): def main(): argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_rp_mgr = ResourcePoolFactsManager(module) - module.exit_json(changed=False, resource_pool_facts=vmware_rp_mgr.gather_rp_facts()) + module.exit_json( + changed=False, resource_pool_facts=vmware_rp_mgr.gather_rp_facts() + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_target_canonical_facts.py b/plugins/modules/_vmware_target_canonical_facts.py index 3b6e7e7..83c5c57 100644 --- a/plugins/modules/_vmware_target_canonical_facts.py +++ b/plugins/modules/_vmware_target_canonical_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_target_canonical_facts deprecated: @@ -53,9 +54,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Get Canonical name of particular target on particular ESXi host system vmware_target_canonical_facts: 
hostname: '{{ vcenter_hostname }}' @@ -80,7 +81,7 @@ password: '{{ vcenter_password }}' cluster_name: '{{ cluster_name }}' delegate_to: localhost -''' +""" RETURN = r""" canonical: @@ -116,15 +117,20 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class ScsiTargetFactsManager(PyVmomi): def __init__(self, module): super(ScsiTargetFactsManager, self).__init__(module) - cluster_name = self.module.params.get('cluster_name') - self.esxi_hostname = self.module.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=self.esxi_hostname) + cluster_name = self.module.params.get("cluster_name") + self.esxi_hostname = self.module.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=self.esxi_hostname + ) def gather_scsi_device_facts(self): """ @@ -134,7 +140,7 @@ def gather_scsi_device_facts(self): scsi_tgt_facts = {} target_lun_uuid = {} scsilun_canonical = {} - target_id = self.module.params['target_id'] + target_id = self.module.params["target_id"] for host in self.hosts: # Associate the scsiLun key with the canonicalName (NAA) @@ -142,19 +148,29 @@ def gather_scsi_device_facts(self): scsilun_canonical[scsilun.key] = scsilun.canonicalName # Associate target number with LUN uuid - for target in host.config.storageDevice.scsiTopology.adapter[0].target: + for target in host.config.storageDevice.scsiTopology.adapter[ + 0 + ].target: for lun in target.lun: target_lun_uuid[target.target] = lun.scsiLun - scsi_tgt_facts[host.name] = dict(scsilun_canonical=scsilun_canonical, - target_lun_uuid=target_lun_uuid) + scsi_tgt_facts[host.name] = dict( + scsilun_canonical=scsilun_canonical, + target_lun_uuid=target_lun_uuid, + ) if target_id is not None and self.esxi_hostname is not None: - canonical = '' - temp_lun_data = scsi_tgt_facts[self.esxi_hostname]['target_lun_uuid'] - if self.esxi_hostname in scsi_tgt_facts and \ - target_id in temp_lun_data: - temp_scsi_data = scsi_tgt_facts[self.esxi_hostname]['scsilun_canonical'] + canonical = "" + temp_lun_data = scsi_tgt_facts[self.esxi_hostname][ + "target_lun_uuid" + ] + if ( + self.esxi_hostname in scsi_tgt_facts + and target_id in temp_lun_data + ): + temp_scsi_data = scsi_tgt_facts[self.esxi_hostname][ + "scsilun_canonical" + ] temp_target = temp_lun_data[target_id] canonical = temp_scsi_data[temp_target] self.module.exit_json(changed=False, canonical=canonical) @@ -166,17 +182,15 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - target_id=dict(required=False, type='int'), - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + target_id=dict(required=False, type="int"), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) @@ -184,5 +198,5 @@ def main(): scsi_tgt_manager.gather_scsi_device_facts() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/_vmware_vmkernel_facts.py b/plugins/modules/_vmware_vmkernel_facts.py index 646185f..9c52e14 100644 --- 
a/plugins/modules/_vmware_vmkernel_facts.py +++ b/plugins/modules/_vmware_vmkernel_facts.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vmkernel_facts deprecated: @@ -46,9 +47,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather VMKernel facts about all ESXi Host in given Cluster vmware_vmkernel_facts: hostname: '{{ vcenter_hostname }}' @@ -66,9 +67,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmks -''' +""" -RETURN = r''' +RETURN = r""" host_vmk_facts: description: metadata about VMKernel present on given host system returned: success @@ -94,7 +95,7 @@ ] } -''' +""" try: from pyVmomi import vim, vmodl @@ -102,16 +103,21 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native class VmkernelFactsManager(PyVmomi): def __init__(self, module): super(VmkernelFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) self.service_type_vmks = dict() self.get_all_vmks_by_service_type() @@ -121,9 +127,13 @@ def get_all_vmks_by_service_type(self): """ for host in self.hosts: - self.service_type_vmks[host.name] = dict(vmotion=[], vsan=[], management=[], faultToleranceLogging=[]) + self.service_type_vmks[host.name] = dict( + vmotion=[], vsan=[], management=[], faultToleranceLogging=[] + ) for service_type in self.service_type_vmks[host.name].keys(): - vmks_list = self.query_service_type_for_vmks(host, service_type) + vmks_list = self.query_service_type_for_vmks( + host, service_type + ) self.service_type_vmks[host.name][service_type] = vmks_list def query_service_type_for_vmks(self, host_system, service_type): @@ -139,21 +149,35 @@ def query_service_type_for_vmks(self, host_system, service_type): vmks_list = [] query = None try: - query = host_system.configManager.virtualNicManager.QueryNetConfig(service_type) + query = host_system.configManager.virtualNicManager.QueryNetConfig( + service_type + ) except vim.fault.HostConfigFault as config_fault: - self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" - " host config fault : %s" % (service_type, to_native(config_fault.msg))) + self.module.fail_json( + msg="Failed to get all VMKs for service type %s due to" + " host config fault : %s" + % (service_type, to_native(config_fault.msg)) + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" - " invalid arguments : 
%s" % (service_type, to_native(invalid_argument.msg))) + self.module.fail_json( + msg="Failed to get all VMKs for service type %s due to" + " invalid arguments : %s" + % (service_type, to_native(invalid_argument.msg)) + ) except Exception as e: - self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" - "%s" % (service_type, to_native(e))) + self.module.fail_json( + msg="Failed to get all VMKs for service type %s due to" + "%s" % (service_type, to_native(e)) + ) if not query.selectedVnic: return vmks_list selected_vnics = [vnic for vnic in query.selectedVnic] - vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics] + vnics_with_service_type = [ + vnic.device + for vnic in query.candidateVnic + if vnic.key in selected_vnics + ] return vnics_with_service_type def gather_host_vmk_facts(self): @@ -165,21 +189,28 @@ def gather_host_vmk_facts(self): if host_network_system: vmks_config = host.config.network.vnic for vmk in vmks_config: - host_vmk_facts.append(dict( - device=vmk.device, - key=vmk.key, - portgroup=vmk.portgroup, - ipv4_address=vmk.spec.ip.ipAddress, - ipv4_subnet_mask=vmk.spec.ip.subnetMask, - dhcp=vmk.spec.ip.dhcp, - mac=vmk.spec.mac, - mtu=vmk.spec.mtu, - stack=vmk.spec.netStackInstanceKey, - enable_vsan=vmk.device in self.service_type_vmks[host.name]['vsan'], - enable_vmotion=vmk.device in self.service_type_vmks[host.name]['vmotion'], - enable_management=vmk.device in self.service_type_vmks[host.name]['management'], - enable_ft=vmk.device in self.service_type_vmks[host.name]['faultToleranceLogging'], - ) + host_vmk_facts.append( + dict( + device=vmk.device, + key=vmk.key, + portgroup=vmk.portgroup, + ipv4_address=vmk.spec.ip.ipAddress, + ipv4_subnet_mask=vmk.spec.ip.subnetMask, + dhcp=vmk.spec.ip.dhcp, + mac=vmk.spec.mac, + mtu=vmk.spec.mtu, + stack=vmk.spec.netStackInstanceKey, + enable_vsan=vmk.device + in self.service_type_vmks[host.name]["vsan"], + enable_vmotion=vmk.device + in self.service_type_vmks[host.name]["vmotion"], + enable_management=vmk.device + in self.service_type_vmks[host.name]["management"], + enable_ft=vmk.device + in self.service_type_vmks[host.name][ + "faultToleranceLogging" + ], + ) ) hosts_facts[host.name] = host_vmk_facts return hosts_facts @@ -188,20 +219,20 @@ def gather_host_vmk_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_vmk_config = VmkernelFactsManager(module) - module.exit_json(changed=False, host_vmk_facts=vmware_vmk_config.gather_host_vmk_facts()) + module.exit_json( + changed=False, host_vmk_facts=vmware_vmk_config.gather_host_vmk_facts() + ) if __name__ == "__main__": diff --git a/plugins/modules/_vmware_vswitch_facts.py b/plugins/modules/_vmware_vswitch_facts.py index a08e6d2..6c33963 100644 --- a/plugins/modules/_vmware_vswitch_facts.py +++ b/plugins/modules/_vmware_vswitch_facts.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 
'metadata_version': '1.1', - 'status': ['deprecated'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vswitch_facts deprecated: @@ -47,9 +48,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather vswitch facts about all ESXi Host in given Cluster vmware_vswitch_facts: hostname: '{{ vcenter_hostname }}' @@ -67,9 +68,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: all_vswitch_facts -''' +""" -RETURN = r''' +RETURN = r""" hosts_vswitch_facts: description: metadata about host's vswitch configuration returned: on success @@ -93,19 +94,25 @@ }, }, } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VswitchFactsManager(PyVmomi): """Class to gather vSwitch facts""" + def __init__(self, module): super(VswitchFactsManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") @@ -132,7 +139,7 @@ def gather_vswitch_facts(self): # we need to use the spec to get the ports # otherwise, the output might be different compared to the vswitch config module # (e.g. 
5632 ports instead of 128) - num_ports=available_vswitch.spec.numPorts + num_ports=available_vswitch.spec.numPorts, ) hosts_vswitch_facts[host.name] = temp_switch_dict return hosts_vswitch_facts @@ -142,20 +149,21 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_vswitch_mgr = VswitchFactsManager(module) - module.exit_json(changed=False, hosts_vswitch_facts=vmware_vswitch_mgr.gather_vswitch_facts()) + module.exit_json( + changed=False, + hosts_vswitch_facts=vmware_vswitch_mgr.gather_vswitch_facts(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vca_fw.py b/plugins/modules/vca_fw.py index 7e233e6..06df740 100644 --- a/plugins/modules/vca_fw.py +++ b/plugins/modules/vca_fw.py @@ -4,13 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vca_fw short_description: add remove firewall rules in a gateway in a vca @@ -27,9 +30,9 @@ extends_documentation_fragment: - community.general.vca.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ #Add a set of firewall rules @@ -54,11 +57,15 @@ protocol: "Tcp" policy: "allow" -''' +""" try: - from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType - from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType + from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ( + FirewallRuleType, + ) + from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ( + ProtocolsType, + ) except ImportError: # normally set a flag here but it will be caught when testing for # the existence of pyvcloud (see module_utils/vca.py). 
This just @@ -66,35 +73,49 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.vca import VcaError, vca_argument_spec, vca_login - - -VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any'] -VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description', - 'dest_ip', 'dest_port', 'source_ip', 'source_port', - 'protocol'] +from ansible_collections.community.general.plugins.module_utils.vca import ( + VcaError, + vca_argument_spec, + vca_login, +) + + +VALID_PROTO = ["Tcp", "Udp", "Icmp", "Other", "Any"] +VALID_RULE_KEYS = [ + "policy", + "is_enable", + "enable_logging", + "description", + "dest_ip", + "dest_port", + "source_ip", + "source_port", + "protocol", +] def protocol_to_tuple(protocol): - return (protocol.get_Tcp(), - protocol.get_Udp(), - protocol.get_Icmp(), - protocol.get_Other(), - protocol.get_Any()) + return ( + protocol.get_Tcp(), + protocol.get_Udp(), + protocol.get_Icmp(), + protocol.get_Other(), + protocol.get_Any(), + ) def protocol_to_string(protocol): protocol = protocol_to_tuple(protocol) if protocol[0] is True: - return 'Tcp' + return "Tcp" elif protocol[1] is True: - return 'Udp' + return "Udp" elif protocol[2] is True: - return 'Icmp' + return "Icmp" elif protocol[3] is True: - return 'Other' + return "Other" elif protocol[4] is True: - return 'Any' + return "Any" def protocol_to_type(protocol): @@ -110,18 +131,21 @@ def validate_fw_rules(fw_rules): for rule in fw_rules: for k in rule.keys(): if k not in VALID_RULE_KEYS: - raise VcaError("%s is not a valid key in fw rules, please " - "check above.." % k, valid_keys=VALID_RULE_KEYS) - - rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower() - rule['dest_ip'] = rule.get('dest_ip', 'Any').lower() - rule['source_port'] = str(rule.get('source_port', 'Any')).lower() - rule['source_ip'] = rule.get('source_ip', 'Any').lower() - rule['protocol'] = rule.get('protocol', 'Any').lower() - rule['policy'] = rule.get('policy', 'allow').lower() - rule['is_enable'] = rule.get('is_enable', True) - rule['enable_logging'] = rule.get('enable_logging', False) - rule['description'] = rule.get('description', 'rule added by Ansible') + raise VcaError( + "%s is not a valid key in fw rules, please " + "check above.." 
% k, + valid_keys=VALID_RULE_KEYS, + ) + + rule["dest_port"] = str(rule.get("dest_port", "Any")).lower() + rule["dest_ip"] = rule.get("dest_ip", "Any").lower() + rule["source_port"] = str(rule.get("source_port", "Any")).lower() + rule["source_ip"] = rule.get("source_ip", "Any").lower() + rule["protocol"] = rule.get("protocol", "Any").lower() + rule["policy"] = rule.get("policy", "allow").lower() + rule["is_enable"] = rule.get("is_enable", True) + rule["enable_logging"] = rule.get("enable_logging", False) + rule["description"] = rule.get("description", "rule added by Ansible") return fw_rules @@ -139,48 +163,61 @@ def fw_rules_to_dict(rules): policy=rule.get_Policy().lower(), is_enable=rule.get_IsEnabled(), enable_logging=rule.get_EnableLogging(), - description=rule.get_Description() + description=rule.get_Description(), ) ) return fw_rules -def create_fw_rule(is_enable, description, policy, protocol, dest_port, - dest_ip, source_port, source_ip, enable_logging): - - return FirewallRuleType(IsEnabled=is_enable, - Description=description, - Policy=policy, - Protocols=protocol_to_type(protocol), - DestinationPortRange=dest_port, - DestinationIp=dest_ip, - SourcePortRange=source_port, - SourceIp=source_ip, - EnableLogging=enable_logging) +def create_fw_rule( + is_enable, + description, + policy, + protocol, + dest_port, + dest_ip, + source_port, + source_ip, + enable_logging, +): + + return FirewallRuleType( + IsEnabled=is_enable, + Description=description, + Policy=policy, + Protocols=protocol_to_type(protocol), + DestinationPortRange=dest_port, + DestinationIp=dest_ip, + SourcePortRange=source_port, + SourceIp=source_ip, + EnableLogging=enable_logging, + ) def main(): argument_spec = vca_argument_spec() argument_spec.update( dict( - fw_rules=dict(required=True, type='list'), - gateway_name=dict(default='gateway'), - state=dict(default='present', choices=['present', 'absent']) + fw_rules=dict(required=True, type="list"), + gateway_name=dict(default="gateway"), + state=dict(default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec, supports_check_mode=True) - fw_rules = module.params.get('fw_rules') - gateway_name = module.params.get('gateway_name') - vdc_name = module.params['vdc_name'] + fw_rules = module.params.get("fw_rules") + gateway_name = module.params.get("gateway_name") + vdc_name = module.params["vdc_name"] vca = vca_login(module) gateway = vca.get_gateway(vdc_name, gateway_name) if not gateway: - module.fail_json(msg="Not able to find the gateway %s, please check " - "the gateway_name param" % gateway_name) + module.fail_json( + msg="Not able to find the gateway %s, please check " + "the gateway_name param" % gateway_name + ) fwservice = gateway._getFirewallService() @@ -193,8 +230,8 @@ def main(): module.fail_json(msg=e.message) result = dict(changed=False) - result['current_rules'] = current_rules - result['desired_rules'] = desired_rules + result["current_rules"] = current_rules + result["desired_rules"] = desired_rules updates = list() additions = list() @@ -214,35 +251,35 @@ def main(): for rule in additions: if not module.check_mode: - rule['protocol'] = rule['protocol'].capitalize() + rule["protocol"] = rule["protocol"].capitalize() gateway.add_fw_rule(**rule) - result['changed'] = True + result["changed"] = True for index, rule in updates: if not module.check_mode: rule = create_fw_rule(**rule) fwservice.replace_FirewallRule_at(index, rule) - result['changed'] = True + result["changed"] = True - keys = ['protocol', 'dest_port', 'dest_ip', 
'source_port', 'source_ip'] + keys = ["protocol", "dest_port", "dest_ip", "source_port", "source_ip"] for rule in deletions: if not module.check_mode: kwargs = dict([(k, v) for k, v in rule.items() if k in keys]) - kwargs['protocol'] = protocol_to_string(kwargs['protocol']) + kwargs["protocol"] = protocol_to_string(kwargs["protocol"]) gateway.delete_fw_rule(**kwargs) - result['changed'] = True + result["changed"] = True - if not module.check_mode and result['changed'] is True: + if not module.check_mode and result["changed"] is True: task = gateway.save_services_configuration() if task: vca.block_until_completed(task) - result['rules_updated'] = len(updates) - result['rules_added'] = len(additions) - result['rules_deleted'] = len(deletions) + result["rules_updated"] = len(updates) + result["rules_added"] = len(additions) + result["rules_deleted"] = len(deletions) return module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vca_nat.py b/plugins/modules/vca_nat.py index d7a6b97..c7fffde 100644 --- a/plugins/modules/vca_nat.py +++ b/plugins/modules/vca_nat.py @@ -4,13 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vca_nat short_description: add remove nat rules in a gateway in a vca @@ -31,9 +34,9 @@ extends_documentation_fragment: - community.general.vca.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ #An example for a source nat @@ -64,33 +67,50 @@ translated_ip: 192.0.2.42 translated_port: 22 -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.vca import VcaError, vca_argument_spec, vca_login +from ansible_collections.community.general.plugins.module_utils.vca import ( + VcaError, + vca_argument_spec, + vca_login, +) -VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port', - 'translated_ip', 'translated_port', 'protocol'] +VALID_RULE_KEYS = [ + "rule_type", + "original_ip", + "original_port", + "translated_ip", + "translated_port", + "protocol", +] def validate_nat_rules(nat_rules): for rule in nat_rules: if not isinstance(rule, dict): - raise VcaError("nat rules must be a list of dictionaries, " - "Please check", valid_keys=VALID_RULE_KEYS) + raise VcaError( + "nat rules must be a list of dictionaries, " "Please check", + valid_keys=VALID_RULE_KEYS, + ) for k in rule.keys(): if k not in VALID_RULE_KEYS: - raise VcaError("%s is not a valid key in nat rules, please " - "check above.." % k, valid_keys=VALID_RULE_KEYS) - - rule['original_port'] = str(rule.get('original_port', 'any')).lower() - rule['original_ip'] = rule.get('original_ip', 'any').lower() - rule['translated_ip'] = rule.get('translated_ip', 'any').lower() - rule['translated_port'] = str(rule.get('translated_port', 'any')).lower() - rule['protocol'] = rule.get('protocol', 'any').lower() - rule['rule_type'] = rule.get('rule_type', 'DNAT').lower() + raise VcaError( + "%s is not a valid key in nat rules, please " + "check above.." 
% k, + valid_keys=VALID_RULE_KEYS, + ) + + rule["original_port"] = str(rule.get("original_port", "any")).lower() + rule["original_ip"] = rule.get("original_ip", "any").lower() + rule["translated_ip"] = rule.get("translated_ip", "any").lower() + rule["translated_port"] = str( + rule.get("translated_port", "any") + ).lower() + rule["protocol"] = rule.get("protocol", "any").lower() + rule["rule_type"] = rule.get("rule_type", "DNAT").lower() return nat_rules @@ -103,10 +123,12 @@ def nat_rules_to_dict(nat_rules): dict( rule_type=rule.get_RuleType().lower(), original_ip=gw_rule.get_OriginalIp().lower(), - original_port=(gw_rule.get_OriginalPort().lower() or 'any'), + original_port=(gw_rule.get_OriginalPort().lower() or "any"), translated_ip=gw_rule.get_TranslatedIp().lower(), - translated_port=(gw_rule.get_TranslatedPort().lower() or 'any'), - protocol=(gw_rule.get_Protocol().lower() or 'any') + translated_port=( + gw_rule.get_TranslatedPort().lower() or "any" + ), + protocol=(gw_rule.get_Protocol().lower() or "any"), ) ) return result @@ -115,37 +137,39 @@ def nat_rules_to_dict(nat_rules): def rule_to_string(rule): strings = list() for key, value in rule.items(): - strings.append('%s=%s' % (key, value)) - return ', '.join(strings) + strings.append("%s=%s" % (key, value)) + return ", ".join(strings) def main(): argument_spec = vca_argument_spec() argument_spec.update( dict( - nat_rules=dict(type='list', default=[]), - gateway_name=dict(default='gateway'), - purge_rules=dict(default=False, type='bool'), - state=dict(default='present', choices=['present', 'absent']) + nat_rules=dict(type="list", default=[]), + gateway_name=dict(default="gateway"), + purge_rules=dict(default=False, type="bool"), + state=dict(default="present", choices=["present", "absent"]), ) ) module = AnsibleModule(argument_spec, supports_check_mode=True) - vdc_name = module.params.get('vdc_name') - nat_rules = module.params['nat_rules'] - gateway_name = module.params['gateway_name'] - purge_rules = module.params['purge_rules'] + vdc_name = module.params.get("vdc_name") + nat_rules = module.params["nat_rules"] + gateway_name = module.params["gateway_name"] + purge_rules = module.params["purge_rules"] if not purge_rules and not nat_rules: - module.fail_json(msg='Must define purge_rules or nat_rules') + module.fail_json(msg="Must define purge_rules or nat_rules") vca = vca_login(module) gateway = vca.get_gateway(vdc_name, gateway_name) if not gateway: - module.fail_json(msg="Not able to find the gateway %s, please check " - "the gateway_name param" % gateway_name) + module.fail_json( + msg="Not able to find the gateway %s, please check " + "the gateway_name param" % gateway_name + ) try: desired_rules = validate_nat_rules(nat_rules) @@ -160,40 +184,40 @@ def main(): additions = 0 if purge_rules is True and len(rules) > 0: - result['rules_purged'] = len(rules) - deletions = result['rules_purged'] + result["rules_purged"] = len(rules) + deletions = result["rules_purged"] rules = list() if not module.check_mode: gateway.del_all_nat_rules() task = gateway.save_services_configuration() vca.block_until_completed(task) rules = gateway.get_nat_rules() - result['changed'] = True + result["changed"] = True current_rules = nat_rules_to_dict(rules) - result['current_rules'] = current_rules - result['desired_rules'] = desired_rules + result["current_rules"] = current_rules + result["desired_rules"] = desired_rules for rule in desired_rules: if rule not in current_rules: additions += 1 if not module.check_mode: gateway.add_nat_rule(**rule) - 
result['changed'] = True - result['rules_added'] = additions + result["changed"] = True + result["rules_added"] = additions - result['delete_rule'] = list() - result['delete_rule_rc'] = list() + result["delete_rule"] = list() + result["delete_rule_rc"] = list() for rule in current_rules: if rule not in desired_rules: deletions += 1 if not module.check_mode: - result['delete_rule'].append(rule) + result["delete_rule"].append(rule) rc = gateway.del_nat_rule(**rule) - result['delete_rule_rc'].append(rc) - result['changed'] = True - result['rules_deleted'] = deletions + result["delete_rule_rc"].append(rc) + result["changed"] = True + result["rules_deleted"] = deletions if not module.check_mode and (additions > 0 or deletions > 0): task = gateway.save_services_configuration() @@ -202,5 +226,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vca_vapp.py b/plugins/modules/vca_vapp.py index 687d638..1719ee3 100644 --- a/plugins/modules/vca_vapp.py +++ b/plugins/modules/vca_vapp.py @@ -3,15 +3,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vca_vapp short_description: Manages vCloud Air vApp instances. @@ -102,9 +103,9 @@ extends_documentation_fragment: - community.general.vca -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Creates a new vApp in a VCA instance vca_vapp: vapp_name: tower @@ -115,108 +116,134 @@ username: '' password: '' delegate_to: localhost -''' +""" -from ansible_collections.community.general.plugins.module_utils.vca import VcaAnsibleModule, VcaError +from ansible_collections.community.general.plugins.module_utils.vca import ( + VcaAnsibleModule, + VcaError, +) -DEFAULT_VAPP_OPERATION = 'noop' +DEFAULT_VAPP_OPERATION = "noop" VAPP_STATUS = { - 'Powered off': 'poweroff', - 'Powered on': 'poweron', - 'Suspended': 'suspend' + "Powered off": "poweroff", + "Powered on": "poweron", + "Suspended": "suspend", } -VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed'] -VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown', - 'reboot', 'reset', 'noop'] +VAPP_STATES = ["present", "absent", "deployed", "undeployed"] +VAPP_OPERATIONS = [ + "poweron", + "poweroff", + "suspend", + "shutdown", + "reboot", + "reset", + "noop", +] def get_instance(module): - vapp_name = module.params['vapp_name'] - inst = dict(vapp_name=vapp_name, state='absent') + vapp_name = module.params["vapp_name"] + inst = dict(vapp_name=vapp_name, state="absent") try: vapp = module.get_vapp(vapp_name) if vapp: status = module.vca.get_status(vapp.me.get_status()) - inst['status'] = VAPP_STATUS.get(status, 'unknown') - inst['state'] = 'deployed' if vapp.me.deployed else 'undeployed' + inst["status"] = VAPP_STATUS.get(status, "unknown") + inst["state"] = "deployed" if vapp.me.deployed else "undeployed" return inst except VcaError: return inst def create(module): - vdc_name = module.params['vdc_name'] - vapp_name = module.params['vapp_name'] - template_name = module.params['template_name'] - catalog_name = module.params['catalog_name'] - network_name = module.params['network_name'] - network_mode = module.params['network_mode'] - vm_name = 
module.params['vm_name'] - vm_cpus = module.params['vm_cpus'] - vm_memory = module.params['vm_memory'] - deploy = module.params['state'] == 'deploy' - poweron = module.params['operation'] == 'poweron' - - task = module.vca.create_vapp(vdc_name, vapp_name, template_name, - catalog_name, network_name, 'bridged', - vm_name, vm_cpus, vm_memory, deploy, poweron) + vdc_name = module.params["vdc_name"] + vapp_name = module.params["vapp_name"] + template_name = module.params["template_name"] + catalog_name = module.params["catalog_name"] + network_name = module.params["network_name"] + network_mode = module.params["network_mode"] + vm_name = module.params["vm_name"] + vm_cpus = module.params["vm_cpus"] + vm_memory = module.params["vm_memory"] + deploy = module.params["state"] == "deploy" + poweron = module.params["operation"] == "poweron" + + task = module.vca.create_vapp( + vdc_name, + vapp_name, + template_name, + catalog_name, + network_name, + "bridged", + vm_name, + vm_cpus, + vm_memory, + deploy, + poweron, + ) if task is False: - module.fail('Failed to create vapp: %s' % vapp_name) + module.fail("Failed to create vapp: %s" % vapp_name) module.vca.block_until_completed(task) # Connect the network to the Vapp/VM and return assigned IP if network_name is not None: - vm_ip = connect_to_network(module, vdc_name, vapp_name, network_name, network_mode) + vm_ip = connect_to_network( + module, vdc_name, vapp_name, network_name, network_mode + ) return vm_ip def delete(module): - vdc_name = module.params['vdc_name'] - vapp_name = module.params['vapp_name'] + vdc_name = module.params["vdc_name"] + vapp_name = module.params["vapp_name"] module.vca.delete_vapp(vdc_name, vapp_name) def do_operation(module): - vapp_name = module.params['vapp_name'] - operation = module.params['operation'] + vapp_name = module.params["vapp_name"] + operation = module.params["operation"] - vm_name = module.params.get('vm_name') + vm_name = module.params.get("vm_name") vm = None if vm_name: vm = module.get_vm(vapp_name, vm_name) - if operation == 'poweron': - operation = 'powerOn' - elif operation == 'poweroff': - operation = 'powerOff' + if operation == "poweron": + operation = "powerOn" + elif operation == "poweroff": + operation = "powerOff" - cmd = 'power:%s' % operation - module.get_vapp(vapp_name).execute(cmd, 'post', targetVM=vm) + cmd = "power:%s" % operation + module.get_vapp(vapp_name).execute(cmd, "post", targetVM=vm) def set_state(module): - state = module.params['state'] - vapp = module.get_vapp(module.params['vapp_name']) - if state == 'deployed': - action = module.params['operation'] == 'poweron' + state = module.params["state"] + vapp = module.get_vapp(module.params["vapp_name"]) + if state == "deployed": + action = module.params["operation"] == "poweron" if not vapp.deploy(action): - module.fail('unable to deploy vapp') - elif state == 'undeployed': - action = module.params['operation'] - if action == 'poweroff': - action = 'powerOff' - elif action != 'suspend': + module.fail("unable to deploy vapp") + elif state == "undeployed": + action = module.params["operation"] + if action == "poweroff": + action = "powerOff" + elif action != "suspend": action = None if not vapp.undeploy(action): - module.fail('unable to undeploy vapp') + module.fail("unable to undeploy vapp") -def connect_to_network(module, vdc_name, vapp_name, network_name, network_mode): - nets = filter(lambda n: n.name == network_name, module.vca.get_networks(vdc_name)) +def connect_to_network( + module, vdc_name, vapp_name, network_name, 
network_mode +): + nets = filter( + lambda n: n.name == network_name, module.vca.get_networks(vdc_name) + ) if len(nets) != 1: module.fail_json("Unable to find network %s " % network_name) @@ -235,12 +262,14 @@ def connect_to_network(module, vdc_name, vapp_name, network_name, network_mode): # Connect VM ip_allocation_mode = None - if network_mode == 'pool': - ip_allocation_mode = 'POOL' - elif network_mode == 'dhcp': - ip_allocation_mode = 'DHCP' + if network_mode == "pool": + ip_allocation_mode = "POOL" + elif network_mode == "dhcp": + ip_allocation_mode = "DHCP" - task = the_vapp.connect_vms(nets[0].name, connection_index=0, ip_allocation_mode=ip_allocation_mode) + task = the_vapp.connect_vms( + nets[0].name, connection_index=0, ip_allocation_mode=ip_allocation_mode + ) if result is None: module.fail_json(msg="Failed to complete task") @@ -257,9 +286,9 @@ def connect_to_network(module, vdc_name, vapp_name, network_name, network_mode): def get_vm_details(module): - vdc_name = module.params['vdc_name'] - vapp_name = module.params['vapp_name'] - vm_name = module.params['vm_name'] + vdc_name = module.params["vdc_name"] + vapp_name = module.params["vapp_name"] + vm_name = module.params["vm_name"] the_vdc = module.vca.get_vdc(vdc_name) the_vapp = module.vca.get_vapp(the_vdc, vapp_name) if the_vapp and the_vapp.name != vapp_name: @@ -269,29 +298,29 @@ def get_vm_details(module): for vm in the_vapp.me.Children.Vm: sections = vm.get_Section() - customization_section = ( - filter(lambda section: - section.__class__.__name__ == - "GuestCustomizationSectionType", - sections)[0]) + customization_section = filter( + lambda section: section.__class__.__name__ + == "GuestCustomizationSectionType", + sections, + )[0] if customization_section.get_AdminPasswordEnabled(): - the_vm_details["vm_admin_password"] = customization_section.get_AdminPassword() - - virtual_hardware_section = ( - filter(lambda section: - section.__class__.__name__ == - "VirtualHardwareSection_Type", - sections)[0]) + the_vm_details[ + "vm_admin_password" + ] = customization_section.get_AdminPassword() + + virtual_hardware_section = filter( + lambda section: section.__class__.__name__ + == "VirtualHardwareSection_Type", + sections, + )[0] items = virtual_hardware_section.get_Item() ips = [] - _url = '{http://www.vmware.com/vcloud/v1.5}ipAddress' + _url = "{http://www.vmware.com/vcloud/v1.5}ipAddress" for item in items: if item.Connection: for c in item.Connection: - if c.anyAttributes_.get( - _url): - ips.append(c.anyAttributes_.get( - _url)) + if c.anyAttributes_.get(_url): + ips.append(c.anyAttributes_.get(_url)) if len(ips) > 0: the_vm_details["vm_ip"] = ips[0] @@ -303,50 +332,53 @@ def main(): vapp_name=dict(required=True), vdc_name=dict(required=True), template_name=dict(), - catalog_name=dict(default='Public Catalog'), + catalog_name=dict(default="Public Catalog"), network_name=dict(), - network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']), + network_mode=dict(default="pool", choices=["dhcp", "static", "pool"]), vm_name=dict(), vm_cpus=dict(), vm_memory=dict(), - operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS), - state=dict(default='present', choices=VAPP_STATES) + operation=dict( + default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS + ), + state=dict(default="present", choices=VAPP_STATES), ) - module = VcaAnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = VcaAnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) - state = 
module.params['state'] - operation = module.params['operation'] + state = module.params["state"] + operation = module.params["operation"] instance = get_instance(module) result = dict(changed=False) - if instance and state == 'absent': + if instance and state == "absent": if not module.check_mode: delete(module) - result['changed'] = True + result["changed"] = True - elif state != 'absent': - if instance['state'] == 'absent': + elif state != "absent": + if instance["state"] == "absent": if not module.check_mode: - result['ansible_facts'] = create(module) - result['changed'] = True + result["ansible_facts"] = create(module) + result["changed"] = True - elif instance['state'] != state and state != 'present': + elif instance["state"] != state and state != "present": if not module.check_mode: set_state(module) - result['changed'] = True + result["changed"] = True - if operation != instance.get('status') and operation != 'noop': + if operation != instance.get("status") and operation != "noop": if not module.check_mode: do_operation(module) - result['changed'] = True - result['ansible_facts'] = get_vm_details(module) + result["changed"] = True + result["ansible_facts"] = get_vm_details(module) return module.exit(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vcenter_extension.py b/plugins/modules/vcenter_extension.py index f974678..cfd9e9a 100644 --- a/plugins/modules/vcenter_extension.py +++ b/plugins/modules/vcenter_extension.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vcenter_extension short_description: Register/deregister vCenter Extensions @@ -83,9 +86,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Register vCenter Extension vcenter_extension: hostname: "{{ groups['vcsa'][0] }}" @@ -113,7 +116,7 @@ state: absent delegate_to: localhost register: deregister_extension -''' +""" RETURN = """ result: @@ -131,57 +134,81 @@ import datetime from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import connect_to_api, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + connect_to_api, + vmware_argument_spec, +) def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - extension_key=dict(type='str', required=True), - version=dict(type='str', required=True), - email=dict(type='str', required=False), - description=dict(type='str', required=False), - company=dict(type='str', required=False), - name=dict(type='str', required=False), - url=dict(type='str', required=False), - ssl_thumbprint=dict(type='str', required=False), - client_type=dict(type='str', default='vsphere-client-serenity', required=False), - server_type=dict(type='str', default='vsphere-client-serenity', required=False), - visible=dict(type='bool', default='True', required=False), - state=dict(type='str', default='present', choices=['absent', 'present']), - )) + argument_spec.update( + dict( + extension_key=dict(type="str", required=True), + 
version=dict(type="str", required=True), + email=dict(type="str", required=False), + description=dict(type="str", required=False), + company=dict(type="str", required=False), + name=dict(type="str", required=False), + url=dict(type="str", required=False), + ssl_thumbprint=dict(type="str", required=False), + client_type=dict( + type="str", default="vsphere-client-serenity", required=False + ), + server_type=dict( + type="str", default="vsphere-client-serenity", required=False + ), + visible=dict(type="bool", default="True", required=False), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, required_if=[ - ['state', 'present', ['email', 'description', 'company', 'name', 'url', 'ssl_thumbprint', 'server_type', 'client_type']] - ] + [ + "state", + "present", + [ + "email", + "description", + "company", + "name", + "url", + "ssl_thumbprint", + "server_type", + "client_type", + ], + ] + ], ) - state = module.params['state'] - extension_key = module.params['extension_key'] - version = module.params['version'] - email = module.params['email'] - desc = module.params['description'] - name = module.params['name'] - company = module.params['company'] - client_type = module.params['client_type'] - server_type = module.params['server_type'] - url = module.params['url'] - visible = module.params['visible'] - thumbprint = module.params['ssl_thumbprint'] + state = module.params["state"] + extension_key = module.params["extension_key"] + version = module.params["version"] + email = module.params["email"] + desc = module.params["description"] + name = module.params["name"] + company = module.params["company"] + client_type = module.params["client_type"] + server_type = module.params["server_type"] + url = module.params["url"] + visible = module.params["visible"] + thumbprint = module.params["ssl_thumbprint"] content = connect_to_api(module, False) em = content.extensionManager key_check = em.FindExtension(extension_key) results = dict(changed=False, installed=dict()) - if state == 'present' and key_check: - results['changed'] = False - results['installed'] = "'%s' is already installed" % (extension_key) + if state == "present" and key_check: + results["changed"] = False + results["installed"] = "'%s' is already installed" % (extension_key) - elif state == 'present' and not key_check: + elif state == "present" and not key_check: extension = vim.Extension() extension.key = extension_key extension.company = company @@ -211,20 +238,20 @@ def main(): extension.server = [server] em.RegisterExtension(extension) - results['changed'] = True - results['installed'] = "'%s' installed." % (extension_key) + results["changed"] = True + results["installed"] = "'%s' installed." % (extension_key) - elif state == 'absent' and key_check: + elif state == "absent" and key_check: em.UnregisterExtension(extension_key) - results['changed'] = True - results['installed'] = "'%s' uninstalled." % (extension_key) + results["changed"] = True + results["installed"] = "'%s' uninstalled." % (extension_key) - elif state == 'absent' and not key_check: - results['changed'] = False - results['installed'] = "'%s' is not installed." % (extension_key) + elif state == "absent" and not key_check: + results["changed"] = False + results["installed"] = "'%s' is not installed." 
% (extension_key) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vcenter_extension_info.py b/plugins/modules/vcenter_extension_info.py index fe4aef4..b945ca0 100644 --- a/plugins/modules/vcenter_extension_info.py +++ b/plugins/modules/vcenter_extension_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vcenter_extension_info short_description: Gather info vCenter extensions @@ -29,9 +30,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about vCenter Extensions vcenter_extension_info: hostname: '{{ vcenter_hostname }}' @@ -39,9 +40,9 @@ password: '{{ vcenter_password }}' register: ext_info delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" extension_info: description: List of extensions returned: success @@ -68,10 +69,13 @@ "extension_version": "5.5" } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareExtManager(PyVmomi): @@ -91,11 +95,13 @@ def gather_plugin_info(self): extension_key=ext.key, extension_company=ext.company, extension_version=ext.version, - extension_type=ext.type if ext.type else '', - extension_subject_name=ext.subjectName if ext.subjectName else '', + extension_type=ext.type if ext.type else "", + extension_subject_name=ext.subjectName + if ext.subjectName + else "", extension_last_heartbeat_time=ext.lastHeartbeatTime, ) - result['extension_info'].append(ext_info) + result["extension_info"].append(ext_info) self.module.exit_json(**result) @@ -104,8 +110,7 @@ def main(): argument_spec = vmware_argument_spec() module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vcenter_extension_info_mgr = VmwareExtManager(module) diff --git a/plugins/modules/vcenter_folder.py b/plugins/modules/vcenter_folder.py index cd33176..9cfc72b 100644 --- a/plugins/modules/vcenter_folder.py +++ b/plugins/modules/vcenter_folder.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vcenter_folder short_description: Manage folders on given datacenter @@ -76,9 +77,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a VM folder on given datacenter vcenter_folder: hostname: '{{ vcenter_hostname }}' @@ -126,9 +127,9 @@ state: absent register: vm_folder_deletion_result delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: 
description: The detail about the new folder returned: On success @@ -140,7 +141,7 @@ msg: description: string stating about result type: str -''' +""" try: from pyVmomi import vim @@ -148,169 +149,250 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, wait_for_task, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_datacenter_by_name, + wait_for_task, + get_all_objs, +) from ansible.module_utils._text import to_native class VmwareFolderManager(PyVmomi): def __init__(self, module): super(VmwareFolderManager, self).__init__(module) - datacenter_name = self.params.get('datacenter', None) - self.datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) + datacenter_name = self.params.get("datacenter", None) + self.datacenter_obj = find_datacenter_by_name( + self.content, datacenter_name=datacenter_name + ) if self.datacenter_obj is None: - self.module.fail_json(msg="Failed to find datacenter %s" % datacenter_name) + self.module.fail_json( + msg="Failed to find datacenter %s" % datacenter_name + ) self.datacenter_folder_type = { - 'vm': self.datacenter_obj.vmFolder, - 'host': self.datacenter_obj.hostFolder, - 'datastore': self.datacenter_obj.datastoreFolder, - 'network': self.datacenter_obj.networkFolder, + "vm": self.datacenter_obj.vmFolder, + "host": self.datacenter_obj.hostFolder, + "datastore": self.datacenter_obj.datastoreFolder, + "network": self.datacenter_obj.networkFolder, } def ensure(self): """ Manage internal state management """ - state = self.module.params.get('state') - datacenter_name = self.module.params.get('datacenter') - folder_type = self.module.params.get('folder_type') - folder_name = self.module.params.get('folder_name') - parent_folder = self.module.params.get('parent_folder', None) - results = {'changed': False, 'result': {}} - if state == 'present': + state = self.module.params.get("state") + datacenter_name = self.module.params.get("datacenter") + folder_type = self.module.params.get("folder_type") + folder_name = self.module.params.get("folder_name") + parent_folder = self.module.params.get("parent_folder", None) + results = {"changed": False, "result": {}} + if state == "present": # Check if the folder already exists p_folder_obj = None if parent_folder: if "/" in parent_folder: - parent_folder_parts = parent_folder.strip('/').split('/') + parent_folder_parts = parent_folder.strip("/").split("/") p_folder_obj = None for part in parent_folder_parts: - part_folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=part, - folder_type=folder_type, - parent_folder=p_folder_obj) + part_folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=part, + folder_type=folder_type, + parent_folder=p_folder_obj, + ) if not part_folder_obj: - self.module.fail_json(msg="Could not find folder %s" % part) + self.module.fail_json( + msg="Could not find folder %s" % part + ) p_folder_obj = part_folder_obj else: - p_folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=parent_folder, - folder_type=folder_type) + p_folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=parent_folder, + folder_type=folder_type, + ) if not p_folder_obj: - self.module.fail_json(msg="Parent folder %s does not exist" % parent_folder) + self.module.fail_json( + msg="Parent 
folder %s does not exist" + % parent_folder + ) # Check if folder exists under parent folder - child_folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=folder_name, - folder_type=folder_type, - parent_folder=p_folder_obj) + child_folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=folder_name, + folder_type=folder_type, + parent_folder=p_folder_obj, + ) if child_folder_obj: - results['result']['path'] = self.get_folder_path(child_folder_obj) - results['result'] = "Folder %s already exists under" \ - " parent folder %s" % (folder_name, parent_folder) + results["result"]["path"] = self.get_folder_path( + child_folder_obj + ) + results["result"] = ( + "Folder %s already exists under" + " parent folder %s" % (folder_name, parent_folder) + ) self.module.exit_json(**results) else: - folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=folder_name, - folder_type=folder_type) + folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=folder_name, + folder_type=folder_type, + ) if folder_obj: - results['result']['path'] = self.get_folder_path(folder_obj) - results['result']['msg'] = "Folder %s already exists" % folder_name + results["result"]["path"] = self.get_folder_path( + folder_obj + ) + results["result"]["msg"] = ( + "Folder %s already exists" % folder_name + ) self.module.exit_json(**results) # Create a new folder try: if parent_folder and p_folder_obj: if self.module.check_mode: - results['msg'] = "Folder '%s' of type '%s' under '%s' will be created." % \ - (folder_name, folder_type, parent_folder) + results["msg"] = ( + "Folder '%s' of type '%s' under '%s' will be created." + % (folder_name, folder_type, parent_folder) + ) else: new_folder = p_folder_obj.CreateFolder(folder_name) - results['result']['path'] = self.get_folder_path(new_folder) - results['result']['msg'] = "Folder '%s' of type '%s' under '%s' created" \ - " successfully." % (folder_name, folder_type, parent_folder) - results['changed'] = True + results["result"]["path"] = self.get_folder_path( + new_folder + ) + results["result"]["msg"] = ( + "Folder '%s' of type '%s' under '%s' created" + " successfully." + % (folder_name, folder_type, parent_folder) + ) + results["changed"] = True elif not parent_folder and not p_folder_obj: if self.module.check_mode: - results['msg'] = "Folder '%s' of type '%s' will be created." % (folder_name, folder_type) + results["msg"] = ( + "Folder '%s' of type '%s' will be created." + % (folder_name, folder_type) + ) else: - new_folder = self.datacenter_folder_type[folder_type].CreateFolder(folder_name) - results['result']['msg'] = "Folder '%s' of type '%s' created successfully." % (folder_name, folder_type) - results['result']['path'] = self.get_folder_path(new_folder) - results['changed'] = True + new_folder = self.datacenter_folder_type[ + folder_type + ].CreateFolder(folder_name) + results["result"]["msg"] = ( + "Folder '%s' of type '%s' created successfully." + % (folder_name, folder_type) + ) + results["result"]["path"] = self.get_folder_path( + new_folder + ) + results["changed"] = True except vim.fault.DuplicateName as duplicate_name: # To be consistent with the other vmware modules, We decided to accept this error # and the playbook should simply carry on with other tasks. 
# User will have to take care of this exception # https://github.com/ansible/ansible/issues/35388#issuecomment-362283078 - results['changed'] = False - results['msg'] = "Failed to create folder as another object has same name" \ - " in the same target folder : %s" % to_native(duplicate_name.msg) + results["changed"] = False + results["msg"] = ( + "Failed to create folder as another object has same name" + " in the same target folder : %s" + % to_native(duplicate_name.msg) + ) except vim.fault.InvalidName as invalid_name: - self.module.fail_json(msg="Failed to create folder as folder name is not a valid " - "entity name : %s" % to_native(invalid_name.msg)) + self.module.fail_json( + msg="Failed to create folder as folder name is not a valid " + "entity name : %s" % to_native(invalid_name.msg) + ) except Exception as general_exc: - self.module.fail_json(msg="Failed to create folder due to generic" - " exception : %s " % to_native(general_exc)) + self.module.fail_json( + msg="Failed to create folder due to generic" + " exception : %s " % to_native(general_exc) + ) self.module.exit_json(**results) - elif state == 'absent': + elif state == "absent": # Check if the folder already exists p_folder_obj = None if parent_folder: - p_folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=parent_folder, - folder_type=folder_type) + p_folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=parent_folder, + folder_type=folder_type, + ) if not p_folder_obj: - self.module.fail_json(msg="Parent folder %s does not exist" % parent_folder) + self.module.fail_json( + msg="Parent folder %s does not exist" % parent_folder + ) # Check if folder exists under parent folder - folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=folder_name, - folder_type=folder_type, - parent_folder=p_folder_obj) + folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=folder_name, + folder_type=folder_type, + parent_folder=p_folder_obj, + ) else: - folder_obj = self.get_folder(datacenter_name=datacenter_name, - folder_name=folder_name, - folder_type=folder_type) + folder_obj = self.get_folder( + datacenter_name=datacenter_name, + folder_name=folder_name, + folder_type=folder_type, + ) if folder_obj: try: if parent_folder: if self.module.check_mode: - results['changed'] = True - results['msg'] = "Folder '%s' of type '%s' under '%s' will be removed." % \ - (folder_name, folder_type, parent_folder) + results["changed"] = True + results["msg"] = ( + "Folder '%s' of type '%s' under '%s' will be removed." + % (folder_name, folder_type, parent_folder) + ) else: - if folder_type == 'vm': + if folder_type == "vm": task = folder_obj.UnregisterAndDestroy() else: task = folder_obj.Destroy() - results['changed'], results['msg'] = wait_for_task(task=task) + results["changed"], results["msg"] = wait_for_task( + task=task + ) else: if self.module.check_mode: - results['changed'] = True - results['msg'] = "Folder '%s' of type '%s' will be removed." % (folder_name, folder_type) + results["changed"] = True + results["msg"] = ( + "Folder '%s' of type '%s' will be removed." 
+ % (folder_name, folder_type) + ) else: - if folder_type == 'vm': + if folder_type == "vm": task = folder_obj.UnregisterAndDestroy() else: task = folder_obj.Destroy() - results['changed'], results['msg'] = wait_for_task(task=task) + results["changed"], results["msg"] = wait_for_task( + task=task + ) except vim.fault.ConcurrentAccess as concurrent_access: - self.module.fail_json(msg="Failed to remove folder as another client" - " modified folder before this operation : %s" % to_native(concurrent_access.msg)) + self.module.fail_json( + msg="Failed to remove folder as another client" + " modified folder before this operation : %s" + % to_native(concurrent_access.msg) + ) except vim.fault.InvalidState as invalid_state: - self.module.fail_json(msg="Failed to remove folder as folder is in" - " invalid state : %s" % to_native(invalid_state.msg)) + self.module.fail_json( + msg="Failed to remove folder as folder is in" + " invalid state : %s" % to_native(invalid_state.msg) + ) except Exception as gen_exec: - self.module.fail_json(msg="Failed to remove folder due to generic" - " exception %s " % to_native(gen_exec)) + self.module.fail_json( + msg="Failed to remove folder due to generic" + " exception %s " % to_native(gen_exec) + ) self.module.exit_json(**results) - def get_folder(self, datacenter_name, folder_name, folder_type, parent_folder=None): + def get_folder( + self, datacenter_name, folder_name, folder_type, parent_folder=None + ): """ Get managed object of folder by name Returns: Managed object of folder by name @@ -319,13 +401,19 @@ def get_folder(self, datacenter_name, folder_name, folder_type, parent_folder=No folder_objs = get_all_objs(self.content, [vim.Folder], parent_folder) for folder in folder_objs: if parent_folder: - if folder.name == folder_name and \ - self.datacenter_folder_type[folder_type].childType == folder.childType: + if ( + folder.name == folder_name + and self.datacenter_folder_type[folder_type].childType + == folder.childType + ): return folder else: - if folder.name == folder_name and \ - self.datacenter_folder_type[folder_type].childType == folder.childType and \ - folder.parent.parent.name == datacenter_name: # e.g. folder.parent.parent.name == /DC01/host/folder + if ( + folder.name == folder_name + and self.datacenter_folder_type[folder_type].childType + == folder.childType + and folder.parent.parent.name == datacenter_name + ): # e.g. 
folder.parent.parent.name == /DC01/host/folder return folder return None @@ -334,30 +422,37 @@ def get_folder(self, datacenter_name, folder_name, folder_type, parent_folder=No def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=True, aliases=['datacenter_name']), - folder_name=dict(type='str', required=True), - parent_folder=dict(type='str', required=False), - state=dict(type='str', - choices=['present', 'absent'], - default='present'), - folder_type=dict(type='str', - default='vm', - choices=['datastore', 'host', 'network', 'vm'], - required=False), + datacenter=dict( + type="str", required=True, aliases=["datacenter_name"] + ), + folder_name=dict(type="str", required=True), + parent_folder=dict(type="str", required=False), + state=dict( + type="str", choices=["present", "absent"], default="present" + ), + folder_type=dict( + type="str", + default="vm", + choices=["datastore", "host", "network", "vm"], + required=False, + ), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) - if len(module.params.get('folder_name')) > 79: - module.fail_json(msg="Failed to manage folder as folder_name can only contain 80 characters.") + if len(module.params.get("folder_name")) > 79: + module.fail_json( + msg="Failed to manage folder as folder_name can only contain 80 characters." + ) vcenter_folder_mgr = VmwareFolderManager(module) if not vcenter_folder_mgr.is_vcenter(): - module.fail_json(msg="Module vcenter_folder is meant for vCenter, hostname %s " - "is not vCenter server." % module.params.get('hostname')) + module.fail_json( + msg="Module vcenter_folder is meant for vCenter, hostname %s " + "is not vCenter server." 
% module.params.get("hostname") + ) vcenter_folder_mgr.ensure() diff --git a/plugins/modules/vcenter_license.py b/plugins/modules/vcenter_license.py index d804e4b..42484ec 100644 --- a/plugins/modules/vcenter_license.py +++ b/plugins/modules/vcenter_license.py @@ -4,13 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: vcenter_license short_description: Manage VMware vCenter license keys description: @@ -63,9 +66,9 @@ extends_documentation_fragment: - vmware.general.vmware.vcenter_documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a new vCenter license vcenter_license: hostname: '{{ vcenter_hostname }}' @@ -104,9 +107,9 @@ license: f600d-21ae3-5592b-249e0-dd502 state: present delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" licenses: description: list of license keys after module executed returned: always @@ -114,7 +117,7 @@ sample: - f600d-21ae3-5592b-249e0-cc341 - 143cc-0e942-b2955-3ea12-d006f -''' +""" try: from pyVmomi import vim @@ -123,7 +126,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_hostsystem_by_name +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_hostsystem_by_name, +) class VcenterLicenseMgr(PyVmomi): @@ -148,52 +155,54 @@ def list_keys(self, licenses): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - labels=dict(type='dict', default=dict(source='ansible')), - license=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - esxi_hostname=dict(type='str'), - datacenter=dict(type='str'), - cluster_name=dict(type='str'), - )) + argument_spec.update( + dict( + labels=dict(type="dict", default=dict(source="ansible")), + license=dict(type="str", required=True), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + esxi_hostname=dict(type="str"), + datacenter=dict(type="str"), + cluster_name=dict(type="str"), + ) + ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) - license = module.params['license'] - state = module.params['state'] + license = module.params["license"] + state = module.params["state"] # FIXME: This does not seem to work on vCenter v6.0 labels = [] - for k in module.params['labels']: + for k in module.params["labels"]: kv = vim.KeyValue() kv.key = k - kv.value = module.params['labels'][k] + kv.value = module.params["labels"][k] labels.append(kv) - result = dict( - changed=False, - diff=dict(), - ) + result = dict(changed=False, diff=dict()) pyv = VcenterLicenseMgr(module) if not pyv.is_vcenter(): - module.fail_json(msg="vcenter_license is meant for vCenter, hostname %s " - "is not vCenter server." % module.params.get('hostname')) + module.fail_json( + msg="vcenter_license is meant for vCenter, hostname %s " + "is not vCenter server." 
% module.params.get("hostname") + ) lm = pyv.content.licenseManager - result['licenses'] = pyv.list_keys(lm.licenses) + result["licenses"] = pyv.list_keys(lm.licenses) if module._diff: - result['diff']['before'] = '\n'.join(result['licenses']) + '\n' + result["diff"]["before"] = "\n".join(result["licenses"]) + "\n" - if state == 'present': - if license not in result['licenses']: - result['changed'] = True + if state == "present": + if license not in result["licenses"]: + result["changed"] = True if module.check_mode: - result['licenses'].append(license) + result["licenses"].append(license) else: lm.AddLicense(license, labels) @@ -201,16 +210,21 @@ def main(): if key is not None: lam = lm.licenseAssignmentManager assigned_license = None - datacenter = module.params['datacenter'] + datacenter = module.params["datacenter"] datacenter_obj = None if datacenter: datacenter_obj = pyv.find_datacenter_by_name(datacenter) if not datacenter_obj: - module.fail_json(msg="Unable to find the datacenter %(datacenter)s" % module.params) + module.fail_json( + msg="Unable to find the datacenter %(datacenter)s" + % module.params + ) - cluster = module.params['cluster_name'] + cluster = module.params["cluster_name"] if cluster: - cluster_obj = pyv.find_cluster_by_name(cluster_name=cluster, datacenter_name=datacenter_obj) + cluster_obj = pyv.find_cluster_by_name( + cluster_name=cluster, datacenter_name=datacenter_obj + ) if not cluster_obj: msg = "Unable to find the cluster %(cluster_name)s" if datacenter: @@ -218,56 +232,84 @@ def main(): module.fail_json(msg=msg % module.params) entityId = cluster_obj._moId # assign to current vCenter, if esxi_hostname is not specified - elif module.params['esxi_hostname'] is None: + elif module.params["esxi_hostname"] is None: entityId = pyv.content.about.instanceUuid # if key name not contain "VMware vCenter Server" if pyv.content.about.name not in key.name: - module.warn('License key "%s" (%s) is not suitable for "%s"' % (license, key.name, pyv.content.about.name)) + module.warn( + 'License key "%s" (%s) is not suitable for "%s"' + % (license, key.name, pyv.content.about.name) + ) # assign to ESXi server else: - esxi_host = find_hostsystem_by_name(pyv.content, module.params['esxi_hostname']) + esxi_host = find_hostsystem_by_name( + pyv.content, module.params["esxi_hostname"] + ) if esxi_host is None: - module.fail_json(msg='Cannot find the specified ESXi host "%s".' % module.params['esxi_hostname']) + module.fail_json( + msg='Cannot find the specified ESXi host "%s".' + % module.params["esxi_hostname"] + ) entityId = esxi_host._moId # e.g., key.editionKey is "esx.enterprisePlus.cpuPackage", not sure all keys are in this format - if 'esx' not in key.editionKey: - module.warn('License key "%s" edition "%s" is not suitable for ESXi server' % (license, key.editionKey)) + if "esx" not in key.editionKey: + module.warn( + 'License key "%s" edition "%s" is not suitable for ESXi server' + % (license, key.editionKey) + ) try: assigned_license = lam.QueryAssignedLicenses(entityId=entityId) except Exception as e: - module.fail_json(msg='Could not query vCenter "%s" assigned license info due to %s.' % (entityId, to_native(e))) - - if not assigned_license or (len(assigned_license) != 0 and assigned_license[0].assignedLicense.licenseKey != license): + module.fail_json( + msg='Could not query vCenter "%s" assigned license info due to %s.' 
+ % (entityId, to_native(e)) + ) + + if not assigned_license or ( + len(assigned_license) != 0 + and assigned_license[0].assignedLicense.licenseKey != license + ): try: - lam.UpdateAssignedLicense(entity=entityId, licenseKey=license) + lam.UpdateAssignedLicense( + entity=entityId, licenseKey=license + ) except Exception: - module.fail_json(msg='Could not assign "%s" (%s) to vCenter.' % (license, key.name)) - result['changed'] = True - result['licenses'] = pyv.list_keys(lm.licenses) + module.fail_json( + msg='Could not assign "%s" (%s) to vCenter.' + % (license, key.name) + ) + result["changed"] = True + result["licenses"] = pyv.list_keys(lm.licenses) else: - module.fail_json(msg='License "%s" is not existing or can not be added' % license) + module.fail_json( + msg='License "%s" is not existing or can not be added' + % license + ) if module._diff: - result['diff']['after'] = '\n'.join(result['licenses']) + '\n' + result["diff"]["after"] = "\n".join(result["licenses"]) + "\n" - elif state == 'absent' and license in result['licenses']: + elif state == "absent" and license in result["licenses"]: # Check if key is in use key = pyv.find_key(lm.licenses, license) if key.used > 0: - module.fail_json(msg='Cannot remove key "%s", still in use %s time(s).' % (license, key.used)) + module.fail_json( + msg='Cannot remove key "%s", still in use %s time(s).' + % (license, key.used) + ) - result['changed'] = True + result["changed"] = True if module.check_mode: - result['licenses'].remove(license) + result["licenses"].remove(license) else: lm.RemoveLicense(license) - result['licenses'] = pyv.list_keys(lm.licenses) + result["licenses"] = pyv.list_keys(lm.licenses) if module._diff: - result['diff']['after'] = '\n'.join(result['licenses']) + '\n' + result["diff"]["after"] = "\n".join(result["licenses"]) + "\n" module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_about_info.py b/plugins/modules/vmware_about_info.py index be7d2a5..3deabef 100644 --- a/plugins/modules/vmware_about_info.py +++ b/plugins/modules/vmware_about_info.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_about_info short_description: Provides information about VMware server to which user is connecting to @@ -30,9 +31,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Provide information about vCenter vmware_about_info: hostname: '{{ vcenter_hostname }}' @@ -48,9 +49,9 @@ password: '{{ esxi_password }}' delegate_to: localhost register: esxi_about_info -''' +""" -RETURN = r''' +RETURN = r""" about_info: description: - dict about VMware server @@ -73,10 +74,13 @@ "vendor": "VMware, Inc.", "version": "6.5.0" } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareAboutManager(PyVmomi): @@ -107,7 +111,7 @@ def gather_about_info(self): 
instance_uuid=about.instanceUuid, license_product_name=about.licenseProductName, license_product_version=about.licenseProductVersion, - ) + ), ) @@ -115,8 +119,7 @@ def main(): argument_spec = vmware_argument_spec() module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_about_info_mgr = VmwareAboutManager(module) diff --git a/plugins/modules/vmware_appliance_access_info.py b/plugins/modules/vmware_appliance_access_info.py index 918a27d..14bf58e 100644 --- a/plugins/modules/vmware_appliance_access_info.py +++ b/plugins/modules/vmware_appliance_access_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_appliance_access_info short_description: Gathers info about modes of access to the vCenter appliance using REST API. @@ -37,9 +38,9 @@ extends_documentation_fragment: - vmware.general.VmwareRestModule.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - hosts: all connection: httpapi gather_facts: false @@ -58,9 +59,9 @@ - name: Get ssh access mode information vmware_appliance_access_info: access_mode: ssh -''' +""" -RETURN = r''' +RETURN = r""" access_mode: description: facts about the specified access mode returned: always @@ -68,26 +69,31 @@ sample: { "value": true } -''' +""" -from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import API, VmwareRestModule +from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import ( + API, + VmwareRestModule, +) SLUG = dict( - consolecli='/access/consolecli', - dcui='/access/dcui', - shell='/access/shell', - ssh='/access/ssh', + consolecli="/access/consolecli", + dcui="/access/dcui", + shell="/access/shell", + ssh="/access/ssh", ) def get_mode(module, mode): try: - url = API['appliance']['base'] + SLUG[mode] + url = API["appliance"]["base"] + SLUG[mode] except KeyError: - module.fail(msg='[%s] is not a valid access mode. ' - 'Please specify correct mode, valid choices are ' - '[%s].' % (mode, ", ".join(list(SLUG.keys())))) + module.fail( + msg="[%s] is not a valid access mode. " + "Please specify correct mode, valid choices are " + "[%s]." 
% (mode, ", ".join(list(SLUG.keys()))) + ) module.get(url=url, key=mode) @@ -95,14 +101,20 @@ def get_mode(module, mode): def main(): argument_spec = VmwareRestModule.create_argument_spec() argument_spec.update( - access_mode=dict(type='str', choices=['consolecli', 'dcui', 'shell', 'ssh'], default=None), + access_mode=dict( + type="str", + choices=["consolecli", "dcui", "shell", "ssh"], + default=None, + ) ) - module = VmwareRestModule(argument_spec=argument_spec, - supports_check_mode=True, - is_multipart=True, - use_object_handler=True) - access_mode = module.params['access_mode'] + module = VmwareRestModule( + argument_spec=argument_spec, + supports_check_mode=True, + is_multipart=True, + use_object_handler=True, + ) + access_mode = module.params["access_mode"] if access_mode is None: access_mode = SLUG.keys() @@ -114,5 +126,5 @@ def main(): module.exit() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_appliance_health_info.py b/plugins/modules/vmware_appliance_health_info.py index 8207b35..996e1c7 100644 --- a/plugins/modules/vmware_appliance_health_info.py +++ b/plugins/modules/vmware_appliance_health_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_appliance_health_info short_description: Gathers info about health of the VCSA. @@ -42,9 +43,9 @@ extends_documentation_fragment: - vmware.general.VmwareRestModule.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - hosts: all connection: httpapi gather_facts: false @@ -63,9 +64,9 @@ - name: Get system health information vmware_appliance_health_info: subsystem: system -''' +""" -RETURN = r''' +RETURN = r""" attribute: description: facts about the specified health attribute returned: always @@ -73,31 +74,36 @@ sample: { "value": true } -''' +""" -from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import API, VmwareRestModule +from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import ( + API, + VmwareRestModule, +) SLUG = dict( - applmgmt='/health/applmgmt', - databasestorage='/health/database-storage', - load='/health/load', - mem='/health/mem', - softwarepackages='/health/software-packages', - storage='/health/storage', - swap='/health/swap', - system='/health/system', - lastcheck='/health/system/lastcheck', + applmgmt="/health/applmgmt", + databasestorage="/health/database-storage", + load="/health/load", + mem="/health/mem", + softwarepackages="/health/software-packages", + storage="/health/storage", + swap="/health/swap", + system="/health/system", + lastcheck="/health/system/lastcheck", ) def get_subsystem(module, subsystem): try: - url = API['appliance']['base'] + SLUG[subsystem] + url = API["appliance"]["base"] + SLUG[subsystem] except KeyError: - module.fail(msg='[%s] is not a valid subsystem. ' - 'Please specify correct subsystem, valid choices are ' - '[%s].' % (subsystem, ", ".join(list(SLUG.keys())))) + module.fail( + msg="[%s] is not a valid subsystem. " + "Please specify correct subsystem, valid choices are " + "[%s]." 
% (subsystem, ", ".join(list(SLUG.keys()))) + ) module.get(url=url, key=subsystem) @@ -106,33 +112,34 @@ def main(): argument_spec = VmwareRestModule.create_argument_spec() argument_spec.update( subsystem=dict( - type='str', + type="str", required=False, choices=[ - 'applmgmt', - 'databasestorage', - 'lastcheck', - 'load', - 'mem', - 'softwarepackages', - 'storage', - 'swap', - 'system', + "applmgmt", + "databasestorage", + "lastcheck", + "load", + "mem", + "softwarepackages", + "storage", + "swap", + "system", ], ), - asset=dict(type='str', required=False), + asset=dict(type="str", required=False), ) - module = VmwareRestModule(argument_spec=argument_spec, - supports_check_mode=True, - is_multipart=True, - use_object_handler=True) - subsystem = module.params['subsystem'] - asset = module.params['asset'] + module = VmwareRestModule( + argument_spec=argument_spec, + supports_check_mode=True, + is_multipart=True, + use_object_handler=True, + ) + subsystem = module.params["subsystem"] + asset = module.params["asset"] if asset is not None: - url = (API['appliance']['base'] - + ('/health/%s/messages' % asset)) + url = API["appliance"]["base"] + ("/health/%s/messages" % asset) module.get(url=url, key=asset) elif subsystem is None: @@ -145,5 +152,5 @@ def main(): module.exit() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_category.py b/plugins/modules/vmware_category.py index e08a0e7..1ece352 100644 --- a/plugins/modules/vmware_category.py +++ b/plugins/modules/vmware_category.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_category short_description: Manage VMware categories @@ -91,9 +92,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a category vmware_category: hostname: "{{ vcenter_server }}" @@ -142,9 +143,9 @@ - Datastore - Cluster state: present -''' +""" -RETURN = r''' +RETURN = r""" category_results: description: dictionary of category metadata returned: on success @@ -153,10 +154,13 @@ "category_id": "urn:vmomi:InventoryServiceCategory:d7120bda-9fa5-4f92-9d71-aa1acff2e5a8:GLOBAL", "msg": "Category NewCat_0001 updated." } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) + try: from com.vmware.cis.tagging_client import CategoryModel from com.vmware.vapi.std.errors_client import Error @@ -169,21 +173,21 @@ def __init__(self, module): super(VmwareCategory, self).__init__(module) self.category_service = self.api_client.tagging.Category self.global_categories = dict() - self.category_name = self.params.get('category_name') + self.category_name = self.params.get("category_name") self.get_all_categories() def ensure_state(self): """Manage internal states of categories. 
""" - desired_state = self.params.get('state') + desired_state = self.params.get("state") states = { - 'present': { - 'present': self.state_update_category, - 'absent': self.state_create_category, + "present": { + "present": self.state_update_category, + "absent": self.state_create_category, + }, + "absent": { + "present": self.state_delete_category, + "absent": self.state_unchanged, }, - 'absent': { - 'present': self.state_delete_category, - 'absent': self.state_unchanged, - } } states[desired_state][self.check_category_status()]() @@ -191,19 +195,19 @@ def state_create_category(self): """Create category.""" category_spec = self.category_service.CreateSpec() category_spec.name = self.category_name - category_spec.description = self.params.get('category_description') + category_spec.description = self.params.get("category_description") - if self.params.get('category_cardinality') == 'single': + if self.params.get("category_cardinality") == "single": category_spec.cardinality = CategoryModel.Cardinality.SINGLE else: category_spec.cardinality = CategoryModel.Cardinality.MULTIPLE - associable_object_types = self.params.get('associable_object_types') + associable_object_types = self.params.get("associable_object_types") obj_types_set = [] if associable_object_types: for obj_type in associable_object_types: - if obj_type.lower() == 'all objects': + if obj_type.lower() == "all objects": obj_types_set = [] break else: @@ -217,11 +221,17 @@ def state_create_category(self): self.module.fail_json(msg="%s" % self.get_error_message(error)) if category_id: - self.module.exit_json(changed=True, - category_results=dict(msg="Category '%s' created." % category_spec.name, - category_id=category_id)) - self.module.exit_json(changed=False, - category_results=dict(msg="No category created", category_id='')) + self.module.exit_json( + changed=True, + category_results=dict( + msg="Category '%s' created." % category_spec.name, + category_id=category_id, + ), + ) + self.module.exit_json( + changed=False, + category_results=dict(msg="No category created", category_id=""), + ) def state_unchanged(self): """Return unchanged state.""" @@ -229,29 +239,38 @@ def state_unchanged(self): def state_update_category(self): """Update category.""" - category_id = self.global_categories[self.category_name]['category_id'] + category_id = self.global_categories[self.category_name]["category_id"] changed = False - results = dict(msg="Category %s is unchanged." % self.category_name, - category_id=category_id) + results = dict( + msg="Category %s is unchanged." % self.category_name, + category_id=category_id, + ) category_update_spec = self.category_service.UpdateSpec() change_list = [] - old_cat_desc = self.global_categories[self.category_name]['category_description'] - new_cat_desc = self.params.get('category_description') + old_cat_desc = self.global_categories[self.category_name][ + "category_description" + ] + new_cat_desc = self.params.get("category_description") if new_cat_desc and new_cat_desc != old_cat_desc: category_update_spec.description = new_cat_desc - results['msg'] = 'Category %s updated.' % self.category_name + results["msg"] = "Category %s updated." % self.category_name change_list.append(True) - new_cat_name = self.params.get('new_category_name') + new_cat_name = self.params.get("new_category_name") if new_cat_name in self.global_categories: - self.module.fail_json(msg="Unable to rename %s as %s already" - " exists in configuration." 
% (self.category_name, new_cat_name)) - old_cat_name = self.global_categories[self.category_name]['category_name'] + self.module.fail_json( + msg="Unable to rename %s as %s already" + " exists in configuration." + % (self.category_name, new_cat_name) + ) + old_cat_name = self.global_categories[self.category_name][ + "category_name" + ] if new_cat_name and new_cat_name != old_cat_name: category_update_spec.name = new_cat_name - results['msg'] = 'Category %s updated.' % self.category_name + results["msg"] = "Category %s updated." % self.category_name change_list.append(True) if any(change_list): @@ -261,19 +280,22 @@ def state_update_category(self): except Error as error: self.module.fail_json(msg="%s" % self.get_error_message(error)) - self.module.exit_json(changed=changed, - category_results=results) + self.module.exit_json(changed=changed, category_results=results) def state_delete_category(self): """Delete category.""" - category_id = self.global_categories[self.category_name]['category_id'] + category_id = self.global_categories[self.category_name]["category_id"] try: self.category_service.delete(category_id=category_id) except Error as error: self.module.fail_json(msg="%s" % self.get_error_message(error)) - self.module.exit_json(changed=True, - category_results=dict(msg="Category '%s' deleted." % self.category_name, - category_id=category_id)) + self.module.exit_json( + changed=True, + category_results=dict( + msg="Category '%s' deleted." % self.category_name, + category_id=category_id, + ), + ) def check_category_status(self): """ @@ -282,9 +304,9 @@ def check_category_status(self): """ if self.category_name in self.global_categories: - return 'present' + return "present" else: - return 'absent' + return "absent" def get_all_categories(self): """Retrieve all category information.""" @@ -303,19 +325,33 @@ def get_all_categories(self): def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() argument_spec.update( - category_name=dict(type='str', required=True), - category_description=dict(type='str', default='', required=False), - category_cardinality=dict(type='str', choices=["multiple", "single"], default="multiple"), - new_category_name=dict(type='str'), - state=dict(type='str', choices=['present', 'absent'], default='present'), + category_name=dict(type="str", required=True), + category_description=dict(type="str", default="", required=False), + category_cardinality=dict( + type="str", choices=["multiple", "single"], default="multiple" + ), + new_category_name=dict(type="str"), + state=dict( + type="str", choices=["present", "absent"], default="present" + ), associable_object_types=dict( - type='list', + type="list", choices=[ - 'All objects', 'Folder', 'Cluster', - 'Datacenter', 'Datastore', 'Datastore Cluster', - 'Distributed Port Group', 'Distributed Switch', - 'Host', 'Content Library', 'Library item', 'Network', - 'Resource Pool', 'vApp', 'Virtual Machine', + "All objects", + "Folder", + "Cluster", + "Datacenter", + "Datastore", + "Datastore Cluster", + "Distributed Port Group", + "Distributed Switch", + "Host", + "Content Library", + "Library item", + "Network", + "Resource Pool", + "vApp", + "Virtual Machine", ], elements=str, ), @@ -326,5 +362,5 @@ def main(): vmware_category.ensure_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_category_info.py b/plugins/modules/vmware_category_info.py index 485d4aa..aab81ba 100644 --- a/plugins/modules/vmware_category_info.py +++ 
b/plugins/modules/vmware_category_info.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_category_info short_description: Gather info about VMware tag categories @@ -34,9 +35,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about tag categories vmware_category_info: hostname: "{{ vcenter_hostname }}" @@ -60,9 +61,9 @@ query: "[?category_name==`Category0001`]" - debug: var=category_id -''' +""" -RETURN = r''' +RETURN = r""" tag_category_info: description: metadata of tag categories returned: always @@ -87,10 +88,12 @@ "category_used_by": [] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VmwareCategoryInfoManager(VmwareRestClient): @@ -114,16 +117,20 @@ def get_all_tag_categories(self): ) ) - self.module.exit_json(changed=False, tag_category_info=global_tag_categories) + self.module.exit_json( + changed=False, tag_category_info=global_tag_categories + ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_category_info = VmwareCategoryInfoManager(module) vmware_category_info.get_all_tag_categories() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cfg_backup.py b/plugins/modules/vmware_cfg_backup.py index c7fe859..c681354 100644 --- a/plugins/modules/vmware_cfg_backup.py +++ b/plugins/modules/vmware_cfg_backup.py @@ -6,13 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_cfg_backup short_description: Backup / Restore / Reset ESXi host configuration @@ -53,9 +56,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Save the ESXi configuration locally by authenticating directly against the ESXi host vmware_cfg_backup: hostname: '{{ esxi_hostname }}' @@ -74,23 +77,29 @@ state: saved dest: /tmp/ delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = """ dest_file: description: The full path of where the file holding the ESXi configurations was stored returned: changed type: str sample: /tmp/configBundle-esxi.host.domain.tgz -''' +""" import os + try: from pyVmomi import vim except ImportError: pass -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, get_all_objs, wait_for_task, PyVmomi +from 
ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + get_all_objs, + wait_for_task, + PyVmomi, +) from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import open_url from ansible.module_utils.six.moves.urllib.error import HTTPError @@ -100,61 +109,80 @@ class VMwareConfigurationBackup(PyVmomi): def __init__(self, module): super(VMwareConfigurationBackup, self).__init__(module) - self.state = self.module.params['state'] - self.dest = self.module.params['dest'] - self.src = self.module.params['src'] - self.hostname = self.module.params['hostname'] - self.username = self.module.params['username'] - self.password = self.module.params['password'] - self.validate_certs = self.module.params['validate_certs'] - self.esxi_hostname = self.module.params.get('esxi_hostname', None) + self.state = self.module.params["state"] + self.dest = self.module.params["dest"] + self.src = self.module.params["src"] + self.hostname = self.module.params["hostname"] + self.username = self.module.params["username"] + self.password = self.module.params["password"] + self.validate_certs = self.module.params["validate_certs"] + self.esxi_hostname = self.module.params.get("esxi_hostname", None) self.host = self.find_host_system() def find_host_system(self): if self.esxi_hostname: - host_system_obj = self.find_hostsystem_by_name(host_name=self.esxi_hostname) + host_system_obj = self.find_hostsystem_by_name( + host_name=self.esxi_hostname + ) if host_system_obj: return host_system_obj else: - self.module.fail_json(msg="Failed to find ESXi %s" % self.esxi_hostname) + self.module.fail_json( + msg="Failed to find ESXi %s" % self.esxi_hostname + ) host_system = get_all_objs(self.content, [vim.HostSystem]) return list(host_system)[0] def process_state(self): - if self.state == 'saved': + if self.state == "saved": self.save_configuration() - if self.state == 'absent': + if self.state == "absent": self.reset_configuration() - if self.state == 'loaded': + if self.state == "loaded": self.load_configuration() def load_configuration(self): if not os.path.isfile(self.src): - self.module.fail_json(msg="Source file {0} does not exist".format(self.src)) + self.module.fail_json( + msg="Source file {0} does not exist".format(self.src) + ) - url = self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL() - url = url.replace('*', self.host.name) + url = ( + self.host.configManager.firmwareSystem.QueryFirmwareConfigUploadURL() + ) + url = url.replace("*", self.host.name) # find manually the url if there is a redirect because urllib2 -per RFC- doesn't do automatic redirects for PUT requests try: - request = open_url(url=url, method='HEAD', validate_certs=self.validate_certs) + request = open_url( + url=url, method="HEAD", validate_certs=self.validate_certs + ) except HTTPError as e: url = e.geturl() try: - with open(self.src, 'rb') as file: + with open(self.src, "rb") as file: data = file.read() - request = open_url(url=url, data=data, method='PUT', validate_certs=self.validate_certs, - url_username=self.username, url_password=self.password, force_basic_auth=True) + request = open_url( + url=url, + data=data, + method="PUT", + validate_certs=self.validate_certs, + url_username=self.username, + url_password=self.password, + force_basic_auth=True, + ) except Exception as e: self.module.fail_json(msg=to_native(e)) if not self.host.runtime.inMaintenanceMode: self.enter_maintenance() try: - 
self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration(force=True) + self.host.configManager.firmwareSystem.RestoreFirmwareConfiguration( + force=True + ) self.module.exit_json(changed=True) except Exception as e: self.exit_maintenance() @@ -171,10 +199,12 @@ def reset_configuration(self): self.module.fail_json(msg=to_native(e)) def save_configuration(self): - url = self.host.configManager.firmwareSystem.BackupFirmwareConfiguration() - url = url.replace('*', self.host.name) + url = ( + self.host.configManager.firmwareSystem.BackupFirmwareConfiguration() + ) + url = url.replace("*", self.host.name) if os.path.isdir(self.dest): - filename = url.rsplit('/', 1)[1] + filename = url.rsplit("/", 1)[1] self.dest = os.path.join(self.dest, filename) else: filename, file_extension = os.path.splitext(self.dest) @@ -186,8 +216,11 @@ def save_configuration(self): file.write(request.read()) self.module.exit_json(changed=True, dest_file=self.dest) except IOError as e: - self.module.fail_json(msg="Failed to write backup file. Ensure that " - "the dest path exists and is writable. Details : %s" % to_native(e)) + self.module.fail_json( + msg="Failed to write backup file. Ensure that " + "the dest path exists and is writable. Details : %s" + % to_native(e) + ) except Exception as e: self.module.fail_json(msg=to_native(e)) @@ -196,33 +229,48 @@ def enter_maintenance(self): task = self.host.EnterMaintenanceMode_Task(timeout=15) success, result = wait_for_task(task) except Exception as e: - self.module.fail_json(msg="Failed to enter maintenance mode." - " Ensure that there are no powered on machines on the host. %s" % to_native(e)) + self.module.fail_json( + msg="Failed to enter maintenance mode." + " Ensure that there are no powered on machines on the host. 
%s" + % to_native(e) + ) def exit_maintenance(self): try: task = self.host.ExitMaintenanceMode_Task(timeout=15) success, result = wait_for_task(task) except Exception as generic_exc: - self.module.fail_json(msg="Failed to exit maintenance mode due to %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to exit maintenance mode due to %s" + % to_native(generic_exc) + ) def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(dest=dict(required=False, type='path'), - esxi_hostname=dict(required=False, type='str'), - src=dict(required=False, type='path'), - state=dict(required=True, choices=['saved', 'absent', 'loaded'], type='str'))) - required_if = [('state', 'saved', ['dest']), - ('state', 'loaded', ['src'])] + argument_spec.update( + dict( + dest=dict(required=False, type="path"), + esxi_hostname=dict(required=False, type="str"), + src=dict(required=False, type="path"), + state=dict( + required=True, + choices=["saved", "absent", "loaded"], + type="str", + ), + ) + ) + required_if = [("state", "saved", ["dest"]), ("state", "loaded", ["src"])] - module = AnsibleModule(argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=False) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=False, + ) vmware_cfg_backup = VMwareConfigurationBackup(module) vmware_cfg_backup.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cis_category_info.py b/plugins/modules/vmware_cis_category_info.py index ce97b6a..f178159 100644 --- a/plugins/modules/vmware_cis_category_info.py +++ b/plugins/modules/vmware_cis_category_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_cis_category_info short_description: Gathers info about all, or a specified category. 
@@ -60,14 +61,14 @@ extends_documentation_fragment: - vmware.general.VmwareRestModule.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get all categories vmware_cis_category_info: -''' +""" -RETURN = r''' +RETURN = r""" category: description: facts about the specified category returned: always @@ -75,76 +76,76 @@ sample: { "value": true } -''' +""" -from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import VmwareRestModule +from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import ( + VmwareRestModule, +) def main(): argument_spec = VmwareRestModule.create_argument_spec() argument_spec.update( - category_name=dict(type='str', required=False), - category_id=dict(type='str', required=False), - used_by_name=dict(type='str', required=False), + category_name=dict(type="str", required=False), + category_id=dict(type="str", required=False), + used_by_name=dict(type="str", required=False), used_by_type=dict( - type='str', + type="str", required=False, choices=[ - 'cluster', - 'content_library', - 'content_type', - 'datacenter', - 'datastore', - 'folder', - 'host', - 'local_library', - 'network', - 'resource_pool', - 'subscribed_library', - 'tag', - 'vm', + "cluster", + "content_library", + "content_type", + "datacenter", + "datastore", + "folder", + "host", + "local_library", + "network", + "resource_pool", + "subscribed_library", + "tag", + "vm", ], ), - used_by_id=dict(type='str', required=False), + used_by_id=dict(type="str", required=False), ) - required_together = [ - ['used_by_name', 'used_by_type'] - ] + required_together = [["used_by_name", "used_by_type"]] mutually_exclusive = [ - ['category_name', 'category_id', 'used_by_id', 'used_by_name'], - ['category_name', 'category_id', 'used_by_id', 'used_by_type'], + ["category_name", "category_id", "used_by_id", "used_by_name"], + ["category_name", "category_id", "used_by_id", "used_by_type"], ] - module = VmwareRestModule(argument_spec=argument_spec, - required_together=required_together, - mutually_exclusive=mutually_exclusive, - supports_check_mode=True) + module = VmwareRestModule( + argument_spec=argument_spec, + required_together=required_together, + mutually_exclusive=mutually_exclusive, + supports_check_mode=True, + ) - category_name = module.params['category_name'] - category_id = module.params['category_id'] - used_by_name = module.params['used_by_name'] - used_by_type = module.params['used_by_type'] - used_by_id = module.params['used_by_id'] + category_name = module.params["category_name"] + category_id = module.params["category_id"] + used_by_name = module.params["used_by_name"] + used_by_type = module.params["used_by_type"] + used_by_id = module.params["used_by_id"] - url = module.get_url('category') + url = module.get_url("category") data = {} if category_name is not None: - category_id = module.get_id('category', category_name) + category_id = module.get_id("category", category_name) if category_id is not None: - url += '/id:' + category_id + url += "/id:" + category_id module.get(url=url) else: if used_by_name is not None: used_by_id = module.get_id(used_by_type, used_by_name) - url += '?~action=list-used-categories' - data = { - 'used_by_entity': used_by_id - } + url += "?~action=list-used-categories" + data = {"used_by_entity": used_by_id} module.post(url=url, data=data) module.exit() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cluster.py b/plugins/modules/vmware_cluster.py 
index b785bbb..72b4e9a 100644 --- a/plugins/modules/vmware_cluster.py +++ b/plugins/modules/vmware_cluster.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_cluster short_description: Manage VMware vSphere clusters @@ -179,7 +180,7 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" EXAMPLES = r""" - name: Create Cluster @@ -233,23 +234,28 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name, - vmware_argument_spec, wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + TaskError, + find_datacenter_by_name, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native class VMwareCluster(PyVmomi): def __init__(self, module): super(VMwareCluster, self).__init__(module) - self.cluster_name = module.params['cluster_name'] - self.datacenter_name = module.params['datacenter'] - self.ignore_drs = module.params['ignore_drs'] - self.ignore_ha = module.params['ignore_ha'] - self.ignore_vsan = module.params['ignore_vsan'] - self.enable_drs = module.params['enable_drs'] - self.enable_ha = module.params['enable_ha'] - self.enable_vsan = module.params['enable_vsan'] - self.desired_state = module.params['state'] + self.cluster_name = module.params["cluster_name"] + self.datacenter_name = module.params["datacenter"] + self.ignore_drs = module.params["ignore_drs"] + self.ignore_ha = module.params["ignore_ha"] + self.ignore_vsan = module.params["ignore_vsan"] + self.enable_drs = module.params["enable_drs"] + self.enable_ha = module.params["enable_ha"] + self.enable_vsan = module.params["enable_vsan"] + self.desired_state = module.params["state"] self.datacenter = None self.cluster = None @@ -258,14 +264,14 @@ def process_state(self): Manage internal states of cluster """ cluster_states = { - 'absent': { - 'present': self.state_destroy_cluster, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_destroy_cluster, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_update_cluster, + "absent": self.state_create_cluster, }, - 'present': { - 'present': self.state_update_cluster, - 'absent': self.state_create_cluster, - } } current_state = self.check_cluster_configuration() # Based on the desired_state and the current_state call @@ -278,34 +284,48 @@ def configure_ha(self): Returns: Cluster DAS configuration spec """ - msg = 'Configuring HA using vmware_cluster module is deprecated and will be removed in version 2.12. ' \ - 'Please use vmware_cluster_ha module for the new functionality.' - self.module.deprecate(msg, '2.12') + msg = ( + "Configuring HA using vmware_cluster module is deprecated and will be removed in version 2.12. " + "Please use vmware_cluster_ha module for the new functionality." 
+ ) + self.module.deprecate(msg, "2.12") das_config = vim.cluster.DasConfigInfo() das_config.enabled = self.enable_ha - das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy() - das_config.admissionControlPolicy.failoverLevel = self.params.get('ha_failover_level') - - ha_vm_monitoring = self.params.get('ha_vm_monitoring') + das_config.admissionControlPolicy = ( + vim.cluster.FailoverLevelAdmissionControlPolicy() + ) + das_config.admissionControlPolicy.failoverLevel = self.params.get( + "ha_failover_level" + ) + + ha_vm_monitoring = self.params.get("ha_vm_monitoring") das_vm_config = None - if ha_vm_monitoring in ['vmMonitoringOnly', 'vmAndAppMonitoring']: + if ha_vm_monitoring in ["vmMonitoringOnly", "vmAndAppMonitoring"]: vm_tool_spec = vim.cluster.VmToolsMonitoringSettings() vm_tool_spec.enabled = True vm_tool_spec.vmMonitoring = ha_vm_monitoring - vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval') - vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time') - vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures') - vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window') + vm_tool_spec.failureInterval = self.params.get( + "ha_vm_failure_interval" + ) + vm_tool_spec.minUpTime = self.params.get("ha_vm_min_up_time") + vm_tool_spec.maxFailures = self.params.get("ha_vm_max_failures") + vm_tool_spec.maxFailureWindow = self.params.get( + "ha_vm_max_failure_window" + ) das_vm_config = vim.cluster.DasVmSettings() - das_vm_config.restartPriority = self.params.get('ha_restart_priority') + das_vm_config.restartPriority = self.params.get( + "ha_restart_priority" + ) das_vm_config.isolationResponse = None das_vm_config.vmToolsMonitoringSettings = vm_tool_spec - das_config.admissionControlEnabled = self.params.get('ha_admission_control_enabled') + das_config.admissionControlEnabled = self.params.get( + "ha_admission_control_enabled" + ) - das_config.hostMonitoring = self.params.get('ha_host_monitoring') + das_config.hostMonitoring = self.params.get("ha_host_monitoring") das_config.vmMonitoring = ha_vm_monitoring das_config.defaultVmSettings = das_vm_config @@ -317,16 +337,22 @@ def configure_drs(self): Returns: Cluster DRS configuration spec """ - msg = 'Configuring DRS using vmware_cluster module is deprecated and will be removed in version 2.12. ' \ - 'Please use vmware_cluster_drs module for the new functionality.' - self.module.deprecate(msg, '2.12') + msg = ( + "Configuring DRS using vmware_cluster module is deprecated and will be removed in version 2.12. " + "Please use vmware_cluster_drs module for the new functionality." + ) + self.module.deprecate(msg, "2.12") drs_config = vim.cluster.DrsConfigInfo() drs_config.enabled = self.enable_drs - drs_config.enableVmBehaviorOverrides = self.params.get('drs_enable_vm_behavior_overrides') - drs_config.defaultVmBehavior = self.params.get('drs_default_vm_behavior') - drs_config.vmotionRate = self.params.get('drs_vmotion_rate') + drs_config.enableVmBehaviorOverrides = self.params.get( + "drs_enable_vm_behavior_overrides" + ) + drs_config.defaultVmBehavior = self.params.get( + "drs_default_vm_behavior" + ) + drs_config.vmotionRate = self.params.get("drs_vmotion_rate") return drs_config @@ -336,14 +362,20 @@ def configure_vsan(self): Returns: Cluster VSAN configuration spec """ - msg = 'Configuring VSAN using vmware_cluster module is deprecated and will be removed in version 2.12. ' \ - 'Please use vmware_cluster_vsan module for the new functionality.' 
- self.module.deprecate(msg, '2.12') + msg = ( + "Configuring VSAN using vmware_cluster module is deprecated and will be removed in version 2.12. " + "Please use vmware_cluster_vsan module for the new functionality." + ) + self.module.deprecate(msg, "2.12") vsan_config = vim.vsan.cluster.ConfigInfo() vsan_config.enabled = self.enable_vsan - vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo() - vsan_config.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage') + vsan_config.defaultConfig = ( + vim.vsan.cluster.ConfigInfo.HostDefaultInfo() + ) + vsan_config.defaultConfig.autoClaimStorage = self.params.get( + "vsan_auto_claim_storage" + ) return vsan_config def state_create_cluster(self): @@ -359,30 +391,40 @@ def state_create_cluster(self): if self.enable_vsan and not self.ignore_vsan: cluster_config_spec.vsanConfig = self.configure_vsan() if not self.module.check_mode: - self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec) + self.datacenter.hostFolder.CreateClusterEx( + self.cluster_name, cluster_config_spec + ) self.module.exit_json(changed=True) except vim.fault.DuplicateName: # To match other vmware_* modules pass except vmodl.fault.InvalidArgument as invalid_args: - self.module.fail_json(msg="Cluster configuration specification" - " parameter is invalid : %s" % to_native(invalid_args.msg)) + self.module.fail_json( + msg="Cluster configuration specification" + " parameter is invalid : %s" % to_native(invalid_args.msg) + ) except vim.fault.InvalidName as invalid_name: - self.module.fail_json(msg="'%s' is an invalid name for a" - " cluster : %s" % (self.cluster_name, - to_native(invalid_name.msg))) + self.module.fail_json( + msg="'%s' is an invalid name for a" + " cluster : %s" + % (self.cluster_name, to_native(invalid_name.msg)) + ) except vmodl.fault.NotSupported as not_supported: # This should never happen - self.module.fail_json(msg="Trying to create a cluster on an incorrect" - " folder object : %s" % to_native(not_supported.msg)) + self.module.fail_json( + msg="Trying to create a cluster on an incorrect" + " folder object : %s" % to_native(not_supported.msg) + ) except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) except vmodl.MethodFault as method_fault: # This should never happen either self.module.fail_json(msg=to_native(method_fault.msg)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to create cluster" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to create cluster" + " due to generic exception %s" % to_native(generic_exc) + ) def state_destroy_cluster(self): """ @@ -402,8 +444,10 @@ def state_destroy_cluster(self): except vmodl.MethodFault as method_fault: self.module.fail_json(msg=to_native(method_fault.msg)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to destroy cluster" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to destroy cluster" + " due to generic exception %s" % to_native(generic_exc) + ) def state_exit_unchanged(self): """ @@ -430,7 +474,9 @@ def state_update_cluster(self): try: if not self.module.check_mode and diff: - task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) + task = self.cluster.ReconfigureComputeResource_Task( + cluster_config_spec, True + ) changed, result = wait_for_task(task) self.module.exit_json(changed=changed, result=result) except vmodl.RuntimeFault as 
runtime_fault: @@ -440,8 +486,10 @@ def state_update_cluster(self): except TaskError as task_e: self.module.fail_json(msg=to_native(task_e)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to update cluster" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to update cluster" + " due to generic exception %s" % to_native(generic_exc) + ) def check_ha_config_diff(self): """ @@ -450,18 +498,30 @@ def check_ha_config_diff(self): """ das_config = self.cluster.configurationEx.dasConfig - if das_config.enabled != self.enable_ha or \ - das_config.admissionControlPolicy.failoverLevel != self.params.get('ha_failover_level') or \ - das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or \ - das_config.hostMonitoring != self.params.get('ha_host_monitoring') or \ - das_config.admissionControlPolicy.failoverLevel != self.params.get('ha_failover_level') or \ - das_config.admissionControlEnabled != self.params.get('ha_admission_control_enabled') or \ - das_config.defaultVmSettings.restartPriority != self.params.get('ha_restart_priority') or \ - das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring != self.params.get('ha_vm_monitoring') or \ - das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval != self.params.get('ha_vm_failure_interval') or \ - das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime != self.params.get('ha_vm_min_up_time') or \ - das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures != self.params.get('ha_vm_max_failures') or \ - das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow != self.params.get('ha_vm_max_failure_window'): + if ( + das_config.enabled != self.enable_ha + or das_config.admissionControlPolicy.failoverLevel + != self.params.get("ha_failover_level") + or das_config.vmMonitoring != self.params.get("ha_vm_monitoring") + or das_config.hostMonitoring + != self.params.get("ha_host_monitoring") + or das_config.admissionControlPolicy.failoverLevel + != self.params.get("ha_failover_level") + or das_config.admissionControlEnabled + != self.params.get("ha_admission_control_enabled") + or das_config.defaultVmSettings.restartPriority + != self.params.get("ha_restart_priority") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring + != self.params.get("ha_vm_monitoring") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval + != self.params.get("ha_vm_failure_interval") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime + != self.params.get("ha_vm_min_up_time") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures + != self.params.get("ha_vm_max_failures") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow + != self.params.get("ha_vm_max_failure_window") + ): return True return False @@ -473,10 +533,14 @@ def check_drs_config_diff(self): """ drs_config = self.cluster.configurationEx.drsConfig - if drs_config.enabled != self.enable_drs or \ - drs_config.enableVmBehaviorOverrides != self.params.get('drs_enable_vm_behavior_overrides') or \ - drs_config.defaultVmBehavior != self.params.get('drs_default_vm_behavior') or \ - drs_config.vmotionRate != self.params.get('drs_vmotion_rate'): + if ( + drs_config.enabled != self.enable_drs + or drs_config.enableVmBehaviorOverrides + != self.params.get("drs_enable_vm_behavior_overrides") + or drs_config.defaultVmBehavior + != self.params.get("drs_default_vm_behavior") + or drs_config.vmotionRate 
!= self.params.get("drs_vmotion_rate") + ): return True return False @@ -488,8 +552,11 @@ def check_vsan_config_diff(self): """ vsan_config = self.cluster.configurationEx.vsanConfigInfo - if vsan_config.enabled != self.enable_vsan or \ - vsan_config.defaultConfig.autoClaimStorage != self.params.get('vsan_auto_claim_storage'): + if ( + vsan_config.enabled != self.enable_vsan + or vsan_config.defaultConfig.autoClaimStorage + != self.params.get("vsan_auto_claim_storage") + ): return True return False @@ -500,76 +567,94 @@ def check_cluster_configuration(self): """ try: - self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) + self.datacenter = find_datacenter_by_name( + self.content, self.datacenter_name + ) if self.datacenter is None: - self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name) - self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name) + self.module.fail_json( + msg="Datacenter %s does not exist." % self.datacenter_name + ) + self.cluster = self.find_cluster_by_name( + cluster_name=self.cluster_name + ) if self.cluster is None: - return 'absent' + return "absent" - return 'present' + return "present" except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) except vmodl.MethodFault as method_fault: self.module.fail_json(msg=to_native(method_fault.msg)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to check configuration" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to check configuration" + " due to generic exception %s" % to_native(generic_exc) + ) def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - cluster_name=dict(type='str', required=True), - datacenter=dict(type='str', required=True, aliases=['datacenter_name']), - state=dict(type='str', - default='present', - choices=['absent', 'present']), - # DRS - ignore_drs=dict(type='bool', default=False), - enable_drs=dict(type='bool', default=False), - drs_enable_vm_behavior_overrides=dict(type='bool', default=True), - drs_default_vm_behavior=dict(type='str', - choices=['fullyAutomated', 'manual', 'partiallyAutomated'], - default='fullyAutomated'), - drs_vmotion_rate=dict(type='int', - choices=range(1, 6), - default=3), - # HA - ignore_ha=dict(type='bool', default=False), - enable_ha=dict(type='bool', default=False), - ha_failover_level=dict(type='int', default=2), - ha_host_monitoring=dict(type='str', - default='enabled', - choices=['enabled', 'disabled']), - # HA VM Monitoring related parameters - ha_vm_monitoring=dict(type='str', - choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'], - default='vmMonitoringDisabled'), - ha_vm_failure_interval=dict(type='int', default=30), - ha_vm_min_up_time=dict(type='int', default=120), - ha_vm_max_failures=dict(type='int', default=3), - ha_vm_max_failure_window=dict(type='int', default=-1), - - ha_restart_priority=dict(type='str', - choices=['high', 'low', 'medium', 'disabled'], - default='medium'), - ha_admission_control_enabled=dict(type='bool', default=True), - # VSAN - ignore_vsan=dict(type='bool', default=False), - enable_vsan=dict(type='bool', default=False), - vsan_auto_claim_storage=dict(type='bool', default=False), - )) + argument_spec.update( + dict( + cluster_name=dict(type="str", required=True), + datacenter=dict( + type="str", required=True, aliases=["datacenter_name"] + ), + state=dict( + type="str", default="present", choices=["absent", 
"present"] + ), + # DRS + ignore_drs=dict(type="bool", default=False), + enable_drs=dict(type="bool", default=False), + drs_enable_vm_behavior_overrides=dict(type="bool", default=True), + drs_default_vm_behavior=dict( + type="str", + choices=["fullyAutomated", "manual", "partiallyAutomated"], + default="fullyAutomated", + ), + drs_vmotion_rate=dict(type="int", choices=range(1, 6), default=3), + # HA + ignore_ha=dict(type="bool", default=False), + enable_ha=dict(type="bool", default=False), + ha_failover_level=dict(type="int", default=2), + ha_host_monitoring=dict( + type="str", default="enabled", choices=["enabled", "disabled"] + ), + # HA VM Monitoring related parameters + ha_vm_monitoring=dict( + type="str", + choices=[ + "vmAndAppMonitoring", + "vmMonitoringOnly", + "vmMonitoringDisabled", + ], + default="vmMonitoringDisabled", + ), + ha_vm_failure_interval=dict(type="int", default=30), + ha_vm_min_up_time=dict(type="int", default=120), + ha_vm_max_failures=dict(type="int", default=3), + ha_vm_max_failure_window=dict(type="int", default=-1), + ha_restart_priority=dict( + type="str", + choices=["high", "low", "medium", "disabled"], + default="medium", + ), + ha_admission_control_enabled=dict(type="bool", default=True), + # VSAN + ignore_vsan=dict(type="bool", default=False), + enable_vsan=dict(type="bool", default=False), + vsan_auto_claim_storage=dict(type="bool", default=False), + ) + ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_cluster = VMwareCluster(module) vmware_cluster.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cluster_drs.py b/plugins/modules/vmware_cluster_drs.py index 05196fc..850ab31 100644 --- a/plugins/modules/vmware_cluster_drs.py +++ b/plugins/modules/vmware_cluster_drs.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_cluster_drs short_description: Manage Distributed Resource Scheduler (DRS) on VMware vSphere clusters @@ -77,7 +78,7 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" EXAMPLES = r""" - name: Enable DRS @@ -124,31 +125,48 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name, - vmware_argument_spec, wait_for_task, option_diff) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + TaskError, + find_datacenter_by_name, + vmware_argument_spec, + wait_for_task, + option_diff, +) from ansible.module_utils._text import to_native class VMwareCluster(PyVmomi): def __init__(self, module): super(VMwareCluster, self).__init__(module) - self.cluster_name = module.params['cluster_name'] - self.datacenter_name = module.params['datacenter'] - self.enable_drs = module.params['enable_drs'] + self.cluster_name = module.params["cluster_name"] + self.datacenter_name = module.params["datacenter"] + self.enable_drs = module.params["enable_drs"] self.datacenter = None self.cluster = None - self.datacenter = 
find_datacenter_by_name(self.content, self.datacenter_name) + self.datacenter = find_datacenter_by_name( + self.content, self.datacenter_name + ) if self.datacenter is None: - self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name) + self.module.fail_json( + msg="Datacenter %s does not exist." % self.datacenter_name + ) - self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name) + self.cluster = self.find_cluster_by_name( + cluster_name=self.cluster_name + ) if self.cluster is None: - self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name) + self.module.fail_json( + msg="Cluster %s does not exist." % self.cluster_name + ) - self.advanced_settings = self.params.get('advanced_settings') + self.advanced_settings = self.params.get("advanced_settings") if self.advanced_settings: - self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.drsConfig.option) + self.changed_advanced_settings = option_diff( + self.advanced_settings, + self.cluster.configurationEx.drsConfig.option, + ) else: self.changed_advanced_settings = None @@ -160,10 +178,14 @@ def check_drs_config_diff(self): """ drs_config = self.cluster.configurationEx.drsConfig - if drs_config.enabled != self.enable_drs or \ - drs_config.enableVmBehaviorOverrides != self.params.get('drs_enable_vm_behavior_overrides') or \ - drs_config.defaultVmBehavior != self.params.get('drs_default_vm_behavior') or \ - drs_config.vmotionRate != self.params.get('drs_vmotion_rate'): + if ( + drs_config.enabled != self.enable_drs + or drs_config.enableVmBehaviorOverrides + != self.params.get("drs_enable_vm_behavior_overrides") + or drs_config.defaultVmBehavior + != self.params.get("drs_default_vm_behavior") + or drs_config.vmotionRate != self.params.get("drs_vmotion_rate") + ): return True if self.changed_advanced_settings: @@ -183,15 +205,25 @@ def configure_drs(self): cluster_config_spec = vim.cluster.ConfigSpecEx() cluster_config_spec.drsConfig = vim.cluster.DrsConfigInfo() cluster_config_spec.drsConfig.enabled = self.enable_drs - cluster_config_spec.drsConfig.enableVmBehaviorOverrides = self.params.get('drs_enable_vm_behavior_overrides') - cluster_config_spec.drsConfig.defaultVmBehavior = self.params.get('drs_default_vm_behavior') - cluster_config_spec.drsConfig.vmotionRate = self.params.get('drs_vmotion_rate') + cluster_config_spec.drsConfig.enableVmBehaviorOverrides = self.params.get( + "drs_enable_vm_behavior_overrides" + ) + cluster_config_spec.drsConfig.defaultVmBehavior = self.params.get( + "drs_default_vm_behavior" + ) + cluster_config_spec.drsConfig.vmotionRate = self.params.get( + "drs_vmotion_rate" + ) if self.changed_advanced_settings: - cluster_config_spec.drsConfig.option = self.changed_advanced_settings + cluster_config_spec.drsConfig.option = ( + self.changed_advanced_settings + ) try: - task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) + task = self.cluster.ReconfigureComputeResource_Task( + cluster_config_spec, True + ) changed, result = wait_for_task(task) except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) @@ -200,8 +232,10 @@ def configure_drs(self): except TaskError as task_e: self.module.fail_json(msg=to_native(task_e)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to update cluster" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to update cluster" + " due to generic exception %s" % 
to_native(generic_exc) + ) else: changed = True @@ -210,29 +244,34 @@ def configure_drs(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - cluster_name=dict(type='str', required=True), - datacenter=dict(type='str', required=True, aliases=['datacenter_name']), - # DRS - enable_drs=dict(type='bool', default=False), - drs_enable_vm_behavior_overrides=dict(type='bool', default=True), - drs_default_vm_behavior=dict(type='str', - choices=['fullyAutomated', 'manual', 'partiallyAutomated'], - default='fullyAutomated'), - drs_vmotion_rate=dict(type='int', - choices=range(1, 6), - default=3), - advanced_settings=dict(type='dict', default=dict(), required=False), - )) + argument_spec.update( + dict( + cluster_name=dict(type="str", required=True), + datacenter=dict( + type="str", required=True, aliases=["datacenter_name"] + ), + # DRS + enable_drs=dict(type="bool", default=False), + drs_enable_vm_behavior_overrides=dict(type="bool", default=True), + drs_default_vm_behavior=dict( + type="str", + choices=["fullyAutomated", "manual", "partiallyAutomated"], + default="fullyAutomated", + ), + drs_vmotion_rate=dict(type="int", choices=range(1, 6), default=3), + advanced_settings=dict( + type="dict", default=dict(), required=False + ), + ) + ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_cluster_drs = VMwareCluster(module) vmware_cluster_drs.configure_drs() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cluster_ha.py b/plugins/modules/vmware_cluster_ha.py index e99e8ee..3b7f8c4 100644 --- a/plugins/modules/vmware_cluster_ha.py +++ b/plugins/modules/vmware_cluster_ha.py @@ -11,12 +11,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_cluster_ha short_description: Manage High Availability (HA) on VMware vSphere clusters @@ -180,7 +180,7 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" EXAMPLES = r""" - name: Enable HA without admission control @@ -230,40 +230,61 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name, - vmware_argument_spec, wait_for_task, option_diff) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + TaskError, + find_datacenter_by_name, + vmware_argument_spec, + wait_for_task, + option_diff, +) from ansible.module_utils._text import to_native class VMwareCluster(PyVmomi): def __init__(self, module): super(VMwareCluster, self).__init__(module) - self.cluster_name = module.params['cluster_name'] - self.datacenter_name = module.params['datacenter'] - self.enable_ha = module.params['enable_ha'] + self.cluster_name = module.params["cluster_name"] + self.datacenter_name = module.params["datacenter"] + self.enable_ha = module.params["enable_ha"] self.datacenter = None self.cluster = None - self.host_isolation_response = getattr(vim.cluster.DasVmSettings.IsolationResponse, self.params.get('host_isolation_response')) + self.host_isolation_response = getattr( + vim.cluster.DasVmSettings.IsolationResponse, + self.params.get("host_isolation_response"), + ) if self.enable_ha and ( - 
self.params.get('slot_based_admission_control') or - self.params.get('reservation_based_admission_control') or - self.params.get('failover_host_admission_control')): + self.params.get("slot_based_admission_control") + or self.params.get("reservation_based_admission_control") + or self.params.get("failover_host_admission_control") + ): self.ha_admission_control = True else: self.ha_admission_control = False - self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) + self.datacenter = find_datacenter_by_name( + self.content, self.datacenter_name + ) if self.datacenter is None: - self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name) + self.module.fail_json( + msg="Datacenter %s does not exist." % self.datacenter_name + ) - self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name) + self.cluster = self.find_cluster_by_name( + cluster_name=self.cluster_name + ) if self.cluster is None: - self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name) + self.module.fail_json( + msg="Cluster %s does not exist." % self.cluster_name + ) - self.advanced_settings = self.params.get('advanced_settings') + self.advanced_settings = self.params.get("advanced_settings") if self.advanced_settings: - self.changed_advanced_settings = option_diff(self.advanced_settings, self.cluster.configurationEx.dasConfig.option) + self.changed_advanced_settings = option_diff( + self.advanced_settings, + self.cluster.configurationEx.dasConfig.option, + ) else: self.changed_advanced_settings = None @@ -273,14 +294,20 @@ def get_failover_hosts(self): Returns: List of ESXi hosts sorted by name """ - policy = self.params.get('failover_host_admission_control') + policy = self.params.get("failover_host_admission_control") hosts = [] - all_hosts = dict((h.name, h) for h in self.get_all_hosts_by_cluster(self.cluster_name)) - for host in policy.get('failover_hosts'): + all_hosts = dict( + (h.name, h) + for h in self.get_all_hosts_by_cluster(self.cluster_name) + ) + for host in policy.get("failover_hosts"): if host in all_hosts: hosts.append(all_hosts.get(host)) else: - self.module.fail_json(msg="Host %s is not a member of cluster %s." % (host, self.cluster_name)) + self.module.fail_json( + msg="Host %s is not a member of cluster %s." 
+ % (host, self.cluster_name) + ) hosts.sort(key=lambda h: h.name) return hosts @@ -295,41 +322,74 @@ def check_ha_config_diff(self): return True if self.enable_ha and ( - das_config.vmMonitoring != self.params.get('ha_vm_monitoring') or - das_config.hostMonitoring != self.params.get('ha_host_monitoring') or - das_config.admissionControlEnabled != self.ha_admission_control or - das_config.defaultVmSettings.restartPriority != self.params.get('ha_restart_priority') or - das_config.defaultVmSettings.isolationResponse != self.host_isolation_response or - das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring != self.params.get('ha_vm_monitoring') or - das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval != self.params.get('ha_vm_failure_interval') or - das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime != self.params.get('ha_vm_min_up_time') or - das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures != self.params.get('ha_vm_max_failures') or - das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow != self.params.get('ha_vm_max_failure_window')): + das_config.vmMonitoring != self.params.get("ha_vm_monitoring") + or das_config.hostMonitoring + != self.params.get("ha_host_monitoring") + or das_config.admissionControlEnabled != self.ha_admission_control + or das_config.defaultVmSettings.restartPriority + != self.params.get("ha_restart_priority") + or das_config.defaultVmSettings.isolationResponse + != self.host_isolation_response + or das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring + != self.params.get("ha_vm_monitoring") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval + != self.params.get("ha_vm_failure_interval") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime + != self.params.get("ha_vm_min_up_time") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures + != self.params.get("ha_vm_max_failures") + or das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow + != self.params.get("ha_vm_max_failure_window") + ): return True if self.ha_admission_control: - if self.params.get('slot_based_admission_control'): - policy = self.params.get('slot_based_admission_control') - if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverLevelAdmissionControlPolicy) or \ - das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'): + if self.params.get("slot_based_admission_control"): + policy = self.params.get("slot_based_admission_control") + if not isinstance( + das_config.admissionControlPolicy, + vim.cluster.FailoverLevelAdmissionControlPolicy, + ) or das_config.admissionControlPolicy.failoverLevel != policy.get( + "failover_level" + ): return True - elif self.params.get('reservation_based_admission_control'): - policy = self.params.get('reservation_based_admission_control') - auto_compute_percentages = policy.get('auto_compute_percentages') - if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverResourcesAdmissionControlPolicy) or \ - das_config.admissionControlPolicy.autoComputePercentages != auto_compute_percentages or \ - das_config.admissionControlPolicy.failoverLevel != policy.get('failover_level'): + elif self.params.get("reservation_based_admission_control"): + policy = self.params.get("reservation_based_admission_control") + auto_compute_percentages = policy.get( + "auto_compute_percentages" + ) + if ( + not isinstance( + das_config.admissionControlPolicy, + 
vim.cluster.FailoverResourcesAdmissionControlPolicy, + ) + or das_config.admissionControlPolicy.autoComputePercentages + != auto_compute_percentages + or das_config.admissionControlPolicy.failoverLevel + != policy.get("failover_level") + ): return True if not auto_compute_percentages: - if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get('cpu_failover_resources_percent') or \ - das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get('memory_failover_resources_percent'): + if das_config.admissionControlPolicy.cpuFailoverResourcesPercent != policy.get( + "cpu_failover_resources_percent" + ) or das_config.admissionControlPolicy.memoryFailoverResourcesPercent != policy.get( + "memory_failover_resources_percent" + ): return True - elif self.params.get('failover_host_admission_control'): - policy = self.params.get('failover_host_admission_control') - if not isinstance(das_config.admissionControlPolicy, vim.cluster.FailoverHostAdmissionControlPolicy): + elif self.params.get("failover_host_admission_control"): + policy = self.params.get("failover_host_admission_control") + if not isinstance( + das_config.admissionControlPolicy, + vim.cluster.FailoverHostAdmissionControlPolicy, + ): return True - das_config.admissionControlPolicy.failoverHosts.sort(key=lambda h: h.name) - if das_config.admissionControlPolicy.failoverHosts != self.get_failover_hosts(): + das_config.admissionControlPolicy.failoverHosts.sort( + key=lambda h: h.name + ) + if ( + das_config.admissionControlPolicy.failoverHosts + != self.get_failover_hosts() + ): return True if self.changed_advanced_settings: @@ -353,49 +413,101 @@ def configure_ha(self): if self.enable_ha: vm_tool_spec = vim.cluster.VmToolsMonitoringSettings() vm_tool_spec.enabled = True - vm_tool_spec.vmMonitoring = self.params.get('ha_vm_monitoring') - vm_tool_spec.failureInterval = self.params.get('ha_vm_failure_interval') - vm_tool_spec.minUpTime = self.params.get('ha_vm_min_up_time') - vm_tool_spec.maxFailures = self.params.get('ha_vm_max_failures') - vm_tool_spec.maxFailureWindow = self.params.get('ha_vm_max_failure_window') + vm_tool_spec.vmMonitoring = self.params.get( + "ha_vm_monitoring" + ) + vm_tool_spec.failureInterval = self.params.get( + "ha_vm_failure_interval" + ) + vm_tool_spec.minUpTime = self.params.get( + "ha_vm_min_up_time" + ) + vm_tool_spec.maxFailures = self.params.get( + "ha_vm_max_failures" + ) + vm_tool_spec.maxFailureWindow = self.params.get( + "ha_vm_max_failure_window" + ) das_vm_config = vim.cluster.DasVmSettings() - das_vm_config.restartPriority = self.params.get('ha_restart_priority') - das_vm_config.isolationResponse = self.host_isolation_response + das_vm_config.restartPriority = self.params.get( + "ha_restart_priority" + ) + das_vm_config.isolationResponse = ( + self.host_isolation_response + ) das_vm_config.vmToolsMonitoringSettings = vm_tool_spec - cluster_config_spec.dasConfig.defaultVmSettings = das_vm_config + cluster_config_spec.dasConfig.defaultVmSettings = ( + das_vm_config + ) - cluster_config_spec.dasConfig.admissionControlEnabled = self.ha_admission_control + cluster_config_spec.dasConfig.admissionControlEnabled = ( + self.ha_admission_control + ) if self.ha_admission_control: - if self.params.get('slot_based_admission_control'): - cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy() - policy = self.params.get('slot_based_admission_control') - cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = 
policy.get('failover_level') - elif self.params.get('reservation_based_admission_control'): - cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverResourcesAdmissionControlPolicy() - policy = self.params.get('reservation_based_admission_control') - auto_compute_percentages = policy.get('auto_compute_percentages') - cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = auto_compute_percentages - cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get('failover_level') + if self.params.get("slot_based_admission_control"): + cluster_config_spec.dasConfig.admissionControlPolicy = ( + vim.cluster.FailoverLevelAdmissionControlPolicy() + ) + policy = self.params.get( + "slot_based_admission_control" + ) + cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get( + "failover_level" + ) + elif self.params.get( + "reservation_based_admission_control" + ): + cluster_config_spec.dasConfig.admissionControlPolicy = ( + vim.cluster.FailoverResourcesAdmissionControlPolicy() + ) + policy = self.params.get( + "reservation_based_admission_control" + ) + auto_compute_percentages = policy.get( + "auto_compute_percentages" + ) + cluster_config_spec.dasConfig.admissionControlPolicy.autoComputePercentages = ( + auto_compute_percentages + ) + cluster_config_spec.dasConfig.admissionControlPolicy.failoverLevel = policy.get( + "failover_level" + ) if not auto_compute_percentages: - cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = \ - policy.get('cpu_failover_resources_percent') - cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = \ - policy.get('memory_failover_resources_percent') - elif self.params.get('failover_host_admission_control'): - cluster_config_spec.dasConfig.admissionControlPolicy = vim.cluster.FailoverHostAdmissionControlPolicy() - policy = self.params.get('failover_host_admission_control') - cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = self.get_failover_hosts() - - cluster_config_spec.dasConfig.hostMonitoring = self.params.get('ha_host_monitoring') - cluster_config_spec.dasConfig.vmMonitoring = self.params.get('ha_vm_monitoring') + cluster_config_spec.dasConfig.admissionControlPolicy.cpuFailoverResourcesPercent = policy.get( + "cpu_failover_resources_percent" + ) + cluster_config_spec.dasConfig.admissionControlPolicy.memoryFailoverResourcesPercent = policy.get( + "memory_failover_resources_percent" + ) + elif self.params.get("failover_host_admission_control"): + cluster_config_spec.dasConfig.admissionControlPolicy = ( + vim.cluster.FailoverHostAdmissionControlPolicy() + ) + policy = self.params.get( + "failover_host_admission_control" + ) + cluster_config_spec.dasConfig.admissionControlPolicy.failoverHosts = ( + self.get_failover_hosts() + ) + + cluster_config_spec.dasConfig.hostMonitoring = self.params.get( + "ha_host_monitoring" + ) + cluster_config_spec.dasConfig.vmMonitoring = self.params.get( + "ha_vm_monitoring" + ) if self.changed_advanced_settings: - cluster_config_spec.dasConfig.option = self.changed_advanced_settings + cluster_config_spec.dasConfig.option = ( + self.changed_advanced_settings + ) try: - task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) + task = self.cluster.ReconfigureComputeResource_Task( + cluster_config_spec, True + ) changed, result = wait_for_task(task) except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) @@ -404,8 
+516,10 @@ def configure_ha(self): except TaskError as task_e: self.module.fail_json(msg=to_native(task_e)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to update cluster" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to update cluster" + " due to generic exception %s" % to_native(generic_exc) + ) else: changed = True @@ -414,56 +528,88 @@ def configure_ha(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - cluster_name=dict(type='str', required=True), - datacenter=dict(type='str', required=True, aliases=['datacenter_name']), - # HA - enable_ha=dict(type='bool', default=False), - ha_host_monitoring=dict(type='str', - default='enabled', - choices=['enabled', 'disabled']), - host_isolation_response=dict(type='str', - default='none', - choices=['none', 'powerOff', 'shutdown']), - advanced_settings=dict(type='dict', default=dict(), required=False), - # HA VM Monitoring related parameters - ha_vm_monitoring=dict(type='str', - choices=['vmAndAppMonitoring', 'vmMonitoringOnly', 'vmMonitoringDisabled'], - default='vmMonitoringDisabled'), - ha_vm_failure_interval=dict(type='int', default=30), - ha_vm_min_up_time=dict(type='int', default=120), - ha_vm_max_failures=dict(type='int', default=3), - ha_vm_max_failure_window=dict(type='int', default=-1), - - ha_restart_priority=dict(type='str', - choices=['high', 'low', 'medium', 'disabled'], - default='medium'), - # HA Admission Control related parameters - slot_based_admission_control=dict(type='dict', options=dict( - failover_level=dict(type='int', required=True), - )), - reservation_based_admission_control=dict(type='dict', options=dict( - auto_compute_percentages=dict(type='bool', default=True), - failover_level=dict(type='int', required=True), - cpu_failover_resources_percent=dict(type='int', default=50), - memory_failover_resources_percent=dict(type='int', default=50), - )), - failover_host_admission_control=dict(type='dict', options=dict( - failover_hosts=dict(type='list', elements='str', required=True), - )), - )) + argument_spec.update( + dict( + cluster_name=dict(type="str", required=True), + datacenter=dict( + type="str", required=True, aliases=["datacenter_name"] + ), + # HA + enable_ha=dict(type="bool", default=False), + ha_host_monitoring=dict( + type="str", default="enabled", choices=["enabled", "disabled"] + ), + host_isolation_response=dict( + type="str", + default="none", + choices=["none", "powerOff", "shutdown"], + ), + advanced_settings=dict( + type="dict", default=dict(), required=False + ), + # HA VM Monitoring related parameters + ha_vm_monitoring=dict( + type="str", + choices=[ + "vmAndAppMonitoring", + "vmMonitoringOnly", + "vmMonitoringDisabled", + ], + default="vmMonitoringDisabled", + ), + ha_vm_failure_interval=dict(type="int", default=30), + ha_vm_min_up_time=dict(type="int", default=120), + ha_vm_max_failures=dict(type="int", default=3), + ha_vm_max_failure_window=dict(type="int", default=-1), + ha_restart_priority=dict( + type="str", + choices=["high", "low", "medium", "disabled"], + default="medium", + ), + # HA Admission Control related parameters + slot_based_admission_control=dict( + type="dict", + options=dict(failover_level=dict(type="int", required=True)), + ), + reservation_based_admission_control=dict( + type="dict", + options=dict( + auto_compute_percentages=dict(type="bool", default=True), + failover_level=dict(type="int", required=True), + cpu_failover_resources_percent=dict( + type="int", 
default=50 + ), + memory_failover_resources_percent=dict( + type="int", default=50 + ), + ), + ), + failover_host_admission_control=dict( + type="dict", + options=dict( + failover_hosts=dict( + type="list", elements="str", required=True + ) + ), + ), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['slot_based_admission_control', 'reservation_based_admission_control', 'failover_host_admission_control'] - ] + [ + "slot_based_admission_control", + "reservation_based_admission_control", + "failover_host_admission_control", + ] + ], ) vmware_cluster_ha = VMwareCluster(module) vmware_cluster_ha.configure_ha() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cluster_info.py b/plugins/modules/vmware_cluster_info.py index 806e676..6e6fd3c 100644 --- a/plugins/modules/vmware_cluster_info.py +++ b/plugins/modules/vmware_cluster_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_cluster_info short_description: Gather info about clusters available in given vCenter @@ -51,9 +52,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather cluster info from given datacenter vmware_cluster_info: hostname: '{{ vcenter_hostname }}' @@ -82,7 +83,7 @@ show_tag: True delegate_to: localhost register: cluster_info -''' +""" RETURN = """ clusters: @@ -145,25 +146,42 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_datacenter_by_name, find_cluster_by_name -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_datacenter_by_name, + find_cluster_by_name, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VmwreClusterInfoManager(PyVmomi): def __init__(self, module): super(VmwreClusterInfoManager, self).__init__(module) - datacenter = self.params.get('datacenter') - cluster_name = self.params.get('cluster_name') + datacenter = self.params.get("datacenter") + cluster_name = self.params.get("cluster_name") self.cluster_objs = [] if datacenter: - datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter) + datacenter_obj = find_datacenter_by_name( + self.content, datacenter_name=datacenter + ) if datacenter_obj is None: - self.module.fail_json(msg="Failed to find datacenter '%s'" % datacenter) - self.cluster_objs = self.get_all_cluster_objs(parent=datacenter_obj) + self.module.fail_json( + msg="Failed to find datacenter '%s'" % datacenter + ) + self.cluster_objs = self.get_all_cluster_objs( + parent=datacenter_obj + ) elif cluster_name: - cluster_obj = find_cluster_by_name(self.content, cluster_name=cluster_name) + cluster_obj = find_cluster_by_name( + self.content, cluster_name=cluster_name + ) if cluster_obj is None: - self.module.fail_json(msg="Failed to 
find cluster '%s'" % cluster_name) + self.module.fail_json( + msg="Failed to find cluster '%s'" % cluster_name + ) self.cluster_objs = [cluster_obj] @@ -209,38 +227,58 @@ def gather_cluster_info(self): # Hosts for host in cluster.host: - hosts.append({ - 'name': host.name, - 'folder': self.get_vm_path(self.content, host), - }) + hosts.append( + { + "name": host.name, + "folder": self.get_vm_path(self.content, host), + } + ) # HA das_config = cluster.configurationEx.dasConfig if das_config.admissionControlPolicy: - ha_failover_level = das_config.admissionControlPolicy.failoverLevel + ha_failover_level = ( + das_config.admissionControlPolicy.failoverLevel + ) if das_config.defaultVmSettings: - ha_restart_priority = das_config.defaultVmSettings.restartPriority, - ha_vm_tools_monitoring = das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring, - ha_vm_min_up_time = das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime, - ha_vm_max_failures = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures, - ha_vm_max_failure_window = das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow, - ha_vm_failure_interval = das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval, + ha_restart_priority = ( + das_config.defaultVmSettings.restartPriority, + ) + ha_vm_tools_monitoring = ( + das_config.defaultVmSettings.vmToolsMonitoringSettings.vmMonitoring, + ) + ha_vm_min_up_time = ( + das_config.defaultVmSettings.vmToolsMonitoringSettings.minUpTime, + ) + ha_vm_max_failures = ( + das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailures, + ) + ha_vm_max_failure_window = ( + das_config.defaultVmSettings.vmToolsMonitoringSettings.maxFailureWindow, + ) + ha_vm_failure_interval = ( + das_config.defaultVmSettings.vmToolsMonitoringSettings.failureInterval, + ) # DRS drs_config = cluster.configurationEx.drsConfig # VSAN - if hasattr(cluster.configurationEx, 'vsanConfig'): + if hasattr(cluster.configurationEx, "vsanConfig"): vsan_config = cluster.configurationEx.vsanConfig - enabled_vsan = vsan_config.enabled, - vsan_auto_claim_storage = vsan_config.defaultConfig.autoClaimStorage, + enabled_vsan = (vsan_config.enabled,) + vsan_auto_claim_storage = ( + vsan_config.defaultConfig.autoClaimStorage, + ) tag_info = [] - if self.params.get('show_tag'): + if self.params.get("show_tag"): vmware_client = VmwareRestClient(self.module) - tag_info = vmware_client.get_tags_for_cluster(cluster_mid=cluster._moId) + tag_info = vmware_client.get_tags_for_cluster( + cluster_mid=cluster._moId + ) - results['clusters'][cluster.name] = dict( + results["clusters"][cluster.name] = dict( hosts=hosts, enable_ha=das_config.enabled, ha_failover_level=ha_failover_level, @@ -268,23 +306,24 @@ def gather_cluster_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str'), - cluster_name=dict(type='str'), - show_tag=dict(type='bool', default=False), + datacenter=dict(type="str"), + cluster_name=dict(type="str"), + show_tag=dict(type="bool", default=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'datacenter'], - ], + required_one_of=[["cluster_name", "datacenter"]], supports_check_mode=True, ) - if module._name == 'vmware_cluster_facts': - module.deprecate("The 'vmware_cluster_facts' module has been renamed to 'vmware_cluster_info'", version='2.13') + if module._name == "vmware_cluster_facts": + module.deprecate( + "The 'vmware_cluster_facts' module has been renamed to 
'vmware_cluster_info'", + version="2.13", + ) pyv = VmwreClusterInfoManager(module) pyv.gather_cluster_info() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_cluster_vsan.py b/plugins/modules/vmware_cluster_vsan.py index 7f911e3..f0f00c1 100644 --- a/plugins/modules/vmware_cluster_vsan.py +++ b/plugins/modules/vmware_cluster_vsan.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_cluster_vsan short_description: Manages virtual storage area network (vSAN) configuration on VMware vSphere clusters @@ -54,7 +55,7 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" EXAMPLES = r""" - name: Enable vSAN @@ -89,27 +90,40 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, TaskError, find_datacenter_by_name, - vmware_argument_spec, wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + TaskError, + find_datacenter_by_name, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native class VMwareCluster(PyVmomi): def __init__(self, module): super(VMwareCluster, self).__init__(module) - self.cluster_name = module.params['cluster_name'] - self.datacenter_name = module.params['datacenter'] - self.enable_vsan = module.params['enable_vsan'] + self.cluster_name = module.params["cluster_name"] + self.datacenter_name = module.params["datacenter"] + self.enable_vsan = module.params["enable_vsan"] self.datacenter = None self.cluster = None - self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) + self.datacenter = find_datacenter_by_name( + self.content, self.datacenter_name + ) if self.datacenter is None: - self.module.fail_json(msg="Datacenter %s does not exist." % self.datacenter_name) + self.module.fail_json( + msg="Datacenter %s does not exist." % self.datacenter_name + ) - self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name) + self.cluster = self.find_cluster_by_name( + cluster_name=self.cluster_name + ) if self.cluster is None: - self.module.fail_json(msg="Cluster %s does not exist." % self.cluster_name) + self.module.fail_json( + msg="Cluster %s does not exist." 
% self.cluster_name + ) def check_vsan_config_diff(self): """ @@ -119,8 +133,11 @@ def check_vsan_config_diff(self): """ vsan_config = self.cluster.configurationEx.vsanConfigInfo - if vsan_config.enabled != self.enable_vsan or \ - vsan_config.defaultConfig.autoClaimStorage != self.params.get('vsan_auto_claim_storage'): + if ( + vsan_config.enabled != self.enable_vsan + or vsan_config.defaultConfig.autoClaimStorage + != self.params.get("vsan_auto_claim_storage") + ): return True return False @@ -136,10 +153,16 @@ def configure_vsan(self): cluster_config_spec = vim.cluster.ConfigSpecEx() cluster_config_spec.vsanConfig = vim.vsan.cluster.ConfigInfo() cluster_config_spec.vsanConfig.enabled = self.enable_vsan - cluster_config_spec.vsanConfig.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo() - cluster_config_spec.vsanConfig.defaultConfig.autoClaimStorage = self.params.get('vsan_auto_claim_storage') + cluster_config_spec.vsanConfig.defaultConfig = ( + vim.vsan.cluster.ConfigInfo.HostDefaultInfo() + ) + cluster_config_spec.vsanConfig.defaultConfig.autoClaimStorage = self.params.get( + "vsan_auto_claim_storage" + ) try: - task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True) + task = self.cluster.ReconfigureComputeResource_Task( + cluster_config_spec, True + ) changed, result = wait_for_task(task) except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) @@ -148,8 +171,10 @@ def configure_vsan(self): except TaskError as task_e: self.module.fail_json(msg=to_native(task_e)) except Exception as generic_exc: - self.module.fail_json(msg="Failed to update cluster" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to update cluster" + " due to generic exception %s" % to_native(generic_exc) + ) else: changed = True @@ -158,22 +183,25 @@ def configure_vsan(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - cluster_name=dict(type='str', required=True), - datacenter=dict(type='str', required=True, aliases=['datacenter_name']), - # VSAN - enable_vsan=dict(type='bool', default=False), - vsan_auto_claim_storage=dict(type='bool', default=False), - )) + argument_spec.update( + dict( + cluster_name=dict(type="str", required=True), + datacenter=dict( + type="str", required=True, aliases=["datacenter_name"] + ), + # VSAN + enable_vsan=dict(type="bool", default=False), + vsan_auto_claim_storage=dict(type="bool", default=False), + ) + ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_cluster_vsan = VMwareCluster(module) vmware_cluster_vsan.configure_vsan() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_content_deploy_template.py b/plugins/modules/vmware_content_deploy_template.py index 198f69b..8253c16 100644 --- a/plugins/modules/vmware_content_deploy_template.py +++ b/plugins/modules/vmware_content_deploy_template.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_content_deploy_template short_description: 
Deploy Virtual Machine from template stored in content library. @@ -88,9 +89,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Deploy Virtual Machine from template in content library vmware_content_deploy_template: hostname: '{{ vcenter_hostname }}' @@ -120,9 +121,9 @@ validate_certs: False state: poweredon delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" vm_deploy_info: description: Virtual machine deployment message and vm_id returned: on success @@ -131,17 +132,22 @@ "msg": "Deployed Virtual Machine 'Sample_VM'.", "vm_id": "vm-1009" } -''' +""" import uuid from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, +) from ansible.module_utils._text import to_native HAS_VAUTOMATION_PYTHON_SDK = False try: from com.vmware.vcenter.vm_template_client import LibraryItems + HAS_VAUTOMATION_PYTHON_SDK = True except ImportError: pass @@ -151,33 +157,49 @@ class VmwareContentDeployTemplate(VmwareRestClient): def __init__(self, module): """Constructor.""" super(VmwareContentDeployTemplate, self).__init__(module) - self.template_service = self.api_client.vcenter.vm_template.LibraryItems - self.template_name = self.params.get('template') - self.vm_name = self.params.get('name') - self.datacenter = self.params.get('datacenter') - self.datastore = self.params.get('datastore') - self.folder = self.params.get('folder') - self.resourcepool = self.params.get('resource_pool') - self.cluster = self.params.get('cluster') - self.host = self.params.get('host') + self.template_service = ( + self.api_client.vcenter.vm_template.LibraryItems + ) + self.template_name = self.params.get("template") + self.vm_name = self.params.get("name") + self.datacenter = self.params.get("datacenter") + self.datastore = self.params.get("datastore") + self.folder = self.params.get("folder") + self.resourcepool = self.params.get("resource_pool") + self.cluster = self.params.get("cluster") + self.host = self.params.get("host") def deploy_vm_from_template(self, power_on=False): # Find the datacenter by the given datacenter name - self.datacenter_id = self.get_datacenter_by_name(datacenter_name=self.datacenter) + self.datacenter_id = self.get_datacenter_by_name( + datacenter_name=self.datacenter + ) if not self.datacenter_id: - self.module.fail_json(msg="Failed to find the datacenter %s" % self.datacenter) + self.module.fail_json( + msg="Failed to find the datacenter %s" % self.datacenter + ) # Find the datastore by the given datastore name - self.datastore_id = self.get_datastore_by_name(self.datacenter, self.datastore) + self.datastore_id = self.get_datastore_by_name( + self.datacenter, self.datastore + ) if not self.datastore_id: - self.module.fail_json(msg="Failed to find the datastore %s" % self.datastore) + self.module.fail_json( + msg="Failed to find the datastore %s" % self.datastore + ) # Find the LibraryItem (Template) by the given LibraryItem name - self.library_item_id = self.get_library_item_by_name(self.template_name) + self.library_item_id = self.get_library_item_by_name( + self.template_name + ) if not self.library_item_id: - 
self.module.fail_json(msg="Failed to find the library Item %s" % self.template_name) + self.module.fail_json( + msg="Failed to find the library Item %s" % self.template_name + ) # Find the folder by the given folder name self.folder_id = self.get_folder_by_name(self.datacenter, self.folder) if not self.folder_id: - self.module.fail_json(msg="Failed to find the folder %s" % self.folder) + self.module.fail_json( + msg="Failed to find the folder %s" % self.folder + ) # Find the Host by given HostName self.host_id = self.get_host_by_name(self.datacenter, self.host) if not self.host_id: @@ -185,90 +207,113 @@ def deploy_vm_from_template(self, power_on=False): # Find the resourcepool by the given resourcepool name self.resourcepool_id = None if self.resourcepool: - self.resourcepool_id = self.get_resource_pool_by_name(self.datacenter, self.resourcepool) + self.resourcepool_id = self.get_resource_pool_by_name( + self.datacenter, self.resourcepool + ) if not self.resourcepool_id: - self.module.fail_json(msg="Failed to find the resource_pool %s" % self.resourcepool) + self.module.fail_json( + msg="Failed to find the resource_pool %s" + % self.resourcepool + ) # Find the Cluster by the given Cluster name self.cluster_id = None if self.cluster: - self.cluster_id = self.get_cluster_by_name(self.datacenter, self.cluster) + self.cluster_id = self.get_cluster_by_name( + self.datacenter, self.cluster + ) if not self.cluster_id: - self.module.fail_json(msg="Failed to find the Cluster %s" % self.cluster) + self.module.fail_json( + msg="Failed to find the Cluster %s" % self.cluster + ) # Create VM placement specs - self.placement_spec = LibraryItems.DeployPlacementSpec(folder=self.folder_id, - host=self.host_id - ) + self.placement_spec = LibraryItems.DeployPlacementSpec( + folder=self.folder_id, host=self.host_id + ) if self.resourcepool_id or self.cluster_id: self.placement_spec.resource_pool = self.resourcepool_id self.placement_spec.cluster = self.cluster_id - self.vm_home_storage_spec = LibraryItems.DeploySpecVmHomeStorage(datastore=to_native(self.datastore_id)) - self.disk_storage_spec = LibraryItems.DeploySpecDiskStorage(datastore=to_native(self.datastore_id)) - self.deploy_spec = LibraryItems.DeploySpec(name=self.vm_name, - placement=self.placement_spec, - vm_home_storage=self.vm_home_storage_spec, - disk_storage=self.disk_storage_spec, - powered_on=power_on - ) - vm_id = self.template_service.deploy(self.library_item_id, self.deploy_spec) + self.vm_home_storage_spec = LibraryItems.DeploySpecVmHomeStorage( + datastore=to_native(self.datastore_id) + ) + self.disk_storage_spec = LibraryItems.DeploySpecDiskStorage( + datastore=to_native(self.datastore_id) + ) + self.deploy_spec = LibraryItems.DeploySpec( + name=self.vm_name, + placement=self.placement_spec, + vm_home_storage=self.vm_home_storage_spec, + disk_storage=self.disk_storage_spec, + powered_on=power_on, + ) + vm_id = self.template_service.deploy( + self.library_item_id, self.deploy_spec + ) if vm_id: self.module.exit_json( changed=True, vm_deploy_info=dict( msg="Deployed Virtual Machine '%s'." 
% self.vm_name, vm_id=vm_id, - ) + ), ) - self.module.exit_json(changed=False, - vm_deploy_info=dict(msg="Virtual Machine deployment failed", vm_id='')) + self.module.exit_json( + changed=False, + vm_deploy_info=dict( + msg="Virtual Machine deployment failed", vm_id="" + ), + ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() argument_spec.update( - state=dict(type='str', default='present', - choices=['present', 'poweredon']), - template=dict(type='str', aliases=['template_src'], required=True), - name=dict(type='str', required=True, aliases=['vm_name']), - datacenter=dict(type='str', required=True), - datastore=dict(type='str', required=True), - folder=dict(type='str', required=True), - host=dict(type='str', required=True), - resource_pool=dict(type='str', required=False), - cluster=dict(type='str', required=False), + state=dict( + type="str", default="present", choices=["present", "poweredon"] + ), + template=dict(type="str", aliases=["template_src"], required=True), + name=dict(type="str", required=True, aliases=["vm_name"]), + datacenter=dict(type="str", required=True), + datastore=dict(type="str", required=True), + folder=dict(type="str", required=True), + host=dict(type="str", required=True), + resource_pool=dict(type="str", required=False), + cluster=dict(type="str", required=False), + ) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} pyv = PyVmomi(module=module) vm = pyv.get_vm() if vm: module.exit_json( changed=False, vm_deploy_info=dict( - msg="Virtual Machine '%s' already Exists." % module.params['name'], + msg="Virtual Machine '%s' already Exists." 
+ % module.params["name"], vm_id=vm._moId, - ) + ), ) vmware_contentlib_create = VmwareContentDeployTemplate(module) - if module.params['state'] in ['present']: + if module.params["state"] in ["present"]: if module.check_mode: result.update( - vm_name=module.params['name'], + vm_name=module.params["name"], changed=True, - desired_operation='Create VM with PowerOff State', + desired_operation="Create VM with PowerOff State", ) module.exit_json(**result) vmware_contentlib_create.deploy_vm_from_template() - if module.params['state'] == 'poweredon': + if module.params["state"] == "poweredon": if module.check_mode: result.update( - vm_name=module.params['name'], + vm_name=module.params["name"], changed=True, - desired_operation='Create VM with PowerON State', + desired_operation="Create VM with PowerON State", ) module.exit_json(**result) vmware_contentlib_create.deploy_vm_from_template(power_on=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_content_library_info.py b/plugins/modules/vmware_content_library_info.py index 5bdce77..02b29a0 100644 --- a/plugins/modules/vmware_content_library_info.py +++ b/plugins/modules/vmware_content_library_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_content_library_info short_description: Gather information about VMWare Content Library @@ -39,9 +40,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get List of Content Libraries vmware_content_library_info: hostname: '{{ vcenter_hostname }}' @@ -57,9 +58,9 @@ library_id: '13b0f060-f4d3-4f84-b61f-0fe1b0c0a5a8' validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" content_lib_details: description: list of content library metadata returned: on success @@ -90,10 +91,12 @@ "ded9c4d5-0dcd-4837-b1d8-af7398511e33", "36b72549-14ed-4b5f-94cb-6213fecacc02" ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VmwareContentLibInfo(VmwareRestClient): @@ -105,20 +108,27 @@ def __init__(self, module): def get_all_content_libs(self): """Method to retrieve List of content libraries.""" - self.module.exit_json(changed=False, content_libs=self.content_service.content.LocalLibrary.list()) + self.module.exit_json( + changed=False, + content_libs=self.content_service.content.LocalLibrary.list(), + ) def get_content_lib_details(self, library_id): """Method to retrieve Details of contentlib with library_id""" try: - lib_details = self.content_service.content.LocalLibrary.get(library_id) + lib_details = self.content_service.content.LocalLibrary.get( + library_id + ) except Exception as e: - self.module.fail_json(exists=False, msg="%s" % self.get_error_message(e)) + self.module.fail_json( + exists=False, msg="%s" % self.get_error_message(e) + ) lib_publish_info = dict( 
persist_json_enabled=lib_details.publish_info.persist_json_enabled, authentication_method=lib_details.publish_info.authentication_method, publish_url=lib_details.publish_info.publish_url, published=lib_details.publish_info.published, - user_name=lib_details.publish_info.user_name + user_name=lib_details.publish_info.user_name, ) self.library_info.append( dict( @@ -129,27 +139,30 @@ def get_content_lib_details(self, library_id): library_creation_time=lib_details.creation_time, library_server_guid=lib_details.server_guid, library_version=lib_details.version, - library_publish_info=lib_publish_info + library_publish_info=lib_publish_info, ) ) - self.module.exit_json(exists=False, changed=False, content_lib_details=self.library_info) + self.module.exit_json( + exists=False, changed=False, content_lib_details=self.library_info + ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() - argument_spec.update( - library_id=dict(type='str', required=False), + argument_spec.update(library_id=dict(type="str", required=False)) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) vmware_contentlib_info = VmwareContentLibInfo(module) - if module.params.get('library_id'): - vmware_contentlib_info.get_content_lib_details(module.params['library_id']) + if module.params.get("library_id"): + vmware_contentlib_info.get_content_lib_details( + module.params["library_id"] + ) else: vmware_contentlib_info.get_all_content_libs() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_content_library_manager.py b/plugins/modules/vmware_content_library_manager.py index b236a6c..8248d4a 100644 --- a/plugins/modules/vmware_content_library_manager.py +++ b/plugins/modules/vmware_content_library_manager.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_content_library_manager short_description: Create, update and delete VMware content library @@ -76,9 +77,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create Content Library vmware_content_library_manager: hostname: '{{ vcenter_hostname }}' @@ -112,9 +113,9 @@ validate_certs: no state: absent delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" content_library_info: description: library creation success and library_id returned: on success @@ -125,17 +126,22 @@ "library_type": 'LOCAL', "msg": "Content Library 'demo-local-lib-4' created.", } -''' +""" import uuid from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, +) HAS_VAUTOMATION_PYTHON_SDK = False try: from com.vmware.content_client import LibraryModel from 
com.vmware.content.library_client import StorageBacking + HAS_VAUTOMATION_PYTHON_SDK = True except ImportError: pass @@ -147,11 +153,11 @@ def __init__(self, module): super(VmwareContentLibCreate, self).__init__(module) self.content_service = self.api_client self.local_libraries = dict() - self.library_name = self.params.get('library_name') - self.library_description = self.params.get('library_description') - self.library_type = self.params.get('library_type') + self.library_name = self.params.get("library_name") + self.library_description = self.params.get("library_description") + self.library_type = self.params.get("library_type") self.library_types = dict() - self.datastore_name = self.params.get('datastore_name') + self.datastore_name = self.params.get("datastore_name") self.get_all_libraries() self.pyv = PyVmomi(module=module) @@ -159,29 +165,33 @@ def process_state(self): """ Manage states of Content Library """ - self.desired_state = self.params.get('state') + self.desired_state = self.params.get("state") library_states = { - 'absent': { - 'present': self.state_destroy_library, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_destroy_library, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_update_library, + "absent": self.state_create_library, }, - 'present': { - 'present': self.state_update_library, - 'absent': self.state_create_library, - } } - library_states[self.desired_state][self.check_content_library_status()]() + library_states[self.desired_state][ + self.check_content_library_status() + ]() def get_all_libraries(self): content_libs = self.content_service.content.LocalLibrary.list() if content_libs: for content_lib in content_libs: - lib_details = self.content_service.content.LocalLibrary.get(content_lib) + lib_details = self.content_service.content.LocalLibrary.get( + content_lib + ) self.local_libraries[lib_details.name] = dict( lib_name=lib_details.name, lib_description=lib_details.description, lib_id=lib_details.id, - lib_type=lib_details.type + lib_type=lib_details.type, ) def check_content_library_status(self): @@ -190,32 +200,45 @@ def check_content_library_status(self): Returns: 'present' if library found, else 'absent' """ - ret = 'present' if self.library_name in self.local_libraries else 'absent' + ret = ( + "present" + if self.library_name in self.local_libraries + else "absent" + ) return ret def state_create_library(self): # Find the datastore by the given datastore name - datastore_id = self.pyv.find_datastore_by_name(datastore_name=self.datastore_name) + datastore_id = self.pyv.find_datastore_by_name( + datastore_name=self.datastore_name + ) if not datastore_id: - self.module.fail_json(msg="Failed to find the datastore %s" % self.datastore_name) + self.module.fail_json( + msg="Failed to find the datastore %s" % self.datastore_name + ) self.datastore_id = datastore_id._moId # Build the storage backing for the library to be created storage_backings = [] - storage_backing = StorageBacking(type=StorageBacking.Type.DATASTORE, datastore_id=self.datastore_id) + storage_backing = StorageBacking( + type=StorageBacking.Type.DATASTORE, datastore_id=self.datastore_id + ) storage_backings.append(storage_backing) # Build the specification for the library to be created create_spec = LibraryModel() create_spec.name = self.library_name create_spec.description = self.library_description - self.library_types = {'local': create_spec.LibraryType.LOCAL, - 'subscribed': create_spec.LibraryType.SUBSCRIBED} + 
self.library_types = { + "local": create_spec.LibraryType.LOCAL, + "subscribed": create_spec.LibraryType.SUBSCRIBED, + } create_spec.type = self.library_types[self.library_type] create_spec.storage_backings = storage_backings # Create a local content library backed the VC datastore - library_id = self.content_service.content.LocalLibrary.create(create_spec=create_spec, - client_token=str(uuid.uuid4())) + library_id = self.content_service.content.LocalLibrary.create( + create_spec=create_spec, client_token=str(uuid.uuid4()) + ) if library_id: self.module.exit_json( changed=True, @@ -224,10 +247,15 @@ def state_create_library(self): library_id=library_id, library_description=self.library_description, library_type=create_spec.type, - ) + ), ) - self.module.exit_json(changed=False, - content_library_info=dict(msg="Content Library not created. Datastore and library_type required", library_id='')) + self.module.exit_json( + changed=False, + content_library_info=dict( + msg="Content Library not created. Datastore and library_type required", + library_id="", + ), + ) def state_update_library(self): """ @@ -235,32 +263,43 @@ def state_update_library(self): """ changed = False - library_id = self.local_libraries[self.library_name]['lib_id'] - content_library_info = dict(msg="Content Library %s is unchanged." % self.library_name, library_id=library_id) + library_id = self.local_libraries[self.library_name]["lib_id"] + content_library_info = dict( + msg="Content Library %s is unchanged." % self.library_name, + library_id=library_id, + ) library_update_spec = LibraryModel() - library_desc = self.local_libraries[self.library_name]['lib_description'] - desired_lib_desc = self.params.get('library_description') + library_desc = self.local_libraries[self.library_name][ + "lib_description" + ] + desired_lib_desc = self.params.get("library_description") if library_desc != desired_lib_desc: library_update_spec.description = desired_lib_desc - self.content_service.content.LocalLibrary.update(library_id, library_update_spec) - content_library_info['msg'] = 'Content Library %s updated.' % self.library_name + self.content_service.content.LocalLibrary.update( + library_id, library_update_spec + ) + content_library_info["msg"] = ( + "Content Library %s updated." % self.library_name + ) changed = True - self.module.exit_json(changed=changed, content_library_info=content_library_info) + self.module.exit_json( + changed=changed, content_library_info=content_library_info + ) def state_destroy_library(self): """ Delete Content Library """ - library_id = self.local_libraries[self.library_name]['lib_id'] + library_id = self.local_libraries[self.library_name]["lib_id"] self.content_service.content.LocalLibrary.delete(library_id=library_id) self.module.exit_json( changed=True, content_library_info=dict( msg="Content Library '%s' deleted." 
% self.library_name, - library_id=library_id - ) + library_id=library_id, + ), ) def state_exit_unchanged(self): @@ -274,18 +313,29 @@ def state_exit_unchanged(self): def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() argument_spec.update( - library_name=dict(type='str', required=False), - library_description=dict(type='str', required=False), - library_type=dict(type='str', required=False, choices=['local', 'subscribed'], default='local'), - datastore_name=dict(type='str', required=False, aliases=['datastore']), - state=dict(type='str', choices=['present', 'absent'], default='present', required=False), + library_name=dict(type="str", required=False), + library_description=dict(type="str", required=False), + library_type=dict( + type="str", + required=False, + choices=["local", "subscribed"], + default="local", + ), + datastore_name=dict(type="str", required=False, aliases=["datastore"]), + state=dict( + type="str", + choices=["present", "absent"], + default="present", + required=False, + ), + ) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) vmware_contentlib_create = VmwareContentLibCreate(module) vmware_contentlib_create.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_core_info.py b/plugins/modules/vmware_core_info.py index 90d9fc5..d890b70 100644 --- a/plugins/modules/vmware_core_info.py +++ b/plugins/modules/vmware_core_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_core_info short_description: Gathers info about various VMware inventory objects using REST API @@ -53,9 +54,9 @@ extends_documentation_fragment: - vmware.general.VmwareRestModule_filters.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get All VM without any filters block: - name: Get VMs @@ -75,9 +76,9 @@ filters: - datacenters: "{{ datacenter_obj }}" register: clusters_result -''' +""" -RETURN = r''' +RETURN = r""" object_info: description: information about the given VMware object returned: always @@ -92,21 +93,23 @@ } ] } -''' +""" -from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import VmwareRestModule +from ansible_collections.vmware.general.plugins.module_utils.vmware_httpapi.VmwareRestModule import ( + VmwareRestModule, +) def main(): argument_spec = VmwareRestModule.create_argument_spec(use_filters=True) - argument_spec.update( - object_type=dict(type='str', default='datacenter'), - ) + argument_spec.update(object_type=dict(type="str", default="datacenter")) - module = VmwareRestModule(argument_spec=argument_spec, - supports_check_mode=True, - use_object_handler=True) - object_type = module.params['object_type'] + module = VmwareRestModule( + argument_spec=argument_spec, + supports_check_mode=True, + use_object_handler=True, + ) + object_type = module.params["object_type"] url = module.get_url_with_filter(object_type) @@ -114,5 +117,5 @@ def main(): module.exit() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/plugins/modules/vmware_datacenter.py b/plugins/modules/vmware_datacenter.py index 8eb7f38..5050315 100644 --- a/plugins/modules/vmware_datacenter.py +++ b/plugins/modules/vmware_datacenter.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_datacenter short_description: Manage VMware vSphere Datacenters @@ -43,9 +44,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create Datacenter vmware_datacenter: hostname: '{{ vcenter_hostname }}' @@ -64,7 +65,7 @@ state: absent delegate_to: localhost register: datacenter_delete_result -''' +""" RETURN = """# """ @@ -75,37 +76,48 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_datacenter_by_name, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native class VmwareDatacenterManager(PyVmomi): def __init__(self, module): super(VmwareDatacenterManager, self).__init__(module) - self.datacenter_name = self.params.get('datacenter_name') + self.datacenter_name = self.params.get("datacenter_name") self.datacenter_obj = self.get_datacenter() def ensure(self): - state = self.module.params.get('state') + state = self.module.params.get("state") - if state == 'present': + if state == "present": self.create_datacenter() - if state == 'absent': + if state == "absent": self.destroy_datacenter() def get_datacenter(self): try: - datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name) + datacenter_obj = find_datacenter_by_name( + self.content, self.datacenter_name + ) return datacenter_obj except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault: - self.module.fail_json(msg="Failed to get datacenter '%s'" - " due to : %s" % (self.datacenter_name, - to_native(runtime_fault.msg))) + self.module.fail_json( + msg="Failed to get datacenter '%s'" + " due to : %s" + % (self.datacenter_name, to_native(runtime_fault.msg)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to get datacenter" - " '%s' due to generic error: %s" % (self.datacenter_name, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to get datacenter" + " '%s' due to generic error: %s" + % (self.datacenter_name, to_native(generic_exc)) + ) def create_datacenter(self): folder = self.content.rootFolder @@ -118,22 +130,30 @@ def create_datacenter(self): except vim.fault.DuplicateName as duplicate_name: self.module.exit_json(changed=changed) except vim.fault.InvalidName as invalid_name: - self.module.fail_json(msg="Specified datacenter name '%s' is an" - " invalid name : %s" % (self.datacenter_name, - to_native(invalid_name.msg))) + self.module.fail_json( + msg="Specified datacenter name '%s' is an" + " invalid name : %s" + % (self.datacenter_name, to_native(invalid_name.msg)) + ) except vmodl.fault.NotSupported as not_supported: # This should never happen - self.module.fail_json(msg="Trying to 
create a datacenter '%s' on" - " an incorrect folder object : %s" % (self.datacenter_name, - to_native(not_supported.msg))) + self.module.fail_json( + msg="Trying to create a datacenter '%s' on" + " an incorrect folder object : %s" + % (self.datacenter_name, to_native(not_supported.msg)) + ) except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: - self.module.fail_json(msg="Failed to create a datacenter" - " '%s' due to : %s" % (self.datacenter_name, - to_native(runtime_fault.msg))) + self.module.fail_json( + msg="Failed to create a datacenter" + " '%s' due to : %s" + % (self.datacenter_name, to_native(runtime_fault.msg)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to create a datacenter" - " '%s' due to generic error: %s" % (self.datacenter_name, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to create a datacenter" + " '%s' due to generic error: %s" + % (self.datacenter_name, to_native(generic_exc)) + ) def destroy_datacenter(self): results = dict(changed=False) @@ -141,32 +161,44 @@ def destroy_datacenter(self): if self.datacenter_obj and not self.module.check_mode: task = self.datacenter_obj.Destroy_Task() changed, result = wait_for_task(task) - results['changed'] = changed - results['result'] = result + results["changed"] = changed + results["result"] = result self.module.exit_json(**results) - except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: - self.module.fail_json(msg="Failed to delete a datacenter" - " '%s' due to : %s" % (self.datacenter_name, - to_native(runtime_fault.msg))) + except ( + vim.fault.VimFault, + vmodl.RuntimeFault, + vmodl.MethodFault, + ) as runtime_fault: + self.module.fail_json( + msg="Failed to delete a datacenter" + " '%s' due to : %s" + % (self.datacenter_name, to_native(runtime_fault.msg)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to delete a datacenter" - " '%s' due to generic error: %s" % (self.datacenter_name, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to delete a datacenter" + " '%s' due to generic error: %s" + % (self.datacenter_name, to_native(generic_exc)) + ) def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - datacenter_name=dict(required=True, type='str'), - state=dict(default='present', choices=['present', 'absent'], type='str') + datacenter_name=dict(required=True, type="str"), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), ) ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_datacenter_mgr = VmwareDatacenterManager(module) vmware_datacenter_mgr.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_datastore_cluster.py b/plugins/modules/vmware_datastore_cluster.py index a8d7470..90cd458 100644 --- a/plugins/modules/vmware_datastore_cluster.py +++ b/plugins/modules/vmware_datastore_cluster.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_datastore_cluster 
short_description: Manage VMware vSphere datastore clusters @@ -99,9 +100,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create datastore cluster and enable SDRS vmware_datastore_cluster: hostname: '{{ vcenter_hostname }}' @@ -132,7 +133,7 @@ datastore_cluster_name: '{{ datastore_cluster_name }}' state: absent delegate_to: localhost -''' +""" RETURN = """ result: @@ -148,86 +149,162 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native class VMwareDatastoreClusterManager(PyVmomi): def __init__(self, module): super(VMwareDatastoreClusterManager, self).__init__(module) - folder = self.params['folder'] + folder = self.params["folder"] if folder: - self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder) + self.folder_obj = self.content.searchIndex.FindByInventoryPath( + folder + ) if not self.folder_obj: - self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params) + self.module.fail_json( + msg="Failed to find the folder specified by %(folder)s" + % self.params + ) else: - datacenter_name = self.params.get('datacenter_name') + datacenter_name = self.params.get("datacenter_name") datacenter_obj = self.find_datacenter_by_name(datacenter_name) if not datacenter_obj: - self.module.fail_json(msg="Failed to find datacenter '%s' required" - " for managing datastore cluster." % datacenter_name) + self.module.fail_json( + msg="Failed to find datacenter '%s' required" + " for managing datastore cluster." % datacenter_name + ) self.folder_obj = datacenter_obj.datastoreFolder - self.datastore_cluster_name = self.params.get('datastore_cluster_name') - self.datastore_cluster_obj = self.find_datastore_cluster_by_name(self.datastore_cluster_name) + self.datastore_cluster_name = self.params.get("datastore_cluster_name") + self.datastore_cluster_obj = self.find_datastore_cluster_by_name( + self.datastore_cluster_name + ) def ensure(self): """ Manage internal state of datastore cluster """ - results = dict(changed=False, result='') - state = self.module.params.get('state') - enable_sdrs = self.params.get('enable_sdrs') - automation_level = self.params.get('automation_level') - keep_vmdks_together = self.params.get('keep_vmdks_together') - enable_io_loadbalance = self.params.get('enable_io_loadbalance') - loadbalance_interval = self.params.get('loadbalance_interval') + results = dict(changed=False, result="") + state = self.module.params.get("state") + enable_sdrs = self.params.get("enable_sdrs") + automation_level = self.params.get("automation_level") + keep_vmdks_together = self.params.get("keep_vmdks_together") + enable_io_loadbalance = self.params.get("enable_io_loadbalance") + loadbalance_interval = self.params.get("loadbalance_interval") if self.datastore_cluster_obj: - if state == 'present': - results['result'] = "Datastore cluster '%s' already available." % self.datastore_cluster_name + if state == "present": + results["result"] = ( + "Datastore cluster '%s' already available." 
+ % self.datastore_cluster_name + ) sdrs_spec = vim.storageDrs.ConfigSpec() sdrs_spec.podConfigSpec = None - if enable_sdrs != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled: + if ( + enable_sdrs + != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + ): if not sdrs_spec.podConfigSpec: - sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec() + sdrs_spec.podConfigSpec = ( + vim.storageDrs.PodConfigSpec() + ) sdrs_spec.podConfigSpec.enabled = enable_sdrs - results['result'] = results['result'] + " Changed SDRS to '%s'." % enable_sdrs - if automation_level != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultVmBehavior: + results["result"] = ( + results["result"] + + " Changed SDRS to '%s'." % enable_sdrs + ) + if ( + automation_level + != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultVmBehavior + ): if not sdrs_spec.podConfigSpec: - sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec() - sdrs_spec.podConfigSpec.defaultVmBehavior = automation_level - results['result'] = results['result'] + " Changed automation level to '%s'." % automation_level - if keep_vmdks_together != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultIntraVmAffinity: + sdrs_spec.podConfigSpec = ( + vim.storageDrs.PodConfigSpec() + ) + sdrs_spec.podConfigSpec.defaultVmBehavior = ( + automation_level + ) + results["result"] = ( + results["result"] + + " Changed automation level to '%s'." + % automation_level + ) + if ( + keep_vmdks_together + != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.defaultIntraVmAffinity + ): if not sdrs_spec.podConfigSpec: - sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec() - sdrs_spec.podConfigSpec.defaultIntraVmAffinity = keep_vmdks_together - results['result'] = results['result'] + " Changed VMDK affinity to '%s'." % keep_vmdks_together - if enable_io_loadbalance != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.ioLoadBalanceEnabled: + sdrs_spec.podConfigSpec = ( + vim.storageDrs.PodConfigSpec() + ) + sdrs_spec.podConfigSpec.defaultIntraVmAffinity = ( + keep_vmdks_together + ) + results["result"] = ( + results["result"] + + " Changed VMDK affinity to '%s'." + % keep_vmdks_together + ) + if ( + enable_io_loadbalance + != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.ioLoadBalanceEnabled + ): if not sdrs_spec.podConfigSpec: - sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec() - sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = enable_io_loadbalance - results['result'] = results['result'] + " Changed I/O workload balancing to '%s'." % enable_io_loadbalance - if loadbalance_interval != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.loadBalanceInterval: + sdrs_spec.podConfigSpec = ( + vim.storageDrs.PodConfigSpec() + ) + sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = ( + enable_io_loadbalance + ) + results["result"] = ( + results["result"] + + " Changed I/O workload balancing to '%s'." + % enable_io_loadbalance + ) + if ( + loadbalance_interval + != self.datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.loadBalanceInterval + ): if not sdrs_spec.podConfigSpec: - sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec() - sdrs_spec.podConfigSpec.loadBalanceInterval = loadbalance_interval - results['result'] = results['result'] + " Changed load balance interval to '%s' minutes." 
% loadbalance_interval + sdrs_spec.podConfigSpec = ( + vim.storageDrs.PodConfigSpec() + ) + sdrs_spec.podConfigSpec.loadBalanceInterval = ( + loadbalance_interval + ) + results["result"] = ( + results["result"] + + " Changed load balance interval to '%s' minutes." + % loadbalance_interval + ) if sdrs_spec.podConfigSpec: if not self.module.check_mode: try: - task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task(pod=self.datastore_cluster_obj, - spec=sdrs_spec, modify=True) + task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task( + pod=self.datastore_cluster_obj, + spec=sdrs_spec, + modify=True, + ) changed, result = wait_for_task(task) except Exception as generic_exc: - self.module.fail_json(msg="Failed to configure datastore cluster" - " '%s' due to %s" % (self.datastore_cluster_name, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to configure datastore cluster" + " '%s' due to %s" + % ( + self.datastore_cluster_name, + to_native(generic_exc), + ) + ) else: changed = True - results['changed'] = changed - elif state == 'absent': + results["changed"] = changed + elif state == "absent": # Delete datastore cluster if not self.module.check_mode: task = self.datastore_cluster_obj.Destroy_Task() @@ -235,38 +312,76 @@ def ensure(self): else: changed = True if changed: - results['result'] = "Datastore cluster '%s' deleted successfully." % self.datastore_cluster_name - results['changed'] = changed + results["result"] = ( + "Datastore cluster '%s' deleted successfully." + % self.datastore_cluster_name + ) + results["changed"] = changed else: - self.module.fail_json(msg="Failed to delete datastore cluster '%s'." % self.datastore_cluster_name) + self.module.fail_json( + msg="Failed to delete datastore cluster '%s'." 
+ % self.datastore_cluster_name + ) else: - if state == 'present': + if state == "present": # Create datastore cluster if not self.module.check_mode: try: - self.datastore_cluster_obj = self.folder_obj.CreateStoragePod(name=self.datastore_cluster_name) + self.datastore_cluster_obj = self.folder_obj.CreateStoragePod( + name=self.datastore_cluster_name + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to create datastore cluster" - " '%s' due to %s" % (self.datastore_cluster_name, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to create datastore cluster" + " '%s' due to %s" + % ( + self.datastore_cluster_name, + to_native(generic_exc), + ) + ) try: sdrs_spec = vim.storageDrs.ConfigSpec() - sdrs_spec.podConfigSpec = vim.storageDrs.PodConfigSpec() + sdrs_spec.podConfigSpec = ( + vim.storageDrs.PodConfigSpec() + ) sdrs_spec.podConfigSpec.enabled = enable_sdrs - sdrs_spec.podConfigSpec.defaultVmBehavior = automation_level - sdrs_spec.podConfigSpec.defaultIntraVmAffinity = keep_vmdks_together - sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = enable_io_loadbalance - sdrs_spec.podConfigSpec.loadBalanceInterval = loadbalance_interval - task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task(pod=self.datastore_cluster_obj, spec=sdrs_spec, modify=True) + sdrs_spec.podConfigSpec.defaultVmBehavior = ( + automation_level + ) + sdrs_spec.podConfigSpec.defaultIntraVmAffinity = ( + keep_vmdks_together + ) + sdrs_spec.podConfigSpec.ioLoadBalanceEnabled = ( + enable_io_loadbalance + ) + sdrs_spec.podConfigSpec.loadBalanceInterval = ( + loadbalance_interval + ) + task = self.content.storageResourceManager.ConfigureStorageDrsForPod_Task( + pod=self.datastore_cluster_obj, + spec=sdrs_spec, + modify=True, + ) changed, result = wait_for_task(task) except Exception as generic_exc: - self.module.fail_json(msg="Failed to configure datastore cluster" - " '%s' due to %s" % (self.datastore_cluster_name, - to_native(generic_exc))) - results['changed'] = True - results['result'] = "Datastore cluster '%s' created successfully." % self.datastore_cluster_name - elif state == 'absent': - results['result'] = "Datastore cluster '%s' not available or already deleted." % self.datastore_cluster_name + self.module.fail_json( + msg="Failed to configure datastore cluster" + " '%s' due to %s" + % ( + self.datastore_cluster_name, + to_native(generic_exc), + ) + ) + results["changed"] = True + results["result"] = ( + "Datastore cluster '%s' created successfully." + % self.datastore_cluster_name + ) + elif state == "absent": + results["result"] = ( + "Datastore cluster '%s' not available or already deleted." 
+ % self.datastore_cluster_name + ) self.module.exit_json(**results) @@ -274,31 +389,37 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - datacenter_name=dict(type='str', required=False, aliases=['datacenter']), - datastore_cluster_name=dict(type='str', required=True), - state=dict(default='present', choices=['present', 'absent'], type='str'), - folder=dict(type='str', required=False), - enable_sdrs=dict(type='bool', default=False, required=False), - keep_vmdks_together=dict(type='bool', default=True, required=False), - automation_level=dict(type='str', choices=['automated', 'manual'], default='manual'), - enable_io_loadbalance=dict(type='bool', default=False, required=False), - loadbalance_interval=dict(type='int', default=480, required=False) + datacenter_name=dict( + type="str", required=False, aliases=["datacenter"] + ), + datastore_cluster_name=dict(type="str", required=True), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), + folder=dict(type="str", required=False), + enable_sdrs=dict(type="bool", default=False, required=False), + keep_vmdks_together=dict( + type="bool", default=True, required=False + ), + automation_level=dict( + type="str", choices=["automated", "manual"], default="manual" + ), + enable_io_loadbalance=dict( + type="bool", default=False, required=False + ), + loadbalance_interval=dict(type="int", default=480, required=False), ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[ - ['datacenter_name', 'folder'], - ], - required_one_of=[ - ['datacenter_name', 'folder'], - ] + mutually_exclusive=[["datacenter_name", "folder"]], + required_one_of=[["datacenter_name", "folder"]], ) datastore_cluster_mgr = VMwareDatastoreClusterManager(module) datastore_cluster_mgr.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_datastore_info.py b/plugins/modules/vmware_datastore_info.py index ec62f4f..d9ac394 100644 --- a/plugins/modules/vmware_datastore_info.py +++ b/plugins/modules/vmware_datastore_info.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_datastore_info short_description: Gather info about datastores available in given vCenter @@ -91,9 +92,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter' vmware_datastore_info: hostname: '{{ vcenter_hostname }}' @@ -128,7 +129,7 @@ - overallStatus delegate_to: localhost register: info -''' +""" RETURN = """ datastores: @@ -176,26 +177,40 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, vmware_argument_spec, get_all_objs, - find_cluster_by_name, get_parent_datacenter) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + get_all_objs, + find_cluster_by_name, + get_parent_datacenter, +) class VMwareHostDatastore(PyVmomi): """ This class populates the datastore 
list """ + def __init__(self, module): super(VMwareHostDatastore, self).__init__(module) - self.gather_nfs_mount_info = self.module.params['gather_nfs_mount_info'] - self.gather_vmfs_mount_info = self.module.params['gather_vmfs_mount_info'] - self.schema = self.module.params['schema'] - self.properties = self.module.params['properties'] + self.gather_nfs_mount_info = self.module.params[ + "gather_nfs_mount_info" + ] + self.gather_vmfs_mount_info = self.module.params[ + "gather_vmfs_mount_info" + ] + self.schema = self.module.params["schema"] + self.properties = self.module.params["properties"] def check_datastore_host(self, esxi_host, datastore): """ Get all datastores of specified ESXi host """ esxi = self.find_hostsystem_by_name(esxi_host) if esxi is None: - self.module.fail_json(msg="Failed to find ESXi hostname %s " % esxi_host) + self.module.fail_json( + msg="Failed to find ESXi hostname %s " % esxi_host + ) storage_system = esxi.configManager.storageSystem - host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo + host_file_sys_vol_mount_info = ( + storage_system.fileSystemVolumeInfo.mountInfo + ) for host_mount_info in host_file_sys_vol_mount_info: if host_mount_info.volume.name == datastore: return host_mount_info @@ -205,56 +220,83 @@ def build_datastore_list(self, datastore_list): """ Build list with datastores """ datastores = list() for datastore in datastore_list: - if self.schema == 'summary': + if self.schema == "summary": summary = datastore.summary datastore_summary = dict() - datastore_summary['accessible'] = summary.accessible - datastore_summary['capacity'] = summary.capacity - datastore_summary['name'] = summary.name - datastore_summary['freeSpace'] = summary.freeSpace - datastore_summary['maintenanceMode'] = summary.maintenanceMode - datastore_summary['multipleHostAccess'] = summary.multipleHostAccess - datastore_summary['type'] = summary.type + datastore_summary["accessible"] = summary.accessible + datastore_summary["capacity"] = summary.capacity + datastore_summary["name"] = summary.name + datastore_summary["freeSpace"] = summary.freeSpace + datastore_summary["maintenanceMode"] = summary.maintenanceMode + datastore_summary[ + "multipleHostAccess" + ] = summary.multipleHostAccess + datastore_summary["type"] = summary.type if self.gather_nfs_mount_info or self.gather_vmfs_mount_info: - if self.gather_nfs_mount_info and summary.type.startswith("NFS"): + if self.gather_nfs_mount_info and summary.type.startswith( + "NFS" + ): # get mount info from the first ESXi host attached to this NFS datastore - host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name) - datastore_summary['nfs_server'] = host_mount_info.volume.remoteHost - datastore_summary['nfs_path'] = host_mount_info.volume.remotePath + host_mount_info = self.check_datastore_host( + summary.datastore.host[0].key.name, summary.name + ) + datastore_summary[ + "nfs_server" + ] = host_mount_info.volume.remoteHost + datastore_summary[ + "nfs_path" + ] = host_mount_info.volume.remotePath if self.gather_vmfs_mount_info and summary.type == "VMFS": # get mount info from the first ESXi host attached to this VMFS datastore - host_mount_info = self.check_datastore_host(summary.datastore.host[0].key.name, summary.name) - datastore_summary['vmfs_blockSize'] = host_mount_info.volume.blockSize - datastore_summary['vmfs_version'] = host_mount_info.volume.version - datastore_summary['vmfs_uuid'] = host_mount_info.volume.uuid + host_mount_info = self.check_datastore_host( + 
summary.datastore.host[0].key.name, summary.name + ) + datastore_summary[ + "vmfs_blockSize" + ] = host_mount_info.volume.blockSize + datastore_summary[ + "vmfs_version" + ] = host_mount_info.volume.version + datastore_summary[ + "vmfs_uuid" + ] = host_mount_info.volume.uuid # vcsim does not return uncommitted if not summary.uncommitted: summary.uncommitted = 0 - datastore_summary['uncommitted'] = summary.uncommitted - datastore_summary['url'] = summary.url + datastore_summary["uncommitted"] = summary.uncommitted + datastore_summary["url"] = summary.url # Calculated values - datastore_summary['provisioned'] = summary.capacity - summary.freeSpace + summary.uncommitted - datastore_summary['datastore_cluster'] = 'N/A' + datastore_summary["provisioned"] = ( + summary.capacity - summary.freeSpace + summary.uncommitted + ) + datastore_summary["datastore_cluster"] = "N/A" if isinstance(datastore.parent, vim.StoragePod): - datastore_summary['datastore_cluster'] = datastore.parent.name + datastore_summary[ + "datastore_cluster" + ] = datastore.parent.name - if self.module.params['name']: - if datastore_summary['name'] == self.module.params['name']: + if self.module.params["name"]: + if datastore_summary["name"] == self.module.params["name"]: datastores.extend([datastore_summary]) else: datastores.extend([datastore_summary]) else: - if self.module.params['name']: - if datastore.name == self.module.params['name']: - datastores.extend(([self.to_json(datastore, self.properties)])) + if self.module.params["name"]: + if datastore.name == self.module.params["name"]: + datastores.extend( + ([self.to_json(datastore, self.properties)]) + ) else: - datastores.extend(([self.to_json(datastore, self.properties)])) + datastores.extend( + ([self.to_json(datastore, self.properties)]) + ) return datastores class PyVmomiCache(object): """ This class caches references to objects which are requested multiples times but not modified """ + def __init__(self, content, dc_name=None): self.content = content self.dc_name = dc_name @@ -265,7 +307,7 @@ def get_all_objs(self, content, types, confine_to_datacenter=True): """ Wrapper around get_all_objs to set datacenter context """ objects = get_all_objs(content, types) if confine_to_datacenter: - if hasattr(objects, 'items'): + if hasattr(objects, "items"): # resource pools come back as a dictionary for k, v in tuple(objects.items()): parent_dc = get_parent_datacenter(k) @@ -273,27 +315,38 @@ def get_all_objs(self, content, types, confine_to_datacenter=True): del objects[k] else: # everything else should be a list - objects = [x for x in objects if get_parent_datacenter(x).name == self.dc_name] + objects = [ + x + for x in objects + if get_parent_datacenter(x).name == self.dc_name + ] return objects class PyVmomiHelper(PyVmomi): """ This class gets datastores """ + def __init__(self, module): super(PyVmomiHelper, self).__init__(module) - self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter']) + self.cache = PyVmomiCache( + self.content, dc_name=self.params["datacenter"] + ) def lookup_datastore(self, confine_to_datacenter): """ Get datastore(s) per ESXi host or vCenter server """ - datastores = self.cache.get_all_objs(self.content, [vim.Datastore], confine_to_datacenter) + datastores = self.cache.get_all_objs( + self.content, [vim.Datastore], confine_to_datacenter + ) return datastores def lookup_datastore_by_cluster(self): """ Get datastore(s) per cluster """ - cluster = find_cluster_by_name(self.content, self.params['cluster']) + cluster = 
find_cluster_by_name(self.content, self.params["cluster"]) if not cluster: - self.module.fail_json(msg='Failed to find cluster "%(cluster)s"' % self.params) + self.module.fail_json( + msg='Failed to find cluster "%(cluster)s"' % self.params + ) c_dc = cluster.datastore return c_dc @@ -302,27 +355,32 @@ def main(): """ Main """ argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - datacenter=dict(type='str', aliases=['datacenter_name']), - cluster=dict(type='str'), - gather_nfs_mount_info=dict(type='bool', default=False), - gather_vmfs_mount_info=dict(type='bool', default=False), - schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'), - properties=dict(type='list') + name=dict(type="str"), + datacenter=dict(type="str", aliases=["datacenter_name"]), + cluster=dict(type="str"), + gather_nfs_mount_info=dict(type="bool", default=False), + gather_vmfs_mount_info=dict(type="bool", default=False), + schema=dict( + type="str", choices=["summary", "vsphere"], default="summary" + ), + properties=dict(type="list"), + ) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True - ) - if module._name == 'vmware_datastore_facts': - module.deprecate("The 'vmware_datastore_facts' module has been renamed to 'vmware_datastore_info'", version='2.13') + if module._name == "vmware_datastore_facts": + module.deprecate( + "The 'vmware_datastore_facts' module has been renamed to 'vmware_datastore_info'", + version="2.13", + ) result = dict(changed=False) pyv = PyVmomiHelper(module) - if module.params['cluster']: + if module.params["cluster"]: dxs = pyv.lookup_datastore_by_cluster() - elif module.params['datacenter']: + elif module.params["datacenter"]: dxs = pyv.lookup_datastore(confine_to_datacenter=True) else: dxs = pyv.lookup_datastore(confine_to_datacenter=False) @@ -330,10 +388,10 @@ def main(): vmware_host_datastore = VMwareHostDatastore(module) datastores = vmware_host_datastore.build_datastore_list(dxs) - result['datastores'] = datastores + result["datastores"] = datastores module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_datastore_maintenancemode.py b/plugins/modules/vmware_datastore_maintenancemode.py index a1e74d1..7c41008 100644 --- a/plugins/modules/vmware_datastore_maintenancemode.py +++ b/plugins/modules/vmware_datastore_maintenancemode.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_datastore_maintenancemode short_description: Place a datastore into maintenance mode @@ -58,9 +59,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Enter datastore into Maintenance Mode vmware_datastore_maintenancemode: hostname: '{{ vcenter_hostname }}' @@ -96,9 +97,9 @@ datastore: '{{ datastore_name }}' state: absent delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = """ datastore_status: description: Action taken for datastore returned: always @@ -106,7 +107,7 @@ sample: { "ds_226_01": "Datastore 
'ds_226_01' is already in maintenance mode." } -''' +""" try: from pyVmomi import vim @@ -114,53 +115,85 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task, - find_cluster_by_name, get_all_objs) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + find_cluster_by_name, + get_all_objs, +) from ansible.module_utils._text import to_native class VmwareDatastoreMaintenanceMgr(PyVmomi): def __init__(self, module): super(VmwareDatastoreMaintenanceMgr, self).__init__(module) - datastore_name = self.params.get('datastore') - cluster_name = self.params.get('cluster_name') - datastore_cluster = self.params.get('datastore_cluster') + datastore_name = self.params.get("datastore") + cluster_name = self.params.get("cluster_name") + datastore_cluster = self.params.get("datastore_cluster") self.datastore_objs = [] if datastore_name: ds = self.find_datastore_by_name(datastore_name=datastore_name) if not ds: - self.module.fail_json(msg='Failed to find datastore "%(datastore)s".' % self.params) + self.module.fail_json( + msg='Failed to find datastore "%(datastore)s".' + % self.params + ) self.datastore_objs = [ds] elif cluster_name: cluster = find_cluster_by_name(self.content, cluster_name) if not cluster: - self.module.fail_json(msg='Failed to find cluster "%(cluster_name)s".' % self.params) + self.module.fail_json( + msg='Failed to find cluster "%(cluster_name)s".' + % self.params + ) self.datastore_objs = cluster.datastore elif datastore_cluster: - datastore_cluster_obj = get_all_objs(self.content, [vim.StoragePod]) + datastore_cluster_obj = get_all_objs( + self.content, [vim.StoragePod] + ) if not datastore_cluster_obj: - self.module.fail_json(msg='Failed to find datastore cluster "%(datastore_cluster)s".' % self.params) + self.module.fail_json( + msg='Failed to find datastore cluster "%(datastore_cluster)s".' + % self.params + ) for datastore in datastore_cluster_obj.childEntity: self.datastore_objs.append(datastore) else: - self.module.fail_json(msg="Please select one of 'cluster_name', 'datastore' or 'datastore_cluster'.") - self.state = self.params.get('state') + self.module.fail_json( + msg="Please select one of 'cluster_name', 'datastore' or 'datastore_cluster'." + ) + self.state = self.params.get("state") def ensure(self): datastore_results = dict() change_datastore_list = [] for datastore in self.datastore_objs: changed = False - if self.state == 'present' and datastore.summary.maintenanceMode != 'normal': - datastore_results[datastore.name] = "Datastore '%s' is already in maintenance mode." % datastore.name + if ( + self.state == "present" + and datastore.summary.maintenanceMode != "normal" + ): + datastore_results[datastore.name] = ( + "Datastore '%s' is already in maintenance mode." + % datastore.name + ) break - elif self.state == 'absent' and datastore.summary.maintenanceMode == 'normal': - datastore_results[datastore.name] = "Datastore '%s' is not in maintenance mode." % datastore.name + elif ( + self.state == "absent" + and datastore.summary.maintenanceMode == "normal" + ): + datastore_results[datastore.name] = ( + "Datastore '%s' is not in maintenance mode." 
+ % datastore.name + ) break try: - if self.state == 'present': - storage_replacement_result = datastore.DatastoreEnterMaintenanceMode() + if self.state == "present": + storage_replacement_result = ( + datastore.DatastoreEnterMaintenanceMode() + ) task = storage_replacement_result.task else: task = datastore.DatastoreExitMaintenanceMode_Task() @@ -169,51 +202,66 @@ def ensure(self): if success: changed = True - if self.state == 'present': - datastore_results[datastore.name] = "Datastore '%s' entered in maintenance mode." % datastore.name + if self.state == "present": + datastore_results[datastore.name] = ( + "Datastore '%s' entered in maintenance mode." + % datastore.name + ) else: - datastore_results[datastore.name] = "Datastore '%s' exited from maintenance mode." % datastore.name + datastore_results[datastore.name] = ( + "Datastore '%s' exited from maintenance mode." + % datastore.name + ) except vim.fault.InvalidState as invalid_state: - if self.state == 'present': + if self.state == "present": msg = "Unable to enter datastore '%s' in" % datastore.name else: msg = "Unable to exit datastore '%s' from" % datastore.name - msg += " maintenance mode due to : %s" % to_native(invalid_state.msg) + msg += " maintenance mode due to : %s" % to_native( + invalid_state.msg + ) self.module.fail_json(msg=msg) except Exception as exc: - if self.state == 'present': + if self.state == "present": msg = "Unable to enter datastore '%s' in" % datastore.name else: msg = "Unable to exit datastore '%s' from" % datastore.name - msg += " maintenance mode due to generic exception : %s" % to_native(exc) + msg += ( + " maintenance mode due to generic exception : %s" + % to_native(exc) + ) self.module.fail_json(msg=msg) change_datastore_list.append(changed) changed = False if any(change_datastore_list): changed = True - self.module.exit_json(changed=changed, datastore_status=datastore_results) + self.module.exit_json( + changed=changed, datastore_status=datastore_results + ) def main(): spec = vmware_argument_spec() - spec.update(dict( - datastore=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), - datastore_cluster=dict(type='str', required=False), - state=dict(type='str', default='present', choices=['present', 'absent']), - )) + spec.update( + dict( + datastore=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), + datastore_cluster=dict(type="str", required=False), + state=dict( + type="str", default="present", choices=["present", "absent"] + ), + ) + ) module = AnsibleModule( argument_spec=spec, - required_one_of=[ - ['datastore', 'cluster_name', 'datastore_cluster'], - ], + required_one_of=[["datastore", "cluster_name", "datastore_cluster"]], ) datastore_maintenance_mgr = VmwareDatastoreMaintenanceMgr(module=module) datastore_maintenance_mgr.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_deploy_ovf.py b/plugins/modules/vmware_deploy_ovf.py index 979990b..5cc8090 100644 --- a/plugins/modules/vmware_deploy_ovf.py +++ b/plugins/modules/vmware_deploy_ovf.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ author: 
'Matt Martz (@sivel)' short_description: 'Deploys a VMware virtual machine from an OVF or OVA file' description: @@ -131,9 +132,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - vmware_deploy_ovf: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' @@ -156,16 +157,16 @@ power_on: no ovf: /absolute/path/to/template/mytemplate.ova delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" instance: description: metadata about the new virtual machine returned: always type: dict sample: None -''' +""" import io import os @@ -181,11 +182,26 @@ from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import string_types -from ansible.module_utils.urls import generic_urlparse, open_url, urlparse, urlunparse -from ansible_collections.vmware.general.plugins.module_utils.vmware import (find_network_by_name, find_vm_by_name, PyVmomi, - gather_vm_facts, vmware_argument_spec, wait_for_task, wait_for_vm_ip) +from ansible.module_utils.urls import ( + generic_urlparse, + open_url, + urlparse, + urlunparse, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + find_network_by_name, + find_vm_by_name, + PyVmomi, + gather_vm_facts, + vmware_argument_spec, + wait_for_task, + wait_for_vm_ip, +) + try: - from ansible_collections.vmware.general.plugins.module_utils.vmware import vim + from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vim, + ) from pyVmomi import vmodl except ImportError: pass @@ -196,12 +212,12 @@ def path_exists(value): value = str(value) value = os.path.expanduser(os.path.expandvars(value)) if not os.path.exists(value): - raise ValueError('%s is not a valid path' % value) + raise ValueError("%s is not a valid path" % value) return value class ProgressReader(io.FileIO): - def __init__(self, name, mode='r', closefd=True): + def __init__(self, name, mode="r", closefd=True): self.bytes_read = 0 io.FileIO.__init__(self, name, mode=mode, closefd=closefd) @@ -232,7 +248,9 @@ def read(self, size=10240): class VMDKUploader(Thread): - def __init__(self, vmdk, url, validate_certs=True, tarinfo=None, create=False): + def __init__( + self, vmdk, url, validate_certs=True, tarinfo=None, create=False + ): Thread.__init__(self) self.vmdk = vmdk @@ -259,30 +277,32 @@ def bytes_read(self): return 0 def _request_opts(self): - ''' + """ Requests for vmdk files differ from other file types. 
Build the request options here to handle that - ''' + """ headers = { - 'Content-Length': self.size, - 'Content-Type': 'application/octet-stream', + "Content-Length": self.size, + "Content-Type": "application/octet-stream", } if self._create: # Non-VMDK - method = 'PUT' - headers['Overwrite'] = 't' + method = "PUT" + headers["Overwrite"] = "t" else: # VMDK - method = 'POST' - headers['Content-Type'] = 'application/x-vnd.vmware-streamVmdk' + method = "POST" + headers["Content-Type"] = "application/x-vnd.vmware-streamVmdk" - return { - 'method': method, - 'headers': headers, - } + return {"method": method, "headers": headers} def _open_url(self): - open_url(self.url, data=self.f, validate_certs=self.validate_certs, **self._request_opts()) + open_url( + self.url, + data=self.f, + validate_certs=self.validate_certs, + **self._request_opts() + ) def run(self): if self.tarinfo: @@ -293,7 +313,7 @@ def run(self): self.e = sys.exc_info() else: try: - with ProgressReader(self.vmdk, 'rb') as self.f: + with ProgressReader(self.vmdk, "rb") as self.f: self._open_url() except Exception: self.e = sys.exc_info() @@ -318,96 +338,138 @@ def __init__(self, module): self.entity = None def get_objects(self): - self.datacenter = self.find_datacenter_by_name(self.params['datacenter']) + self.datacenter = self.find_datacenter_by_name( + self.params["datacenter"] + ) if not self.datacenter: - self.module.fail_json(msg='%(datacenter)s could not be located' % self.params) + self.module.fail_json( + msg="%(datacenter)s could not be located" % self.params + ) self.datastore = None - datastore_cluster_obj = self.find_datastore_cluster_by_name(self.params['datastore']) + datastore_cluster_obj = self.find_datastore_cluster_by_name( + self.params["datastore"] + ) if datastore_cluster_obj: datastore = None datastore_freespace = 0 for ds in datastore_cluster_obj.childEntity: - if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace: + if ( + isinstance(ds, vim.Datastore) + and ds.summary.freeSpace > datastore_freespace + ): # If datastore field is provided, filter destination datastores - if ds.summary.maintenanceMode != 'normal' or not ds.summary.accessible: + if ( + ds.summary.maintenanceMode != "normal" + or not ds.summary.accessible + ): continue datastore = ds datastore_freespace = ds.summary.freeSpace if datastore: self.datastore = datastore else: - self.datastore = self.find_datastore_by_name(self.params['datastore'], self.datacenter) + self.datastore = self.find_datastore_by_name( + self.params["datastore"], self.datacenter + ) if not self.datastore: - self.module.fail_json(msg='%(datastore)s could not be located' % self.params) + self.module.fail_json( + msg="%(datastore)s could not be located" % self.params + ) - if self.params['cluster']: + if self.params["cluster"]: resource_pools = [] - cluster = self.find_cluster_by_name(self.params['cluster'], datacenter_name=self.datacenter) + cluster = self.find_cluster_by_name( + self.params["cluster"], datacenter_name=self.datacenter + ) if cluster is None: - self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params) - self.resource_pool = self.find_resource_pool_by_cluster(self.params['resource_pool'], cluster=cluster) + self.module.fail_json( + msg="Unable to find cluster '%(cluster)s'" % self.params + ) + self.resource_pool = self.find_resource_pool_by_cluster( + self.params["resource_pool"], cluster=cluster + ) else: - self.resource_pool = self.find_resource_pool_by_name(self.params['resource_pool']) + self.resource_pool = 
self.find_resource_pool_by_name( + self.params["resource_pool"] + ) if not self.resource_pool: - self.module.fail_json(msg='%(resource_pool)s could not be located' % self.params) + self.module.fail_json( + msg="%(resource_pool)s could not be located" % self.params + ) - for key, value in self.params['networks'].items(): + for key, value in self.params["networks"].items(): network = find_network_by_name(self.content, value) if not network: - self.module.fail_json(msg='%(network)s could not be located' % self.params) + self.module.fail_json( + msg="%(network)s could not be located" % self.params + ) network_mapping = vim.OvfManager.NetworkMapping() network_mapping.name = key network_mapping.network = network self.network_mappings.append(network_mapping) - return self.datastore, self.datacenter, self.resource_pool, self.network_mappings + return ( + self.datastore, + self.datacenter, + self.resource_pool, + self.network_mappings, + ) def get_ovf_descriptor(self): - if tarfile.is_tarfile(self.params['ovf']): - self.tar = tarfile.open(self.params['ovf']) + if tarfile.is_tarfile(self.params["ovf"]): + self.tar = tarfile.open(self.params["ovf"]) ovf = None for candidate in self.tar.getmembers(): dummy, ext = os.path.splitext(candidate.name) - if ext.lower() == '.ovf': + if ext.lower() == ".ovf": ovf = candidate break if not ovf: - self.module.fail_json(msg='Could not locate OVF file in %(ovf)s' % self.params) + self.module.fail_json( + msg="Could not locate OVF file in %(ovf)s" % self.params + ) self.ovf_descriptor = to_native(self.tar.extractfile(ovf).read()) else: - with open(self.params['ovf']) as f: + with open(self.params["ovf"]) as f: self.ovf_descriptor = f.read() return self.ovf_descriptor def get_lease(self): - datastore, datacenter, resource_pool, network_mappings = self.get_objects() + datastore, datacenter, resource_pool, network_mappings = ( + self.get_objects() + ) - params = { - 'diskProvisioning': self.params['disk_provisioning'], - } - if self.params['name']: - params['entityName'] = self.params['name'] + params = {"diskProvisioning": self.params["disk_provisioning"]} + if self.params["name"]: + params["entityName"] = self.params["name"] if network_mappings: - params['networkMapping'] = network_mappings - if self.params['deployment_option']: - params['deploymentOption'] = self.params['deployment_option'] - if self.params['properties']: - params['propertyMapping'] = [] - for key, value in self.params['properties'].items(): + params["networkMapping"] = network_mappings + if self.params["deployment_option"]: + params["deploymentOption"] = self.params["deployment_option"] + if self.params["properties"]: + params["propertyMapping"] = [] + for key, value in self.params["properties"].items(): property_mapping = vim.KeyValue() property_mapping.key = key - property_mapping.value = str(value) if isinstance(value, bool) else value - params['propertyMapping'].append(property_mapping) + property_mapping.value = ( + str(value) if isinstance(value, bool) else value + ) + params["propertyMapping"].append(property_mapping) - if self.params['folder']: - folder = self.content.searchIndex.FindByInventoryPath(self.params['folder']) + if self.params["folder"]: + folder = self.content.searchIndex.FindByInventoryPath( + self.params["folder"] + ) if not folder: - self.module.fail_json(msg="Unable to find the specified folder %(folder)s" % self.params) + self.module.fail_json( + msg="Unable to find the specified folder %(folder)s" + % self.params + ) else: folder = datacenter.vmFolder @@ -416,42 +478,50 
@@ def get_lease(self): ovf_descriptor = self.get_ovf_descriptor() self.import_spec = self.content.ovfManager.CreateImportSpec( - ovf_descriptor, - resource_pool, - datastore, - spec_params + ovf_descriptor, resource_pool, datastore, spec_params ) - errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])] - if self.params['fail_on_spec_warnings']: + errors = [ + to_native(e.msg) for e in getattr(self.import_spec, "error", []) + ] + if self.params["fail_on_spec_warnings"]: errors.extend( - (to_native(w.msg) for w in getattr(self.import_spec, 'warning', [])) + ( + to_native(w.msg) + for w in getattr(self.import_spec, "warning", []) + ) ) if errors: self.module.fail_json( - msg='Failure validating OVF import spec: %s' % '. '.join(errors) + msg="Failure validating OVF import spec: %s" + % ". ".join(errors) ) - for warning in getattr(self.import_spec, 'warning', []): - self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg)) + for warning in getattr(self.import_spec, "warning", []): + self.module.warn( + "Problem validating OVF import spec: %s" + % to_native(warning.msg) + ) - if not self.params['allow_duplicates']: + if not self.params["allow_duplicates"]: name = self.import_spec.importSpec.configSpec.name match = find_vm_by_name(self.content, name, folder=folder) if match: - self.module.exit_json(instance=gather_vm_facts(self.content, match), changed=False) + self.module.exit_json( + instance=gather_vm_facts(self.content, match), + changed=False, + ) if self.module.check_mode: - self.module.exit_json(changed=True, instance={'hw_name': name}) + self.module.exit_json(changed=True, instance={"hw_name": name}) try: self.lease = resource_pool.ImportVApp( - self.import_spec.importSpec, - folder + self.import_spec.importSpec, folder ) except vmodl.fault.SystemError as e: self.module.fail_json( - msg='Failed to start import: %s' % to_native(e.msg) + msg="Failed to start import: %s" % to_native(e.msg) ) while self.lease.state != vim.HttpNfcLease.State.ready: @@ -462,23 +532,28 @@ def get_lease(self): return self.lease, self.import_spec def _normalize_url(self, url): - ''' + """ The hostname in URLs from vmware may be ``*`` update it accordingly - ''' + """ url_parts = generic_urlparse(urlparse(url)) - if url_parts.hostname == '*': + if url_parts.hostname == "*": if url_parts.port: - url_parts.netloc = '%s:%d' % (self.params['hostname'], url_parts.port) + url_parts.netloc = "%s:%d" % ( + self.params["hostname"], + url_parts.port, + ) else: - url_parts.netloc = self.params['hostname'] + url_parts.netloc = self.params["hostname"] return urlunparse(url_parts.as_list()) def upload(self): - if self.params['ovf'] is None: - self.module.fail_json(msg="OVF path is required for upload operation.") + if self.params["ovf"] is None: + self.module.fail_json( + msg="OVF path is required for upload operation." 
+ ) - ovf_dir = os.path.dirname(self.params['ovf']) + ovf_dir = os.path.dirname(self.params["ovf"]) lease, import_spec = self.get_lease() @@ -493,10 +568,13 @@ def upload(self): if not device_upload_url: lease.HttpNfcLeaseAbort( - vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path) + vmodl.fault.SystemError( + reason="Failed to find deviceUrl for file %s" + % file_item.path + ) ) self.module.fail_json( - msg='Failed to find deviceUrl for file %s' % file_item.path + msg="Failed to find deviceUrl for file %s" % file_item.path ) vmdk_tarinfo = None @@ -506,10 +584,14 @@ def upload(self): vmdk_tarinfo = self.tar.getmember(file_item.path) except KeyError: lease.HttpNfcLeaseAbort( - vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path) + vmodl.fault.SystemError( + reason="Failed to find VMDK file %s in OVA" + % file_item.path + ) ) self.module.fail_json( - msg='Failed to find VMDK file %s in OVA' % file_item.path + msg="Failed to find VMDK file %s in OVA" + % file_item.path ) else: vmdk = os.path.join(ovf_dir, file_item.path) @@ -517,19 +599,21 @@ def upload(self): path_exists(vmdk) except ValueError: lease.HttpNfcLeaseAbort( - vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk) + vmodl.fault.SystemError( + reason="Failed to find VMDK file at %s" % vmdk + ) ) self.module.fail_json( - msg='Failed to find VMDK file at %s' % vmdk + msg="Failed to find VMDK file at %s" % vmdk ) uploaders.append( VMDKUploader( vmdk, device_upload_url, - self.params['validate_certs'], + self.params["validate_certs"], tarinfo=vmdk_tarinfo, - create=file_item.create + create=file_item.create, ) ) @@ -540,15 +624,19 @@ def upload(self): while uploader.is_alive(): time.sleep(0.1) total_bytes_read[i] = uploader.bytes_read - lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size)) + lease.HttpNfcLeaseProgress( + int(100.0 * sum(total_bytes_read) / total_size) + ) if uploader.e: lease.HttpNfcLeaseAbort( - vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1])) + vmodl.fault.SystemError( + reason="%s" % to_native(uploader.e[1]) + ) ) self.module.fail_json( - msg='%s' % to_native(uploader.e[1]), - exception=''.join(traceback.format_tb(uploader.e[2])) + msg="%s" % to_native(uploader.e[1]), + exception="".join(traceback.format_tb(uploader.e[2])), ) def complete(self): @@ -556,32 +644,34 @@ def complete(self): def inject_ovf_env(self): attrib = { - 'xmlns': 'http://schemas.dmtf.org/ovf/environment/1', - 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance', - 'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1', - 'xmlns:ve': 'http://www.vmware.com/schema/ovfenv', - 'oe:id': '', - 've:esxId': self.entity._moId + "xmlns": "http://schemas.dmtf.org/ovf/environment/1", + "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance", + "xmlns:oe": "http://schemas.dmtf.org/ovf/environment/1", + "xmlns:ve": "http://www.vmware.com/schema/ovfenv", + "oe:id": "", + "ve:esxId": self.entity._moId, } - env = ET.Element('Environment', **attrib) + env = ET.Element("Environment", **attrib) - platform = ET.SubElement(env, 'PlatformSection') - ET.SubElement(platform, 'Kind').text = self.content.about.name - ET.SubElement(platform, 'Version').text = self.content.about.version - ET.SubElement(platform, 'Vendor').text = self.content.about.vendor - ET.SubElement(platform, 'Locale').text = 'US' + platform = ET.SubElement(env, "PlatformSection") + ET.SubElement(platform, "Kind").text = self.content.about.name + ET.SubElement(platform, 
"Version").text = self.content.about.version + ET.SubElement(platform, "Vendor").text = self.content.about.vendor + ET.SubElement(platform, "Locale").text = "US" - prop_section = ET.SubElement(env, 'PropertySection') - for key, value in self.params['properties'].items(): + prop_section = ET.SubElement(env, "PropertySection") + for key, value in self.params["properties"].items(): params = { - 'oe:key': key, - 'oe:value': str(value) if isinstance(value, bool) else value + "oe:key": key, + "oe:value": str(value) if isinstance(value, bool) else value, } - ET.SubElement(prop_section, 'Property', **params) + ET.SubElement(prop_section, "Property", **params) opt = vim.option.OptionValue() - opt.key = 'guestinfo.ovfEnv' - opt.value = '' + to_native(ET.tostring(env)) + opt.key = "guestinfo.ovfEnv" + opt.value = '' + to_native( + ET.tostring(env) + ) config_spec = vim.vm.ConfigSpec() config_spec.extraConfig = [opt] @@ -592,17 +682,19 @@ def inject_ovf_env(self): def deploy(self): facts = {} - if self.params['inject_ovf_env']: + if self.params["inject_ovf_env"]: self.inject_ovf_env() - if self.params['power_on']: + if self.params["power_on"]: task = self.entity.PowerOn() - if self.params['wait']: + if self.params["wait"]: wait_for_task(task) - if self.params['wait_for_ip_address']: + if self.params["wait_for_ip_address"]: _facts = wait_for_vm_ip(self.content, self.entity) if not _facts: - self.module.fail_json(msg='Waiting for IP address timed out') + self.module.fail_json( + msg="Waiting for IP address timed out" + ) facts.update(_facts) if not facts: @@ -613,82 +705,46 @@ def deploy(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update({ - 'name': {}, - 'datastore': { - 'default': 'datastore1', - }, - 'datacenter': { - 'default': 'ha-datacenter', - }, - 'cluster': { - 'default': None, - }, - 'deployment_option': { - 'default': None, - }, - 'folder': { - 'default': None, - }, - 'inject_ovf_env': { - 'default': False, - 'type': 'bool', - }, - 'resource_pool': { - 'default': 'Resources', - }, - 'networks': { - 'default': { - 'VM Network': 'VM Network', + argument_spec.update( + { + "name": {}, + "datastore": {"default": "datastore1"}, + "datacenter": {"default": "ha-datacenter"}, + "cluster": {"default": None}, + "deployment_option": {"default": None}, + "folder": {"default": None}, + "inject_ovf_env": {"default": False, "type": "bool"}, + "resource_pool": {"default": "Resources"}, + "networks": { + "default": {"VM Network": "VM Network"}, + "type": "dict", + }, + "ovf": {"type": path_exists, "aliases": ["ova"]}, + "disk_provisioning": { + "choices": [ + "flat", + "eagerZeroedThick", + "monolithicSparse", + "twoGbMaxExtentSparse", + "twoGbMaxExtentFlat", + "thin", + "sparse", + "thick", + "seSparse", + "monolithicFlat", + ], + "default": "thin", }, - 'type': 'dict', - }, - 'ovf': { - 'type': path_exists, - 'aliases': ['ova'], - }, - 'disk_provisioning': { - 'choices': [ - 'flat', - 'eagerZeroedThick', - 'monolithicSparse', - 'twoGbMaxExtentSparse', - 'twoGbMaxExtentFlat', - 'thin', - 'sparse', - 'thick', - 'seSparse', - 'monolithicFlat' - ], - 'default': 'thin', - }, - 'power_on': { - 'type': 'bool', - 'default': True, - }, - 'properties': { - 'type': 'dict', - }, - 'wait': { - 'type': 'bool', - 'default': True, - }, - 'wait_for_ip_address': { - 'type': 'bool', - 'default': False, - }, - 'allow_duplicates': { - 'type': 'bool', - 'default': True, - }, - 'fail_on_spec_warnings': { - 'type': 'bool', - 'default': False, - }, - }) + "power_on": {"type": "bool", "default": True}, 
+ "properties": {"type": "dict"}, + "wait": {"type": "bool", "default": True}, + "wait_for_ip_address": {"type": "bool", "default": False}, + "allow_duplicates": {"type": "bool", "default": True}, + "fail_on_spec_warnings": {"type": "bool", "default": False}, + } + ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) deploy_ovf = VMwareDeployOvf(module) @@ -699,5 +755,5 @@ def main(): module.exit_json(instance=facts, changed=True) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_drs_group.py b/plugins/modules/vmware_drs_group.py index 32cc32b..58c74f7 100644 --- a/plugins/modules/vmware_drs_group.py +++ b/plugins/modules/vmware_drs_group.py @@ -9,12 +9,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: - "Karsten Kaj Jakobsen (@karstenjakobsen)" @@ -70,9 +70,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: "Create DRS VM group" delegate_to: localhost @@ -114,9 +114,9 @@ group_name: TEST_HOST_01 state: absent -''' +""" -RETURN = r''' +RETURN = r""" drs_group_facts: description: Metadata about DRS group created returned: always @@ -140,7 +140,7 @@ ] } } -''' +""" try: from pyVmomi import vim @@ -148,9 +148,15 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, vmware_argument_spec, - wait_for_task, find_cluster_by_name, - find_vm_by_id, find_datacenter_by_name, find_vm_by_name) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + find_cluster_by_name, + find_vm_by_id, + find_datacenter_by_name, + find_vm_by_name, +) class VmwareDrsGroupManager(PyVmomi): @@ -158,8 +164,16 @@ class VmwareDrsGroupManager(PyVmomi): Class to manage DRS groups """ - def __init__(self, module, cluster_name, group_name, state, - datacenter_name=None, vm_list=None, host_list=None): + def __init__( + self, + module, + cluster_name, + group_name, + state, + datacenter_name=None, + vm_list=None, + host_list=None, + ): """ Init """ @@ -177,21 +191,27 @@ def __init__(self, module, cluster_name, group_name, state, self.__vm_obj_list = [] self.__host_list = host_list self.__host_obj_list = [] - self.__msg = 'Nothing to see here...' + self.__msg = "Nothing to see here..." 
self.__result = dict() self.__changed = False self.__state = state if datacenter_name is not None: - self.__datacenter_obj = find_datacenter_by_name(self.content, self.__datacenter_name) + self.__datacenter_obj = find_datacenter_by_name( + self.content, self.__datacenter_name + ) if self.__datacenter_obj is None and module.check_mode is False: - raise Exception("Datacenter '%s' not found" % self.__datacenter_name) + raise Exception( + "Datacenter '%s' not found" % self.__datacenter_name + ) - self.__cluster_obj = find_cluster_by_name(content=self.content, - cluster_name=self.__cluster_name, - datacenter=self.__datacenter_obj) + self.__cluster_obj = find_cluster_by_name( + content=self.content, + cluster_name=self.__cluster_name, + datacenter=self.__datacenter_obj, + ) # Throw error if cluster does not exist if self.__cluster_obj is None: @@ -204,12 +224,12 @@ def __init__(self, module, cluster_name, group_name, state, self.__set_result(self.__group_obj) # Dont populate lists if we are deleting group - if state == 'present': + if state == "present": if self.__group_obj: - self.__operation = 'edit' + self.__operation = "edit" else: - self.__operation = 'add' + self.__operation = "add" if self.__vm_list is not None: self.__set_vm_obj_list(vm_list=self.__vm_list) @@ -217,7 +237,7 @@ def __init__(self, module, cluster_name, group_name, state, if self.__host_list is not None: self.__set_host_obj_list(host_list=self.__host_list) else: - self.__operation = 'remove' + self.__operation = "remove" def get_msg(self): """ @@ -250,7 +270,9 @@ def __set_result(self, group_obj): if (self.__cluster_obj is not None) and (group_obj is not None): self.__result[self.__cluster_obj.name] = [] - self.__result[self.__cluster_obj.name].append(self.__normalize_group_data(group_obj)) + self.__result[self.__cluster_obj.name].append( + self.__normalize_group_data(group_obj) + ) def get_changed(self): """ @@ -284,12 +306,18 @@ def __set_vm_obj_list(self, vm_list=None, cluster_obj=None): if self.module.check_mode is False: # Get host data - vm_obj = find_vm_by_id(content=self.content, vm_id=vm, - vm_id_type='vm_name', cluster=cluster_obj) + vm_obj = find_vm_by_id( + content=self.content, + vm_id=vm, + vm_id_type="vm_name", + cluster=cluster_obj, + ) if vm_obj is None: - raise Exception("VM %s does not exist in cluster %s" % (vm, - self.__cluster_name)) + raise Exception( + "VM %s does not exist in cluster %s" + % (vm, self.__cluster_name) + ) self.__vm_obj_list.append(vm_obj) @@ -316,7 +344,10 @@ def __set_host_obj_list(self, host_list=None): host_obj = self.find_hostsystem_by_name(host) if host_obj is None and self.module.check_mode is False: - raise Exception("ESXi host %s does not exist in cluster %s" % (host, self.__cluster_name)) + raise Exception( + "ESXi host %s does not exist in cluster %s" + % (host, self.__cluster_name) + ) self.__host_obj_list.append(host_obj) @@ -348,7 +379,9 @@ def __get_group_by_name(self, group_name=None, cluster_obj=None): # No group found return None - def __populate_vm_host_list(self, group_name=None, cluster_obj=None, host_group=False): + def __populate_vm_host_list( + self, group_name=None, cluster_obj=None, host_group=False + ): """ Return all VM/Host names using given group name Args: @@ -380,7 +413,9 @@ def __populate_vm_host_list(self, group_name=None, cluster_obj=None, host_group= return obj_name_list - def __check_if_vms_hosts_changed(self, group_name=None, cluster_obj=None, host_group=False): + def __check_if_vms_hosts_changed( + self, group_name=None, cluster_obj=None, 
host_group=False + ): """ Function to check if VMs/Hosts changed Args: @@ -410,53 +445,74 @@ def __check_if_vms_hosts_changed(self, group_name=None, cluster_obj=None, host_g def __create_host_group(self): # Check if anything has changed when editing - if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed(host_group=True)): + if self.__operation == "add" or ( + self.__operation == "edit" + and self.__check_if_vms_hosts_changed(host_group=True) + ): group = vim.cluster.HostGroup() group.name = self.__group_name group.host = self.__host_obj_list - group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation) + group_spec = vim.cluster.GroupSpec( + info=group, operation=self.__operation + ) config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec]) if not self.module.check_mode: - task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True) + task = self.__cluster_obj.ReconfigureEx( + config_spec, modify=True + ) wait_for_task(task) # Set new result since something changed self.__set_result(group) self.__changed = True - if self.__operation == 'edit': - self.__msg = "Updated host group %s successfully" % (self.__group_name) + if self.__operation == "edit": + self.__msg = "Updated host group %s successfully" % ( + self.__group_name + ) else: - self.__msg = "Created host group %s successfully" % (self.__group_name) + self.__msg = "Created host group %s successfully" % ( + self.__group_name + ) def __create_vm_group(self): # Check if anything has changed when editing - if self.__operation == 'add' or (self.__operation == 'edit' and self.__check_if_vms_hosts_changed()): + if self.__operation == "add" or ( + self.__operation == "edit" and self.__check_if_vms_hosts_changed() + ): group = vim.cluster.VmGroup() group.name = self.__group_name group.vm = self.__vm_obj_list - group_spec = vim.cluster.GroupSpec(info=group, operation=self.__operation) + group_spec = vim.cluster.GroupSpec( + info=group, operation=self.__operation + ) config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec]) # Check if dry run if not self.module.check_mode: - task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True) + task = self.__cluster_obj.ReconfigureEx( + config_spec, modify=True + ) wait_for_task(task) self.__set_result(group) self.__changed = True - if self.__operation == 'edit': - self.__msg = "Updated vm group %s successfully" % (self.__group_name) + if self.__operation == "edit": + self.__msg = "Updated vm group %s successfully" % ( + self.__group_name + ) else: - self.__msg = "Created vm group %s successfully" % (self.__group_name) + self.__msg = "Created vm group %s successfully" % ( + self.__group_name + ) def __normalize_group_data(self, group_obj): """ @@ -471,17 +527,13 @@ def __normalize_group_data(self, group_obj): return {} # Check if group is a host group - if hasattr(group_obj, 'host'): + if hasattr(group_obj, "host"): return dict( - group_name=group_obj.name, - hosts=self.__host_list, - type="host" + group_name=group_obj.name, hosts=self.__host_list, type="host" ) else: return dict( - group_name=group_obj.name, - vms=self.__vm_list, - type="vm" + group_name=group_obj.name, vms=self.__vm_list, type="vm" ) def create_drs_group(self): @@ -494,7 +546,7 @@ def create_drs_group(self): elif self.__host_list is None: self.__create_vm_group() else: - raise Exception('Failed, no hosts or vms defined') + raise Exception("Failed, no hosts or vms defined") def delete_drs_group(self): """ @@ -508,17 +560,26 @@ def delete_drs_group(self): # 
Check if dry run if not self.module.check_mode: - group_spec = vim.cluster.GroupSpec(removeKey=self.__group_name, operation=self.__operation) + group_spec = vim.cluster.GroupSpec( + removeKey=self.__group_name, operation=self.__operation + ) config_spec = vim.cluster.ConfigSpecEx(groupSpec=[group_spec]) - task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True) + task = self.__cluster_obj.ReconfigureEx( + config_spec, modify=True + ) wait_for_task(task) # Dont throw error if group does not exist. Simply set changed = False if self.__changed: - self.__msg = "Deleted group `%s` successfully" % (self.__group_name) + self.__msg = "Deleted group `%s` successfully" % ( + self.__group_name + ) else: - self.__msg = "DRS group `%s` does not exists or already deleted" % (self.__group_name) + self.__msg = ( + "DRS group `%s` does not exists or already deleted" + % (self.__group_name) + ) def main(): @@ -529,53 +590,59 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(type='str', default='present', choices=['present', 'absent']), - datacenter=dict(type='str', required=False, aliases=['datacenter_name']), - cluster_name=dict(type='str', required=True), - group_name=dict(type='str', required=True), - vms=dict(type='list'), - hosts=dict(type='list') + state=dict( + type="str", default="present", choices=["present", "absent"] + ), + datacenter=dict( + type="str", required=False, aliases=["datacenter_name"] + ), + cluster_name=dict(type="str", required=True), + group_name=dict(type="str", required=True), + vms=dict(type="list"), + hosts=dict(type="list"), ) - required_if = [ - ['state', 'absent', ['group_name']] - ] + required_if = [["state", "absent", ["group_name"]]] module = AnsibleModule( argument_spec=argument_spec, required_if=required_if, supports_check_mode=True, - mutually_exclusive=[['vms', 'hosts']], - required_one_of=[['vms', 'hosts']] + mutually_exclusive=[["vms", "hosts"]], + required_one_of=[["vms", "hosts"]], ) try: # Create instance of VmwareDrsGroupManager - vmware_drs_group = VmwareDrsGroupManager(module=module, - datacenter_name=module.params.get('datacenter', None), - cluster_name=module.params['cluster_name'], - group_name=module.params['group_name'], - vm_list=module.params['vms'], - host_list=module.params['hosts'], - state=module.params['state']) - - if module.params['state'] == 'present': + vmware_drs_group = VmwareDrsGroupManager( + module=module, + datacenter_name=module.params.get("datacenter", None), + cluster_name=module.params["cluster_name"], + group_name=module.params["group_name"], + vm_list=module.params["vms"], + host_list=module.params["hosts"], + state=module.params["state"], + ) + + if module.params["state"] == "present": # Add DRS group vmware_drs_group.create_drs_group() - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": # Delete DRS group vmware_drs_group.delete_drs_group() # Set results - results = dict(msg=vmware_drs_group.get_msg(), - failed=False, - changed=vmware_drs_group.get_changed(), - result=vmware_drs_group.get_result()) + results = dict( + msg=vmware_drs_group.get_msg(), + failed=False, + changed=vmware_drs_group.get_changed(), + result=vmware_drs_group.get_result(), + ) except Exception as error: results = dict(failed=True, msg="Error: %s" % error) - if results['failed']: + if results["failed"]: module.fail_json(**results) else: module.exit_json(**results) diff --git a/plugins/modules/vmware_drs_group_info.py b/plugins/modules/vmware_drs_group_info.py index 
62d4935..cbd0c14 100644 --- a/plugins/modules/vmware_drs_group_info.py +++ b/plugins/modules/vmware_drs_group_info.py @@ -9,12 +9,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: - "Karsten Kaj Jakobsen (@karstenjakobsen)" @@ -45,9 +45,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: "Gather DRS info about given Cluster" register: cluster_drs_group_info @@ -67,9 +67,9 @@ username: "{{ vcenter_username }}" datacenter: "{{ datacenter }}" delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" drs_group_info: description: Metadata about DRS group from given cluster / datacenter returned: always @@ -110,7 +110,7 @@ ], "DC0_C1": [] } -''' +""" try: from pyVmomi import vim @@ -118,11 +118,15 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_datacenter_by_name, + get_all_objs, +) class VmwareDrsGroupInfoManager(PyVmomi): - def __init__(self, module, datacenter_name, cluster_name=None): """ Doctring: Init @@ -134,24 +138,32 @@ def __init__(self, module, datacenter_name, cluster_name=None): self.__datacenter_obj = None self.__cluster_name = cluster_name self.__cluster_obj = None - self.__msg = 'Nothing to see here...' + self.__msg = "Nothing to see here..." self.__result = dict() self.__changed = False if datacenter_name: - datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) + datacenter_obj = find_datacenter_by_name( + self.content, datacenter_name=datacenter_name + ) self.cluster_obj_list = [] if datacenter_obj: folder = datacenter_obj.hostFolder - self.cluster_obj_list = get_all_objs(self.content, [vim.ClusterComputeResource], folder) + self.cluster_obj_list = get_all_objs( + self.content, [vim.ClusterComputeResource], folder + ) else: - raise Exception("Datacenter '%s' not found" % self.__datacenter_name) + raise Exception( + "Datacenter '%s' not found" % self.__datacenter_name + ) if cluster_name: - cluster_obj = self.find_cluster_by_name(cluster_name=self.__cluster_name) + cluster_obj = self.find_cluster_by_name( + cluster_name=self.__cluster_name + ) if cluster_obj is None: raise Exception("Cluster '%s' not found" % self.__cluster_name) @@ -210,17 +222,19 @@ def __normalize_group_data(self, group_obj): return {} # Check if group is a host group - if hasattr(group_obj, 'host'): + if hasattr(group_obj, "host"): return dict( group_name=group_obj.name, - hosts=self.__get_all_from_group(group_obj=group_obj, host_group=True), - type="host" + hosts=self.__get_all_from_group( + group_obj=group_obj, host_group=True + ), + type="host", ) else: return dict( group_name=group_obj.name, vms=self.__get_all_from_group(group_obj=group_obj), - type="vm" + type="vm", ) def gather_info(self): @@ -236,7 +250,9 @@ def gather_info(self): cluster_group_info[cluster_obj.name] = [] for drs_group in cluster_obj.configurationEx.group: - cluster_group_info[cluster_obj.name].append(self.__normalize_group_data(drs_group)) + cluster_group_info[cluster_obj.name].append( + 
self.__normalize_group_data(drs_group) + ) self.__set_result(cluster_group_info) @@ -246,34 +262,38 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=False, aliases=['datacenter_name']), - cluster_name=dict(type='str', required=False), + datacenter=dict( + type="str", required=False, aliases=["datacenter_name"] + ), + cluster_name=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[['cluster_name', 'datacenter']], - mutually_exclusive=[['cluster_name', 'datacenter']], + required_one_of=[["cluster_name", "datacenter"]], + mutually_exclusive=[["cluster_name", "datacenter"]], ) try: # Create instance of VmwareDrsGroupManager vmware_drs_group_info = VmwareDrsGroupInfoManager( module=module, - datacenter_name=module.params.get('datacenter'), - cluster_name=module.params.get('cluster_name', None)) + datacenter_name=module.params.get("datacenter"), + cluster_name=module.params.get("cluster_name", None), + ) vmware_drs_group_info.gather_info() # Set results - results = dict(failed=False, - drs_group_info=vmware_drs_group_info.get_result()) + results = dict( + failed=False, drs_group_info=vmware_drs_group_info.get_result() + ) except Exception as error: results = dict(failed=True, msg="Error: %s" % error) - if results['failed']: + if results["failed"]: module.fail_json(**results) else: module.exit_json(**results) diff --git a/plugins/modules/vmware_drs_rule_info.py b/plugins/modules/vmware_drs_rule_info.py index ece559a..1c52380 100644 --- a/plugins/modules/vmware_drs_rule_info.py +++ b/plugins/modules/vmware_drs_rule_info.py @@ -9,12 +9,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_drs_rule_info short_description: Gathers info about DRS rule on the given cluster @@ -43,9 +43,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather DRS info about given Cluster vmware_drs_rule_info: hostname: '{{ vcenter_hostname }}' @@ -63,9 +63,9 @@ datacenter: '{{ datacenter_name }}' delegate_to: localhost register: datacenter_drs_info -''' +""" -RETURN = r''' +RETURN = r""" drs_rule_info: description: metadata about DRS rule from given cluster / datacenter returned: always @@ -108,7 +108,7 @@ } ], } -''' +""" try: from pyVmomi import vim @@ -116,32 +116,48 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datacenter_by_name, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_datacenter_by_name, + get_all_objs, +) class VmwareDrsInfoManager(PyVmomi): def __init__(self, module): super(VmwareDrsInfoManager, self).__init__(module) - datacenter_name = self.params.get('datacenter', None) + datacenter_name = self.params.get("datacenter", None) if datacenter_name: - datacenter_obj = find_datacenter_by_name(self.content, datacenter_name=datacenter_name) + datacenter_obj = find_datacenter_by_name( + self.content, datacenter_name=datacenter_name + ) self.cluster_obj_list = [] if datacenter_obj: folder = datacenter_obj.hostFolder - self.cluster_obj_list = 
get_all_objs(self.content, [vim.ClusterComputeResource], folder) + self.cluster_obj_list = get_all_objs( + self.content, [vim.ClusterComputeResource], folder + ) else: - self.module.fail_json(changed=False, msg="Datacenter '%s' not found" % datacenter_name) + self.module.fail_json( + changed=False, + msg="Datacenter '%s' not found" % datacenter_name, + ) - cluster_name = self.params.get('cluster_name', None) + cluster_name = self.params.get("cluster_name", None) if cluster_name: cluster_obj = self.find_cluster_by_name(cluster_name=cluster_name) if cluster_obj is None: - self.module.fail_json(changed=False, msg="Cluster '%s' not found" % cluster_name) + self.module.fail_json( + changed=False, msg="Cluster '%s' not found" % cluster_name + ) else: self.cluster_obj_list = [cluster_obj] - def get_all_from_group(self, group_name=None, cluster_obj=None, hostgroup=False): + def get_all_from_group( + self, group_name=None, cluster_obj=None, hostgroup=False + ): """ Return all VM / Host names using given group name Args: @@ -179,15 +195,18 @@ def normalize_vm_vm_rule_spec(rule_obj=None): """ if rule_obj is None: return {} - return dict(rule_key=rule_obj.key, - rule_enabled=rule_obj.enabled, - rule_name=rule_obj.name, - rule_mandatory=rule_obj.mandatory, - rule_uuid=rule_obj.ruleUuid, - rule_vms=[vm.name for vm in rule_obj.vm], - rule_type="vm_vm_rule", - rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False, - ) + return dict( + rule_key=rule_obj.key, + rule_enabled=rule_obj.enabled, + rule_name=rule_obj.name, + rule_mandatory=rule_obj.mandatory, + rule_uuid=rule_obj.ruleUuid, + rule_vms=[vm.name for vm in rule_obj.vm], + rule_type="vm_vm_rule", + rule_affinity=True + if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) + else False, + ) def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None): """ @@ -201,24 +220,30 @@ def normalize_vm_host_rule_spec(self, rule_obj=None, cluster_obj=None): """ if not all([rule_obj, cluster_obj]): return {} - return dict(rule_key=rule_obj.key, - rule_enabled=rule_obj.enabled, - rule_name=rule_obj.name, - rule_mandatory=rule_obj.mandatory, - rule_uuid=rule_obj.ruleUuid, - rule_vm_group_name=rule_obj.vmGroupName, - rule_affine_host_group_name=rule_obj.affineHostGroupName, - rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName, - rule_vms=self.get_all_from_group(group_name=rule_obj.vmGroupName, - cluster_obj=cluster_obj), - rule_affine_hosts=self.get_all_from_group(group_name=rule_obj.affineHostGroupName, - cluster_obj=cluster_obj, - hostgroup=True), - rule_anti_affine_hosts=self.get_all_from_group(group_name=rule_obj.antiAffineHostGroupName, - cluster_obj=cluster_obj, - hostgroup=True), - rule_type="vm_host_rule", - ) + return dict( + rule_key=rule_obj.key, + rule_enabled=rule_obj.enabled, + rule_name=rule_obj.name, + rule_mandatory=rule_obj.mandatory, + rule_uuid=rule_obj.ruleUuid, + rule_vm_group_name=rule_obj.vmGroupName, + rule_affine_host_group_name=rule_obj.affineHostGroupName, + rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName, + rule_vms=self.get_all_from_group( + group_name=rule_obj.vmGroupName, cluster_obj=cluster_obj + ), + rule_affine_hosts=self.get_all_from_group( + group_name=rule_obj.affineHostGroupName, + cluster_obj=cluster_obj, + hostgroup=True, + ), + rule_anti_affine_hosts=self.get_all_from_group( + group_name=rule_obj.antiAffineHostGroupName, + cluster_obj=cluster_obj, + hostgroup=True, + ), + rule_type="vm_host_rule", + ) def gather_drs_rule_info(self): """ @@ -231,11 
+256,15 @@ def gather_drs_rule_info(self): cluster_rule_info[cluster_obj.name] = [] for drs_rule in cluster_obj.configuration.rule: if isinstance(drs_rule, vim.cluster.VmHostRuleInfo): - cluster_rule_info[cluster_obj.name].append(self.normalize_vm_host_rule_spec( - rule_obj=drs_rule, - cluster_obj=cluster_obj)) + cluster_rule_info[cluster_obj.name].append( + self.normalize_vm_host_rule_spec( + rule_obj=drs_rule, cluster_obj=cluster_obj + ) + ) else: - cluster_rule_info[cluster_obj.name].append(self.normalize_vm_vm_rule_spec(rule_obj=drs_rule)) + cluster_rule_info[cluster_obj.name].append( + self.normalize_vm_vm_rule_spec(rule_obj=drs_rule) + ) return cluster_rule_info @@ -243,20 +272,20 @@ def gather_drs_rule_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), + datacenter=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'datacenter'], - ], + required_one_of=[["cluster_name", "datacenter"]], supports_check_mode=True, ) vmware_drs_info = VmwareDrsInfoManager(module) - module.exit_json(changed=False, drs_rule_info=vmware_drs_info.gather_drs_rule_info()) + module.exit_json( + changed=False, drs_rule_info=vmware_drs_info.gather_drs_rule_info() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_dvs_host.py b/plugins/modules/vmware_dvs_host.py index 18774bf..f25883e 100644 --- a/plugins/modules/vmware_dvs_host.py +++ b/plugins/modules/vmware_dvs_host.py @@ -8,15 +8,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvs_host short_description: Add or remove a host from distributed virtual switch @@ -65,9 +66,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add Host to dVS vmware_dvs_host: hostname: '{{ vcenter_hostname }}' @@ -96,10 +97,11 @@ - vmnic1 state: present delegate_to: localhost -''' +""" try: from collections import Counter + HAS_COLLECTIONS_COUNTER = True except ImportError as e: HAS_COLLECTIONS_COUNTER = False @@ -110,8 +112,13 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_hostsystem_by_name, - vmware_argument_spec, wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_dvs_by_name, + find_hostsystem_by_name, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native @@ -123,23 +130,25 @@ def __init__(self, module): self.host = None self.dv_switch = None self.nic = None - self.state = self.module.params['state'] - self.switch_name = self.module.params['switch_name'] - self.esxi_hostname = self.module.params['esxi_hostname'] - self.vmnics = self.module.params['vmnics'] - self.vendor_specific_config = self.module.params['vendor_specific_config'] + self.state = self.module.params["state"] + self.switch_name = self.module.params["switch_name"] + 
self.esxi_hostname = self.module.params["esxi_hostname"] + self.vmnics = self.module.params["vmnics"] + self.vendor_specific_config = self.module.params[ + "vendor_specific_config" + ] def process_state(self): dvs_host_states = { - 'absent': { - 'present': self.state_destroy_dvs_host, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_destroy_dvs_host, + "absent": self.state_exit_unchanged, + }, + "present": { + "update": self.state_update_dvs_host, + "present": self.state_exit_unchanged, + "absent": self.state_create_dvs_host, }, - 'present': { - 'update': self.state_update_dvs_host, - 'present': self.state_exit_unchanged, - 'absent': self.state_create_dvs_host, - } } try: @@ -154,7 +163,11 @@ def process_state(self): def find_dvs_uplink_pg(self): # There should only always be a single uplink port group on # a distributed virtual switch - dvs_uplink_pg = self.dv_switch.config.uplinkPortgroup[0] if len(self.dv_switch.config.uplinkPortgroup) else None + dvs_uplink_pg = ( + self.dv_switch.config.uplinkPortgroup[0] + if len(self.dv_switch.config.uplinkPortgroup) + else None + ) return dvs_uplink_pg # operation should be edit, add and remove @@ -168,7 +181,11 @@ def modify_dvs_host(self, operation): if self.vendor_specific_config: config = list() for item in self.vendor_specific_config: - config.append(vim.dvs.KeyedOpaqueBlob(key=item['key'], opaqueData=item['value'])) + config.append( + vim.dvs.KeyedOpaqueBlob( + key=item["key"], opaqueData=item["value"] + ) + ) spec.host[0].vendorSpecificConfig = config if operation in ("edit", "add"): @@ -176,18 +193,24 @@ def modify_dvs_host(self, operation): count = 0 for nic in self.vmnics: - spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec()) + spec.host[0].backing.pnicSpec.append( + vim.dvs.HostMember.PnicSpec() + ) spec.host[0].backing.pnicSpec[count].pnicDevice = nic - spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key + spec.host[0].backing.pnicSpec[ + count + ].uplinkPortgroupKey = self.uplink_portgroup.key count += 1 try: task = self.dv_switch.ReconfigureDvs_Task(spec) changed, result = wait_for_task(task) except vmodl.fault.NotSupported as not_supported: - self.module.fail_json(msg="Failed to configure DVS host %s as it is not" - " compatible with the VDS version." % self.esxi_hostname, - details=to_native(not_supported.msg)) + self.module.fail_json( + msg="Failed to configure DVS host %s as it is not" + " compatible with the VDS version." 
% self.esxi_hostname, + details=to_native(not_supported.msg), + ) return changed, result def state_destroy_dvs_host(self): @@ -235,61 +258,74 @@ def check_dvs_host_state(self): self.dv_switch = find_dvs_by_name(self.content, self.switch_name) if self.dv_switch is None: - self.module.fail_json(msg="A distributed virtual switch %s " - "does not exist" % self.switch_name) + self.module.fail_json( + msg="A distributed virtual switch %s " + "does not exist" % self.switch_name + ) self.uplink_portgroup = self.find_dvs_uplink_pg() if self.uplink_portgroup is None: - self.module.fail_json(msg="An uplink portgroup does not exist on" - " the distributed virtual switch %s" % self.switch_name) + self.module.fail_json( + msg="An uplink portgroup does not exist on" + " the distributed virtual switch %s" % self.switch_name + ) self.host = self.find_host_attached_dvs() if self.host is None: # We still need the HostSystem object to add the host # to the distributed vswitch - self.host = find_hostsystem_by_name(self.content, self.esxi_hostname) + self.host = find_hostsystem_by_name( + self.content, self.esxi_hostname + ) if self.host is None: - self.module.fail_json(msg="The esxi_hostname %s does not exist " - "in vCenter" % self.esxi_hostname) - return 'absent' + self.module.fail_json( + msg="The esxi_hostname %s does not exist " + "in vCenter" % self.esxi_hostname + ) + return "absent" else: if self.check_uplinks(): - return 'present' + return "present" else: - return 'update' + return "update" def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - esxi_hostname=dict(required=True, type='str'), - switch_name=dict(required=True, type='str'), - vmnics=dict(required=True, type='list'), - state=dict(default='present', choices=['present', 'absent'], type='str'), + esxi_hostname=dict(required=True, type="str"), + switch_name=dict(required=True, type="str"), + vmnics=dict(required=True, type="list"), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), vendor_specific_config=dict( - type='list', - elements='dict', + type="list", + elements="dict", required=False, options=dict( - key=dict(type='str', required=True), - value=dict(type='str', required=True), + key=dict(type="str", required=True), + value=dict(type="str", required=True), ), ), ) ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) if not HAS_COLLECTIONS_COUNTER: - module.fail_json(msg='collections.Counter from Python-2.7 is required for this module') + module.fail_json( + msg="collections.Counter from Python-2.7 is required for this module" + ) vmware_dvs_host = VMwareDvsHost(module) vmware_dvs_host.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_dvs_portgroup.py b/plugins/modules/vmware_dvs_portgroup.py index 8c2812f..1cf86f5 100644 --- a/plugins/modules/vmware_dvs_portgroup.py +++ b/plugins/modules/vmware_dvs_portgroup.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvs_portgroup short_description: Create or 
remove a Distributed vSwitch portgroup. @@ -141,9 +142,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create vlan portgroup vmware_dvs_portgroup: hostname: '{{ vcenter_hostname }}' @@ -212,7 +213,7 @@ vendor_config_override: yes vlan_override: yes delegate_to: localhost -''' +""" try: from pyVmomi import vim, vmodl @@ -220,8 +221,13 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name, - vmware_argument_spec, wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_dvs_by_name, + find_dvspg_by_name, + vmware_argument_spec, + wait_for_task, +) class VMwareDvsPortgroup(PyVmomi): @@ -232,21 +238,30 @@ def __init__(self, module): def create_vlan_list(self): vlan_id_list = [] - for vlan_id_splitted in self.module.params['vlan_id'].split(','): - vlans = vlan_id_splitted.split('-') + for vlan_id_splitted in self.module.params["vlan_id"].split(","): + vlans = vlan_id_splitted.split("-") if len(vlans) > 2: - self.module.fail_json(msg="Invalid VLAN range %s." % vlan_id_splitted) + self.module.fail_json( + msg="Invalid VLAN range %s." % vlan_id_splitted + ) if len(vlans) == 2: vlan_id_start = vlans[0].strip() vlan_id_end = vlans[1].strip() if not vlan_id_start.isdigit(): - self.module.fail_json(msg="Invalid VLAN %s." % vlan_id_start) + self.module.fail_json( + msg="Invalid VLAN %s." % vlan_id_start + ) if not vlan_id_end.isdigit(): self.module.fail_json(msg="Invalid VLAN %s." % vlan_id_end) vlan_id_start = int(vlan_id_start) vlan_id_end = int(vlan_id_end) - if vlan_id_start not in range(0, 4095) or vlan_id_end not in range(0, 4095): - self.module.fail_json(msg="vlan_id range %s specified is incorrect. The valid vlan_id range is from 0 to 4094." % vlan_id_splitted) + if vlan_id_start not in range( + 0, 4095 + ) or vlan_id_end not in range(0, 4095): + self.module.fail_json( + msg="vlan_id range %s specified is incorrect. The valid vlan_id range is from 0 to 4094." 
+ % vlan_id_splitted + ) vlan_id_list.append((vlan_id_start, vlan_id_end)) else: vlan_id = vlans[0].strip() @@ -263,64 +278,121 @@ def build_config(self): config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() # Basic config - config.name = self.module.params['portgroup_name'] - config.numPorts = self.module.params['num_ports'] + config.name = self.module.params["portgroup_name"] + config.numPorts = self.module.params["num_ports"] # Default port config - config.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() - if self.module.params['vlan_trunk']: - config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec() - config.defaultPortConfig.vlan.vlanId = list(map(lambda x: vim.NumericRange(start=x[0], end=x[1]), self.create_vlan_list())) + config.defaultPortConfig = ( + vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() + ) + if self.module.params["vlan_trunk"]: + config.defaultPortConfig.vlan = ( + vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec() + ) + config.defaultPortConfig.vlan.vlanId = list( + map( + lambda x: vim.NumericRange(start=x[0], end=x[1]), + self.create_vlan_list(), + ) + ) else: - config.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec() - config.defaultPortConfig.vlan.vlanId = int(self.module.params['vlan_id']) + config.defaultPortConfig.vlan = ( + vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec() + ) + config.defaultPortConfig.vlan.vlanId = int( + self.module.params["vlan_id"] + ) config.defaultPortConfig.vlan.inherited = False - config.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy() - config.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=self.module.params['network_policy']['promiscuous']) - config.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=self.module.params['network_policy']['forged_transmits']) - config.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=self.module.params['network_policy']['mac_changes']) + config.defaultPortConfig.securityPolicy = ( + vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy() + ) + config.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy( + value=self.module.params["network_policy"]["promiscuous"] + ) + config.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy( + value=self.module.params["network_policy"]["forged_transmits"] + ) + config.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy( + value=self.module.params["network_policy"]["mac_changes"] + ) # Teaming Policy - teamingPolicy = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy() - teamingPolicy.policy = vim.StringPolicy(value=self.module.params['teaming_policy']['load_balance_policy']) - teamingPolicy.reversePolicy = vim.BoolPolicy(value=self.module.params['teaming_policy']['inbound_policy']) - teamingPolicy.notifySwitches = vim.BoolPolicy(value=self.module.params['teaming_policy']['notify_switches']) - teamingPolicy.rollingOrder = vim.BoolPolicy(value=self.module.params['teaming_policy']['rolling_order']) + teamingPolicy = ( + vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy() + ) + teamingPolicy.policy = vim.StringPolicy( + value=self.module.params["teaming_policy"]["load_balance_policy"] + ) + teamingPolicy.reversePolicy = vim.BoolPolicy( + value=self.module.params["teaming_policy"]["inbound_policy"] + ) + teamingPolicy.notifySwitches = vim.BoolPolicy( + 
value=self.module.params["teaming_policy"]["notify_switches"] + ) + teamingPolicy.rollingOrder = vim.BoolPolicy( + value=self.module.params["teaming_policy"]["rolling_order"] + ) config.defaultPortConfig.uplinkTeamingPolicy = teamingPolicy # PG policy (advanced_policy) - config.policy = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy() - config.policy.blockOverrideAllowed = self.module.params['port_policy']['block_override'] - config.policy.ipfixOverrideAllowed = self.module.params['port_policy']['ipfix_override'] - config.policy.livePortMovingAllowed = self.module.params['port_policy']['live_port_move'] - config.policy.networkResourcePoolOverrideAllowed = self.module.params['port_policy']['network_rp_override'] - config.policy.portConfigResetAtDisconnect = self.module.params['port_policy']['port_config_reset_at_disconnect'] - config.policy.securityPolicyOverrideAllowed = self.module.params['port_policy']['security_override'] - config.policy.shapingOverrideAllowed = self.module.params['port_policy']['shaping_override'] - config.policy.trafficFilterOverrideAllowed = self.module.params['port_policy']['traffic_filter_override'] - config.policy.uplinkTeamingOverrideAllowed = self.module.params['port_policy']['uplink_teaming_override'] - config.policy.vendorConfigOverrideAllowed = self.module.params['port_policy']['vendor_config_override'] - config.policy.vlanOverrideAllowed = self.module.params['port_policy']['vlan_override'] + config.policy = ( + vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy() + ) + config.policy.blockOverrideAllowed = self.module.params["port_policy"][ + "block_override" + ] + config.policy.ipfixOverrideAllowed = self.module.params["port_policy"][ + "ipfix_override" + ] + config.policy.livePortMovingAllowed = self.module.params[ + "port_policy" + ]["live_port_move"] + config.policy.networkResourcePoolOverrideAllowed = self.module.params[ + "port_policy" + ]["network_rp_override"] + config.policy.portConfigResetAtDisconnect = self.module.params[ + "port_policy" + ]["port_config_reset_at_disconnect"] + config.policy.securityPolicyOverrideAllowed = self.module.params[ + "port_policy" + ]["security_override"] + config.policy.shapingOverrideAllowed = self.module.params[ + "port_policy" + ]["shaping_override"] + config.policy.trafficFilterOverrideAllowed = self.module.params[ + "port_policy" + ]["traffic_filter_override"] + config.policy.uplinkTeamingOverrideAllowed = self.module.params[ + "port_policy" + ]["uplink_teaming_override"] + config.policy.vendorConfigOverrideAllowed = self.module.params[ + "port_policy" + ]["vendor_config_override"] + config.policy.vlanOverrideAllowed = self.module.params["port_policy"][ + "vlan_override" + ] # PG Type - config.type = self.module.params['portgroup_type'] + config.type = self.module.params["portgroup_type"] return config def process_state(self): dvspg_states = { - 'absent': { - 'present': self.state_destroy_dvspg, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_destroy_dvspg, + "absent": self.state_exit_unchanged, + }, + "present": { + "update": self.state_update_dvspg, + "present": self.state_exit_unchanged, + "absent": self.state_create_dvspg, }, - 'present': { - 'update': self.state_update_dvspg, - 'present': self.state_exit_unchanged, - 'absent': self.state_create_dvspg, - } } try: - dvspg_states[self.module.params['state']][self.check_dvspg_state()]() + dvspg_states[self.module.params["state"]][ + self.check_dvspg_state() + ]() except vmodl.RuntimeFault as runtime_fault: 
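The `process_state` methods in this patch (here and in vmware_dvs_host above) all use the same nested-dict dispatch: the requested `state` parameter and the detected current state pick the handler to run. A minimal standalone sketch of that pattern, using the handler names from this module but with placeholder bodies instead of the real spec-building and vSphere calls:

```python
# Sketch of the nested-dict state dispatch used by these modules.
# Handler bodies are placeholders; the real methods build ConfigSpecs
# and call the vSphere API via pyVmomi.

def state_exit_unchanged():
    print("nothing to do")

def state_create_dvspg():
    print("would create the portgroup")

def state_update_dvspg():
    print("would reconfigure the portgroup")

def state_destroy_dvspg():
    print("would remove the portgroup")

dvspg_states = {
    "absent": {
        "present": state_destroy_dvspg,
        "absent": state_exit_unchanged,
    },
    "present": {
        "update": state_update_dvspg,
        "present": state_exit_unchanged,
        "absent": state_create_dvspg,
    },
}

requested = "present"  # the module's 'state' parameter
detected = "absent"    # what check_dvspg_state() reports

dvspg_states[requested][detected]()  # prints: would create the portgroup
```

The reformatting only rewraps the dict literal; the lookup-and-call flow is unchanged.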
self.module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: @@ -370,130 +442,191 @@ def state_create_dvspg(self): self.module.exit_json(changed=changed, result=str(result)) def check_dvspg_state(self): - self.dv_switch = find_dvs_by_name(self.content, self.module.params['switch_name']) + self.dv_switch = find_dvs_by_name( + self.content, self.module.params["switch_name"] + ) if self.dv_switch is None: - self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % self.module.params['switch_name']) - self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.module.params['portgroup_name']) + self.module.fail_json( + msg="A distributed virtual switch with name %s does not exist" + % self.module.params["switch_name"] + ) + self.dvs_portgroup = find_dvspg_by_name( + self.dv_switch, self.module.params["portgroup_name"] + ) if self.dvs_portgroup is None: - return 'absent' + return "absent" # Check config # Basic config - if self.dvs_portgroup.config.numPorts != self.module.params['num_ports']: - return 'update' + if ( + self.dvs_portgroup.config.numPorts + != self.module.params["num_ports"] + ): + return "update" # Default port config defaultPortConfig = self.dvs_portgroup.config.defaultPortConfig - if self.module.params['vlan_trunk']: - if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec): - return 'update' - if map(lambda x: (x.start, x.end), defaultPortConfig.vlan.vlanId) != self.create_vlan_list(): - return 'update' + if self.module.params["vlan_trunk"]: + if not isinstance( + defaultPortConfig.vlan, + vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec, + ): + return "update" + if ( + map(lambda x: (x.start, x.end), defaultPortConfig.vlan.vlanId) + != self.create_vlan_list() + ): + return "update" else: - if not isinstance(defaultPortConfig.vlan, vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec): - return 'update' - if defaultPortConfig.vlan.vlanId != int(self.module.params['vlan_id']): - return 'update' - - if defaultPortConfig.securityPolicy.allowPromiscuous.value != self.module.params['network_policy']['promiscuous'] or \ - defaultPortConfig.securityPolicy.forgedTransmits.value != self.module.params['network_policy']['forged_transmits'] or \ - defaultPortConfig.securityPolicy.macChanges.value != self.module.params['network_policy']['mac_changes']: - return 'update' + if not isinstance( + defaultPortConfig.vlan, + vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec, + ): + return "update" + if defaultPortConfig.vlan.vlanId != int( + self.module.params["vlan_id"] + ): + return "update" + + if ( + defaultPortConfig.securityPolicy.allowPromiscuous.value + != self.module.params["network_policy"]["promiscuous"] + or defaultPortConfig.securityPolicy.forgedTransmits.value + != self.module.params["network_policy"]["forged_transmits"] + or defaultPortConfig.securityPolicy.macChanges.value + != self.module.params["network_policy"]["mac_changes"] + ): + return "update" # Teaming Policy - teamingPolicy = self.dvs_portgroup.config.defaultPortConfig.uplinkTeamingPolicy - if teamingPolicy.policy.value != self.module.params['teaming_policy']['load_balance_policy'] or \ - teamingPolicy.reversePolicy.value != self.module.params['teaming_policy']['inbound_policy'] or \ - teamingPolicy.notifySwitches.value != self.module.params['teaming_policy']['notify_switches'] or \ - teamingPolicy.rollingOrder.value != self.module.params['teaming_policy']['rolling_order']: - return 'update' + teamingPolicy = ( + 
self.dvs_portgroup.config.defaultPortConfig.uplinkTeamingPolicy + ) + if ( + teamingPolicy.policy.value + != self.module.params["teaming_policy"]["load_balance_policy"] + or teamingPolicy.reversePolicy.value + != self.module.params["teaming_policy"]["inbound_policy"] + or teamingPolicy.notifySwitches.value + != self.module.params["teaming_policy"]["notify_switches"] + or teamingPolicy.rollingOrder.value + != self.module.params["teaming_policy"]["rolling_order"] + ): + return "update" # PG policy (advanced_policy) policy = self.dvs_portgroup.config.policy - if policy.blockOverrideAllowed != self.module.params['port_policy']['block_override'] or \ - policy.ipfixOverrideAllowed != self.module.params['port_policy']['ipfix_override'] or \ - policy.livePortMovingAllowed != self.module.params['port_policy']['live_port_move'] or \ - policy.networkResourcePoolOverrideAllowed != self.module.params['port_policy']['network_rp_override'] or \ - policy.portConfigResetAtDisconnect != self.module.params['port_policy']['port_config_reset_at_disconnect'] or \ - policy.securityPolicyOverrideAllowed != self.module.params['port_policy']['security_override'] or \ - policy.shapingOverrideAllowed != self.module.params['port_policy']['shaping_override'] or \ - policy.trafficFilterOverrideAllowed != self.module.params['port_policy']['traffic_filter_override'] or \ - policy.uplinkTeamingOverrideAllowed != self.module.params['port_policy']['uplink_teaming_override'] or \ - policy.vendorConfigOverrideAllowed != self.module.params['port_policy']['vendor_config_override'] or \ - policy.vlanOverrideAllowed != self.module.params['port_policy']['vlan_override']: - return 'update' + if ( + policy.blockOverrideAllowed + != self.module.params["port_policy"]["block_override"] + or policy.ipfixOverrideAllowed + != self.module.params["port_policy"]["ipfix_override"] + or policy.livePortMovingAllowed + != self.module.params["port_policy"]["live_port_move"] + or policy.networkResourcePoolOverrideAllowed + != self.module.params["port_policy"]["network_rp_override"] + or policy.portConfigResetAtDisconnect + != self.module.params["port_policy"][ + "port_config_reset_at_disconnect" + ] + or policy.securityPolicyOverrideAllowed + != self.module.params["port_policy"]["security_override"] + or policy.shapingOverrideAllowed + != self.module.params["port_policy"]["shaping_override"] + or policy.trafficFilterOverrideAllowed + != self.module.params["port_policy"]["traffic_filter_override"] + or policy.uplinkTeamingOverrideAllowed + != self.module.params["port_policy"]["uplink_teaming_override"] + or policy.vendorConfigOverrideAllowed + != self.module.params["port_policy"]["vendor_config_override"] + or policy.vlanOverrideAllowed + != self.module.params["port_policy"]["vlan_override"] + ): + return "update" # PG Type - if self.dvs_portgroup.config.type != self.module.params['portgroup_type']: - return 'update' + if ( + self.dvs_portgroup.config.type + != self.module.params["portgroup_type"] + ): + return "update" - return 'present' + return "present" def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - portgroup_name=dict(required=True, type='str'), - switch_name=dict(required=True, type='str'), - vlan_id=dict(required=True, type='str'), - num_ports=dict(required=True, type='int'), - portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'), - state=dict(required=True, choices=['present', 'absent'], type='str'), - vlan_trunk=dict(type='bool', default=False), + 
portgroup_name=dict(required=True, type="str"), + switch_name=dict(required=True, type="str"), + vlan_id=dict(required=True, type="str"), + num_ports=dict(required=True, type="int"), + portgroup_type=dict( + required=True, + choices=["earlyBinding", "lateBinding", "ephemeral"], + type="str", + ), + state=dict( + required=True, choices=["present", "absent"], type="str" + ), + vlan_trunk=dict(type="bool", default=False), network_policy=dict( - type='dict', + type="dict", options=dict( - promiscuous=dict(type='bool', default=False), - forged_transmits=dict(type='bool', default=False), - mac_changes=dict(type='bool', default=False) + promiscuous=dict(type="bool", default=False), + forged_transmits=dict(type="bool", default=False), + mac_changes=dict(type="bool", default=False), ), default=dict( promiscuous=False, forged_transmits=False, - mac_changes=False - ) + mac_changes=False, + ), ), teaming_policy=dict( - type='dict', + type="dict", options=dict( - inbound_policy=dict(type='bool', default=False), - notify_switches=dict(type='bool', default=True), - rolling_order=dict(type='bool', default=False), - load_balance_policy=dict(type='str', - default='loadbalance_srcid', - choices=[ - 'loadbalance_ip', - 'loadbalance_srcmac', - 'loadbalance_srcid', - 'loadbalance_loadbased', - 'failover_explicit', - ], - ) + inbound_policy=dict(type="bool", default=False), + notify_switches=dict(type="bool", default=True), + rolling_order=dict(type="bool", default=False), + load_balance_policy=dict( + type="str", + default="loadbalance_srcid", + choices=[ + "loadbalance_ip", + "loadbalance_srcmac", + "loadbalance_srcid", + "loadbalance_loadbased", + "failover_explicit", + ], + ), ), default=dict( inbound_policy=False, notify_switches=True, rolling_order=False, - load_balance_policy='loadbalance_srcid', + load_balance_policy="loadbalance_srcid", ), ), port_policy=dict( - type='dict', + type="dict", options=dict( - block_override=dict(type='bool', default=True), - ipfix_override=dict(type='bool', default=False), - live_port_move=dict(type='bool', default=False), - network_rp_override=dict(type='bool', default=False), - port_config_reset_at_disconnect=dict(type='bool', default=True), - security_override=dict(type='bool', default=False), - shaping_override=dict(type='bool', default=False), - traffic_filter_override=dict(type='bool', default=False), - uplink_teaming_override=dict(type='bool', default=False), - vendor_config_override=dict(type='bool', default=False), - vlan_override=dict(type='bool', default=False) + block_override=dict(type="bool", default=True), + ipfix_override=dict(type="bool", default=False), + live_port_move=dict(type="bool", default=False), + network_rp_override=dict(type="bool", default=False), + port_config_reset_at_disconnect=dict( + type="bool", default=True + ), + security_override=dict(type="bool", default=False), + shaping_override=dict(type="bool", default=False), + traffic_filter_override=dict(type="bool", default=False), + uplink_teaming_override=dict(type="bool", default=False), + vendor_config_override=dict(type="bool", default=False), + vlan_override=dict(type="bool", default=False), ), default=dict( block_override=True, @@ -506,18 +639,19 @@ def main(): traffic_filter_override=False, uplink_teaming_override=False, vendor_config_override=False, - vlan_override=False - ) - ) + vlan_override=False, + ), + ), ) ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) 
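The transformations applied throughout the patch are mechanical: single quotes become double quotes, and calls that exceed the configured line length are wrapped with trailing commas. A rough before/after sketch of one option from the argument specs in this patch (rewritten as standalone assignments for illustration):

```python
# Before reformatting: single quotes, call packed onto one line.
state_before = dict(default='present', choices=['present', 'absent'], type='str')

# After reformatting: double quotes, call wrapped to fit the configured
# line length. The resulting object is identical, so behaviour is unchanged.
state_after = dict(
    default="present", choices=["present", "absent"], type="str"
)

assert state_before == state_after
```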
vmware_dvs_portgroup = VMwareDvsPortgroup(module) vmware_dvs_portgroup.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_dvs_portgroup_find.py b/plugins/modules/vmware_dvs_portgroup_find.py index 38df495..0ad5069 100644 --- a/plugins/modules/vmware_dvs_portgroup_find.py +++ b/plugins/modules/vmware_dvs_portgroup_find.py @@ -5,16 +5,17 @@ from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_dvs_portgroup_find short_description: Find portgroup(s) in a VMware environment @@ -52,9 +53,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get all portgroups in dvswitch vDS vmware_dvs_portgroup_find: hostname: "{{ vcenter_hostname }}" @@ -72,9 +73,9 @@ vlanid: '15' validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" dvs_portgroups: description: basic details of portgroups found returned: on success @@ -88,7 +89,7 @@ "vlan_id": "0" } ] -''' +""" try: from pyVmomi import vim @@ -96,23 +97,31 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_dvs_by_name +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_dvs_by_name, +) class DVSPortgroupFindManager(PyVmomi): def __init__(self, module): super(DVSPortgroupFindManager, self).__init__(module) - self.dvs_name = self.params['dvswitch'] - self.vlan = self.params['vlanid'] + self.dvs_name = self.params["dvswitch"] + self.vlan = self.params["vlanid"] self.cmp_vlans = True if self.vlan else False - self.pgs = self.find_portgroups_by_name(self.content, self.module.params['name']) + self.pgs = self.find_portgroups_by_name( + self.content, self.module.params["name"] + ) if self.dvs_name: self.pgs = self.find_portgroups_by_dvs(self.pgs, self.dvs_name) def find_portgroups_by_name(self, content, name=None): vimtype = [vim.dvs.DistributedVirtualPortgroup] - container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) + container = content.viewManager.CreateContainerView( + content.rootFolder, vimtype, True + ) if not name: obj = container.view else: @@ -137,8 +146,8 @@ def vlan_match(self, pgup, userup, vlanlst): return True for ln in vlanlst: - if '-' in ln: - arr = ln.split('-') + if "-" in ln: + arr = ln.split("-") if arr[0] < self.vlan and self.vlan < arr[1]: res = True elif ln == str(self.vlan): @@ -163,7 +172,9 @@ def get_dvs_portgroup(self): if item.start == item.end: vlan_id_list.append(str(item.start)) else: - vlan_id_list.append(str(item.start) + '-' + str(item.end)) + vlan_id_list.append( + str(item.start) + "-" + str(item.end) + ) elif isinstance(vlanInfo, cl2): pvlan = True vlan_id_list.append(str(vlanInfo.pvlanId)) @@ -171,20 +182,30 @@ def get_dvs_portgroup(self): vlan_id_list.append(str(vlanInfo.vlanId)) if self.cmp_vlans: - if self.vlan_match(pg.config.uplink, self.module.params['show_uplink'], vlan_id_list): - pglist.append(dict( + if self.vlan_match( + pg.config.uplink, + self.module.params["show_uplink"], + vlan_id_list, + ): + pglist.append( + dict( + name=pg.name, + trunk=trunk, + 
pvlan=pvlan, + vlan_id=",".join(vlan_id_list), + dvswitch=pg.config.distributedVirtualSwitch.name, + ) + ) + else: + pglist.append( + dict( name=pg.name, trunk=trunk, pvlan=pvlan, - vlan_id=','.join(vlan_id_list), - dvswitch=pg.config.distributedVirtualSwitch.name)) - else: - pglist.append(dict( - name=pg.name, - trunk=trunk, - pvlan=pvlan, - vlan_id=','.join(vlan_id_list), - dvswitch=pg.config.distributedVirtualSwitch.name)) + vlan_id=",".join(vlan_id_list), + dvswitch=pg.config.distributedVirtualSwitch.name, + ) + ) return pglist @@ -192,22 +213,21 @@ def get_dvs_portgroup(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - dvswitch=dict(type='str', required=False), - vlanid=dict(type='int', required=False), - name=dict(type='str', required=False), - show_uplink=dict(type='bool', default=False), + dvswitch=dict(type="str", required=False), + vlanid=dict(type="int", required=False), + name=dict(type="str", required=False), + show_uplink=dict(type="bool", default=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_if=[ - ['show_uplink', 'True', 'vlanid'] - ] + required_if=[["show_uplink", "True", "vlanid"]], ) dvs_pg_mgr = DVSPortgroupFindManager(module) - module.exit_json(changed=False, - dvs_portgroups=dvs_pg_mgr.get_dvs_portgroup()) + module.exit_json( + changed=False, dvs_portgroups=dvs_pg_mgr.get_dvs_portgroup() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_dvs_portgroup_info.py b/plugins/modules/vmware_dvs_portgroup_info.py index c7a5bf0..c640654 100644 --- a/plugins/modules/vmware_dvs_portgroup_info.py +++ b/plugins/modules/vmware_dvs_portgroup_info.py @@ -6,16 +6,17 @@ from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_dvs_portgroup_info short_description: Gathers info DVS portgroup configurations @@ -62,9 +63,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get info about DVPG vmware_dvs_portgroup_info: hostname: "{{ vcenter_server }}" @@ -81,9 +82,9 @@ - "{{ dvpg_info.dvs_portgroup_info['dvs_001'] | json_query(query) }}" vars: query: "[?portgroup_name=='dvpg_001']" -''' +""" -RETURN = r''' +RETURN = r""" dvs_portgroup_info: description: metadata about DVS portgroup configuration returned: on success @@ -128,7 +129,7 @@ }, ] } -''' +""" try: from pyVmomi import vim @@ -136,28 +137,41 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs, find_dvs_by_name +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + get_all_objs, + find_dvs_by_name, +) class DVSPortgroupInfoManager(PyVmomi): def __init__(self, module): super(DVSPortgroupInfoManager, self).__init__(module) - self.dc_name = self.params['datacenter'] - self.dvs_name = self.params['dvswitch'] + self.dc_name = self.params["datacenter"] + self.dvs_name = self.params["dvswitch"] datacenter = self.find_datacenter_by_name(self.dc_name) if datacenter is None: - self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name) + self.module.fail_json( + msg="Failed to find the 
datacenter %s" % self.dc_name + ) if self.dvs_name: # User specified specific dvswitch name to gather information dvsn = find_dvs_by_name(self.content, self.dvs_name) if dvsn is None: - self.module.fail_json(msg="Failed to find the dvswitch %s" % self.dvs_name) + self.module.fail_json( + msg="Failed to find the dvswitch %s" % self.dvs_name + ) self.dvsls = [dvsn] else: # default behaviour, gather information about all dvswitches - self.dvsls = get_all_objs(self.content, [vim.DistributedVirtualSwitch], folder=datacenter.networkFolder) + self.dvsls = get_all_objs( + self.content, + [vim.DistributedVirtualSwitch], + folder=datacenter.networkFolder, + ) def get_vlan_info(self, vlan_obj=None): """ @@ -171,7 +185,9 @@ def get_vlan_info(self, vlan_obj=None): if not vlan_obj: return vdret - if isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec): + if isinstance( + vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec + ): vlan_id_list = [] for vli in vlan_obj.vlanId: if vli.start == vli.end: @@ -179,10 +195,16 @@ def get_vlan_info(self, vlan_obj=None): else: vlan_id_list.append(str(vli.start) + "-" + str(vli.end)) vdret = dict(trunk=True, pvlan=False, vlan_id=vlan_id_list) - elif isinstance(vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec): - vdret = dict(trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId)) + elif isinstance( + vlan_obj, vim.dvs.VmwareDistributedVirtualSwitch.PvlanSpec + ): + vdret = dict( + trunk=False, pvlan=True, vlan_id=str(vlan_obj.pvlanId) + ) else: - vdret = dict(trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId)) + vdret = dict( + trunk=False, pvlan=False, vlan_id=str(vlan_obj.vlanId) + ) return vdret @@ -197,13 +219,16 @@ def gather_dvs_portgroup_info(self): port_policy = dict() vlan_info = dict() - if self.module.params['show_network_policy'] and dvs_pg.config.defaultPortConfig.securityPolicy: + if ( + self.module.params["show_network_policy"] + and dvs_pg.config.defaultPortConfig.securityPolicy + ): network_policy = dict( forged_transmits=dvs_pg.config.defaultPortConfig.securityPolicy.forgedTransmits.value, promiscuous=dvs_pg.config.defaultPortConfig.securityPolicy.allowPromiscuous.value, - mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value + mac_changes=dvs_pg.config.defaultPortConfig.securityPolicy.macChanges.value, ) - if self.module.params['show_teaming_policy']: + if self.module.params["show_teaming_policy"]: # govcsim does not have uplinkTeamingPolicy, remove this check once # PR https://github.com/vmware/govmomi/pull/1524 merged. 
if dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy: @@ -214,7 +239,7 @@ def gather_dvs_portgroup_info(self): rolling_order=dvs_pg.config.defaultPortConfig.uplinkTeamingPolicy.rollingOrder.value, ) - if self.params['show_port_policy']: + if self.params["show_port_policy"]: # govcsim does not have port policy if dvs_pg.config.policy: port_policy = dict( @@ -228,11 +253,13 @@ def gather_dvs_portgroup_info(self): traffic_filter_override=dvs_pg.config.policy.trafficFilterOverrideAllowed, uplink_teaming_override=dvs_pg.config.policy.uplinkTeamingOverrideAllowed, vendor_config_override=dvs_pg.config.policy.vendorConfigOverrideAllowed, - vlan_override=dvs_pg.config.policy.vlanOverrideAllowed + vlan_override=dvs_pg.config.policy.vlanOverrideAllowed, ) - if self.params['show_vlan_info']: - vlan_info = self.get_vlan_info(dvs_pg.config.defaultPortConfig.vlan) + if self.params["show_vlan_info"]: + vlan_info = self.get_vlan_info( + dvs_pg.config.defaultPortConfig.vlan + ) dvpg_details = dict( portgroup_name=dvs_pg.name, @@ -253,21 +280,22 @@ def gather_dvs_portgroup_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=True), - show_network_policy=dict(type='bool', default=True), - show_teaming_policy=dict(type='bool', default=True), - show_port_policy=dict(type='bool', default=True), + datacenter=dict(type="str", required=True), + show_network_policy=dict(type="bool", default=True), + show_teaming_policy=dict(type="bool", default=True), + show_port_policy=dict(type="bool", default=True), dvswitch=dict(), - show_vlan_info=dict(type='bool', default=False), + show_vlan_info=dict(type="bool", default=False), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) dvs_pg_mgr = DVSPortgroupInfoManager(module) - module.exit_json(changed=False, - dvs_portgroup_info=dvs_pg_mgr.gather_dvs_portgroup_info()) + module.exit_json( + changed=False, + dvs_portgroup_info=dvs_pg_mgr.gather_dvs_portgroup_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_dvswitch.py b/plugins/modules/vmware_dvswitch.py index 213f975..85ebfd3 100644 --- a/plugins/modules/vmware_dvswitch.py +++ b/plugins/modules/vmware_dvswitch.py @@ -9,15 +9,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvswitch short_description: Create or remove a Distributed Switch @@ -162,9 +163,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create dvSwitch vmware_dvswitch: hostname: '{{ vcenter_hostname }}' @@ -211,7 +212,7 @@ switch: dvSwitch state: absent delegate_to: localhost -''' +""" RETURN = """ result: @@ -250,71 +251,92 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible_collections.vmware.general.plugins.module_utils.vmware import ( - PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task + PyVmomi, + TaskError, + find_dvs_by_name, + vmware_argument_spec, + wait_for_task, ) class VMwareDvSwitch(PyVmomi): """Class to manage a 
Distributed Virtual Switch""" + def __init__(self, module): super(VMwareDvSwitch, self).__init__(module) self.dvs = None - self.switch_name = self.module.params['switch_name'] - self.switch_version = self.module.params['switch_version'] - if self.content.about.version == '6.7.0': - self.vcenter_switch_version = '6.6.0' + self.switch_name = self.module.params["switch_name"] + self.switch_version = self.module.params["switch_version"] + if self.content.about.version == "6.7.0": + self.vcenter_switch_version = "6.6.0" else: self.vcenter_switch_version = self.content.about.version - folder = self.params['folder'] + folder = self.params["folder"] if folder: - self.folder_obj = self.content.searchIndex.FindByInventoryPath(folder) + self.folder_obj = self.content.searchIndex.FindByInventoryPath( + folder + ) if not self.folder_obj: - self.module.fail_json(msg="Failed to find the folder specified by %(folder)s" % self.params) + self.module.fail_json( + msg="Failed to find the folder specified by %(folder)s" + % self.params + ) else: - datacenter_name = self.params.get('datacenter_name') + datacenter_name = self.params.get("datacenter_name") datacenter_obj = self.find_datacenter_by_name(datacenter_name) if not datacenter_obj: - self.module.fail_json(msg="Failed to find datacenter '%s' required" - " for managing distributed vSwitch." % datacenter_name) + self.module.fail_json( + msg="Failed to find datacenter '%s' required" + " for managing distributed vSwitch." % datacenter_name + ) self.folder_obj = datacenter_obj.networkFolder - self.mtu = self.module.params['mtu'] + self.mtu = self.module.params["mtu"] # MTU sanity check if not 1280 <= self.mtu <= 9000: self.module.fail_json( - msg="MTU value should be between 1280 and 9000 (both inclusive), provided %d." % self.mtu + msg="MTU value should be between 1280 and 9000 (both inclusive), provided %d." 
+ % self.mtu ) - self.multicast_filtering_mode = self.module.params['multicast_filtering_mode'] - self.uplink_quantity = self.module.params['uplink_quantity'] - self.uplink_prefix = self.module.params['uplink_prefix'] - self.discovery_protocol = self.module.params['discovery_proto'] - self.discovery_operation = self.module.params['discovery_operation'] + self.multicast_filtering_mode = self.module.params[ + "multicast_filtering_mode" + ] + self.uplink_quantity = self.module.params["uplink_quantity"] + self.uplink_prefix = self.module.params["uplink_prefix"] + self.discovery_protocol = self.module.params["discovery_proto"] + self.discovery_operation = self.module.params["discovery_operation"] # TODO: add port mirroring - self.health_check_vlan = self.params['health_check'].get('vlan_mtu') - self.health_check_vlan_interval = self.params['health_check'].get('vlan_mtu_interval') - self.health_check_teaming = self.params['health_check'].get('teaming_failover') - self.health_check_teaming_interval = self.params['health_check'].get('teaming_failover_interval') - if self.params['contact']: - self.contact_name = self.params['contact'].get('name') - self.contact_details = self.params['contact'].get('details') + self.health_check_vlan = self.params["health_check"].get("vlan_mtu") + self.health_check_vlan_interval = self.params["health_check"].get( + "vlan_mtu_interval" + ) + self.health_check_teaming = self.params["health_check"].get( + "teaming_failover" + ) + self.health_check_teaming_interval = self.params["health_check"].get( + "teaming_failover_interval" + ) + if self.params["contact"]: + self.contact_name = self.params["contact"].get("name") + self.contact_details = self.params["contact"].get("details") else: self.contact_name = None self.contact_details = None - self.description = self.module.params['description'] - self.state = self.module.params['state'] + self.description = self.module.params["description"] + self.state = self.module.params["state"] def process_state(self): """Process the current state of the DVS""" dvs_states = { - 'absent': { - 'present': self.destroy_dvswitch, - 'absent': self.exit_unchanged, + "absent": { + "present": self.destroy_dvswitch, + "absent": self.exit_unchanged, + }, + "present": { + "present": self.update_dvswitch, + "absent": self.create_dvswitch, }, - 'present': { - 'present': self.update_dvswitch, - 'absent': self.create_dvswitch, - } } try: @@ -328,10 +350,12 @@ def process_state(self): def check_dvs(self): """Check if DVS is present""" - self.dvs = find_dvs_by_name(self.content, self.switch_name, folder=self.folder_obj) + self.dvs = find_dvs_by_name( + self.content, self.switch_name, folder=self.folder_obj + ) if self.dvs is None: - return 'absent' - return 'present' + return "absent" + return "present" def create_dvswitch(self): """Create a DVS""" @@ -341,32 +365,36 @@ def create_dvswitch(self): spec = vim.DistributedVirtualSwitch.CreateSpec() spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() # Name - results['dvswitch'] = self.switch_name + results["dvswitch"] = self.switch_name spec.configSpec.name = self.switch_name # MTU - results['mtu'] = self.mtu + results["mtu"] = self.mtu spec.configSpec.maxMtu = self.mtu # Discovery Protocol type and operation - results['discovery_protocol'] = self.discovery_protocol - results['discovery_operation'] = self.discovery_operation + results["discovery_protocol"] = self.discovery_protocol + results["discovery_operation"] = self.discovery_operation spec.configSpec.linkDiscoveryProtocolConfig = 
self.create_ldp_spec() # Administrator contact - results['contact'] = self.contact_name - results['contact_details'] = self.contact_details + results["contact"] = self.contact_name + results["contact_details"] = self.contact_details if self.contact_name or self.contact_details: spec.contact = self.create_contact_spec() # Description - results['description'] = self.description + results["description"] = self.description if self.description: spec.description = self.description # Uplinks - results['uplink_quantity'] = self.uplink_quantity - spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + results["uplink_quantity"] = self.uplink_quantity + spec.configSpec.uplinkPortPolicy = ( + vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + ) for count in range(1, self.uplink_quantity + 1): - spec.configSpec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count)) - results['uplinks'] = spec.configSpec.uplinkPortPolicy.uplinkPortName + spec.configSpec.uplinkPortPolicy.uplinkPortName.append( + "%s%d" % (self.uplink_prefix, count) + ) + results["uplinks"] = spec.configSpec.uplinkPortPolicy.uplinkPortName # Version - results['version'] = self.switch_version + results["version"] = self.switch_version if self.switch_version: spec.productInfo = self.create_product_spec(self.switch_version) @@ -380,7 +408,8 @@ def create_dvswitch(self): wait_for_task(task) except TaskError as invalid_argument: self.module.fail_json( - msg="Failed to create DVS : %s" % to_native(invalid_argument) + msg="Failed to create DVS : %s" + % to_native(invalid_argument) ) # Find new DVS self.dvs = find_dvs_by_name(self.content, self.switch_name) @@ -389,18 +418,27 @@ def create_dvswitch(self): # Use the same version in the new spec; The version will be increased by one by the API automatically spec.configVersion = self.dvs.config.configVersion # Set multicast filtering mode - results['multicast_filtering_mode'] = self.multicast_filtering_mode - multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode) - if self.dvs.config.multicastFilteringMode != multicast_filtering_mode: + results["multicast_filtering_mode"] = self.multicast_filtering_mode + multicast_filtering_mode = self.get_api_mc_filtering_mode( + self.multicast_filtering_mode + ) + if ( + self.dvs.config.multicastFilteringMode + != multicast_filtering_mode + ): changed_multicast = True spec.multicastFilteringMode = multicast_filtering_mode - spec.multicastFilteringMode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode) + spec.multicastFilteringMode = self.get_api_mc_filtering_mode( + self.multicast_filtering_mode + ) if changed_multicast: self.update_dvs_config(self.dvs, spec) # Set Health Check config - results['health_check_vlan'] = self.health_check_vlan - results['health_check_teaming'] = self.health_check_teaming - result = self.check_health_check_config(self.dvs.config.healthCheckConfig) + results["health_check_vlan"] = self.health_check_vlan + results["health_check_teaming"] = self.health_check_teaming + result = self.check_health_check_config( + self.dvs.config.healthCheckConfig + ) changed_health_check = result[1] if changed_health_check: self.update_health_check_config(self.dvs, result[0]) @@ -410,9 +448,9 @@ def create_dvswitch(self): def create_ldp_spec(self): """Create Link Discovery Protocol config spec""" ldp_config_spec = vim.host.LinkDiscoveryProtocolConfig() - if self.discovery_protocol == 'disabled': - ldp_config_spec.protocol = 'cdp' - 
ldp_config_spec.operation = 'none' + if self.discovery_protocol == "disabled": + ldp_config_spec.protocol = "cdp" + ldp_config_spec.operation = "none" else: ldp_config_spec.protocol = self.discovery_protocol ldp_config_spec.operation = self.discovery_operation @@ -427,9 +465,9 @@ def create_product_spec(self, switch_version): @staticmethod def get_api_mc_filtering_mode(mode): """Get Multicast filtering mode""" - if mode == 'basic': - return 'legacyFiltering' - return 'snooping' + if mode == "basic": + return "legacyFiltering" + return "snooping" def create_contact_spec(self): """Create contact info spec""" @@ -450,75 +488,119 @@ def update_dvs_config(self, switch_object, spec): def check_health_check_config(self, health_check_config): """Check Health Check config""" - changed = changed_vlan = changed_vlan_interval = changed_teaming = changed_teaming_interval = False + changed = ( + changed_vlan + ) = ( + changed_vlan_interval + ) = changed_teaming = changed_teaming_interval = False vlan_previous = teaming_previous = None vlan_interval_previous = teaming_interval_previous = 0 for config in health_check_config: - if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig): + if isinstance( + config, + vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig, + ): if config.enable != self.health_check_vlan: changed = changed_vlan = True vlan_previous = config.enable config.enable = self.health_check_vlan - if config.enable and config.interval != self.health_check_vlan_interval: + if ( + config.enable + and config.interval != self.health_check_vlan_interval + ): changed = changed_vlan_interval = True vlan_interval_previous = config.interval config.interval = self.health_check_vlan_interval - if isinstance(config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig): + if isinstance( + config, + vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig, + ): if config.enable != self.health_check_teaming: changed = changed_teaming = True teaming_previous = config.enable config.enable = self.health_check_teaming - if config.enable and config.interval != self.health_check_teaming_interval: + if ( + config.enable + and config.interval != self.health_check_teaming_interval + ): changed = changed_teaming_interval = True teaming_interval_previous = config.interval config.interval = self.health_check_teaming_interval - return (health_check_config, changed, changed_vlan, vlan_previous, changed_vlan_interval, vlan_interval_previous, - changed_teaming, teaming_previous, changed_teaming_interval, teaming_interval_previous) + return ( + health_check_config, + changed, + changed_vlan, + vlan_previous, + changed_vlan_interval, + vlan_interval_previous, + changed_teaming, + teaming_previous, + changed_teaming_interval, + teaming_interval_previous, + ) def update_health_check_config(self, switch_object, health_check_config): """Update Health Check config""" try: - task = switch_object.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=health_check_config) + task = switch_object.UpdateDVSHealthCheckConfig_Task( + healthCheckConfig=health_check_config + ) except vim.fault.DvsFault as dvs_fault: - self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault)) + self.module.fail_json( + msg="Update failed due to DVS fault : %s" + % to_native(dvs_fault) + ) except vmodl.fault.NotSupported as not_supported: - self.module.fail_json(msg="Health check not supported on the switch : %s" % to_native(not_supported)) + self.module.fail_json( + 
msg="Health check not supported on the switch : %s" + % to_native(not_supported) + ) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to configure health check : %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to configure health check : %s" + % to_native(invalid_argument) + ) try: wait_for_task(task) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to update health check config : %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to update health check config : %s" + % to_native(invalid_argument) + ) def exit_unchanged(self): """Exit with status message""" changed = False results = dict(changed=changed) - results['dvswitch'] = self.switch_name - results['result'] = "DVS not present" + results["dvswitch"] = self.switch_name + results["result"] = "DVS not present" self.module.exit_json(**results) def destroy_dvswitch(self): """Delete a DVS""" changed = True results = dict(changed=changed) - results['dvswitch'] = self.switch_name + results["dvswitch"] = self.switch_name if self.module.check_mode: - results['result'] = "DVS would be deleted" + results["result"] = "DVS would be deleted" else: try: task = self.dvs.Destroy_Task() except vim.fault.VimFault as vim_fault: - self.module.fail_json(msg="Failed to deleted DVS : %s" % to_native(vim_fault)) + self.module.fail_json( + msg="Failed to deleted DVS : %s" % to_native(vim_fault) + ) wait_for_task(task) - results['result'] = "DVS deleted" + results["result"] = "DVS deleted" self.module.exit_json(**results) def update_dvswitch(self): """Check and update DVS settings""" - changed = changed_settings = changed_ldp = changed_version = changed_health_check = False + changed = ( + changed_settings + ) = changed_ldp = changed_version = changed_health_check = False results = dict(changed=changed) - results['dvswitch'] = self.switch_name + results["dvswitch"] = self.switch_name changed_list = [] config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() @@ -526,135 +608,191 @@ def update_dvswitch(self): config_spec.configVersion = self.dvs.config.configVersion # Check MTU - results['mtu'] = self.mtu + results["mtu"] = self.mtu if self.dvs.config.maxMtu != self.mtu: changed = changed_settings = True changed_list.append("mtu") - results['mtu_previous'] = config_spec.maxMtu + results["mtu_previous"] = config_spec.maxMtu config_spec.maxMtu = self.mtu # Check Discovery Protocol type and operation ldp_protocol = self.dvs.config.linkDiscoveryProtocolConfig.protocol ldp_operation = self.dvs.config.linkDiscoveryProtocolConfig.operation - if self.discovery_protocol == 'disabled': - results['discovery_protocol'] = self.discovery_protocol - results['discovery_operation'] = 'n/a' - if ldp_protocol != 'cdp' or ldp_operation != 'none': + if self.discovery_protocol == "disabled": + results["discovery_protocol"] = self.discovery_protocol + results["discovery_operation"] = "n/a" + if ldp_protocol != "cdp" or ldp_operation != "none": changed_ldp = True - results['discovery_protocol_previous'] = ldp_protocol - results['discovery_operation_previous'] = ldp_operation + results["discovery_protocol_previous"] = ldp_protocol + results["discovery_operation_previous"] = ldp_operation else: - results['discovery_protocol'] = self.discovery_protocol - results['discovery_operation'] = self.discovery_operation - if ldp_protocol != self.discovery_protocol or ldp_operation != self.discovery_operation: + results["discovery_protocol"] = self.discovery_protocol + 
results["discovery_operation"] = self.discovery_operation + if ( + ldp_protocol != self.discovery_protocol + or ldp_operation != self.discovery_operation + ): changed_ldp = True if ldp_protocol != self.discovery_protocol: - results['discovery_protocol_previous'] = ldp_protocol + results["discovery_protocol_previous"] = ldp_protocol if ldp_operation != self.discovery_operation: - results['discovery_operation_previous'] = ldp_operation + results["discovery_operation_previous"] = ldp_operation if changed_ldp: changed = changed_settings = True changed_list.append("discovery protocol") config_spec.linkDiscoveryProtocolConfig = self.create_ldp_spec() # Check Multicast filtering mode - results['multicast_filtering_mode'] = self.multicast_filtering_mode - multicast_filtering_mode = self.get_api_mc_filtering_mode(self.multicast_filtering_mode) + results["multicast_filtering_mode"] = self.multicast_filtering_mode + multicast_filtering_mode = self.get_api_mc_filtering_mode( + self.multicast_filtering_mode + ) if self.dvs.config.multicastFilteringMode != multicast_filtering_mode: changed = changed_settings = True changed_list.append("multicast filtering") - results['multicast_filtering_mode_previous'] = self.dvs.config.multicastFilteringMode + results[ + "multicast_filtering_mode_previous" + ] = self.dvs.config.multicastFilteringMode config_spec.multicastFilteringMode = multicast_filtering_mode # Check administrator contact - results['contact'] = self.contact_name - results['contact_details'] = self.contact_details - if self.dvs.config.contact.name != self.contact_name or self.dvs.config.contact.contact != self.contact_details: + results["contact"] = self.contact_name + results["contact_details"] = self.contact_details + if ( + self.dvs.config.contact.name != self.contact_name + or self.dvs.config.contact.contact != self.contact_details + ): changed = changed_settings = True changed_list.append("contact") - results['contact_previous'] = self.dvs.config.contact.name - results['contact_details_previous'] = self.dvs.config.contact.contact + results["contact_previous"] = self.dvs.config.contact.name + results[ + "contact_details_previous" + ] = self.dvs.config.contact.contact config_spec.contact = self.create_contact_spec() # Check description - results['description'] = self.description + results["description"] = self.description if self.dvs.config.description != self.description: changed = changed_settings = True changed_list.append("description") - results['description_previous'] = self.dvs.config.description + results["description_previous"] = self.dvs.config.description if self.description is None: # need to use empty string; will be set to None by API - config_spec.description = '' + config_spec.description = "" else: config_spec.description = self.description # Check uplinks - results['uplink_quantity'] = self.uplink_quantity - if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) != self.uplink_quantity: + results["uplink_quantity"] = self.uplink_quantity + if ( + len(self.dvs.config.uplinkPortPolicy.uplinkPortName) + != self.uplink_quantity + ): changed = changed_settings = True changed_list.append("uplink quantity") - results['uplink_quantity_previous'] = len(self.dvs.config.uplinkPortPolicy.uplinkPortName) - config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + results["uplink_quantity_previous"] = len( + self.dvs.config.uplinkPortPolicy.uplinkPortName + ) + config_spec.uplinkPortPolicy = ( + vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy() + ) # 
just replace the uplink array if uplinks need to be added - if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) < self.uplink_quantity: + if ( + len(self.dvs.config.uplinkPortPolicy.uplinkPortName) + < self.uplink_quantity + ): for count in range(1, self.uplink_quantity + 1): - config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count)) + config_spec.uplinkPortPolicy.uplinkPortName.append( + "%s%d" % (self.uplink_prefix, count) + ) # just replace the uplink array if uplinks need to be removed - if len(self.dvs.config.uplinkPortPolicy.uplinkPortName) > self.uplink_quantity: + if ( + len(self.dvs.config.uplinkPortPolicy.uplinkPortName) + > self.uplink_quantity + ): for count in range(1, self.uplink_quantity + 1): - config_spec.uplinkPortPolicy.uplinkPortName.append("%s%d" % (self.uplink_prefix, count)) - results['uplinks'] = config_spec.uplinkPortPolicy.uplinkPortName - results['uplinks_previous'] = self.dvs.config.uplinkPortPolicy.uplinkPortName + config_spec.uplinkPortPolicy.uplinkPortName.append( + "%s%d" % (self.uplink_prefix, count) + ) + results["uplinks"] = config_spec.uplinkPortPolicy.uplinkPortName + results[ + "uplinks_previous" + ] = self.dvs.config.uplinkPortPolicy.uplinkPortName else: # No uplink name check; uplink names can't be changed easily if they are used by a portgroup - results['uplinks'] = self.dvs.config.uplinkPortPolicy.uplinkPortName + results[ + "uplinks" + ] = self.dvs.config.uplinkPortPolicy.uplinkPortName # Check Health Check - results['health_check_vlan'] = self.health_check_vlan - results['health_check_teaming'] = self.health_check_teaming - results['health_check_vlan_interval'] = self.health_check_vlan_interval - results['health_check_teaming_interval'] = self.health_check_teaming_interval - (health_check_config, changed_health_check, changed_vlan, vlan_previous, - changed_vlan_interval, vlan_interval_previous, changed_teaming, teaming_previous, - changed_teaming_interval, teaming_interval_previous) = \ - self.check_health_check_config(self.dvs.config.healthCheckConfig) + results["health_check_vlan"] = self.health_check_vlan + results["health_check_teaming"] = self.health_check_teaming + results["health_check_vlan_interval"] = self.health_check_vlan_interval + results[ + "health_check_teaming_interval" + ] = self.health_check_teaming_interval + ( + health_check_config, + changed_health_check, + changed_vlan, + vlan_previous, + changed_vlan_interval, + vlan_interval_previous, + changed_teaming, + teaming_previous, + changed_teaming_interval, + teaming_interval_previous, + ) = self.check_health_check_config(self.dvs.config.healthCheckConfig) if changed_health_check: changed = True changed_list.append("health check") if changed_vlan: - results['health_check_vlan_previous'] = vlan_previous + results["health_check_vlan_previous"] = vlan_previous if changed_vlan_interval: - results['health_check_vlan_interval_previous'] = vlan_interval_previous + results[ + "health_check_vlan_interval_previous" + ] = vlan_interval_previous if changed_teaming: - results['health_check_teaming_previous'] = teaming_previous + results["health_check_teaming_previous"] = teaming_previous if changed_teaming_interval: - results['health_check_teaming_interval_previous'] = teaming_interval_previous + results[ + "health_check_teaming_interval_previous" + ] = teaming_interval_previous # Check switch version if self.switch_version: - results['version'] = self.switch_version + results["version"] = self.switch_version if self.dvs.config.productInfo.version != 
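# Illustrative sketch (not part of the patch; uplink_quantity=3 is an example value,
# "Uplink " is the module's default uplink_prefix). Both uplink branches above rebuild
# the whole uplinkPortName array rather than editing it in place, using the same
# "%s%d" naming scheme:
uplink_prefix = "Uplink "
uplink_quantity = 3
uplink_names = ["%s%d" % (uplink_prefix, count) for count in range(1, uplink_quantity + 1)]
assert uplink_names == ["Uplink 1", "Uplink 2", "Uplink 3"]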
self.switch_version: changed_version = True spec_product = self.create_product_spec(self.switch_version) else: - results['version'] = self.vcenter_switch_version - if self.dvs.config.productInfo.version != self.vcenter_switch_version: + results["version"] = self.vcenter_switch_version + if ( + self.dvs.config.productInfo.version + != self.vcenter_switch_version + ): changed_version = True - spec_product = self.create_product_spec(self.vcenter_switch_version) + spec_product = self.create_product_spec( + self.vcenter_switch_version + ) if changed_version: changed = True changed_list.append("switch version") - results['version_previous'] = self.dvs.config.productInfo.version + results["version_previous"] = self.dvs.config.productInfo.version if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix @@ -662,17 +800,24 @@ def update_dvswitch(self): if changed_settings: self.update_dvs_config(self.dvs, config_spec) if changed_health_check: - self.update_health_check_config(self.dvs, health_check_config) + self.update_health_check_config( + self.dvs, health_check_config + ) if changed_version: - task = self.dvs.PerformDvsProductSpecOperation_Task("upgrade", spec_product) + task = self.dvs.PerformDvsProductSpecOperation_Task( + "upgrade", spec_product + ) try: wait_for_task(task) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to update DVS version : %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to update DVS version : %s" + % to_native(invalid_argument) + ) else: message = "DVS already configured properly" - results['changed'] = changed - results['result'] = message + results["changed"] = changed + results["result"] = message self.module.exit_json(**results) @@ -682,29 +827,38 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - datacenter_name=dict(aliases=['datacenter']), + datacenter_name=dict(aliases=["datacenter"]), folder=dict(), - switch_name=dict(required=True, aliases=['switch', 'dvswitch']), - mtu=dict(type='int', default=1500), - multicast_filtering_mode=dict(type='str', default='basic', choices=['basic', 'snooping']), + switch_name=dict(required=True, aliases=["switch", "dvswitch"]), + mtu=dict(type="int", default=1500), + multicast_filtering_mode=dict( + type="str", default="basic", choices=["basic", "snooping"] + ), switch_version=dict( - choices=['5.0.0', '5.1.0', '5.5.0', '6.0.0', '6.5.0', '6.6.0'], - aliases=['version'], - default=None + choices=["5.0.0", "5.1.0", "5.5.0", "6.0.0", "6.5.0", "6.6.0"], + aliases=["version"], + default=None, ), - uplink_quantity=dict(type='int'), - uplink_prefix=dict(type='str', default='Uplink '), + uplink_quantity=dict(type="int"), + uplink_prefix=dict(type="str", default="Uplink "), discovery_proto=dict( - type='str', choices=['cdp', 'lldp', 'disabled'], default='cdp', aliases=['discovery_protocol'] + type="str", + choices=["cdp", "lldp", "disabled"], + default="cdp", + aliases=["discovery_protocol"], + ), + discovery_operation=dict( + type="str", + choices=["both", 
"advertise", "listen"], + default="listen", ), - discovery_operation=dict(type='str', choices=['both', 'advertise', 'listen'], default='listen'), health_check=dict( - type='dict', + type="dict", options=dict( - vlan_mtu=dict(type='bool', default=False), - teaming_failover=dict(type='bool', default=False), - vlan_mtu_interval=dict(type='int', default=0), - teaming_failover_interval=dict(type='int', default=0), + vlan_mtu=dict(type="bool", default=False), + teaming_failover=dict(type="bool", default=False), + vlan_mtu_interval=dict(type="int", default=0), + teaming_failover_interval=dict(type="int", default=0), ), default=dict( vlan_mtu=False, @@ -714,29 +868,21 @@ def main(): ), ), contact=dict( - type='dict', + type="dict", options=dict( - name=dict(type='str'), - description=dict(type='str'), + name=dict(type="str"), description=dict(type="str") ), ), - description=dict(type='str'), - state=dict(default='present', choices=['present', 'absent']), + description=dict(type="str"), + state=dict(default="present", choices=["present", "absent"]), ) ) module = AnsibleModule( argument_spec=argument_spec, - required_if=[ - ('state', 'present', - ['uplink_quantity']), - ], - required_one_of=[ - ['folder', 'datacenter_name'], - ], - mutually_exclusive=[ - ['folder', 'datacenter_name'], - ], + required_if=[("state", "present", ["uplink_quantity"])], + required_one_of=[["folder", "datacenter_name"]], + mutually_exclusive=[["folder", "datacenter_name"]], supports_check_mode=True, ) @@ -744,5 +890,5 @@ def main(): vmware_dvswitch.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_dvswitch_lacp.py b/plugins/modules/vmware_dvswitch_lacp.py index 1a5e1e2..62cac7c 100644 --- a/plugins/modules/vmware_dvswitch_lacp.py +++ b/plugins/modules/vmware_dvswitch_lacp.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvswitch_lacp short_description: Manage LACP configuration on a Distributed Switch @@ -79,9 +80,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Enable enhanced mode on a Distributed Switch vmware_dvswitch_lacp: hostname: '{{ inventory_hostname }}' @@ -116,7 +117,7 @@ loop_control: label: "{{ item.name }}" with_items: "{{ vcenter_distributed_switches }}" -''' +""" RETURN = """ result: @@ -144,67 +145,93 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible_collections.vmware.general.plugins.module_utils.vmware import ( - PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task + PyVmomi, + TaskError, + find_dvs_by_name, + vmware_argument_spec, + wait_for_task, ) class VMwareDvSwitchLacp(PyVmomi): """Class to manage a LACP on a Distributed Virtual Switch""" + def __init__(self, module): super(VMwareDvSwitchLacp, self).__init__(module) - self.switch_name = self.module.params['switch'] - self.support_mode = self.module.params['support_mode'] - self.link_aggregation_groups = self.module.params['link_aggregation_groups'] - if self.support_mode == 'basic' and ( - self.link_aggregation_groups and 
not ( - len(self.link_aggregation_groups) == 1 and self.link_aggregation_groups[0] == '')): + self.switch_name = self.module.params["switch"] + self.support_mode = self.module.params["support_mode"] + self.link_aggregation_groups = self.module.params[ + "link_aggregation_groups" + ] + if self.support_mode == "basic" and ( + self.link_aggregation_groups + and not ( + len(self.link_aggregation_groups) == 1 + and self.link_aggregation_groups[0] == "" + ) + ): self.module.fail_json( msg="LAGs can only be configured if 'support_mode' is set to 'enhanced'!" ) self.dvs = find_dvs_by_name(self.content, self.switch_name) if self.dvs is None: - self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name) + self.module.fail_json( + msg="Failed to find DVS %s" % self.switch_name + ) def ensure(self): """Manage LACP configuration""" changed = changed_support_mode = changed_lags = False results = dict(changed=changed) - results['dvswitch'] = self.switch_name + results["dvswitch"] = self.switch_name changed_list = [] spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec() spec.configVersion = self.dvs.config.configVersion # Check support mode - results['support_mode'] = self.support_mode + results["support_mode"] = self.support_mode lacp_support_mode = self.get_lacp_support_mode(self.support_mode) if self.dvs.config.lacpApiVersion != lacp_support_mode: changed = changed_support_mode = True changed_list.append("support mode") - results['support_mode_previous'] = self.get_lacp_support_mode(self.dvs.config.lacpApiVersion) + results["support_mode_previous"] = self.get_lacp_support_mode( + self.dvs.config.lacpApiVersion + ) spec.lacpApiVersion = lacp_support_mode # Check LAGs - results['link_aggregation_groups'] = self.link_aggregation_groups + results["link_aggregation_groups"] = self.link_aggregation_groups if self.link_aggregation_groups and not ( - len(self.link_aggregation_groups) == 1 and self.link_aggregation_groups[0] == ''): + len(self.link_aggregation_groups) == 1 + and self.link_aggregation_groups[0] == "" + ): if self.dvs.config.lacpGroupConfig: lacp_lag_list = [] # Check if desired LAGs are configured for lag in self.link_aggregation_groups: - lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options(lag) + lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options( + lag + ) lag_found = False for lacp_group in self.dvs.config.lacpGroupConfig: if lacp_group.name == lag_name: lag_found = True - if (lag_mode != lacp_group.mode or - lag_uplink_number != lacp_group.uplinkNum or - lag_load_balancing_mode != lacp_group.loadbalanceAlgorithm): + if ( + lag_mode != lacp_group.mode + or lag_uplink_number != lacp_group.uplinkNum + or lag_load_balancing_mode + != lacp_group.loadbalanceAlgorithm + ): changed = changed_lags = True lacp_lag_list.append( self.create_lacp_group_spec( - 'edit', - lacp_group.key, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode + "edit", + lacp_group.key, + lag_name, + lag_uplink_number, + lag_mode, + lag_load_balancing_mode, ) ) break @@ -212,7 +239,12 @@ def ensure(self): changed = changed_lags = True lacp_lag_list.append( self.create_lacp_group_spec( - 'add', None, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode + "add", + None, + lag_name, + lag_uplink_number, + lag_mode, + lag_load_balancing_mode, ) ) # Check if LAGs need to be removed @@ -226,16 +258,30 @@ def ensure(self): if lag_found is False: changed = changed_lags = True lacp_lag_list.append( - 
self.create_lacp_group_spec('remove', lacp_group.key, lacp_group.name, None, None, None) + self.create_lacp_group_spec( + "remove", + lacp_group.key, + lacp_group.name, + None, + None, + None, + ) ) else: changed = changed_lags = True lacp_lag_list = [] for lag in self.link_aggregation_groups: - lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options(lag) + lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode = self.get_lacp_lag_options( + lag + ) lacp_lag_list.append( self.create_lacp_group_spec( - 'add', None, lag_name, lag_uplink_number, lag_mode, lag_load_balancing_mode + "add", + None, + lag_name, + lag_uplink_number, + lag_mode, + lag_load_balancing_mode, ) ) else: @@ -244,34 +290,51 @@ def ensure(self): lacp_lag_list = [] for lacp_group in self.dvs.config.lacpGroupConfig: lacp_lag_list.append( - self.create_lacp_group_spec('remove', lacp_group.key, lacp_group.name, None, None, None) + self.create_lacp_group_spec( + "remove", + lacp_group.key, + lacp_group.name, + None, + None, + None, + ) ) if changed_lags: changed_list.append("link aggregation groups") current_lags_list = [] for lacp_group in self.dvs.config.lacpGroupConfig: temp_lag = dict() - temp_lag['name'] = lacp_group.name - temp_lag['uplink_number'] = lacp_group.uplinkNum - temp_lag['mode'] = lacp_group.mode - temp_lag['load_balancing_mode'] = lacp_group.loadbalanceAlgorithm + temp_lag["name"] = lacp_group.name + temp_lag["uplink_number"] = lacp_group.uplinkNum + temp_lag["mode"] = lacp_group.mode + temp_lag[ + "load_balancing_mode" + ] = lacp_group.loadbalanceAlgorithm current_lags_list.append(temp_lag) - results['link_aggregation_groups_previous'] = current_lags_list + results["link_aggregation_groups_previous"] = current_lags_list if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix if not self.module.check_mode: - if changed_support_mode and self.support_mode == 'basic' and changed_lags: + if ( + changed_support_mode + and self.support_mode == "basic" + and changed_lags + ): self.update_lacp_group_config(self.dvs, lacp_lag_list) # NOTE: You need to run the task again to change the support mode to 'basic' as well # No matter how long you sleep, you will always get the following error in vCenter: @@ -284,8 +347,8 @@ def ensure(self): self.update_lacp_group_config(self.dvs, lacp_lag_list) else: message = "LACP already configured properly" - results['changed'] = changed - results['result'] = message + results["changed"] = changed + results["result"] = message self.module.exit_json(**results) @@ -293,42 +356,75 @@ def ensure(self): def get_lacp_support_mode(mode): """Get LACP support mode""" return_mode = None - if mode == 'basic': - return_mode = 'singleLag' - elif mode == 'enhanced': - return_mode = 'multipleLag' - elif mode == 'singleLag': - return_mode = 'basic' - elif mode == 'multipleLag': - return_mode = 'enhanced' + if mode == "basic": + return_mode = "singleLag" + elif mode == "enhanced": + return_mode = "multipleLag" + elif mode == "singleLag": + return_mode = "basic" + 
elif mode == "multipleLag": + return_mode = "enhanced" return return_mode def get_lacp_lag_options(self, lag): """Get and check LACP LAG options""" - lag_name = lag.get('name', None) + lag_name = lag.get("name", None) if lag_name is None: - self.module.fail_json(msg="Please specify name in lag options as it's a required parameter") - lag_mode = lag.get('mode', None) + self.module.fail_json( + msg="Please specify name in lag options as it's a required parameter" + ) + lag_mode = lag.get("mode", None) if lag_mode is None: - self.module.fail_json(msg="Please specify mode in lag options as it's a required parameter") - lag_uplink_number = lag.get('uplink_number', None) + self.module.fail_json( + msg="Please specify mode in lag options as it's a required parameter" + ) + lag_uplink_number = lag.get("uplink_number", None) if lag_uplink_number is None: - self.module.fail_json(msg="Please specify uplink_number in lag options as it's a required parameter") + self.module.fail_json( + msg="Please specify uplink_number in lag options as it's a required parameter" + ) elif lag_uplink_number > 30: - self.module.fail_json(msg="More than 30 uplinks are not supported in a single LAG!") - lag_load_balancing_mode = lag.get('load_balancing_mode', None) - supported_lb_modes = ['srcTcpUdpPort', 'srcDestIpTcpUdpPortVlan', 'srcIpVlan', 'srcDestTcpUdpPort', - 'srcMac', 'destIp', 'destMac', 'vlan', 'srcDestIp', 'srcIpTcpUdpPortVlan', - 'srcDestIpTcpUdpPort', 'srcDestMac', 'destIpTcpUdpPort', 'srcPortId', 'srcIp', - 'srcIpTcpUdpPort', 'destIpTcpUdpPortVlan', 'destTcpUdpPort', 'destIpVlan', 'srcDestIpVlan'] + self.module.fail_json( + msg="More than 30 uplinks are not supported in a single LAG!" + ) + lag_load_balancing_mode = lag.get("load_balancing_mode", None) + supported_lb_modes = [ + "srcTcpUdpPort", + "srcDestIpTcpUdpPortVlan", + "srcIpVlan", + "srcDestTcpUdpPort", + "srcMac", + "destIp", + "destMac", + "vlan", + "srcDestIp", + "srcIpTcpUdpPortVlan", + "srcDestIpTcpUdpPort", + "srcDestMac", + "destIpTcpUdpPort", + "srcPortId", + "srcIp", + "srcIpTcpUdpPort", + "destIpTcpUdpPortVlan", + "destTcpUdpPort", + "destIpVlan", + "srcDestIpVlan", + ] if lag_load_balancing_mode is None: - self.module.fail_json(msg="Please specify load_balancing_mode in lag options as it's a required parameter") + self.module.fail_json( + msg="Please specify load_balancing_mode in lag options as it's a required parameter" + ) elif lag_load_balancing_mode not in supported_lb_modes: - self.module.fail_json(msg="The specified load balancing mode '%s' isn't supported!" % lag_load_balancing_mode) + self.module.fail_json( + msg="The specified load balancing mode '%s' isn't supported!" 
+ % lag_load_balancing_mode + ) return lag_name, lag_mode, lag_uplink_number, lag_load_balancing_mode @staticmethod - def create_lacp_group_spec(operation, key, name, uplink_number, mode, load_balancing_mode): + def create_lacp_group_spec( + operation, key, name, uplink_number, mode, load_balancing_mode + ): """ Create LACP group spec operation: add, edit, or remove @@ -336,14 +432,18 @@ def create_lacp_group_spec(operation, key, name, uplink_number, mode, load_balan """ lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupSpec() lacp_spec.operation = operation - lacp_spec.lacpGroupConfig = vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig() + lacp_spec.lacpGroupConfig = ( + vim.dvs.VmwareDistributedVirtualSwitch.LacpGroupConfig() + ) lacp_spec.lacpGroupConfig.name = name - if operation in ('edit', 'remove'): + if operation in ("edit", "remove"): lacp_spec.lacpGroupConfig.key = key - if not operation == 'remove': + if not operation == "remove": lacp_spec.lacpGroupConfig.uplinkNum = uplink_number lacp_spec.lacpGroupConfig.mode = mode - lacp_spec.lacpGroupConfig.loadbalanceAlgorithm = load_balancing_mode + lacp_spec.lacpGroupConfig.loadbalanceAlgorithm = ( + load_balancing_mode + ) # greyed out in vSphere Client!? # lacp_spec.vlan = vim.dvs.VmwareDistributedVirtualSwitch.LagVlanConfig() # lacp_spec.vlan.vlanId = [vim.NumericRange(...)] @@ -365,18 +465,24 @@ def update_dvs_config(self, switch_object, spec): def update_lacp_group_config(self, switch_object, lacp_group_spec): """Update LACP group config""" try: - task = switch_object.UpdateDVSLacpGroupConfig_Task(lacpGroupSpec=lacp_group_spec) + task = switch_object.UpdateDVSLacpGroupConfig_Task( + lacpGroupSpec=lacp_group_spec + ) result = wait_for_task(task) except vim.fault.DvsFault as dvs_fault: - self.module.fail_json(msg="Update failed due to DVS fault : %s" % to_native(dvs_fault)) + self.module.fail_json( + msg="Update failed due to DVS fault : %s" + % to_native(dvs_fault) + ) except vmodl.fault.NotSupported as not_supported: self.module.fail_json( - msg="Multiple Link Aggregation Control Protocol groups not supported on the switch : %s" % - to_native(not_supported) + msg="Multiple Link Aggregation Control Protocol groups not supported on the switch : %s" + % to_native(not_supported) ) except TaskError as invalid_argument: self.module.fail_json( - msg="Failed to update Link Aggregation Group : %s" % to_native(invalid_argument) + msg="Failed to update Link Aggregation Group : %s" + % to_native(invalid_argument) ) return result @@ -386,20 +492,19 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - switch=dict(required=True, aliases=['dvswitch']), - support_mode=dict(default='basic', choices=['basic', 'enhanced']), - link_aggregation_groups=dict(default=[], type='list'), + switch=dict(required=True, aliases=["dvswitch"]), + support_mode=dict(default="basic", choices=["basic", "enhanced"]), + link_aggregation_groups=dict(default=[], type="list"), ) ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_dvswitch_lacp = VMwareDvSwitchLacp(module) vmware_dvswitch_lacp.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_dvswitch_nioc.py b/plugins/modules/vmware_dvswitch_nioc.py index dd5f505..ec55820 100644 --- a/plugins/modules/vmware_dvswitch_nioc.py +++ b/plugins/modules/vmware_dvswitch_nioc.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see 
COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvswitch_nioc short_description: Manage distributed switch Network IO Control @@ -70,9 +71,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -RETURN = r''' +RETURN = r""" dvswitch_nioc_status: description: - result of the changes @@ -84,9 +85,9 @@ returned: success type: list sample: [ "vmotion", "vsan" ] -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Enable NIOC vmware_dvswitch_nioc: hostname: '{{ vcenter_hostname }}' @@ -115,7 +116,7 @@ switch: dvSwitch state: absent delegate_to: localhost -''' +""" try: from pyVmomi import vim, vmodl @@ -124,37 +125,42 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, find_datacenter_by_name, find_dvs_by_name, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_datacenter_by_name, + find_dvs_by_name, + vmware_argument_spec, + wait_for_task, +) class VMwareDVSwitchNIOC(PyVmomi): - def __init__(self, module): super(VMwareDVSwitchNIOC, self).__init__(module) self.dvs = None self.resource_changes = list() - self.switch = module.params['switch'] - self.version = module.params.get('version') - self.state = module.params['state'] - self.resources = module.params.get('resources') + self.switch = module.params["switch"] + self.version = module.params.get("version") + self.state = module.params["state"] + self.resources = module.params.get("resources") self.result = { - 'changed': False, - 'dvswitch_nioc_status': 'Unchanged', - 'resources_changed': list(), + "changed": False, + "dvswitch_nioc_status": "Unchanged", + "resources_changed": list(), } def process_state(self): nioc_states = { - 'absent': { - 'present': self.state_disable_nioc, - 'absent': self.state_exit, + "absent": { + "present": self.state_disable_nioc, + "absent": self.state_exit, + }, + "present": { + "version": self.state_update_nioc_version, + "update": self.state_update_nioc_resources, + "present": self.state_exit, + "absent": self.state_enable_nioc, }, - 'present': { - 'version': self.state_update_nioc_version, - 'update': self.state_update_nioc_resources, - 'present': self.state_exit, - 'absent': self.state_enable_nioc, - } } nioc_states[self.state][self.check_nioc_state()]() self.state_exit() @@ -163,73 +169,98 @@ def state_exit(self): self.module.exit_json(**self.result) def state_disable_nioc(self): - self.result['changed'] = True + self.result["changed"] = True if not self.module.check_mode: self.set_nioc_enabled(False) - self.result['dvswitch_nioc_status'] = 'Disabled NIOC' + self.result["dvswitch_nioc_status"] = "Disabled NIOC" def state_enable_nioc(self): - self.result['changed'] = True + self.result["changed"] = True if not self.module.check_mode: self.set_nioc_enabled(True) self.set_nioc_version() - self.result['dvswitch_nioc_status'] = "Enabled NIOC with version %s" % self.version + self.result["dvswitch_nioc_status"] = ( + "Enabled NIOC with version %s" % self.version + ) # Check resource state and apply all required changes - if 
self.check_resources() == 'update': + if self.check_resources() == "update": self.set_nioc_resources(self.resource_changes) def state_update_nioc_version(self): - self.result['changed'] = True + self.result["changed"] = True if not self.module.check_mode: self.set_nioc_version() - self.result['dvswitch_nioc_status'] = "Set NIOC to version %s" % self.version + self.result["dvswitch_nioc_status"] = ( + "Set NIOC to version %s" % self.version + ) # Check resource state and apply all required changes - if self.check_resources() == 'update': + if self.check_resources() == "update": self.set_nioc_resources(self.resource_changes) def state_update_nioc_resources(self): - self.result['changed'] = True + self.result["changed"] = True if not self.module.check_mode: - self.result['dvswitch_nioc_status'] = "Resource configuration modified" + self.result[ + "dvswitch_nioc_status" + ] = "Resource configuration modified" self.set_nioc_resources(self.resource_changes) def set_nioc_enabled(self, state): try: self.dvs.EnableNetworkResourceManagement(enable=state) except vim.fault.DvsFault as dvs_fault: - self.module.fail_json(msg='DvsFault while setting NIOC enabled=%r: %s' % (state, to_native(dvs_fault.msg))) + self.module.fail_json( + msg="DvsFault while setting NIOC enabled=%r: %s" + % (state, to_native(dvs_fault.msg)) + ) except vim.fault.DvsNotAuthorized as auth_fault: - self.module.fail_json(msg='Not authorized to set NIOC enabled=%r: %s' % (state, to_native(auth_fault.msg))) + self.module.fail_json( + msg="Not authorized to set NIOC enabled=%r: %s" + % (state, to_native(auth_fault.msg)) + ) except vmodl.fault.NotSupported as support_fault: - self.module.fail_json(msg='NIOC not supported by DVS: %s' % to_native(support_fault.msg)) + self.module.fail_json( + msg="NIOC not supported by DVS: %s" + % to_native(support_fault.msg) + ) except vmodl.RuntimeFault as runtime_fault: - self.module.fail_json(msg='RuntimeFault while setting NIOC enabled=%r: %s' % (state, to_native(runtime_fault.msg))) + self.module.fail_json( + msg="RuntimeFault while setting NIOC enabled=%r: %s" + % (state, to_native(runtime_fault.msg)) + ) def set_nioc_version(self): upgrade_spec = vim.DistributedVirtualSwitch.ConfigSpec() upgrade_spec.configVersion = self.dvs.config.configVersion if not self.version: - self.version = 'version2' + self.version = "version2" upgrade_spec.networkResourceControlVersion = self.version try: task = self.dvs.ReconfigureDvs_Task(spec=upgrade_spec) wait_for_task(task) except vmodl.RuntimeFault as runtime_fault: - self.module.fail_json(msg="RuntimeFault when setting NIOC version: %s " % to_native(runtime_fault.msg)) + self.module.fail_json( + msg="RuntimeFault when setting NIOC version: %s " + % to_native(runtime_fault.msg) + ) def check_nioc_state(self): self.dvs = find_dvs_by_name(self.content, self.switch) if self.dvs is None: - self.module.fail_json(msg='DVS %s was not found.' % self.switch) + self.module.fail_json(msg="DVS %s was not found." 
% self.switch) else: if not self.dvs.config.networkResourceManagementEnabled: - return 'absent' - if self.version and self.dvs.config.networkResourceControlVersion != self.version: - return 'version' + return "absent" + if ( + self.version + and self.dvs.config.networkResourceControlVersion + != self.version + ): + return "version" # NIOC is enabled and the correct version, so return the state of the resources return self.check_resources() @@ -237,61 +268,78 @@ def check_nioc_state(self): def check_resources(self): self.dvs = find_dvs_by_name(self.content, self.switch) if self.dvs is None: - self.module.fail_json(msg="DVS named '%s' was not found" % self.switch) + self.module.fail_json( + msg="DVS named '%s' was not found" % self.switch + ) for resource in self.resources: - if self.check_resource_state(resource) == 'update': + if self.check_resource_state(resource) == "update": self.resource_changes.append(resource) - self.result['resources_changed'].append(resource['name']) + self.result["resources_changed"].append(resource["name"]) if len(self.resource_changes) > 0: - return 'update' - return 'present' + return "update" + return "present" def check_resource_state(self, resource): - resource_cfg = self.find_netioc_by_key(resource['name']) + resource_cfg = self.find_netioc_by_key(resource["name"]) if resource_cfg is None: - self.module.fail_json(msg="NetIOC resource named '%s' was not found" % resource['name']) + self.module.fail_json( + msg="NetIOC resource named '%s' was not found" + % resource["name"] + ) rc = { "limit": resource_cfg.allocationInfo.limit, - "shares_level": resource_cfg.allocationInfo.shares.level + "shares_level": resource_cfg.allocationInfo.shares.level, } - if resource_cfg.allocationInfo.shares.level == 'custom': + if resource_cfg.allocationInfo.shares.level == "custom": rc["shares"] = resource_cfg.allocationInfo.shares.shares if self.dvs.config.networkResourceControlVersion == "version3": rc["reservation"] = resource_cfg.allocationInfo.reservation for k, v in rc.items(): if k in resource and v != resource[k]: - return 'update' - return 'valid' + return "update" + return "valid" def set_nioc_resources(self, resources): - if self.dvs.config.networkResourceControlVersion == 'version3': + if self.dvs.config.networkResourceControlVersion == "version3": self._update_version3_resources(resources) - elif self.dvs.config.networkResourceControlVersion == 'version2': + elif self.dvs.config.networkResourceControlVersion == "version2": self._update_version2_resources(resources) def _update_version3_resources(self, resources): allocations = list() for resource in resources: - allocation = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource() - allocation.allocationInfo = vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation() - allocation.key = resource['name'] - if 'limit' in resource: - allocation.allocationInfo.limit = resource['limit'] - if 'reservation' in resource: - allocation.allocationInfo.reservation = resource['reservation'] - if 'shares_level' in resource: + allocation = ( + vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource() + ) + allocation.allocationInfo = ( + vim.DistributedVirtualSwitch.HostInfrastructureTrafficResource.ResourceAllocation() + ) + allocation.key = resource["name"] + if "limit" in resource: + allocation.allocationInfo.limit = resource["limit"] + if "reservation" in resource: + allocation.allocationInfo.reservation = resource["reservation"] + if "shares_level" in resource: 
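# Illustrative sketch (not part of the patch; the numbers are example values).
# check_resource_state() above reduces to a key-by-key comparison between the current
# allocation and the requested resource dict; any user-supplied key that differs
# means the resource needs an update:
def needs_update(current, requested):
    """Return True if any key present in 'requested' differs from 'current'."""
    return any(key in requested and value != requested[key] for key, value in current.items())


current_allocation = {"limit": -1, "shares_level": "normal"}
assert needs_update(current_allocation, {"name": "vmotion", "limit": 100})     # limit differs -> 'update'
assert not needs_update(current_allocation, {"name": "vmotion", "limit": -1})  # matches -> 'valid'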
allocation.allocationInfo.shares = vim.SharesInfo() - allocation.allocationInfo.shares.level = resource['shares_level'] - if 'shares' in resource and resource['shares_level'] == 'custom': - allocation.allocationInfo.shares.shares = resource['shares'] - elif resource['shares_level'] == 'custom': + allocation.allocationInfo.shares.level = resource[ + "shares_level" + ] + if ( + "shares" in resource + and resource["shares_level"] == "custom" + ): + allocation.allocationInfo.shares.shares = resource[ + "shares" + ] + elif resource["shares_level"] == "custom": self.module.fail_json( - msg="Resource %s, shares_level set to custom but shares not specified" % resource['name'] + msg="Resource %s, shares_level set to custom but shares not specified" + % resource["name"] ) allocations.append(allocation) @@ -307,18 +355,27 @@ def _update_version2_resources(self, resources): allocations = list() for resource in resources: - resource_cfg = self.find_netioc_by_key(resource['name']) + resource_cfg = self.find_netioc_by_key(resource["name"]) allocation = vim.DVSNetworkResourcePoolConfigSpec() - allocation.allocationInfo = vim.DVSNetworkResourcePoolAllocationInfo() - allocation.key = resource['name'] + allocation.allocationInfo = ( + vim.DVSNetworkResourcePoolAllocationInfo() + ) + allocation.key = resource["name"] allocation.configVersion = resource_cfg.configVersion - if 'limit' in resource: - allocation.allocationInfo.limit = resource['limit'] - if 'shares_level' in resource: + if "limit" in resource: + allocation.allocationInfo.limit = resource["limit"] + if "shares_level" in resource: allocation.allocationInfo.shares = vim.SharesInfo() - allocation.allocationInfo.shares.level = resource['shares_level'] - if 'shares' in resource and resource['shares_level'] == 'custom': - allocation.allocationInfo.shares.shares = resource['shares'] + allocation.allocationInfo.shares.level = resource[ + "shares_level" + ] + if ( + "shares" in resource + and resource["shares_level"] == "custom" + ): + allocation.allocationInfo.shares.shares = resource[ + "shares" + ] allocations.append(allocation) @@ -342,48 +399,47 @@ def main(): argument_spec.update( dict( - switch=dict(required=True, type='str', aliases=['dvswitch']), - version=dict(type='str', choices=['version2', 'version3']), - state=dict(default='present', choices=['present', 'absent'], type='str'), + switch=dict(required=True, type="str", aliases=["dvswitch"]), + version=dict(type="str", choices=["version2", "version3"]), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), resources=dict( - type='list', + type="list", default=list(), - elements='dict', + elements="dict", options=dict( name=dict( - type='str', + type="str", required=True, choices=[ - 'faultTolerance', - 'hbr', - 'iSCSI', - 'management', - 'nfs', - 'vdp', - 'virtualMachine', - 'vmotion', - 'vsan' - ] + "faultTolerance", + "hbr", + "iSCSI", + "management", + "nfs", + "vdp", + "virtualMachine", + "vmotion", + "vsan", + ], ), - limit=dict(type='int', default=-1), + limit=dict(type="int", default=-1), shares_level=dict( - type='str', + type="str", required=False, - choices=[ - 'low', - 'normal', - 'high', - 'custom' - ] + choices=["low", "normal", "high", "custom"], ), - shares=dict(type='int', required=False), - reservation=dict(type='int', default=0) - ) + shares=dict(type="int", required=False), + reservation=dict(type="int", default=0), + ), ), ) ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + 
argument_spec=argument_spec, supports_check_mode=True + ) try: vmware_dvswitch_nioc = VMwareDVSwitchNIOC(module) @@ -396,5 +452,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_dvswitch_pvlans.py b/plugins/modules/vmware_dvswitch_pvlans.py index 993a12d..ded57ab 100644 --- a/plugins/modules/vmware_dvswitch_pvlans.py +++ b/plugins/modules/vmware_dvswitch_pvlans.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvswitch_pvlans short_description: Manage Private VLAN configuration of a Distributed Switch @@ -57,9 +58,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create PVLANs on a Distributed Switch vmware_dvswitch_pvlans: hostname: '{{ inventory_hostname }}' @@ -100,7 +101,7 @@ primary_pvlans: [] secondary_pvlans: [] delegate_to: localhost -''' +""" RETURN = """ result: @@ -140,7 +141,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible_collections.vmware.general.plugins.module_utils.vmware import ( - PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task + PyVmomi, + TaskError, + find_dvs_by_name, + vmware_argument_spec, + wait_for_task, ) @@ -149,11 +154,11 @@ class VMwareDvSwitchPvlans(PyVmomi): def __init__(self, module): super(VMwareDvSwitchPvlans, self).__init__(module) - self.switch_name = self.module.params['switch'] - if self.module.params['primary_pvlans']: - self.primary_pvlans = self.module.params['primary_pvlans'] - if self.module.params['secondary_pvlans']: - self.secondary_pvlans = self.module.params['secondary_pvlans'] + self.switch_name = self.module.params["switch"] + if self.module.params["primary_pvlans"]: + self.primary_pvlans = self.module.params["primary_pvlans"] + if self.module.params["secondary_pvlans"]: + self.secondary_pvlans = self.module.params["secondary_pvlans"] else: self.secondary_pvlans = None self.do_pvlan_sanity_checks() @@ -162,7 +167,9 @@ def __init__(self, module): self.secondary_pvlans = None self.dvs = find_dvs_by_name(self.content, self.switch_name) if self.dvs is None: - self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name) + self.module.fail_json( + msg="Failed to find DVS %s" % self.switch_name + ) def do_pvlan_sanity_checks(self): """Do sanity checks for primary and secondary PVLANs""" @@ -171,12 +178,15 @@ def do_pvlan_sanity_checks(self): count = 0 primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan) for primary_vlan_2 in self.primary_pvlans: - primary_pvlan_id_2 = self.get_primary_pvlan_option(primary_vlan_2) + primary_pvlan_id_2 = self.get_primary_pvlan_option( + primary_vlan_2 + ) if primary_pvlan_id == primary_pvlan_id_2: count += 1 if count > 1: self.module.fail_json( - msg="The primary PVLAN ID '%s' must be unique!" % primary_pvlan_id + msg="The primary PVLAN ID '%s' must be unique!" 
+ % primary_pvlan_id ) if self.secondary_pvlans: # Check if secondary PVLANs are unique @@ -184,12 +194,15 @@ def do_pvlan_sanity_checks(self): count = 0 result = self.get_secondary_pvlan_options(secondary_pvlan) for secondary_pvlan_2 in self.secondary_pvlans: - result_2 = self.get_secondary_pvlan_options(secondary_pvlan_2) + result_2 = self.get_secondary_pvlan_options( + secondary_pvlan_2 + ) if result[0] == result_2[0]: count += 1 if count > 1: self.module.fail_json( - msg="The secondary PVLAN ID '%s' must be unique!" % result[0] + msg="The secondary PVLAN ID '%s' must be unique!" + % result[0] ) # Check if secondary PVLANs are already used as primary PVLANs for primary_vlan in self.primary_pvlans: @@ -198,29 +211,31 @@ def do_pvlan_sanity_checks(self): result = self.get_secondary_pvlan_options(secondary_pvlan) if primary_pvlan_id == result[0]: self.module.fail_json( - msg="The secondary PVLAN ID '%s' is already used as a primary PVLAN!" % - result[0] + msg="The secondary PVLAN ID '%s' is already used as a primary PVLAN!" + % result[0] ) # Check if a primary PVLAN is present for every secondary PVLANs for secondary_pvlan in self.secondary_pvlans: primary_pvlan_found = False result = self.get_secondary_pvlan_options(secondary_pvlan) for primary_vlan in self.primary_pvlans: - primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan) + primary_pvlan_id = self.get_primary_pvlan_option( + primary_vlan + ) if result[1] == primary_pvlan_id: primary_pvlan_found = True break if not primary_pvlan_found: self.module.fail_json( - msg="The primary PVLAN ID '%s' isn't defined for the secondary PVLAN ID '%s'!" % - (result[1], result[0]) + msg="The primary PVLAN ID '%s' isn't defined for the secondary PVLAN ID '%s'!" + % (result[1], result[0]) ) def ensure(self): """Manage Private VLANs""" changed = False results = dict(changed=changed) - results['dvswitch'] = self.switch_name + results["dvswitch"] = self.switch_name changed_list_add = [] changed_list_remove = [] @@ -229,189 +244,259 @@ def ensure(self): config_spec.configVersion = self.dvs.config.configVersion # Check Private VLANs - results['private_vlans'] = None + results["private_vlans"] = None if self.primary_pvlans: desired_pvlan_list = [] for primary_vlan in self.primary_pvlans: primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan) temp_pvlan = dict() - temp_pvlan['primary_pvlan_id'] = primary_pvlan_id - temp_pvlan['secondary_pvlan_id'] = primary_pvlan_id - temp_pvlan['pvlan_type'] = 'promiscuous' + temp_pvlan["primary_pvlan_id"] = primary_pvlan_id + temp_pvlan["secondary_pvlan_id"] = primary_pvlan_id + temp_pvlan["pvlan_type"] = "promiscuous" desired_pvlan_list.append(temp_pvlan) if self.secondary_pvlans: for secondary_pvlan in self.secondary_pvlans: - (secondary_pvlan_id, - secondary_vlan_primary_vlan_id, - pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan) + ( + secondary_pvlan_id, + secondary_vlan_primary_vlan_id, + pvlan_type, + ) = self.get_secondary_pvlan_options(secondary_pvlan) temp_pvlan = dict() - temp_pvlan['primary_pvlan_id'] = secondary_vlan_primary_vlan_id - temp_pvlan['secondary_pvlan_id'] = secondary_pvlan_id - temp_pvlan['pvlan_type'] = pvlan_type + temp_pvlan[ + "primary_pvlan_id" + ] = secondary_vlan_primary_vlan_id + temp_pvlan["secondary_pvlan_id"] = secondary_pvlan_id + temp_pvlan["pvlan_type"] = pvlan_type desired_pvlan_list.append(temp_pvlan) - results['private_vlans'] = desired_pvlan_list + results["private_vlans"] = desired_pvlan_list if self.dvs.config.pvlanConfig: pvlan_spec_list = [] # 
Check if desired PVLANs are configured for primary_vlan in self.primary_pvlans: - primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan) + primary_pvlan_id = self.get_primary_pvlan_option( + primary_vlan + ) promiscuous_found = other_found = False for pvlan_object in self.dvs.config.pvlanConfig: - if pvlan_object.primaryVlanId == primary_pvlan_id and pvlan_object.pvlanType == 'promiscuous': + if ( + pvlan_object.primaryVlanId == primary_pvlan_id + and pvlan_object.pvlanType == "promiscuous" + ): promiscuous_found = True break if not promiscuous_found: changed = True - changed_list_add.append('promiscuous (%s, %s)' % (primary_pvlan_id, primary_pvlan_id)) + changed_list_add.append( + "promiscuous (%s, %s)" + % (primary_pvlan_id, primary_pvlan_id) + ) pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='add', + operation="add", primary_pvlan_id=primary_pvlan_id, secondary_pvlan_id=primary_pvlan_id, - pvlan_type='promiscuous' + pvlan_type="promiscuous", ) ) if self.secondary_pvlans: for secondary_pvlan in self.secondary_pvlans: - (secondary_pvlan_id, - secondary_vlan_primary_vlan_id, - pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan) - if primary_pvlan_id == secondary_vlan_primary_vlan_id: - for pvlan_object_2 in self.dvs.config.pvlanConfig: - if (pvlan_object_2.primaryVlanId == secondary_vlan_primary_vlan_id - and pvlan_object_2.secondaryVlanId == secondary_pvlan_id - and pvlan_object_2.pvlanType == pvlan_type): + ( + secondary_pvlan_id, + secondary_vlan_primary_vlan_id, + pvlan_type, + ) = self.get_secondary_pvlan_options( + secondary_pvlan + ) + if ( + primary_pvlan_id + == secondary_vlan_primary_vlan_id + ): + for ( + pvlan_object_2 + ) in self.dvs.config.pvlanConfig: + if ( + pvlan_object_2.primaryVlanId + == secondary_vlan_primary_vlan_id + and pvlan_object_2.secondaryVlanId + == secondary_pvlan_id + and pvlan_object_2.pvlanType + == pvlan_type + ): other_found = True break if not other_found: changed = True changed_list_add.append( - '%s (%s, %s)' % (pvlan_type, primary_pvlan_id, secondary_pvlan_id) + "%s (%s, %s)" + % ( + pvlan_type, + primary_pvlan_id, + secondary_pvlan_id, + ) ) pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='add', + operation="add", primary_pvlan_id=primary_pvlan_id, secondary_pvlan_id=secondary_pvlan_id, - pvlan_type=pvlan_type + pvlan_type=pvlan_type, ) ) # Check if a PVLAN needs to be removed for pvlan_object in self.dvs.config.pvlanConfig: promiscuous_found = other_found = False - if (pvlan_object.primaryVlanId == pvlan_object.secondaryVlanId - and pvlan_object.pvlanType == 'promiscuous'): + if ( + pvlan_object.primaryVlanId + == pvlan_object.secondaryVlanId + and pvlan_object.pvlanType == "promiscuous" + ): for primary_vlan in self.primary_pvlans: - primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan) - if pvlan_object.primaryVlanId == primary_pvlan_id and pvlan_object.pvlanType == 'promiscuous': + primary_pvlan_id = self.get_primary_pvlan_option( + primary_vlan + ) + if ( + pvlan_object.primaryVlanId == primary_pvlan_id + and pvlan_object.pvlanType == "promiscuous" + ): promiscuous_found = True break if not promiscuous_found: changed = True changed_list_remove.append( - 'promiscuous (%s, %s)' % (pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId) + "promiscuous (%s, %s)" + % ( + pvlan_object.primaryVlanId, + pvlan_object.secondaryVlanId, + ) ) pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='remove', + operation="remove", primary_pvlan_id=pvlan_object.primaryVlanId, 
secondary_pvlan_id=pvlan_object.secondaryVlanId, - pvlan_type='promiscuous' + pvlan_type="promiscuous", ) ) elif self.secondary_pvlans: for secondary_pvlan in self.secondary_pvlans: - (secondary_pvlan_id, - secondary_vlan_primary_vlan_id, - pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan) - if (pvlan_object.primaryVlanId == secondary_vlan_primary_vlan_id - and pvlan_object.secondaryVlanId == secondary_pvlan_id - and pvlan_object.pvlanType == pvlan_type): + ( + secondary_pvlan_id, + secondary_vlan_primary_vlan_id, + pvlan_type, + ) = self.get_secondary_pvlan_options( + secondary_pvlan + ) + if ( + pvlan_object.primaryVlanId + == secondary_vlan_primary_vlan_id + and pvlan_object.secondaryVlanId + == secondary_pvlan_id + and pvlan_object.pvlanType == pvlan_type + ): other_found = True break if not other_found: changed = True changed_list_remove.append( - '%s (%s, %s)' % ( - pvlan_object.pvlanType, pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId + "%s (%s, %s)" + % ( + pvlan_object.pvlanType, + pvlan_object.primaryVlanId, + pvlan_object.secondaryVlanId, ) ) pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='remove', + operation="remove", primary_pvlan_id=pvlan_object.primaryVlanId, secondary_pvlan_id=pvlan_object.secondaryVlanId, - pvlan_type=pvlan_object.pvlanType + pvlan_type=pvlan_object.pvlanType, ) ) else: changed = True changed_list_remove.append( - '%s (%s, %s)' % ( - pvlan_object.pvlanType, pvlan_object.primaryVlanId, pvlan_object.secondaryVlanId + "%s (%s, %s)" + % ( + pvlan_object.pvlanType, + pvlan_object.primaryVlanId, + pvlan_object.secondaryVlanId, ) ) pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='remove', + operation="remove", primary_pvlan_id=pvlan_object.primaryVlanId, secondary_pvlan_id=pvlan_object.secondaryVlanId, - pvlan_type=pvlan_object.pvlanType + pvlan_type=pvlan_object.pvlanType, ) ) else: changed = True - changed_list_add.append('All private VLANs') + changed_list_add.append("All private VLANs") pvlan_spec_list = [] for primary_vlan in self.primary_pvlans: # the first secondary VLAN's type is always promiscuous - primary_pvlan_id = self.get_primary_pvlan_option(primary_vlan) + primary_pvlan_id = self.get_primary_pvlan_option( + primary_vlan + ) pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='add', + operation="add", primary_pvlan_id=primary_pvlan_id, secondary_pvlan_id=primary_pvlan_id, - pvlan_type='promiscuous' + pvlan_type="promiscuous", ) ) if self.secondary_pvlans: for secondary_pvlan in self.secondary_pvlans: - (secondary_pvlan_id, - secondary_vlan_primary_vlan_id, - pvlan_type) = self.get_secondary_pvlan_options(secondary_pvlan) - if primary_pvlan_id == secondary_vlan_primary_vlan_id: + ( + secondary_pvlan_id, + secondary_vlan_primary_vlan_id, + pvlan_type, + ) = self.get_secondary_pvlan_options( + secondary_pvlan + ) + if ( + primary_pvlan_id + == secondary_vlan_primary_vlan_id + ): pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='add', + operation="add", primary_pvlan_id=primary_pvlan_id, secondary_pvlan_id=secondary_pvlan_id, - pvlan_type=pvlan_type + pvlan_type=pvlan_type, ) ) else: # Remove PVLAN configuration if present if self.dvs.config.pvlanConfig: changed = True - changed_list_remove.append('All private VLANs') + changed_list_remove.append("All private VLANs") pvlan_spec_list = [] for pvlan_object in self.dvs.config.pvlanConfig: pvlan_spec_list.append( self.create_pvlan_config_spec( - operation='remove', + operation="remove", 
primary_pvlan_id=pvlan_object.primaryVlanId, secondary_pvlan_id=pvlan_object.secondaryVlanId, - pvlan_type=pvlan_object.pvlanType + pvlan_type=pvlan_object.pvlanType, ) ) if changed: message_add = message_remove = None if changed_list_add: - message_add = self.build_change_message('add', changed_list_add) + message_add = self.build_change_message( + "add", changed_list_add + ) if changed_list_remove: - message_remove = self.build_change_message('remove', changed_list_remove) + message_remove = self.build_change_message( + "remove", changed_list_remove + ) if message_add and message_remove: - message = message_add + '. ' + message_remove + '.' + message = message_add + ". " + message_remove + "." elif message_add: message = message_add elif message_remove: @@ -419,11 +504,11 @@ def ensure(self): current_pvlan_list = [] for pvlan_object in self.dvs.config.pvlanConfig: temp_pvlan = dict() - temp_pvlan['primary_pvlan_id'] = pvlan_object.primaryVlanId - temp_pvlan['secondary_pvlan_id'] = pvlan_object.secondaryVlanId - temp_pvlan['pvlan_type'] = pvlan_object.pvlanType + temp_pvlan["primary_pvlan_id"] = pvlan_object.primaryVlanId + temp_pvlan["secondary_pvlan_id"] = pvlan_object.secondaryVlanId + temp_pvlan["pvlan_type"] = pvlan_object.pvlanType current_pvlan_list.append(temp_pvlan) - results['private_vlans_previous'] = current_pvlan_list + results["private_vlans_previous"] = current_pvlan_list config_spec.pvlanConfigSpec = pvlan_spec_list if not self.module.check_mode: try: @@ -431,34 +516,37 @@ def ensure(self): wait_for_task(task) except TaskError as invalid_argument: self.module.fail_json( - msg="Failed to update DVS : %s" % to_native(invalid_argument) + msg="Failed to update DVS : %s" + % to_native(invalid_argument) ) else: message = "PVLANs already configured properly" - results['changed'] = changed - results['result'] = message + results["changed"] = changed + results["result"] = message self.module.exit_json(**results) def get_primary_pvlan_option(self, primary_vlan): """Get Primary PVLAN option""" - primary_pvlan_id = primary_vlan.get('primary_pvlan_id', None) + primary_pvlan_id = primary_vlan.get("primary_pvlan_id", None) if primary_pvlan_id is None: self.module.fail_json( msg="Please specify primary_pvlan_id in primary_pvlans options as it's a required parameter" ) if primary_pvlan_id in (0, 4095): - self.module.fail_json(msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary PVLAN.") + self.module.fail_json( + msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary PVLAN." + ) return primary_pvlan_id def get_secondary_pvlan_options(self, secondary_pvlan): """Get Secondary PVLAN option""" - secondary_pvlan_id = secondary_pvlan.get('secondary_pvlan_id', None) + secondary_pvlan_id = secondary_pvlan.get("secondary_pvlan_id", None) if secondary_pvlan_id is None: self.module.fail_json( msg="Please specify secondary_pvlan_id in secondary_pvlans options as it's a required parameter" ) - primary_pvlan_id = secondary_pvlan.get('primary_pvlan_id', None) + primary_pvlan_id = secondary_pvlan.get("primary_pvlan_id", None) if primary_pvlan_id is None: self.module.fail_json( msg="Please specify primary_pvlan_id in secondary_pvlans options as it's a required parameter" @@ -467,16 +555,23 @@ def get_secondary_pvlan_options(self, secondary_pvlan): self.module.fail_json( msg="The VLAN IDs of 0 and 4095 are reserved and cannot be used as a primary or secondary PVLAN." 
) - pvlan_type = secondary_pvlan.get('pvlan_type', None) - supported_pvlan_types = ['isolated', 'community'] + pvlan_type = secondary_pvlan.get("pvlan_type", None) + supported_pvlan_types = ["isolated", "community"] if pvlan_type is None: - self.module.fail_json(msg="Please specify pvlan_type in secondary_pvlans options as it's a required parameter") + self.module.fail_json( + msg="Please specify pvlan_type in secondary_pvlans options as it's a required parameter" + ) elif pvlan_type not in supported_pvlan_types: - self.module.fail_json(msg="The specified PVLAN type '%s' isn't supported!" % pvlan_type) + self.module.fail_json( + msg="The specified PVLAN type '%s' isn't supported!" + % pvlan_type + ) return secondary_pvlan_id, primary_pvlan_id, pvlan_type @staticmethod - def create_pvlan_config_spec(operation, primary_pvlan_id, secondary_pvlan_id, pvlan_type): + def create_pvlan_config_spec( + operation, primary_pvlan_id, secondary_pvlan_id, pvlan_type + ): """ Create PVLAN config spec operation: add, edit, or remove @@ -484,7 +579,9 @@ def create_pvlan_config_spec(operation, primary_pvlan_id, secondary_pvlan_id, pv """ pvlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.PvlanConfigSpec() pvlan_spec.operation = operation - pvlan_spec.pvlanEntry = vim.dvs.VmwareDistributedVirtualSwitch.PvlanMapEntry() + pvlan_spec.pvlanEntry = ( + vim.dvs.VmwareDistributedVirtualSwitch.PvlanMapEntry() + ) pvlan_spec.pvlanEntry.primaryVlanId = primary_pvlan_id pvlan_spec.pvlanEntry.secondaryVlanId = secondary_pvlan_id pvlan_spec.pvlanEntry.pvlanType = pvlan_type @@ -492,18 +589,20 @@ def create_pvlan_config_spec(operation, primary_pvlan_id, secondary_pvlan_id, pv def build_change_message(self, operation, changed_list): """Build the changed message""" - if operation == 'add': - changed_operation = 'added' - elif operation == 'remove': - changed_operation = 'removed' + if operation == "add": + changed_operation = "added" + elif operation == "remove": + changed_operation = "removed" if self.module.check_mode: - changed_suffix = ' would be %s' % changed_operation + changed_suffix = " would be %s" % changed_operation else: - changed_suffix = ' %s' % changed_operation + changed_suffix = " %s" % changed_operation if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + ", and " + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix @@ -515,20 +614,19 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - switch=dict(required=True, aliases=['dvswitch']), - primary_pvlans=dict(type='list', default=list(), required=False), - secondary_pvlans=dict(type='list', default=list(), required=False), + switch=dict(required=True, aliases=["dvswitch"]), + primary_pvlans=dict(type="list", default=list(), required=False), + secondary_pvlans=dict(type="list", default=list(), required=False), ) ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_dvswitch_pvlans = VMwareDvSwitchPvlans(module) vmware_dvswitch_pvlans.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_dvswitch_uplink_pg.py b/plugins/modules/vmware_dvswitch_uplink_pg.py index a1680cd..1fc7313 100644 --- 
a/plugins/modules/vmware_dvswitch_uplink_pg.py +++ b/plugins/modules/vmware_dvswitch_uplink_pg.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_dvswitch_uplink_pg short_description: Manage uplink portproup configuration of a Distributed Switch @@ -96,9 +97,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Configure Uplink portgroup vmware_dvswitch_uplink_pg: hostname: '{{ inventory_hostname }}' @@ -129,7 +130,7 @@ status: enabled mode: active delegate_to: localhost -''' +""" RETURN = """ result: @@ -167,7 +168,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible_collections.vmware.general.plugins.module_utils.vmware import ( - PyVmomi, TaskError, find_dvs_by_name, vmware_argument_spec, wait_for_task + PyVmomi, + TaskError, + find_dvs_by_name, + vmware_argument_spec, + wait_for_task, ) @@ -176,67 +181,89 @@ class VMwareDvSwitchUplinkPortgroup(PyVmomi): def __init__(self, module): super(VMwareDvSwitchUplinkPortgroup, self).__init__(module) - self.switch_name = self.module.params['switch'] - self.uplink_pg_name = self.params['name'] - self.uplink_pg_description = self.params['description'] - self.uplink_pg_reset = self.params['advanced'].get('port_config_reset_at_disconnect') - self.uplink_pg_block_ports = self.params['advanced'].get('block_override') - self.uplink_pg_vendor_conf = self.params['advanced'].get('vendor_config_override') - self.uplink_pg_vlan = self.params['advanced'].get('vlan_override') - self.uplink_pg_netflow = self.params['advanced'].get('netflow_override') - self.uplink_pg_tf = self.params['advanced'].get('traffic_filter_override') - self.uplink_pg_vlan_trunk_range = self.params['vlan_trunk_range'] - self.uplink_pg_netflow_enabled = self.params['netflow_enabled'] - self.uplink_pg_block_all_ports = self.params['block_all_ports'] - self.lacp_status = self.params['lacp'].get('status') - self.lacp_mode = self.params['lacp'].get('mode') + self.switch_name = self.module.params["switch"] + self.uplink_pg_name = self.params["name"] + self.uplink_pg_description = self.params["description"] + self.uplink_pg_reset = self.params["advanced"].get( + "port_config_reset_at_disconnect" + ) + self.uplink_pg_block_ports = self.params["advanced"].get( + "block_override" + ) + self.uplink_pg_vendor_conf = self.params["advanced"].get( + "vendor_config_override" + ) + self.uplink_pg_vlan = self.params["advanced"].get("vlan_override") + self.uplink_pg_netflow = self.params["advanced"].get( + "netflow_override" + ) + self.uplink_pg_tf = self.params["advanced"].get( + "traffic_filter_override" + ) + self.uplink_pg_vlan_trunk_range = self.params["vlan_trunk_range"] + self.uplink_pg_netflow_enabled = self.params["netflow_enabled"] + self.uplink_pg_block_all_ports = self.params["block_all_ports"] + self.lacp_status = self.params["lacp"].get("status") + self.lacp_mode = self.params["lacp"].get("mode") self.dvs = find_dvs_by_name(self.content, self.switch_name) if self.dvs is None: - self.module.fail_json(msg="Failed to find DVS %s" % self.switch_name) + 
self.module.fail_json( + msg="Failed to find DVS %s" % self.switch_name + ) self.support_mode = self.dvs.config.lacpApiVersion def ensure(self): """Manage uplink portgroup""" - changed = changed_uplink_pg_policy = changed_vlan_trunk_range = changed_lacp = False + changed = ( + changed_uplink_pg_policy + ) = changed_vlan_trunk_range = changed_lacp = False results = dict(changed=changed) - results['dvswitch'] = self.switch_name + results["dvswitch"] = self.switch_name changed_list = [] uplink_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() # Use the same version in the new spec; The version will be increased by one by the API automatically - uplink_pg_spec.configVersion = self.dvs.config.uplinkPortgroup[0].config.configVersion + uplink_pg_spec.configVersion = self.dvs.config.uplinkPortgroup[ + 0 + ].config.configVersion uplink_pg_config = self.dvs.config.uplinkPortgroup[0].config # Check name if self.uplink_pg_name: - results['name'] = self.uplink_pg_name + results["name"] = self.uplink_pg_name if uplink_pg_config.name != self.uplink_pg_name: changed = True changed_list.append("name") - results['name_previous'] = uplink_pg_config.name + results["name_previous"] = uplink_pg_config.name uplink_pg_spec.name = self.uplink_pg_name else: - results['name'] = uplink_pg_config.name + results["name"] = uplink_pg_config.name # Check description - results['description'] = self.uplink_pg_description + results["description"] = self.uplink_pg_description if uplink_pg_config.description != self.uplink_pg_description: changed = True changed_list.append("description") - results['description_previous'] = uplink_pg_config.description + results["description_previous"] = uplink_pg_config.description uplink_pg_spec.description = self.uplink_pg_description # Check port policies - results['adv_reset_at_disconnect'] = self.uplink_pg_reset - results['adv_block_ports'] = self.uplink_pg_block_ports - results['adv_vendor_conf'] = self.uplink_pg_vendor_conf - results['adv_vlan'] = self.uplink_pg_vlan - results['adv_netflow'] = self.uplink_pg_netflow - results['adv_traffic_filtering'] = self.uplink_pg_tf - uplink_pg_policy_spec = vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy() - uplink_pg_policy_spec.portConfigResetAtDisconnect = self.uplink_pg_reset + results["adv_reset_at_disconnect"] = self.uplink_pg_reset + results["adv_block_ports"] = self.uplink_pg_block_ports + results["adv_vendor_conf"] = self.uplink_pg_vendor_conf + results["adv_vlan"] = self.uplink_pg_vlan + results["adv_netflow"] = self.uplink_pg_netflow + results["adv_traffic_filtering"] = self.uplink_pg_tf + uplink_pg_policy_spec = ( + vim.dvs.VmwareDistributedVirtualSwitch.VMwarePortgroupPolicy() + ) + uplink_pg_policy_spec.portConfigResetAtDisconnect = ( + self.uplink_pg_reset + ) uplink_pg_policy_spec.blockOverrideAllowed = self.uplink_pg_block_ports - uplink_pg_policy_spec.vendorConfigOverrideAllowed = self.uplink_pg_vendor_conf + uplink_pg_policy_spec.vendorConfigOverrideAllowed = ( + self.uplink_pg_vendor_conf + ) uplink_pg_policy_spec.vlanOverrideAllowed = self.uplink_pg_vlan uplink_pg_policy_spec.ipfixOverrideAllowed = self.uplink_pg_netflow uplink_pg_policy_spec.trafficFilterOverrideAllowed = self.uplink_pg_tf @@ -248,56 +275,100 @@ def ensure(self): uplink_pg_policy_spec.securityPolicyOverrideAllowed = False uplink_pg_policy_spec.networkResourcePoolOverrideAllowed = False # Check policies - if uplink_pg_config.policy.portConfigResetAtDisconnect != self.uplink_pg_reset: + if ( + 
uplink_pg_config.policy.portConfigResetAtDisconnect + != self.uplink_pg_reset + ): changed_uplink_pg_policy = True - results['adv_reset_at_disconnect_previous'] = uplink_pg_config.policy.portConfigResetAtDisconnect - if uplink_pg_config.policy.blockOverrideAllowed != self.uplink_pg_block_ports: + results[ + "adv_reset_at_disconnect_previous" + ] = uplink_pg_config.policy.portConfigResetAtDisconnect + if ( + uplink_pg_config.policy.blockOverrideAllowed + != self.uplink_pg_block_ports + ): changed_uplink_pg_policy = True - results['adv_block_ports_previous'] = uplink_pg_config.policy.blockOverrideAllowed - if uplink_pg_config.policy.vendorConfigOverrideAllowed != self.uplink_pg_vendor_conf: + results[ + "adv_block_ports_previous" + ] = uplink_pg_config.policy.blockOverrideAllowed + if ( + uplink_pg_config.policy.vendorConfigOverrideAllowed + != self.uplink_pg_vendor_conf + ): changed_uplink_pg_policy = True - results['adv_vendor_conf_previous'] = uplink_pg_config.policy.vendorConfigOverrideAllowed + results[ + "adv_vendor_conf_previous" + ] = uplink_pg_config.policy.vendorConfigOverrideAllowed if uplink_pg_config.policy.vlanOverrideAllowed != self.uplink_pg_vlan: changed_uplink_pg_policy = True - results['adv_vlan_previous'] = uplink_pg_config.policy.vlanOverrideAllowed - if uplink_pg_config.policy.ipfixOverrideAllowed != self.uplink_pg_netflow: + results[ + "adv_vlan_previous" + ] = uplink_pg_config.policy.vlanOverrideAllowed + if ( + uplink_pg_config.policy.ipfixOverrideAllowed + != self.uplink_pg_netflow + ): changed_uplink_pg_policy = True - results['adv_netflow_previous'] = uplink_pg_config.policy.ipfixOverrideAllowed - if uplink_pg_config.policy.trafficFilterOverrideAllowed != self.uplink_pg_tf: + results[ + "adv_netflow_previous" + ] = uplink_pg_config.policy.ipfixOverrideAllowed + if ( + uplink_pg_config.policy.trafficFilterOverrideAllowed + != self.uplink_pg_tf + ): changed_uplink_pg_policy = True - results['adv_traffic_filtering_previous'] = uplink_pg_config.policy.trafficFilterOverrideAllowed + results[ + "adv_traffic_filtering_previous" + ] = uplink_pg_config.policy.trafficFilterOverrideAllowed if changed_uplink_pg_policy: changed = True changed_list.append("advanced") uplink_pg_spec.policy = uplink_pg_policy_spec - uplink_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() + uplink_pg_spec.defaultPortConfig = ( + vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() + ) # Check VLAN trunk - results['vlan_trunk_range'] = self.uplink_pg_vlan_trunk_range + results["vlan_trunk_range"] = self.uplink_pg_vlan_trunk_range vlan_id_ranges = self.uplink_pg_vlan_trunk_range - trunk_vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec() + trunk_vlan_spec = ( + vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec() + ) vlan_id_list = [] for vlan_id_range in vlan_id_ranges: vlan_id_range_found = False - vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range) + vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range( + vlan_id_range + ) # Check if range is already configured - for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId: - if current_vlan_id_range.start == int(vlan_id_start) and current_vlan_id_range.end == int(vlan_id_end): + for ( + current_vlan_id_range + ) in uplink_pg_config.defaultPortConfig.vlan.vlanId: + if current_vlan_id_range.start == int( + vlan_id_start + ) and current_vlan_id_range.end == int(vlan_id_end): vlan_id_range_found = True break if vlan_id_range_found is 
False: changed_vlan_trunk_range = True vlan_id_list.append( - vim.NumericRange(start=int(vlan_id_start), end=int(vlan_id_end)) + vim.NumericRange( + start=int(vlan_id_start), end=int(vlan_id_end) + ) ) # Check if range needs to be removed - for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId: + for ( + current_vlan_id_range + ) in uplink_pg_config.defaultPortConfig.vlan.vlanId: vlan_id_range_found = False for vlan_id_range in vlan_id_ranges: - vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range(vlan_id_range) - if (current_vlan_id_range.start == int(vlan_id_start) - and current_vlan_id_range.end == int(vlan_id_end)): + vlan_id_start, vlan_id_end = self.get_vlan_ids_from_range( + vlan_id_range + ) + if current_vlan_id_range.start == int( + vlan_id_start + ) and current_vlan_id_range.end == int(vlan_id_end): vlan_id_range_found = True break if vlan_id_range_found is False: @@ -307,40 +378,56 @@ def ensure(self): changed = True changed_list.append("vlan trunk range") current_vlan_id_list = [] - for current_vlan_id_range in uplink_pg_config.defaultPortConfig.vlan.vlanId: + for ( + current_vlan_id_range + ) in uplink_pg_config.defaultPortConfig.vlan.vlanId: if current_vlan_id_range.start == current_vlan_id_range.end: current_vlan_id_range_string = current_vlan_id_range.start else: - current_vlan_id_range_string = '-'.join( - [str(current_vlan_id_range.start), str(current_vlan_id_range.end)] + current_vlan_id_range_string = "-".join( + [ + str(current_vlan_id_range.start), + str(current_vlan_id_range.end), + ] ) current_vlan_id_list.append(current_vlan_id_range_string) - results['vlan_trunk_range_previous'] = current_vlan_id_list + results["vlan_trunk_range_previous"] = current_vlan_id_list uplink_pg_spec.defaultPortConfig.vlan = trunk_vlan_spec # Check LACP lacp_support_mode = self.get_lacp_support_mode(self.support_mode) - if lacp_support_mode == 'basic': - results['lacp_status'] = self.lacp_status - lacp_spec = vim.dvs.VmwareDistributedVirtualSwitch.UplinkLacpPolicy() + if lacp_support_mode == "basic": + results["lacp_status"] = self.lacp_status + lacp_spec = ( + vim.dvs.VmwareDistributedVirtualSwitch.UplinkLacpPolicy() + ) lacp_enabled = False - if self.lacp_status == 'enabled': + if self.lacp_status == "enabled": lacp_enabled = True - if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value != lacp_enabled: + if ( + uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value + != lacp_enabled + ): changed_lacp = True changed_list.append("lacp status") if uplink_pg_config.defaultPortConfig.lacpPolicy.enable.value: - results['lacp_status_previous'] = 'enabled' + results["lacp_status_previous"] = "enabled" else: - results['lacp_status_previous'] = 'disabled' + results["lacp_status_previous"] = "disabled" lacp_spec.enable = vim.BoolPolicy() lacp_spec.enable.inherited = False lacp_spec.enable.value = lacp_enabled - if lacp_enabled and uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value != self.lacp_mode: - results['lacp_mode'] = self.lacp_mode + if ( + lacp_enabled + and uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value + != self.lacp_mode + ): + results["lacp_mode"] = self.lacp_mode changed_lacp = True changed_list.append("lacp mode") - results['lacp_mode_previous'] = uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value + results[ + "lacp_mode_previous" + ] = uplink_pg_config.defaultPortConfig.lacpPolicy.mode.value lacp_spec.mode = vim.StringPolicy() lacp_spec.mode.inherited = False lacp_spec.mode.value = self.lacp_mode @@ -349,51 +436,72 @@ def 
ensure(self): uplink_pg_spec.defaultPortConfig.lacpPolicy = lacp_spec # Check NetFlow - results['netflow_enabled'] = self.uplink_pg_netflow_enabled + results["netflow_enabled"] = self.uplink_pg_netflow_enabled netflow_enabled_spec = vim.BoolPolicy() netflow_enabled_spec.inherited = False netflow_enabled_spec.value = self.uplink_pg_netflow_enabled - if uplink_pg_config.defaultPortConfig.ipfixEnabled.value != self.uplink_pg_netflow_enabled: + if ( + uplink_pg_config.defaultPortConfig.ipfixEnabled.value + != self.uplink_pg_netflow_enabled + ): changed = True - results['netflow_enabled_previous'] = uplink_pg_config.defaultPortConfig.ipfixEnabled.value + results[ + "netflow_enabled_previous" + ] = uplink_pg_config.defaultPortConfig.ipfixEnabled.value changed_list.append("netflow") - uplink_pg_spec.defaultPortConfig.ipfixEnabled = netflow_enabled_spec + uplink_pg_spec.defaultPortConfig.ipfixEnabled = ( + netflow_enabled_spec + ) # TODO: Check Traffic filtering and marking # Check Block all ports - results['block_all_ports'] = self.uplink_pg_block_all_ports + results["block_all_ports"] = self.uplink_pg_block_all_ports block_all_ports_spec = vim.BoolPolicy() block_all_ports_spec.inherited = False block_all_ports_spec.value = self.uplink_pg_block_all_ports - if uplink_pg_config.defaultPortConfig.blocked.value != self.uplink_pg_block_all_ports: + if ( + uplink_pg_config.defaultPortConfig.blocked.value + != self.uplink_pg_block_all_ports + ): changed = True changed_list.append("block all ports") - results['block_all_ports_previous'] = uplink_pg_config.defaultPortConfig.blocked.value + results[ + "block_all_ports_previous" + ] = uplink_pg_config.defaultPortConfig.blocked.value uplink_pg_spec.defaultPortConfig.blocked = block_all_ports_spec if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix if not self.module.check_mode: try: - task = self.dvs.config.uplinkPortgroup[0].ReconfigureDVPortgroup_Task(uplink_pg_spec) + task = self.dvs.config.uplinkPortgroup[ + 0 + ].ReconfigureDVPortgroup_Task(uplink_pg_spec) wait_for_task(task) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to update uplink portgroup : %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to update uplink portgroup : %s" + % to_native(invalid_argument) + ) else: message = "Uplink portgroup already configured properly" - results['changed'] = changed - results['result'] = message + results["changed"] = changed + results["result"] = message self.module.exit_json(**results) @@ -401,7 +509,7 @@ def ensure(self): def get_vlan_ids_from_range(vlan_id_range): """Get start and end VLAN ID from VLAN ID range""" try: - vlan_id_start, vlan_id_end = vlan_id_range.split('-') + vlan_id_start, vlan_id_end = vlan_id_range.split("-") except (AttributeError, TypeError): vlan_id_start = vlan_id_end = vlan_id_range except ValueError: @@ -412,14 +520,14 @@ def get_vlan_ids_from_range(vlan_id_range): def get_lacp_support_mode(mode): """Get LACP support mode""" return_mode = None - if mode == 'basic': - return_mode = 
'singleLag' - elif mode == 'enhanced': - return_mode = 'multipleLag' - elif mode == 'singleLag': - return_mode = 'basic' - elif mode == 'multipleLag': - return_mode = 'enhanced' + if mode == "basic": + return_mode = "singleLag" + elif mode == "enhanced": + return_mode = "multipleLag" + elif mode == "singleLag": + return_mode = "basic" + elif mode == "multipleLag": + return_mode = "enhanced" return return_mode @@ -428,18 +536,20 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - switch=dict(required=True, aliases=['dvswitch']), - name=dict(type='str'), - description=dict(type='str'), + switch=dict(required=True, aliases=["dvswitch"]), + name=dict(type="str"), + description=dict(type="str"), advanced=dict( - type='dict', + type="dict", options=dict( - port_config_reset_at_disconnect=dict(type='bool', default=True), - block_override=dict(type='bool', default=True), - vendor_config_override=dict(type='bool', default=False), - vlan_override=dict(type='bool', default=False), - netflow_override=dict(type='bool', default=False), - traffic_filter_override=dict(type='bool', default=False), + port_config_reset_at_disconnect=dict( + type="bool", default=True + ), + block_override=dict(type="bool", default=True), + vendor_config_override=dict(type="bool", default=False), + vlan_override=dict(type="bool", default=False), + netflow_override=dict(type="bool", default=False), + traffic_filter_override=dict(type="bool", default=False), ), default=dict( port_config_reset_at_disconnect=True, @@ -449,33 +559,37 @@ def main(): netflow_override=False, traffic_filter_override=False, ), - aliases=['port_policy'], + aliases=["port_policy"], ), lacp=dict( - type='dict', + type="dict", options=dict( - status=dict(type='str', choices=['enabled', 'disabled'], default=['disabled']), - mode=dict(type='str', choices=['active', 'passive'], default=['passive']), - ), - default=dict( - status='disabled', - mode='passive', + status=dict( + type="str", + choices=["enabled", "disabled"], + default="disabled", + ), + mode=dict( + type="str", + choices=["active", "passive"], + default="passive", + ), ), + default=dict(status="disabled", mode="passive"), ), - vlan_trunk_range=dict(type='list', default=['0-4094']), - netflow_enabled=dict(type='bool', default=False), - block_all_ports=dict(type='bool', default=False), + vlan_trunk_range=dict(type="list", default=["0-4094"]), + netflow_enabled=dict(type="bool", default=False), + block_all_ports=dict(type="bool", default=False), ) ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_dvswitch_uplink_pg = VMwareDvSwitchUplinkPortgroup(module) vmware_dvswitch_uplink_pg.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_evc_mode.py b/plugins/modules/vmware_evc_mode.py index 115fe57..063a33d 100644 --- a/plugins/modules/vmware_evc_mode.py +++ b/plugins/modules/vmware_evc_mode.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_evc_mode short_description: Enable/Disable EVC mode on
vCenter @@ -50,9 +53,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Enable EVC Mode vmware_evc_mode: hostname: "{{ groups['vcsa'][0] }}" @@ -75,7 +78,7 @@ state: absent delegate_to: localhost register: disable_evc -''' +""" RETURN = """ result: @@ -93,17 +96,23 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_datacenter_by_name, find_cluster_by_name, - vmware_argument_spec, wait_for_task, TaskError) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_datacenter_by_name, + find_cluster_by_name, + vmware_argument_spec, + wait_for_task, + TaskError, +) class VMwareEVC(PyVmomi): def __init__(self, module): super(VMwareEVC, self).__init__(module) - self.cluster_name = module.params['cluster_name'] - self.evc_mode = module.params['evc_mode'] - self.datacenter_name = module.params['datacenter_name'] - self.desired_state = module.params['state'] + self.cluster_name = module.params["cluster_name"] + self.evc_mode = module.params["evc_mode"] + self.datacenter_name = module.params["datacenter_name"] + self.desired_state = module.params["state"] self.datacenter = None self.cluster = None @@ -112,14 +121,14 @@ def process_state(self): Manage internal states of evc """ evc_states = { - 'absent': { - 'present': self.state_disable_evc, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_disable_evc, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_update_evc, + "absent": self.state_enable_evc, }, - 'present': { - 'present': self.state_update_evc, - 'absent': self.state_enable_evc, - } } current_state = self.check_evc_configuration() # Based on the desired_state and the current_state call @@ -132,33 +141,51 @@ def check_evc_configuration(self): Returns: 'Present' if evc enabled, else 'absent' """ try: - self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name) + self.datacenter = find_datacenter_by_name( + self.content, self.datacenter_name + ) if self.datacenter is None: - self.module.fail_json(msg="Datacenter '%s' does not exist." % self.datacenter_name) - self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter) + self.module.fail_json( + msg="Datacenter '%s' does not exist." + % self.datacenter_name + ) + self.cluster = self.find_cluster_by_name( + cluster_name=self.cluster_name, datacenter_name=self.datacenter + ) if self.cluster is None: - self.module.fail_json(msg="Cluster '%s' does not exist." % self.cluster_name) + self.module.fail_json( + msg="Cluster '%s' does not exist." % self.cluster_name + ) self.evcm = self.cluster.EvcManager() if not self.evcm: - self.module.fail_json(msg="Unable to get EVC manager for cluster '%s'." % self.cluster_name) + self.module.fail_json( + msg="Unable to get EVC manager for cluster '%s'." 
+ % self.cluster_name + ) self.evc_state = self.evcm.evcState self.current_evc_mode = self.evc_state.currentEVCModeKey if not self.current_evc_mode: - return 'absent' + return "absent" - return 'present' + return "present" except Exception as generic_exc: - self.module.fail_json(msg="Failed to check configuration" - " due to generic exception %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to check configuration" + " due to generic exception %s" % to_native(generic_exc) + ) def state_exit_unchanged(self): """ Exit without any change """ - self.module.exit_json(changed=False, msg="EVC Mode is already disabled on cluster '%s'." % self.cluster_name) + self.module.exit_json( + changed=False, + msg="EVC Mode is already disabled on cluster '%s'." + % self.cluster_name, + ) def state_update_evc(self): """ @@ -166,16 +193,33 @@ def state_update_evc(self): """ changed, result = False, None try: - if not self.module.check_mode and self.current_evc_mode != self.evc_mode: + if ( + not self.module.check_mode + and self.current_evc_mode != self.evc_mode + ): evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode) changed, result = wait_for_task(evc_task) - if self.module.check_mode and self.current_evc_mode != self.evc_mode: + if ( + self.module.check_mode + and self.current_evc_mode != self.evc_mode + ): changed, result = True, None if self.current_evc_mode == self.evc_mode: - self.module.exit_json(changed=changed, msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." % self.params) - self.module.exit_json(changed=changed, msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." % self.params) + self.module.exit_json( + changed=changed, + msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." + % self.params, + ) + self.module.exit_json( + changed=changed, + msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." + % self.params, + ) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to update EVC mode: %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to update EVC mode: %s" + % to_native(invalid_argument) + ) def state_enable_evc(self): """ @@ -188,9 +232,16 @@ def state_enable_evc(self): changed, result = wait_for_task(evc_task) if self.module.check_mode: changed, result = True, None - self.module.exit_json(changed=changed, msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." % self.params) + self.module.exit_json( + changed=changed, + msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." + % self.params, + ) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to enable EVC mode: %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to enable EVC mode: %s" + % to_native(invalid_argument) + ) def state_disable_evc(self): """ @@ -203,31 +254,46 @@ def state_disable_evc(self): changed, result = wait_for_task(evc_task) if self.module.check_mode: changed, result = True, None - self.module.exit_json(changed=changed, msg="EVC Mode has been disabled on cluster '%s'." % self.cluster_name) + self.module.exit_json( + changed=changed, + msg="EVC Mode has been disabled on cluster '%s'." 
+ % self.cluster_name, + ) except TaskError as invalid_argument: - self.module.fail_json(msg="Failed to disable EVC mode: %s" % to_native(invalid_argument)) + self.module.fail_json( + msg="Failed to disable EVC mode: %s" + % to_native(invalid_argument) + ) def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - cluster_name=dict(type='str', required=True), - datacenter_name=dict(type='str', required=True), - evc_mode=dict(type='str', required=True), - state=dict(type='str', default='present', choices=['absent', 'present']), - )) + argument_spec.update( + dict( + cluster_name=dict(type="str", required=True), + datacenter_name=dict(type="str", required=True), + evc_mode=dict(type="str", required=True), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + ) + ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ - ['state', 'present', ['cluster_name', 'datacenter_name', 'evc_mode']] - ] + [ + "state", + "present", + ["cluster_name", "datacenter_name", "evc_mode"], + ] + ], ) vmware_evc = VMwareEVC(module) vmware_evc.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_export_ovf.py b/plugins/modules/vmware_export_ovf.py index fac2a74..557023a 100644 --- a/plugins/modules/vmware_export_ovf.py +++ b/plugins/modules/vmware_export_ovf.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_export_ovf short_description: Exports a VMware virtual machine to an OVF file, device files and a manifest file @@ -82,9 +85,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - vmware_export_ovf: validate_certs: false hostname: '{{ vcenter_hostname }}' @@ -94,15 +97,15 @@ export_with_images: true export_dir: /path/to/ovf_template/ delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" instance: description: list of the exported files, if exported from vCenter server, device file is not named with vm name returned: always type: dict sample: None -''' +""" import os import hashlib @@ -111,7 +114,11 @@ from ansible.module_utils.urls import open_url from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text, to_bytes -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) + try: from pyVmomi import vim from pyVim import connect @@ -141,7 +148,11 @@ def run(self): self.httpNfcLease.HttpNfcLeaseProgress(self.progressPercent) sleep_sec = 0 while True: - if self.httpNfcLease.state == vim.HttpNfcLease.State.done or self.httpNfcLease.state == vim.HttpNfcLease.State.error: + if ( + self.httpNfcLease.state == vim.HttpNfcLease.State.done + or self.httpNfcLease.state + == vim.HttpNfcLease.State.error + ): return sleep_sec += 1 sleep(1) @@ -154,47 +165,67 @@ def run(self): class VMwareExportVmOvf(PyVmomi): def __init__(self, module): super(VMwareExportVmOvf, 
self).__init__(module) - self.mf_file = '' - self.ovf_dir = '' + self.mf_file = "" + self.ovf_dir = "" # set read device content chunk size to 2 MB self.chunk_size = 2 * 2 ** 20 # set lease progress update interval to 15 seconds self.lease_interval = 15 - self.facts = {'device_files': []} + self.facts = {"device_files": []} self.download_timeout = None def create_export_dir(self, vm_obj): - self.ovf_dir = os.path.join(self.params['export_dir'], vm_obj.name) + self.ovf_dir = os.path.join(self.params["export_dir"], vm_obj.name) if not os.path.exists(self.ovf_dir): try: os.makedirs(self.ovf_dir) except OSError as err: - self.module.fail_json(msg='Exception caught when create folder %s, with error %s' - % (self.ovf_dir, to_text(err))) - self.mf_file = os.path.join(self.ovf_dir, vm_obj.name + '.mf') + self.module.fail_json( + msg="Exception caught when creating folder %s, with error %s" + % (self.ovf_dir, to_text(err)) + ) + self.mf_file = os.path.join(self.ovf_dir, vm_obj.name + ".mf") - def download_device_files(self, headers, temp_target_disk, device_url, lease_updater, total_bytes_written, - total_bytes_to_write): - mf_content = 'SHA256(' + os.path.basename(temp_target_disk) + ')= ' + def download_device_files( + self, + headers, + temp_target_disk, + device_url, + lease_updater, + total_bytes_written, + total_bytes_to_write, + ): + mf_content = "SHA256(" + os.path.basename(temp_target_disk) + ")= " sha256_hash = hashlib.sha256() response = None - with open(self.mf_file, 'a') as mf_handle: - with open(temp_target_disk, 'wb') as handle: + with open(self.mf_file, "a") as mf_handle: + with open(temp_target_disk, "wb") as handle: try: - response = open_url(device_url, headers=headers, validate_certs=False, timeout=self.download_timeout) + response = open_url( + device_url, + headers=headers, + validate_certs=False, + timeout=self.download_timeout, + ) except Exception as err: lease_updater.httpNfcLease.HttpNfcLeaseAbort() lease_updater.stop() - self.module.fail_json(msg='Exception caught when getting %s, %s' % (device_url, to_text(err))) + self.module.fail_json( + msg="Exception caught when getting %s, %s" + % (device_url, to_text(err)) + ) if not response: lease_updater.httpNfcLease.HttpNfcLeaseAbort() lease_updater.stop() - self.module.fail_json(msg='Getting %s failed' % device_url) + self.module.fail_json(msg="Getting %s failed" % device_url) if response.getcode() >= 400: lease_updater.httpNfcLease.HttpNfcLeaseAbort() lease_updater.stop() - self.module.fail_json(msg='Getting %s return code %d' % (device_url, response.getcode())) + self.module.fail_json( + msg="Getting %s return code %d" + % (device_url, response.getcode()) + ) current_bytes_written = 0 block = response.read(self.chunk_size) while block: @@ -204,24 +235,31 @@ def download_device_files(self, headers, temp_target_disk, device_url, lease_upd os.fsync(handle.fileno()) current_bytes_written += len(block) block = response.read(self.chunk_size) - written_percent = ((current_bytes_written + total_bytes_written) * 100) / total_bytes_to_write + written_percent = ( + (current_bytes_written + total_bytes_written) * 100 + ) / total_bytes_to_write lease_updater.progressPercent = int(written_percent) - mf_handle.write(mf_content + sha256_hash.hexdigest() + '\n') - self.facts['device_files'].append(temp_target_disk) + mf_handle.write(mf_content + sha256_hash.hexdigest() + "\n") + self.facts["device_files"].append(temp_target_disk) return current_bytes_written def export_to_ovf_files(self, vm_obj): self.create_export_dir(vm_obj=vm_obj)
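# A minimal, standalone sketch (not part of the patch above) of the manifest entry format
# that download_device_files produces: one "SHA256(<file name>)= <hex digest>" line per
# exported file, with the digest computed over the file contents in chunks. The helper name
# and paths below are hypothetical and chosen only for illustration.
import hashlib
import os


def manifest_line(file_path, chunk_size=2 * 2 ** 20):
    """Return an OVF .mf entry such as 'SHA256(disk-1.vmdk)= <hex digest>'."""
    digest = hashlib.sha256()
    with open(file_path, "rb") as handle:
        block = handle.read(chunk_size)
        while block:
            digest.update(block)
            block = handle.read(chunk_size)
    return "SHA256(%s)= %s\n" % (os.path.basename(file_path), digest.hexdigest())


# Hypothetical usage: append an entry for an already-downloaded device file.
# with open("/path/to/ovf_template/vm1/vm1.mf", "a") as mf_handle:
#     mf_handle.write(manifest_line("/path/to/ovf_template/vm1/vm1-1.vmdk"))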
export_with_iso = False - if 'export_with_images' in self.params and self.params['export_with_images']: + if ( + "export_with_images" in self.params + and self.params["export_with_images"] + ): export_with_iso = True - self.download_timeout = self.params['download_timeout'] + self.download_timeout = self.params["download_timeout"] ovf_files = [] # get http nfc lease firstly http_nfc_lease = vm_obj.ExportVm() # create a thread to track file download progress - lease_updater = LeaseProgressUpdater(http_nfc_lease, self.lease_interval) + lease_updater = LeaseProgressUpdater( + http_nfc_lease, self.lease_interval + ) total_bytes_written = 0 # total storage space occupied by the virtual machine across all datastores total_bytes_to_write = vm_obj.summary.storage.unshared @@ -230,11 +268,13 @@ def export_to_ovf_files(self, vm_obj): total_bytes_to_write = vm_obj.summary.storage.committed if total_bytes_to_write == 0: http_nfc_lease.HttpNfcLeaseAbort() - self.module.fail_json(msg='Total storage space occupied by the VM is 0.') - headers = {'Accept': 'application/x-vnd.vmware-streamVmdk'} + self.module.fail_json( + msg="Total storage space occupied by the VM is 0." + ) + headers = {"Accept": "application/x-vnd.vmware-streamVmdk"} cookies = connect.GetStub().cookie if cookies: - headers['Cookie'] = cookies + headers["Cookie"] = cookies lease_updater.start() try: while True: @@ -243,31 +283,47 @@ def export_to_ovf_files(self, vm_obj): file_download = False if deviceUrl.targetId and deviceUrl.disk: file_download = True - elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'iso': + elif ( + deviceUrl.url.split("/")[-1].split(".")[-1] + == "iso" + ): if export_with_iso: file_download = True - elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'nvram': - if self.host_version_at_least(version=(6, 7, 0), vm_obj=vm_obj): + elif ( + deviceUrl.url.split("/")[-1].split(".")[-1] + == "nvram" + ): + if self.host_version_at_least( + version=(6, 7, 0), vm_obj=vm_obj + ): file_download = True else: continue - device_file_name = deviceUrl.url.split('/')[-1] + device_file_name = deviceUrl.url.split("/")[-1] # device file named disk-0.iso, disk-1.vmdk, disk-2.vmdk, replace 'disk' with vm name - if device_file_name.split('.')[0][0:5] == "disk-": - device_file_name = device_file_name.replace('disk', vm_obj.name) - temp_target_disk = os.path.join(self.ovf_dir, device_file_name) + if device_file_name.split(".")[0][0:5] == "disk-": + device_file_name = device_file_name.replace( + "disk", vm_obj.name + ) + temp_target_disk = os.path.join( + self.ovf_dir, device_file_name + ) device_url = deviceUrl.url # if export from ESXi host, replace * with hostname in url # e.g., https://*/ha-nfc/5289bf27-da99-7c0e-3978-8853555deb8c/disk-1.vmdk - if '*' in device_url: - device_url = device_url.replace('*', self.params['hostname']) + if "*" in device_url: + device_url = device_url.replace( + "*", self.params["hostname"] + ) if file_download: - current_bytes_written = self.download_device_files(headers=headers, - temp_target_disk=temp_target_disk, - device_url=device_url, - lease_updater=lease_updater, - total_bytes_written=total_bytes_written, - total_bytes_to_write=total_bytes_to_write) + current_bytes_written = self.download_device_files( + headers=headers, + temp_target_disk=temp_target_disk, + device_url=device_url, + lease_updater=lease_updater, + total_bytes_written=total_bytes_written, + total_bytes_to_write=total_bytes_to_write, + ) total_bytes_written += current_bytes_written ovf_file = vim.OvfManager.OvfFile() ovf_file.deviceId 
= deviceUrl.key @@ -275,12 +331,17 @@ def export_to_ovf_files(self, vm_obj): ovf_file.size = current_bytes_written ovf_files.append(ovf_file) break - elif http_nfc_lease.state == vim.HttpNfcLease.State.initializing: + elif ( + http_nfc_lease.state == vim.HttpNfcLease.State.initializing + ): sleep(2) continue elif http_nfc_lease.state == vim.HttpNfcLease.State.error: lease_updater.stop() - self.module.fail_json(msg='Get HTTP NFC lease error %s.' % http_nfc_lease.state.error[0].fault) + self.module.fail_json( + msg="Get HTTP NFC lease error %s." + % http_nfc_lease.state.error[0].fault + ) # generate ovf file ovf_manager = self.content.ovfManager @@ -288,68 +349,82 @@ def export_to_ovf_files(self, vm_obj): ovf_parameters = vim.OvfManager.CreateDescriptorParams() ovf_parameters.name = ovf_descriptor_name ovf_parameters.ovfFiles = ovf_files - vm_descriptor_result = ovf_manager.CreateDescriptor(obj=vm_obj, cdp=ovf_parameters) + vm_descriptor_result = ovf_manager.CreateDescriptor( + obj=vm_obj, cdp=ovf_parameters + ) if vm_descriptor_result.error: http_nfc_lease.HttpNfcLeaseAbort() lease_updater.stop() - self.module.fail_json(msg='Create VM descriptor file error %s.' % vm_descriptor_result.error) + self.module.fail_json( + msg="Create VM descriptor file error %s." + % vm_descriptor_result.error + ) else: vm_descriptor = vm_descriptor_result.ovfDescriptor - ovf_descriptor_path = os.path.join(self.ovf_dir, ovf_descriptor_name + '.ovf') + ovf_descriptor_path = os.path.join( + self.ovf_dir, ovf_descriptor_name + ".ovf" + ) sha256_hash = hashlib.sha256() - with open(self.mf_file, 'a') as mf_handle: - with open(ovf_descriptor_path, 'w') as handle: + with open(self.mf_file, "a") as mf_handle: + with open(ovf_descriptor_path, "w") as handle: handle.write(vm_descriptor) sha256_hash.update(to_bytes(vm_descriptor)) - mf_handle.write('SHA256(' + os.path.basename(ovf_descriptor_path) + ')= ' + sha256_hash.hexdigest() + '\n') + mf_handle.write( + "SHA256(" + + os.path.basename(ovf_descriptor_path) + + ")= " + + sha256_hash.hexdigest() + + "\n" + ) http_nfc_lease.HttpNfcLeaseProgress(100) # self.facts = http_nfc_lease.HttpNfcLeaseGetManifest() http_nfc_lease.HttpNfcLeaseComplete() lease_updater.stop() - self.facts.update({'manifest': self.mf_file, 'ovf_file': ovf_descriptor_path}) + self.facts.update( + {"manifest": self.mf_file, "ovf_file": ovf_descriptor_path} + ) except Exception as err: kwargs = { - 'changed': False, - 'failed': True, - 'msg': "get exception: %s" % to_text(err), + "changed": False, + "failed": True, + "msg": "get exception: %s" % to_text(err), } http_nfc_lease.HttpNfcLeaseAbort() lease_updater.stop() return kwargs - return {'changed': True, 'failed': False, 'instance': self.facts} + return {"changed": True, "failed": False, "instance": self.facts} def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', default='ha-datacenter'), - export_dir=dict(type='path', required=True), - export_with_images=dict(type='bool', default=False), - download_timeout=dict(type='int', default=30), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", default="ha-datacenter"), + export_dir=dict(type="path", required=True), + export_with_images=dict(type="bool", default=False), + download_timeout=dict(type="int", default=30), ) - module = AnsibleModule(argument_spec=argument_spec, - 
supports_check_mode=True, - required_one_of=[ - ['name', 'uuid', 'moid'], - ], - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[["name", "uuid", "moid"]], + ) pyv = VMwareExportVmOvf(module) vm = pyv.get_vm() if vm: vm_facts = pyv.gather_facts(vm) - vm_power_state = vm_facts['hw_power_status'].lower() - if vm_power_state != 'poweredoff': - module.fail_json(msg='VM state should be poweredoff to export') + vm_power_state = vm_facts["hw_power_status"].lower() + if vm_power_state != "poweredoff": + module.fail_json(msg="VM state should be poweredoff to export") results = pyv.export_to_ovf_files(vm_obj=vm) module.exit_json(**results) else: - module.fail_json(msg='The specified virtual machine not found') + module.fail_json(msg="The specified virtual machine was not found") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_folder_info.py b/plugins/modules/vmware_folder_info.py index 2284c75..afbfd38 100644 --- a/plugins/modules/vmware_folder_info.py +++ b/plugins/modules/vmware_folder_info.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_folder_info short_description: Provides information about folders in a datacenter @@ -37,9 +38,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Provide information about vCenter folders vmware_folder_info: hostname: '{{ vcenter_hostname }}' @@ -48,9 +49,9 @@ datacenter: datacenter_name delegate_to: localhost register: vcenter_folder_info -''' +""" -RETURN = r''' +RETURN = r""" folder_info: description: - dict about folders @@ -94,7 +95,7 @@ } } } -''' +""" try: from pyVmomi import vim @@ -102,38 +103,45 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareFolderInfoManager(PyVmomi): def __init__(self, module): super(VmwareFolderInfoManager, self).__init__(module) - self.dc_name = self.params['datacenter'] + self.dc_name = self.params["datacenter"] def gather_folder_info(self): datacenter = self.find_datacenter_by_name(self.dc_name) if datacenter is None: - self.module.fail_json(msg="Failed to find the datacenter %s" % self.dc_name) + self.module.fail_json( + msg="Failed to find the datacenter %s" % self.dc_name + ) folder_trees = {} - folder_trees['vmFolders'] = self.build_folder_tree(datacenter.vmFolder, "/%s/vm" % self.dc_name) - folder_trees['hostFolders'] = self.build_folder_tree(datacenter.hostFolder, "/%s/host" % self.dc_name) - folder_trees['networkFolders'] = self.build_folder_tree(datacenter.networkFolder, "/%s/network" % self.dc_name) - folder_trees['datastoreFolders'] = self.build_folder_tree(datacenter.datastoreFolder, "/%s/datastore" % self.dc_name) - - self.module.exit_json( - changed=False, - folder_info=folder_trees + folder_trees["vmFolders"] = self.build_folder_tree( + datacenter.vmFolder, "/%s/vm" % self.dc_name + ) +
folder_trees["hostFolders"] = self.build_folder_tree( + datacenter.hostFolder, "/%s/host" % self.dc_name + ) + folder_trees["networkFolders"] = self.build_folder_tree( + datacenter.networkFolder, "/%s/network" % self.dc_name ) + folder_trees["datastoreFolders"] = self.build_folder_tree( + datacenter.datastoreFolder, "/%s/datastore" % self.dc_name + ) + + self.module.exit_json(changed=False, folder_info=folder_trees) def build_folder_tree(self, folder, path): - tree = { - 'path': path, - 'subfolders': {} - } + tree = {"path": path, "subfolders": {}} children = None - if hasattr(folder, 'childEntity'): + if hasattr(folder, "childEntity"): children = folder.childEntity if children: @@ -141,20 +149,21 @@ def build_folder_tree(self, folder, path): if child == folder: continue if isinstance(child, vim.Folder): - ctree = self.build_folder_tree(child, "%s/%s" % (path, child.name)) - tree['subfolders'][child.name] = dict.copy(ctree) + ctree = self.build_folder_tree( + child, "%s/%s" % (path, child.name) + ) + tree["subfolders"][child.name] = dict.copy(ctree) return tree def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str', required=True, aliases=['datacenter_name']) + datacenter=dict(type="str", required=True, aliases=["datacenter_name"]) ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True ) vmware_folder_info_mgr = VmwareFolderInfoManager(module) diff --git a/plugins/modules/vmware_guest.py b/plugins/modules/vmware_guest.py index 418f1cc..582a406 100644 --- a/plugins/modules/vmware_guest.py +++ b/plugins/modules/vmware_guest.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_guest short_description: Manages virtual machines in vCenter @@ -369,9 +372,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a virtual machine on given ESXi hostname vmware_guest: hostname: "{{ vcenter_hostname }}" @@ -598,15 +601,15 @@ memory_mb: 1024 num_cpus: 2 num_cpu_cores_per_socket: 1 -''' +""" -RETURN = r''' +RETURN = r""" instance: description: metadata about the new virtual machine returned: always type: dict sample: None -''' +""" import re import time @@ -615,6 +618,7 @@ HAS_PYVMOMI = False try: from pyVmomi import vim, vmodl, VmomiSupport + HAS_PYVMOMI = True except ImportError: pass @@ -623,18 +627,31 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.network import is_mac from ansible.module_utils._text import to_text, to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs, - compile_folder_path_for_object, serialize_spec, - vmware_argument_spec, set_vm_power_state, PyVmomi, - find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip, - wait_for_task, TaskError, quote_obj_name) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + find_obj, + gather_vm_facts, + get_all_objs, + compile_folder_path_for_object, + serialize_spec, + 
vmware_argument_spec, + set_vm_power_state, + PyVmomi, + find_dvs_by_name, + find_dvspg_by_name, + wait_for_vm_ip, + wait_for_task, + TaskError, + quote_obj_name, +) def list_or_dict(value): if isinstance(value, list) or isinstance(value, dict): return value else: - raise ValueError("'%s' is not valid, valid type is 'list' or 'dict'." % value) + raise ValueError( + "'%s' is not valid, valid type is 'list' or 'dict'." % value + ) class PyVmomiDeviceHelper(object): @@ -644,23 +661,25 @@ def __init__(self, module): self.module = module self.next_disk_unit_number = 0 self.scsi_device_type = { - 'lsilogic': vim.vm.device.VirtualLsiLogicController, - 'paravirtual': vim.vm.device.ParaVirtualSCSIController, - 'buslogic': vim.vm.device.VirtualBusLogicController, - 'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController, + "lsilogic": vim.vm.device.VirtualLsiLogicController, + "paravirtual": vim.vm.device.ParaVirtualSCSIController, + "buslogic": vim.vm.device.VirtualBusLogicController, + "lsilogicsas": vim.vm.device.VirtualLsiLogicSASController, } def create_scsi_controller(self, scsi_type): scsi_ctl = vim.vm.device.VirtualDeviceSpec() scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add - scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController) + scsi_device = self.scsi_device_type.get( + scsi_type, vim.vm.device.ParaVirtualSCSIController + ) scsi_ctl.device = scsi_device() scsi_ctl.device.busNumber = 0 # While creating a new SCSI controller, temporary key value # should be unique negative integers scsi_ctl.device.key = -randint(1000, 9999) scsi_ctl.device.hotAddRemove = True - scsi_ctl.device.sharedBus = 'noSharing' + scsi_ctl.device.sharedBus = "noSharing" scsi_ctl.device.scsiCtlrUnitNumber = 7 return scsi_ctl @@ -689,47 +708,88 @@ def create_cdrom(ide_device, cdrom_type, iso_path=None, unit_number=0): cdrom_spec.device.controllerKey = ide_device.key cdrom_spec.device.key = -randint(3000, 3999) cdrom_spec.device.unitNumber = unit_number - cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() + cdrom_spec.device.connectable = ( + vim.vm.device.VirtualDevice.ConnectInfo() + ) cdrom_spec.device.connectable.allowGuestControl = True - cdrom_spec.device.connectable.startConnected = (cdrom_type != "none") + cdrom_spec.device.connectable.startConnected = cdrom_type != "none" if cdrom_type in ["none", "client"]: - cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() + cdrom_spec.device.backing = ( + vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() + ) elif cdrom_type == "iso": - cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path) + cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo( + fileName=iso_path + ) return cdrom_spec @staticmethod def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path): if cdrom_type == "none": - return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and - cdrom_device.connectable.allowGuestControl and - not cdrom_device.connectable.startConnected and - (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected)) + return ( + isinstance( + cdrom_device.backing, + vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo, + ) + and cdrom_device.connectable.allowGuestControl + and not cdrom_device.connectable.startConnected + and ( + vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOn + or not 
cdrom_device.connectable.connected + ) + ) elif cdrom_type == "client": - return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and - cdrom_device.connectable.allowGuestControl and - cdrom_device.connectable.startConnected and - (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected)) + return ( + isinstance( + cdrom_device.backing, + vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo, + ) + and cdrom_device.connectable.allowGuestControl + and cdrom_device.connectable.startConnected + and ( + vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOn + or cdrom_device.connectable.connected + ) + ) elif cdrom_type == "iso": - return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and - cdrom_device.backing.fileName == iso_path and - cdrom_device.connectable.allowGuestControl and - cdrom_device.connectable.startConnected and - (vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected)) + return ( + isinstance( + cdrom_device.backing, + vim.vm.device.VirtualCdrom.IsoBackingInfo, + ) + and cdrom_device.backing.fileName == iso_path + and cdrom_device.connectable.allowGuestControl + and cdrom_device.connectable.startConnected + and ( + vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOn + or cdrom_device.connectable.connected + ) + ) @staticmethod def update_cdrom_config(vm_obj, cdrom_spec, cdrom_device, iso_path=None): # Updating an existing CD-ROM if cdrom_spec["type"] in ["client", "none"]: - cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() + cdrom_device.backing = ( + vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo() + ) elif cdrom_spec["type"] == "iso" and iso_path is not None: - cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path) + cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo( + fileName=iso_path + ) cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() cdrom_device.connectable.allowGuestControl = True - cdrom_device.connectable.startConnected = (cdrom_spec["type"] != "none") - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: - cdrom_device.connectable.connected = (cdrom_spec["type"] != "none") + cdrom_device.connectable.startConnected = cdrom_spec["type"] != "none" + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + ): + cdrom_device.connectable.connected = cdrom_spec["type"] != "none" def remove_cdrom(self, cdrom_device): cdrom_spec = vim.vm.device.VirtualDeviceSpec() @@ -742,7 +802,9 @@ def create_scsi_disk(self, scsi_ctl, disk_index=None): diskspec = vim.vm.device.VirtualDeviceSpec() diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add diskspec.device = vim.vm.device.VirtualDisk() - diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() + diskspec.device.backing = ( + vim.vm.device.VirtualDisk.FlatVer2BackingInfo() + ) diskspec.device.controllerKey = scsi_ctl.device.key if self.next_disk_unit_number == 7: @@ -766,35 +828,44 @@ def create_scsi_disk(self, scsi_ctl, disk_index=None): return diskspec def get_device(self, device_type, name): - nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(), - vmxnet2=vim.vm.device.VirtualVmxnet2(), - vmxnet3=vim.vm.device.VirtualVmxnet3(), - e1000=vim.vm.device.VirtualE1000(), - e1000e=vim.vm.device.VirtualE1000e(), - 
sriov=vim.vm.device.VirtualSriovEthernetCard(), - ) + nic_dict = dict( + pcnet32=vim.vm.device.VirtualPCNet32(), + vmxnet2=vim.vm.device.VirtualVmxnet2(), + vmxnet3=vim.vm.device.VirtualVmxnet3(), + e1000=vim.vm.device.VirtualE1000(), + e1000e=vim.vm.device.VirtualE1000e(), + sriov=vim.vm.device.VirtualSriovEthernetCard(), + ) if device_type in nic_dict: return nic_dict[device_type] else: - self.module.fail_json(msg='Invalid device_type "%s"' - ' for network "%s"' % (device_type, name)) + self.module.fail_json( + msg='Invalid device_type "%s"' + ' for network "%s"' % (device_type, name) + ) def create_nic(self, device_type, device_label, device_infos): nic = vim.vm.device.VirtualDeviceSpec() - nic.device = self.get_device(device_type, device_infos['name']) - nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True)) + nic.device = self.get_device(device_type, device_infos["name"]) + nic.device.wakeOnLanEnabled = bool( + device_infos.get("wake_on_lan", True) + ) nic.device.deviceInfo = vim.Description() nic.device.deviceInfo.label = device_label - nic.device.deviceInfo.summary = device_infos['name'] + nic.device.deviceInfo.summary = device_infos["name"] nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() - nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True)) - nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True)) + nic.device.connectable.startConnected = bool( + device_infos.get("start_connected", True) + ) + nic.device.connectable.allowGuestControl = bool( + device_infos.get("allow_guest_control", True) + ) nic.device.connectable.connected = True - if 'mac' in device_infos and is_mac(device_infos['mac']): - nic.device.addressType = 'manual' - nic.device.macAddress = device_infos['mac'] + if "mac" in device_infos and is_mac(device_infos["mac"]): + nic.device.addressType = "manual" + nic.device.macAddress = device_infos["mac"] else: - nic.device.addressType = 'generated' + nic.device.addressType = "generated" return nic @@ -811,8 +882,9 @@ def integer_value(self, input_value, name): elif isinstance(input_value, str) and input_value.isdigit(): return int(input_value) else: - self.module.fail_json(msg='"%s" attribute should be an' - ' integer value.' % name) + self.module.fail_json( + msg='"%s" attribute should be an' " integer value." 
% name + ) class PyVmomiCache(object): @@ -830,9 +902,13 @@ def find_obj(self, content, types, name, confine_to_datacenter=True): """ Wrapper around find_obj to set datacenter context """ result = find_obj(content, types, name) if result and confine_to_datacenter: - if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name): + if to_text(self.get_parent_datacenter(result).name) != to_text( + self.dc_name + ): result = None - objects = self.get_all_objs(content, types, confine_to_datacenter=True) + objects = self.get_all_objs( + content, types, confine_to_datacenter=True + ) for obj in objects: if name is None or to_text(obj.name) == to_text(name): return obj @@ -842,7 +918,7 @@ def get_all_objs(self, content, types, confine_to_datacenter=True): """ Wrapper around get_all_objs to set datacenter context """ objects = get_all_objs(content, types) if confine_to_datacenter: - if hasattr(objects, 'items'): + if hasattr(objects, "items"): # resource pools come back as a dictionary # make a copy for k, v in tuple(objects.items()): @@ -851,7 +927,11 @@ def get_all_objs(self, content, types, confine_to_datacenter=True): del objects[k] else: # everything else should be a list - objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name] + objects = [ + x + for x in objects + if self.get_parent_datacenter(x).name == self.dc_name + ] return objects @@ -859,19 +939,25 @@ def get_network(self, network): network = quote_obj_name(network) if network not in self.networks: - self.networks[network] = self.find_obj(self.content, [vim.Network], network) + self.networks[network] = self.find_obj( + self.content, [vim.Network], network + ) return self.networks[network] def get_cluster(self, cluster): if cluster not in self.clusters: - self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster) + self.clusters[cluster] = self.find_obj( + self.content, [vim.ClusterComputeResource], cluster + ) return self.clusters[cluster] def get_esx_host(self, host): if host not in self.esx_hosts: - self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host) + self.esx_hosts[host] = self.find_obj( + self.content, [vim.HostSystem], host + ) return self.esx_hosts[host] @@ -883,7 +969,7 @@ def get_parent_datacenter(self, obj): return self.parent_datacenters[obj] datacenter = None while True: - if not hasattr(obj, 'parent'): + if not hasattr(obj, "parent"): break obj = obj.parent if isinstance(obj, vim.Datacenter): @@ -899,50 +985,72 @@ def __init__(self, module): self.device_helper = PyVmomiDeviceHelper(self.module) self.configspec = None self.relospec = None - self.change_detected = False # a change was detected and needs to be applied through reconfiguration - self.change_applied = False # a change was applied meaning at least one task succeeded + self.change_detected = ( + False + ) # a change was detected and needs to be applied through reconfiguration + self.change_applied = ( + False + ) # a change was applied meaning at least one task succeeded self.customspec = None - self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter']) + self.cache = PyVmomiCache( + self.content, dc_name=self.params["datacenter"] + ) def gather_facts(self, vm): return gather_vm_facts(self.content, vm) def remove_vm(self, vm, delete_from_inventory=False): # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy - if vm.summary.runtime.powerState.lower() == 'poweredon': - 
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, " - "please use 'force' parameter to remove or poweroff VM " - "and try removing VM again." % vm.name) + if vm.summary.runtime.powerState.lower() == "poweredon": + self.module.fail_json( + msg="Virtual machine %s found in 'powered on' state, " + "please use 'force' parameter to remove or poweroff VM " + "and try removing VM again." % vm.name + ) # Delete VM from Inventory if delete_from_inventory: try: vm.UnregisterVM() - except (vim.fault.TaskInProgress, - vmodl.RuntimeFault) as e: - return {'changed': self.change_applied, 'failed': True, 'msg': e.msg, 'op': 'UnregisterVM'} + except (vim.fault.TaskInProgress, vmodl.RuntimeFault) as e: + return { + "changed": self.change_applied, + "failed": True, + "msg": e.msg, + "op": "UnregisterVM", + } self.change_applied = True - return {'changed': self.change_applied, 'failed': False} + return {"changed": self.change_applied, "failed": False} # Delete VM from Disk task = vm.Destroy() self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "destroy", + } else: - return {'changed': self.change_applied, 'failed': False} + return {"changed": self.change_applied, "failed": False} def configure_guestid(self, vm_obj, vm_creation=False): # guest_id is not required when using templates - if self.params['template']: + if self.params["template"]: return # guest_id is only mandatory on VM creation - if vm_creation and self.params['guest_id'] is None: - self.module.fail_json(msg="guest_id attribute is mandatory for VM creation") - - if self.params['guest_id'] and \ - (vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()): + if vm_creation and self.params["guest_id"] is None: + self.module.fail_json( + msg="guest_id attribute is mandatory for VM creation" + ) + + if self.params["guest_id"] and ( + vm_obj is None + or self.params["guest_id"].lower() + != vm_obj.summary.config.guestId.lower() + ): self.change_detected = True - self.configspec.guestId = self.params['guest_id'] + self.configspec.guestId = self.params["guest_id"] def configure_resource_alloc_info(self, vm_obj): """ @@ -954,50 +1062,81 @@ def configure_resource_alloc_info(self, vm_obj): memory_allocation = vim.ResourceAllocationInfo() cpu_allocation = vim.ResourceAllocationInfo() - if 'hardware' in self.params: - if 'mem_limit' in self.params['hardware']: + if "hardware" in self.params: + if "mem_limit" in self.params["hardware"]: mem_limit = None try: - mem_limit = int(self.params['hardware'].get('mem_limit')) + mem_limit = int(self.params["hardware"].get("mem_limit")) except ValueError: - self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.") + self.module.fail_json( + msg="hardware.mem_limit attribute should be an integer value." 
+ ) memory_allocation.limit = mem_limit - if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit: + if ( + vm_obj is None + or memory_allocation.limit + != vm_obj.config.memoryAllocation.limit + ): rai_change_detected = True - if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']: - mem_reservation = self.params['hardware'].get('mem_reservation') + if ( + "mem_reservation" in self.params["hardware"] + or "memory_reservation" in self.params["hardware"] + ): + mem_reservation = self.params["hardware"].get( + "mem_reservation" + ) if mem_reservation is None: - mem_reservation = self.params['hardware'].get('memory_reservation') + mem_reservation = self.params["hardware"].get( + "memory_reservation" + ) try: mem_reservation = int(mem_reservation) except ValueError: - self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.") + self.module.fail_json( + msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value." + ) memory_allocation.reservation = mem_reservation - if vm_obj is None or \ - memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation: + if ( + vm_obj is None + or memory_allocation.reservation + != vm_obj.config.memoryAllocation.reservation + ): rai_change_detected = True - if 'cpu_limit' in self.params['hardware']: + if "cpu_limit" in self.params["hardware"]: cpu_limit = None try: - cpu_limit = int(self.params['hardware'].get('cpu_limit')) + cpu_limit = int(self.params["hardware"].get("cpu_limit")) except ValueError: - self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.") + self.module.fail_json( + msg="hardware.cpu_limit attribute should be an integer value." + ) cpu_allocation.limit = cpu_limit - if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit: + if ( + vm_obj is None + or cpu_allocation.limit + != vm_obj.config.cpuAllocation.limit + ): rai_change_detected = True - if 'cpu_reservation' in self.params['hardware']: + if "cpu_reservation" in self.params["hardware"]: cpu_reservation = None try: - cpu_reservation = int(self.params['hardware'].get('cpu_reservation')) + cpu_reservation = int( + self.params["hardware"].get("cpu_reservation") + ) except ValueError: - self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.") + self.module.fail_json( + msg="hardware.cpu_reservation should be an integer value." + ) cpu_allocation.reservation = cpu_reservation - if vm_obj is None or \ - cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation: + if ( + vm_obj is None + or cpu_allocation.reservation + != vm_obj.config.cpuAllocation.reservation + ): rai_change_detected = True if rai_change_detected: @@ -1007,163 +1146,312 @@ def configure_resource_alloc_info(self, vm_obj): def configure_cpu_and_memory(self, vm_obj, vm_creation=False): # set cpu/memory/etc - if 'hardware' in self.params: - if 'num_cpus' in self.params['hardware']: + if "hardware" in self.params: + if "num_cpus" in self.params["hardware"]: try: - num_cpus = int(self.params['hardware']['num_cpus']) + num_cpus = int(self.params["hardware"]["num_cpus"]) except ValueError: - self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.") + self.module.fail_json( + msg="hardware.num_cpus attribute should be an integer value." 
+ ) # check VM power state and cpu hot-add/hot-remove state before re-config VM - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: - if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU: - self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, " - "cpuHotRemove is not enabled") - if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU: - self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, " - "cpuHotAdd is not enabled") - - if 'num_cpu_cores_per_socket' in self.params['hardware']: + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + ): + if ( + not vm_obj.config.cpuHotRemoveEnabled + and num_cpus < vm_obj.config.hardware.numCPU + ): + self.module.fail_json( + msg="Configured cpu number is less than the cpu number of the VM, " + "cpuHotRemove is not enabled" + ) + if ( + not vm_obj.config.cpuHotAddEnabled + and num_cpus > vm_obj.config.hardware.numCPU + ): + self.module.fail_json( + msg="Configured cpu number is more than the cpu number of the VM, " + "cpuHotAdd is not enabled" + ) + + if "num_cpu_cores_per_socket" in self.params["hardware"]: try: - num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket']) + num_cpu_cores_per_socket = int( + self.params["hardware"]["num_cpu_cores_per_socket"] + ) except ValueError: - self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute " - "should be an integer value.") + self.module.fail_json( + msg="hardware.num_cpu_cores_per_socket attribute " + "should be an integer value." + ) if num_cpus % num_cpu_cores_per_socket != 0: - self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple " - "of hardware.num_cpu_cores_per_socket") - self.configspec.numCoresPerSocket = num_cpu_cores_per_socket - if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket: + self.module.fail_json( + msg="hardware.num_cpus attribute should be a multiple " + "of hardware.num_cpu_cores_per_socket" + ) + self.configspec.numCoresPerSocket = ( + num_cpu_cores_per_socket + ) + if ( + vm_obj is None + or self.configspec.numCoresPerSocket + != vm_obj.config.hardware.numCoresPerSocket + ): self.change_detected = True self.configspec.numCPUs = num_cpus - if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU: + if ( + vm_obj is None + or self.configspec.numCPUs != vm_obj.config.hardware.numCPU + ): self.change_detected = True # num_cpu is mandatory for VM creation - elif vm_creation and not self.params['template']: - self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation") + elif vm_creation and not self.params["template"]: + self.module.fail_json( + msg="hardware.num_cpus attribute is mandatory for VM creation" + ) - if 'memory_mb' in self.params['hardware']: + if "memory_mb" in self.params["hardware"]: try: - memory_mb = int(self.params['hardware']['memory_mb']) + memory_mb = int(self.params["hardware"]["memory_mb"]) except ValueError: - self.module.fail_json(msg="Failed to parse hardware.memory_mb value." - " Please refer the documentation and provide" - " correct value.") + self.module.fail_json( + msg="Failed to parse hardware.memory_mb value." + " Please refer the documentation and provide" + " correct value." 
+ ) # check VM power state and memory hotadd state before re-config VM - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: - if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB: - self.module.fail_json(msg="Configured memory is less than memory size of the VM, " - "operation is not supported") - elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB: - self.module.fail_json(msg="memoryHotAdd is not enabled") + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + ): + if ( + vm_obj.config.memoryHotAddEnabled + and memory_mb < vm_obj.config.hardware.memoryMB + ): + self.module.fail_json( + msg="Configured memory is less than memory size of the VM, " + "operation is not supported" + ) + elif ( + not vm_obj.config.memoryHotAddEnabled + and memory_mb != vm_obj.config.hardware.memoryMB + ): + self.module.fail_json( + msg="memoryHotAdd is not enabled" + ) self.configspec.memoryMB = memory_mb - if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB: + if ( + vm_obj is None + or self.configspec.memoryMB + != vm_obj.config.hardware.memoryMB + ): self.change_detected = True # memory_mb is mandatory for VM creation - elif vm_creation and not self.params['template']: - self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation") - - if 'hotadd_memory' in self.params['hardware']: - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \ - vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']): - self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on") - self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory']) - if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled: + elif vm_creation and not self.params["template"]: + self.module.fail_json( + msg="hardware.memory_mb attribute is mandatory for VM creation" + ) + + if "hotadd_memory" in self.params["hardware"]: + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + and vm_obj.config.memoryHotAddEnabled + != bool(self.params["hardware"]["hotadd_memory"]) + ): + self.module.fail_json( + msg="Configure hotadd memory operation is not supported when VM is power on" + ) + self.configspec.memoryHotAddEnabled = bool( + self.params["hardware"]["hotadd_memory"] + ) + if ( + vm_obj is None + or self.configspec.memoryHotAddEnabled + != vm_obj.config.memoryHotAddEnabled + ): self.change_detected = True - if 'hotadd_cpu' in self.params['hardware']: - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \ - vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']): - self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on") - self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu']) - if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled: + if "hotadd_cpu" in self.params["hardware"]: + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + and vm_obj.config.cpuHotAddEnabled + != bool(self.params["hardware"]["hotadd_cpu"]) + ): + self.module.fail_json( + msg="Configure hotadd cpu operation is not supported when VM is power on" + ) + self.configspec.cpuHotAddEnabled = bool( + 
self.params["hardware"]["hotadd_cpu"] + ) + if ( + vm_obj is None + or self.configspec.cpuHotAddEnabled + != vm_obj.config.cpuHotAddEnabled + ): self.change_detected = True - if 'hotremove_cpu' in self.params['hardware']: - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \ - vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']): - self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on") - self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu']) - if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled: + if "hotremove_cpu" in self.params["hardware"]: + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + and vm_obj.config.cpuHotRemoveEnabled + != bool(self.params["hardware"]["hotremove_cpu"]) + ): + self.module.fail_json( + msg="Configure hotremove cpu operation is not supported when VM is power on" + ) + self.configspec.cpuHotRemoveEnabled = bool( + self.params["hardware"]["hotremove_cpu"] + ) + if ( + vm_obj is None + or self.configspec.cpuHotRemoveEnabled + != vm_obj.config.cpuHotRemoveEnabled + ): self.change_detected = True - if 'memory_reservation_lock' in self.params['hardware']: - self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock']) - if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax: + if "memory_reservation_lock" in self.params["hardware"]: + self.configspec.memoryReservationLockedToMax = bool( + self.params["hardware"]["memory_reservation_lock"] + ) + if ( + vm_obj is None + or self.configspec.memoryReservationLockedToMax + != vm_obj.config.memoryReservationLockedToMax + ): self.change_detected = True - if 'boot_firmware' in self.params['hardware']: + if "boot_firmware" in self.params["hardware"]: # boot firmware re-config can cause boot issue if vm_obj is not None: return - boot_firmware = self.params['hardware']['boot_firmware'].lower() - if boot_firmware not in ('bios', 'efi'): - self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]." - " Need one of ['bios', 'efi']." % boot_firmware) + boot_firmware = self.params["hardware"][ + "boot_firmware" + ].lower() + if boot_firmware not in ("bios", "efi"): + self.module.fail_json( + msg="hardware.boot_firmware value is invalid [%s]." + " Need one of ['bios', 'efi']." % boot_firmware + ) self.configspec.firmware = boot_firmware self.change_detected = True def sanitize_cdrom_params(self): # cdroms {'ide': [{num: 0, cdrom: []}, {}], 'sata': [{num: 0, cdrom: []}, {}, ...]} - cdroms = {'ide': [], 'sata': []} - expected_cdrom_spec = self.params.get('cdrom') + cdroms = {"ide": [], "sata": []} + expected_cdrom_spec = self.params.get("cdrom") if expected_cdrom_spec: for cdrom_spec in expected_cdrom_spec: - cdrom_spec['controller_type'] = cdrom_spec.get('controller_type', 'ide').lower() - if cdrom_spec['controller_type'] not in ['ide', 'sata']: - self.module.fail_json(msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'." - % cdrom_spec['controller_type']) - - cdrom_spec['state'] = cdrom_spec.get('state', 'present').lower() - if cdrom_spec['state'] not in ['present', 'absent']: - self.module.fail_json(msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'." 
- % cdrom_spec['state']) - - if cdrom_spec['state'] == 'present': - if 'type' in cdrom_spec and cdrom_spec.get('type') not in ['none', 'client', 'iso']: - self.module.fail_json(msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'." - % cdrom_spec.get('type')) - if cdrom_spec.get('type') == 'iso' and not cdrom_spec.get('iso_path'): - self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.") - - if cdrom_spec['controller_type'] == 'ide' and \ - (cdrom_spec.get('controller_number') not in [0, 1] or cdrom_spec.get('unit_number') not in [0, 1]): - self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid" - " values are 0 or 1 for IDE controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number'))) - - if cdrom_spec['controller_type'] == 'sata' and \ - (cdrom_spec.get('controller_number') not in range(0, 4) or cdrom_spec.get('unit_number') not in range(0, 30)): - self.module.fail_json(msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s," - " valid controller_number value is 0-3, valid unit_number is 0-29" - " for SATA controller." % (cdrom_spec.get('controller_number'), cdrom_spec.get('unit_number'))) + cdrom_spec["controller_type"] = cdrom_spec.get( + "controller_type", "ide" + ).lower() + if cdrom_spec["controller_type"] not in ["ide", "sata"]: + self.module.fail_json( + msg="Invalid cdrom.controller_type: %s, valid value is 'ide' or 'sata'." + % cdrom_spec["controller_type"] + ) + + cdrom_spec["state"] = cdrom_spec.get( + "state", "present" + ).lower() + if cdrom_spec["state"] not in ["present", "absent"]: + self.module.fail_json( + msg="Invalid cdrom.state: %s, valid value is 'present', 'absent'." + % cdrom_spec["state"] + ) + + if cdrom_spec["state"] == "present": + if "type" in cdrom_spec and cdrom_spec.get("type") not in [ + "none", + "client", + "iso", + ]: + self.module.fail_json( + msg="Invalid cdrom.type: %s, valid value is 'none', 'client' or 'iso'." + % cdrom_spec.get("type") + ) + if cdrom_spec.get("type") == "iso" and not cdrom_spec.get( + "iso_path" + ): + self.module.fail_json( + msg="cdrom.iso_path is mandatory when cdrom.type is set to iso." + ) + + if cdrom_spec["controller_type"] == "ide" and ( + cdrom_spec.get("controller_number") not in [0, 1] + or cdrom_spec.get("unit_number") not in [0, 1] + ): + self.module.fail_json( + msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s, valid" + " values are 0 or 1 for IDE controller." + % ( + cdrom_spec.get("controller_number"), + cdrom_spec.get("unit_number"), + ) + ) + + if cdrom_spec["controller_type"] == "sata" and ( + cdrom_spec.get("controller_number") not in range(0, 4) + or cdrom_spec.get("unit_number") not in range(0, 30) + ): + self.module.fail_json( + msg="Invalid cdrom.controller_number: %s or cdrom.unit_number: %s," + " valid controller_number value is 0-3, valid unit_number is 0-29" + " for SATA controller." 
+ % ( + cdrom_spec.get("controller_number"), + cdrom_spec.get("unit_number"), + ) + ) ctl_exist = False - for exist_spec in cdroms.get(cdrom_spec['controller_type']): - if exist_spec['num'] == cdrom_spec['controller_number']: + for exist_spec in cdroms.get(cdrom_spec["controller_type"]): + if exist_spec["num"] == cdrom_spec["controller_number"]: ctl_exist = True - exist_spec['cdrom'].append(cdrom_spec) + exist_spec["cdrom"].append(cdrom_spec) break if not ctl_exist: - cdroms.get(cdrom_spec['controller_type']).append({'num': cdrom_spec['controller_number'], 'cdrom': [cdrom_spec]}) + cdroms.get(cdrom_spec["controller_type"]).append( + { + "num": cdrom_spec["controller_number"], + "cdrom": [cdrom_spec], + } + ) return cdroms def configure_cdrom(self, vm_obj): # Configure the VM CD-ROM - if self.params.get('cdrom'): + if self.params.get("cdrom"): if vm_obj and vm_obj.config.template: # Changing CD-ROM settings on a template is not supported return - if isinstance(self.params.get('cdrom'), dict): + if isinstance(self.params.get("cdrom"), dict): self.configure_cdrom_dict(vm_obj) - elif isinstance(self.params.get('cdrom'), list): + elif isinstance(self.params.get("cdrom"), list): self.configure_cdrom_list(vm_obj) def configure_cdrom_dict(self, vm_obj): - if self.params["cdrom"].get('type') not in ['none', 'client', 'iso']: - self.module.fail_json(msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'.") - if self.params["cdrom"]['type'] == 'iso' and not self.params["cdrom"].get('iso_path'): - self.module.fail_json(msg="cdrom.iso_path is mandatory when cdrom.type is set to iso.") + if self.params["cdrom"].get("type") not in ["none", "client", "iso"]: + self.module.fail_json( + msg="cdrom.type is mandatory. Options are 'none', 'client', and 'iso'." + ) + if self.params["cdrom"]["type"] == "iso" and not self.params[ + "cdrom" + ].get("iso_path"): + self.module.fail_json( + msg="cdrom.iso_path is mandatory when cdrom.type is set to iso." 
+ ) cdrom_spec = None cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj) @@ -1180,19 +1468,41 @@ def configure_cdrom_dict(self, vm_obj): else: ide_device = ide_devices[0] if len(ide_device.device) > 3: - self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4" - " IDE devices of which none are a cdrom") - - cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=self.params["cdrom"]["type"], - iso_path=iso_path) - if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: - cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none") + self.module.fail_json( + msg="hardware.cdrom specified for a VM or template which already has 4" + " IDE devices of which none are a cdrom" + ) + + cdrom_spec = self.device_helper.create_cdrom( + ide_device=ide_device, + cdrom_type=self.params["cdrom"]["type"], + iso_path=iso_path, + ) + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + ): + cdrom_spec.device.connectable.connected = ( + self.params["cdrom"]["type"] != "none" + ) - elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_devices[0], - cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path): - self.device_helper.update_cdrom_config(vm_obj, self.params["cdrom"], cdrom_devices[0], iso_path=iso_path) + elif not self.device_helper.is_equal_cdrom( + vm_obj=vm_obj, + cdrom_device=cdrom_devices[0], + cdrom_type=self.params["cdrom"]["type"], + iso_path=iso_path, + ): + self.device_helper.update_cdrom_config( + vm_obj, + self.params["cdrom"], + cdrom_devices[0], + iso_path=iso_path, + ) cdrom_spec = vim.vm.device.VirtualDeviceSpec() - cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + cdrom_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) cdrom_spec.device = cdrom_devices[0] if cdrom_spec: @@ -1203,58 +1513,94 @@ def configure_cdrom_list(self, vm_obj): configured_cdroms = self.sanitize_cdrom_params() cdrom_devices = self.get_vm_cdrom_devices(vm=vm_obj) # configure IDE CD-ROMs - if configured_cdroms['ide']: + if configured_cdroms["ide"]: ide_devices = self.get_vm_ide_devices(vm=vm_obj) - for expected_cdrom_spec in configured_cdroms['ide']: + for expected_cdrom_spec in configured_cdroms["ide"]: ide_device = None for device in ide_devices: - if device.busNumber == expected_cdrom_spec['num']: + if device.busNumber == expected_cdrom_spec["num"]: ide_device = device break # if not find the matched ide controller or no existing ide controller if not ide_device: - ide_ctl = self.device_helper.create_ide_controller(bus_number=expected_cdrom_spec['num']) + ide_ctl = self.device_helper.create_ide_controller( + bus_number=expected_cdrom_spec["num"] + ) ide_device = ide_ctl.device self.change_detected = True self.configspec.deviceChange.append(ide_ctl) - for cdrom in expected_cdrom_spec['cdrom']: + for cdrom in expected_cdrom_spec["cdrom"]: cdrom_device = None - iso_path = cdrom.get('iso_path') - unit_number = cdrom.get('unit_number') + iso_path = cdrom.get("iso_path") + unit_number = cdrom.get("unit_number") for target_cdrom in cdrom_devices: - if target_cdrom.controllerKey == ide_device.key and target_cdrom.unitNumber == unit_number: + if ( + target_cdrom.controllerKey == ide_device.key + and target_cdrom.unitNumber == unit_number + ): cdrom_device = target_cdrom break # create new CD-ROM - if not cdrom_device and cdrom.get('state') != 'absent': - if vm_obj and vm_obj.runtime.powerState == 
vim.VirtualMachinePowerState.poweredOn: - self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-add.') + if not cdrom_device and cdrom.get("state") != "absent": + if ( + vm_obj + and vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOn + ): + self.module.fail_json( + msg="CD-ROM attach to IDE controller not support hot-add." + ) if len(ide_device.device) == 2: - self.module.fail_json(msg='Maximum number of CD-ROMs attached to IDE controller is 2.') - cdrom_spec = self.device_helper.create_cdrom(ide_device=ide_device, cdrom_type=cdrom['type'], - iso_path=iso_path, unit_number=unit_number) + self.module.fail_json( + msg="Maximum number of CD-ROMs attached to IDE controller is 2." + ) + cdrom_spec = self.device_helper.create_cdrom( + ide_device=ide_device, + cdrom_type=cdrom["type"], + iso_path=iso_path, + unit_number=unit_number, + ) self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) # re-configure CD-ROM - elif cdrom_device and cdrom.get('state') != 'absent' and \ - not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device, - cdrom_type=cdrom['type'], iso_path=iso_path): - self.device_helper.update_cdrom_config(vm_obj, cdrom, cdrom_device, iso_path=iso_path) + elif ( + cdrom_device + and cdrom.get("state") != "absent" + and not self.device_helper.is_equal_cdrom( + vm_obj=vm_obj, + cdrom_device=cdrom_device, + cdrom_type=cdrom["type"], + iso_path=iso_path, + ) + ): + self.device_helper.update_cdrom_config( + vm_obj, cdrom, cdrom_device, iso_path=iso_path + ) cdrom_spec = vim.vm.device.VirtualDeviceSpec() - cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + cdrom_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) cdrom_spec.device = cdrom_device self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) # delete CD-ROM - elif cdrom_device and cdrom.get('state') == 'absent': - if vm_obj and vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff: - self.module.fail_json(msg='CD-ROM attach to IDE controller not support hot-remove.') - cdrom_spec = self.device_helper.remove_cdrom(cdrom_device) + elif cdrom_device and cdrom.get("state") == "absent": + if ( + vm_obj + and vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOff + ): + self.module.fail_json( + msg="CD-ROM attach to IDE controller not support hot-remove." 
+ ) + cdrom_spec = self.device_helper.remove_cdrom( + cdrom_device + ) self.change_detected = True self.configspec.deviceChange.append(cdrom_spec) # configure SATA CD-ROMs is not supported yet - if configured_cdroms['sata']: + if configured_cdroms["sata"]: pass def configure_hardware_params(self, vm_obj): @@ -1263,29 +1609,47 @@ def configure_hardware_params(self, vm_obj): Args: vm_obj: virtual machine object """ - if 'hardware' in self.params: - if 'max_connections' in self.params['hardware']: + if "hardware" in self.params: + if "max_connections" in self.params["hardware"]: # maxMksConnections == max_connections - self.configspec.maxMksConnections = int(self.params['hardware']['max_connections']) - if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections: + self.configspec.maxMksConnections = int( + self.params["hardware"]["max_connections"] + ) + if ( + vm_obj is None + or self.configspec.maxMksConnections + != vm_obj.config.maxMksConnections + ): self.change_detected = True - if 'nested_virt' in self.params['hardware']: - self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt']) - if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled): + if "nested_virt" in self.params["hardware"]: + self.configspec.nestedHVEnabled = bool( + self.params["hardware"]["nested_virt"] + ) + if vm_obj is None or self.configspec.nestedHVEnabled != bool( + vm_obj.config.nestedHVEnabled + ): self.change_detected = True - if 'version' in self.params['hardware']: + if "version" in self.params["hardware"]: hw_version_check_failed = False - temp_version = self.params['hardware'].get('version', 10) - if isinstance(temp_version, str) and temp_version.lower() == 'latest': + temp_version = self.params["hardware"].get("version", 10) + if ( + isinstance(temp_version, str) + and temp_version.lower() == "latest" + ): # Check is to make sure vm_obj is not of type template if vm_obj and not vm_obj.config.template: try: task = vm_obj.UpgradeVM_Task() self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "upgrade", + } except vim.fault.AlreadyUpgraded: # Don't fail if VM is already upgraded. pass @@ -1299,12 +1663,18 @@ def configure_hardware_params(self, vm_obj): hw_version_check_failed = True if hw_version_check_failed: - self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid" - " values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version) + self.module.fail_json( + msg="Failed to set hardware.version '%s' value as valid" + " values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." 
+ % temp_version + ) # Hardware version is denoted as "vmx-10" version = "vmx-%02d" % temp_version self.configspec.version = version - if vm_obj is None or self.configspec.version != vm_obj.config.version: + if ( + vm_obj is None + or self.configspec.version != vm_obj.config.version + ): self.change_detected = True # Check is to make sure vm_obj is not of type template if vm_obj and not vm_obj.config.template: @@ -1313,46 +1683,83 @@ def configure_hardware_params(self, vm_obj): # current_version = "vmx-10" version_digit = int(current_version.split("-", 1)[-1]) if temp_version < version_digit: - self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified" - " version '%d'. Downgrading hardware version is" - " not supported. Please specify version greater" - " than the current version." % (version_digit, - temp_version)) + self.module.fail_json( + msg="Current hardware version '%d' which is greater than the specified" + " version '%d'. Downgrading hardware version is" + " not supported. Please specify version greater" + " than the current version." + % (version_digit, temp_version) + ) new_version = "vmx-%02d" % temp_version try: task = vm_obj.UpgradeVM_Task(new_version) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "upgrade", + } except vim.fault.AlreadyUpgraded: # Don't fail if VM is already upgraded. pass - if 'virt_based_security' in self.params['hardware']: - host_version = self.select_host().summary.config.product.version - if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7): - self.module.fail_json(msg="ESXi version %s not support VBS." % host_version) - guest_ids = ['windows9_64Guest', 'windows9Server64Guest'] + if "virt_based_security" in self.params["hardware"]: + host_version = ( + self.select_host().summary.config.product.version + ) + if int(host_version.split(".")[0]) < 6 or ( + int(host_version.split(".")[0]) == 6 + and int(host_version.split(".")[1]) < 7 + ): + self.module.fail_json( + msg="ESXi version %s not support VBS." % host_version + ) + guest_ids = ["windows9_64Guest", "windows9Server64Guest"] if vm_obj is None: guestid = self.configspec.guestId else: guestid = vm_obj.summary.config.guestId if guestid not in guest_ids: - self.module.fail_json(msg="Guest '%s' not support VBS." % guestid) - if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \ - (vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)): + self.module.fail_json( + msg="Guest '%s' not support VBS." 
% guestid + ) + if ( + vm_obj is None + and int(self.configspec.version.split("-")[1]) >= 14 + ) or ( + vm_obj + and int(vm_obj.config.version.split("-")[1]) >= 14 + and ( + vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOff + ) + ): self.configspec.flags = vim.vm.FlagInfo() - self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security']) - if bool(self.params['hardware']['virt_based_security']): + self.configspec.flags.vbsEnabled = bool( + self.params["hardware"]["virt_based_security"] + ) + if bool(self.params["hardware"]["virt_based_security"]): self.configspec.flags.vvtdEnabled = True self.configspec.nestedHVEnabled = True - if (vm_obj is None and self.configspec.firmware == 'efi') or \ - (vm_obj and vm_obj.config.firmware == 'efi'): + if ( + vm_obj is None + and self.configspec.firmware == "efi" + ) or (vm_obj and vm_obj.config.firmware == "efi"): self.configspec.bootOptions = vim.vm.BootOptions() - self.configspec.bootOptions.efiSecureBootEnabled = True + self.configspec.bootOptions.efiSecureBootEnabled = ( + True + ) else: - self.module.fail_json(msg="Not support VBS when firmware is BIOS.") - if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled: + self.module.fail_json( + msg="Not support VBS when firmware is BIOS." + ) + if ( + vm_obj is None + or self.configspec.flags.vbsEnabled + != vm_obj.config.flags.vbsEnabled + ): self.change_detected = True def get_device_by_type(self, vm=None, type=None): @@ -1369,16 +1776,23 @@ def get_vm_cdrom_devices(self, vm=None): return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom) def get_vm_ide_devices(self, vm=None): - return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController) + return self.get_device_by_type( + vm=vm, type=vim.vm.device.VirtualIDEController + ) def get_vm_network_interfaces(self, vm=None): device_list = [] if vm is None: return device_list - nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2, - vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000, - vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard) + nw_device_types = ( + vim.vm.device.VirtualPCNet32, + vim.vm.device.VirtualVmxnet2, + vim.vm.device.VirtualVmxnet3, + vim.vm.device.VirtualE1000, + vim.vm.device.VirtualE1000e, + vim.vm.device.VirtualSriovEthernetCard, + ) for device in vm.config.hardware.device: if isinstance(device, nw_device_types): device_list.append(device) @@ -1394,67 +1808,113 @@ def sanitize_network_params(self): """ network_devices = list() # Clean up user data here - for network in self.params['networks']: - if 'name' not in network and 'vlan' not in network: - self.module.fail_json(msg="Please specify at least a network name or" - " a VLAN name under VM network list.") - - if 'name' in network and self.cache.get_network(network['name']) is None: - self.module.fail_json(msg="Network '%(name)s' does not exist." % network) - elif 'vlan' in network: - dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup]) + for network in self.params["networks"]: + if "name" not in network and "vlan" not in network: + self.module.fail_json( + msg="Please specify at least a network name or" + " a VLAN name under VM network list." + ) + + if ( + "name" in network + and self.cache.get_network(network["name"]) is None + ): + self.module.fail_json( + msg="Network '%(name)s' does not exist." 
% network + ) + elif "vlan" in network: + dvps = self.cache.get_all_objs( + self.content, [vim.dvs.DistributedVirtualPortgroup] + ) for dvp in dvps: - if hasattr(dvp.config.defaultPortConfig, 'vlan') and \ - isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \ - str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']): - network['name'] = dvp.config.name + if ( + hasattr(dvp.config.defaultPortConfig, "vlan") + and isinstance( + dvp.config.defaultPortConfig.vlan.vlanId, int + ) + and str(dvp.config.defaultPortConfig.vlan.vlanId) + == str(network["vlan"]) + ): + network["name"] = dvp.config.name break - if 'dvswitch_name' in network and \ - dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \ - dvp.config.name == network['vlan']: - network['name'] = dvp.config.name + if ( + "dvswitch_name" in network + and dvp.config.distributedVirtualSwitch.name + == network["dvswitch_name"] + and dvp.config.name == network["vlan"] + ): + network["name"] = dvp.config.name break - if dvp.config.name == network['vlan']: - network['name'] = dvp.config.name + if dvp.config.name == network["vlan"]: + network["name"] = dvp.config.name break else: - self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network) - - if 'type' in network: - if network['type'] not in ['dhcp', 'static']: - self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter." - " Valid parameters are ['dhcp', 'static']." % network) - if network['type'] != 'static' and ('ip' in network or 'netmask' in network): - self.module.fail_json(msg='Static IP information provided for network "%(name)s",' - ' but "type" is set to "%(type)s".' % network) + self.module.fail_json( + msg="VLAN '%(vlan)s' does not exist." % network + ) + + if "type" in network: + if network["type"] not in ["dhcp", "static"]: + self.module.fail_json( + msg="Network type '%(type)s' is not a valid parameter." + " Valid parameters are ['dhcp', 'static']." % network + ) + if network["type"] != "static" and ( + "ip" in network or "netmask" in network + ): + self.module.fail_json( + msg='Static IP information provided for network "%(name)s",' + ' but "type" is set to "%(type)s".' % network + ) else: # Type is optional parameter, if user provided IP or Subnet assume # network type as 'static' - if 'ip' in network or 'netmask' in network: - network['type'] = 'static' + if "ip" in network or "netmask" in network: + network["type"] = "static" else: # User wants network type as 'dhcp' - network['type'] = 'dhcp' - - if network.get('type') == 'static': - if 'ip' in network and 'netmask' not in network: - self.module.fail_json(msg="'netmask' is required if 'ip' is" - " specified under VM network list.") - if 'ip' not in network and 'netmask' in network: - self.module.fail_json(msg="'ip' is required if 'netmask' is" - " specified under VM network list.") - - validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov'] - if 'device_type' in network and network['device_type'] not in validate_device_types: - self.module.fail_json(msg="Device type specified '%s' is not valid." - " Please specify correct device" - " type from ['%s']." % (network['device_type'], - "', '".join(validate_device_types))) - - if 'mac' in network and not is_mac(network['mac']): - self.module.fail_json(msg="Device MAC address '%s' is invalid." - " Please provide correct MAC address." 
% network['mac']) + network["type"] = "dhcp" + + if network.get("type") == "static": + if "ip" in network and "netmask" not in network: + self.module.fail_json( + msg="'netmask' is required if 'ip' is" + " specified under VM network list." + ) + if "ip" not in network and "netmask" in network: + self.module.fail_json( + msg="'ip' is required if 'netmask' is" + " specified under VM network list." + ) + + validate_device_types = [ + "pcnet32", + "vmxnet2", + "vmxnet3", + "e1000", + "e1000e", + "sriov", + ] + if ( + "device_type" in network + and network["device_type"] not in validate_device_types + ): + self.module.fail_json( + msg="Device type specified '%s' is not valid." + " Please specify correct device" + " type from ['%s']." + % ( + network["device_type"], + "', '".join(validate_device_types), + ) + ) + + if "mac" in network and not is_mac(network["mac"]): + self.module.fail_json( + msg="Device MAC address '%s' is invalid." + " Please provide correct MAC address." % network["mac"] + ) network_devices.append(network) @@ -1462,7 +1922,7 @@ def sanitize_network_params(self): def configure_network(self, vm_obj): # Ignore empty networks, this permits to keep networks when deploying a template/cloning a VM - if len(self.params['networks']) == 0: + if len(self.params["networks"]) == 0: return network_devices = self.sanitize_network_params() @@ -1470,82 +1930,138 @@ def configure_network(self, vm_obj): # List current device for Clone or Idempotency current_net_devices = self.get_vm_network_interfaces(vm=vm_obj) if len(network_devices) < len(current_net_devices): - self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). " - "Removing interfaces is not allowed" - % (len(network_devices), len(current_net_devices))) + self.module.fail_json( + msg="Given network device list is lesser than current VM device list (%d < %d). 
" + "Removing interfaces is not allowed" + % (len(network_devices), len(current_net_devices)) + ) for key in range(0, len(network_devices)): nic_change_detected = False - network_name = network_devices[key]['name'] - if key < len(current_net_devices) and (vm_obj or self.params['template']): + network_name = network_devices[key]["name"] + if key < len(current_net_devices) and ( + vm_obj or self.params["template"] + ): # We are editing existing network devices, this is either when # are cloning from VM or Template nic = vim.vm.device.VirtualDeviceSpec() nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit nic.device = current_net_devices[key] - if ('wake_on_lan' in network_devices[key] and - nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')): - nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan') + if "wake_on_lan" in network_devices[ + key + ] and nic.device.wakeOnLanEnabled != network_devices[key].get( + "wake_on_lan" + ): + nic.device.wakeOnLanEnabled = network_devices[key].get( + "wake_on_lan" + ) nic_change_detected = True - if ('start_connected' in network_devices[key] and - nic.device.connectable.startConnected != network_devices[key].get('start_connected')): - nic.device.connectable.startConnected = network_devices[key].get('start_connected') + if "start_connected" in network_devices[ + key + ] and nic.device.connectable.startConnected != network_devices[ + key + ].get( + "start_connected" + ): + nic.device.connectable.startConnected = network_devices[ + key + ].get("start_connected") nic_change_detected = True - if ('allow_guest_control' in network_devices[key] and - nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')): - nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control') + if "allow_guest_control" in network_devices[ + key + ] and nic.device.connectable.allowGuestControl != network_devices[ + key + ].get( + "allow_guest_control" + ): + nic.device.connectable.allowGuestControl = network_devices[ + key + ].get("allow_guest_control") nic_change_detected = True if nic.device.deviceInfo.summary != network_name: nic.device.deviceInfo.summary = network_name nic_change_detected = True - if 'device_type' in network_devices[key]: - device = self.device_helper.get_device(network_devices[key]['device_type'], network_name) + if "device_type" in network_devices[key]: + device = self.device_helper.get_device( + network_devices[key]["device_type"], network_name + ) device_class = type(device) if not isinstance(nic.device, device_class): - self.module.fail_json(msg="Changing the device type is not possible when interface is already present. " - "The failing device type is %s" % network_devices[key]['device_type']) + self.module.fail_json( + msg="Changing the device type is not possible when interface is already present. " + "The failing device type is %s" + % network_devices[key]["device_type"] + ) # Changing mac address has no effect when editing interface - if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress: - self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. " - "The failing new MAC address is %s" % nic.device.macAddress) + if ( + "mac" in network_devices[key] + and nic.device.macAddress + != current_net_devices[key].macAddress + ): + self.module.fail_json( + msg="Changing MAC address has not effect when interface is already present. 
" + "The failing new MAC address is %s" + % nic.device.macAddress + ) else: # Default device type is vmxnet3, VMware best practice - device_type = network_devices[key].get('device_type', 'vmxnet3') - nic = self.device_helper.create_nic(device_type, - 'Network Adapter %s' % (key + 1), - network_devices[key]) + device_type = network_devices[key].get( + "device_type", "vmxnet3" + ) + nic = self.device_helper.create_nic( + device_type, + "Network Adapter %s" % (key + 1), + network_devices[key], + ) nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add nic_change_detected = True - if hasattr(self.cache.get_network(network_name), 'portKeys'): + if hasattr(self.cache.get_network(network_name), "portKeys"): # VDS switch pg_obj = None - if 'dvswitch_name' in network_devices[key]: - dvs_name = network_devices[key]['dvswitch_name'] + if "dvswitch_name" in network_devices[key]: + dvs_name = network_devices[key]["dvswitch_name"] dvs_obj = find_dvs_by_name(self.content, dvs_name) if dvs_obj is None: - self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name) + self.module.fail_json( + msg="Unable to find distributed virtual switch %s" + % dvs_name + ) pg_obj = find_dvspg_by_name(dvs_obj, network_name) if pg_obj is None: - self.module.fail_json(msg="Unable to find distributed port group %s" % network_name) + self.module.fail_json( + msg="Unable to find distributed port group %s" + % network_name + ) else: - pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name) + pg_obj = self.cache.find_obj( + self.content, + [vim.dvs.DistributedVirtualPortgroup], + network_name, + ) # TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup # For now, check if we are able to find distributed virtual switch if not pg_obj.config.distributedVirtualSwitch: - self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with" - " distributed virtual portgroup '%s'. Make sure hostsystem is associated with" - " the given distributed virtual portgroup. Also, check if user has correct" - " permission to access distributed virtual switch in the given portgroup." % pg_obj.name) - if (nic.device.backing and - (not hasattr(nic.device.backing, 'port') or - (nic.device.backing.port.portgroupKey != pg_obj.key or - nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))): + self.module.fail_json( + msg="Failed to find distributed virtual switch which is associated with" + " distributed virtual portgroup '%s'. Make sure hostsystem is associated with" + " the given distributed virtual portgroup. Also, check if user has correct" + " permission to access distributed virtual switch in the given portgroup." + % pg_obj.name + ) + if nic.device.backing and ( + not hasattr(nic.device.backing, "port") + or ( + nic.device.backing.port.portgroupKey != pg_obj.key + or nic.device.backing.port.switchUuid + != pg_obj.config.distributedVirtualSwitch.uuid + ) + ): nic_change_detected = True dvs_port_connection = vim.dvs.PortConnection() @@ -1553,27 +2069,50 @@ def configure_network(self, vm_obj): # If user specifies distributed port group without associating to the hostsystem on which # virtual machine is going to be deployed then we get error. We can infer that there is no # association between given distributed port group and host system. 
- host_system = self.params.get('esxi_hostname') - if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]: - self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed" - " virtual portgroup '%s'. Please make sure host system is associated" - " with given distributed virtual portgroup" % (host_system, pg_obj.name)) - dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid - nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + host_system = self.params.get("esxi_hostname") + if host_system and host_system not in [ + host.config.host.name + for host in pg_obj.config.distributedVirtualSwitch.config.host + ]: + self.module.fail_json( + msg="It seems that host system '%s' is not associated with distributed" + " virtual portgroup '%s'. Please make sure host system is associated" + " with given distributed virtual portgroup" + % (host_system, pg_obj.name) + ) + dvs_port_connection.switchUuid = ( + pg_obj.config.distributedVirtualSwitch.uuid + ) + nic.device.backing = ( + vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + ) nic.device.backing.port = dvs_port_connection - elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork): + elif isinstance( + self.cache.get_network(network_name), vim.OpaqueNetwork + ): # NSX-T Logical Switch - nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() - network_id = self.cache.get_network(network_name).summary.opaqueNetworkId - nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch' + nic.device.backing = ( + vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() + ) + network_id = self.cache.get_network( + network_name + ).summary.opaqueNetworkId + nic.device.backing.opaqueNetworkType = "nsx.LogicalSwitch" nic.device.backing.opaqueNetworkId = network_id - nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id + nic.device.deviceInfo.summary = ( + "nsx.LogicalSwitch: %s" % network_id + ) nic_change_detected = True else: # vSwitch - if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo): - nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + if not isinstance( + nic.device.backing, + vim.vm.device.VirtualEthernetCard.NetworkBackingInfo, + ): + nic.device.backing = ( + vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + ) nic_change_detected = True net_obj = self.cache.get_network(network_name) @@ -1589,83 +2128,124 @@ def configure_network(self, vm_obj): # Change to fix the issue found while configuring opaque network # VMs cloned from a template with opaque network will get disconnected # Replacing deprecated config parameter with relocation Spec - if isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork): + if isinstance( + self.cache.get_network(network_name), vim.OpaqueNetwork + ): self.relospec.deviceChange.append(nic) else: self.configspec.deviceChange.append(nic) self.change_detected = True def configure_vapp_properties(self, vm_obj): - if len(self.params['vapp_properties']) == 0: + if len(self.params["vapp_properties"]) == 0: return - for x in self.params['vapp_properties']: - if not x.get('id'): - self.module.fail_json(msg="id is required to set vApp property") + for x in self.params["vapp_properties"]: + if not x.get("id"): + self.module.fail_json( + msg="id is required to set vApp property" + ) new_vmconfig_spec = 
vim.vApp.VmConfigSpec() if vm_obj: # VM exists # This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments - orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec - - vapp_properties_current = dict((x.id, x) for x in orig_spec.property) - vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties']) + orig_spec = ( + vm_obj.config.vAppConfig + if vm_obj.config.vAppConfig + else new_vmconfig_spec + ) + + vapp_properties_current = dict( + (x.id, x) for x in orig_spec.property + ) + vapp_properties_to_change = dict( + (x["id"], x) for x in self.params["vapp_properties"] + ) # each property must have a unique key # init key counter with max value + 1 all_keys = [x.key for x in orig_spec.property] new_property_index = max(all_keys) + 1 if all_keys else 0 - for property_id, property_spec in vapp_properties_to_change.items(): + for ( + property_id, + property_spec, + ) in vapp_properties_to_change.items(): is_property_changed = False new_vapp_property_spec = vim.vApp.PropertySpec() if property_id in vapp_properties_current: - if property_spec.get('operation') == 'remove': - new_vapp_property_spec.operation = 'remove' - new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key + if property_spec.get("operation") == "remove": + new_vapp_property_spec.operation = "remove" + new_vapp_property_spec.removeKey = vapp_properties_current[ + property_id + ].key is_property_changed = True else: # this is 'edit' branch - new_vapp_property_spec.operation = 'edit' - new_vapp_property_spec.info = vapp_properties_current[property_id] + new_vapp_property_spec.operation = "edit" + new_vapp_property_spec.info = vapp_properties_current[ + property_id + ] try: - for property_name, property_value in property_spec.items(): + for ( + property_name, + property_value, + ) in property_spec.items(): - if property_name == 'operation': + if property_name == "operation": # operation is not an info object property # if set to anything other than 'remove' we don't fail continue # Updating attributes only if needed - if getattr(new_vapp_property_spec.info, property_name) != property_value: - setattr(new_vapp_property_spec.info, property_name, property_value) + if ( + getattr( + new_vapp_property_spec.info, + property_name, + ) + != property_value + ): + setattr( + new_vapp_property_spec.info, + property_name, + property_value, + ) is_property_changed = True except Exception as e: - msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e)) + msg = ( + "Failed to set vApp property field='%s' and value='%s'. 
Error: %s" + % (property_name, property_value, to_text(e)) + ) self.module.fail_json(msg=msg) else: - if property_spec.get('operation') == 'remove': + if property_spec.get("operation") == "remove": # attempt to delete non-existent property continue # this is add new property branch - new_vapp_property_spec.operation = 'add' + new_vapp_property_spec.operation = "add" property_info = vim.vApp.PropertyInfo() - property_info.classId = property_spec.get('classId') - property_info.instanceId = property_spec.get('instanceId') - property_info.id = property_spec.get('id') - property_info.category = property_spec.get('category') - property_info.label = property_spec.get('label') - property_info.type = property_spec.get('type', 'string') - property_info.userConfigurable = property_spec.get('userConfigurable', True) - property_info.defaultValue = property_spec.get('defaultValue') - property_info.value = property_spec.get('value', '') - property_info.description = property_spec.get('description') + property_info.classId = property_spec.get("classId") + property_info.instanceId = property_spec.get("instanceId") + property_info.id = property_spec.get("id") + property_info.category = property_spec.get("category") + property_info.label = property_spec.get("label") + property_info.type = property_spec.get("type", "string") + property_info.userConfigurable = property_spec.get( + "userConfigurable", True + ) + property_info.defaultValue = property_spec.get( + "defaultValue" + ) + property_info.value = property_spec.get("value", "") + property_info.description = property_spec.get( + "description" + ) new_vapp_property_spec.info = property_info new_vapp_property_spec.info.key = new_property_index @@ -1678,25 +2258,32 @@ def configure_vapp_properties(self, vm_obj): # New VM all_keys = [x.key for x in new_vmconfig_spec.property] new_property_index = max(all_keys) + 1 if all_keys else 0 - vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties']) + vapp_properties_to_change = dict( + (x["id"], x) for x in self.params["vapp_properties"] + ) is_property_changed = False - for property_id, property_spec in vapp_properties_to_change.items(): + for ( + property_id, + property_spec, + ) in vapp_properties_to_change.items(): new_vapp_property_spec = vim.vApp.PropertySpec() # this is add new property branch - new_vapp_property_spec.operation = 'add' + new_vapp_property_spec.operation = "add" property_info = vim.vApp.PropertyInfo() - property_info.classId = property_spec.get('classId') - property_info.instanceId = property_spec.get('instanceId') - property_info.id = property_spec.get('id') - property_info.category = property_spec.get('category') - property_info.label = property_spec.get('label') - property_info.type = property_spec.get('type', 'string') - property_info.userConfigurable = property_spec.get('userConfigurable', True) - property_info.defaultValue = property_spec.get('defaultValue') - property_info.value = property_spec.get('value', '') - property_info.description = property_spec.get('description') + property_info.classId = property_spec.get("classId") + property_info.instanceId = property_spec.get("instanceId") + property_info.id = property_spec.get("id") + property_info.category = property_spec.get("category") + property_info.label = property_spec.get("label") + property_info.type = property_spec.get("type", "string") + property_info.userConfigurable = property_spec.get( + "userConfigurable", True + ) + property_info.defaultValue = property_spec.get("defaultValue") + 
property_info.value = property_spec.get("value", "") + property_info.description = property_spec.get("description") new_vapp_property_spec.info = property_info new_vapp_property_spec.info.key = new_property_index @@ -1711,7 +2298,7 @@ def configure_vapp_properties(self, vm_obj): self.change_detected = True def customize_customvalues(self, vm_obj, config_spec): - if len(self.params['customvalues']) == 0: + if len(self.params["customvalues"]) == 0: return vm_custom_spec = config_spec @@ -1719,15 +2306,20 @@ def customize_customvalues(self, vm_obj, config_spec): changed = False facts = self.gather_facts(vm_obj) - for kv in self.params['customvalues']: - if 'key' not in kv or 'value' not in kv: - self.module.exit_json(msg="customvalues items required both 'key' and 'value' fields.") + for kv in self.params["customvalues"]: + if "key" not in kv or "value" not in kv: + self.module.exit_json( + msg="customvalues items required both 'key' and 'value' fields." + ) # If kv is not kv fetched from facts, change it - if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']: + if ( + kv["key"] not in facts["customvalues"] + or facts["customvalues"][kv["key"]] != kv["value"] + ): option = vim.option.OptionValue() - option.key = kv['key'] - option.value = kv['value'] + option.key = kv["key"] + option.value = kv["value"] vm_custom_spec.extraConfig.append(option) changed = True @@ -1738,7 +2330,7 @@ def customize_customvalues(self, vm_obj, config_spec): def customize_vm(self, vm_obj): # User specified customization specification - custom_spec_name = self.params.get('customization_spec') + custom_spec_name = self.params.get("customization_spec") if custom_spec_name: cc_mgr = self.content.customizationSpecManager if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name): @@ -1746,63 +2338,71 @@ def customize_vm(self, vm_obj): self.customspec = temp_spec.spec return else: - self.module.fail_json(msg="Unable to find customization specification" - " '%s' in given configuration." % custom_spec_name) + self.module.fail_json( + msg="Unable to find customization specification" + " '%s' in given configuration." 
% custom_spec_name + ) # Network settings adaptermaps = [] - for network in self.params['networks']: + for network in self.params["networks"]: guest_map = vim.vm.customization.AdapterMapping() guest_map.adapter = vim.vm.customization.IPSettings() - if 'ip' in network and 'netmask' in network: + if "ip" in network and "netmask" in network: guest_map.adapter.ip = vim.vm.customization.FixedIp() - guest_map.adapter.ip.ipAddress = str(network['ip']) - guest_map.adapter.subnetMask = str(network['netmask']) - elif 'type' in network and network['type'] == 'dhcp': + guest_map.adapter.ip.ipAddress = str(network["ip"]) + guest_map.adapter.subnetMask = str(network["netmask"]) + elif "type" in network and network["type"] == "dhcp": guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator() - if 'gateway' in network: - guest_map.adapter.gateway = network['gateway'] + if "gateway" in network: + guest_map.adapter.gateway = network["gateway"] # On Windows, DNS domain and DNS servers can be set by network interface # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html - if 'domain' in network: - guest_map.adapter.dnsDomain = network['domain'] - elif 'domain' in self.params['customization']: - guest_map.adapter.dnsDomain = self.params['customization']['domain'] - - if 'dns_servers' in network: - guest_map.adapter.dnsServerList = network['dns_servers'] - elif 'dns_servers' in self.params['customization']: - guest_map.adapter.dnsServerList = self.params['customization']['dns_servers'] + if "domain" in network: + guest_map.adapter.dnsDomain = network["domain"] + elif "domain" in self.params["customization"]: + guest_map.adapter.dnsDomain = self.params["customization"][ + "domain" + ] + + if "dns_servers" in network: + guest_map.adapter.dnsServerList = network["dns_servers"] + elif "dns_servers" in self.params["customization"]: + guest_map.adapter.dnsServerList = self.params["customization"][ + "dns_servers" + ] adaptermaps.append(guest_map) # Global DNS settings globalip = vim.vm.customization.GlobalIPSettings() - if 'dns_servers' in self.params['customization']: - globalip.dnsServerList = self.params['customization']['dns_servers'] + if "dns_servers" in self.params["customization"]: + globalip.dnsServerList = self.params["customization"][ + "dns_servers" + ] # TODO: Maybe list the different domains from the interfaces here by default ? 
- if 'dns_suffix' in self.params['customization']: - dns_suffix = self.params['customization']['dns_suffix'] + if "dns_suffix" in self.params["customization"]: + dns_suffix = self.params["customization"]["dns_suffix"] if isinstance(dns_suffix, list): globalip.dnsSuffixList = " ".join(dns_suffix) else: globalip.dnsSuffixList = dns_suffix - elif 'domain' in self.params['customization']: - globalip.dnsSuffixList = self.params['customization']['domain'] + elif "domain" in self.params["customization"]: + globalip.dnsSuffixList = self.params["customization"]["domain"] - if self.params['guest_id']: - guest_id = self.params['guest_id'] + if self.params["guest_id"]: + guest_id = self.params["guest_id"] else: guest_id = vm_obj.summary.config.guestId # For windows guest OS, use SysPrep # https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail - if 'win' in guest_id: + if "win" in guest_id: ident = vim.vm.customization.Sysprep() ident.userData = vim.vm.customization.UserData() @@ -1810,52 +2410,89 @@ def customize_vm(self, vm_obj): # Setting hostName, orgName and fullName is mandatory, so we set some default when missing ident.userData.computerName = vim.vm.customization.FixedName() # computer name will be truncated to 15 characters if using VM name - default_name = self.params['name'].replace(' ', '') - punctuation = string.punctuation.replace('-', '') - default_name = ''.join([c for c in default_name if c not in punctuation]) - ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15])) - ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator')) - ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME')) - - if 'productid' in self.params['customization']: - ident.userData.productId = str(self.params['customization']['productid']) + default_name = self.params["name"].replace(" ", "") + punctuation = string.punctuation.replace("-", "") + default_name = "".join( + [c for c in default_name if c not in punctuation] + ) + ident.userData.computerName.name = str( + self.params["customization"].get( + "hostname", default_name[0:15] + ) + ) + ident.userData.fullName = str( + self.params["customization"].get("fullname", "Administrator") + ) + ident.userData.orgName = str( + self.params["customization"].get("orgname", "ACME") + ) + + if "productid" in self.params["customization"]: + ident.userData.productId = str( + self.params["customization"]["productid"] + ) ident.guiUnattended = vim.vm.customization.GuiUnattended() - if 'autologon' in self.params['customization']: - ident.guiUnattended.autoLogon = self.params['customization']['autologon'] - ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1) + if "autologon" in self.params["customization"]: + ident.guiUnattended.autoLogon = self.params["customization"][ + "autologon" + ] + ident.guiUnattended.autoLogonCount = self.params[ + "customization" + ].get("autologoncount", 1) - if 'timezone' in self.params['customization']: + if "timezone" in self.params["customization"]: # Check if timezone value is a int before proceeding. 
ident.guiUnattended.timeZone = self.device_helper.integer_value( - self.params['customization']['timezone'], - 'customization.timezone') + self.params["customization"]["timezone"], + "customization.timezone", + ) ident.identification = vim.vm.customization.Identification() - if self.params['customization'].get('password', '') != '': + if self.params["customization"].get("password", "") != "": ident.guiUnattended.password = vim.vm.customization.Password() - ident.guiUnattended.password.value = str(self.params['customization']['password']) + ident.guiUnattended.password.value = str( + self.params["customization"]["password"] + ) ident.guiUnattended.password.plainText = True - if 'joindomain' in self.params['customization']: - if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']: - self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use " - "joindomain feature") - - ident.identification.domainAdmin = str(self.params['customization']['domainadmin']) - ident.identification.joinDomain = str(self.params['customization']['joindomain']) - ident.identification.domainAdminPassword = vim.vm.customization.Password() - ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword']) + if "joindomain" in self.params["customization"]: + if ( + "domainadmin" not in self.params["customization"] + or "domainadminpassword" + not in self.params["customization"] + ): + self.module.fail_json( + msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use " + "joindomain feature" + ) + + ident.identification.domainAdmin = str( + self.params["customization"]["domainadmin"] + ) + ident.identification.joinDomain = str( + self.params["customization"]["joindomain"] + ) + ident.identification.domainAdminPassword = ( + vim.vm.customization.Password() + ) + ident.identification.domainAdminPassword.value = str( + self.params["customization"]["domainadminpassword"] + ) ident.identification.domainAdminPassword.plainText = True - elif 'joinworkgroup' in self.params['customization']: - ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup']) + elif "joinworkgroup" in self.params["customization"]: + ident.identification.joinWorkgroup = str( + self.params["customization"]["joinworkgroup"] + ) - if 'runonce' in self.params['customization']: + if "runonce" in self.params["customization"]: ident.guiRunOnce = vim.vm.customization.GuiRunOnce() - ident.guiRunOnce.commandList = self.params['customization']['runonce'] + ident.guiRunOnce.commandList = self.params["customization"][ + "runonce" + ] else: # FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail! @@ -1865,21 +2502,25 @@ def customize_vm(self, vm_obj): ident = vim.vm.customization.LinuxPrep() # TODO: Maybe add domain from interface if missing ? 
- if 'domain' in self.params['customization']: - ident.domain = str(self.params['customization']['domain']) + if "domain" in self.params["customization"]: + ident.domain = str(self.params["customization"]["domain"]) ident.hostName = vim.vm.customization.FixedName() - hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0])) + hostname = str( + self.params["customization"].get( + "hostname", self.params["name"].split(".")[0] + ) + ) # Remove all characters except alphanumeric and minus which is allowed by RFC 952 valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname) ident.hostName.name = valid_hostname # List of supported time zones for different vSphere versions in Linux/Unix systems # https://kb.vmware.com/s/article/2145518 - if 'timezone' in self.params['customization']: - ident.timeZone = str(self.params['customization']['timezone']) - if 'hwclockUTC' in self.params['customization']: - ident.hwClockUTC = self.params['customization']['hwclockUTC'] + if "timezone" in self.params["customization"]: + ident.timeZone = str(self.params["customization"]["timezone"]) + if "hwclockUTC" in self.params["customization"]: + ident.hwClockUTC = self.params["customization"]["hwclockUTC"] self.customspec = vim.vm.customization.Specification() self.customspec.nicSettingMap = adaptermaps @@ -1901,11 +2542,15 @@ def get_vm_scsi_controller(self, vm_obj): def get_configured_disk_size(self, expected_disk_spec): # what size is it? - if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']: + if [ + x + for x in expected_disk_spec.keys() + if x.startswith("size_") or x == "size" + ]: # size, size_tb, size_gb, size_mb, size_kb - if 'size' in expected_disk_spec: - size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])') - disk_size_m = size_regex.match(expected_disk_spec['size']) + if "size" in expected_disk_spec: + size_regex = re.compile(r"(\d+(?:\.\d+)?)([tgmkTGMK][bB])") + disk_size_m = size_regex.match(expected_disk_spec["size"]) try: if disk_size_m: expected = disk_size_m.group(1) @@ -1913,7 +2558,7 @@ def get_configured_disk_size(self, expected_disk_spec): else: raise ValueError - if re.match(r'\d+\.\d+', expected): + if re.match(r"\d+\.\d+", expected): # We found float value in string, let's typecast it expected = float(expected) else: @@ -1925,12 +2570,22 @@ def get_configured_disk_size(self, expected_disk_spec): except (TypeError, ValueError, NameError): # Common failure - self.module.fail_json(msg="Failed to parse disk size please review value" - " provided using documentation.") + self.module.fail_json( + msg="Failed to parse disk size please review value" + " provided using documentation." + ) else: - param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0] - unit = param.split('_')[-1].lower() - expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0] + param = [ + x + for x in expected_disk_spec.keys() + if x.startswith("size_") + ][0] + unit = param.split("_")[-1].lower() + expected = [ + x[1] + for x in expected_disk_spec.items() + if x[0].startswith("size_") + ][0] expected = int(expected) disk_units = dict(tb=3, gb=2, mb=1, kb=0) @@ -1938,22 +2593,29 @@ def get_configured_disk_size(self, expected_disk_spec): unit = unit.lower() return expected * (1024 ** disk_units[unit]) else: - self.module.fail_json(msg="%s is not a supported unit for disk size." - " Supported units are ['%s']." 
% (unit, - "', '".join(disk_units.keys()))) + self.module.fail_json( + msg="%s is not a supported unit for disk size." + " Supported units are ['%s']." + % (unit, "', '".join(disk_units.keys())) + ) # No size found but disk, fail self.module.fail_json( - msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration") + msg="No size, size_kb, size_mb, size_gb or size_tb attribute found into disk configuration" + ) - def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl): + def add_existing_vmdk( + self, vm_obj, expected_disk_spec, diskspec, scsi_ctl + ): """ Adds vmdk file described by expected_disk_spec['filename'], retrieves the file information and adds the correct spec to self.configspec.deviceChange. """ - filename = expected_disk_spec['filename'] + filename = expected_disk_spec["filename"] # If this is a new disk, or the disk file names are different - if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None: + if ( + vm_obj and diskspec.device.backing.fileName != filename + ) or vm_obj is None: diskspec.device.backing.fileName = filename diskspec.device.key = -1 self.change_detected = True @@ -1961,35 +2623,59 @@ def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl): def configure_disks(self, vm_obj): # Ignore empty disk list, this permits to keep disks when deploying a template/cloning a VM - if len(self.params['disk']) == 0: + if len(self.params["disk"]) == 0: return scsi_ctl = self.get_vm_scsi_controller(vm_obj) # Create scsi controller only if we are deploying a new VM, not a template or reconfiguring if vm_obj is None or scsi_ctl is None: - scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type()) + scsi_ctl = self.device_helper.create_scsi_controller( + self.get_scsi_type() + ) self.change_detected = True self.configspec.deviceChange.append(scsi_ctl) - disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \ - if vm_obj is not None else None - - if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks): - self.module.fail_json(msg="Provided disks configuration has less disks than " - "the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks))) + disks = ( + [ + x + for x in vm_obj.config.hardware.device + if isinstance(x, vim.vm.device.VirtualDisk) + ] + if vm_obj is not None + else None + ) + + if ( + disks is not None + and self.params.get("disk") + and len(self.params.get("disk")) < len(disks) + ): + self.module.fail_json( + msg="Provided disks configuration has less disks than " + "the target object (%d vs %d)" + % (len(self.params.get("disk")), len(disks)) + ) disk_index = 0 - for expected_disk_spec in self.params.get('disk'): + for expected_disk_spec in self.params.get("disk"): disk_modified = False # If we are manipulating and existing objects which has disks and disk_index is in disks - if vm_obj is not None and disks is not None and disk_index < len(disks): + if ( + vm_obj is not None + and disks is not None + and disk_index < len(disks) + ): diskspec = vim.vm.device.VirtualDeviceSpec() # set the operation to edit so that it knows to keep other settings - diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + diskspec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) diskspec.device = disks[disk_index] else: - diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index) + diskspec = self.device_helper.create_scsi_disk( + 
scsi_ctl, disk_index + ) disk_modified = True # increment index for next disk search @@ -1998,38 +2684,56 @@ def configure_disks(self, vm_obj): if disk_index == 7: disk_index += 1 - if 'disk_mode' in expected_disk_spec: - disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower() - valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent'] + if "disk_mode" in expected_disk_spec: + disk_mode = expected_disk_spec.get( + "disk_mode", "persistent" + ).lower() + valid_disk_mode = [ + "persistent", + "independent_persistent", + "independent_nonpersistent", + ] if disk_mode not in valid_disk_mode: - self.module.fail_json(msg="disk_mode specified is not valid." - " Should be one of ['%s']" % "', '".join(valid_disk_mode)) - - if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None): + self.module.fail_json( + msg="disk_mode specified is not valid." + " Should be one of ['%s']" + % "', '".join(valid_disk_mode) + ) + + if ( + vm_obj and diskspec.device.backing.diskMode != disk_mode + ) or (vm_obj is None): diskspec.device.backing.diskMode = disk_mode disk_modified = True else: diskspec.device.backing.diskMode = "persistent" # is it thin? - if 'type' in expected_disk_spec: - disk_type = expected_disk_spec.get('type', '').lower() - if disk_type == 'thin': + if "type" in expected_disk_spec: + disk_type = expected_disk_spec.get("type", "").lower() + if disk_type == "thin": diskspec.device.backing.thinProvisioned = True - elif disk_type == 'eagerzeroedthick': + elif disk_type == "eagerzeroedthick": diskspec.device.backing.eagerlyScrub = True - if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None: - self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl) + if ( + "filename" in expected_disk_spec + and expected_disk_spec["filename"] is not None + ): + self.add_existing_vmdk( + vm_obj, expected_disk_spec, diskspec, scsi_ctl + ) continue - elif vm_obj is None or self.params['template']: + elif vm_obj is None or self.params["template"]: # We are creating new VM or from Template # Only create virtual device if not backed by vmdk in original template - if diskspec.device.backing.fileName == '': - diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create + if diskspec.device.backing.fileName == "": + diskspec.fileOperation = ( + vim.vm.device.VirtualDeviceSpec.FileOperation.create + ) # which datastore? - if expected_disk_spec.get('datastore'): + if expected_disk_spec.get("datastore"): # TODO: This is already handled by the relocation spec, # but it needs to eventually be handled for all the # other disks defined @@ -2039,8 +2743,9 @@ def configure_disks(self, vm_obj): # VMware doesn't allow to reduce disk sizes if kb < diskspec.device.capacityInKB: self.module.fail_json( - msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." % - (kb, diskspec.device.capacityInKB)) + msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." 
+ % (kb, diskspec.device.capacityInKB) + ) if kb != diskspec.device.capacityInKB or disk_modified: diskspec.device.capacityInKB = kb @@ -2049,11 +2754,19 @@ def configure_disks(self, vm_obj): self.change_detected = True def select_host(self): - hostsystem = self.cache.get_esx_host(self.params['esxi_hostname']) + hostsystem = self.cache.get_esx_host(self.params["esxi_hostname"]) if not hostsystem: - self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params) - if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode: - self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' % self.params) + self.module.fail_json( + msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params + ) + if ( + hostsystem.runtime.connectionState != "connected" + or hostsystem.runtime.inMaintenanceMode + ): + self.module.fail_json( + msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' + % self.params + ) return hostsystem def autoselect_datastore(self): @@ -2061,7 +2774,9 @@ def autoselect_datastore(self): datastores = self.cache.get_all_objs(self.content, [vim.Datastore]) if datastores is None or len(datastores) == 0: - self.module.fail_json(msg="Unable to find a datastore list when autoselecting") + self.module.fail_json( + msg="Unable to find a datastore list when autoselecting" + ) datastore_freespace = 0 for ds in datastores: @@ -2086,17 +2801,21 @@ def get_recommended_datastore(self, datastore_cluster_obj=None): if datastore_cluster_obj is None: return None # Check if Datastore Cluster provided by user is SDRS ready - sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + sdrs_status = ( + datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + ) if sdrs_status: # We can get storage recommendation only if SDRS is enabled on given datastorage cluster pod_sel_spec = vim.storageDrs.PodSelectionSpec() pod_sel_spec.storagePod = datastore_cluster_obj storage_spec = vim.storageDrs.StoragePlacementSpec() storage_spec.podSelectionSpec = pod_sel_spec - storage_spec.type = 'create' + storage_spec.type = "create" try: - rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) + rec = self.content.storageResourceManager.RecommendDatastores( + storageSpec=storage_spec + ) rec_action = rec.recommendations[0].action[0] return rec_action.destination.name except Exception: @@ -2105,7 +2824,10 @@ def get_recommended_datastore(self, datastore_cluster_obj=None): datastore = None datastore_freespace = 0 for ds in datastore_cluster_obj.childEntity: - if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace: + if ( + isinstance(ds, vim.Datastore) + and ds.summary.freeSpace > datastore_freespace + ): # If datastore field is provided, filter destination datastores if not self.is_datastore_valid(datastore_obj=ds): continue @@ -2120,75 +2842,136 @@ def select_datastore(self, vm_obj=None): datastore = None datastore_name = None - if len(self.params['disk']) != 0: + if len(self.params["disk"]) != 0: # TODO: really use the datastore for newly created disks - if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']: + if ( + "autoselect_datastore" in self.params["disk"][0] + and self.params["disk"][0]["autoselect_datastore"] + ): datastores = [] - if self.params['cluster']: - cluster = self.find_cluster_by_name(self.params['cluster'], self.content) + if 
self.params["cluster"]: + cluster = self.find_cluster_by_name( + self.params["cluster"], self.content + ) for host in cluster.host: - for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo: + for ( + mi + ) in ( + host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo + ): if mi.volume.type == "VMFS": - datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name)) - elif self.params['esxi_hostname']: - host = self.find_hostsystem_by_name(self.params['esxi_hostname']) - - for mi in host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo: + datastores.append( + self.cache.find_obj( + self.content, + [vim.Datastore], + mi.volume.name, + ) + ) + elif self.params["esxi_hostname"]: + host = self.find_hostsystem_by_name( + self.params["esxi_hostname"] + ) + + for ( + mi + ) in ( + host.configManager.storageSystem.fileSystemVolumeInfo.mountInfo + ): if mi.volume.type == "VMFS": - datastores.append(self.cache.find_obj(self.content, [vim.Datastore], mi.volume.name)) + datastores.append( + self.cache.find_obj( + self.content, + [vim.Datastore], + mi.volume.name, + ) + ) else: - datastores = self.cache.get_all_objs(self.content, [vim.Datastore]) - datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']] + datastores = self.cache.get_all_objs( + self.content, [vim.Datastore] + ) + datastores = [ + x + for x in datastores + if self.cache.get_parent_datacenter(x).name + == self.params["datacenter"] + ] datastore_freespace = 0 for ds in datastores: if not self.is_datastore_valid(datastore_obj=ds): continue - if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore): + if (ds.summary.freeSpace > datastore_freespace) or ( + ds.summary.freeSpace == datastore_freespace + and not datastore + ): # If datastore field is provided, filter destination datastores - if 'datastore' in self.params['disk'][0] and \ - isinstance(self.params['disk'][0]['datastore'], str) and \ - ds.name.find(self.params['disk'][0]['datastore']) < 0: + if ( + "datastore" in self.params["disk"][0] + and isinstance( + self.params["disk"][0]["datastore"], str + ) + and ds.name.find( + self.params["disk"][0]["datastore"] + ) + < 0 + ): continue datastore = ds datastore_name = datastore.name datastore_freespace = ds.summary.freeSpace - elif 'datastore' in self.params['disk'][0]: - datastore_name = self.params['disk'][0]['datastore'] + elif "datastore" in self.params["disk"][0]: + datastore_name = self.params["disk"][0]["datastore"] # Check if user has provided datastore cluster first - datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name) + datastore_cluster = self.cache.find_obj( + self.content, [vim.StoragePod], datastore_name + ) if datastore_cluster: # If user specified datastore cluster so get recommended datastore - datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) + datastore_name = self.get_recommended_datastore( + datastore_cluster_obj=datastore_cluster + ) # Check if get_recommended_datastore or user specified datastore exists or not - datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name) + datastore = self.cache.find_obj( + self.content, [vim.Datastore], datastore_name + ) else: - self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore") + self.module.fail_json( + msg="Either datastore or autoselect_datastore should be 
provided to select datastore" + ) - if not datastore and self.params['template']: + if not datastore and self.params["template"]: # use the template's existing DS - disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] + disks = [ + x + for x in vm_obj.config.hardware.device + if isinstance(x, vim.vm.device.VirtualDisk) + ] if disks: datastore = disks[0].backing.datastore datastore_name = datastore.name # validation if datastore: dc = self.cache.get_parent_datacenter(datastore) - if dc.name != self.params['datacenter']: + if dc.name != self.params["datacenter"]: datastore = self.autoselect_datastore() datastore_name = datastore.name if not datastore: - if len(self.params['disk']) != 0 or self.params['template'] is None: - self.module.fail_json(msg="Unable to find the datastore with given parameters." - " This could mean, %s is a non-existent virtual machine and module tried to" - " deploy it as new virtual machine with no disk. Please specify disks parameter" - " or specify template to clone from." % self.params['name']) + if ( + len(self.params["disk"]) != 0 + or self.params["template"] is None + ): + self.module.fail_json( + msg="Unable to find the datastore with given parameters." + " This could mean, %s is a non-existent virtual machine and module tried to" + " deploy it as new virtual machine with no disk. Please specify disks parameter" + " or specify template to clone from." % self.params["name"] + ) self.module.fail_json(msg="Failed to find a matching datastore") return datastore, datastore_name @@ -2204,7 +2987,7 @@ def obj_has_parent(self, obj, parent): # Check if we have reached till root folder moid = current_parent._moId - if moid in ['group-d1', 'ha-folder-root']: + if moid in ["group-d1", "ha-folder-root"]: return False current_parent = current_parent.parent @@ -2214,19 +2997,26 @@ def obj_has_parent(self, obj, parent): def get_scsi_type(self): disk_controller_type = "paravirtual" # set cpu/memory/etc - if 'hardware' in self.params: - if 'scsi' in self.params['hardware']: - if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']: - disk_controller_type = self.params['hardware']['scsi'] + if "hardware" in self.params: + if "scsi" in self.params["hardware"]: + if self.params["hardware"]["scsi"] in [ + "buslogic", + "paravirtual", + "lsilogic", + "lsilogicsas", + ]: + disk_controller_type = self.params["hardware"]["scsi"] else: - self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'") + self.module.fail_json( + msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'" + ) return disk_controller_type def find_folder(self, searchpath): """ Walk inventory objects one position of the searchpath at a time """ # split the searchpath so we can iterate through it - paths = [x.replace('/', '') for x in searchpath.split('/')] + paths = [x.replace("/", "") for x in searchpath.split("/")] paths_total = len(paths) - 1 position = 0 @@ -2234,7 +3024,7 @@ def find_folder(self, searchpath): root = self.content.rootFolder while root and position <= paths_total: change = False - if hasattr(root, 'childEntity'): + if hasattr(root, "childEntity"): for child in root.childEntity: if child.name == paths[position]: root = child @@ -2242,7 +3032,7 @@ def find_folder(self, searchpath): change = True break elif isinstance(root, vim.Datacenter): - if hasattr(root, 'vmFolder'): + if hasattr(root, "vmFolder"): if root.vmFolder.name == paths[position]: root = root.vmFolder position += 1 
@@ -2258,36 +3048,64 @@ def find_folder(self, searchpath): def get_resource_pool(self, cluster=None, host=None, resource_pool=None): """ Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """ - cluster_name = cluster or self.params.get('cluster', None) - host_name = host or self.params.get('esxi_hostname', None) - resource_pool_name = resource_pool or self.params.get('resource_pool', None) + cluster_name = cluster or self.params.get("cluster", None) + host_name = host or self.params.get("esxi_hostname", None) + resource_pool_name = resource_pool or self.params.get( + "resource_pool", None + ) # get the datacenter object - datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter']) + datacenter = find_obj( + self.content, [vim.Datacenter], self.params["datacenter"] + ) if not datacenter: - self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter']) + self.module.fail_json( + msg='Unable to find datacenter "%s"' + % self.params["datacenter"] + ) # if cluster is given, get the cluster object if cluster_name: - cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter) + cluster = find_obj( + self.content, + [vim.ComputeResource], + cluster_name, + folder=datacenter, + ) if not cluster: - self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name) + self.module.fail_json( + msg='Unable to find cluster "%s"' % cluster_name + ) # if host is given, get the cluster object using the host elif host_name: - host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter) + host = find_obj( + self.content, [vim.HostSystem], host_name, folder=datacenter + ) if not host: - self.module.fail_json(msg='Unable to find host "%s"' % host_name) + self.module.fail_json( + msg='Unable to find host "%s"' % host_name + ) cluster = host.parent else: cluster = None # get resource pools limiting search to cluster or datacenter - resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter) + resource_pool = find_obj( + self.content, + [vim.ResourcePool], + resource_pool_name, + folder=cluster or datacenter, + ) if not resource_pool: if resource_pool_name: - self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name) + self.module.fail_json( + msg='Unable to find resource_pool "%s"' + % resource_pool_name + ) else: - self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster') + self.module.fail_json( + msg="Unable to find resource pool, need esxi_hostname, resource_pool, or cluster" + ) return resource_pool def deploy_vm(self): @@ -2299,35 +3117,57 @@ def deploy_vm(self): # FIXME: # - static IPs - self.folder = self.params.get('folder', None) + self.folder = self.params.get("folder", None) if self.folder is None: - self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine") + self.module.fail_json( + msg="Folder is required parameter while deploying new virtual machine" + ) # Prepend / if it was missing from the folder path, also strip trailing slashes - if not self.folder.startswith('/'): - self.folder = '/%(folder)s' % self.params - self.folder = self.folder.rstrip('/') + if not self.folder.startswith("/"): + self.folder = "/%(folder)s" % self.params + self.folder = self.folder.rstrip("/") - datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter']) + datacenter = self.cache.find_obj( + self.content, 
[vim.Datacenter], self.params["datacenter"] + ) if datacenter is None: - self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params) + self.module.fail_json( + msg="No datacenter named %(datacenter)s was found" + % self.params + ) dcpath = compile_folder_path_for_object(datacenter) # Nested folder does not have trailing / - if not dcpath.endswith('/'): - dcpath += '/' + if not dcpath.endswith("/"): + dcpath += "/" # Check for full path first in case it was already supplied - if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or - self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')): + if self.folder.startswith( + dcpath + self.params["datacenter"] + "/vm" + ) or self.folder.startswith( + dcpath + "/" + self.params["datacenter"] + "/vm" + ): fullpath = self.folder - elif self.folder.startswith('/vm/') or self.folder == '/vm': - fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder) - elif self.folder.startswith('/'): - fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder) + elif self.folder.startswith("/vm/") or self.folder == "/vm": + fullpath = "%s%s%s" % ( + dcpath, + self.params["datacenter"], + self.folder, + ) + elif self.folder.startswith("/"): + fullpath = "%s%s/vm%s" % ( + dcpath, + self.params["datacenter"], + self.folder, + ) else: - fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder) + fullpath = "%s%s/vm/%s" % ( + dcpath, + self.params["datacenter"], + self.folder, + ) f_obj = self.content.searchIndex.FindByInventoryPath(fullpath) @@ -2335,20 +3175,28 @@ def deploy_vm(self): if f_obj is None: # Add some debugging values in failure. details = { - 'datacenter': datacenter.name, - 'datacenter_path': dcpath, - 'folder': self.folder, - 'full_search_path': fullpath, + "datacenter": datacenter.name, + "datacenter_path": dcpath, + "folder": self.folder, + "full_search_path": fullpath, } - self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath), - details=details) + self.module.fail_json( + msg="No folder %s matched in the search path : %s" + % (self.folder, fullpath), + details=details, + ) destfolder = f_obj - if self.params['template']: - vm_obj = self.get_vm_or_template(template_name=self.params['template']) + if self.params["template"]: + vm_obj = self.get_vm_or_template( + template_name=self.params["template"] + ) if vm_obj is None: - self.module.fail_json(msg="Could not find a template named %(template)s" % self.params) + self.module.fail_json( + msg="Could not find a template named %(template)s" + % self.params + ) else: vm_obj = None @@ -2356,17 +3204,23 @@ def deploy_vm(self): resource_pool = self.get_resource_pool() # set the destination datastore for VM & disks - if self.params['datastore']: + if self.params["datastore"]: # Give precedence to datastore value provided by user # User may want to deploy VM to specific datastore. 
- datastore_name = self.params['datastore'] + datastore_name = self.params["datastore"] # Check if user has provided datastore cluster first - datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name) + datastore_cluster = self.cache.find_obj( + self.content, [vim.StoragePod], datastore_name + ) if datastore_cluster: # If user specified datastore cluster so get recommended datastore - datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) + datastore_name = self.get_recommended_datastore( + datastore_cluster_obj=datastore_cluster + ) # Check if get_recommended_datastore or user specified datastore exists or not - datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name) + datastore = self.cache.find_obj( + self.content, [vim.Datastore], datastore_name + ) else: (datastore, datastore_name) = self.select_datastore(vm_obj) @@ -2386,40 +3240,60 @@ def deploy_vm(self): # Find if we need network customizations (find keys in dictionary that requires customizations) network_changes = False - for nw in self.params['networks']: + for nw in self.params["networks"]: for key in nw: # We don't need customizations for these keys - if key == 'type' and nw['type'] == 'dhcp': + if key == "type" and nw["type"] == "dhcp": network_changes = True break - if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'): + if key not in ( + "device_type", + "mac", + "name", + "vlan", + "type", + "start_connected", + "dvswitch_name", + ): network_changes = True break - if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None: + if ( + len(self.params["customization"]) > 0 + or network_changes + or self.params.get("customization_spec") is not None + ): self.customize_vm(vm_obj=vm_obj) clonespec = None clone_method = None try: - if self.params['template']: + if self.params["template"]: # Only select specific host when ESXi hostname is provided - if self.params['esxi_hostname']: + if self.params["esxi_hostname"]: self.relospec.host = self.select_host() self.relospec.datastore = datastore # Convert disk present in template if is set - if self.params['convert']: + if self.params["convert"]: for device in vm_obj.config.hardware.device: if isinstance(device, vim.vm.device.VirtualDisk): disk_locator = vim.vm.RelocateSpec.DiskLocator() - disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() - if self.params['convert'] in ['thin']: - disk_locator.diskBackingInfo.thinProvisioned = True - if self.params['convert'] in ['eagerzeroedthick']: - disk_locator.diskBackingInfo.eagerlyScrub = True - if self.params['convert'] in ['thick']: - disk_locator.diskBackingInfo.diskMode = "persistent" + disk_locator.diskBackingInfo = ( + vim.vm.device.VirtualDisk.FlatVer2BackingInfo() + ) + if self.params["convert"] in ["thin"]: + disk_locator.diskBackingInfo.thinProvisioned = ( + True + ) + if self.params["convert"] in ["eagerzeroedthick"]: + disk_locator.diskBackingInfo.eagerlyScrub = ( + True + ) + if self.params["convert"] in ["thick"]: + disk_locator.diskBackingInfo.diskMode = ( + "persistent" + ) disk_locator.diskId = device.key disk_locator.datastore = datastore self.relospec.disk.append(disk_locator) @@ -2427,63 +3301,93 @@ def deploy_vm(self): # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html # > pool: For a clone operation from a template to a virtual machine, this argument is required. 
self.relospec.pool = resource_pool - linked_clone = self.params.get('linked_clone') - snapshot_src = self.params.get('snapshot_src', None) + linked_clone = self.params.get("linked_clone") + snapshot_src = self.params.get("snapshot_src", None) if linked_clone: if snapshot_src is not None: - self.relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking + self.relospec.diskMoveType = ( + vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking + ) else: - self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are" - " required together for linked clone operation.") + self.module.fail_json( + msg="Parameter 'linked_src' and 'snapshot_src' are" + " required together for linked clone operation." + ) - clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=self.relospec) + clonespec = vim.vm.CloneSpec( + template=self.params["is_template"], location=self.relospec + ) if self.customspec: clonespec.customization = self.customspec if snapshot_src is not None: if vm_obj.snapshot is None: - self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params) - snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList, - snapname=snapshot_src) + self.module.fail_json( + msg="No snapshots present for virtual machine or template [%(template)s]" + % self.params + ) + snapshot = self.get_snapshots_by_name_recursively( + snapshots=vm_obj.snapshot.rootSnapshotList, + snapname=snapshot_src, + ) if len(snapshot) != 1: - self.module.fail_json(msg='virtual machine "%(template)s" does not contain' - ' snapshot named "%(snapshot_src)s"' % self.params) + self.module.fail_json( + msg='virtual machine "%(template)s" does not contain' + ' snapshot named "%(snapshot_src)s"' % self.params + ) clonespec.snapshot = snapshot[0].snapshot clonespec.config = self.configspec - clone_method = 'Clone' + clone_method = "Clone" try: - task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec) + task = vm_obj.Clone( + folder=destfolder, + name=self.params["name"], + spec=clonespec, + ) except vim.fault.NoPermission as e: - self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s " - "due to permission issue: %s" % (self.params['name'], - destfolder, - to_native(e.msg))) + self.module.fail_json( + msg="Failed to clone virtual machine %s to folder %s " + "due to permission issue: %s" + % (self.params["name"], destfolder, to_native(e.msg)) + ) self.change_detected = True else: # ConfigSpec require name for VM creation - self.configspec.name = self.params['name'] - self.configspec.files = vim.vm.FileInfo(logDirectory=None, - snapshotDirectory=None, - suspendDirectory=None, - vmPathName="[" + datastore_name + "]") + self.configspec.name = self.params["name"] + self.configspec.files = vim.vm.FileInfo( + logDirectory=None, + snapshotDirectory=None, + suspendDirectory=None, + vmPathName="[" + datastore_name + "]", + ) - clone_method = 'CreateVM_Task' + clone_method = "CreateVM_Task" try: - task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool) + task = destfolder.CreateVM_Task( + config=self.configspec, pool=resource_pool + ) except vmodl.fault.InvalidRequest as e: - self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration " - "parameter %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to create virtual machine due to invalid configuration " + "parameter %s" % to_native(e.msg) + ) except 
vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to create virtual machine due to " - "product versioning restrictions: %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to create virtual machine due to " + "product versioning restrictions: %s" + % to_native(e.msg) + ) self.change_detected = True self.wait_for_task(task) except TypeError as e: - self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e)) + self.module.fail_json( + msg="TypeError was returned, please ensure to give correct inputs. %s" + % to_text(e) + ) - if task.info.state == 'error': + if task.info.state == "error": # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361 # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173 @@ -2491,48 +3395,80 @@ def deploy_vm(self): clonespec_json = serialize_spec(clonespec) configspec_json = serialize_spec(self.configspec) kwargs = { - 'changed': self.change_applied, - 'failed': True, - 'msg': task.info.error.msg, - 'clonespec': clonespec_json, - 'configspec': configspec_json, - 'clone_method': clone_method + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "clonespec": clonespec_json, + "configspec": configspec_json, + "clone_method": clone_method, } return kwargs else: # set annotation vm = task.info.result - if self.params['annotation']: + if self.params["annotation"]: annotation_spec = vim.vm.ConfigSpec() - annotation_spec.annotation = str(self.params['annotation']) + annotation_spec.annotation = str(self.params["annotation"]) task = vm.ReconfigVM_Task(annotation_spec) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'} - - if self.params['customvalues']: + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "annotation", + } + + if self.params["customvalues"]: vm_custom_spec = vim.vm.ConfigSpec() - self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec) + self.customize_customvalues( + vm_obj=vm, config_spec=vm_custom_spec + ) task = vm.ReconfigVM_Task(vm_custom_spec) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'} - - if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']: - set_vm_power_state(self.content, vm, 'poweredon', force=False) - - if self.params['wait_for_ip_address']: - wait_for_vm_ip(self.content, vm, self.params['wait_for_ip_address_timeout']) - - if self.params['wait_for_customization']: - is_customization_ok = self.wait_for_customization(vm=vm, timeout=self.params['wait_for_customization_timeout']) + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "customvalues", + } + + if ( + self.params["wait_for_ip_address"] + or self.params["wait_for_customization"] + or self.params["state"] in ["poweredon", "restarted"] + ): + set_vm_power_state(self.content, vm, "poweredon", force=False) + + if self.params["wait_for_ip_address"]: + wait_for_vm_ip( + self.content, + vm, + self.params["wait_for_ip_address_timeout"], + ) + + if self.params["wait_for_customization"]: + is_customization_ok = 
self.wait_for_customization( + vm=vm, + timeout=self.params["wait_for_customization_timeout"], + ) if not is_customization_ok: vm_facts = self.gather_facts(vm) - return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'} + return { + "changed": self.change_applied, + "failed": True, + "instance": vm_facts, + "op": "customization", + } vm_facts = self.gather_facts(vm) - return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts} + return { + "changed": self.change_applied, + "failed": False, + "instance": vm_facts, + } def get_snapshots_by_name_recursively(self, snapshots, snapname): snap_obj = [] @@ -2540,7 +3476,9 @@ def get_snapshots_by_name_recursively(self, snapshots, snapname): if snapshot.name == snapname: snap_obj.append(snapshot) else: - snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname) + snap_obj = snap_obj + self.get_snapshots_by_name_recursively( + snapshot.childSnapshotList, snapname + ) return snap_obj def reconfigure_vm(self): @@ -2555,57 +3493,94 @@ def reconfigure_vm(self): self.configure_disks(vm_obj=self.current_vm_obj) self.configure_network(vm_obj=self.current_vm_obj) self.configure_cdrom(vm_obj=self.current_vm_obj) - self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec) + self.customize_customvalues( + vm_obj=self.current_vm_obj, config_spec=self.configspec + ) self.configure_resource_alloc_info(vm_obj=self.current_vm_obj) self.configure_vapp_properties(vm_obj=self.current_vm_obj) - if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']: - self.configspec.annotation = str(self.params['annotation']) + if ( + self.params["annotation"] + and self.current_vm_obj.config.annotation + != self.params["annotation"] + ): + self.configspec.annotation = str(self.params["annotation"]) self.change_detected = True - if self.params['resource_pool']: + if self.params["resource_pool"]: self.relospec.pool = self.get_resource_pool() if self.relospec.pool != self.current_vm_obj.resourcePool: task = self.current_vm_obj.RelocateVM_Task(spec=self.relospec) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "relocate", + } # Only send VMware task if we see a modification if self.change_detected: task = None try: - task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec) + task = self.current_vm_obj.ReconfigVM_Task( + spec=self.configspec + ) except vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to reconfigure virtual machine due to" - " product versioning restrictions: %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to reconfigure virtual machine due to" + " product versioning restrictions: %s" % to_native(e.msg) + ) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "reconfig", + } # Rename VM - if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name: - task = self.current_vm_obj.Rename_Task(self.params['name']) + if ( + 
self.params["uuid"] + and self.params["name"] + and self.params["name"] != self.current_vm_obj.config.name + ): + task = self.current_vm_obj.Rename_Task(self.params["name"]) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": "rename", + } # Mark VM as Template - if self.params['is_template'] and not self.current_vm_obj.config.template: + if ( + self.params["is_template"] + and not self.current_vm_obj.config.template + ): try: self.current_vm_obj.MarkAsTemplate() self.change_applied = True except vmodl.fault.NotSupported as e: - self.module.fail_json(msg="Failed to mark virtual machine [%s] " - "as template: %s" % (self.params['name'], e.msg)) + self.module.fail_json( + msg="Failed to mark virtual machine [%s] " + "as template: %s" % (self.params["name"], e.msg) + ) # Mark Template as VM - elif not self.params['is_template'] and self.current_vm_obj.config.template: + elif ( + not self.params["is_template"] + and self.current_vm_obj.config.template + ): resource_pool = self.get_resource_pool() kwargs = dict(pool=resource_pool) - if self.params.get('esxi_hostname', None): + if self.params.get("esxi_hostname", None): host_system_obj = self.select_host() kwargs.update(host=host_system_obj) @@ -2613,26 +3588,40 @@ def reconfigure_vm(self): self.current_vm_obj.MarkAsVirtualMachine(**kwargs) self.change_applied = True except vim.fault.InvalidState as invalid_state: - self.module.fail_json(msg="Virtual machine is not marked" - " as template : %s" % to_native(invalid_state.msg)) + self.module.fail_json( + msg="Virtual machine is not marked" + " as template : %s" % to_native(invalid_state.msg) + ) except vim.fault.InvalidDatastore as invalid_ds: - self.module.fail_json(msg="Converting template to virtual machine" - " operation cannot be performed on the" - " target datastores: %s" % to_native(invalid_ds.msg)) + self.module.fail_json( + msg="Converting template to virtual machine" + " operation cannot be performed on the" + " target datastores: %s" % to_native(invalid_ds.msg) + ) except vim.fault.CannotAccessVmComponent as cannot_access: - self.module.fail_json(msg="Failed to convert template to virtual machine" - " as operation unable access virtual machine" - " component: %s" % to_native(cannot_access.msg)) + self.module.fail_json( + msg="Failed to convert template to virtual machine" + " as operation unable access virtual machine" + " component: %s" % to_native(cannot_access.msg) + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Failed to convert template to virtual machine" - " due to : %s" % to_native(invalid_argument.msg)) + self.module.fail_json( + msg="Failed to convert template to virtual machine" + " due to : %s" % to_native(invalid_argument.msg) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to convert template to virtual machine" - " due to generic error : %s" % to_native(generic_exc)) + self.module.fail_json( + msg="Failed to convert template to virtual machine" + " due to generic error : %s" % to_native(generic_exc) + ) # Automatically update VMware UUID when converting template to VM. # This avoids an interactive prompt during VM startup. 
- uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"] + uuid_action = [ + x + for x in self.current_vm_obj.config.extraConfig + if x.key == "uuid.action" + ] if not uuid_action: uuid_action_opt = vim.option.OptionValue() uuid_action_opt.key = "uuid.action" @@ -2642,50 +3631,99 @@ def reconfigure_vm(self): self.change_detected = True # add customize existing VM after VM re-configure - if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']: + if ( + "existing_vm" in self.params["customization"] + and self.params["customization"]["existing_vm"] + ): if self.current_vm_obj.config.template: - self.module.fail_json(msg="VM is template, not support guest OS customization.") - if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff: - self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.") + self.module.fail_json( + msg="VM is template, not support guest OS customization." + ) + if ( + self.current_vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOff + ): + self.module.fail_json( + msg="VM is not in poweroff state, can not do guest OS customization." + ) cus_result = self.customize_exist_vm() - if cus_result['failed']: + if cus_result["failed"]: return cus_result vm_facts = self.gather_facts(self.current_vm_obj) - return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts} + return { + "changed": self.change_applied, + "failed": False, + "instance": vm_facts, + } def customize_exist_vm(self): task = None # Find if we need network customizations (find keys in dictionary that requires customizations) network_changes = False - for nw in self.params['networks']: + for nw in self.params["networks"]: for key in nw: # We don't need customizations for these keys - if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected', 'dvswitch_name'): + if key not in ( + "device_type", + "mac", + "name", + "vlan", + "type", + "start_connected", + "dvswitch_name", + ): network_changes = True break - if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'): + if ( + len(self.params["customization"]) > 1 + or network_changes + or self.params.get("customization_spec") + ): self.customize_vm(vm_obj=self.current_vm_obj) try: task = self.current_vm_obj.CustomizeVM_Task(self.customspec) except vim.fault.CustomizationFault as e: - self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to customization virtual machine due to CustomizationFault: %s" + % to_native(e.msg) + ) except vim.fault.RuntimeFault as e: - self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg)) + self.module.fail_json( + msg="failed to customization virtual machine due to RuntimeFault: %s" + % to_native(e.msg) + ) except Exception as e: - self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg)) + self.module.fail_json( + msg="failed to customization virtual machine due to fault: %s" + % to_native(e.msg) + ) self.wait_for_task(task) - if task.info.state == 'error': - return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'} + if task.info.state == "error": + return { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + "op": 
"customize_exist", + } - if self.params['wait_for_customization']: - set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False) - is_customization_ok = self.wait_for_customization(vm=self.current_vm_obj, timeout=self.params['wait_for_customization_timeout']) + if self.params["wait_for_customization"]: + set_vm_power_state( + self.content, self.current_vm_obj, "poweredon", force=False + ) + is_customization_ok = self.wait_for_customization( + vm=self.current_vm_obj, + timeout=self.params["wait_for_customization_timeout"], + ) if not is_customization_ok: - return {'changed': self.change_applied, 'failed': True, - 'msg': 'Wait for customization failed due to timeout', 'op': 'wait_for_customize_exist'} + return { + "changed": self.change_applied, + "failed": True, + "msg": "Wait for customization failed due to timeout", + "op": "wait_for_customize_exist", + } - return {'changed': self.change_applied, 'failed': False} + return {"changed": self.change_applied, "failed": False} def wait_for_task(self, task, poll_interval=1): """ @@ -2701,13 +3739,19 @@ def wait_for_task(self, task, poll_interval=1): # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py - while task.info.state not in ['error', 'success']: + while task.info.state not in ["error", "success"]: time.sleep(poll_interval) - self.change_applied = self.change_applied or task.info.state == 'success' + self.change_applied = ( + self.change_applied or task.info.state == "success" + ) def get_vm_events(self, vm, eventTypeIdList): - byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self") - filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList) + byEntity = vim.event.EventFilterSpec.ByEntity( + entity=vm, recursion="self" + ) + filterSpec = vim.event.EventFilterSpec( + entity=byEntity, eventTypeId=eventTypeIdList + ) eventManager = self.content.eventManager return eventManager.QueryEvent(filterSpec) @@ -2715,15 +3759,29 @@ def wait_for_customization(self, vm, timeout=3600, sleep=10): poll = int(timeout // sleep) thispoll = 0 while thispoll <= poll: - eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent']) + eventStarted = self.get_vm_events( + vm, ["CustomizationStartedEvent"] + ) if len(eventStarted): thispoll = 0 while thispoll <= poll: - eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed']) + eventsFinishedResult = self.get_vm_events( + vm, ["CustomizationSucceeded", "CustomizationFailed"] + ) if len(eventsFinishedResult): - if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded): - self.module.warn("Customization failed with error {%s}:{%s}" - % (eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage)) + if not isinstance( + eventsFinishedResult[0], + vim.event.CustomizationSucceeded, + ): + self.module.warn( + "Customization failed with error {%s}:{%s}" + % ( + eventsFinishedResult[0]._wsdlName, + eventsFinishedResult[ + 0 + ].fullFormattedMessage, + ) + ) return False else: return True @@ -2731,68 +3789,87 @@ def wait_for_customization(self, vm, timeout=3600, sleep=10): time.sleep(sleep) thispoll += 1 if len(eventsFinishedResult) == 0: - self.module.warn('Waiting for customization result event timed out.') + self.module.warn( + "Waiting for 
customization result event timed out." + ) return False else: time.sleep(sleep) thispoll += 1 if len(eventStarted): - self.module.warn('Waiting for customization result event timed out.') + self.module.warn( + "Waiting for customization result event timed out." + ) else: - self.module.warn('Waiting for customization start event timed out.') + self.module.warn( + "Waiting for customization start event timed out." + ) return False def main(): argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(type='str', default='present', - choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']), - template=dict(type='str', aliases=['template_src']), - is_template=dict(type='bool', default=False), - annotation=dict(type='str', aliases=['notes']), - customvalues=dict(type='list', default=[]), - name=dict(type='str'), - name_match=dict(type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - folder=dict(type='str'), - guest_id=dict(type='str'), - disk=dict(type='list', default=[]), + state=dict( + type="str", + default="present", + choices=[ + "absent", + "poweredoff", + "poweredon", + "present", + "rebootguest", + "restarted", + "shutdownguest", + "suspended", + ], + ), + template=dict(type="str", aliases=["template_src"]), + is_template=dict(type="bool", default=False), + annotation=dict(type="str", aliases=["notes"]), + customvalues=dict(type="list", default=[]), + name=dict(type="str"), + name_match=dict( + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + folder=dict(type="str"), + guest_id=dict(type="str"), + disk=dict(type="list", default=[]), cdrom=dict(type=list_or_dict, default=[]), - hardware=dict(type='dict', default={}), - force=dict(type='bool', default=False), - datacenter=dict(type='str', default='ha-datacenter'), - esxi_hostname=dict(type='str'), - cluster=dict(type='str'), - wait_for_ip_address=dict(type='bool', default=False), - wait_for_ip_address_timeout=dict(type='int', default=300), - state_change_timeout=dict(type='int', default=0), - snapshot_src=dict(type='str'), - linked_clone=dict(type='bool', default=False), - networks=dict(type='list', default=[]), - resource_pool=dict(type='str'), - customization=dict(type='dict', default={}, no_log=True), - customization_spec=dict(type='str', default=None), - wait_for_customization=dict(type='bool', default=False), - wait_for_customization_timeout=dict(type='int', default=3600), - vapp_properties=dict(type='list', default=[]), - datastore=dict(type='str'), - convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']), - delete_from_inventory=dict(type='bool', default=False), + hardware=dict(type="dict", default={}), + force=dict(type="bool", default=False), + datacenter=dict(type="str", default="ha-datacenter"), + esxi_hostname=dict(type="str"), + cluster=dict(type="str"), + wait_for_ip_address=dict(type="bool", default=False), + wait_for_ip_address_timeout=dict(type="int", default=300), + state_change_timeout=dict(type="int", default=0), + snapshot_src=dict(type="str"), + linked_clone=dict(type="bool", default=False), + networks=dict(type="list", default=[]), + resource_pool=dict(type="str"), + customization=dict(type="dict", default={}, no_log=True), + customization_spec=dict(type="str", default=None), + wait_for_customization=dict(type="bool", default=False), + 
wait_for_customization_timeout=dict(type="int", default=3600), + vapp_properties=dict(type="list", default=[]), + datastore=dict(type="str"), + convert=dict( + type="str", choices=["thin", "thick", "eagerzeroedthick"] + ), + delete_from_inventory=dict(type="bool", default=False), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[ - ['cluster', 'esxi_hostname'], - ], - required_one_of=[ - ['name', 'uuid'], - ], - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["cluster", "esxi_hostname"]], + required_one_of=[["name", "uuid"]], + ) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} pyv = PyVmomiHelper(module) @@ -2801,50 +3878,75 @@ def main(): # VM already exists if vm: - if module.params['state'] == 'absent': + if module.params["state"] == "absent": # destroy it if module.check_mode: result.update( vm_name=vm.name, changed=True, current_powerstate=vm.summary.runtime.powerState.lower(), - desired_operation='remove_vm', + desired_operation="remove_vm", ) module.exit_json(**result) - if module.params['force']: + if module.params["force"]: # has to be poweredoff first - set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force']) - result = pyv.remove_vm(vm, module.params['delete_from_inventory']) - elif module.params['state'] == 'present': + set_vm_power_state( + pyv.content, vm, "poweredoff", module.params["force"] + ) + result = pyv.remove_vm(vm, module.params["delete_from_inventory"]) + elif module.params["state"] == "present": if module.check_mode: result.update( vm_name=vm.name, changed=True, - desired_operation='reconfigure_vm', + desired_operation="reconfigure_vm", ) module.exit_json(**result) result = pyv.reconfigure_vm() - elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']: + elif module.params["state"] in [ + "poweredon", + "poweredoff", + "restarted", + "suspended", + "shutdownguest", + "rebootguest", + ]: if module.check_mode: result.update( vm_name=vm.name, changed=True, current_powerstate=vm.summary.runtime.powerState.lower(), - desired_operation='set_vm_power_state', + desired_operation="set_vm_power_state", ) module.exit_json(**result) # set powerstate - tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout']) - if tmp_result['changed']: + tmp_result = set_vm_power_state( + pyv.content, + vm, + module.params["state"], + module.params["force"], + module.params["state_change_timeout"], + ) + if tmp_result["changed"]: result["changed"] = True - if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']: - wait_result = wait_for_vm_ip(pyv.content, vm, module.params['wait_for_ip_address_timeout']) + if ( + module.params["state"] + in ["poweredon", "restarted", "rebootguest"] + and module.params["wait_for_ip_address"] + ): + wait_result = wait_for_vm_ip( + pyv.content, + vm, + module.params["wait_for_ip_address_timeout"], + ) if not wait_result: - module.fail_json(msg='Waiting for IP address timed out') - tmp_result['instance'] = wait_result + module.fail_json( + msg="Waiting for IP address timed out" + ) + tmp_result["instance"] = wait_result if not tmp_result["failed"]: result["failed"] = False - result['instance'] = tmp_result['instance'] + result["instance"] = tmp_result["instance"] if tmp_result["failed"]: 
result["failed"] = True result["msg"] = tmp_result["msg"] @@ -2853,22 +3955,28 @@ def main(): raise AssertionError() # VM doesn't exist else: - if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']: + if module.params["state"] in [ + "poweredon", + "poweredoff", + "present", + "restarted", + "suspended", + ]: if module.check_mode: - result.update( - changed=True, - desired_operation='deploy_vm', - ) + result.update(changed=True, desired_operation="deploy_vm") module.exit_json(**result) result = pyv.deploy_vm() - if result['failed']: - module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg']) + if result["failed"]: + module.fail_json( + msg="Failed to create a virtual machine : %s" + % result["msg"] + ) - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_boot_info.py b/plugins/modules/vmware_guest_boot_info.py index e112b49..ab80e4d 100644 --- a/plugins/modules/vmware_guest_boot_info.py +++ b/plugins/modules/vmware_guest_boot_info.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_boot_info short_description: Gather info about boot options for the given virtual machine @@ -58,9 +59,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about virtual machine's boot order and related parameters vmware_guest_boot_info: hostname: "{{ vcenter_hostname }}" @@ -78,7 +79,7 @@ validate_certs: no moid: "vm-42" register: vm_moid_boot_order_info -''' +""" RETURN = r""" vm_boot_info: @@ -103,7 +104,11 @@ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_vm_by_id, +) try: from pyVmomi import vim, VmomiSupport @@ -114,10 +119,10 @@ class VmBootInfoManager(PyVmomi): def __init__(self, module): super(VmBootInfoManager, self).__init__(module) - self.name = self.params['name'] - self.uuid = self.params['uuid'] - self.moid = self.params['moid'] - self.use_instance_uuid = self.params['use_instance_uuid'] + self.name = self.params["name"] + self.uuid = self.params["uuid"] + self.moid = self.params["moid"] + self.use_instance_uuid = self.params["use_instance_uuid"] self.vm = None def _get_vm(self): @@ -125,44 +130,60 @@ def _get_vm(self): if self.uuid: if self.use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="use_instance_uuid") + vm_obj = find_vm_by_id( + self.content, + vm_id=self.uuid, + vm_id_type="use_instance_uuid", + ) else: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.uuid, vm_id_type="uuid" + ) if vm_obj is None: - self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid) + self.module.fail_json( + msg="Failed to 
find the virtual machine with UUID : %s" + % self.uuid + ) vms = [vm_obj] elif self.name: - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) for temp_vm_object in objects: if temp_vm_object.obj.name == self.name: vms.append(temp_vm_object.obj) elif self.moid: - vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub) + vm_obj = VmomiSupport.templateOf("VirtualMachine")( + self.module.params["moid"], self.si._stub + ) if vm_obj: vms.append(vm_obj) if vms: - if self.params.get('name_match') == 'first': + if self.params.get("name_match") == "first": self.vm = vms[0] - elif self.params.get('name_match') == 'last': + elif self.params.get("name_match") == "last": self.vm = vms[-1] else: - self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid or self.moid)) + self.module.fail_json( + msg="Failed to find virtual machine using %s" + % (self.name or self.uuid or self.moid) + ) @staticmethod def humanize_boot_order(boot_order): results = [] for device in boot_order: if isinstance(device, vim.vm.BootOptions.BootableCdromDevice): - results.append('cdrom') + results.append("cdrom") elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice): - results.append('disk') + results.append("disk") elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice): - results.append('ethernet') + results.append("ethernet") elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice): - results.append('floppy') + results.append("floppy") return results def ensure(self): @@ -171,13 +192,15 @@ def ensure(self): results = dict() if self.vm and self.vm.config: results = dict( - current_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder), + current_boot_order=self.humanize_boot_order( + self.vm.config.bootOptions.bootOrder + ), current_boot_delay=self.vm.config.bootOptions.bootDelay, current_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup, current_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled, current_boot_retry_delay=self.vm.config.bootOptions.bootRetryDelay, current_boot_firmware=self.vm.config.firmware, - current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled + current_secure_boot_enabled=self.vm.config.bootOptions.efiSecureBootEnabled, ) self.module.exit_json(changed=False, vm_boot_info=results) @@ -186,24 +209,17 @@ def ensure(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - name_match=dict( - choices=['first', 'last'], - default='first' - ), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + name_match=dict(choices=["first", "last"], default="first"), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], + mutually_exclusive=[["name", "uuid", "moid"]], supports_check_mode=True, ) @@ -211,5 +227,5 @@ def main(): pyv.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_boot_manager.py b/plugins/modules/vmware_guest_boot_manager.py index ed8f485..7e776f6 100644 --- 
a/plugins/modules/vmware_guest_boot_manager.py +++ b/plugins/modules/vmware_guest_boot_manager.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_boot_manager short_description: Manage boot options for the given virtual machine @@ -96,9 +97,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Change virtual machine's boot order and related parameters vmware_guest_boot_manager: hostname: "{{ vcenter_hostname }}" @@ -138,7 +139,7 @@ - disk delegate_to: localhost register: vm_boot_order -''' +""" RETURN = r""" vm_boot_status: @@ -176,7 +177,13 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id, wait_for_task, TaskError +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_vm_by_id, + wait_for_task, + TaskError, +) try: from pyVmomi import vim, VmomiSupport @@ -187,10 +194,10 @@ class VmBootManager(PyVmomi): def __init__(self, module): super(VmBootManager, self).__init__(module) - self.name = self.params['name'] - self.uuid = self.params['uuid'] - self.moid = self.params['moid'] - self.use_instance_uuid = self.params['use_instance_uuid'] + self.name = self.params["name"] + self.uuid = self.params["uuid"] + self.moid = self.params["moid"] + self.use_instance_uuid = self.params["use_instance_uuid"] self.vm = None def _get_vm(self): @@ -198,126 +205,205 @@ def _get_vm(self): if self.uuid: if self.use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.uuid, vm_id_type="instance_uuid" + ) else: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.uuid, vm_id_type="uuid" + ) if vm_obj is None: - self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid) + self.module.fail_json( + msg="Failed to find the virtual machine with UUID : %s" + % self.uuid + ) vms = [vm_obj] elif self.name: - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) for temp_vm_object in objects: if temp_vm_object.obj.name == self.name: vms.append(temp_vm_object.obj) elif self.moid: - vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.module.params['moid'], self.si._stub) + vm_obj = VmomiSupport.templateOf("VirtualMachine")( + self.module.params["moid"], self.si._stub + ) if vm_obj: vms.append(vm_obj) if vms: - if self.params.get('name_match') == 'first': + if self.params.get("name_match") == "first": self.vm = vms[0] - elif self.params.get('name_match') == 'last': + elif self.params.get("name_match") == "last": self.vm = vms[-1] else: - self.module.fail_json(msg="Failed to find virtual machine using %s" % (self.name or self.uuid)) + self.module.fail_json( + 
msg="Failed to find virtual machine using %s" + % (self.name or self.uuid) + ) @staticmethod def humanize_boot_order(boot_order): results = [] for device in boot_order: if isinstance(device, vim.vm.BootOptions.BootableCdromDevice): - results.append('cdrom') + results.append("cdrom") elif isinstance(device, vim.vm.BootOptions.BootableDiskDevice): - results.append('disk') + results.append("disk") elif isinstance(device, vim.vm.BootOptions.BootableEthernetDevice): - results.append('ethernet') + results.append("ethernet") elif isinstance(device, vim.vm.BootOptions.BootableFloppyDevice): - results.append('floppy') + results.append("floppy") return results def ensure(self): self._get_vm() - valid_device_strings = ['cdrom', 'disk', 'ethernet', 'floppy'] + valid_device_strings = ["cdrom", "disk", "ethernet", "floppy"] boot_order_list = [] - for device_order in self.params.get('boot_order'): + for device_order in self.params.get("boot_order"): if device_order not in valid_device_strings: - self.module.fail_json(msg="Invalid device found [%s], please specify device from ['%s']" % (device_order, - "', '".join(valid_device_strings))) - if device_order == 'cdrom': - first_cdrom = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualCdrom)] + self.module.fail_json( + msg="Invalid device found [%s], please specify device from ['%s']" + % (device_order, "', '".join(valid_device_strings)) + ) + if device_order == "cdrom": + first_cdrom = [ + device + for device in self.vm.config.hardware.device + if isinstance(device, vim.vm.device.VirtualCdrom) + ] if first_cdrom: - boot_order_list.append(vim.vm.BootOptions.BootableCdromDevice()) - elif device_order == 'disk': - first_hdd = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)] + boot_order_list.append( + vim.vm.BootOptions.BootableCdromDevice() + ) + elif device_order == "disk": + first_hdd = [ + device + for device in self.vm.config.hardware.device + if isinstance(device, vim.vm.device.VirtualDisk) + ] if first_hdd: - boot_order_list.append(vim.vm.BootOptions.BootableDiskDevice(deviceKey=first_hdd[0].key)) - elif device_order == 'ethernet': - first_ether = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)] + boot_order_list.append( + vim.vm.BootOptions.BootableDiskDevice( + deviceKey=first_hdd[0].key + ) + ) + elif device_order == "ethernet": + first_ether = [ + device + for device in self.vm.config.hardware.device + if isinstance(device, vim.vm.device.VirtualEthernetCard) + ] if first_ether: - boot_order_list.append(vim.vm.BootOptions.BootableEthernetDevice(deviceKey=first_ether[0].key)) - elif device_order == 'floppy': - first_floppy = [device for device in self.vm.config.hardware.device if isinstance(device, vim.vm.device.VirtualFloppy)] + boot_order_list.append( + vim.vm.BootOptions.BootableEthernetDevice( + deviceKey=first_ether[0].key + ) + ) + elif device_order == "floppy": + first_floppy = [ + device + for device in self.vm.config.hardware.device + if isinstance(device, vim.vm.device.VirtualFloppy) + ] if first_floppy: - boot_order_list.append(vim.vm.BootOptions.BootableFloppyDevice()) + boot_order_list.append( + vim.vm.BootOptions.BootableFloppyDevice() + ) change_needed = False kwargs = dict() if len(boot_order_list) != len(self.vm.config.bootOptions.bootOrder): - kwargs.update({'bootOrder': boot_order_list}) + kwargs.update({"bootOrder": boot_order_list}) change_needed = True else: for i in 
range(0, len(boot_order_list)): boot_device_type = type(boot_order_list[i]) - vm_boot_device_type = type(self.vm.config.bootOptions.bootOrder[i]) + vm_boot_device_type = type( + self.vm.config.bootOptions.bootOrder[i] + ) if boot_device_type != vm_boot_device_type: - kwargs.update({'bootOrder': boot_order_list}) + kwargs.update({"bootOrder": boot_order_list}) change_needed = True - if self.vm.config.bootOptions.bootDelay != self.params.get('boot_delay'): - kwargs.update({'bootDelay': self.params.get('boot_delay')}) + if self.vm.config.bootOptions.bootDelay != self.params.get( + "boot_delay" + ): + kwargs.update({"bootDelay": self.params.get("boot_delay")}) change_needed = True - if self.vm.config.bootOptions.enterBIOSSetup != self.params.get('enter_bios_setup'): - kwargs.update({'enterBIOSSetup': self.params.get('enter_bios_setup')}) + if self.vm.config.bootOptions.enterBIOSSetup != self.params.get( + "enter_bios_setup" + ): + kwargs.update( + {"enterBIOSSetup": self.params.get("enter_bios_setup")} + ) change_needed = True - if self.vm.config.bootOptions.bootRetryEnabled != self.params.get('boot_retry_enabled'): - kwargs.update({'bootRetryEnabled': self.params.get('boot_retry_enabled')}) + if self.vm.config.bootOptions.bootRetryEnabled != self.params.get( + "boot_retry_enabled" + ): + kwargs.update( + {"bootRetryEnabled": self.params.get("boot_retry_enabled")} + ) change_needed = True - if self.vm.config.bootOptions.bootRetryDelay != self.params.get('boot_retry_delay'): + if self.vm.config.bootOptions.bootRetryDelay != self.params.get( + "boot_retry_delay" + ): if not self.vm.config.bootOptions.bootRetryEnabled: - kwargs.update({'bootRetryEnabled': True}) - kwargs.update({'bootRetryDelay': self.params.get('boot_retry_delay')}) + kwargs.update({"bootRetryEnabled": True}) + kwargs.update( + {"bootRetryDelay": self.params.get("boot_retry_delay")} + ) change_needed = True boot_firmware_required = False - if self.vm.config.firmware != self.params.get('boot_firmware'): + if self.vm.config.firmware != self.params.get("boot_firmware"): change_needed = True boot_firmware_required = True - if self.vm.config.bootOptions.efiSecureBootEnabled != self.params.get('secure_boot_enabled'): - if self.params.get('secure_boot_enabled') and self.params.get('boot_firmware') == "bios": - self.module.fail_json(msg="EFI secure boot cannot be enabled when boot_firmware = bios, but both are specified") + if self.vm.config.bootOptions.efiSecureBootEnabled != self.params.get( + "secure_boot_enabled" + ): + if ( + self.params.get("secure_boot_enabled") + and self.params.get("boot_firmware") == "bios" + ): + self.module.fail_json( + msg="EFI secure boot cannot be enabled when boot_firmware = bios, but both are specified" + ) # If the user is not specifying boot_firmware, make sure they aren't trying to enable it on a # system with boot_firmware already set to 'bios' - if self.params.get('secure_boot_enabled') and \ - self.params.get('boot_firmware') is None and \ - self.vm.config.firmware == 'bios': - self.module.fail_json(msg="EFI secure boot cannot be enabled when boot_firmware = bios. VM's boot_firmware currently set to bios") - - kwargs.update({'efiSecureBootEnabled': self.params.get('secure_boot_enabled')}) + if ( + self.params.get("secure_boot_enabled") + and self.params.get("boot_firmware") is None + and self.vm.config.firmware == "bios" + ): + self.module.fail_json( + msg="EFI secure boot cannot be enabled when boot_firmware = bios. 
VM's boot_firmware currently set to bios" + ) + + kwargs.update( + { + "efiSecureBootEnabled": self.params.get( + "secure_boot_enabled" + ) + } + ) change_needed = True changed = False results = dict( - previous_boot_order=self.humanize_boot_order(self.vm.config.bootOptions.bootOrder), + previous_boot_order=self.humanize_boot_order( + self.vm.config.bootOptions.bootOrder + ), previous_boot_delay=self.vm.config.bootOptions.bootDelay, previous_enter_bios_setup=self.vm.config.bootOptions.enterBIOSSetup, previous_boot_retry_enabled=self.vm.config.bootOptions.bootRetryEnabled, @@ -331,25 +417,29 @@ def ensure(self): vm_conf = vim.vm.ConfigSpec() vm_conf.bootOptions = vim.vm.BootOptions(**kwargs) if boot_firmware_required: - vm_conf.firmware = self.params.get('boot_firmware') + vm_conf.firmware = self.params.get("boot_firmware") task = self.vm.ReconfigVM_Task(vm_conf) try: changed, result = wait_for_task(task) except TaskError as e: - self.module.fail_json(msg="Failed to perform reconfigure virtual" - " machine %s for boot order due to: %s" % (self.name or self.uuid, - to_native(e))) + self.module.fail_json( + msg="Failed to perform reconfigure virtual" + " machine %s for boot order due to: %s" + % (self.name or self.uuid, to_native(e)) + ) results.update( { - 'current_boot_order': self.humanize_boot_order(self.vm.config.bootOptions.bootOrder), - 'current_boot_delay': self.vm.config.bootOptions.bootDelay, - 'current_enter_bios_setup': self.vm.config.bootOptions.enterBIOSSetup, - 'current_boot_retry_enabled': self.vm.config.bootOptions.bootRetryEnabled, - 'current_boot_retry_delay': self.vm.config.bootOptions.bootRetryDelay, - 'current_boot_firmware': self.vm.config.firmware, - 'current_secure_boot_enabled': self.vm.config.bootOptions.efiSecureBootEnabled, + "current_boot_order": self.humanize_boot_order( + self.vm.config.bootOptions.bootOrder + ), + "current_boot_delay": self.vm.config.bootOptions.bootDelay, + "current_enter_bios_setup": self.vm.config.bootOptions.enterBIOSSetup, + "current_boot_retry_enabled": self.vm.config.bootOptions.bootRetryEnabled, + "current_boot_retry_delay": self.vm.config.bootOptions.bootRetryDelay, + "current_boot_firmware": self.vm.config.firmware, + "current_secure_boot_enabled": self.vm.config.bootOptions.efiSecureBootEnabled, } ) @@ -359,57 +449,29 @@ def ensure(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - boot_order=dict( - type='list', - default=[], - ), - name_match=dict( - choices=['first', 'last'], - default='first' - ), - boot_delay=dict( - type='int', - default=0, - ), - enter_bios_setup=dict( - type='bool', - default=False, - ), - boot_retry_enabled=dict( - type='bool', - default=False, - ), - boot_retry_delay=dict( - type='int', - default=0, - ), - secure_boot_enabled=dict( - type='bool', - default=False, - ), - boot_firmware=dict( - type='str', - choices=['efi', 'bios'], - ) + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + boot_order=dict(type="list", default=[]), + name_match=dict(choices=["first", "last"], default="first"), + boot_delay=dict(type="int", default=0), + enter_bios_setup=dict(type="bool", default=False), + boot_retry_enabled=dict(type="bool", default=False), + boot_retry_delay=dict(type="int", default=0), + secure_boot_enabled=dict(type="bool", default=False), + boot_firmware=dict(type="str", 
choices=["efi", "bios"]), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], + mutually_exclusive=[["name", "uuid", "moid"]], ) pyv = VmBootManager(module) pyv.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_controller.py b/plugins/modules/vmware_guest_controller.py index 3a3c588..da2e0e9 100644 --- a/plugins/modules/vmware_guest_controller.py +++ b/plugins/modules/vmware_guest_controller.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_controller short_description: Manage disk or USB controllers related to virtual machine in given vCenter infrastructure @@ -131,9 +132,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add disk and USB 3.0 controllers for virtual machine located by name vmware_guest_controller: hostname: "{{ vcenter_hostname }}" @@ -171,7 +172,7 @@ type: usb2 delegate_to: localhost register: disk_controller_facts -''' +""" RETURN = """ disk_controller_status: @@ -249,29 +250,43 @@ import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, +) class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) self.sleep_time = 10 - self.scsi_device_type = dict(lsilogic=vim.vm.device.VirtualLsiLogicController, - paravirtual=vim.vm.device.ParaVirtualSCSIController, - buslogic=vim.vm.device.VirtualBusLogicController, - lsilogicsas=vim.vm.device.VirtualLsiLogicSASController) + self.scsi_device_type = dict( + lsilogic=vim.vm.device.VirtualLsiLogicController, + paravirtual=vim.vm.device.ParaVirtualSCSIController, + buslogic=vim.vm.device.VirtualBusLogicController, + lsilogicsas=vim.vm.device.VirtualLsiLogicSASController, + ) self.sata_device_type = vim.vm.device.VirtualAHCIController self.nvme_device_type = vim.vm.device.VirtualNVMEController - self.usb_device_type = dict(usb2=vim.vm.device.VirtualUSBController, - usb3=vim.vm.device.VirtualUSBXHCIController) - self.controller_types = dict(self.scsi_device_type, sata=self.sata_device_type, nvme=self.nvme_device_type) + self.usb_device_type = dict( + usb2=vim.vm.device.VirtualUSBController, + usb3=vim.vm.device.VirtualUSBXHCIController, + ) + self.controller_types = dict( + self.scsi_device_type, + sata=self.sata_device_type, + nvme=self.nvme_device_type, + ) self.controller_types.update(self.usb_device_type) self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] self.change_detected = False - self.disk_ctl_bus_num_list = dict(sata=list(range(0, 4)), - nvme=list(range(0, 4)), - scsi=list(range(0, 4))) + self.disk_ctl_bus_num_list = dict( + sata=list(range(0, 4)), + nvme=list(range(0, 
4)), + scsi=list(range(0, 4)), + ) def get_unused_ctl_bus_number(self): """ @@ -279,14 +294,14 @@ def get_unused_ctl_bus_number(self): """ for device in self.current_vm_obj.config.hardware.device: if isinstance(device, self.sata_device_type): - if len(self.disk_ctl_bus_num_list['sata']) != 0: - self.disk_ctl_bus_num_list['sata'].remove(device.busNumber) + if len(self.disk_ctl_bus_num_list["sata"]) != 0: + self.disk_ctl_bus_num_list["sata"].remove(device.busNumber) if isinstance(device, self.nvme_device_type): - if len(self.disk_ctl_bus_num_list['nvme']) != 0: - self.disk_ctl_bus_num_list['nvme'].remove(device.busNumber) + if len(self.disk_ctl_bus_num_list["nvme"]) != 0: + self.disk_ctl_bus_num_list["nvme"].remove(device.busNumber) if isinstance(device, tuple(self.scsi_device_type.values())): - if len(self.disk_ctl_bus_num_list['scsi']) != 0: - self.disk_ctl_bus_num_list['scsi'].remove(device.busNumber) + if len(self.disk_ctl_bus_num_list["scsi"]) != 0: + self.disk_ctl_bus_num_list["scsi"].remove(device.busNumber) def check_ctl_disk_exist(self, ctl_type=None, bus_number=None): """ @@ -320,23 +335,23 @@ def create_controller(self, ctl_type, bus_number=0): """ disk_ctl = vim.vm.device.VirtualDeviceSpec() disk_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add - if ctl_type == 'sata': + if ctl_type == "sata": disk_ctl.device = self.sata_device_type() disk_ctl.device.key = -randint(15000, 19999) - elif ctl_type == 'nvme': + elif ctl_type == "nvme": disk_ctl.device = self.nvme_device_type() disk_ctl.device.key = -randint(31000, 39999) elif ctl_type in self.scsi_device_type.keys(): disk_ctl.device = self.scsi_device_type.get(ctl_type)() disk_ctl.device.key = -randint(1000, 6999) disk_ctl.device.hotAddRemove = True - disk_ctl.device.sharedBus = 'noSharing' + disk_ctl.device.sharedBus = "noSharing" disk_ctl.device.scsiCtlrUnitNumber = 7 elif ctl_type in self.usb_device_type.keys(): disk_ctl.device = self.usb_device_type.get(ctl_type)() - if ctl_type == 'usb2': + if ctl_type == "usb2": disk_ctl.device.key = 7000 - elif ctl_type == 'usb3': + elif ctl_type == "usb3": disk_ctl.device.key = 14000 disk_ctl.device.deviceInfo = vim.Description() @@ -351,11 +366,7 @@ def gather_disk_controller_facts(self): Return: A dictionary of each type controller facts """ disk_ctl_facts = dict( - scsi=dict(), - sata=dict(), - nvme=dict(), - usb2=dict(), - usb3=dict() + scsi=dict(), sata=dict(), nvme=dict(), usb2=dict(), usb3=dict() ) for device in self.current_vm_obj.config.hardware.device: ctl_facts_dict = dict() @@ -370,15 +381,15 @@ def gather_disk_controller_facts(self): controller_disks_devicekey=device.device, ) if isinstance(device, tuple(self.scsi_device_type.values())): - disk_ctl_facts['scsi'].update(ctl_facts_dict) + disk_ctl_facts["scsi"].update(ctl_facts_dict) if isinstance(device, self.nvme_device_type): - disk_ctl_facts['nvme'].update(ctl_facts_dict) + disk_ctl_facts["nvme"].update(ctl_facts_dict) if isinstance(device, self.sata_device_type): - disk_ctl_facts['sata'].update(ctl_facts_dict) - if isinstance(device, self.usb_device_type.get('usb2')): - disk_ctl_facts['usb2'].update(ctl_facts_dict) - if isinstance(device, self.usb_device_type.get('usb3')): - disk_ctl_facts['usb3'].update(ctl_facts_dict) + disk_ctl_facts["sata"].update(ctl_facts_dict) + if isinstance(device, self.usb_device_type.get("usb2")): + disk_ctl_facts["usb2"].update(ctl_facts_dict) + if isinstance(device, self.usb_device_type.get("usb3")): + disk_ctl_facts["usb3"].update(ctl_facts_dict) return disk_ctl_facts @@ -388,40 
+399,80 @@ def sanitize_disk_controller_config(self): Return: A list of dictionary with checked controller configured """ - if not self.params.get('controllers'): - self.module.exit_json(changed=False, msg="No controller provided for virtual" - " machine '%s' for management." % self.current_vm_obj.name) - if 10 != self.params.get('sleep_time') <= 300: - self.sleep_time = self.params.get('sleep_time') + if not self.params.get("controllers"): + self.module.exit_json( + changed=False, + msg="No controller provided for virtual" + " machine '%s' for management." % self.current_vm_obj.name, + ) + if 10 != self.params.get("sleep_time") <= 300: + self.sleep_time = self.params.get("sleep_time") exec_get_unused_ctl_bus_number = False - controller_config = self.params.get('controllers') + controller_config = self.params.get("controllers") for ctl_config in controller_config: if ctl_config: - if ctl_config['type'] not in self.usb_device_type.keys(): - if ctl_config['state'] == 'absent' and ctl_config.get('controller_number') is None: - self.module.fail_json(msg="Disk controller number is required when removing it.") - if ctl_config['state'] == 'present' and not exec_get_unused_ctl_bus_number: + if ctl_config["type"] not in self.usb_device_type.keys(): + if ( + ctl_config["state"] == "absent" + and ctl_config.get("controller_number") is None + ): + self.module.fail_json( + msg="Disk controller number is required when removing it." + ) + if ( + ctl_config["state"] == "present" + and not exec_get_unused_ctl_bus_number + ): self.get_unused_ctl_bus_number() exec_get_unused_ctl_bus_number = True # starts from hardware version 13 nvme controller supported - if ctl_config['state'] == 'present' and ctl_config['type'] == 'nvme': - vm_hwv = int(self.current_vm_obj.config.version.split('-')[1]) + if ( + ctl_config["state"] == "present" + and ctl_config["type"] == "nvme" + ): + vm_hwv = int( + self.current_vm_obj.config.version.split("-")[1] + ) if vm_hwv < 13: - self.module.fail_json(msg="Can not create new NVMe disk controller due to VM hardware version" - " is '%s', not >= 13." % vm_hwv) + self.module.fail_json( + msg="Can not create new NVMe disk controller due to VM hardware version" + " is '%s', not >= 13." 
% vm_hwv + ) if exec_get_unused_ctl_bus_number: for ctl_config in controller_config: - if ctl_config and ctl_config['state'] == 'present' and ctl_config['type'] not in self.usb_device_type.keys(): - if ctl_config['type'] in self.scsi_device_type.keys(): - if len(self.disk_ctl_bus_num_list['scsi']) != 0: - ctl_config['controller_number'] = self.disk_ctl_bus_num_list['scsi'].pop(0) + if ( + ctl_config + and ctl_config["state"] == "present" + and ctl_config["type"] not in self.usb_device_type.keys() + ): + if ctl_config["type"] in self.scsi_device_type.keys(): + if len(self.disk_ctl_bus_num_list["scsi"]) != 0: + ctl_config[ + "controller_number" + ] = self.disk_ctl_bus_num_list["scsi"].pop(0) else: - ctl_config['controller_number'] = None - elif ctl_config['type'] == 'sata' or ctl_config['type'] == 'nvme': - if len(self.disk_ctl_bus_num_list.get(ctl_config['type'])) != 0: - ctl_config['controller_number'] = self.disk_ctl_bus_num_list.get(ctl_config['type']).pop(0) + ctl_config["controller_number"] = None + elif ( + ctl_config["type"] == "sata" + or ctl_config["type"] == "nvme" + ): + if ( + len( + self.disk_ctl_bus_num_list.get( + ctl_config["type"] + ) + ) + != 0 + ): + ctl_config[ + "controller_number" + ] = self.disk_ctl_bus_num_list.get( + ctl_config["type"] + ).pop( + 0 + ) else: - ctl_config['controller_number'] = None + ctl_config["controller_number"] = None return controller_config @@ -431,65 +482,120 @@ def configure_disk_controllers(self): Return: Operation result """ - if self.params['gather_disk_controller_facts']: - results = {'changed': False, 'failed': False, 'disk_controller_data': self.gather_disk_controller_facts()} + if self.params["gather_disk_controller_facts"]: + results = { + "changed": False, + "failed": False, + "disk_controller_data": self.gather_disk_controller_facts(), + } return results controller_config = self.sanitize_disk_controller_config() for disk_ctl_config in controller_config: - if disk_ctl_config and disk_ctl_config['state'] == 'present': + if disk_ctl_config and disk_ctl_config["state"] == "present": # create new USB controller, bus number is 0 - if disk_ctl_config['type'] in self.usb_device_type.keys(): - usb_exists, has_disks_attached = self.check_ctl_disk_exist(disk_ctl_config['type']) + if disk_ctl_config["type"] in self.usb_device_type.keys(): + usb_exists, has_disks_attached = self.check_ctl_disk_exist( + disk_ctl_config["type"] + ) if usb_exists: - self.module.warn("'%s' USB controller already exists, can not add more." % disk_ctl_config['type']) + self.module.warn( + "'%s' USB controller already exists, can not add more." 
+ % disk_ctl_config["type"] + ) else: - disk_controller_new = self.create_controller(disk_ctl_config['type']) - self.config_spec.deviceChange.append(disk_controller_new) + disk_controller_new = self.create_controller( + disk_ctl_config["type"] + ) + self.config_spec.deviceChange.append( + disk_controller_new + ) self.change_detected = True # create other disk controller else: - if disk_ctl_config.get('controller_number') is not None: - disk_controller_new = self.create_controller(disk_ctl_config['type'], disk_ctl_config.get('controller_number')) - self.config_spec.deviceChange.append(disk_controller_new) + if disk_ctl_config.get("controller_number") is not None: + disk_controller_new = self.create_controller( + disk_ctl_config["type"], + disk_ctl_config.get("controller_number"), + ) + self.config_spec.deviceChange.append( + disk_controller_new + ) self.change_detected = True else: - if disk_ctl_config['type'] in self.scsi_device_type.keys(): - self.module.warn("Already 4 SCSI controllers, can not add new '%s' controller." % disk_ctl_config['type']) + if ( + disk_ctl_config["type"] + in self.scsi_device_type.keys() + ): + self.module.warn( + "Already 4 SCSI controllers, can not add new '%s' controller." + % disk_ctl_config["type"] + ) else: - self.module.warn("Already 4 '%s' controllers, can not add new one." % disk_ctl_config['type']) - elif disk_ctl_config and disk_ctl_config['state'] == 'absent': - existing_ctl, has_disks_attached = self.check_ctl_disk_exist(disk_ctl_config['type'], disk_ctl_config.get('controller_number')) + self.module.warn( + "Already 4 '%s' controllers, can not add new one." + % disk_ctl_config["type"] + ) + elif disk_ctl_config and disk_ctl_config["state"] == "absent": + existing_ctl, has_disks_attached = self.check_ctl_disk_exist( + disk_ctl_config["type"], + disk_ctl_config.get("controller_number"), + ) if existing_ctl is not None: if not has_disks_attached: ctl_spec = vim.vm.device.VirtualDeviceSpec() - ctl_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove + ctl_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.remove + ) ctl_spec.device = existing_ctl self.config_spec.deviceChange.append(ctl_spec) self.change_detected = True else: - self.module.warn("Can not remove specified controller, type '%s', bus number '%s'," - " there are disks attaching to it." % (disk_ctl_config['type'], disk_ctl_config.get('controller_number'))) + self.module.warn( + "Can not remove specified controller, type '%s', bus number '%s'," + " there are disks attaching to it." + % ( + disk_ctl_config["type"], + disk_ctl_config.get("controller_number"), + ) + ) else: - self.module.warn("Not find specified controller to remove, type '%s', bus number '%s'." - % (disk_ctl_config['type'], disk_ctl_config.get('controller_number'))) + self.module.warn( + "Not find specified controller to remove, type '%s', bus number '%s'." 
+ % ( + disk_ctl_config["type"], + disk_ctl_config.get("controller_number"), + ) + ) try: task = self.current_vm_obj.ReconfigVM_Task(spec=self.config_spec) wait_for_task(task) except vim.fault.InvalidDeviceSpec as e: - self.module.fail_json(msg="Failed to configure controller on given virtual machine due to invalid" - " device spec : %s" % to_native(e.msg), - details="Please check ESXi server logs for more details.") + self.module.fail_json( + msg="Failed to configure controller on given virtual machine due to invalid" + " device spec : %s" % to_native(e.msg), + details="Please check ESXi server logs for more details.", + ) except vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to reconfigure virtual machine due to" - " product versioning restrictions: %s" % to_native(e.msg)) - if task.info.state == 'error': - results = {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg} + self.module.fail_json( + msg="Failed to reconfigure virtual machine due to" + " product versioning restrictions: %s" % to_native(e.msg) + ) + if task.info.state == "error": + results = { + "changed": self.change_detected, + "failed": True, + "msg": task.info.error.msg, + } else: if self.change_detected: time.sleep(self.sleep_time) - results = {'changed': self.change_detected, 'failed': False, 'disk_controller_data': self.gather_disk_controller_facts()} + results = { + "changed": self.change_detected, + "failed": False, + "disk_controller_data": self.gather_disk_controller_facts(), + } return results @@ -497,40 +603,50 @@ def configure_disk_controllers(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', default='ha-datacenter'), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", default="ha-datacenter"), controllers=dict( - type='list', - elements='dict', + type="list", + elements="dict", required=False, options=dict( - state=dict(type='str', choices=['present', 'absent'], required=True), - controller_number=dict(type='int', choices=[0, 1, 2, 3], required=False), + state=dict( + type="str", choices=["present", "absent"], required=True + ), + controller_number=dict( + type="int", choices=[0, 1, 2, 3], required=False + ), type=dict( - type='str', - choices=['sata', 'nvme', 'lsilogic', 'buslogic', 'lsilogicsas', 'paravirtual', 'usb2', 'usb3'], + type="str", + choices=[ + "sata", + "nvme", + "lsilogic", + "buslogic", + "lsilogicsas", + "paravirtual", + "usb2", + "usb3", + ], required=True, ), ), ), - use_instance_uuid=dict(type='bool', default=False), - gather_disk_controller_facts=dict(type='bool', default=False), - sleep_time=dict(type='int', default=10), + use_instance_uuid=dict(type="bool", default=False), + gather_disk_controller_facts=dict(type="bool", default=False), + sleep_time=dict(type="int", default=10), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists 
before continuing @@ -539,16 +655,23 @@ def main(): if not vm: # We unable to find the virtual machine user specified # Bail out - vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid')) - module.fail_json(msg="Unable to manage disk or USB controllers for non-existing virtual machine '%s'." % vm_id) + vm_id = ( + module.params.get("name") + or module.params.get("uuid") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to manage disk or USB controllers for non-existing virtual machine '%s'." + % vm_id + ) # VM exists result = pyv.configure_disk_controllers() - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_cross_vc_clone.py b/plugins/modules/vmware_guest_cross_vc_clone.py index 74234c1..500f77f 100644 --- a/plugins/modules/vmware_guest_cross_vc_clone.py +++ b/plugins/modules/vmware_guest_cross_vc_clone.py @@ -5,14 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ module: vmware_guest_cross_vc_clone short_description: Cross-vCenter VM/template clone @@ -116,9 +119,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ # Clone template - name: clone a template across VC vmware_guest_cross_vc_clone: @@ -171,9 +174,9 @@ destination_datastore: '{{ destination_datastore }}' destination_vm_folder: '{{ destination_vm_folder }}' check_mode: yes -''' +""" -RETURN = r''' +RETURN = r""" vm_info: description: metadata about the virtual machine returned: always @@ -186,16 +189,25 @@ "vm_folder": "", "power_on": "" } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_hostsystem_by_name, - find_datastore_by_name, - find_folder_by_name, find_vm_by_name, - connect_to_api, vmware_argument_spec, - gather_vm_facts, find_obj, find_resource_pool_by_name, - wait_for_task, TaskError) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_hostsystem_by_name, + find_datastore_by_name, + find_folder_by_name, + find_vm_by_name, + connect_to_api, + vmware_argument_spec, + gather_vm_facts, + find_obj, + find_resource_pool_by_name, + wait_for_task, + TaskError, +) from ansible.module_utils._text import to_native + try: from pyVmomi import vim except ImportError: @@ -209,11 +221,17 @@ def __init__(self, module): self.clone_spec = vim.vm.CloneSpec() self.relocate_spec = vim.vm.RelocateSpec() self.service_locator = vim.ServiceLocator() - self.destination_vcenter = self.params['destination_vcenter'] - self.destination_vcenter_username = self.params['destination_vcenter_username'] - self.destination_vcenter_password = self.params['destination_vcenter_password'] - self.destination_vcenter_port = self.params.get('port', 443) - self.destination_vcenter_validate_certs = self.params.get('destination_vcenter_validate_certs', None) + self.destination_vcenter = self.params["destination_vcenter"] + 
self.destination_vcenter_username = self.params[ + "destination_vcenter_username" + ] + self.destination_vcenter_password = self.params[ + "destination_vcenter_password" + ] + self.destination_vcenter_port = self.params.get("port", 443) + self.destination_vcenter_validate_certs = self.params.get( + "destination_vcenter_validate_certs", None + ) def get_new_vm_info(self, vm): # to check if vm has been cloned in the destination vc @@ -223,40 +241,55 @@ def get_new_vm_info(self, vm): info = {} vm_obj = find_vm_by_name(content=self.destination_content, vm_name=vm) if vm_obj is None: - self.module.fail_json(msg="Newly cloned VM is not found in the destination VCenter") + self.module.fail_json( + msg="Newly cloned VM is not found in the destination VCenter" + ) else: vm_facts = gather_vm_facts(self.destination_content, vm_obj) - info['vm_name'] = vm - info['vcenter'] = self.destination_vcenter - info['host'] = vm_facts['hw_esxi_host'] - info['datastore'] = vm_facts['hw_datastores'] - info['vm_folder'] = vm_facts['hw_folder'] - info['power_on'] = vm_facts['hw_power_status'] + info["vm_name"] = vm + info["vcenter"] = self.destination_vcenter + info["host"] = vm_facts["hw_esxi_host"] + info["datastore"] = vm_facts["hw_datastores"] + info["vm_folder"] = vm_facts["hw_folder"] + info["power_on"] = vm_facts["hw_power_status"] return info def clone(self): # clone the vm/template on destination VC - vm_folder = find_folder_by_name(content=self.destination_content, folder_name=self.params['destination_vm_folder']) + vm_folder = find_folder_by_name( + content=self.destination_content, + folder_name=self.params["destination_vm_folder"], + ) if not vm_folder: - self.module.fail_json(msg="Destination folder does not exist. Please refer to the documentation to correctly specify the folder.") - vm_name = self.params['destination_vm_name'] - task = self.vm_obj.Clone(folder=vm_folder, name=vm_name, spec=self.clone_spec) + self.module.fail_json( + msg="Destination folder does not exist. Please refer to the documentation to correctly specify the folder." 
+ ) + vm_name = self.params["destination_vm_name"] + task = self.vm_obj.Clone( + folder=vm_folder, name=vm_name, spec=self.clone_spec + ) wait_for_task(task) - if task.info.state == 'error': - result = {'changed': False, 'failed': True, 'msg': task.info.error.msg} + if task.info.state == "error": + result = { + "changed": False, + "failed": True, + "msg": task.info.error.msg, + } else: vm_info = self.get_new_vm_info(vm_name) - result = {'changed': True, 'failed': False, 'vm_info': vm_info} + result = {"changed": True, "failed": False, "vm_info": vm_info} return result def sanitize_params(self): - ''' + """ this method is used to verify user provided parameters - ''' + """ self.vm_obj = self.get_vm() if self.vm_obj is None: vm_id = self.vm_uuid or self.vm_name or self.moid - self.module.fail_json(msg="Failed to find the VM/template with %s" % vm_id) + self.module.fail_json( + msg="Failed to find the VM/template with %s" % vm_id + ) # connect to destination VC self.destination_content = connect_to_api( @@ -265,38 +298,64 @@ def sanitize_params(self): username=self.destination_vcenter_username, password=self.destination_vcenter_password, port=self.destination_vcenter_port, - validate_certs=self.destination_vcenter_validate_certs) + validate_certs=self.destination_vcenter_validate_certs, + ) # Check if vm name already exists in the destination VC - vm = find_vm_by_name(content=self.destination_content, vm_name=self.params['destination_vm_name']) + vm = find_vm_by_name( + content=self.destination_content, + vm_name=self.params["destination_vm_name"], + ) if vm: - self.module.exit_json(changed=False, msg="A VM with the given name already exists") + self.module.exit_json( + changed=False, msg="A VM with the given name already exists" + ) - datastore_name = self.params['destination_datastore'] - datastore_cluster = find_obj(self.destination_content, [vim.StoragePod], datastore_name) + datastore_name = self.params["destination_datastore"] + datastore_cluster = find_obj( + self.destination_content, [vim.StoragePod], datastore_name + ) if datastore_cluster: # If user specified datastore cluster so get recommended datastore - datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) + datastore_name = self.get_recommended_datastore( + datastore_cluster_obj=datastore_cluster + ) # Check if get_recommended_datastore or user specified datastore exists or not - self.destination_datastore = find_datastore_by_name(content=self.destination_content, datastore_name=datastore_name) + self.destination_datastore = find_datastore_by_name( + content=self.destination_content, datastore_name=datastore_name + ) if self.destination_datastore is None: self.module.fail_json(msg="Destination datastore not found.") - self.destination_host = find_hostsystem_by_name(content=self.destination_content, hostname=self.params['destination_host']) + self.destination_host = find_hostsystem_by_name( + content=self.destination_content, + hostname=self.params["destination_host"], + ) if self.destination_host is None: self.module.fail_json(msg="Destination host not found.") - if self.params['destination_resource_pool']: + if self.params["destination_resource_pool"]: self.destination_resource_pool = find_resource_pool_by_name( content=self.destination_content, - resource_pool_name=self.params['destination_resource_pool']) + resource_pool_name=self.params["destination_resource_pool"], + ) else: - self.destination_resource_pool = self.destination_host.parent.resourcePool + self.destination_resource_pool = ( + 
self.destination_host.parent.resourcePool + ) def populate_specs(self): # populate service locator - self.service_locator.instanceUuid = self.destination_content.about.instanceUuid - self.service_locator.url = "https://" + self.destination_vcenter + ":" + str(self.params['port']) + "/sdk" + self.service_locator.instanceUuid = ( + self.destination_content.about.instanceUuid + ) + self.service_locator.url = ( + "https://" + + self.destination_vcenter + + ":" + + str(self.params["port"]) + + "/sdk" + ) creds = vim.ServiceLocatorNamePassword() creds.username = self.destination_vcenter_username creds.password = self.destination_vcenter_password @@ -310,7 +369,9 @@ def populate_specs(self): # populate clone spec self.clone_spec.config = self.config_spec - self.clone_spec.powerOn = True if self.params['state'].lower() == 'poweredon' else False + self.clone_spec.powerOn = ( + True if self.params["state"].lower() == "poweredon" else False + ) self.clone_spec.location = self.relocate_spec def get_recommended_datastore(self, datastore_cluster_obj=None): @@ -323,17 +384,21 @@ def get_recommended_datastore(self, datastore_cluster_obj=None): if datastore_cluster_obj is None: return None # Check if Datastore Cluster provided by user is SDRS ready - sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + sdrs_status = ( + datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + ) if sdrs_status: # We can get storage recommendation only if SDRS is enabled on given datastorage cluster pod_sel_spec = vim.storageDrs.PodSelectionSpec() pod_sel_spec.storagePod = datastore_cluster_obj storage_spec = vim.storageDrs.StoragePlacementSpec() storage_spec.podSelectionSpec = pod_sel_spec - storage_spec.type = 'create' + storage_spec.type = "create" try: - rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) + rec = self.content.storageResourceManager.RecommendDatastores( + storageSpec=storage_spec + ) rec_action = rec.recommendations[0].action[0] return rec_action.destination.name except Exception: @@ -342,7 +407,10 @@ def get_recommended_datastore(self, datastore_cluster_obj=None): datastore = None datastore_freespace = 0 for ds in datastore_cluster_obj.childEntity: - if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace: + if ( + isinstance(ds, vim.Datastore) + and ds.summary.freeSpace > datastore_freespace + ): # If datastore field is provided, filter destination datastores if not self.is_datastore_valid(datastore_obj=ds): continue @@ -360,57 +428,56 @@ def main(): """ argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - destination_vm_name=dict(type='str', required=True), - destination_datastore=dict(type='str', required=True), - destination_host=dict(type='str', required=True), - destination_vcenter=dict(type='str', required=True), - destination_vcenter_username=dict(type='str', required=True), - destination_vcenter_password=dict(type='str', required=True, no_log=True), - destination_vcenter_port=dict(type='int', default=443), - destination_vcenter_validate_certs=dict(type='bool', default=False), - destination_vm_folder=dict(type='str', required=True), - destination_resource_pool=dict(type='str', default=None), - state=dict(type='str', default='present', - choices=['present', 'poweredon']) + name=dict(type="str"), + uuid=dict(type="str"), + 
moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + destination_vm_name=dict(type="str", required=True), + destination_datastore=dict(type="str", required=True), + destination_host=dict(type="str", required=True), + destination_vcenter=dict(type="str", required=True), + destination_vcenter_username=dict(type="str", required=True), + destination_vcenter_password=dict( + type="str", required=True, no_log=True + ), + destination_vcenter_port=dict(type="int", default=443), + destination_vcenter_validate_certs=dict(type="bool", default=False), + destination_vm_folder=dict(type="str", required=True), + destination_resource_pool=dict(type="str", default=None), + state=dict( + type="str", default="present", choices=["present", "poweredon"] + ), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[ - ['uuid', 'name', 'moid'], - ], - mutually_exclusive=[ - ['uuid', 'name', 'moid'], - ], + required_one_of=[["uuid", "name", "moid"]], + mutually_exclusive=[["uuid", "name", "moid"]], ) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} if module.check_mode: - if module.params['state'] in ['present']: + if module.params["state"] in ["present"]: result.update( - vm_name=module.params['destination_vm_name'], - vcenter=module.params['destination_vcenter'], - host=module.params['destination_host'], - datastore=module.params['destination_datastore'], - vm_folder=module.params['destination_vm_folder'], - state=module.params['state'], + vm_name=module.params["destination_vm_name"], + vcenter=module.params["destination_vcenter"], + host=module.params["destination_host"], + datastore=module.params["destination_datastore"], + vm_folder=module.params["destination_vm_folder"], + state=module.params["state"], changed=True, - desired_operation='Create VM with PowerOff State' + desired_operation="Create VM with PowerOff State", ) - if module.params['state'] == 'poweredon': + if module.params["state"] == "poweredon": result.update( - vm_name=module.params['destination_vm_name'], - vcenter=module.params['destination_vcenter'], - host=module.params['destination_host'], - datastore=module.params['destination_datastore'], - vm_folder=module.params['destination_vm_folder'], - state=module.params['state'], + vm_name=module.params["destination_vm_name"], + vcenter=module.params["destination_vcenter"], + host=module.params["destination_host"], + datastore=module.params["destination_datastore"], + vm_folder=module.params["destination_vm_folder"], + state=module.params["state"], changed=True, - desired_operation='Create VM with PowerON State' + desired_operation="Create VM with PowerON State", ) module.exit_json(**result) @@ -419,11 +486,11 @@ def main(): clone_manager.populate_specs() result = clone_manager.clone() - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_custom_attribute_defs.py b/plugins/modules/vmware_guest_custom_attribute_defs.py index f219af7..e223f39 100644 --- a/plugins/modules/vmware_guest_custom_attribute_defs.py +++ b/plugins/modules/vmware_guest_custom_attribute_defs.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': 
['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_custom_attribute_defs short_description: Manage custom attributes definitions for virtual machine from VMware @@ -50,9 +51,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add VMware Attribute Definition vmware_guest_custom_attribute_defs: hostname: "{{ vcenter_hostname }}" @@ -72,7 +73,7 @@ attribute_key: custom_attr_def_1 delegate_to: localhost register: defs -''' +""" RETURN = """ custom_attribute_defs: @@ -83,7 +84,10 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) try: from pyVmomi import vim @@ -102,10 +106,16 @@ def remove_custom_def(self, field): if x.name == field and x.managedObjectType == vim.VirtualMachine: changed = True if not self.module.check_mode: - self.content.customFieldsManager.RemoveCustomFieldDef(key=x.key) + self.content.customFieldsManager.RemoveCustomFieldDef( + key=x.key + ) break f[x.name] = (x.key, x.managedObjectType) - return {'changed': changed, 'failed': False, 'custom_attribute_defs': list(f.keys())} + return { + "changed": changed, + "failed": False, + "custom_attribute_defs": list(f.keys()), + } def add_custom_def(self, field): changed = False @@ -119,35 +129,43 @@ def add_custom_def(self, field): if not found: changed = True if not self.module.check_mode: - new_field = self.content.customFieldsManager.AddFieldDefinition(name=field, moType=vim.VirtualMachine) + new_field = self.content.customFieldsManager.AddFieldDefinition( + name=field, moType=vim.VirtualMachine + ) f[new_field.name] = (new_field.key, new_field.type) - return {'changed': changed, 'failed': False, 'custom_attribute_defs': list(f.keys())} + return { + "changed": changed, + "failed": False, + "custom_attribute_defs": list(f.keys()), + } def main(): argument_spec = vmware_argument_spec() argument_spec.update( - attribute_key=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'present']), + attribute_key=dict(type="str"), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ - ['state', 'present', ['attribute_key']], - ['state', 'absent', ['attribute_key']], - ] + ["state", "present", ["attribute_key"]], + ["state", "absent", ["attribute_key"]], + ], ) pyv = VmAttributeDefManager(module) results = dict(changed=False, custom_attribute_defs=list()) - if module.params['state'] == "present": - results = pyv.add_custom_def(module.params['attribute_key']) - elif module.params['state'] == "absent": - results = pyv.remove_custom_def(module.params['attribute_key']) + if module.params["state"] == "present": + results = pyv.add_custom_def(module.params["attribute_key"]) + elif module.params["state"] == "absent": + results = pyv.remove_custom_def(module.params["attribute_key"]) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_custom_attributes.py b/plugins/modules/vmware_guest_custom_attributes.py index 61103e5..4040b0e 100644 --- 
a/plugins/modules/vmware_guest_custom_attributes.py +++ b/plugins/modules/vmware_guest_custom_attributes.py @@ -7,17 +7,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_custom_attributes short_description: Manage custom attributes from VMware for the given virtual machine @@ -79,9 +80,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add virtual machine custom attributes vmware_guest_custom_attributes: hostname: "{{ vcenter_hostname }}" @@ -133,7 +134,7 @@ - name: MyAttribute delegate_to: localhost register: attributes -''' +""" RETURN = """ custom_attributes: @@ -155,7 +156,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VmAttributeManager(PyVmomi): @@ -168,30 +172,45 @@ def set_custom_field(self, vm, user_fields): changed = False for field in user_fields: - field_key = self.check_exists(field['name']) + field_key = self.check_exists(field["name"]) found = False - field_value = field.get('value', '') + field_value = field.get("value", "") - for k, v in [(x.name, v.value) for x in self.custom_field_mgr for v in vm.customValue if x.key == v.key]: - if k == field['name']: + for k, v in [ + (x.name, v.value) + for x in self.custom_field_mgr + for v in vm.customValue + if x.key == v.key + ]: + if k == field["name"]: found = True if v != field_value: if not self.module.check_mode: - self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value) + self.content.customFieldsManager.SetField( + entity=vm, key=field_key.key, value=field_value + ) result_fields[k] = field_value change_list.append(True) if not found and field_value != "": if not field_key and not self.module.check_mode: - field_key = self.content.customFieldsManager.AddFieldDefinition(name=field['name'], moType=vim.VirtualMachine) + field_key = self.content.customFieldsManager.AddFieldDefinition( + name=field["name"], moType=vim.VirtualMachine + ) change_list.append(True) if not self.module.check_mode: - self.content.customFieldsManager.SetField(entity=vm, key=field_key.key, value=field_value) - result_fields[field['name']] = field_value + self.content.customFieldsManager.SetField( + entity=vm, key=field_key.key, value=field_value + ) + result_fields[field["name"]] = field_value if any(change_list): changed = True - return {'changed': changed, 'failed': False, 'custom_attributes': result_fields} + return { + "changed": changed, + "failed": False, + "custom_attributes": result_fields, + } def check_exists(self, field): for x in self.custom_field_mgr: @@ -203,56 +222,60 @@ def check_exists(self, field): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter=dict(type='str'), - name=dict(type='str'), - folder=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - state=dict(type='str', default='present', - 
choices=['absent', 'present']), + datacenter=dict(type="str"), + name=dict(type="str"), + folder=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), attributes=dict( - type='list', + type="list", default=[], options=dict( - name=dict(type='str', required=True), - value=dict(type='str'), - ) + name=dict(type="str", required=True), value=dict(type="str") + ), ), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], ) - if module.params.get('folder'): + if module.params.get("folder"): # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = VmAttributeManager(module) - results = {'changed': False, 'failed': False, 'instance': dict()} + results = {"changed": False, "failed": False, "instance": dict()} # Check if the virtual machine exists before continuing vm = pyv.get_vm() if vm: # virtual machine already exists - if module.params['state'] == "present": - results = pyv.set_custom_field(vm, module.params['attributes']) - elif module.params['state'] == "absent": - results = pyv.set_custom_field(vm, module.params['attributes']) + if module.params["state"] == "present": + results = pyv.set_custom_field(vm, module.params["attributes"]) + elif module.params["state"] == "absent": + results = pyv.set_custom_field(vm, module.params["attributes"]) module.exit_json(**results) else: # virtual machine does not exists - vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid')) - module.fail_json(msg="Unable to manage custom attributes for non-existing" - " virtual machine %s" % vm_id) + vm_id = ( + module.params.get("name") + or module.params.get("uuid") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to manage custom attributes for non-existing" + " virtual machine %s" % vm_id + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_customization_info.py b/plugins/modules/vmware_guest_customization_info.py index 5aaca4e..585c625 100644 --- a/plugins/modules/vmware_guest_customization_info.py +++ b/plugins/modules/vmware_guest_customization_info.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_customization_info short_description: Gather info about VM customization specifications @@ -37,9 +38,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather info about all customization specification vmware_guest_customization_info: hostname: "{{ vcenter_hostname }}" @@ -58,7 +59,7 @@ spec_name: custom_linux_spec delegate_to: localhost register: custom_spec_info -''' +""" RETURN = """ custom_spec_info: @@ -101,7 +102,10 @@ from ansible.module_utils.basic import AnsibleModule 
from ansible.module_utils._text import to_text -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VmwareCustomSpecManger(PyVmomi): @@ -109,20 +113,25 @@ def __init__(self, module): super(VmwareCustomSpecManger, self).__init__(module) self.cc_mgr = self.content.customizationSpecManager if self.cc_mgr is None: - self.module.fail_json(msg="Failed to get customization spec manager.") + self.module.fail_json( + msg="Failed to get customization spec manager." + ) def gather_custom_spec_info(self): """ Gather information about customization specifications """ - spec_name = self.params.get('spec_name', None) + spec_name = self.params.get("spec_name", None) specs_list = [] if spec_name: if self.cc_mgr.DoesCustomizationSpecExist(name=spec_name): specs_list.append(spec_name) else: - self.module.fail_json(msg="Unable to find customization specification named '%s'" % spec_name) + self.module.fail_json( + msg="Unable to find customization specification named '%s'" + % spec_name + ) else: available_specs = self.cc_mgr.info for spec_info in available_specs: @@ -138,7 +147,9 @@ def gather_custom_spec_info(self): ip_address=nic.adapter.ip.ipAddress, subnet_mask=nic.adapter.subnetMask, gateway=[gw for gw in nic.adapter.gateway], - nic_dns_server_list=[ndsl for ndsl in nic.adapter.dnsServerList], + nic_dns_server_list=[ + ndsl for ndsl in nic.adapter.dnsServerList + ], dns_domain=nic.adapter.dnsDomain, primary_wins=nic.adapter.primaryWINS, secondry_wins=nic.adapter.secondaryWINS, @@ -147,9 +158,15 @@ def gather_custom_spec_info(self): adapter_mapping_list.append(temp_data) current_hostname = None - if isinstance(current_spec.spec.identity.hostName, vim.vm.customization.PrefixNameGenerator): + if isinstance( + current_spec.spec.identity.hostName, + vim.vm.customization.PrefixNameGenerator, + ): current_hostname = current_spec.spec.identity.hostName.base - elif isinstance(current_spec.spec.identity.hostName, vim.vm.customization.FixedName): + elif isinstance( + current_spec.spec.identity.hostName, + vim.vm.customization.FixedName, + ): current_hostname = current_spec.spec.identity.hostName.name spec_info[spec] = dict( @@ -165,8 +182,12 @@ def gather_custom_spec_info(self): time_zone=current_spec.spec.identity.timeZone, hw_clock_utc=current_spec.spec.identity.hwClockUTC, # global IP Settings - dns_suffix_list=[i for i in current_spec.spec.globalIPSettings.dnsSuffixList], - dns_server_list=[i for i in current_spec.spec.globalIPSettings.dnsServerList], + dns_suffix_list=[ + i for i in current_spec.spec.globalIPSettings.dnsSuffixList + ], + dns_server_list=[ + i for i in current_spec.spec.globalIPSettings.dnsServerList + ], # NIC setting map nic_setting_map=adapter_mapping_list, ) @@ -175,20 +196,20 @@ def gather_custom_spec_info(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update( - spec_name=dict(type='str'), - ) + argument_spec.update(spec_name=dict(type="str")) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + argument_spec=argument_spec, supports_check_mode=True ) pyv = VmwareCustomSpecManger(module) try: module.exit_json(custom_spec_info=pyv.gather_custom_spec_info()) except Exception as exc: - module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc)) + module.fail_json( + msg="Failed to gather information with exception : %s" + % to_text(exc) 
+ ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_disk.py b/plugins/modules/vmware_guest_disk.py index 0aaa845..f39384c 100644 --- a/plugins/modules/vmware_guest_disk.py +++ b/plugins/modules/vmware_guest_disk.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_disk short_description: Manage disks related to virtual machine in given vCenter infrastructure @@ -151,9 +152,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add disks to virtual machine using UUID vmware_guest_disk: hostname: "{{ vcenter_hostname }}" @@ -280,7 +281,7 @@ destroy: no delegate_to: localhost register: disk_facts -''' +""" RETURN = """ disk_status: @@ -307,6 +308,7 @@ """ import re + try: from pyVmomi import vim except ImportError: @@ -314,18 +316,28 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, find_obj, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + find_obj, + get_all_objs, +) class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) - self.desired_disks = self.params['disk'] # Match with vmware_guest parameter + self.desired_disks = self.params[ + "disk" + ] # Match with vmware_guest parameter self.vm = None - self.scsi_device_type = dict(lsilogic=vim.vm.device.VirtualLsiLogicController, - paravirtual=vim.vm.device.ParaVirtualSCSIController, - buslogic=vim.vm.device.VirtualBusLogicController, - lsilogicsas=vim.vm.device.VirtualLsiLogicSASController) + self.scsi_device_type = dict( + lsilogic=vim.vm.device.VirtualLsiLogicController, + paravirtual=vim.vm.device.ParaVirtualSCSIController, + buslogic=vim.vm.device.VirtualBusLogicController, + lsilogicsas=vim.vm.device.VirtualLsiLogicSASController, + ) self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] @@ -345,7 +357,7 @@ def create_scsi_controller(self, scsi_type, scsi_bus_number): scsi_ctl.device.unitNumber = 3 scsi_ctl.device.busNumber = scsi_bus_number scsi_ctl.device.hotAddRemove = True - scsi_ctl.device.sharedBus = 'noSharing' + scsi_ctl.device.sharedBus = "noSharing" scsi_ctl.device.scsiCtlrUnitNumber = 7 return scsi_ctl @@ -366,7 +378,9 @@ def create_scsi_disk(scsi_ctl_key, disk_index, disk_mode, disk_filename): disk_spec = vim.vm.device.VirtualDeviceSpec() disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add disk_spec.device = vim.vm.device.VirtualDisk() - disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo() + disk_spec.device.backing = ( + vim.vm.device.VirtualDisk.FlatVer2BackingInfo() + ) disk_spec.device.backing.diskMode = disk_mode disk_spec.device.controllerKey = scsi_ctl_key disk_spec.device.unitNumber = disk_index @@ -374,7 +388,9 @@ def create_scsi_disk(scsi_ctl_key, disk_index, disk_mode, disk_filename): if disk_filename is not None: 
disk_spec.device.backing.fileName = disk_filename else: - disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create + disk_spec.fileOperation = ( + vim.vm.device.VirtualDeviceSpec.FileOperation.create + ) return disk_spec @@ -388,37 +404,44 @@ def reconfigure_vm(self, config_spec, device_type): Returns: Boolean status 'changed' and actual task result """ - changed, results = (False, '') + changed, results = (False, "") try: # Perform actual VM reconfiguration task = self.vm.ReconfigVM_Task(spec=config_spec) changed, results = wait_for_task(task) except vim.fault.InvalidDeviceSpec as invalid_device_spec: - self.module.fail_json(msg="Failed to manage %s on given virtual machine due to invalid" - " device spec : %s" % (device_type, to_native(invalid_device_spec.msg)), - details="Please check ESXi server logs for more details.") + self.module.fail_json( + msg="Failed to manage %s on given virtual machine due to invalid" + " device spec : %s" + % (device_type, to_native(invalid_device_spec.msg)), + details="Please check ESXi server logs for more details.", + ) except vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to reconfigure virtual machine due to" - " product versioning restrictions: %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to reconfigure virtual machine due to" + " product versioning restrictions: %s" % to_native(e.msg) + ) return changed, results def get_ioandshares_diskconfig(self, disk_spec, disk): io_disk_spec = vim.StorageResourceManager.IOAllocationInfo() - if 'iolimit' in disk: - io_disk_spec.limit = disk['iolimit']['limit'] - if 'shares' in disk['iolimit']: + if "iolimit" in disk: + io_disk_spec.limit = disk["iolimit"]["limit"] + if "shares" in disk["iolimit"]: shares_spec = vim.SharesInfo() - shares_spec.level = disk['iolimit']['shares']['level'] - if shares_spec.level == 'custom': - shares_spec.shares = disk['iolimit']['shares']['level_value'] + shares_spec.level = disk["iolimit"]["shares"]["level"] + if shares_spec.level == "custom": + shares_spec.shares = disk["iolimit"]["shares"][ + "level_value" + ] io_disk_spec.shares = shares_spec disk_spec.device.storageIOAllocation = io_disk_spec - if 'shares' in disk: + if "shares" in disk: shares_spec = vim.SharesInfo() - shares_spec.level = disk['shares']['level'] - if shares_spec.level == 'custom': - shares_spec.shares = disk['shares']['level_value'] + shares_spec.level = disk["shares"]["level"] + if shares_spec.level == "custom": + shares_spec.shares = disk["shares"]["level_value"] io_disk_spec.shares = shares_spec disk_spec.device.storageIOAllocation = io_disk_spec return disk_spec @@ -448,14 +471,19 @@ def ensure_disks(self, vm_obj=None): scsi_changed = False for disk in disk_data: - scsi_controller = disk['scsi_controller'] + 1000 - if scsi_controller not in current_scsi_info and disk['state'] == 'present': - scsi_ctl = self.create_scsi_controller(disk['scsi_type'], disk['scsi_controller']) + scsi_controller = disk["scsi_controller"] + 1000 + if ( + scsi_controller not in current_scsi_info + and disk["state"] == "present" + ): + scsi_ctl = self.create_scsi_controller( + disk["scsi_type"], disk["scsi_controller"] + ) current_scsi_info[scsi_controller] = dict(disks=dict()) self.config_spec.deviceChange.append(scsi_ctl) scsi_changed = True if scsi_changed: - self.reconfigure_vm(self.config_spec, 'SCSI Controller') + self.reconfigure_vm(self.config_spec, "SCSI Controller") self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] @@ -464,80 +492,128 @@ 
def ensure_disks(self, vm_obj=None): if isinstance(device, vim.vm.device.VirtualDisk): # Found Virtual Disk device if device.controllerKey not in current_scsi_info: - current_scsi_info[device.controllerKey] = dict(disks=dict()) - current_scsi_info[device.controllerKey]['disks'][device.unitNumber] = device + current_scsi_info[device.controllerKey] = dict( + disks=dict() + ) + current_scsi_info[device.controllerKey]["disks"][ + device.unitNumber + ] = device vm_name = self.vm.name disk_change_list = [] for disk in disk_data: disk_change = False - scsi_controller = disk['scsi_controller'] + 1000 # VMware auto assign 1000 + SCSI Controller - if disk['disk_unit_number'] not in current_scsi_info[scsi_controller]['disks'] and disk['state'] == 'present': + scsi_controller = ( + disk["scsi_controller"] + 1000 + ) # VMware auto assign 1000 + SCSI Controller + if ( + disk["disk_unit_number"] + not in current_scsi_info[scsi_controller]["disks"] + and disk["state"] == "present" + ): # Add new disk - disk_spec = self.create_scsi_disk(scsi_controller, disk['disk_unit_number'], disk['disk_mode'], disk['filename']) - if disk['filename'] is None: - disk_spec.device.capacityInKB = disk['size'] - if disk['disk_type'] == 'thin': + disk_spec = self.create_scsi_disk( + scsi_controller, + disk["disk_unit_number"], + disk["disk_mode"], + disk["filename"], + ) + if disk["filename"] is None: + disk_spec.device.capacityInKB = disk["size"] + if disk["disk_type"] == "thin": disk_spec.device.backing.thinProvisioned = True - elif disk['disk_type'] == 'eagerzeroedthick': + elif disk["disk_type"] == "eagerzeroedthick": disk_spec.device.backing.eagerlyScrub = True - if disk['filename'] is None: - disk_spec.device.backing.fileName = "[%s] %s/%s_%s_%s.vmdk" % ( - disk['datastore'].name, - vm_name, vm_name, - str(scsi_controller), - str(disk['disk_unit_number'])) + if disk["filename"] is None: + disk_spec.device.backing.fileName = ( + "[%s] %s/%s_%s_%s.vmdk" + % ( + disk["datastore"].name, + vm_name, + vm_name, + str(scsi_controller), + str(disk["disk_unit_number"]), + ) + ) else: - disk_spec.device.backing.fileName = disk['filename'] - disk_spec.device.backing.datastore = disk['datastore'] + disk_spec.device.backing.fileName = disk["filename"] + disk_spec.device.backing.datastore = disk["datastore"] disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk) self.config_spec.deviceChange.append(disk_spec) disk_change = True - current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] = disk_spec.device - results['disk_changes'][disk['disk_index']] = "Disk created." - elif disk['disk_unit_number'] in current_scsi_info[scsi_controller]['disks']: - if disk['state'] == 'present': + current_scsi_info[scsi_controller]["disks"][ + disk["disk_unit_number"] + ] = disk_spec.device + results["disk_changes"][disk["disk_index"]] = "Disk created." + elif ( + disk["disk_unit_number"] + in current_scsi_info[scsi_controller]["disks"] + ): + if disk["state"] == "present": disk_spec = vim.vm.device.VirtualDeviceSpec() # set the operation to edit so that it knows to keep other settings - disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] + disk_spec.device = current_scsi_info[scsi_controller][ + "disks" + ][disk["disk_unit_number"]] # Edit and no resizing allowed - if disk['size'] < disk_spec.device.capacityInKB: - self.module.fail_json(msg="Given disk size at disk index [%s] is smaller than found (%d < %d)." - "Reducing disks is not allowed." 
% (disk['disk_index'], - disk['size'], - disk_spec.device.capacityInKB)) - if disk['size'] != disk_spec.device.capacityInKB: - disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit - disk_spec = self.get_ioandshares_diskconfig(disk_spec, disk) - disk_spec.device.capacityInKB = disk['size'] + if disk["size"] < disk_spec.device.capacityInKB: + self.module.fail_json( + msg="Given disk size at disk index [%s] is smaller than found (%d < %d)." + "Reducing disks is not allowed." + % ( + disk["disk_index"], + disk["size"], + disk_spec.device.capacityInKB, + ) + ) + if disk["size"] != disk_spec.device.capacityInKB: + disk_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) + disk_spec = self.get_ioandshares_diskconfig( + disk_spec, disk + ) + disk_spec.device.capacityInKB = disk["size"] self.config_spec.deviceChange.append(disk_spec) disk_change = True - results['disk_changes'][disk['disk_index']] = "Disk size increased." + results["disk_changes"][ + disk["disk_index"] + ] = "Disk size increased." else: - results['disk_changes'][disk['disk_index']] = "Disk already exists." + results["disk_changes"][ + disk["disk_index"] + ] = "Disk already exists." - elif disk['state'] == 'absent': + elif disk["state"] == "absent": # Disk already exists, deleting disk_spec = vim.vm.device.VirtualDeviceSpec() - disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove - if disk['destroy'] is True: - disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.destroy - disk_spec.device = current_scsi_info[scsi_controller]['disks'][disk['disk_unit_number']] + disk_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.remove + ) + if disk["destroy"] is True: + disk_spec.fileOperation = ( + vim.vm.device.VirtualDeviceSpec.FileOperation.destroy + ) + disk_spec.device = current_scsi_info[scsi_controller][ + "disks" + ][disk["disk_unit_number"]] self.config_spec.deviceChange.append(disk_spec) disk_change = True - results['disk_changes'][disk['disk_index']] = "Disk deleted." + results["disk_changes"][ + disk["disk_index"] + ] = "Disk deleted." if disk_change: # Adding multiple disks in a single attempt raises weird errors # So adding single disk at a time. - self.reconfigure_vm(self.config_spec, 'disks') + self.reconfigure_vm(self.config_spec, "disks") self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] disk_change_list.append(disk_change) if any(disk_change_list): - results['changed'] = True - results['disk_data'] = self.gather_disk_facts(vm_obj=self.vm) + results["changed"] = True + results["disk_data"] = self.gather_disk_facts(vm_obj=self.vm) self.module.exit_json(**results) def sanitize_disk_inputs(self): @@ -548,57 +624,77 @@ def sanitize_disk_inputs(self): """ disks_data = list() if not self.desired_disks: - self.module.exit_json(changed=False, msg="No disks provided for virtual" - " machine '%s' for management." % self.vm.name) + self.module.exit_json( + changed=False, + msg="No disks provided for virtual" + " machine '%s' for management." 
% self.vm.name, + ) for disk_index, disk in enumerate(self.desired_disks): # Initialize default value for disk - current_disk = dict(disk_index=disk_index, - state='present', - destroy=True, - filename=None, - datastore=None, - autoselect_datastore=True, - disk_unit_number=0, - scsi_controller=0, - disk_mode='persistent') + current_disk = dict( + disk_index=disk_index, + state="present", + destroy=True, + filename=None, + datastore=None, + autoselect_datastore=True, + disk_unit_number=0, + scsi_controller=0, + disk_mode="persistent", + ) # Check state - if 'state' in disk: - if disk['state'] not in ['absent', 'present']: - self.module.fail_json(msg="Invalid state provided '%s' for disk index [%s]." - " State can be either - 'absent', 'present'" % (disk['state'], - disk_index)) + if "state" in disk: + if disk["state"] not in ["absent", "present"]: + self.module.fail_json( + msg="Invalid state provided '%s' for disk index [%s]." + " State can be either - 'absent', 'present'" + % (disk["state"], disk_index) + ) else: - current_disk['state'] = disk['state'] + current_disk["state"] = disk["state"] - if current_disk['state'] == 'absent': - current_disk['destroy'] = disk['destroy'] - elif current_disk['state'] == 'present': + if current_disk["state"] == "absent": + current_disk["destroy"] = disk["destroy"] + elif current_disk["state"] == "present": # Select datastore or datastore cluster - if 'datastore' in disk: - if 'autoselect_datastore' in disk: - self.module.fail_json(msg="Please specify either 'datastore' " - "or 'autoselect_datastore' for disk index [%s]" % disk_index) + if "datastore" in disk: + if "autoselect_datastore" in disk: + self.module.fail_json( + msg="Please specify either 'datastore' " + "or 'autoselect_datastore' for disk index [%s]" + % disk_index + ) # Check if given value is datastore or datastore cluster - datastore_name = disk['datastore'] - datastore_cluster = find_obj(self.content, [vim.StoragePod], datastore_name) + datastore_name = disk["datastore"] + datastore_cluster = find_obj( + self.content, [vim.StoragePod], datastore_name + ) if datastore_cluster: # If user specified datastore cluster so get recommended datastore - datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster) + datastore_name = self.get_recommended_datastore( + datastore_cluster_obj=datastore_cluster + ) # Check if get_recommended_datastore or user specified datastore exists or not - datastore = find_obj(self.content, [vim.Datastore], datastore_name) + datastore = find_obj( + self.content, [vim.Datastore], datastore_name + ) if datastore is None: - self.module.fail_json(msg="Failed to find datastore named '%s' " - "in given configuration." % disk['datastore']) - current_disk['datastore'] = datastore - current_disk['autoselect_datastore'] = False - elif 'autoselect_datastore' in disk: + self.module.fail_json( + msg="Failed to find datastore named '%s' " + "in given configuration." % disk["datastore"] + ) + current_disk["datastore"] = datastore + current_disk["autoselect_datastore"] = False + elif "autoselect_datastore" in disk: # Find datastore which fits requirement datastores = get_all_objs(self.content, [vim.Datastore]) if not datastores: - self.module.fail_json(msg="Failed to gather information about" - " available datastores in given datacenter.") + self.module.fail_json( + msg="Failed to gather information about" + " available datastores in given datacenter." 
+ ) datastore = None datastore_freespace = 0 for ds in datastores: @@ -606,29 +702,41 @@ def sanitize_disk_inputs(self): # If datastore field is provided, filter destination datastores datastore = ds datastore_freespace = ds.summary.freeSpace - current_disk['datastore'] = datastore - - if 'datastore' not in disk and 'autoselect_datastore' not in disk and 'filename' not in disk: - self.module.fail_json(msg="Either 'datastore' or 'autoselect_datastore' is" - " required parameter while creating disk for " - "disk index [%s]." % disk_index) - - if 'filename' in disk: - current_disk['filename'] = disk['filename'] - - if [x for x in disk.keys() if x.startswith('size_') or x == 'size']: + current_disk["datastore"] = datastore + + if ( + "datastore" not in disk + and "autoselect_datastore" not in disk + and "filename" not in disk + ): + self.module.fail_json( + msg="Either 'datastore' or 'autoselect_datastore' is" + " required parameter while creating disk for " + "disk index [%s]." % disk_index + ) + + if "filename" in disk: + current_disk["filename"] = disk["filename"] + + if [ + x + for x in disk.keys() + if x.startswith("size_") or x == "size" + ]: # size, size_tb, size_gb, size_mb, size_kb disk_size_parse_failed = False - if 'size' in disk: - size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])') - disk_size_m = size_regex.match(disk['size']) + if "size" in disk: + size_regex = re.compile( + r"(\d+(?:\.\d+)?)([tgmkTGMK][bB])" + ) + disk_size_m = size_regex.match(disk["size"]) if disk_size_m: expected = disk_size_m.group(1) unit = disk_size_m.group(2) else: disk_size_parse_failed = True try: - if re.match(r'\d+\.\d+', expected): + if re.match(r"\d+\.\d+", expected): # We found float value in string, let's typecast it expected = float(expected) else: @@ -639,14 +747,16 @@ def sanitize_disk_inputs(self): else: # Even multiple size_ parameter provided by user, # consider first value only - param = [x for x in disk.keys() if x.startswith('size_')][0] - unit = param.split('_')[-1] + param = [ + x for x in disk.keys() if x.startswith("size_") + ][0] + unit = param.split("_")[-1] disk_size = disk[param] if isinstance(disk_size, (float, int)): disk_size = str(disk_size) try: - if re.match(r'\d+\.\d+', disk_size): + if re.match(r"\d+\.\d+", disk_size): # We found float value in string, let's typecast it expected = float(disk_size) else: @@ -657,86 +767,130 @@ def sanitize_disk_inputs(self): if disk_size_parse_failed: # Common failure - self.module.fail_json(msg="Failed to parse disk size for disk index [%s]," - " please review value provided" - " using documentation." % disk_index) + self.module.fail_json( + msg="Failed to parse disk size for disk index [%s]," + " please review value provided" + " using documentation." % disk_index + ) disk_units = dict(tb=3, gb=2, mb=1, kb=0) unit = unit.lower() if unit in disk_units: - current_disk['size'] = expected * (1024 ** disk_units[unit]) + current_disk["size"] = expected * ( + 1024 ** disk_units[unit] + ) else: - self.module.fail_json(msg="%s is not a supported unit for disk size for disk index [%s]." - " Supported units are ['%s']." % (unit, - disk_index, - "', '".join(disk_units.keys()))) - - elif current_disk['filename'] is None: + self.module.fail_json( + msg="%s is not a supported unit for disk size for disk index [%s]." + " Supported units are ['%s']." 
+ % ( + unit, + disk_index, + "', '".join(disk_units.keys()), + ) + ) + + elif current_disk["filename"] is None: # No size found but disk, fail - self.module.fail_json(msg="No size, size_kb, size_mb, size_gb or size_tb" - " attribute found into disk index [%s] configuration." % disk_index) + self.module.fail_json( + msg="No size, size_kb, size_mb, size_gb or size_tb" + " attribute found into disk index [%s] configuration." + % disk_index + ) # Check SCSI controller key - if 'scsi_controller' in disk: + if "scsi_controller" in disk: try: - temp_disk_controller = int(disk['scsi_controller']) + temp_disk_controller = int(disk["scsi_controller"]) except ValueError: - self.module.fail_json(msg="Invalid SCSI controller ID '%s' specified" - " at index [%s]" % (disk['scsi_controller'], disk_index)) + self.module.fail_json( + msg="Invalid SCSI controller ID '%s' specified" + " at index [%s]" + % (disk["scsi_controller"], disk_index) + ) if temp_disk_controller not in range(0, 4): # Only 4 SCSI controllers are allowed per VM - self.module.fail_json(msg="Invalid SCSI controller ID specified [%s]," - " please specify value between 0 to 3 only." % temp_disk_controller) - current_disk['scsi_controller'] = temp_disk_controller + self.module.fail_json( + msg="Invalid SCSI controller ID specified [%s]," + " please specify value between 0 to 3 only." + % temp_disk_controller + ) + current_disk["scsi_controller"] = temp_disk_controller else: - self.module.fail_json(msg="Please specify 'scsi_controller' under disk parameter" - " at index [%s], which is required while creating disk." % disk_index) + self.module.fail_json( + msg="Please specify 'scsi_controller' under disk parameter" + " at index [%s], which is required while creating disk." + % disk_index + ) # Check for disk unit number - if 'unit_number' in disk: + if "unit_number" in disk: try: - temp_disk_unit_number = int(disk['unit_number']) + temp_disk_unit_number = int(disk["unit_number"]) except ValueError: - self.module.fail_json(msg="Invalid Disk unit number ID '%s'" - " specified at index [%s]" % (disk['unit_number'], disk_index)) + self.module.fail_json( + msg="Invalid Disk unit number ID '%s'" + " specified at index [%s]" + % (disk["unit_number"], disk_index) + ) if temp_disk_unit_number not in range(0, 16): - self.module.fail_json(msg="Invalid Disk unit number ID specified for disk [%s] at index [%s]," - " please specify value between 0 to 15" - " only (excluding 7)." % (temp_disk_unit_number, disk_index)) + self.module.fail_json( + msg="Invalid Disk unit number ID specified for disk [%s] at index [%s]," + " please specify value between 0 to 15" + " only (excluding 7)." + % (temp_disk_unit_number, disk_index) + ) if temp_disk_unit_number == 7: - self.module.fail_json(msg="Invalid Disk unit number ID specified for disk at index [%s]," - " please specify value other than 7 as it is reserved" - "for SCSI Controller" % disk_index) - current_disk['disk_unit_number'] = temp_disk_unit_number + self.module.fail_json( + msg="Invalid Disk unit number ID specified for disk at index [%s]," + " please specify value other than 7 as it is reserved" + "for SCSI Controller" % disk_index + ) + current_disk["disk_unit_number"] = temp_disk_unit_number else: - self.module.fail_json(msg="Please specify 'unit_number' under disk parameter" - " at index [%s], which is required while creating disk." % disk_index) + self.module.fail_json( + msg="Please specify 'unit_number' under disk parameter" + " at index [%s], which is required while creating disk." 
+ % disk_index + ) # Type of Disk - disk_type = disk.get('type', 'thick').lower() - if disk_type not in ['thin', 'thick', 'eagerzeroedthick']: - self.module.fail_json(msg="Invalid 'disk_type' specified for disk index [%s]. Please specify" - " 'disk_type' value from ['thin', 'thick', 'eagerzeroedthick']." % disk_index) - current_disk['disk_type'] = disk_type + disk_type = disk.get("type", "thick").lower() + if disk_type not in ["thin", "thick", "eagerzeroedthick"]: + self.module.fail_json( + msg="Invalid 'disk_type' specified for disk index [%s]. Please specify" + " 'disk_type' value from ['thin', 'thick', 'eagerzeroedthick']." + % disk_index + ) + current_disk["disk_type"] = disk_type # Mode of Disk - temp_disk_mode = disk.get('disk_mode', 'persistent').lower() - if temp_disk_mode not in ['persistent', 'independent_persistent', 'independent_nonpersistent']: - self.module.fail_json(msg="Invalid 'disk_mode' specified for disk index [%s]. Please specify" - " 'disk_mode' value from ['persistent', 'independent_persistent', 'independent_nonpersistent']." % disk_index) - current_disk['disk_mode'] = temp_disk_mode + temp_disk_mode = disk.get("disk_mode", "persistent").lower() + if temp_disk_mode not in [ + "persistent", + "independent_persistent", + "independent_nonpersistent", + ]: + self.module.fail_json( + msg="Invalid 'disk_mode' specified for disk index [%s]. Please specify" + " 'disk_mode' value from ['persistent', 'independent_persistent', 'independent_nonpersistent']." + % disk_index + ) + current_disk["disk_mode"] = temp_disk_mode # SCSI Controller Type - scsi_contrl_type = disk.get('scsi_type', 'paravirtual').lower() + scsi_contrl_type = disk.get("scsi_type", "paravirtual").lower() if scsi_contrl_type not in self.scsi_device_type.keys(): - self.module.fail_json(msg="Invalid 'scsi_type' specified for disk index [%s]. Please specify" - " 'scsi_type' value from ['%s']" % (disk_index, - "', '".join(self.scsi_device_type.keys()))) - current_disk['scsi_type'] = scsi_contrl_type - if 'shares' in disk: - current_disk['shares'] = disk['shares'] - if 'iolimit' in disk: - current_disk['iolimit'] = disk['iolimit'] + self.module.fail_json( + msg="Invalid 'scsi_type' specified for disk index [%s]. 
Please specify" + " 'scsi_type' value from ['%s']" + % (disk_index, "', '".join(self.scsi_device_type.keys())) + ) + current_disk["scsi_type"] = scsi_contrl_type + if "shares" in disk: + current_disk["shares"] = disk["shares"] + if "iolimit" in disk: + current_disk["iolimit"] = disk["iolimit"] disks_data.append(current_disk) return disks_data @@ -751,17 +905,21 @@ def get_recommended_datastore(self, datastore_cluster_obj): """ # Check if Datastore Cluster provided by user is SDRS ready - sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + sdrs_status = ( + datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled + ) if sdrs_status: # We can get storage recommendation only if SDRS is enabled on given datastorage cluster pod_sel_spec = vim.storageDrs.PodSelectionSpec() pod_sel_spec.storagePod = datastore_cluster_obj storage_spec = vim.storageDrs.StoragePlacementSpec() storage_spec.podSelectionSpec = pod_sel_spec - storage_spec.type = 'create' + storage_spec.type = "create" try: - rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec) + rec = self.content.storageResourceManager.RecommendDatastores( + storageSpec=storage_spec + ) rec_action = rec.recommendations[0].action[0] return rec_action.destination.name except Exception: @@ -796,7 +954,9 @@ def gather_disk_facts(vm_obj): for disk in vm_obj.config.hardware.device: if isinstance(disk, vim.vm.device.VirtualDisk): if disk.storageIOAllocation is None: - disk.storageIOAllocation = vim.StorageResourceManager.IOAllocationInfo() + disk.storageIOAllocation = ( + vim.StorageResourceManager.IOAllocationInfo() + ) disk.storageIOAllocation.shares = vim.SharesInfo() if disk.shares is None: disk.shares = vim.SharesInfo() @@ -827,25 +987,22 @@ def gather_disk_facts(vm_obj): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', required=True), - disk=dict(type='list', default=[]), - use_instance_uuid=dict(type='bool', default=False), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", required=True), + disk=dict(type="list", default=[]), + use_instance_uuid=dict(type="bool", default=False), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -854,18 +1011,25 @@ def main(): if not vm: # We unable to find the virtual machine user specified # Bail out - vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('moid')) - module.fail_json(msg="Unable to manage disks for non-existing" - " virtual machine '%s'." % vm_id) + vm_id = ( + module.params.get("name") + or module.params.get("uuid") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to manage disks for non-existing" + " virtual machine '%s'." 
% vm_id + ) # VM exists try: pyv.ensure_disks(vm_obj=vm) except Exception as exc: - module.fail_json(msg="Failed to manage disks for virtual machine" - " '%s' with exception : %s" % (vm.name, - to_native(exc))) + module.fail_json( + msg="Failed to manage disks for virtual machine" + " '%s' with exception : %s" % (vm.name, to_native(exc)) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_disk_info.py b/plugins/modules/vmware_guest_disk_info.py index 12b730b..294f755 100644 --- a/plugins/modules/vmware_guest_disk_info.py +++ b/plugins/modules/vmware_guest_disk_info.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_disk_info short_description: Gather info about disks of given virtual machine @@ -76,9 +77,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather disk info from virtual machine using UUID vmware_guest_disk_info: hostname: "{{ vcenter_hostname }}" @@ -111,7 +112,7 @@ moid: vm-42 delegate_to: localhost register: disk_info -''' +""" RETURN = """ guest_disk_info: @@ -168,7 +169,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): @@ -190,11 +194,11 @@ def gather_disk_info(self, vm_obj): return disks_info controller_types = { - vim.vm.device.VirtualLsiLogicController: 'lsilogic', - vim.vm.device.ParaVirtualSCSIController: 'paravirtual', - vim.vm.device.VirtualBusLogicController: 'buslogic', - vim.vm.device.VirtualLsiLogicSASController: 'lsilogicsas', - vim.vm.device.VirtualIDEController: 'ide' + vim.vm.device.VirtualLsiLogicController: "lsilogic", + vim.vm.device.ParaVirtualSCSIController: "paravirtual", + vim.vm.device.VirtualBusLogicController: "buslogic", + vim.vm.device.VirtualLsiLogicSASController: "lsilogicsas", + vim.vm.device.VirtualIDEController: "ide", } controller_index = 0 @@ -204,7 +208,7 @@ def gather_disk_info(self, vm_obj): key=controller.key, controller_type=controller_types[type(controller)], bus_number=controller.busNumber, - devices=controller.device + devices=controller.device, ) controller_index += 1 @@ -222,68 +226,153 @@ def gather_disk_info(self, vm_obj): capacity_in_kb=disk.capacityInKB, capacity_in_bytes=disk.capacityInBytes, ) - if isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo): - disks_info[disk_index]['backing_type'] = 'FlatVer1' - disks_info[disk_index]['backing_writethrough'] = disk.backing.writeThrough - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo): - disks_info[disk_index]['backing_type'] = 'FlatVer2' - disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - disks_info[disk_index]['backing_thinprovisioned'] = bool(disk.backing.thinProvisioned) - disks_info[disk_index]['backing_eagerlyscrub'] = bool(disk.backing.eagerlyScrub) - 
disks_info[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.LocalPMemBackingInfo): - disks_info[disk_index]['backing_type'] = 'LocalPMem' - disks_info[disk_index]['backing_volumeuuid'] = disk.backing.volumeUUID - disks_info[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo): - disks_info[disk_index]['backing_type'] = 'PartitionedRawDiskVer2' - disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName - disks_info[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo): - disks_info[disk_index]['backing_type'] = 'RawDiskMappingVer1' - disks_info[disk_index]['backing_devicename'] = disk.backing.deviceName - disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode - disks_info[disk_index]['backing_lunuuid'] = disk.backing.lunUuid - disks_info[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo): - disks_info[disk_index]['backing_type'] = 'RawDiskVer2' - disks_info[disk_index]['backing_descriptorfilename'] = disk.backing.descriptorFileName - disks_info[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo): - disks_info[disk_index]['backing_type'] = 'SeSparse' - disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode - disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - disks_info[disk_index]['backing_uuid'] = disk.backing.uuid - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer1BackingInfo): - disks_info[disk_index]['backing_type'] = 'SparseVer1' - disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode - disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB - disks_info[disk_index]['backing_split'] = bool(disk.backing.split) - disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - - elif isinstance(disk.backing, vim.vm.device.VirtualDisk.SparseVer2BackingInfo): - disks_info[disk_index]['backing_type'] = 'SparseVer2' - disks_info[disk_index]['backing_diskmode'] = disk.backing.diskMode - disks_info[disk_index]['backing_disk_mode'] = disk.backing.diskMode - disks_info[disk_index]['backing_spaceusedinkb'] = disk.backing.spaceUsedInKB - disks_info[disk_index]['backing_split'] = bool(disk.backing.split) - disks_info[disk_index]['backing_writethrough'] = bool(disk.backing.writeThrough) - disks_info[disk_index]['backing_uuid'] = disk.backing.uuid + if isinstance( + disk.backing, vim.vm.device.VirtualDisk.FlatVer1BackingInfo + ): + disks_info[disk_index]["backing_type"] = "FlatVer1" + disks_info[disk_index][ + "backing_writethrough" + ] = disk.backing.writeThrough + + elif isinstance( + disk.backing, vim.vm.device.VirtualDisk.FlatVer2BackingInfo + ): + disks_info[disk_index]["backing_type"] = "FlatVer2" + disks_info[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + disks_info[disk_index]["backing_thinprovisioned"] = bool( + disk.backing.thinProvisioned + ) + disks_info[disk_index]["backing_eagerlyscrub"] = bool( + 
disk.backing.eagerlyScrub + ) + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.LocalPMemBackingInfo, + ): + disks_info[disk_index]["backing_type"] = "LocalPMem" + disks_info[disk_index][ + "backing_volumeuuid" + ] = disk.backing.volumeUUID + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.PartitionedRawDiskVer2BackingInfo, + ): + disks_info[disk_index][ + "backing_type" + ] = "PartitionedRawDiskVer2" + disks_info[disk_index][ + "backing_descriptorfilename" + ] = disk.backing.descriptorFileName + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo, + ): + disks_info[disk_index][ + "backing_type" + ] = "RawDiskMappingVer1" + disks_info[disk_index][ + "backing_devicename" + ] = disk.backing.deviceName + disks_info[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_disk_mode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_lunuuid" + ] = disk.backing.lunUuid + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.RawDiskVer2BackingInfo, + ): + disks_info[disk_index]["backing_type"] = "RawDiskVer2" + disks_info[disk_index][ + "backing_descriptorfilename" + ] = disk.backing.descriptorFileName + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, vim.vm.device.VirtualDisk.SeSparseBackingInfo + ): + disks_info[disk_index]["backing_type"] = "SeSparse" + disks_info[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_disk_mode" + ] = disk.backing.diskMode + disks_info[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.SparseVer1BackingInfo, + ): + disks_info[disk_index]["backing_type"] = "SparseVer1" + disks_info[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_disk_mode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_spaceusedinkb" + ] = disk.backing.spaceUsedInKB + disks_info[disk_index]["backing_split"] = bool( + disk.backing.split + ) + disks_info[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + + elif isinstance( + disk.backing, + vim.vm.device.VirtualDisk.SparseVer2BackingInfo, + ): + disks_info[disk_index]["backing_type"] = "SparseVer2" + disks_info[disk_index][ + "backing_diskmode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_disk_mode" + ] = disk.backing.diskMode + disks_info[disk_index][ + "backing_spaceusedinkb" + ] = disk.backing.spaceUsedInKB + disks_info[disk_index]["backing_split"] = bool( + disk.backing.split + ) + disks_info[disk_index]["backing_writethrough"] = bool( + disk.backing.writeThrough + ) + disks_info[disk_index]["backing_uuid"] = disk.backing.uuid for controller_index in range(len(controller_info)): - if controller_info[controller_index]['key'] == disks_info[disk_index]['controller_key']: - disks_info[disk_index]['controller_bus_number'] = controller_info[controller_index]['bus_number'] - disks_info[disk_index]['controller_type'] = controller_info[controller_index]['controller_type'] + if ( + controller_info[controller_index]["key"] + == 
disks_info[disk_index]["controller_key"] + ): + disks_info[disk_index][ + "controller_bus_number" + ] = controller_info[controller_index]["bus_number"] + disks_info[disk_index][ + "controller_type" + ] = controller_info[controller_index][ + "controller_type" + ] disk_index += 1 return disks_info @@ -292,25 +381,23 @@ def gather_disk_info(self, vm_obj): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - folder=dict(type='str'), - datacenter=dict(type='str', required=True), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + folder=dict(type="str"), + datacenter=dict(type="str", required=True), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], supports_check_mode=True, ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -321,13 +408,23 @@ def main(): try: module.exit_json(guest_disk_info=pyv.gather_disk_info(vm)) except Exception as exc: - module.fail_json(msg="Failed to gather information with exception : %s" % to_text(exc)) + module.fail_json( + msg="Failed to gather information with exception : %s" + % to_text(exc) + ) else: # We unable to find the virtual machine user specified # Bail out - vm_id = (module.params.get('uuid') or module.params.get('moid') or module.params.get('name')) - module.fail_json(msg="Unable to gather disk information for non-existing VM %s" % vm_id) - - -if __name__ == '__main__': + vm_id = ( + module.params.get("uuid") + or module.params.get("moid") + or module.params.get("name") + ) + module.fail_json( + msg="Unable to gather disk information for non-existing VM %s" + % vm_id + ) + + +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_file_operation.py b/plugins/modules/vmware_guest_file_operation.py index 0d63b49..1e513c2 100644 --- a/plugins/modules/vmware_guest_file_operation.py +++ b/plugins/modules/vmware_guest_file_operation.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_file_operation short_description: Files operation in a VMware guest operating system without network @@ -118,9 +119,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create directory inside a vm vmware_guest_file_operation: hostname: "{{ vcenter_hostname }}" @@ -165,10 +166,10 @@ src: "/root/test.zip" dest: "files/test.zip" delegate_to: localhost -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" try: from pyVmomi import vim, vmodl @@ -179,49 +180,68 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils import 
urls from ansible.module_utils._text import to_bytes, to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_cluster_by_name, find_datacenter_by_name, - find_vm_by_id, vmware_argument_spec) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_cluster_by_name, + find_datacenter_by_name, + find_vm_by_id, + vmware_argument_spec, +) class VmwareGuestFileManager(PyVmomi): def __init__(self, module): super(VmwareGuestFileManager, self).__init__(module) - datacenter_name = module.params['datacenter'] - cluster_name = module.params['cluster'] - folder = module.params['folder'] + datacenter_name = module.params["datacenter"] + cluster_name = module.params["cluster"] + folder = module.params["folder"] datacenter = None if datacenter_name: datacenter = find_datacenter_by_name(self.content, datacenter_name) if not datacenter: - module.fail_json(msg="Unable to find %(datacenter)s datacenter" % module.params) + module.fail_json( + msg="Unable to find %(datacenter)s datacenter" + % module.params + ) cluster = None if cluster_name: - cluster = find_cluster_by_name(self.content, cluster_name, datacenter) + cluster = find_cluster_by_name( + self.content, cluster_name, datacenter + ) if not cluster: - module.fail_json(msg="Unable to find %(cluster)s cluster" % module.params) - - if module.params['vm_id_type'] == 'inventory_path': - vm = find_vm_by_id(self.content, vm_id=module.params['vm_id'], vm_id_type="inventory_path", folder=folder) + module.fail_json( + msg="Unable to find %(cluster)s cluster" % module.params + ) + + if module.params["vm_id_type"] == "inventory_path": + vm = find_vm_by_id( + self.content, + vm_id=module.params["vm_id"], + vm_id_type="inventory_path", + folder=folder, + ) else: - vm = find_vm_by_id(self.content, - vm_id=module.params['vm_id'], - vm_id_type=module.params['vm_id_type'], - datacenter=datacenter, - cluster=cluster) + vm = find_vm_by_id( + self.content, + vm_id=module.params["vm_id"], + vm_id_type=module.params["vm_id_type"], + datacenter=datacenter, + cluster=cluster, + ) if not vm: - module.fail_json(msg='Unable to find virtual machine.') + module.fail_json(msg="Unable to find virtual machine.") self.vm = vm try: result = dict(changed=False) - if module.params['directory']: + if module.params["directory"]: result = self.directory() - if module.params['copy']: + if module.params["copy"]: result = self.copy() - if module.params['fetch']: + if module.params["fetch"]: result = self.fetch() module.exit_json(**result) except vmodl.RuntimeFault as runtime_fault: @@ -233,83 +253,115 @@ def __init__(self, module): def directory(self): result = dict(changed=True, uuid=self.vm.summary.config.uuid) - vm_username = self.module.params['vm_username'] - vm_password = self.module.params['vm_password'] - - recurse = bool(self.module.params['directory']['recurse']) - operation = self.module.params['directory']['operation'] - path = self.module.params['directory']['path'] - prefix = self.module.params['directory']['prefix'] - suffix = self.module.params['directory']['suffix'] - creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password) + vm_username = self.module.params["vm_username"] + vm_password = self.module.params["vm_password"] + + recurse = bool(self.module.params["directory"]["recurse"]) + operation = self.module.params["directory"]["operation"] + path = self.module.params["directory"]["path"] + prefix = self.module.params["directory"]["prefix"] + suffix = 
self.module.params["directory"]["suffix"] + creds = vim.vm.guest.NamePasswordAuthentication( + username=vm_username, password=vm_password + ) file_manager = self.content.guestOperationsManager.fileManager if operation in ("create", "mktemp"): try: if operation == "create": - file_manager.MakeDirectoryInGuest(vm=self.vm, - auth=creds, - directoryPath=path, - createParentDirectories=recurse) + file_manager.MakeDirectoryInGuest( + vm=self.vm, + auth=creds, + directoryPath=path, + createParentDirectories=recurse, + ) else: - newdir = file_manager.CreateTemporaryDirectoryInGuest(vm=self.vm, auth=creds, - prefix=prefix, suffix=suffix) - result['dir'] = newdir + newdir = file_manager.CreateTemporaryDirectoryInGuest( + vm=self.vm, auth=creds, prefix=prefix, suffix=suffix + ) + result["dir"] = newdir except vim.fault.FileAlreadyExists as file_already_exists: - result['changed'] = False - result['msg'] = "Guest directory %s already exist: %s" % (path, - to_native(file_already_exists.msg)) + result["changed"] = False + result["msg"] = "Guest directory %s already exist: %s" % ( + path, + to_native(file_already_exists.msg), + ) except vim.fault.GuestPermissionDenied as permission_denied: - self.module.fail_json(msg="Permission denied for path %s : %s" % (path, - to_native(permission_denied.msg)), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Permission denied for path %s : %s" + % (path, to_native(permission_denied.msg)), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.InvalidGuestLogin as invalid_guest_login: - self.module.fail_json(msg="Invalid guest login for user %s : %s" % (vm_username, - to_native(invalid_guest_login.msg)), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Invalid guest login for user %s : %s" + % (vm_username, to_native(invalid_guest_login.msg)), + uuid=self.vm.summary.config.uuid, + ) # other exceptions except Exception as e: - self.module.fail_json(msg="Failed to Create directory into VM VMware exception : %s" % to_native(e), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Failed to Create directory into VM VMware exception : %s" + % to_native(e), + uuid=self.vm.summary.config.uuid, + ) if operation == "delete": try: - file_manager.DeleteDirectoryInGuest(vm=self.vm, auth=creds, directoryPath=path, - recursive=recurse) + file_manager.DeleteDirectoryInGuest( + vm=self.vm, + auth=creds, + directoryPath=path, + recursive=recurse, + ) except vim.fault.FileNotFound as file_not_found: - result['changed'] = False - result['msg'] = "Guest directory %s not exists %s" % (path, - to_native(file_not_found.msg)) + result["changed"] = False + result["msg"] = "Guest directory %s not exists %s" % ( + path, + to_native(file_not_found.msg), + ) except vim.fault.FileFault as e: - self.module.fail_json(msg="FileFault : %s" % e.msg, - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="FileFault : %s" % e.msg, + uuid=self.vm.summary.config.uuid, + ) except vim.fault.GuestPermissionDenied as permission_denied: - self.module.fail_json(msg="Permission denied for path %s : %s" % (path, - to_native(permission_denied.msg)), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Permission denied for path %s : %s" + % (path, to_native(permission_denied.msg)), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.InvalidGuestLogin as invalid_guest_login: - self.module.fail_json(msg="Invalid guest login for user %s : %s" % (vm_username, - to_native(invalid_guest_login.msg)), - uuid=self.vm.summary.config.uuid) 
+ self.module.fail_json( + msg="Invalid guest login for user %s : %s" + % (vm_username, to_native(invalid_guest_login.msg)), + uuid=self.vm.summary.config.uuid, + ) # other exceptions except Exception as e: - self.module.fail_json(msg="Failed to Delete directory into Vm VMware exception : %s" % to_native(e), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Failed to Delete directory into Vm VMware exception : %s" + % to_native(e), + uuid=self.vm.summary.config.uuid, + ) return result def fetch(self): result = dict(changed=True, uuid=self.vm.summary.config.uuid) - vm_username = self.module.params['vm_username'] - vm_password = self.module.params['vm_password'] - hostname = self.module.params['hostname'] - dest = self.module.params["fetch"]['dest'] - src = self.module.params['fetch']['src'] - creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password) + vm_username = self.module.params["vm_username"] + vm_password = self.module.params["vm_password"] + hostname = self.module.params["hostname"] + dest = self.module.params["fetch"]["dest"] + src = self.module.params["fetch"]["src"] + creds = vim.vm.guest.NamePasswordAuthentication( + username=vm_username, password=vm_password + ) file_manager = self.content.guestOperationsManager.fileManager try: - fileTransferInfo = file_manager.InitiateFileTransferFromGuest(vm=self.vm, auth=creds, - guestFilePath=src) + fileTransferInfo = file_manager.InitiateFileTransferFromGuest( + vm=self.vm, auth=creds, guestFilePath=src + ) url = fileTransferInfo.url url = url.replace("*", hostname) resp, info = urls.fetch_url(self.module, url, method="GET") @@ -317,144 +369,211 @@ def fetch(self): with open(dest, "wb") as local_file: local_file.write(resp.read()) except Exception as e: - self.module.fail_json(msg="local file write exception : %s" % to_native(e), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="local file write exception : %s" % to_native(e), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.FileNotFound as file_not_found: - self.module.fail_json(msg="Guest file %s does not exist : %s" % (src, to_native(file_not_found.msg)), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Guest file %s does not exist : %s" + % (src, to_native(file_not_found.msg)), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.FileFault as e: - self.module.fail_json(msg="FileFault : %s" % to_native(e.msg), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="FileFault : %s" % to_native(e.msg), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.GuestPermissionDenied: - self.module.fail_json(msg="Permission denied to fetch file %s" % src, - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Permission denied to fetch file %s" % src, + uuid=self.vm.summary.config.uuid, + ) except vim.fault.InvalidGuestLogin: - self.module.fail_json(msg="Invalid guest login for user %s" % vm_username, - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Invalid guest login for user %s" % vm_username, + uuid=self.vm.summary.config.uuid, + ) # other exceptions except Exception as e: - self.module.fail_json(msg="Failed to Fetch file from Vm VMware exception : %s" % to_native(e), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Failed to Fetch file from Vm VMware exception : %s" + % to_native(e), + uuid=self.vm.summary.config.uuid, + ) return result def copy(self): result = dict(changed=True, uuid=self.vm.summary.config.uuid) - 
vm_username = self.module.params['vm_username'] - vm_password = self.module.params['vm_password'] - hostname = self.module.params['hostname'] + vm_username = self.module.params["vm_username"] + vm_password = self.module.params["vm_password"] + hostname = self.module.params["hostname"] overwrite = self.module.params["copy"]["overwrite"] - dest = self.module.params["copy"]['dest'] - src = self.module.params['copy']['src'] - b_src = to_bytes(src, errors='surrogate_or_strict') + dest = self.module.params["copy"]["dest"] + src = self.module.params["copy"]["src"] + b_src = to_bytes(src, errors="surrogate_or_strict") if not os.path.exists(b_src): self.module.fail_json(msg="Source %s not found" % src) if not os.access(b_src, os.R_OK): self.module.fail_json(msg="Source %s not readable" % src) if os.path.isdir(b_src): - self.module.fail_json(msg="copy does not support copy of directory: %s" % src) + self.module.fail_json( + msg="copy does not support copy of directory: %s" % src + ) data = None with open(b_src, "rb") as local_file: data = local_file.read() file_size = os.path.getsize(b_src) - creds = vim.vm.guest.NamePasswordAuthentication(username=vm_username, password=vm_password) + creds = vim.vm.guest.NamePasswordAuthentication( + username=vm_username, password=vm_password + ) file_attributes = vim.vm.guest.FileManager.FileAttributes() file_manager = self.content.guestOperationsManager.fileManager try: - url = file_manager.InitiateFileTransferToGuest(vm=self.vm, auth=creds, guestFilePath=dest, - fileAttributes=file_attributes, overwrite=overwrite, - fileSize=file_size) + url = file_manager.InitiateFileTransferToGuest( + vm=self.vm, + auth=creds, + guestFilePath=dest, + fileAttributes=file_attributes, + overwrite=overwrite, + fileSize=file_size, + ) url = url.replace("*", hostname) - resp, info = urls.fetch_url(self.module, url, data=data, method="PUT") + resp, info = urls.fetch_url( + self.module, url, data=data, method="PUT" + ) status_code = info["status"] if status_code != 200: - self.module.fail_json(msg='problem during file transfer, http message:%s' % info, - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="problem during file transfer, http message:%s" % info, + uuid=self.vm.summary.config.uuid, + ) except vim.fault.FileAlreadyExists: - result['changed'] = False - result['msg'] = "Guest file %s already exists" % dest + result["changed"] = False + result["msg"] = "Guest file %s already exists" % dest return result except vim.fault.FileFault as e: - self.module.fail_json(msg="FileFault:%s" % to_native(e.msg), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="FileFault:%s" % to_native(e.msg), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.GuestPermissionDenied as permission_denied: - self.module.fail_json(msg="Permission denied to copy file into " - "destination %s : %s" % (dest, to_native(permission_denied.msg)), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Permission denied to copy file into " + "destination %s : %s" + % (dest, to_native(permission_denied.msg)), + uuid=self.vm.summary.config.uuid, + ) except vim.fault.InvalidGuestLogin as invalid_guest_login: - self.module.fail_json(msg="Invalid guest login for user" - " %s : %s" % (vm_username, to_native(invalid_guest_login.msg))) + self.module.fail_json( + msg="Invalid guest login for user" + " %s : %s" % (vm_username, to_native(invalid_guest_login.msg)) + ) # other exceptions except Exception as e: - self.module.fail_json(msg="Failed to Copy file to Vm VMware exception 
: %s" % to_native(e), - uuid=self.vm.summary.config.uuid) + self.module.fail_json( + msg="Failed to Copy file to Vm VMware exception : %s" + % to_native(e), + uuid=self.vm.summary.config.uuid, + ) return result def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - datacenter=dict(type='str'), - cluster=dict(type='str'), - folder=dict(type='str'), - vm_id=dict(type='str', required=True), - vm_id_type=dict( - default='vm_name', - type='str', - choices=['inventory_path', 'uuid', 'instance_uuid', 'dns_name', 'vm_name']), - vm_username=dict(type='str', required=True), - vm_password=dict(type='str', no_log=True, required=True), - directory=dict( - type='dict', - default=None, - options=dict( - operation=dict(required=True, type='str', choices=['create', 'delete', 'mktemp']), - path=dict(required=False, type='str'), - prefix=dict(required=False, type='str'), - suffix=dict(required=False, type='str'), - recurse=dict(required=False, type='bool', default=False) - ) - ), - copy=dict( - type='dict', - default=None, - options=dict(src=dict(required=True, type='str'), - dest=dict(required=True, type='str'), - overwrite=dict(required=False, type='bool', default=False) - ) - ), - fetch=dict( - type='dict', - default=None, - options=dict( - src=dict(required=True, type='str'), - dest=dict(required=True, type='str'), - ) + argument_spec.update( + dict( + datacenter=dict(type="str"), + cluster=dict(type="str"), + folder=dict(type="str"), + vm_id=dict(type="str", required=True), + vm_id_type=dict( + default="vm_name", + type="str", + choices=[ + "inventory_path", + "uuid", + "instance_uuid", + "dns_name", + "vm_name", + ], + ), + vm_username=dict(type="str", required=True), + vm_password=dict(type="str", no_log=True, required=True), + directory=dict( + type="dict", + default=None, + options=dict( + operation=dict( + required=True, + type="str", + choices=["create", "delete", "mktemp"], + ), + path=dict(required=False, type="str"), + prefix=dict(required=False, type="str"), + suffix=dict(required=False, type="str"), + recurse=dict(required=False, type="bool", default=False), + ), + ), + copy=dict( + type="dict", + default=None, + options=dict( + src=dict(required=True, type="str"), + dest=dict(required=True, type="str"), + overwrite=dict(required=False, type="bool", default=False), + ), + ), + fetch=dict( + type="dict", + default=None, + options=dict( + src=dict(required=True, type="str"), + dest=dict(required=True, type="str"), + ), + ), ) ) - ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, - required_if=[['vm_id_type', 'inventory_path', ['folder']]], - mutually_exclusive=[['directory', 'copy', 'fetch']], - required_one_of=[['directory', 'copy', 'fetch']], - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False, + required_if=[["vm_id_type", "inventory_path", ["folder"]]], + mutually_exclusive=[["directory", "copy", "fetch"]], + required_one_of=[["directory", "copy", "fetch"]], + ) - if module.params['directory']: - if module.params['directory']['operation'] in ('create', 'delete') and not module.params['directory']['path']: - module.fail_json(msg='directory.path is required when operation is "create" or "delete"') - if module.params['directory']['operation'] == 'mktemp' and not (module.params['directory']['prefix'] and module.params['directory']['suffix']): - module.fail_json(msg='directory.prefix and directory.suffix are required when operation is "mktemp"') + if module.params["directory"]: + if ( + 
module.params["directory"]["operation"] in ("create", "delete") + and not module.params["directory"]["path"] + ): + module.fail_json( + msg='directory.path is required when operation is "create" or "delete"' + ) + if module.params["directory"]["operation"] == "mktemp" and not ( + module.params["directory"]["prefix"] + and module.params["directory"]["suffix"] + ): + module.fail_json( + msg='directory.prefix and directory.suffix are required when operation is "mktemp"' + ) - if module.params['vm_id_type'] == 'inventory_path' and not module.params['folder']: - module.fail_json(msg='Folder is required parameter when vm_id_type is inventory_path') + if ( + module.params["vm_id_type"] == "inventory_path" + and not module.params["folder"] + ): + module.fail_json( + msg="Folder is required parameter when vm_id_type is inventory_path" + ) vmware_guest_file_manager = VmwareGuestFileManager(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_find.py b/plugins/modules/vmware_guest_find.py index bffbad1..3dc7fd7 100644 --- a/plugins/modules/vmware_guest_find.py +++ b/plugins/modules/vmware_guest_find.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_find short_description: Find the folder path(s) for a virtual machine by name or UUID @@ -50,9 +51,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Find Guest's Folder using name vmware_guest_find: hostname: "{{ vcenter_hostname }}" @@ -71,7 +72,7 @@ uuid: 38c4c89c-b3d7-4ae6-ae4e-43c5118eae49 delegate_to: localhost register: vm_folder -''' +""" RETURN = r""" folders: @@ -86,7 +87,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_vm_by_id +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_vm_by_id, +) try: from pyVmomi import vim @@ -97,9 +102,9 @@ class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) - self.name = self.params['name'] - self.uuid = self.params['uuid'] - self.use_instance_uuid = self.params['use_instance_uuid'] + self.name = self.params["name"] + self.uuid = self.params["uuid"] + self.use_instance_uuid = self.params["use_instance_uuid"] def getvm_folder_paths(self): results = [] @@ -107,15 +112,24 @@ def getvm_folder_paths(self): if self.uuid: if self.use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="instance_uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.uuid, vm_id_type="instance_uuid" + ) else: - vm_obj = find_vm_by_id(self.content, vm_id=self.uuid, vm_id_type="uuid") + vm_obj = find_vm_by_id( + self.content, vm_id=self.uuid, vm_id_type="uuid" + ) if vm_obj is None: - self.module.fail_json(msg="Failed to find the virtual machine with UUID : %s" % self.uuid) + self.module.fail_json( + msg="Failed to find the virtual machine with UUID : %s" + % self.uuid + ) vms = 
[vm_obj] elif self.name: - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) for temp_vm_object in objects: if temp_vm_object.obj.name == self.name: vms.append(temp_vm_object.obj) @@ -130,16 +144,17 @@ def getvm_folder_paths(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - datacenter=dict(removed_in_version=2.9, type='str') + name=dict(type="str"), + uuid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + datacenter=dict(removed_in_version=2.9, type="str"), ) - module = AnsibleModule(argument_spec=argument_spec, - required_one_of=[['name', 'uuid']], - mutually_exclusive=[['name', 'uuid']], - ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[["name", "uuid"]], + mutually_exclusive=[["name", "uuid"]], + ) pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -150,11 +165,16 @@ def main(): try: module.exit_json(folders=folders) except Exception as exc: - module.fail_json(msg="Folder enumeration failed with exception %s" % to_native(exc)) + module.fail_json( + msg="Folder enumeration failed with exception %s" + % to_native(exc) + ) else: - module.fail_json(msg="Unable to find folders for virtual machine %s" % (module.params.get('name') or - module.params.get('uuid'))) + module.fail_json( + msg="Unable to find folders for virtual machine %s" + % (module.params.get("name") or module.params.get("uuid")) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_info.py b/plugins/modules/vmware_guest_info.py index 96a2c05..f891eb4 100644 --- a/plugins/modules/vmware_guest_info.py +++ b/plugins/modules/vmware_guest_info.py @@ -6,17 +6,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_info short_description: Gather info about a single VM @@ -113,9 +114,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather info from standalone ESXi server having datacenter as 'ha-datacenter' vmware_guest_info: hostname: "{{ vcenter_hostname }}" @@ -166,7 +167,7 @@ - _moId delegate_to: localhost register: moid_info -''' +""" RETURN = """ instance: @@ -233,10 +234,17 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) + try: from com.vmware.vapi.std_client import DynamicID + HAS_VSPHERE = True except ImportError: HAS_VSPHERE = False @@ -252,30 +260,43 @@ def __init__(self, 
module): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - name_match=dict(type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', required=True), - tags=dict(type='bool', default=False), - schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'), - properties=dict(type='list') + name=dict(type="str"), + name_match=dict( + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", required=True), + tags=dict(type="bool", default=False), + schema=dict( + type="str", choices=["summary", "vsphere"], default="summary" + ), + properties=dict(type="list"), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[["name", "uuid", "moid"]], + supports_check_mode=True, ) - module = AnsibleModule(argument_spec=argument_spec, - required_one_of=[['name', 'uuid', 'moid']], - supports_check_mode=True) - if module._name == 'vmware_guest_facts': - module.deprecate("The 'vmware_guest_facts' module has been renamed to 'vmware_guest_info'", version='2.13') + if module._name == "vmware_guest_facts": + module.deprecate( + "The 'vmware_guest_facts' module has been renamed to 'vmware_guest_info'", + version="2.13", + ) - if module.params.get('folder'): + if module.params.get("folder"): # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") - if module.params['schema'] != 'vsphere' and module.params.get('properties'): - module.fail_json(msg="The option 'properties' is only valid when the schema is 'vsphere'") + if module.params["schema"] != "vsphere" and module.params.get( + "properties" + ): + module.fail_json( + msg="The option 'properties' is only valid when the schema is 'vsphere'" + ) pyv = PyVmomi(module) # Check if the VM exists before continuing @@ -284,29 +305,42 @@ def main(): # VM already exists if vm: try: - if module.params['schema'] == 'summary': + if module.params["schema"] == "summary": instance = pyv.gather_facts(vm) else: - instance = pyv.to_json(vm, module.params['properties']) - if module.params.get('tags'): + instance = pyv.to_json(vm, module.params["properties"]) + if module.params.get("tags"): if not HAS_VSPHERE: - module.fail_json(msg="Unable to find 'vCloud Suite SDK' Python library which is required." - " Please refer this URL for installation steps" - " - https://code.vmware.com/web/sdk/60/vcloudsuite-python") + module.fail_json( + msg="Unable to find 'vCloud Suite SDK' Python library which is required." 
+ " Please refer this URL for installation steps" + " - https://code.vmware.com/web/sdk/60/vcloudsuite-python" + ) vm_rest_client = VmwareTag(module) instance.update( - tags=vm_rest_client.get_vm_tags(vm_rest_client.tag_service, - vm_rest_client.tag_association_svc, - vm_mid=vm._moId) + tags=vm_rest_client.get_vm_tags( + vm_rest_client.tag_service, + vm_rest_client.tag_association_svc, + vm_mid=vm._moId, + ) ) module.exit_json(instance=instance) except Exception as exc: - module.fail_json(msg="Information gathering failed with exception %s" % to_text(exc)) + module.fail_json( + msg="Information gathering failed with exception %s" + % to_text(exc) + ) else: - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg="Unable to gather information for non-existing VM %s" % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to gather information for non-existing VM %s" % vm_id + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_move.py b/plugins/modules/vmware_guest_move.py index d1cc189..82a56ba 100644 --- a/plugins/modules/vmware_guest_move.py +++ b/plugins/modules/vmware_guest_move.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_guest_move short_description: Moves virtual machines in vCenter @@ -79,9 +80,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Move Virtual Machine vmware_guest_move: hostname: "{{ vcenter_hostname }}" @@ -127,7 +128,7 @@ dest_folder: "/DataCenter/vm/path/to/new/folder/where/we/want" delegate_to: localhost register: facts -''' +""" RETURN = """ instance: @@ -181,7 +182,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, +) class PyVmomiHelper(PyVmomi): @@ -192,29 +197,26 @@ def __init__(self, module): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), + name=dict(type="str"), name_match=dict( - type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - dest_folder=dict(type='str', required=True), - datacenter=dict(type='str', required=True), + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + dest_folder=dict(type="str", required=True), + datacenter=dict(type="str", required=True), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ], - supports_check_mode=True + required_one_of=[["name", "uuid", "moid"]], + 
mutually_exclusive=[["name", "uuid", "moid"]], + supports_check_mode=True, ) # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['dest_folder'] = module.params['dest_folder'].rstrip('/') + module.params["dest_folder"] = module.params["dest_folder"].rstrip("/") pyv = PyVmomiHelper(module) search_index = pyv.content.searchIndex @@ -224,36 +226,47 @@ def main(): # VM exists if vm: try: - vm_path = pyv.get_vm_path(pyv.content, vm).lstrip('/') - if module.params['name']: - vm_name = module.params['name'] + vm_path = pyv.get_vm_path(pyv.content, vm).lstrip("/") + if module.params["name"]: + vm_name = module.params["name"] else: vm_name = vm.name - vm_full = vm_path + '/' + vm_name - folder = search_index.FindByInventoryPath(module.params['dest_folder']) + vm_full = vm_path + "/" + vm_name + folder = search_index.FindByInventoryPath( + module.params["dest_folder"] + ) if folder is None: module.fail_json(msg="Folder name and/or path does not exist") vm_to_move = search_index.FindByInventoryPath(vm_full) if module.check_mode: module.exit_json(changed=True, instance=pyv.gather_facts(vm)) - if vm_path != module.params['dest_folder'].lstrip('/'): + if vm_path != module.params["dest_folder"].lstrip("/"): move_task = folder.MoveInto([vm_to_move]) changed, err = wait_for_task(move_task) if changed: module.exit_json( - changed=True, instance=pyv.gather_facts(vm)) + changed=True, instance=pyv.gather_facts(vm) + ) else: module.exit_json(instance=pyv.gather_facts(vm)) except Exception as exc: - module.fail_json(msg="Failed to move VM with exception %s" % - to_native(exc)) + module.fail_json( + msg="Failed to move VM with exception %s" % to_native(exc) + ) else: if module.check_mode: module.exit_json(changed=False) - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg="Unable to find VM %s to move to %s" % (vm_id, module.params.get('dest_folder'))) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to find VM %s to move to %s" + % (vm_id, module.params.get("dest_folder")) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_network.py b/plugins/modules/vmware_guest_network.py index 5eb66f1..c242cf3 100644 --- a/plugins/modules/vmware_guest_network.py +++ b/plugins/modules/vmware_guest_network.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_network short_description: Manage network adapters of specified virtual machine in given vCenter infrastructure @@ -122,9 +123,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Change network adapter settings of virtual machine vmware_guest_network: hostname: "{{ vcenter_hostname }}" @@ -192,7 +193,7 @@ mac: "aa:50:56:58:59:61" directpath_io: True delegate_to: localhost -''' +""" RETURN = """ network_data: @@ -234,7 +235,13 @@ from ansible.module_utils.basic import AnsibleModule from 
ansible.module_utils.common.network import is_mac from ansible.module_utils._text import to_native, to_text -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, get_all_objs, get_parent_datacenter +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + get_all_objs, + get_parent_datacenter, +) class PyVmomiHelper(PyVmomi): @@ -257,9 +264,13 @@ def get_device_type(self, device_type=None): if device_type and device_type in list(self.nic_device_type.keys()): return self.nic_device_type[device_type]() else: - self.module.fail_json(msg='Invalid network device_type %s' % device_type) + self.module.fail_json( + msg="Invalid network device_type %s" % device_type + ) - def get_network_device(self, vm=None, mac=None, device_type=None, device_label=None): + def get_network_device( + self, vm=None, mac=None, device_type=None, device_label=None + ): """ Get network adapter """ @@ -304,44 +315,64 @@ def get_network_device_by_label(self, vm=None, device_label=None): def create_network_adapter(self, device_info): nic = vim.vm.device.VirtualDeviceSpec() - nic.device = self.get_device_type(device_type=device_info.get('device_type', 'vmxnet3')) + nic.device = self.get_device_type( + device_type=device_info.get("device_type", "vmxnet3") + ) nic.device.deviceInfo = vim.Description() - network_object = self.find_network_by_name(network_name=device_info['name'])[0] + network_object = self.find_network_by_name( + network_name=device_info["name"] + )[0] if network_object: - if hasattr(network_object, 'portKeys'): + if hasattr(network_object, "portKeys"): # DistributedVirtualPortGroup - nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + nic.device.backing = ( + vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + ) nic.device.backing.port = vim.dvs.PortConnection() - nic.device.backing.port.switchUuid = network_object.config.distributedVirtualSwitch.uuid + nic.device.backing.port.switchUuid = ( + network_object.config.distributedVirtualSwitch.uuid + ) nic.device.backing.port.portgroupKey = network_object.key elif isinstance(network_object, vim.OpaqueNetwork): # NSX-T Logical Switch - nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() + nic.device.backing = ( + vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() + ) network_id = network_object.summary.opaqueNetworkId - nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch' + nic.device.backing.opaqueNetworkType = "nsx.LogicalSwitch" nic.device.backing.opaqueNetworkId = network_id - nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id + nic.device.deviceInfo.summary = ( + "nsx.LogicalSwitch: %s" % network_id + ) else: # Standard vSwitch - nic.device.deviceInfo.summary = device_info['name'] - nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() - nic.device.backing.deviceName = device_info['name'] + nic.device.deviceInfo.summary = device_info["name"] + nic.device.backing = ( + vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + ) + nic.device.backing.deviceName = device_info["name"] nic.device.backing.network = network_object nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo() - nic.device.connectable.startConnected = device_info.get('start_connected', True) + nic.device.connectable.startConnected = device_info.get( + "start_connected", True + ) 
nic.device.connectable.allowGuestControl = True - nic.device.connectable.connected = device_info.get('connected', True) - if 'manual_mac' in device_info: - nic.device.addressType = 'manual' - nic.device.macAddress = device_info['manual_mac'] + nic.device.connectable.connected = device_info.get("connected", True) + if "manual_mac" in device_info: + nic.device.addressType = "manual" + nic.device.macAddress = device_info["manual_mac"] else: - nic.device.addressType = 'generated' - if 'directpath_io' in device_info: + nic.device.addressType = "generated" + if "directpath_io" in device_info: if isinstance(nic.device, vim.vm.device.VirtualVmxnet3): - nic.device.uptCompatibilityEnabled = device_info['directpath_io'] + nic.device.uptCompatibilityEnabled = device_info[ + "directpath_io" + ] else: - self.module.fail_json(msg='UPT is only compatible for Vmxnet3 adapter.' - + ' Clients can set this property enabled or disabled if ethernet virtual device is Vmxnet3.') + self.module.fail_json( + msg="UPT is only compatible for Vmxnet3 adapter." + + " Clients can set this property enabled or disabled if ethernet virtual device is Vmxnet3." + ) return nic @@ -353,20 +384,20 @@ def get_network_info(self, vm_obj): nic_index = 0 for nic in vm_obj.config.hardware.device: nic_type = None - directpath_io = 'N/A' + directpath_io = "N/A" if isinstance(nic, vim.vm.device.VirtualPCNet32): - nic_type = 'PCNet32' + nic_type = "PCNet32" elif isinstance(nic, vim.vm.device.VirtualVmxnet2): - nic_type = 'VMXNET2' + nic_type = "VMXNET2" elif isinstance(nic, vim.vm.device.VirtualVmxnet3): - nic_type = 'VMXNET3' + nic_type = "VMXNET3" directpath_io = nic.uptCompatibilityEnabled elif isinstance(nic, vim.vm.device.VirtualE1000): - nic_type = 'E1000' + nic_type = "E1000" elif isinstance(nic, vim.vm.device.VirtualE1000e): - nic_type = 'E1000E' + nic_type = "E1000E" elif isinstance(nic, vim.vm.device.VirtualSriovEthernetCard): - nic_type = 'SriovEthernetCard' + nic_type = "SriovEthernetCard" if nic_type is not None: network_info[nic_index] = dict( device_type=nic_type, @@ -378,7 +409,7 @@ def get_network_info(self, vm_obj): allow_guest_ctl=nic.connectable.allowGuestControl, connected=nic.connectable.connected, start_connected=nic.connectable.startConnected, - directpath_io=directpath_io + directpath_io=directpath_io, ) nic_index += 1 @@ -386,60 +417,124 @@ def get_network_info(self, vm_obj): def sanitize_network_params(self): network_list = [] - valid_state = ['new', 'present', 'absent'] - if len(self.params['networks']) != 0: - for network in self.params['networks']: - if 'state' not in network or network['state'].lower() not in valid_state: - self.module.fail_json(msg="Network adapter state not specified or invalid: '%s', valid values: " - "%s" % (network.get('state', ''), valid_state)) + valid_state = ["new", "present", "absent"] + if len(self.params["networks"]) != 0: + for network in self.params["networks"]: + if ( + "state" not in network + or network["state"].lower() not in valid_state + ): + self.module.fail_json( + msg="Network adapter state not specified or invalid: '%s', valid values: " + "%s" % (network.get("state", ""), valid_state) + ) # add new network adapter but no name specified - if network['state'].lower() == 'new' and 'name' not in network and 'vlan' not in network: - self.module.fail_json(msg="Please specify at least network name or VLAN name for adding new network adapter.") - if network['state'].lower() == 'new' and 'mac' in network: - self.module.fail_json(msg="networks.mac is used for vNIC reconfigure, 
but networks.state is set to 'new'.") - if network['state'].lower() == 'present' and 'mac' not in network and 'label' not in network and 'device_type' not in network: - self.module.fail_json(msg="Should specify 'mac', 'label' or 'device_type' parameter to reconfigure network adapter") - if 'connected' in network: - if not isinstance(network['connected'], bool): - self.module.fail_json(msg="networks.connected parameter should be boolean.") - if network['state'].lower() == 'new' and not network['connected']: - network['start_connected'] = False - if 'start_connected' in network: - if not isinstance(network['start_connected'], bool): - self.module.fail_json(msg="networks.start_connected parameter should be boolean.") - if network['state'].lower() == 'new' and not network['start_connected']: - network['connected'] = False + if ( + network["state"].lower() == "new" + and "name" not in network + and "vlan" not in network + ): + self.module.fail_json( + msg="Please specify at least network name or VLAN name for adding new network adapter." + ) + if network["state"].lower() == "new" and "mac" in network: + self.module.fail_json( + msg="networks.mac is used for vNIC reconfigure, but networks.state is set to 'new'." + ) + if ( + network["state"].lower() == "present" + and "mac" not in network + and "label" not in network + and "device_type" not in network + ): + self.module.fail_json( + msg="Should specify 'mac', 'label' or 'device_type' parameter to reconfigure network adapter" + ) + if "connected" in network: + if not isinstance(network["connected"], bool): + self.module.fail_json( + msg="networks.connected parameter should be boolean." + ) + if ( + network["state"].lower() == "new" + and not network["connected"] + ): + network["start_connected"] = False + if "start_connected" in network: + if not isinstance(network["start_connected"], bool): + self.module.fail_json( + msg="networks.start_connected parameter should be boolean." + ) + if ( + network["state"].lower() == "new" + and not network["start_connected"] + ): + network["connected"] = False # specified network does not exist - if 'name' in network and not self.network_exists_by_name(network['name']): - self.module.fail_json(msg="Network '%(name)s' does not exist." % network) - elif 'vlan' in network: - objects = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup]) - dvps = [x for x in objects if to_text(get_parent_datacenter(x).name) == to_text(self.params['datacenter'])] + if "name" in network and not self.network_exists_by_name( + network["name"] + ): + self.module.fail_json( + msg="Network '%(name)s' does not exist." 
% network + ) + elif "vlan" in network: + objects = get_all_objs( + self.content, [vim.dvs.DistributedVirtualPortgroup] + ) + dvps = [ + x + for x in objects + if to_text(get_parent_datacenter(x).name) + == to_text(self.params["datacenter"]) + ] for dvp in dvps: - if hasattr(dvp.config.defaultPortConfig, 'vlan') and \ - isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \ - str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']): - network['name'] = dvp.config.name + if ( + hasattr(dvp.config.defaultPortConfig, "vlan") + and isinstance( + dvp.config.defaultPortConfig.vlan.vlanId, int + ) + and str(dvp.config.defaultPortConfig.vlan.vlanId) + == str(network["vlan"]) + ): + network["name"] = dvp.config.name break - if 'dvswitch_name' in network and \ - dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \ - dvp.config.name == network['vlan']: - network['name'] = dvp.config.name + if ( + "dvswitch_name" in network + and dvp.config.distributedVirtualSwitch.name + == network["dvswitch_name"] + and dvp.config.name == network["vlan"] + ): + network["name"] = dvp.config.name break - if dvp.config.name == network['vlan']: - network['name'] = dvp.config.name + if dvp.config.name == network["vlan"]: + network["name"] = dvp.config.name break else: - self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network) - - if 'device_type' in network and network['device_type'] not in list(self.nic_device_type.keys()): - self.module.fail_json(msg="Device type specified '%s' is invalid. " - "Valid types %s " % (network['device_type'], list(self.nic_device_type.keys()))) - - if ('mac' in network and not is_mac(network['mac'])) or \ - ('manual_mac' in network and not is_mac(network['manual_mac'])): - self.module.fail_json(msg="Device MAC address '%s' or manual set MAC address %s is invalid. " - "Please provide correct MAC address." % (network['mac'], network['manual_mac'])) + self.module.fail_json( + msg="VLAN '%(vlan)s' does not exist." % network + ) + + if "device_type" in network and network[ + "device_type" + ] not in list(self.nic_device_type.keys()): + self.module.fail_json( + msg="Device type specified '%s' is invalid. " + "Valid types %s " + % ( + network["device_type"], + list(self.nic_device_type.keys()), + ) + ) + + if ("mac" in network and not is_mac(network["mac"])) or ( + "manual_mac" in network + and not is_mac(network["manual_mac"]) + ): + self.module.fail_json( + msg="Device MAC address '%s' or manual set MAC address %s is invalid. " + "Please provide correct MAC address." 
+ % (network["mac"], network["manual_mac"]) + ) network_list.append(network) @@ -449,90 +544,193 @@ def get_network_config_spec(self, vm_obj, network_list): # create network adapter config spec for adding, editing, removing for network in network_list: # add new network adapter - if network['state'].lower() == 'new': + if network["state"].lower() == "new": nic_spec = self.create_network_adapter(network) - nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + nic_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.add + ) self.change_detected = True self.config_spec.deviceChange.append(nic_spec) # reconfigure network adapter or remove network adapter else: nic_devices = [] - if 'mac' in network: - nic = self.get_network_device_by_mac(vm_obj, mac=network['mac']) + if "mac" in network: + nic = self.get_network_device_by_mac( + vm_obj, mac=network["mac"] + ) if nic is not None: nic_devices.append(nic) - if 'label' in network and len(nic_devices) == 0: - nic = self.get_network_device_by_label(vm_obj, device_label=network['label']) + if "label" in network and len(nic_devices) == 0: + nic = self.get_network_device_by_label( + vm_obj, device_label=network["label"] + ) if nic is not None: nic_devices.append(nic) - if 'device_type' in network and len(nic_devices) == 0: - nic_devices = self.get_network_devices_by_type(vm_obj, device_type=network['device_type']) + if "device_type" in network and len(nic_devices) == 0: + nic_devices = self.get_network_devices_by_type( + vm_obj, device_type=network["device_type"] + ) if len(nic_devices) != 0: for nic_device in nic_devices: nic_spec = vim.vm.device.VirtualDeviceSpec() - if network['state'].lower() == 'present': - nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + if network["state"].lower() == "present": + nic_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) nic_spec.device = nic_device - if 'start_connected' in network and nic_device.connectable.startConnected != network['start_connected']: - nic_device.connectable.startConnected = network['start_connected'] + if ( + "start_connected" in network + and nic_device.connectable.startConnected + != network["start_connected"] + ): + nic_device.connectable.startConnected = network[ + "start_connected" + ] self.change_detected = True - if 'connected' in network and nic_device.connectable.connected != network['connected']: - nic_device.connectable.connected = network['connected'] + if ( + "connected" in network + and nic_device.connectable.connected + != network["connected"] + ): + nic_device.connectable.connected = network[ + "connected" + ] self.change_detected = True - if 'name' in network: - network_object = self.find_network_by_name(network_name=network['name'])[0] - if network_object and hasattr(network_object, 'portKeys') and hasattr(nic_spec.device.backing, 'port'): - if network_object.config.distributedVirtualSwitch.uuid != nic_spec.device.backing.port.switchUuid: + if "name" in network: + network_object = self.find_network_by_name( + network_name=network["name"] + )[0] + if ( + network_object + and hasattr(network_object, "portKeys") + and hasattr( + nic_spec.device.backing, "port" + ) + ): + if ( + network_object.config.distributedVirtualSwitch.uuid + != nic_spec.device.backing.port.switchUuid + ): # DistributedVirtualPortGroup - nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() - nic_spec.device.backing.port = vim.dvs.PortConnection() - nic_spec.device.backing.port.switchUuid = 
network_object.config.distributedVirtualSwitch.uuid - nic_spec.device.backing.port.portgroupKey = network_object.key + nic_spec.device.backing = ( + vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + ) + nic_spec.device.backing.port = ( + vim.dvs.PortConnection() + ) + nic_spec.device.backing.port.switchUuid = ( + network_object.config.distributedVirtualSwitch.uuid + ) + nic_spec.device.backing.port.portgroupKey = ( + network_object.key + ) self.change_detected = True - elif network_object and isinstance(network_object, vim.OpaqueNetwork) and hasattr(nic_spec.device.backing, 'opaqueNetworkId'): - if nic_spec.device.backing.opaqueNetworkId != network_object.summary.opaqueNetworkId: + elif ( + network_object + and isinstance( + network_object, vim.OpaqueNetwork + ) + and hasattr( + nic_spec.device.backing, + "opaqueNetworkId", + ) + ): + if ( + nic_spec.device.backing.opaqueNetworkId + != network_object.summary.opaqueNetworkId + ): # NSX-T Logical Switch - nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() - network_id = network_object.summary.opaqueNetworkId - nic_spec.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch' - nic_spec.device.backing.opaqueNetworkId = network_id - nic_spec.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id + nic_spec.device.backing = ( + vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo() + ) + network_id = ( + network_object.summary.opaqueNetworkId + ) + nic_spec.device.backing.opaqueNetworkType = ( + "nsx.LogicalSwitch" + ) + nic_spec.device.backing.opaqueNetworkId = ( + network_id + ) + nic_spec.device.deviceInfo.summary = ( + "nsx.LogicalSwitch: %s" + % network_id + ) self.change_detected = True - elif nic_device.deviceInfo.summary != network['name']: + elif ( + nic_device.deviceInfo.summary + != network["name"] + ): # Standard vSwitch - nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() - nic_spec.device.backing.deviceName = network['name'] - nic_spec.device.backing.network = network_object + nic_spec.device.backing = ( + vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + ) + nic_spec.device.backing.deviceName = network[ + "name" + ] + nic_spec.device.backing.network = ( + network_object + ) self.change_detected = True - if 'manual_mac' in network and nic_device.macAddress != network['manual_mac']: - if vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff: - self.module.fail_json(msg='Expected power state is poweredOff to reconfigure MAC address') - nic_device.addressType = 'manual' - nic_device.macAddress = network['manual_mac'] + if ( + "manual_mac" in network + and nic_device.macAddress + != network["manual_mac"] + ): + if ( + vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOff + ): + self.module.fail_json( + msg="Expected power state is poweredOff to reconfigure MAC address" + ) + nic_device.addressType = "manual" + nic_device.macAddress = network["manual_mac"] self.change_detected = True - if 'directpath_io' in network: - if isinstance(nic_device, vim.vm.device.VirtualVmxnet3): - if nic_device.uptCompatibilityEnabled != network['directpath_io']: - nic_device.uptCompatibilityEnabled = network['directpath_io'] + if "directpath_io" in network: + if isinstance( + nic_device, vim.vm.device.VirtualVmxnet3 + ): + if ( + nic_device.uptCompatibilityEnabled + != network["directpath_io"] + ): + nic_device.uptCompatibilityEnabled = network[ + "directpath_io" + ] self.change_detected = True else: - 
self.module.fail_json(msg='UPT is only compatible for Vmxnet3 adapter.' - + ' Clients can set this property enabled or disabled if ethernet virtual device is Vmxnet3.') + self.module.fail_json( + msg="UPT is only compatible for Vmxnet3 adapter." + + " Clients can set this property enabled or disabled if ethernet virtual device is Vmxnet3." + ) if self.change_detected: self.config_spec.deviceChange.append(nic_spec) - elif network['state'].lower() == 'absent': - nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove + elif network["state"].lower() == "absent": + nic_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.remove + ) nic_spec.device = nic_device self.change_detected = True self.config_spec.deviceChange.append(nic_spec) else: - self.module.fail_json(msg='Unable to find the specified network adapter: %s' % network) + self.module.fail_json( + msg="Unable to find the specified network adapter: %s" + % network + ) def reconfigure_vm_network(self, vm_obj): network_list = self.sanitize_network_params() # gather network adapter info only - if (self.params['gather_network_info'] is not None and self.params['gather_network_info']) or len(network_list) == 0: - results = {'changed': False, 'failed': False, 'network_data': self.get_network_info(vm_obj)} + if ( + self.params["gather_network_info"] is not None + and self.params["gather_network_info"] + ) or len(network_list) == 0: + results = { + "changed": False, + "failed": False, + "network_data": self.get_network_info(vm_obj), + } # do reconfigure then gather info else: self.get_network_config_spec(vm_obj, network_list) @@ -540,17 +738,29 @@ def reconfigure_vm_network(self, vm_obj): task = vm_obj.ReconfigVM_Task(spec=self.config_spec) wait_for_task(task) except vim.fault.InvalidDeviceSpec as e: - self.module.fail_json(msg="Failed to configure network adapter on given virtual machine due to invalid" - " device spec : %s" % to_native(e.msg), - details="Please check ESXi server logs for more details.") + self.module.fail_json( + msg="Failed to configure network adapter on given virtual machine due to invalid" + " device spec : %s" % to_native(e.msg), + details="Please check ESXi server logs for more details.", + ) except vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to reconfigure virtual machine due to" - " product versioning restrictions: %s" % to_native(e.msg)) - if task.info.state == 'error': - results = {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg} + self.module.fail_json( + msg="Failed to reconfigure virtual machine due to" + " product versioning restrictions: %s" % to_native(e.msg) + ) + if task.info.state == "error": + results = { + "changed": self.change_detected, + "failed": True, + "msg": task.info.error.msg, + } else: network_info = self.get_network_info(vm_obj) - results = {'changed': self.change_detected, 'failed': False, 'network_data': network_info} + results = { + "changed": self.change_detected, + "failed": False, + "network_data": network_info, + } return results @@ -558,37 +768,42 @@ def reconfigure_vm_network(self, vm_obj): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', default='ha-datacenter'), - esxi_hostname=dict(type='str'), - cluster=dict(type='str'), - gather_network_info=dict(type='bool', default=False, 
aliases=['gather_network_facts']), - networks=dict(type='list', default=[]) + name=dict(type="str"), + uuid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", default="ha-datacenter"), + esxi_hostname=dict(type="str"), + cluster=dict(type="str"), + gather_network_info=dict( + type="bool", default=False, aliases=["gather_network_facts"] + ), + networks=dict(type="list", default=[]), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) pyv = PyVmomiHelper(module) vm = pyv.get_vm() if not vm: - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg='Unable to find the specified virtual machine using %s' % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to find the specified virtual machine using %s" % vm_id + ) result = pyv.reconfigure_vm_network(vm) - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_powerstate.py b/plugins/modules/vmware_guest_powerstate.py index 2196f15..309915a 100644 --- a/plugins/modules/vmware_guest_powerstate.py +++ b/plugins/modules/vmware_guest_powerstate.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_guest_powerstate short_description: Manages power states of virtual machines in vCenter @@ -110,9 +113,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set the state of a virtual machine to poweroff vmware_guest_powerstate: hostname: "{{ vcenter_hostname }}" @@ -162,9 +165,9 @@ state_change_timeout: 200 delegate_to: localhost register: deploy -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ try: from pyVmomi import vim, vmodl @@ -174,38 +177,53 @@ from random import randint from datetime import datetime from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, set_vm_power_state, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + set_vm_power_state, + vmware_argument_spec, +) from ansible.module_utils._text import to_native def main(): argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(type='str', default='present', - choices=['present', 'powered-off', 'powered-on', 'reboot-guest', 'restarted', 'shutdown-guest', 'suspended']), - name=dict(type='str'), - name_match=dict(type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - folder=dict(type='str'), - force=dict(type='bool', default=False), - scheduled_at=dict(type='str'), + state=dict( + type="str", + 
default="present", + choices=[ + "present", + "powered-off", + "powered-on", + "reboot-guest", + "restarted", + "shutdown-guest", + "suspended", + ], + ), + name=dict(type="str"), + name_match=dict( + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + folder=dict(type="str"), + force=dict(type="bool", default=False), + scheduled_at=dict(type="str"), schedule_task_name=dict(), schedule_task_description=dict(), - schedule_task_enabled=dict(type='bool', default=True), - state_change_timeout=dict(type='int', default=0), + schedule_task_enabled=dict(type="bool", default=True), + state_change_timeout=dict(type="int", default=0), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, - mutually_exclusive=[ - ['name', 'uuid', 'moid'], - ], + mutually_exclusive=[["name", "uuid", "moid"]], ) - result = dict(changed=False,) + result = dict(changed=False) pyv = PyVmomi(module) @@ -214,65 +232,93 @@ def main(): if vm: # VM already exists, so set power state - scheduled_at = module.params.get('scheduled_at', None) + scheduled_at = module.params.get("scheduled_at", None) if scheduled_at: if not pyv.is_vcenter(): - module.fail_json(msg="Scheduling task requires vCenter, hostname %s " - "is an ESXi server." % module.params.get('hostname')) + module.fail_json( + msg="Scheduling task requires vCenter, hostname %s " + "is an ESXi server." % module.params.get("hostname") + ) powerstate = { - 'present': vim.VirtualMachine.PowerOn, - 'powered-off': vim.VirtualMachine.PowerOff, - 'powered-on': vim.VirtualMachine.PowerOn, - 'reboot-guest': vim.VirtualMachine.RebootGuest, - 'restarted': vim.VirtualMachine.Reset, - 'shutdown-guest': vim.VirtualMachine.ShutdownGuest, - 'suspended': vim.VirtualMachine.Suspend, + "present": vim.VirtualMachine.PowerOn, + "powered-off": vim.VirtualMachine.PowerOff, + "powered-on": vim.VirtualMachine.PowerOn, + "reboot-guest": vim.VirtualMachine.RebootGuest, + "restarted": vim.VirtualMachine.Reset, + "shutdown-guest": vim.VirtualMachine.ShutdownGuest, + "suspended": vim.VirtualMachine.Suspend, } - dt = '' + dt = "" try: - dt = datetime.strptime(scheduled_at, '%d/%m/%Y %H:%M') + dt = datetime.strptime(scheduled_at, "%d/%m/%Y %H:%M") except ValueError as e: - module.fail_json(msg="Failed to convert given date and time string to Python datetime object," - "please specify string in 'dd/mm/yyyy hh:mm' format: %s" % to_native(e)) + module.fail_json( + msg="Failed to convert given date and time string to Python datetime object," + "please specify string in 'dd/mm/yyyy hh:mm' format: %s" + % to_native(e) + ) schedule_task_spec = vim.scheduler.ScheduledTaskSpec() - schedule_task_name = module.params['schedule_task_name'] or 'task_%s' % str(randint(10000, 99999)) - schedule_task_desc = module.params['schedule_task_description'] + schedule_task_name = module.params[ + "schedule_task_name" + ] or "task_%s" % str(randint(10000, 99999)) + schedule_task_desc = module.params["schedule_task_description"] if schedule_task_desc is None: - schedule_task_desc = 'Schedule task for vm %s for ' \ - 'operation %s at %s' % (vm.name, module.params['state'], scheduled_at) + schedule_task_desc = ( + "Schedule task for vm %s for " + "operation %s at %s" + % (vm.name, module.params["state"], scheduled_at) + ) schedule_task_spec.name = schedule_task_name schedule_task_spec.description = schedule_task_desc schedule_task_spec.scheduler = vim.scheduler.OnceTaskScheduler() 
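The scheduled-task branch above only accepts scheduled_at in 'dd/mm/yyyy hh:mm' form and falls back to a random task_NNNNN name when schedule_task_name is not supplied. A minimal runnable sketch of that parsing, with fail_json swapped for a plain exception and an illustrative date:

from datetime import datetime
from random import randint

def parse_schedule(scheduled_at, task_name=None):
    """Strict 'dd/mm/yyyy hh:mm' parsing plus the random fallback task name."""
    try:
        run_at = datetime.strptime(scheduled_at, "%d/%m/%Y %H:%M")
    except ValueError as err:
        # the module calls fail_json here; a plain exception keeps the sketch standalone
        raise SystemExit("please specify string in 'dd/mm/yyyy hh:mm' format: %s" % err)
    return run_at, task_name or "task_%s" % randint(10000, 99999)

# illustrative input only
run_at, name = parse_schedule("09/01/2020 12:17")
print(run_at.isoformat(), name)       # 2020-01-09T12:17:00 task_NNNNN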
schedule_task_spec.scheduler.runAt = dt schedule_task_spec.action = vim.action.MethodAction() - schedule_task_spec.action.name = powerstate[module.params['state']] - schedule_task_spec.enabled = module.params['schedule_task_enabled'] + schedule_task_spec.action.name = powerstate[module.params["state"]] + schedule_task_spec.enabled = module.params["schedule_task_enabled"] try: - pyv.content.scheduledTaskManager.CreateScheduledTask(vm, schedule_task_spec) + pyv.content.scheduledTaskManager.CreateScheduledTask( + vm, schedule_task_spec + ) # As this is async task, we create scheduled task and mark state to changed. module.exit_json(changed=True) except vim.fault.InvalidName as e: - module.fail_json(msg="Failed to create scheduled task %s for %s : %s" % (module.params.get('state'), - vm.name, - to_native(e.msg))) + module.fail_json( + msg="Failed to create scheduled task %s for %s : %s" + % (module.params.get("state"), vm.name, to_native(e.msg)) + ) except vim.fault.DuplicateName as e: module.exit_json(changed=False, details=to_native(e.msg)) except vmodl.fault.InvalidArgument as e: - module.fail_json(msg="Failed to create scheduled task %s as specifications " - "given are invalid: %s" % (module.params.get('state'), - to_native(e.msg))) + module.fail_json( + msg="Failed to create scheduled task %s as specifications " + "given are invalid: %s" + % (module.params.get("state"), to_native(e.msg)) + ) else: - result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout']) + result = set_vm_power_state( + pyv.content, + vm, + module.params["state"], + module.params["force"], + module.params["state_change_timeout"], + ) else: - id = module.params.get('uuid') or module.params.get('moid') or module.params.get('name') - module.fail_json(msg="Unable to set power state for non-existing virtual machine : '%s'" % id) + id = ( + module.params.get("uuid") + or module.params.get("moid") + or module.params.get("name") + ) + module.fail_json( + msg="Unable to set power state for non-existing virtual machine : '%s'" + % id + ) - if result.get('failed') is True: + if result.get("failed") is True: module.fail_json(**result) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_register_operation.py b/plugins/modules/vmware_guest_register_operation.py index 10705ba..5db8a8a 100644 --- a/plugins/modules/vmware_guest_register_operation.py +++ b/plugins/modules/vmware_guest_register_operation.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ module: vmware_guest_register_operation short_description: VM inventory registration operation author: @@ -91,9 +92,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Register VM to inventory vmware_guest_register_operation: hostname: "{{ vcenter_hostname }}" @@ -146,20 +147,27 @@ folder: "/vm" name: "{{ vm_name }}" state: absent -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from 
ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_resource_pool_by_name, \ - wait_for_task, compile_folder_path_for_object, find_cluster_by_name +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_resource_pool_by_name, + wait_for_task, + compile_folder_path_for_object, + find_cluster_by_name, +) from ansible.module_utils.basic import AnsibleModule @@ -181,29 +189,39 @@ def execute(self): datacenter = self.find_datacenter_by_name(self.datacenter) if not datacenter: - self.module.fail_json(msg="Cannot find the specified Datacenter: %s" % self.datacenter) + self.module.fail_json( + msg="Cannot find the specified Datacenter: %s" + % self.datacenter + ) dcpath = compile_folder_path_for_object(datacenter) if not dcpath.endswith("/"): dcpath += "/" - if(self.folder in [None, "", "/"]): - self.module.fail_json(msg="Please specify folder path other than blank or '/'") - elif(self.folder.startswith("/vm")): + if self.folder in [None, "", "/"]: + self.module.fail_json( + msg="Please specify folder path other than blank or '/'" + ) + elif self.folder.startswith("/vm"): fullpath = "%s%s%s" % (dcpath, self.datacenter, self.folder) else: fullpath = "%s%s" % (dcpath, self.folder) - folder_obj = self.content.searchIndex.FindByInventoryPath(inventoryPath="%s" % fullpath) + folder_obj = self.content.searchIndex.FindByInventoryPath( + inventoryPath="%s" % fullpath + ) if not folder_obj: details = { - 'datacenter': datacenter.name, - 'datacenter_path': dcpath, - 'folder': self.folder, - 'full_search_path': fullpath, + "datacenter": datacenter.name, + "datacenter_path": dcpath, + "folder": self.folder, + "full_search_path": fullpath, } - self.module.fail_json(msg="No folder %s matched in the search path : %s" % (self.folder, fullpath), - details=details) + self.module.fail_json( + msg="No folder %s matched in the search path : %s" + % (self.folder, fullpath), + details=details, + ) if self.state == "present": if self.get_vm(): @@ -212,25 +230,43 @@ def execute(self): if self.esxi_hostname: host_obj = self.find_hostsystem_by_name(self.esxi_hostname) if not host_obj: - self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname) + self.module.fail_json( + msg="Cannot find the specified ESXi host: %s" + % self.esxi_hostname + ) else: host_obj = None if self.cluster: - cluster_obj = find_cluster_by_name(self.content, self.cluster, datacenter) + cluster_obj = find_cluster_by_name( + self.content, self.cluster, datacenter + ) if not cluster_obj: - self.module.fail_json(msg="Cannot find the specified cluster name: %s" % self.cluster) + self.module.fail_json( + msg="Cannot find the specified cluster name: %s" + % self.cluster + ) resource_pool_obj = cluster_obj.resourcePool elif self.resource_pool: - resource_pool_obj = find_resource_pool_by_name(self.content, self.resource_pool) + resource_pool_obj = find_resource_pool_by_name( + self.content, self.resource_pool + ) if not resource_pool_obj: - self.module.fail_json(msg="Cannot find the specified resource pool: %s" % self.resource_pool) + self.module.fail_json( + msg="Cannot find the specified resource pool: %s" + % self.resource_pool + ) else: resource_pool_obj = host_obj.parent.resourcePool - task = folder_obj.RegisterVM_Task(path=self.path, name=self.name, asTemplate=self.template, - pool=resource_pool_obj, host=host_obj) + task = folder_obj.RegisterVM_Task( + 
path=self.path, + name=self.name, + asTemplate=self.template, + pool=resource_pool_obj, + host=host_obj, + ) changed = False try: @@ -255,19 +291,24 @@ def execute(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(datacenter=dict(type="str"), - cluster=dict(type="str"), - folder=dict(type="str"), - name=dict(type="str", required=True), - uuid=dict(type="str"), - esxi_hostname=dict(type="str"), - path=dict(type="str"), - template=dict(type="bool", default=False), - resource_pool=dict(type="str"), - state=dict(type="str", default="present", choices=["present", "absent"])) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + argument_spec.update( + datacenter=dict(type="str"), + cluster=dict(type="str"), + folder=dict(type="str"), + name=dict(type="str", required=True), + uuid=dict(type="str"), + esxi_hostname=dict(type="str"), + path=dict(type="str"), + template=dict(type="bool", default=False), + resource_pool=dict(type="str"), + state=dict( + type="str", default="present", choices=["present", "absent"] + ), + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_guest_register_operation = VMwareGuestRegisterOperation(module) vmware_guest_register_operation.execute() diff --git a/plugins/modules/vmware_guest_screenshot.py b/plugins/modules/vmware_guest_screenshot.py index a1a72ab..12ead64 100644 --- a/plugins/modules/vmware_guest_screenshot.py +++ b/plugins/modules/vmware_guest_screenshot.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_screenshot short_description: Create a screenshot of the Virtual Machine console. 
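The hunks that follow rework generate_http_access_url(), which turns the datastore path returned by CreateScreenshot_Task (something like "[datastore1] vm01/vm01-1.png") into an HTTPS /folder URL for download. A standalone sketch of that construction using only the standard library; the hostname, datacenter, and file path are illustrative, and the ESXi 'ha-datacenter' branch and the '&' escaping are left out:

from urllib.parse import quote, urlencode

def screenshot_url(hostname, file_path, datacenter):
    """Build the /folder datastore URL the screenshot is downloaded from."""
    ds_name, rel_path = file_path.split(" ", 1)       # "[datastore1]", "vm01/vm01-1.png"
    params = {"dsName": ds_name.strip("[]"), "dcPath": datacenter}
    return "https://%s/folder/%s?%s" % (hostname, quote(rel_path), urlencode(params))

# illustrative values; the real ones come from module params and the task result
print(screenshot_url("vcenter.example.com", "[datastore1] vm01/vm01-1.png", "DC0"))
# https://vcenter.example.com/folder/vm01/vm01-1.png?dsName=datastore1&dcPath=DC0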
@@ -86,9 +87,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: take a screenshot of the virtual machine console vmware_guest_screenshot: validate_certs: no @@ -114,7 +115,7 @@ local_path: "/tmp/" delegate_to: localhost register: take_screenshot -''' +""" RETURN = """ screenshot_info: @@ -142,7 +143,12 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode, quote from ansible.module_utils._text import to_native from ansible.module_utils.urls import open_url -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, get_parent_datacenter +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + get_parent_datacenter, +) import os @@ -159,13 +165,19 @@ def generate_http_access_url(self, file_path): return url_path path = "/folder/%s" % quote(file_path.split()[1]) - params = dict(dsName=file_path.split()[0].strip('[]')) + params = dict(dsName=file_path.split()[0].strip("[]")) if not self.is_vcenter(): - datacenter = 'ha-datacenter' + datacenter = "ha-datacenter" else: - datacenter = get_parent_datacenter(self.current_vm_obj).name.replace('&', '%26') - params['dcPath'] = datacenter - url_path = "https://%s%s?%s" % (self.params['hostname'], path, urlencode(params)) + datacenter = get_parent_datacenter( + self.current_vm_obj + ).name.replace("&", "%26") + params["dcPath"] = datacenter + url_path = "https://%s%s?%s" % ( + self.params["hostname"], + path, + urlencode(params), + ) return url_path @@ -173,27 +185,38 @@ def download_screenshot_file(self, file_url, local_file_path, file_name): response = None download_size = 0 # file is downloaded as local_file_name when specified, or use original file name - if local_file_path.endswith('.png'): - local_file_name = local_file_path.split('/')[-1] - local_file_path = local_file_path.rsplit('/', 1)[0] + if local_file_path.endswith(".png"): + local_file_name = local_file_path.split("/")[-1] + local_file_path = local_file_path.rsplit("/", 1)[0] else: local_file_name = file_name if not os.path.exists(local_file_path): try: os.makedirs(local_file_path) except OSError as err: - self.module.fail_json(msg="Exception caught when create folder %s on local machine, with error %s" - % (local_file_path, to_native(err))) + self.module.fail_json( + msg="Exception caught when create folder %s on local machine, with error %s" + % (local_file_path, to_native(err)) + ) local_file = os.path.join(local_file_path, local_file_name) - with open(local_file, 'wb') as handle: + with open(local_file, "wb") as handle: try: - response = open_url(file_url, url_username=self.params.get('username'), - url_password=self.params.get('password'), validate_certs=False) + response = open_url( + file_url, + url_username=self.params.get("username"), + url_password=self.params.get("password"), + validate_certs=False, + ) except Exception as err: - self.module.fail_json(msg="Download screenshot file from URL %s, failed due to %s" % (file_url, to_native(err))) + self.module.fail_json( + msg="Download screenshot file from URL %s, failed due to %s" + % (file_url, to_native(err)) + ) if not response or response.getcode() >= 400: - self.module.fail_json(msg="Download screenshot file from URL %s, failed with response %s, response code %s" - % (file_url, response, response.getcode())) + self.module.fail_json( + msg="Download screenshot file from URL %s, failed with response %s, 
response code %s" + % (file_url, response, response.getcode()) + ) bytes_read = response.read(2 ** 20) while bytes_read: handle.write(bytes_read) @@ -214,74 +237,105 @@ def get_screenshot_facts(self, task_info, file_url, file_size): task_complete_time=task_info.completeTime, result=task_info.state, screenshot_file_url=file_url, - download_local_path=self.params.get('local_path'), + download_local_path=self.params.get("local_path"), download_file_size=file_size, ) return screenshot_facts def take_vm_screenshot(self): - if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn: - self.module.fail_json(msg="VM is %s, valid power state is poweredOn." % self.current_vm_obj.runtime.powerState) + if ( + self.current_vm_obj.runtime.powerState + != vim.VirtualMachinePowerState.poweredOn + ): + self.module.fail_json( + msg="VM is %s, valid power state is poweredOn." + % self.current_vm_obj.runtime.powerState + ) try: task = self.current_vm_obj.CreateScreenshot_Task() wait_for_task(task) except vim.fault.FileFault as e: - self.module.fail_json(msg="Failed to create screenshot due to errors when creating or accessing one or more" - " files needed for this operation, %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to create screenshot due to errors when creating or accessing one or more" + " files needed for this operation, %s" % to_native(e.msg) + ) except vim.fault.InvalidState as e: - self.module.fail_json(msg="Failed to create screenshot due to VM is not ready to respond to such requests," - " %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to create screenshot due to VM is not ready to respond to such requests," + " %s" % to_native(e.msg) + ) except vmodl.RuntimeFault as e: - self.module.fail_json(msg="Failed to create screenshot due to runtime fault, %s," % to_native(e.msg)) + self.module.fail_json( + msg="Failed to create screenshot due to runtime fault, %s," + % to_native(e.msg) + ) except vim.fault.TaskInProgress as e: - self.module.fail_json(msg="Failed to create screenshot due to VM is busy, %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to create screenshot due to VM is busy, %s" + % to_native(e.msg) + ) - if task.info.state == 'error': - return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg} + if task.info.state == "error": + return { + "changed": self.change_detected, + "failed": True, + "msg": task.info.error.msg, + } else: download_file_size = None self.change_detected = True file_url = self.generate_http_access_url(task.info.result) - if self.params.get('local_path'): + if self.params.get("local_path"): if file_url: - download_file_size = self.download_screenshot_file(file_url=file_url, - local_file_path=self.params['local_path'], - file_name=task.info.result.split('/')[-1]) - screenshot_facts = self.get_screenshot_facts(task.info, file_url, download_file_size) - return {'changed': self.change_detected, 'failed': False, 'screenshot_info': screenshot_facts} + download_file_size = self.download_screenshot_file( + file_url=file_url, + local_file_path=self.params["local_path"], + file_name=task.info.result.split("/")[-1], + ) + screenshot_facts = self.get_screenshot_facts( + task.info, file_url, download_file_size + ) + return { + "changed": self.change_detected, + "failed": False, + "screenshot_info": screenshot_facts, + } def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - 
folder=dict(type='str'), - datacenter=dict(type='str'), - esxi_hostname=dict(type='str'), - cluster=dict(type='str'), - local_path=dict(type='path'), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str"), + esxi_hostname=dict(type="str"), + cluster=dict(type="str"), + local_path=dict(type="path"), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) pyv = PyVmomiHelper(module) vm = pyv.get_vm() if not vm: - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg='Unable to find the specified virtual machine : %s' % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to find the specified virtual machine : %s" % vm_id + ) result = pyv.take_vm_screenshot() - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_sendkey.py b/plugins/modules/vmware_guest_sendkey.py index adf1219..46a0893 100644 --- a/plugins/modules/vmware_guest_sendkey.py +++ b/plugins/modules/vmware_guest_sendkey.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_sendkey short_description: Send USB HID codes to the Virtual Machine's keyboard. 
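The vmware_guest_sendkey changes below are quoting-only, but the table they touch maps characters to USB HID usage codes, and hid_to_hex() packs each code before PutUsbScanCodes() is called. A minimal runnable sketch of that lookup and packing, using a hypothetical subset of the table:

# A hypothetical subset of the module's keys_hid_code table:
# character or key name -> (usage code, modifiers)
KEYS = {
    "a": ("0x04", []),
    "A": ("0x04", ["LEFTSHIFT"]),
    "1": ("0x1e", []),
    "!": ("0x1e", ["LEFTSHIFT"]),
    "ENTER": ("0x28", []),
}

def hid_to_hex(hid_code):
    """Same packing as the module: usage code in the high 16 bits, low bits 0o0007."""
    return int(hid_code, 16) << 16 | 0o0007

for key in ("a", "A", "ENTER"):
    code, modifiers = KEYS[key]
    print(key, hex(hid_to_hex(code)), modifiers)
# a 0x40007 []
# A 0x40007 ['LEFTSHIFT']
# ENTER 0x280007 []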
@@ -90,9 +91,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Send list of keys to virtual machine vmware_guest_sendkey: validate_certs: no @@ -135,7 +136,7 @@ string_send: "user_logon" delegate_to: localhost register: keys_num_sent -''' +""" RETURN = """ sendkey_info: @@ -163,7 +164,10 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): @@ -175,78 +179,78 @@ def __init__(self, module): # HID usage tables https://www.usb.org/sites/default/files/documents/hut1_12v2.pdf # define valid characters and keys value, hex_code, key value and key modifier self.keys_hid_code = [ - (('a', 'A'), '0x04', [('a', []), ('A', ['LEFTSHIFT'])]), - (('b', 'B'), '0x05', [('b', []), ('B', ['LEFTSHIFT'])]), - (('c', 'C'), '0x06', [('c', []), ('C', ['LEFTSHIFT'])]), - (('d', 'D'), '0x07', [('d', []), ('D', ['LEFTSHIFT'])]), - (('e', 'E'), '0x08', [('e', []), ('E', ['LEFTSHIFT'])]), - (('f', 'F'), '0x09', [('f', []), ('F', ['LEFTSHIFT'])]), - (('g', 'G'), '0x0a', [('g', []), ('G', ['LEFTSHIFT'])]), - (('h', 'H'), '0x0b', [('h', []), ('H', ['LEFTSHIFT'])]), - (('i', 'I'), '0x0c', [('i', []), ('I', ['LEFTSHIFT'])]), - (('j', 'J'), '0x0d', [('j', []), ('J', ['LEFTSHIFT'])]), - (('k', 'K'), '0x0e', [('k', []), ('K', ['LEFTSHIFT'])]), - (('l', 'L'), '0x0f', [('l', []), ('L', ['LEFTSHIFT'])]), - (('m', 'M'), '0x10', [('m', []), ('M', ['LEFTSHIFT'])]), - (('n', 'N'), '0x11', [('n', []), ('N', ['LEFTSHIFT'])]), - (('o', 'O'), '0x12', [('o', []), ('O', ['LEFTSHIFT'])]), - (('p', 'P'), '0x13', [('p', []), ('P', ['LEFTSHIFT'])]), - (('q', 'Q'), '0x14', [('q', []), ('Q', ['LEFTSHIFT'])]), - (('r', 'R'), '0x15', [('r', []), ('R', ['LEFTSHIFT'])]), - (('s', 'S'), '0x16', [('s', []), ('S', ['LEFTSHIFT'])]), - (('t', 'T'), '0x17', [('t', []), ('T', ['LEFTSHIFT'])]), - (('u', 'U'), '0x18', [('u', []), ('U', ['LEFTSHIFT'])]), - (('v', 'V'), '0x19', [('v', []), ('V', ['LEFTSHIFT'])]), - (('w', 'W'), '0x1a', [('w', []), ('W', ['LEFTSHIFT'])]), - (('x', 'X'), '0x1b', [('x', []), ('X', ['LEFTSHIFT'])]), - (('y', 'Y'), '0x1c', [('y', []), ('Y', ['LEFTSHIFT'])]), - (('z', 'Z'), '0x1d', [('z', []), ('Z', ['LEFTSHIFT'])]), - (('1', '!'), '0x1e', [('1', []), ('!', ['LEFTSHIFT'])]), - (('2', '@'), '0x1f', [('2', []), ('@', ['LEFTSHIFT'])]), - (('3', '#'), '0x20', [('3', []), ('#', ['LEFTSHIFT'])]), - (('4', '$'), '0x21', [('4', []), ('$', ['LEFTSHIFT'])]), - (('5', '%'), '0x22', [('5', []), ('%', ['LEFTSHIFT'])]), - (('6', '^'), '0x23', [('6', []), ('^', ['LEFTSHIFT'])]), - (('7', '&'), '0x24', [('7', []), ('&', ['LEFTSHIFT'])]), - (('8', '*'), '0x25', [('8', []), ('*', ['LEFTSHIFT'])]), - (('9', '('), '0x26', [('9', []), ('(', ['LEFTSHIFT'])]), - (('0', ')'), '0x27', [('0', []), (')', ['LEFTSHIFT'])]), - (('-', '_'), '0x2d', [('-', []), ('_', ['LEFTSHIFT'])]), - (('=', '+'), '0x2e', [('=', []), ('+', ['LEFTSHIFT'])]), - (('[', '{'), '0x2f', [('[', []), ('{', ['LEFTSHIFT'])]), - ((']', '}'), '0x30', [(']', []), ('}', ['LEFTSHIFT'])]), - (('\\', '|'), '0x31', [('\\', []), ('|', ['LEFTSHIFT'])]), - ((';', ':'), '0x33', [(';', []), (':', ['LEFTSHIFT'])]), - (('\'', '"'), '0x34', [('\'', []), ('"', ['LEFTSHIFT'])]), - (('`', '~'), '0x35', [('`', []), ('~', 
['LEFTSHIFT'])]), - ((',', '<'), '0x36', [(',', []), ('<', ['LEFTSHIFT'])]), - (('.', '>'), '0x37', [('.', []), ('>', ['LEFTSHIFT'])]), - (('/', '?'), '0x38', [('/', []), ('?', ['LEFTSHIFT'])]), - ('ENTER', '0x28', [('', [])]), - ('ESC', '0x29', [('', [])]), - ('BACKSPACE', '0x2a', [('', [])]), - ('TAB', '0x2b', [('', [])]), - ('SPACE', '0x2c', [(' ', [])]), - ('CAPSLOCK', '0x39', [('', [])]), - ('F1', '0x3a', [('', [])]), - ('F2', '0x3b', [('', [])]), - ('F3', '0x3c', [('', [])]), - ('F4', '0x3d', [('', [])]), - ('F5', '0x3e', [('', [])]), - ('F6', '0x3f', [('', [])]), - ('F7', '0x40', [('', [])]), - ('F8', '0x41', [('', [])]), - ('F9', '0x42', [('', [])]), - ('F10', '0x43', [('', [])]), - ('F11', '0x44', [('', [])]), - ('F12', '0x45', [('', [])]), - ('DELETE', '0x4c', [('', [])]), - ('CTRL_ALT_DEL', '0x4c', [('', ['CTRL', 'ALT'])]), - ('CTRL_C', '0x06', [('', ['CTRL'])]), - ('RIGHTARROW', '0x4f', [('', [])]), - ('LEFTARROW', '0x50', [('', [])]), - ('DOWNARROW', '0x51', [('', [])]), - ('UPARROW', '0x52', [('', [])]), + (("a", "A"), "0x04", [("a", []), ("A", ["LEFTSHIFT"])]), + (("b", "B"), "0x05", [("b", []), ("B", ["LEFTSHIFT"])]), + (("c", "C"), "0x06", [("c", []), ("C", ["LEFTSHIFT"])]), + (("d", "D"), "0x07", [("d", []), ("D", ["LEFTSHIFT"])]), + (("e", "E"), "0x08", [("e", []), ("E", ["LEFTSHIFT"])]), + (("f", "F"), "0x09", [("f", []), ("F", ["LEFTSHIFT"])]), + (("g", "G"), "0x0a", [("g", []), ("G", ["LEFTSHIFT"])]), + (("h", "H"), "0x0b", [("h", []), ("H", ["LEFTSHIFT"])]), + (("i", "I"), "0x0c", [("i", []), ("I", ["LEFTSHIFT"])]), + (("j", "J"), "0x0d", [("j", []), ("J", ["LEFTSHIFT"])]), + (("k", "K"), "0x0e", [("k", []), ("K", ["LEFTSHIFT"])]), + (("l", "L"), "0x0f", [("l", []), ("L", ["LEFTSHIFT"])]), + (("m", "M"), "0x10", [("m", []), ("M", ["LEFTSHIFT"])]), + (("n", "N"), "0x11", [("n", []), ("N", ["LEFTSHIFT"])]), + (("o", "O"), "0x12", [("o", []), ("O", ["LEFTSHIFT"])]), + (("p", "P"), "0x13", [("p", []), ("P", ["LEFTSHIFT"])]), + (("q", "Q"), "0x14", [("q", []), ("Q", ["LEFTSHIFT"])]), + (("r", "R"), "0x15", [("r", []), ("R", ["LEFTSHIFT"])]), + (("s", "S"), "0x16", [("s", []), ("S", ["LEFTSHIFT"])]), + (("t", "T"), "0x17", [("t", []), ("T", ["LEFTSHIFT"])]), + (("u", "U"), "0x18", [("u", []), ("U", ["LEFTSHIFT"])]), + (("v", "V"), "0x19", [("v", []), ("V", ["LEFTSHIFT"])]), + (("w", "W"), "0x1a", [("w", []), ("W", ["LEFTSHIFT"])]), + (("x", "X"), "0x1b", [("x", []), ("X", ["LEFTSHIFT"])]), + (("y", "Y"), "0x1c", [("y", []), ("Y", ["LEFTSHIFT"])]), + (("z", "Z"), "0x1d", [("z", []), ("Z", ["LEFTSHIFT"])]), + (("1", "!"), "0x1e", [("1", []), ("!", ["LEFTSHIFT"])]), + (("2", "@"), "0x1f", [("2", []), ("@", ["LEFTSHIFT"])]), + (("3", "#"), "0x20", [("3", []), ("#", ["LEFTSHIFT"])]), + (("4", "$"), "0x21", [("4", []), ("$", ["LEFTSHIFT"])]), + (("5", "%"), "0x22", [("5", []), ("%", ["LEFTSHIFT"])]), + (("6", "^"), "0x23", [("6", []), ("^", ["LEFTSHIFT"])]), + (("7", "&"), "0x24", [("7", []), ("&", ["LEFTSHIFT"])]), + (("8", "*"), "0x25", [("8", []), ("*", ["LEFTSHIFT"])]), + (("9", "("), "0x26", [("9", []), ("(", ["LEFTSHIFT"])]), + (("0", ")"), "0x27", [("0", []), (")", ["LEFTSHIFT"])]), + (("-", "_"), "0x2d", [("-", []), ("_", ["LEFTSHIFT"])]), + (("=", "+"), "0x2e", [("=", []), ("+", ["LEFTSHIFT"])]), + (("[", "{"), "0x2f", [("[", []), ("{", ["LEFTSHIFT"])]), + (("]", "}"), "0x30", [("]", []), ("}", ["LEFTSHIFT"])]), + (("\\", "|"), "0x31", [("\\", []), ("|", ["LEFTSHIFT"])]), + ((";", ":"), "0x33", [(";", []), (":", ["LEFTSHIFT"])]), + (("'", '"'), "0x34", [("'", []), ('"', 
["LEFTSHIFT"])]), + (("`", "~"), "0x35", [("`", []), ("~", ["LEFTSHIFT"])]), + ((",", "<"), "0x36", [(",", []), ("<", ["LEFTSHIFT"])]), + ((".", ">"), "0x37", [(".", []), (">", ["LEFTSHIFT"])]), + (("/", "?"), "0x38", [("/", []), ("?", ["LEFTSHIFT"])]), + ("ENTER", "0x28", [("", [])]), + ("ESC", "0x29", [("", [])]), + ("BACKSPACE", "0x2a", [("", [])]), + ("TAB", "0x2b", [("", [])]), + ("SPACE", "0x2c", [(" ", [])]), + ("CAPSLOCK", "0x39", [("", [])]), + ("F1", "0x3a", [("", [])]), + ("F2", "0x3b", [("", [])]), + ("F3", "0x3c", [("", [])]), + ("F4", "0x3d", [("", [])]), + ("F5", "0x3e", [("", [])]), + ("F6", "0x3f", [("", [])]), + ("F7", "0x40", [("", [])]), + ("F8", "0x41", [("", [])]), + ("F9", "0x42", [("", [])]), + ("F10", "0x43", [("", [])]), + ("F11", "0x44", [("", [])]), + ("F12", "0x45", [("", [])]), + ("DELETE", "0x4c", [("", [])]), + ("CTRL_ALT_DEL", "0x4c", [("", ["CTRL", "ALT"])]), + ("CTRL_C", "0x06", [("", ["CTRL"])]), + ("RIGHTARROW", "0x4f", [("", [])]), + ("LEFTARROW", "0x50", [("", [])]), + ("DOWNARROW", "0x51", [("", [])]), + ("UPARROW", "0x52", [("", [])]), ] @staticmethod @@ -254,8 +258,8 @@ def hid_to_hex(hid_code): return int(hid_code, 16) << 16 | 0o0007 def get_hid_from_key(self, key): - if key == ' ': - return '0x2c', [] + if key == " ": + return "0x2c", [] for keys_name, key_code, keys_value in self.keys_hid_code: if isinstance(keys_name, tuple): for keys in keys_value: @@ -293,8 +297,8 @@ def get_sendkey_facts(self, vm_obj, returned_value=0): if vm_obj is not None: sendkey_facts = dict( virtual_machine=vm_obj.name, - keys_send=self.params['keys_send'], - string_send=self.params['string_send'], + keys_send=self.params["keys_send"], + string_send=self.params["string_send"], keys_send_number=self.num_keys_send, returned_keys_send_number=returned_value, ) @@ -304,27 +308,37 @@ def get_sendkey_facts(self, vm_obj, returned_value=0): def send_key_to_vm(self, vm_obj): key_event = None num_keys_returned = 0 - if self.params['keys_send']: - for specified_key in self.params['keys_send']: + if self.params["keys_send"]: + for specified_key in self.params["keys_send"]: key_found = False for keys in self.keys_hid_code: - if (isinstance(keys[0], tuple) and specified_key in keys[0]) or \ - (not isinstance(keys[0], tuple) and specified_key == keys[0]): - hid_code, modifiers = self.get_hid_from_key(specified_key) + if ( + isinstance(keys[0], tuple) and specified_key in keys[0] + ) or ( + not isinstance(keys[0], tuple) + and specified_key == keys[0] + ): + hid_code, modifiers = self.get_hid_from_key( + specified_key + ) key_event = self.get_key_event(hid_code, modifiers) self.usb_scan_code_spec.keyEvents.append(key_event) self.num_keys_send += 1 key_found = True break if not key_found: - self.module.fail_json(msg="keys_send parameter: '%s' in %s not supported." - % (specified_key, self.params['keys_send'])) + self.module.fail_json( + msg="keys_send parameter: '%s' in %s not supported." 
+ % (specified_key, self.params["keys_send"]) + ) - if self.params['string_send']: - for char in self.params['string_send']: + if self.params["string_send"]: + for char in self.params["string_send"]: key_found = False for keys in self.keys_hid_code: - if (isinstance(keys[0], tuple) and char in keys[0]) or char == ' ': + if ( + isinstance(keys[0], tuple) and char in keys[0] + ) or char == " ": hid_code, modifiers = self.get_hid_from_key(char) key_event = self.get_key_event(hid_code, modifiers) self.usb_scan_code_spec.keyEvents.append(key_event) @@ -332,21 +346,36 @@ def send_key_to_vm(self, vm_obj): key_found = True break if not key_found: - self.module.fail_json(msg="string_send parameter: '%s' contains char: '%s' not supported." - % (self.params['string_send'], char)) + self.module.fail_json( + msg="string_send parameter: '%s' contains char: '%s' not supported." + % (self.params["string_send"], char) + ) if self.usb_scan_code_spec.keyEvents: try: - num_keys_returned = vm_obj.PutUsbScanCodes(self.usb_scan_code_spec) + num_keys_returned = vm_obj.PutUsbScanCodes( + self.usb_scan_code_spec + ) self.change_detected = True except vmodl.RuntimeFault as e: - self.module.fail_json(msg="Failed to send key %s to virtual machine due to %s" % (key_event, to_native(e.msg))) + self.module.fail_json( + msg="Failed to send key %s to virtual machine due to %s" + % (key_event, to_native(e.msg)) + ) sendkey_facts = self.get_sendkey_facts(vm_obj, num_keys_returned) if num_keys_returned != self.num_keys_send: - results = {'changed': self.change_detected, 'failed': True, 'sendkey_info': sendkey_facts} + results = { + "changed": self.change_detected, + "failed": True, + "sendkey_info": sendkey_facts, + } else: - results = {'changed': self.change_detected, 'failed': False, 'sendkey_info': sendkey_facts} + results = { + "changed": self.change_detected, + "failed": False, + "sendkey_info": sendkey_facts, + } return results @@ -354,36 +383,39 @@ def send_key_to_vm(self, vm_obj): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str'), - esxi_hostname=dict(type='str'), - cluster=dict(type='str'), - keys_send=dict(type='list', default=[]), - string_send=dict(type='str') + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str"), + esxi_hostname=dict(type="str"), + cluster=dict(type="str"), + keys_send=dict(type="list", default=[]), + string_send=dict(type="str"), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) pyv = PyVmomiHelper(module) vm = pyv.get_vm() if not vm: - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg='Unable to find the specified virtual machine : %s ' % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to find the specified virtual machine : %s " % vm_id + ) result = pyv.send_key_to_vm(vm) - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_serial_port.py b/plugins/modules/vmware_guest_serial_port.py index 7afd899..46a6e6f 100644 
--- a/plugins/modules/vmware_guest_serial_port.py +++ b/plugins/modules/vmware_guest_serial_port.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_serial_port @@ -88,9 +91,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ # Create serial ports - name: Create multiple serial ports with Backing type - network, pipe, device and file vmware_guest_serial_port: @@ -140,9 +143,9 @@ state: 'absent' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" serial_port_data: description: metadata about the virtual machine's serial ports after managing them returned: always @@ -159,11 +162,16 @@ "pipe_name": "serial pipe" }, ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native + try: from pyVmomi import vim except ImportError: @@ -175,7 +183,9 @@ class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) - self.change_applied = False # a change was applied meaning at least one task succeeded + self.change_applied = ( + False + ) # a change was applied meaning at least one task succeeded self.config_spec = vim.vm.ConfigSpec() self.config_spec.deviceChange = [] self.serial_ports = [] @@ -191,11 +201,18 @@ def check_vm_state(self, vm_obj): - True if vm is in poweredOff state - module fails otherwise """ - if vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff: + if ( + vm_obj.runtime.powerState + == vim.VirtualMachinePowerState.poweredOff + ): return True else: - self.module.fail_json(msg="A serial device cannot be added to a VM in the current state(" + vm_obj.runtime.powerState + ")." - + "Please use the vmware_guest_powerstate module to power off the VM") + self.module.fail_json( + msg="A serial device cannot be added to a VM in the current state(" + + vm_obj.runtime.powerState + + ")." 
+ + "Please use the vmware_guest_powerstate module to power off the VM" + ) def get_serial_port_config_spec(self, vm_obj): """ @@ -204,33 +221,44 @@ def get_serial_port_config_spec(self, vm_obj): - self.change_applied """ # create serial config spec for adding, editing, removing - for backing in self.params.get('backings'): + for backing in self.params.get("backings"): backing_keys = backing.keys() serial_port = get_serial_port(vm_obj, backing) - if serial_port is None and 'state' not in backing_keys: + if serial_port is None and "state" not in backing_keys: # if serial port is None and state is not mentioned # create a new serial port serial_port_spec = self.create_serial_port(backing) - serial_port_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + serial_port_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.add + ) self.serial_ports.append(serial_port_spec) self.change_applied = True else: - if serial_port is not None and 'state' in backing_keys: + if serial_port is not None and "state" in backing_keys: serial_spec = vim.vm.device.VirtualDeviceSpec() serial_spec.device = serial_port - if backing['state'].lower() == 'present': + if backing["state"].lower() == "present": # modify existing serial port - serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit - serial_spec.device.backing = self.get_backing_info(serial_port, backing, backing['type']) + serial_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) + serial_spec.device.backing = self.get_backing_info( + serial_port, backing, backing["type"] + ) self.change_applied = True self.config_spec.deviceChange.append(serial_spec) - elif backing['state'].lower() == 'absent': + elif backing["state"].lower() == "absent": # remove serial port - serial_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove + serial_spec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.remove + ) self.change_applied = True self.config_spec.deviceChange.append(serial_spec) else: - self.module.fail_json(msg='Unable to find the specified serial port: %s' % backing) + self.module.fail_json( + msg="Unable to find the specified serial port: %s" + % backing + ) def reconfigure_vm_serial_port(self, vm_obj): """ @@ -251,14 +279,28 @@ def reconfigure_vm_serial_port(self, vm_obj): task = vm_obj.ReconfigVM_Task(spec=self.config_spec) wait_for_task(task) except vim.fault.InvalidDatastorePath as e: - self.module.fail_json(msg="Failed to configure serial port on given virtual machine due to invalid path: %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to configure serial port on given virtual machine due to invalid path: %s" + % to_native(e.msg) + ) except vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to reconfigure virtual machine due to product versioning restrictions: %s" % to_native(e.msg)) - if task.info.state == 'error': - results = {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg} + self.module.fail_json( + msg="Failed to reconfigure virtual machine due to product versioning restrictions: %s" + % to_native(e.msg) + ) + if task.info.state == "error": + results = { + "changed": self.change_applied, + "failed": True, + "msg": task.info.error.msg, + } else: serial_port_info = get_serial_port_info(vm_obj) - results = {'changed': self.change_applied, 'failed': False, 'serial_port_info': serial_port_info} + results = { + "changed": self.change_applied, + "failed": False, + "serial_port_info": serial_port_info, + } return results 
@@ -266,58 +308,66 @@ def set_network_backing(self, serial_port, backing_info): """ Set the networking backing params """ - required_params = ['service_uri', 'direction'] + required_params = ["service_uri", "direction"] if set(required_params).issubset(backing_info.keys()): backing = serial_port.URIBackingInfo() - backing.serviceURI = backing_info['service_uri'] - backing.direction = backing_info['direction'] + backing.serviceURI = backing_info["service_uri"] + backing.direction = backing_info["direction"] else: - self.module.fail_json(msg="Failed to create a new serial port of network backing type due to insufficient parameters." - + "The required parameters are service_uri and direction") + self.module.fail_json( + msg="Failed to create a new serial port of network backing type due to insufficient parameters." + + "The required parameters are service_uri and direction" + ) return backing def set_pipe_backing(self, serial_port, backing_info): """ Set the pipe backing params """ - required_params = ['pipe_name', 'endpoint'] + required_params = ["pipe_name", "endpoint"] if set(required_params).issubset(backing_info.keys()): backing = serial_port.PipeBackingInfo() - backing.pipeName = backing_info['pipe_name'] - backing.endpoint = backing_info['endpoint'] + backing.pipeName = backing_info["pipe_name"] + backing.endpoint = backing_info["endpoint"] else: - self.module.fail_json(msg="Failed to create a new serial port of pipe backing type due to insufficient parameters." - + "The required parameters are pipe_name and endpoint") + self.module.fail_json( + msg="Failed to create a new serial port of pipe backing type due to insufficient parameters." + + "The required parameters are pipe_name and endpoint" + ) # since no_rx_loss is an optional argument, so check if the key is present - if 'no_rx_loss' in backing_info.keys() and backing_info['no_rx_loss']: - backing.noRxLoss = backing_info['no_rx_loss'] + if "no_rx_loss" in backing_info.keys() and backing_info["no_rx_loss"]: + backing.noRxLoss = backing_info["no_rx_loss"] return backing def set_device_backing(self, serial_port, backing_info): """ Set the device backing params """ - required_params = ['device_name'] + required_params = ["device_name"] if set(required_params).issubset(backing_info.keys()): backing = serial_port.DeviceBackingInfo() - backing.deviceName = backing_info['device_name'] + backing.deviceName = backing_info["device_name"] else: - self.module.fail_json(msg="Failed to create a new serial port of device backing type due to insufficient parameters." - + "The required parameters are device_name") + self.module.fail_json( + msg="Failed to create a new serial port of device backing type due to insufficient parameters." + + "The required parameters are device_name" + ) return backing def set_file_backing(self, serial_port, backing_info): """ Set the file backing params """ - required_params = ['file_path'] + required_params = ["file_path"] if set(required_params).issubset(backing_info.keys()): backing = serial_port.FileBackingInfo() - backing.fileName = backing_info['file_path'] + backing.fileName = backing_info["file_path"] else: - self.module.fail_json(msg="Failed to create a new serial port of file backing type due to insufficient parameters." - + "The required parameters are file_path") + self.module.fail_json( + msg="Failed to create a new serial port of file backing type due to insufficient parameters." 
+ + "The required parameters are file_path" + ) return backing def get_backing_info(self, serial_port, backing, backing_type): @@ -328,7 +378,7 @@ def get_backing_info(self, serial_port, backing, backing_type): "network": self.set_network_backing, "pipe": self.set_pipe_backing, "device": self.set_device_backing, - "file": self.set_file_backing + "file": self.set_file_backing, } backing_func = switcher.get(backing_type, "Invalid Backing Info") return backing_func(serial_port, backing) @@ -339,8 +389,14 @@ def create_serial_port(self, backing): """ serial_spec = vim.vm.device.VirtualDeviceSpec() serial_port = vim.vm.device.VirtualSerialPort() - serial_port.yieldOnPoll = backing['yield_on_poll'] if 'yield_on_poll' in backing.keys() else True - serial_port.backing = self.get_backing_info(serial_port, backing, backing['type']) + serial_port.yieldOnPoll = ( + backing["yield_on_poll"] + if "yield_on_poll" in backing.keys() + else True + ) + serial_port.backing = self.get_backing_info( + serial_port, backing, backing["type"] + ) serial_spec.device = serial_port return serial_spec @@ -351,33 +407,35 @@ def get_serial_port(vm_obj, backing): """ serial_port = None backing_type_mapping = { - 'network': vim.vm.device.VirtualSerialPort.URIBackingInfo, - 'pipe': vim.vm.device.VirtualSerialPort.PipeBackingInfo, - 'device': vim.vm.device.VirtualSerialPort.DeviceBackingInfo, - 'file': vim.vm.device.VirtualSerialPort.FileBackingInfo + "network": vim.vm.device.VirtualSerialPort.URIBackingInfo, + "pipe": vim.vm.device.VirtualSerialPort.PipeBackingInfo, + "device": vim.vm.device.VirtualSerialPort.DeviceBackingInfo, + "file": vim.vm.device.VirtualSerialPort.FileBackingInfo, } valid_params = backing.keys() for device in vm_obj.config.hardware.device: if isinstance(device, vim.vm.device.VirtualSerialPort): - if isinstance(device.backing, backing_type_mapping[backing['type']]): - if 'service_uri' in valid_params: + if isinstance( + device.backing, backing_type_mapping[backing["type"]] + ): + if "service_uri" in valid_params: # network backing type - if device.backing.serviceURI == backing['service_uri']: + if device.backing.serviceURI == backing["service_uri"]: serial_port = device break - elif 'pipe_name' in valid_params: + elif "pipe_name" in valid_params: # named pipe backing type - if device.backing.pipeName == backing['pipe_name']: + if device.backing.pipeName == backing["pipe_name"]: serial_port = device break - elif 'device_name' in valid_params: + elif "device_name" in valid_params: # physical serial device backing type - if device.backing.deviceName == backing['device_name']: + if device.backing.deviceName == backing["device_name"]: serial_port = device break - elif 'file_path' in valid_params: + elif "file_path" in valid_params: # file backing type - if device.backing.fileName == backing['file_path']: + if device.backing.fileName == backing["file_path"]: serial_port = device break # if there is a backing of only one type, user need not provide secondary details like service_uri, pipe_name, device_name or file_path @@ -397,21 +455,29 @@ def get_serial_port_info(vm_obj): for port in vm_obj.config.hardware.device: backing = dict() if isinstance(port, vim.vm.device.VirtualSerialPort): - if isinstance(port.backing, vim.vm.device.VirtualSerialPort.URIBackingInfo): - backing['backing_type'] = 'network' - backing['direction'] = port.backing.direction - backing['service_uri'] = port.backing.serviceURI - elif isinstance(port.backing, vim.vm.device.VirtualSerialPort.PipeBackingInfo): - backing['backing_type'] = 
'pipe' - backing['pipe_name'] = port.backing.pipeName - backing['endpoint'] = port.backing.endpoint - backing['no_rx_loss'] = port.backing.noRxLoss - elif isinstance(port.backing, vim.vm.device.VirtualSerialPort.DeviceBackingInfo): - backing['backing_type'] = 'device' - backing['device_name'] = port.backing.deviceName - elif isinstance(port.backing, vim.vm.device.VirtualSerialPort.FileBackingInfo): - backing['backing_type'] = 'file' - backing['file_path'] = port.backing.fileName + if isinstance( + port.backing, vim.vm.device.VirtualSerialPort.URIBackingInfo + ): + backing["backing_type"] = "network" + backing["direction"] = port.backing.direction + backing["service_uri"] = port.backing.serviceURI + elif isinstance( + port.backing, vim.vm.device.VirtualSerialPort.PipeBackingInfo + ): + backing["backing_type"] = "pipe" + backing["pipe_name"] = port.backing.pipeName + backing["endpoint"] = port.backing.endpoint + backing["no_rx_loss"] = port.backing.noRxLoss + elif isinstance( + port.backing, vim.vm.device.VirtualSerialPort.DeviceBackingInfo + ): + backing["backing_type"] = "device" + backing["device_name"] = port.backing.deviceName + elif isinstance( + port.backing, vim.vm.device.VirtualSerialPort.FileBackingInfo + ): + backing["backing_type"] = "file" + backing["file_path"] = port.backing.fileName else: continue serial_port_info.append(backing) @@ -424,23 +490,19 @@ def main(): """ argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - backings=dict(type='list', default=[]) + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + backings=dict(type="list", default=[]), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], + mutually_exclusive=[["name", "uuid", "moid"]], ) - result = {'failed': False, 'changed': False} + result = {"failed": False, "changed": False} pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -454,15 +516,21 @@ def main(): else: # We are unable to find the virtual machine user specified # Bail out - vm_id = (module.params.get('name') or module.params.get('uuid') or module.params.get('vm_id')) - module.fail_json(msg="Unable to manage serial ports for non-existing" - " virtual machine '%s'." % vm_id) - - if result['failed']: + vm_id = ( + module.params.get("name") + or module.params.get("uuid") + or module.params.get("vm_id") + ) + module.fail_json( + msg="Unable to manage serial ports for non-existing" + " virtual machine '%s'." 
% vm_id + ) + + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_snapshot.py b/plugins/modules/vmware_guest_snapshot.py index 7512980..b943aaf 100644 --- a/plugins/modules/vmware_guest_snapshot.py +++ b/plugins/modules/vmware_guest_snapshot.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_snapshot short_description: Manages virtual machines snapshots in vCenter @@ -132,9 +135,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create a snapshot vmware_guest_snapshot: hostname: "{{ vcenter_hostname }}" @@ -234,7 +237,7 @@ new_snapshot_name: im_renamed new_description: "{{ new_snapshot_description }}" delegate_to: localhost -''' +""" RETURN = """ snapshot_results: @@ -269,6 +272,7 @@ """ import time + try: from pyVmomi import vim except ImportError: @@ -276,7 +280,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, list_snapshots, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + list_snapshots, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): @@ -288,7 +296,7 @@ def wait_for_task(task): # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py - while task.info.state not in ['success', 'error']: + while task.info.state not in ["success", "error"]: time.sleep(1) def get_snapshots_by_name_recursively(self, snapshots, snapname): @@ -297,7 +305,9 @@ def get_snapshots_by_name_recursively(self, snapshots, snapname): if snapshot.name == snapname: snap_obj.append(snapshot) else: - snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname) + snap_obj = snap_obj + self.get_snapshots_by_name_recursively( + snapshot.childSnapshotList, snapname + ) return snap_obj def snapshot_vm(self, vm): @@ -305,89 +315,140 @@ def snapshot_vm(self, vm): quiesce = False # Check if there is a latest snapshot already present as specified by user if vm.snapshot is not None: - snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList, - self.module.params["snapshot_name"]) + snap_obj = self.get_snapshots_by_name_recursively( + vm.snapshot.rootSnapshotList, + self.module.params["snapshot_name"], + ) if snap_obj: # Snapshot already exists, do not anything. - self.module.exit_json(changed=False, - msg="Snapshot named [%(snapshot_name)s] already exists and is current." % self.module.params) + self.module.exit_json( + changed=False, + msg="Snapshot named [%(snapshot_name)s] already exists and is current." 
+ % self.module.params, + ) # Check if Virtual Machine provides capabilities for Quiesce and Memory Snapshots if vm.capability.quiescedSnapshotsSupported: - quiesce = self.module.params['quiesce'] + quiesce = self.module.params["quiesce"] if vm.capability.memorySnapshotsSupported: - memory_dump = self.module.params['memory_dump'] + memory_dump = self.module.params["memory_dump"] task = None try: - task = vm.CreateSnapshot(self.module.params["snapshot_name"], - self.module.params["description"], - memory_dump, - quiesce) + task = vm.CreateSnapshot( + self.module.params["snapshot_name"], + self.module.params["description"], + memory_dump, + quiesce, + ) except vim.fault.RestrictedVersion as exc: - self.module.fail_json(msg="Failed to take snapshot due to VMware Licence" - " restriction : %s" % to_native(exc.msg)) + self.module.fail_json( + msg="Failed to take snapshot due to VMware Licence" + " restriction : %s" % to_native(exc.msg) + ) except Exception as exc: - self.module.fail_json(msg="Failed to create snapshot of virtual machine" - " %s due to %s" % (self.module.params['name'], to_native(exc))) + self.module.fail_json( + msg="Failed to create snapshot of virtual machine" + " %s due to %s" % (self.module.params["name"], to_native(exc)) + ) return task def rename_snapshot(self, vm): if vm.snapshot is None: - vm_id = self.module.params.get('uuid') or self.module.params.get('name') or self.params.get('moid') - self.module.fail_json(msg="virtual machine - %s doesn't have any snapshots" % vm_id) - - snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList, - self.module.params["snapshot_name"]) + vm_id = ( + self.module.params.get("uuid") + or self.module.params.get("name") + or self.params.get("moid") + ) + self.module.fail_json( + msg="virtual machine - %s doesn't have any snapshots" % vm_id + ) + + snap_obj = self.get_snapshots_by_name_recursively( + vm.snapshot.rootSnapshotList, self.module.params["snapshot_name"] + ) task = None if len(snap_obj) == 1: snap_obj = snap_obj[0].snapshot - if self.module.params["new_snapshot_name"] and self.module.params["new_description"]: - task = snap_obj.RenameSnapshot(name=self.module.params["new_snapshot_name"], - description=self.module.params["new_description"]) + if ( + self.module.params["new_snapshot_name"] + and self.module.params["new_description"] + ): + task = snap_obj.RenameSnapshot( + name=self.module.params["new_snapshot_name"], + description=self.module.params["new_description"], + ) elif self.module.params["new_snapshot_name"]: - task = snap_obj.RenameSnapshot(name=self.module.params["new_snapshot_name"]) + task = snap_obj.RenameSnapshot( + name=self.module.params["new_snapshot_name"] + ) else: - task = snap_obj.RenameSnapshot(description=self.module.params["new_description"]) + task = snap_obj.RenameSnapshot( + description=self.module.params["new_description"] + ) else: - vm_id = self.module.params.get('uuid') or self.module.params.get('name') or self.params.get('moid') + vm_id = ( + self.module.params.get("uuid") + or self.module.params.get("name") + or self.params.get("moid") + ) self.module.exit_json( - msg="Couldn't find any snapshots with specified name: %s on VM: %s" % - (self.module.params["snapshot_name"], vm_id)) + msg="Couldn't find any snapshots with specified name: %s on VM: %s" + % (self.module.params["snapshot_name"], vm_id) + ) return task def remove_or_revert_snapshot(self, vm): if vm.snapshot is None: - vm_name = (self.module.params.get('uuid') or self.module.params.get('name')) - if 
self.module.params.get('state') == 'revert': - self.module.fail_json(msg="virtual machine - %s does not" - " have any snapshots to revert to." % vm_name) - self.module.exit_json(msg="virtual machine - %s doesn't have any" - " snapshots to remove." % vm_name) - - snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList, - self.module.params["snapshot_name"]) + vm_name = self.module.params.get("uuid") or self.module.params.get( + "name" + ) + if self.module.params.get("state") == "revert": + self.module.fail_json( + msg="virtual machine - %s does not" + " have any snapshots to revert to." % vm_name + ) + self.module.exit_json( + msg="virtual machine - %s doesn't have any" + " snapshots to remove." % vm_name + ) + + snap_obj = self.get_snapshots_by_name_recursively( + vm.snapshot.rootSnapshotList, self.module.params["snapshot_name"] + ) task = None if len(snap_obj) == 1: snap_obj = snap_obj[0].snapshot if self.module.params["state"] == "absent": # Remove subtree depending upon the user input - remove_children = self.module.params.get('remove_children', False) + remove_children = self.module.params.get( + "remove_children", False + ) task = snap_obj.RemoveSnapshot_Task(remove_children) elif self.module.params["state"] == "revert": task = snap_obj.RevertToSnapshot_Task() else: - vm_id = self.module.params.get('uuid') or self.module.params.get('name') or self.params.get('moid') - self.module.exit_json(msg="Couldn't find any snapshots with" - " specified name: %s on VM: %s" % (self.module.params["snapshot_name"], vm_id)) + vm_id = ( + self.module.params.get("uuid") + or self.module.params.get("name") + or self.params.get("moid") + ) + self.module.exit_json( + msg="Couldn't find any snapshots with" + " specified name: %s on VM: %s" + % (self.module.params["snapshot_name"], vm_id) + ) return task def apply_snapshot_op(self, vm): result = {} if self.module.params["state"] == "present": - if self.module.params["new_snapshot_name"] or self.module.params["new_description"]: + if ( + self.module.params["new_snapshot_name"] + or self.module.params["new_description"] + ): self.rename_snapshot(vm) - result = {'changed': True, 'failed': False, 'renamed': True} + result = {"changed": True, "failed": False, "renamed": True} task = None else: task = self.snapshot_vm(vm) @@ -401,10 +462,18 @@ def apply_snapshot_op(self, vm): if task: self.wait_for_task(task) - if task.info.state == 'error': - result = {'changed': False, 'failed': True, 'msg': task.info.error.msg} + if task.info.state == "error": + result = { + "changed": False, + "failed": True, + "msg": task.info.error.msg, + } else: - result = {'changed': True, 'failed': False, 'snapshot_results': list_snapshots(vm)} + result = { + "changed": True, + "failed": False, + "snapshot_results": list_snapshots(vm), + } return result @@ -412,58 +481,71 @@ def apply_snapshot_op(self, vm): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(default='present', choices=['present', 'absent', 'revert', 'remove_all']), - name=dict(type='str'), - name_match=dict(type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - folder=dict(type='str'), - datacenter=dict(required=True, type='str'), - snapshot_name=dict(type='str'), - description=dict(type='str', default=''), - quiesce=dict(type='bool', default=False), - memory_dump=dict(type='bool', default=False), - remove_children=dict(type='bool', default=False), - 
new_snapshot_name=dict(type='str'), - new_description=dict(type='str'), + state=dict( + default="present", + choices=["present", "absent", "revert", "remove_all"], + ), + name=dict(type="str"), + name_match=dict( + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + folder=dict(type="str"), + datacenter=dict(required=True, type="str"), + snapshot_name=dict(type="str"), + description=dict(type="str", default=""), + quiesce=dict(type="bool", default=False), + memory_dump=dict(type="bool", default=False), + remove_children=dict(type="bool", default=False), + new_snapshot_name=dict(type="str"), + new_description=dict(type="str"), ) module = AnsibleModule( argument_spec=argument_spec, - required_together=[ - ['name', 'folder'] - ], - required_one_of=[ - ['name', 'uuid', 'moid'] - ], + required_together=[["name", "folder"]], + required_one_of=[["name", "uuid", "moid"]], ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing vm = pyv.get_vm() if not vm: - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg="Unable to manage snapshots for non-existing VM %s" % vm_id) - - if not module.params['snapshot_name'] and module.params['state'] != 'remove_all': - module.fail_json(msg="snapshot_name param is required when state is '%(state)s'" % module.params) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to manage snapshots for non-existing VM %s" % vm_id + ) + + if ( + not module.params["snapshot_name"] + and module.params["state"] != "remove_all" + ): + module.fail_json( + msg="snapshot_name param is required when state is '%(state)s'" + % module.params + ) result = pyv.apply_snapshot_op(vm) - if 'failed' not in result: - result['failed'] = False + if "failed" not in result: + result["failed"] = False - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_snapshot_info.py b/plugins/modules/vmware_guest_snapshot_info.py index fc12d63..6702c90 100644 --- a/plugins/modules/vmware_guest_snapshot_info.py +++ b/plugins/modules/vmware_guest_snapshot_info.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_snapshot_info short_description: Gather info about virtual machine's snapshots in vCenter @@ -74,9 +75,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather snapshot information about the virtual machine in the given vCenter vmware_guest_snapshot_info: hostname: "{{ vcenter_hostname }}" @@ -96,7 +97,7 
@@ moid: vm-42 delegate_to: localhost register: snapshot_info -''' +""" RETURN = """ guest_snapshots: @@ -124,7 +125,11 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, list_snapshots, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + list_snapshots, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): @@ -149,30 +154,29 @@ def gather_guest_snapshot_info(vm_obj=None): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - folder=dict(type='str'), - datacenter=dict(required=True, type='str'), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + folder=dict(type="str"), + datacenter=dict(required=True, type="str"), ) module = AnsibleModule( argument_spec=argument_spec, - required_together=[ - ['name', 'folder'] - ], - required_one_of=[ - ['name', 'uuid', 'moid'] - ], + required_together=[["name", "folder"]], + required_one_of=[["name", "uuid", "moid"]], supports_check_mode=True, ) - if module._name == 'vmware_guest_snapshot_facts': - module.deprecate("The 'vmware_guest_snapshot_facts' module has been renamed to 'vmware_guest_snapshot_info'", version='2.13') + if module._name == "vmware_guest_snapshot_facts": + module.deprecate( + "The 'vmware_guest_snapshot_facts' module has been renamed to 'vmware_guest_snapshot_info'", + version="2.13", + ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -180,13 +184,22 @@ def main(): if not vm: # If UUID is set, get_vm select UUID, show error message accordingly. 
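# ---------------------------------------------------------------------------
# Illustration (not part of the patch above): how the snapshot modules walk the
# snapshot tree (vm.snapshot.rootSnapshotList entries and their
# childSnapshotList children).  find_snapshots_by_name() is a standalone
# re-statement of vmware_guest_snapshot.get_snapshots_by_name_recursively()
# from the diff above; the Snap class is only a stand-in for the pyVmomi
# snapshot tree objects so the sketch runs without a vCenter connection.

class Snap(object):
    def __init__(self, name, children=None):
        self.name = name
        self.childSnapshotList = children or []


def find_snapshots_by_name(snapshots, snapname):
    found = []
    for snapshot in snapshots:
        if snapshot.name == snapname:
            found.append(snapshot)
        else:
            # as in the module, only non-matching nodes are descended into,
            # so children of a matching snapshot are not searched
            found = found + find_snapshots_by_name(snapshot.childSnapshotList, snapname)
    return found


if __name__ == "__main__":
    tree = [
        Snap("base", [
            Snap("pre-upgrade", [Snap("post-upgrade")]),
            Snap("pre-upgrade"),
        ]),
    ]
    matches = find_snapshots_by_name(tree, "pre-upgrade")
    print(len(matches))  # 2
    # vmware_guest_snapshot only renames/removes/reverts when exactly one
    # match comes back (the `len(snap_obj) == 1` checks in the diff above).
# ---------------------------------------------------------------------------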
- vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg="Unable to gather information about snapshots for" - " non-existing VM ['%s']" % vm_id) - - results = dict(changed=False, guest_snapshots=pyv.gather_guest_snapshot_info(vm_obj=vm)) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to gather information about snapshots for" + " non-existing VM ['%s']" % vm_id + ) + + results = dict( + changed=False, + guest_snapshots=pyv.gather_guest_snapshot_info(vm_obj=vm), + ) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_tools_info.py b/plugins/modules/vmware_guest_tools_info.py index 864511e..be77b7b 100644 --- a/plugins/modules/vmware_guest_tools_info.py +++ b/plugins/modules/vmware_guest_tools_info.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_tools_info short_description: Gather info about VMware tools installed in VM @@ -77,9 +78,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather VMware tools info installed in VM specified by uuid vmware_guest_tools_info: hostname: "{{ vcenter_hostname }}" @@ -99,7 +100,7 @@ name: "{{ vm_name }}" delegate_to: localhost register: vmtools_info -''' +""" RETURN = """ vmtools_info: @@ -133,16 +134,19 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) - self.name = self.params['name'] - self.uuid = self.params['uuid'] - self.moid = self.params['moid'] - self.use_instance_uuid = self.params['use_instance_uuid'] + self.name = self.params["name"] + self.uuid = self.params["uuid"] + self.moid = self.params["moid"] + self.use_instance_uuid = self.params["use_instance_uuid"] def gather_vmtools_info(self): vmtools_info = dict( @@ -164,44 +168,49 @@ def gather_vmtools_info(self): vm_tools_last_install_count=self.current_vm_obj.config.tools.lastInstallInfo.counter, ) - return {'changed': False, 'failed': False, 'vmtools_info': vmtools_info} + return { + "changed": False, + "failed": False, + "vmtools_info": vmtools_info, + } def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), name_match=dict( - choices=['first', 'last'], - default='first', - type='str' + choices=["first", "last"], default="first", type="str" ), - folder=dict(type='str'), - datacenter=dict(type='str'), + folder=dict(type="str"), + datacenter=dict(type="str"), ) module = AnsibleModule( 
argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ], + required_one_of=[["name", "uuid", "moid"]], + mutually_exclusive=[["name", "uuid", "moid"]], supports_check_mode=True, ) pyv = PyVmomiHelper(module) vm = pyv.get_vm() if not vm: - vm_id = (module.params.get('uuid') or module.params.get('name') or module.params.get('moid')) - module.fail_json(msg='Unable to find the specified virtual machine using: %s' % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to find the specified virtual machine using: %s" + % vm_id + ) results = pyv.gather_vmtools_info() module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_tools_upgrade.py b/plugins/modules/vmware_guest_tools_upgrade.py index 3585279..3a2cb61 100644 --- a/plugins/modules/vmware_guest_tools_upgrade.py +++ b/plugins/modules/vmware_guest_tools_upgrade.py @@ -4,17 +4,18 @@ # Copyright: (c) 2018, Mike Klebolt # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_tools_upgrade short_description: Module to upgrade VMTools @@ -73,9 +74,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Upgrade VMware Tools using uuid vmware_guest_tools_upgrade: hostname: "{{ vcenter_hostname }}" @@ -93,13 +94,17 @@ datacenter: "{{ datacenter_name }}" moid: vm-42 delegate_to: localhost -''' +""" -RETURN = ''' # ''' +RETURN = """ # """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native @@ -108,20 +113,18 @@ def __init__(self, module): super(PyVmomiHelper, self).__init__(module) def upgrade_tools(self, vm): - result = {'failed': False, 'changed': False, 'msg': ''} + result = {"failed": False, "changed": False, "msg": ""} # Exit if VMware tools is already up to date if vm.guest.toolsStatus == "toolsOk": result.update( - changed=False, - msg="VMware tools is already up to date", + changed=False, msg="VMware tools is already up to date" ) return result # Fail if VM is not powered on elif vm.summary.runtime.powerState != "poweredOn": result.update( - failed=True, - msg="VM must be powered on to upgrade tools", + failed=True, msg="VM must be powered on to upgrade tools" ) return result @@ -142,18 +145,20 @@ def upgrade_tools(self, vm): changed, err_msg = wait_for_task(task) result.update(changed=changed, msg=to_native(err_msg)) else: - result.update(msg='Guest Operating System is other than Linux and Windows.') + result.update( + msg="Guest Operating System is other than Linux and Windows." 
+ ) return result except Exception as exc: result.update( failed=True, - msg='Error while upgrading VMware tools %s' % to_native(exc), + msg="Error while upgrading VMware tools %s" + % to_native(exc), ) return result else: result.update( - failed=True, - msg="VMware tools could not be upgraded", + failed=True, msg="VMware tools could not be upgraded" ) return result @@ -161,24 +166,23 @@ def upgrade_tools(self, vm): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - name_match=dict(type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', required=True), + name=dict(type="str"), + name_match=dict( + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", required=True), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing @@ -188,18 +192,22 @@ def main(): if vm: try: result = pyv.upgrade_tools(vm) - if result['changed']: - module.exit_json(changed=result['changed']) - elif result['failed']: - module.fail_json(msg=result['msg']) + if result["changed"]: + module.exit_json(changed=result["changed"]) + elif result["failed"]: + module.fail_json(msg=result["msg"]) else: - module.exit_json(msg=result['msg'], changed=result['changed']) + module.exit_json(msg=result["msg"], changed=result["changed"]) except Exception as exc: - module.fail_json(msg='Unknown error: %s' % to_native(exc)) + module.fail_json(msg="Unknown error: %s" % to_native(exc)) else: - vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid') - module.fail_json(msg='Unable to find VM %s' % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json(msg="Unable to find VM %s" % vm_id) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_tools_wait.py b/plugins/modules/vmware_guest_tools_wait.py index ea5d91e..8d2e5bf 100644 --- a/plugins/modules/vmware_guest_tools_wait.py +++ b/plugins/modules/vmware_guest_tools_wait.py @@ -4,16 +4,17 @@ # Copyright: (c) 2017, Philippe Dellaert # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_tools_wait short_description: Wait for VMware tools to become available @@ -72,9 +73,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Wait for VMware tools to become available 
by UUID vmware_guest_facts: hostname: "{{ vcenter_hostname }}" @@ -118,7 +119,7 @@ folder: "/{{datacenter}}/vm" delegate_to: localhost register: facts -''' +""" RETURN = """ instance: @@ -132,7 +133,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, gather_vm_facts, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + gather_vm_facts, + vmware_argument_spec, +) class PyVmomiHelper(PyVmomi): @@ -149,63 +154,76 @@ def wait_for_tools(self, vm, poll=100, sleep=5): while not tools_running and poll_num <= poll: newvm = self.get_vm() vm_facts = self.gather_facts(newvm) - if vm_facts['guest_tools_status'] == 'guestToolsRunning': + if vm_facts["guest_tools_status"] == "guestToolsRunning": tools_running = True else: time.sleep(sleep) poll_num += 1 if not tools_running: - return {'failed': True, 'msg': 'VMware tools either not present or not running after {0} seconds'.format((poll * sleep))} + return { + "failed": True, + "msg": "VMware tools either not present or not running after {0} seconds".format( + (poll * sleep) + ), + } changed = False if poll_num > 0: changed = True - return {'changed': changed, 'failed': False, 'instance': vm_facts} + return {"changed": changed, "failed": False, "instance": vm_facts} def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - name_match=dict(type='str', default='first', choices=['first', 'last']), - folder=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), + name=dict(type="str"), + name_match=dict( + type="str", default="first", choices=["first", "last"] + ), + folder=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) - if module.params['folder']: + if module.params["folder"]: # FindByInventoryPath() does not require an absolute path # so we should leave the input folder path unmodified - module.params['folder'] = module.params['folder'].rstrip('/') + module.params["folder"] = module.params["folder"].rstrip("/") pyv = PyVmomiHelper(module) # Check if the VM exists before continuing vm = pyv.get_vm() if not vm: - vm_id = module.params.get('name') or module.params.get('uuid') or module.params.get('moid') - module.fail_json(msg="Unable to wait for VMware tools for non-existing VM '%s'." % vm_id) + vm_id = ( + module.params.get("name") + or module.params.get("uuid") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to wait for VMware tools for non-existing VM '%s'." 
+ % vm_id + ) result = dict(changed=False) try: result = pyv.wait_for_tools(vm) except Exception as e: - module.fail_json(msg="Waiting for VMware tools failed with" - " exception: {0:s}".format(to_native(e))) + module.fail_json( + msg="Waiting for VMware tools failed with" + " exception: {0:s}".format(to_native(e)) + ) - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_video.py b/plugins/modules/vmware_guest_video.py index bdc0ca7..2a94229 100644 --- a/plugins/modules/vmware_guest_video.py +++ b/plugins/modules/vmware_guest_video.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_video short_description: Modify video card configurations of specified virtual machine in given vCenter infrastructure @@ -104,9 +105,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Change video card settings of virtual machine vmware_guest_video: hostname: "{{ vcenter_hostname }}" @@ -142,7 +143,7 @@ memory_3D_mb: 512 delegate_to: localhost register: video_facts -''' +""" RETURN = """ video_status: @@ -166,7 +167,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, +) class PyVmomiHelper(PyVmomi): @@ -214,82 +219,126 @@ def get_video_card_spec(self, vm_obj): video_card, video_card_facts = self.gather_video_card_facts(vm_obj) self.video_card_facts = video_card_facts if video_card is None: - self.module.fail_json(msg='Not get video card device of specified virtual machine.') + self.module.fail_json( + msg="Not get video card device of specified virtual machine." 
+ ) video_spec = vim.vm.device.VirtualDeviceSpec() video_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit video_spec.device = video_card auto_detect = False enabled_3d = False - if self.params['gather_video_facts']: + if self.params["gather_video_facts"]: return None - if self.params['use_auto_detect'] is not None: - if video_card_facts['auto_detect'] and self.params['use_auto_detect']: + if self.params["use_auto_detect"] is not None: + if ( + video_card_facts["auto_detect"] + and self.params["use_auto_detect"] + ): auto_detect = True - elif not video_card_facts['auto_detect'] and self.params['use_auto_detect']: + elif ( + not video_card_facts["auto_detect"] + and self.params["use_auto_detect"] + ): video_spec.device.useAutoDetect = True self.change_detected = True auto_detect = True - elif video_card_facts['auto_detect'] and not self.params['use_auto_detect']: + elif ( + video_card_facts["auto_detect"] + and not self.params["use_auto_detect"] + ): video_spec.device.useAutoDetect = False self.change_detected = True else: - if video_card_facts['auto_detect']: + if video_card_facts["auto_detect"]: auto_detect = True # useAutoDetect set to False then display number and video memory config can be changed if not auto_detect: - if self.params['display_number'] is not None: - if self.params['display_number'] < 1: - self.module.fail_json(msg="display_number attribute valid value: 1-10.") - if self.params['display_number'] != video_card_facts['display_number']: - video_spec.device.numDisplays = self.params['display_number'] + if self.params["display_number"] is not None: + if self.params["display_number"] < 1: + self.module.fail_json( + msg="display_number attribute valid value: 1-10." + ) + if ( + self.params["display_number"] + != video_card_facts["display_number"] + ): + video_spec.device.numDisplays = self.params[ + "display_number" + ] self.change_detected = True - if self.params['video_memory_mb'] is not None: - if self.params['video_memory_mb'] < 1.172: - self.module.fail_json(msg="video_memory_mb attribute valid value: ESXi 6.7U1(1.172-256 MB)," - "ESXi 6.7/6.5/6.0(1.172-128 MB).") - if int(self.params['video_memory_mb'] * 1024) != video_card_facts['video_memory']: - video_spec.device.videoRamSizeInKB = int(self.params['video_memory_mb'] * 1024) + if self.params["video_memory_mb"] is not None: + if self.params["video_memory_mb"] < 1.172: + self.module.fail_json( + msg="video_memory_mb attribute valid value: ESXi 6.7U1(1.172-256 MB)," + "ESXi 6.7/6.5/6.0(1.172-128 MB)." + ) + if ( + int(self.params["video_memory_mb"] * 1024) + != video_card_facts["video_memory"] + ): + video_spec.device.videoRamSizeInKB = int( + self.params["video_memory_mb"] * 1024 + ) self.change_detected = True else: - if self.params['display_number'] is not None or self.params['video_memory_mb'] is not None: - self.module.fail_json(msg="display_number and video_memory_mb can not be changed if use_auto_detect is true.") + if ( + self.params["display_number"] is not None + or self.params["video_memory_mb"] is not None + ): + self.module.fail_json( + msg="display_number and video_memory_mb can not be changed if use_auto_detect is true." 
+ ) # useAutoDetect value not control 3D config - if self.params['enable_3D'] is not None: - if self.params['enable_3D'] != video_card_facts['enable_3D_support']: - video_spec.device.enable3DSupport = self.params['enable_3D'] + if self.params["enable_3D"] is not None: + if ( + self.params["enable_3D"] + != video_card_facts["enable_3D_support"] + ): + video_spec.device.enable3DSupport = self.params["enable_3D"] self.change_detected = True - if self.params['enable_3D']: + if self.params["enable_3D"]: enabled_3d = True else: - if video_card_facts['enable_3D_support']: + if video_card_facts["enable_3D_support"]: enabled_3d = True else: - if video_card_facts['enable_3D_support']: + if video_card_facts["enable_3D_support"]: enabled_3d = True # 3D is enabled then 3D memory and renderer method can be set if enabled_3d: - if self.params['renderer_3D'] is not None: - renderer = self.params['renderer_3D'].lower() - if renderer not in ['automatic', 'software', 'hardware']: - self.module.fail_json(msg="renderer_3D attribute valid value: automatic, software, hardware.") - if renderer != video_card_facts['renderer_3D']: + if self.params["renderer_3D"] is not None: + renderer = self.params["renderer_3D"].lower() + if renderer not in ["automatic", "software", "hardware"]: + self.module.fail_json( + msg="renderer_3D attribute valid value: automatic, software, hardware." + ) + if renderer != video_card_facts["renderer_3D"]: video_spec.device.use3dRenderer = renderer self.change_detected = True - if self.params['memory_3D_mb'] is not None: - memory_3d = self.params['memory_3D_mb'] + if self.params["memory_3D_mb"] is not None: + memory_3d = self.params["memory_3D_mb"] if not self.is_power_of_2(memory_3d): - self.module.fail_json(msg="memory_3D_mb attribute should be an integer value and power of 2(32-2048).") + self.module.fail_json( + msg="memory_3D_mb attribute should be an integer value and power of 2(32-2048)." + ) else: if memory_3d < 32 or memory_3d > 2048: - self.module.fail_json(msg="memory_3D_mb attribute should be an integer value and power of 2(32-2048).") - if memory_3d * 1024 != video_card_facts['memory_3D']: + self.module.fail_json( + msg="memory_3D_mb attribute should be an integer value and power of 2(32-2048)." + ) + if memory_3d * 1024 != video_card_facts["memory_3D"]: video_spec.device.graphicsMemorySizeInKB = memory_3d * 1024 self.change_detected = True else: - if self.params['renderer_3D'] is not None or self.params['memory_3D_mb'] is not None: - self.module.fail_json(msg='3D renderer or 3D memory can not be configured if 3D is not enabled.') + if ( + self.params["renderer_3D"] is not None + or self.params["memory_3D_mb"] is not None + ): + self.module.fail_json( + msg="3D renderer or 3D memory can not be configured if 3D is not enabled." 
+ ) if not self.change_detected: return None return video_spec @@ -303,64 +352,87 @@ def reconfigure_vm_video(self, vm_obj): """ video_card_spec = self.get_video_card_spec(vm_obj) if video_card_spec is None: - return {'changed': False, 'failed': False, 'instance': self.video_card_facts} + return { + "changed": False, + "failed": False, + "instance": self.video_card_facts, + } self.config_spec.deviceChange.append(video_card_spec) try: task = vm_obj.ReconfigVM_Task(spec=self.config_spec) wait_for_task(task) except vim.fault.InvalidDeviceSpec as invalid_device_spec: - self.module.fail_json(msg="Failed to configure video card on given virtual machine due to invalid" - " device spec : %s" % (to_native(invalid_device_spec.msg)), - details="Please check ESXi server logs for more details.") + self.module.fail_json( + msg="Failed to configure video card on given virtual machine due to invalid" + " device spec : %s" % (to_native(invalid_device_spec.msg)), + details="Please check ESXi server logs for more details.", + ) except vim.fault.RestrictedVersion as e: - self.module.fail_json(msg="Failed to reconfigure virtual machine due to" - " product versioning restrictions: %s" % to_native(e.msg)) - if task.info.state == 'error': - return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg} + self.module.fail_json( + msg="Failed to reconfigure virtual machine due to" + " product versioning restrictions: %s" % to_native(e.msg) + ) + if task.info.state == "error": + return { + "changed": self.change_detected, + "failed": True, + "msg": task.info.error.msg, + } video_card_facts = self.gather_video_card_facts(vm_obj)[1] - return {'changed': self.change_detected, 'failed': False, 'instance': video_card_facts} + return { + "changed": self.change_detected, + "failed": False, + "instance": video_card_facts, + } def main(): argument_spec = vmware_argument_spec() argument_spec.update( - name=dict(type='str'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - datacenter=dict(type='str', default='ha-datacenter'), - gather_video_facts=dict(type='bool', default=False), - use_auto_detect=dict(type='bool'), - display_number=dict(type='int'), - video_memory_mb=dict(type='float'), - enable_3D=dict(type='bool'), - renderer_3D=dict(type='str', choices=['automatic', 'software', 'hardware']), - memory_3D_mb=dict(type='int'), + name=dict(type="str"), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + datacenter=dict(type="str", default="ha-datacenter"), + gather_video_facts=dict(type="bool", default=False), + use_auto_detect=dict(type="bool"), + display_number=dict(type="int"), + video_memory_mb=dict(type="float"), + enable_3D=dict(type="bool"), + renderer_3D=dict( + type="str", choices=["automatic", "software", "hardware"] + ), + memory_3D_mb=dict(type="int"), ) module = AnsibleModule( - argument_spec=argument_spec, - required_one_of=[ - ['name', 'uuid', 'moid'] - ] + argument_spec=argument_spec, required_one_of=[["name", "uuid", "moid"]] ) pyv = PyVmomiHelper(module) vm = pyv.get_vm() if not vm: - vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid') - module.fail_json(msg='Unable to find the specified virtual machine : %s' % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to find the specified virtual machine : %s" % vm_id + ) vm_facts = pyv.gather_facts(vm) - vm_power_state = 
vm_facts['hw_power_status'].lower() - if vm_power_state != 'poweredoff': - module.fail_json(msg='VM state should be poweredoff to reconfigure video card settings.') + vm_power_state = vm_facts["hw_power_status"].lower() + if vm_power_state != "poweredoff": + module.fail_json( + msg="VM state should be poweredoff to reconfigure video card settings." + ) result = pyv.reconfigure_vm_video(vm_obj=vm) - if result['failed']: + if result["failed"]: module.fail_json(**result) else: module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_guest_vnc.py b/plugins/modules/vmware_guest_vnc.py index 93dba68..37af847 100644 --- a/plugins/modules/vmware_guest_vnc.py +++ b/plugins/modules/vmware_guest_vnc.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_guest_vnc short_description: Manages VNC remote display on virtual machines in vCenter @@ -92,9 +93,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Enable VNC remote display on the VM vmware_guest_vnc: hostname: "{{ vcenter_hostname }}" @@ -133,9 +134,9 @@ state: absent delegate_to: localhost register: vnc_result -''' +""" -RETURN = ''' +RETURN = """ changed: description: If anything changed on VM's extraConfig. returned: always @@ -148,7 +149,7 @@ description: Dictionary describing the VM, including VNC info. returned: On success in both I(state) type: dict -''' +""" try: from pyVmomi import vim @@ -156,19 +157,23 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, get_vnc_extraconfig, wait_for_task, gather_vm_facts, TaskError +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + get_vnc_extraconfig, + wait_for_task, + gather_vm_facts, + TaskError, +) from ansible.module_utils._text import to_native def set_vnc_extraconfig(content, vm, enabled, ip, port, password): - result = dict( - changed=False, - failed=False, - ) + result = dict(changed=False, failed=False) # set new values key_prefix = "remotedisplay.vnc." 
new_values = dict() - for key in ['enabled', 'ip', 'port', 'password']: + for key in ["enabled", "ip", "port", "password"]: new_values[key_prefix + key] = "" if enabled: new_values[key_prefix + "enabled"] = "true" @@ -203,42 +208,42 @@ def set_vnc_extraconfig(content, vm, enabled, ip, port, password): try: wait_for_task(task) except TaskError as task_err: - result['failed'] = True - result['msg'] = to_native(task_err) + result["failed"] = True + result["msg"] = to_native(task_err) - if task.info.state == 'error': - result['failed'] = True - result['msg'] = task.info.error.msg + if task.info.state == "error": + result["failed"] = True + result["msg"] = task.info.error.msg else: - result['changed'] = True - result['instance'] = gather_vm_facts(content, vm) + result["changed"] = True + result["instance"] = gather_vm_facts(content, vm) return result def main(): argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(type='str', default='present', choices=['present', 'absent']), - name=dict(type='str'), - name_match=dict(type='str', choices=['first', 'last'], default='first'), - uuid=dict(type='str'), - moid=dict(type='str'), - folder=dict(type='str'), - vnc_ip=dict(type='str', default='0.0.0.0'), - vnc_port=dict(type='int', default=0), - vnc_password=dict(type='str', default='', no_log=True), - datacenter=dict(type='str', default='ha-datacenter') + state=dict( + type="str", default="present", choices=["present", "absent"] + ), + name=dict(type="str"), + name_match=dict( + type="str", choices=["first", "last"], default="first" + ), + uuid=dict(type="str"), + moid=dict(type="str"), + folder=dict(type="str"), + vnc_ip=dict(type="str", default="0.0.0.0"), + vnc_port=dict(type="int", default=0), + vnc_password=dict(type="str", default="", no_log=True), + datacenter=dict(type="str", default="ha-datacenter"), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[ - ['name', 'uuid', 'moid'] - ], - mutually_exclusive=[ - ['name', 'uuid', 'moid'] - ] + required_one_of=[["name", "uuid", "moid"]], + mutually_exclusive=[["name", "uuid", "moid"]], ) result = dict(changed=False, failed=False) @@ -249,16 +254,23 @@ def main(): result = set_vnc_extraconfig( pyv.content, vm, - (module.params['state'] == "present"), - module.params['vnc_ip'], - module.params['vnc_port'], - module.params['vnc_password'] + (module.params["state"] == "present"), + module.params["vnc_ip"], + module.params["vnc_port"], + module.params["vnc_password"], ) else: - vm_id = module.params.get('uuid') or module.params.get('name') or module.params.get('moid') - module.fail_json(msg="Unable to set VNC config for non-existing virtual machine : '%s'" % vm_id) + vm_id = ( + module.params.get("uuid") + or module.params.get("name") + or module.params.get("moid") + ) + module.fail_json( + msg="Unable to set VNC config for non-existing virtual machine : '%s'" + % vm_id + ) - if result.get('failed') is True: + if result.get("failed") is True: module.fail_json(**result) module.exit_json(**result) diff --git a/plugins/modules/vmware_host.py b/plugins/modules/vmware_host.py index f680306..148f426 100644 --- a/plugins/modules/vmware_host.py +++ b/plugins/modules/vmware_host.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + 
"metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host short_description: Add, remove, or move an ESXi host to, from, or within vCenter @@ -135,9 +136,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add ESXi Host to vCenter vmware_host: hostname: '{{ vcenter_hostname }}' @@ -202,15 +203,15 @@ esxi_ssl_thumbprint: "3C:A5:60:6F:7A:B7:C4:6C:48:28:3D:2F:A5:EC:A3:58:13:88:F6:DD" state: present delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about the new host system added returned: on successful addition type: str sample: "Host already connected to vCenter 'vcenter01' in cluster 'cluster01'" -''' +""" try: from pyVmomi import vim, vmodl @@ -223,51 +224,62 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible_collections.vmware.general.plugins.module_utils.vmware import ( - PyVmomi, TaskError, vmware_argument_spec, - wait_for_task, find_host_by_cluster_datacenter, find_hostsystem_by_name + PyVmomi, + TaskError, + vmware_argument_spec, + wait_for_task, + find_host_by_cluster_datacenter, + find_hostsystem_by_name, ) class VMwareHost(PyVmomi): """Class to manage vCenter connection""" + def __init__(self, module): super(VMwareHost, self).__init__(module) - self.vcenter = module.params['hostname'] - self.datacenter_name = module.params['datacenter_name'] - self.cluster_name = module.params['cluster_name'] - self.folder_name = module.params['folder'] - self.esxi_hostname = module.params['esxi_hostname'] - self.esxi_username = module.params['esxi_username'] - self.esxi_password = module.params['esxi_password'] - self.state = module.params['state'] - self.esxi_ssl_thumbprint = module.params.get('esxi_ssl_thumbprint', '') - self.force_connection = module.params.get('force_connection') - self.fetch_ssl_thumbprint = module.params.get('fetch_ssl_thumbprint') - self.reconnect_disconnected = module.params.get('reconnect_disconnected') - self.host_update = self.host = self.cluster = self.folder = self.host_parent_compute_resource = None + self.vcenter = module.params["hostname"] + self.datacenter_name = module.params["datacenter_name"] + self.cluster_name = module.params["cluster_name"] + self.folder_name = module.params["folder"] + self.esxi_hostname = module.params["esxi_hostname"] + self.esxi_username = module.params["esxi_username"] + self.esxi_password = module.params["esxi_password"] + self.state = module.params["state"] + self.esxi_ssl_thumbprint = module.params.get("esxi_ssl_thumbprint", "") + self.force_connection = module.params.get("force_connection") + self.fetch_ssl_thumbprint = module.params.get("fetch_ssl_thumbprint") + self.reconnect_disconnected = module.params.get( + "reconnect_disconnected" + ) + self.host_update = ( + self.host + ) = ( + self.cluster + ) = self.folder = self.host_parent_compute_resource = None def process_state(self): """Check the current state""" host_states = { - 'absent': { - 'present': self.state_remove_host, - 'update': self.state_remove_host, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_remove_host, + "update": self.state_remove_host, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_exit_unchanged, + "update": self.state_update_host, + "absent": self.state_add_host, }, - 'present': { - 'present': self.state_exit_unchanged, 
- 'update': self.state_update_host, - 'absent': self.state_add_host, + "add_or_reconnect": { + "present": self.state_reconnect_host, + "update": self.state_update_host, + "absent": self.state_add_host, }, - 'add_or_reconnect': { - 'present': self.state_reconnect_host, - 'update': self.state_update_host, - 'absent': self.state_add_host, + "reconnect": { + "present": self.state_reconnect_host, + "update": self.state_update_host, }, - 'reconnect': { - 'present': self.state_reconnect_host, - 'update': self.state_update_host, - } } try: @@ -282,17 +294,21 @@ def process_state(self): def check_host_state(self): """Check current state""" # Check if the host is already connected to vCenter - self.host_update = find_hostsystem_by_name(self.content, self.esxi_hostname) + self.host_update = find_hostsystem_by_name( + self.content, self.esxi_hostname + ) if self.host_update: # The host name is unique in vCenter; A host with the same name cannot exist in another datacenter # However, the module will fail later if the target folder/cluster is in another datacenter as the host # Check if the host is connected under the target cluster if self.cluster_name: - self.host, self.cluster = self.search_cluster(self.datacenter_name, self.cluster_name, self.esxi_hostname) + self.host, self.cluster = self.search_cluster( + self.datacenter_name, self.cluster_name, self.esxi_hostname + ) if self.host: - state = 'present' + state = "present" else: - state = 'update' + state = "update" # Check if the host is connected under the target folder elif self.folder_name: self.folder = self.search_folder(self.folder_name) @@ -300,18 +316,21 @@ def check_host_state(self): if not child or not isinstance(child, vim.ComputeResource): continue try: - if isinstance(child.host[0], vim.HostSystem) and child.name == self.esxi_hostname: + if ( + isinstance(child.host[0], vim.HostSystem) + and child.name == self.esxi_hostname + ): self.host_parent_compute_resource = child self.host = child.host[0] break except IndexError: continue if self.host: - state = 'present' + state = "present" else: - state = 'update' + state = "update" else: - state = 'absent' + state = "absent" return state def search_folder(self, folder_name): @@ -331,20 +350,33 @@ def search_cluster(self, datacenter_name, cluster_name, esxi_hostname): Returns: host and cluster object """ return find_host_by_cluster_datacenter( - self.module, self.content, datacenter_name, cluster_name, esxi_hostname + self.module, + self.content, + datacenter_name, + cluster_name, + esxi_hostname, ) def state_exit_unchanged(self): """Exit with status message""" if not self.host_update: result = "Host already disconnected" - elif self.reconnect_disconnected and self.host_update.runtime.connectionState == 'disconnected': + elif ( + self.reconnect_disconnected + and self.host_update.runtime.connectionState == "disconnected" + ): self.state_reconnect_host() else: if self.folder_name: - result = "Host already connected to vCenter '%s' in folder '%s'" % (self.vcenter, self.folder_name) + result = ( + "Host already connected to vCenter '%s' in folder '%s'" + % (self.vcenter, self.folder_name) + ) elif self.cluster_name: - result = "Host already connected to vCenter '%s' in cluster '%s'" % (self.vcenter, self.cluster_name) + result = ( + "Host already connected to vCenter '%s' in cluster '%s'" + % (self.vcenter, self.cluster_name) + ) self.module.exit_json(changed=False, result=str(result)) def state_add_host(self): @@ -356,7 +388,7 @@ def state_add_host(self): result = "Host would be connected to 
vCenter '%s'" % self.vcenter else: host_connect_spec = self.get_host_connect_spec() - as_connected = self.params.get('add_connected') + as_connected = self.params.get("add_connected") esxi_license = None resource_pool = None task = None @@ -364,125 +396,145 @@ def state_add_host(self): self.folder = self.search_folder(self.folder_name) try: task = self.folder.AddStandaloneHost( - spec=host_connect_spec, compResSpec=resource_pool, - addConnected=as_connected, license=esxi_license + spec=host_connect_spec, + compResSpec=resource_pool, + addConnected=as_connected, + license=esxi_license, ) except vim.fault.InvalidLogin as invalid_login: self.module.fail_json( - msg="Cannot authenticate with the host : %s" % to_native(invalid_login) + msg="Cannot authenticate with the host : %s" + % to_native(invalid_login) ) except vim.fault.HostConnectFault as connect_fault: self.module.fail_json( - msg="An error occurred during connect : %s" % to_native(connect_fault) + msg="An error occurred during connect : %s" + % to_native(connect_fault) ) except vim.fault.DuplicateName as duplicate_name: self.module.fail_json( - msg="The folder already contains a host with the same name : %s" % - to_native(duplicate_name) + msg="The folder already contains a host with the same name : %s" + % to_native(duplicate_name) ) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( - msg="An argument was specified incorrectly : %s" % to_native(invalid_argument) + msg="An argument was specified incorrectly : %s" + % to_native(invalid_argument) ) except vim.fault.AlreadyBeingManaged as already_managed: self.module.fail_json( - msg="The host is already being managed by another vCenter server : %s" % - to_native(already_managed) + msg="The host is already being managed by another vCenter server : %s" + % to_native(already_managed) ) except vmodl.fault.NotEnoughLicenses as not_enough_licenses: self.module.fail_json( - msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses) + msg="There are not enough licenses to add this host : %s" + % to_native(not_enough_licenses) ) except vim.fault.NoHost as no_host: self.module.fail_json( - msg="Unable to contact the host : %s" % to_native(no_host) + msg="Unable to contact the host : %s" + % to_native(no_host) ) except vmodl.fault.NotSupported as not_supported: self.module.fail_json( - msg="The folder is not a host folder : %s" % to_native(not_supported) + msg="The folder is not a host folder : %s" + % to_native(not_supported) ) except vim.fault.NotSupportedHost as host_not_supported: self.module.fail_json( - msg="The host is running a software version that is not supported : %s" % - to_native(host_not_supported) + msg="The host is running a software version that is not supported : %s" + % to_native(host_not_supported) ) except vim.fault.AgentInstallFailed as agent_install: self.module.fail_json( - msg="Error during vCenter agent installation : %s" % to_native(agent_install) + msg="Error during vCenter agent installation : %s" + % to_native(agent_install) ) except vim.fault.AlreadyConnected as already_connected: self.module.fail_json( - msg="The host is already connected to the vCenter server : %s" % to_native(already_connected) + msg="The host is already connected to the vCenter server : %s" + % to_native(already_connected) ) except vim.fault.SSLVerifyFault as ssl_fault: self.module.fail_json( - msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault) + msg="The host certificate could not be authenticated : %s" 
+ % to_native(ssl_fault) ) elif self.cluster_name: self.host, self.cluster = self.search_cluster( - self.datacenter_name, - self.cluster_name, - self.esxi_hostname + self.datacenter_name, self.cluster_name, self.esxi_hostname ) try: task = self.cluster.AddHost_Task( - spec=host_connect_spec, asConnected=as_connected, - resourcePool=resource_pool, license=esxi_license + spec=host_connect_spec, + asConnected=as_connected, + resourcePool=resource_pool, + license=esxi_license, ) except vim.fault.InvalidLogin as invalid_login: self.module.fail_json( - msg="Cannot authenticate with the host : %s" % to_native(invalid_login) + msg="Cannot authenticate with the host : %s" + % to_native(invalid_login) ) except vim.fault.HostConnectFault as connect_fault: self.module.fail_json( - msg="An error occurred during connect : %s" % to_native(connect_fault) + msg="An error occurred during connect : %s" + % to_native(connect_fault) ) except vim.fault.DuplicateName as duplicate_name: self.module.fail_json( - msg="The cluster already contains a host with the same name : %s" % - to_native(duplicate_name) + msg="The cluster already contains a host with the same name : %s" + % to_native(duplicate_name) ) except vim.fault.AlreadyBeingManaged as already_managed: self.module.fail_json( - msg="The host is already being managed by another vCenter server : %s" % - to_native(already_managed) + msg="The host is already being managed by another vCenter server : %s" + % to_native(already_managed) ) except vmodl.fault.NotEnoughLicenses as not_enough_licenses: self.module.fail_json( - msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses) + msg="There are not enough licenses to add this host : %s" + % to_native(not_enough_licenses) ) except vim.fault.NoHost as no_host: self.module.fail_json( - msg="Unable to contact the host : %s" % to_native(no_host) + msg="Unable to contact the host : %s" + % to_native(no_host) ) except vim.fault.NotSupportedHost as host_not_supported: self.module.fail_json( msg="The host is running a software version that is not supported; " - "It may still be possible to add the host as a stand-alone host : %s" % - to_native(host_not_supported) + "It may still be possible to add the host as a stand-alone host : %s" + % to_native(host_not_supported) ) except vim.fault.TooManyHosts as too_many_hosts: self.module.fail_json( - msg="No additional hosts can be added to the cluster : %s" % to_native(too_many_hosts) + msg="No additional hosts can be added to the cluster : %s" + % to_native(too_many_hosts) ) except vim.fault.AgentInstallFailed as agent_install: self.module.fail_json( - msg="Error during vCenter agent installation : %s" % to_native(agent_install) + msg="Error during vCenter agent installation : %s" + % to_native(agent_install) ) except vim.fault.AlreadyConnected as already_connected: self.module.fail_json( - msg="The host is already connected to the vCenter server : %s" % to_native(already_connected) + msg="The host is already connected to the vCenter server : %s" + % to_native(already_connected) ) except vim.fault.SSLVerifyFault as ssl_fault: self.module.fail_json( - msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault) + msg="The host certificate could not be authenticated : %s" + % to_native(ssl_fault) ) try: changed, result = wait_for_task(task) result = "Host connected to vCenter '%s'" % self.vcenter except TaskError as task_error: self.module.fail_json( - msg="Failed to add host to vCenter '%s' : %s" % (self.vcenter, 
to_native(task_error)) + msg="Failed to add host to vCenter '%s' : %s" + % (self.vcenter, to_native(task_error)) ) self.module.exit_json(changed=changed, result=result) @@ -493,34 +545,45 @@ def get_host_connect_spec(self): Returns: host connection specification """ # Get the thumbprint of the SSL certificate - if self.fetch_ssl_thumbprint and self.esxi_ssl_thumbprint == '': + if self.fetch_ssl_thumbprint and self.esxi_ssl_thumbprint == "": sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(1) - if self.module.params['proxy_host']: - sock.connect(( - self.module.params['proxy_host'], - self.module.params['proxy_port'])) - command = "CONNECT %s:443 HTTP/1.0\r\n\r\n" % (self.esxi_hostname) + if self.module.params["proxy_host"]: + sock.connect( + ( + self.module.params["proxy_host"], + self.module.params["proxy_port"], + ) + ) + command = "CONNECT %s:443 HTTP/1.0\r\n\r\n" % ( + self.esxi_hostname + ) sock.send(command.encode()) buf = sock.recv(8192).decode() - if buf.split()[1] != '200': + if buf.split()[1] != "200": self.module.fail_json(msg="Failed to connect to the proxy") ctx = ssl.create_default_context() ctx.check_hostname = False ctx.verify_mode = ssl.CERT_NONE - der_cert_bin = ctx.wrap_socket(sock, server_hostname=self.esxi_hostname).getpeercert(True) + der_cert_bin = ctx.wrap_socket( + sock, server_hostname=self.esxi_hostname + ).getpeercert(True) sock.close() else: wrapped_socket = ssl.wrap_socket(sock) try: wrapped_socket.connect((self.esxi_hostname, 443)) except socket.error as socket_error: - self.module.fail_json(msg="Cannot connect to host : %s" % socket_error) + self.module.fail_json( + msg="Cannot connect to host : %s" % socket_error + ) else: der_cert_bin = wrapped_socket.getpeercert(True) wrapped_socket.close() - thumb_sha1 = self.format_number(hashlib.sha1(der_cert_bin).hexdigest()) + thumb_sha1 = self.format_number( + hashlib.sha1(der_cert_bin).hexdigest() + ) sslThumbprint = thumb_sha1 else: sslThumbprint = self.esxi_ssl_thumbprint @@ -537,7 +600,7 @@ def get_host_connect_spec(self): def format_number(number): """Format number""" string = str(number) - return ':'.join(a + b for a, b in zip(string[::2], string[1::2])) + return ":".join(a + b for a, b in zip(string[::2], string[1::2])) def state_reconnect_host(self): """Reconnect host to vCenter""" @@ -554,22 +617,24 @@ def state_reconnect_host(self): def reconnect_host(self, host_object): """Reconnect host to vCenter""" reconnecthost_args = {} - reconnecthost_args['reconnectSpec'] = vim.HostSystem.ReconnectSpec() - reconnecthost_args['reconnectSpec'].syncState = True + reconnecthost_args["reconnectSpec"] = vim.HostSystem.ReconnectSpec() + reconnecthost_args["reconnectSpec"].syncState = True if self.esxi_username and self.esxi_password: # Build the connection spec as well and fetch thumbprint if enabled # Useful if you reinstalled a host and it uses a new self-signed certificate - reconnecthost_args['cnxSpec'] = self.get_host_connect_spec() + reconnecthost_args["cnxSpec"] = self.get_host_connect_spec() try: task = host_object.ReconnectHost_Task(**reconnecthost_args) except vim.fault.InvalidLogin as invalid_login: self.module.fail_json( - msg="Cannot authenticate with the host : %s" % to_native(invalid_login) + msg="Cannot authenticate with the host : %s" + % to_native(invalid_login) ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( - msg="The host is not disconnected : %s" % to_native(invalid_state) + msg="The host is not disconnected : %s" + % to_native(invalid_state) ) except 
vim.fault.InvalidName as invalid_name: self.module.fail_json( @@ -577,19 +642,23 @@ def reconnect_host(self, host_object): ) except vim.fault.HostConnectFault as connect_fault: self.module.fail_json( - msg="An error occurred during reconnect : %s" % to_native(connect_fault) + msg="An error occurred during reconnect : %s" + % to_native(connect_fault) ) except vmodl.fault.NotSupported as not_supported: self.module.fail_json( - msg="No host can be added to this group : %s" % to_native(not_supported) + msg="No host can be added to this group : %s" + % to_native(not_supported) ) except vim.fault.AlreadyBeingManaged as already_managed: self.module.fail_json( - msg="The host is already being managed by another vCenter server : %s" % to_native(already_managed) + msg="The host is already being managed by another vCenter server : %s" + % to_native(already_managed) ) except vmodl.fault.NotEnoughLicenses as not_enough_licenses: self.module.fail_json( - msg="There are not enough licenses to add this host : %s" % to_native(not_enough_licenses) + msg="There are not enough licenses to add this host : %s" + % to_native(not_enough_licenses) ) except vim.fault.NoHost as no_host: self.module.fail_json( @@ -597,19 +666,20 @@ def reconnect_host(self, host_object): ) except vim.fault.NotSupportedHost as host_not_supported: self.module.fail_json( - msg="The host is running a software version that is not supported : %s" % - to_native(host_not_supported) + msg="The host is running a software version that is not supported : %s" + % to_native(host_not_supported) ) except vim.fault.SSLVerifyFault as ssl_fault: self.module.fail_json( - msg="The host certificate could not be authenticated : %s" % to_native(ssl_fault) + msg="The host certificate could not be authenticated : %s" + % to_native(ssl_fault) ) try: changed, result = wait_for_task(task) except TaskError as task_error: self.module.fail_json( - msg="Failed to reconnect host to vCenter '%s' due to %s" % - (self.vcenter, to_native(task_error)) + msg="Failed to reconnect host to vCenter '%s' due to %s" + % (self.vcenter, to_native(task_error)) ) def state_remove_host(self): @@ -621,7 +691,7 @@ def state_remove_host(self): else: # Check parent type parent_type = self.get_parent_type(self.host_update) - if parent_type == 'cluster': + if parent_type == "cluster": self.put_host_in_maintenance_mode(self.host_update) try: if self.folder_name: @@ -635,7 +705,8 @@ def state_remove_host(self): result = "Host removed from vCenter '%s'" % self.vcenter except TaskError as task_error: self.module.fail_json( - msg="Failed to remove the host from vCenter '%s' : %s" % (self.vcenter, to_native(task_error)) + msg="Failed to remove the host from vCenter '%s' : %s" + % (self.vcenter, to_native(task_error)) ) self.module.exit_json(changed=changed, result=str(result)) @@ -644,23 +715,29 @@ def put_host_in_maintenance_mode(self, host_object): if not host_object.runtime.inMaintenanceMode: try: try: - maintenance_mode_task = host_object.EnterMaintenanceMode_Task(300, True, None) + maintenance_mode_task = host_object.EnterMaintenanceMode_Task( + 300, True, None + ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( - msg="The host is already in maintenance mode : %s" % to_native(invalid_state) + msg="The host is already in maintenance mode : %s" + % to_native(invalid_state) ) except vim.fault.Timedout as timed_out: self.module.fail_json( - msg="The maintenance mode operation timed out : %s" % to_native(timed_out) + msg="The maintenance mode operation timed out : %s" + % 
to_native(timed_out) ) except vim.fault.Timedout as timed_out: self.module.fail_json( - msg="The maintenance mode operation was canceled : %s" % to_native(timed_out) + msg="The maintenance mode operation was canceled : %s" + % to_native(timed_out) ) wait_for_task(maintenance_mode_task) except TaskError as task_err: self.module.fail_json( - msg="Failed to put the host in maintenance mode : %s" % to_native(task_err) + msg="Failed to put the host in maintenance mode : %s" + % to_native(task_err) ) def get_parent_type(self, host_object): @@ -672,9 +749,9 @@ def get_parent_type(self, host_object): # check 'vim.ClusterComputeResource' first because it's also an # instance of 'vim.ComputeResource' if isinstance(host_object.parent, vim.ClusterComputeResource): - object_type = 'cluster' + object_type = "cluster" elif isinstance(host_object.parent, vim.ComputeResource): - object_type = 'folder' + object_type = "folder" return object_type def state_update_host(self): @@ -684,7 +761,10 @@ def state_update_host(self): reconnect = False # Check if the host is disconnected if reconnect disconnected hosts is true - if self.reconnect_disconnected and self.host_update.runtime.connectionState == 'disconnected': + if ( + self.reconnect_disconnected + and self.host_update.runtime.connectionState == "disconnected" + ): reconnect = True # Check parent type @@ -692,70 +772,101 @@ def state_update_host(self): if self.folder_name: if self.module.check_mode: - if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect': - result = "Host would be reconnected and moved to folder '%s'" % self.folder_name + if ( + reconnect + or self.state == "add_or_reconnect" + or self.state == "reconnect" + ): + result = ( + "Host would be reconnected and moved to folder '%s'" + % self.folder_name + ) else: - result = "Host would be moved to folder '%s'" % self.folder_name + result = ( + "Host would be moved to folder '%s'" % self.folder_name + ) else: # Reconnect the host if disconnected or if specified by state - if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect': + if ( + reconnect + or self.state == "add_or_reconnect" + or self.state == "reconnect" + ): self.reconnect_host(self.host_update) try: try: - if parent_type == 'folder': + if parent_type == "folder": # Move ESXi host from folder to folder - task = self.folder.MoveIntoFolder_Task([self.host_update.parent]) - elif parent_type == 'cluster': + task = self.folder.MoveIntoFolder_Task( + [self.host_update.parent] + ) + elif parent_type == "cluster": self.put_host_in_maintenance_mode(self.host_update) # Move ESXi host from cluster to folder - task = self.folder.MoveIntoFolder_Task([self.host_update]) + task = self.folder.MoveIntoFolder_Task( + [self.host_update] + ) except vim.fault.DuplicateName as duplicate_name: self.module.fail_json( - msg="The folder already contains an object with the specified name : %s" % - to_native(duplicate_name) + msg="The folder already contains an object with the specified name : %s" + % to_native(duplicate_name) ) except vim.fault.InvalidFolder as invalid_folder: self.module.fail_json( - msg="The parent of this folder is in the list of objects : %s" % - to_native(invalid_folder) + msg="The parent of this folder is in the list of objects : %s" + % to_native(invalid_folder) ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( msg="Failed to move host, this can be due to either of following :" - " 1. The host is not part of the same datacenter, 2. 
The host is not in maintenance mode : %s" % - to_native(invalid_state) + " 1. The host is not part of the same datacenter, 2. The host is not in maintenance mode : %s" + % to_native(invalid_state) ) except vmodl.fault.NotSupported as not_supported: self.module.fail_json( - msg="The target folder is not a host folder : %s" % - to_native(not_supported) + msg="The target folder is not a host folder : %s" + % to_native(not_supported) ) except vim.fault.DisallowedOperationOnFailoverHost as failover_host: self.module.fail_json( - msg="The host is configured as a failover host : %s" % - to_native(failover_host) + msg="The host is configured as a failover host : %s" + % to_native(failover_host) ) except vim.fault.VmAlreadyExistsInDatacenter as already_exists: self.module.fail_json( msg="The host's virtual machines are already registered to a host in " - "the destination datacenter : %s" % to_native(already_exists) + "the destination datacenter : %s" + % to_native(already_exists) ) changed, result = wait_for_task(task) except TaskError as task_error_exception: task_error = task_error_exception.args[0] self.module.fail_json( - msg="Failed to move host %s to folder %s due to %s" % - (self.esxi_hostname, self.folder_name, to_native(task_error)) + msg="Failed to move host %s to folder %s due to %s" + % ( + self.esxi_hostname, + self.folder_name, + to_native(task_error), + ) + ) + if ( + reconnect + or self.state == "add_or_reconnect" + or self.state == "reconnect" + ): + result = ( + "Host reconnected and moved to folder '%s'" + % self.folder_name ) - if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect': - result = "Host reconnected and moved to folder '%s'" % self.folder_name else: result = "Host moved to folder '%s'" % self.folder_name elif self.cluster_name: if self.module.check_mode: - result = "Host would be moved to cluster '%s'" % self.cluster_name + result = ( + "Host would be moved to cluster '%s'" % self.cluster_name + ) else: - if parent_type == 'cluster': + if parent_type == "cluster": # Put host in maintenance mode if moved from another cluster self.put_host_in_maintenance_mode(self.host_update) resource_pool = None @@ -766,29 +877,37 @@ def state_update_host(self): ) except vim.fault.TooManyHosts as too_many_hosts: self.module.fail_json( - msg="No additional hosts can be added to the cluster : %s" % to_native(too_many_hosts) + msg="No additional hosts can be added to the cluster : %s" + % to_native(too_many_hosts) ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( - msg="The host is already part of a cluster and is not in maintenance mode : %s" % - to_native(invalid_state) + msg="The host is already part of a cluster and is not in maintenance mode : %s" + % to_native(invalid_state) ) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( msg="Failed to move host, this can be due to either of following :" " 1. The host is is not a part of the same datacenter as the cluster," - " 2. The source and destination clusters are the same : %s" % - to_native(invalid_argument) + " 2. 
The source and destination clusters are the same : %s" + % to_native(invalid_argument) ) changed, result = wait_for_task(task) except TaskError as task_error_exception: task_error = task_error_exception.args[0] self.module.fail_json( - msg="Failed to move host to cluster '%s' due to : %s" % - (self.cluster_name, to_native(task_error)) + msg="Failed to move host to cluster '%s' due to : %s" + % (self.cluster_name, to_native(task_error)) + ) + if ( + reconnect + or self.state == "add_or_reconnect" + or self.state == "reconnect" + ): + result = ( + "Host reconnected and moved to cluster '%s'" + % self.cluster_name ) - if reconnect or self.state == 'add_or_reconnect' or self.state == 'reconnect': - result = "Host reconnected and moved to cluster '%s'" % self.cluster_name else: result = "Host moved to cluster '%s'" % self.cluster_name @@ -799,40 +918,42 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - datacenter_name=dict(type='str', required=True, aliases=['datacenter']), - cluster_name=dict(type='str', aliases=['cluster']), - esxi_hostname=dict(type='str', required=True), - esxi_username=dict(type='str'), - esxi_password=dict(type='str', no_log=True), - esxi_ssl_thumbprint=dict(type='str', default='', aliases=['ssl_thumbprint']), - fetch_ssl_thumbprint=dict(type='bool', default=True), - state=dict(default='present', - choices=['present', 'absent', 'add_or_reconnect', 'reconnect'], - type='str'), - folder=dict(type='str', aliases=['folder_name']), - add_connected=dict(type='bool', default=True), - force_connection=dict(type='bool', default=True), - reconnect_disconnected=dict(type='bool', default=True), + datacenter_name=dict( + type="str", required=True, aliases=["datacenter"] + ), + cluster_name=dict(type="str", aliases=["cluster"]), + esxi_hostname=dict(type="str", required=True), + esxi_username=dict(type="str"), + esxi_password=dict(type="str", no_log=True), + esxi_ssl_thumbprint=dict( + type="str", default="", aliases=["ssl_thumbprint"] + ), + fetch_ssl_thumbprint=dict(type="bool", default=True), + state=dict( + default="present", + choices=["present", "absent", "add_or_reconnect", "reconnect"], + type="str", + ), + folder=dict(type="str", aliases=["folder_name"]), + add_connected=dict(type="bool", default=True), + force_connection=dict(type="bool", default=True), + reconnect_disconnected=dict(type="bool", default=True), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ - ['state', 'present', ['esxi_username', 'esxi_password']], - ['state', 'add_or_reconnect', ['esxi_username', 'esxi_password']] - ], - required_one_of=[ - ['cluster_name', 'folder'], + ["state", "present", ["esxi_username", "esxi_password"]], + ["state", "add_or_reconnect", ["esxi_username", "esxi_password"]], ], - mutually_exclusive=[ - ['cluster_name', 'folder'], - ] + required_one_of=[["cluster_name", "folder"]], + mutually_exclusive=[["cluster_name", "folder"]], ) vmware_host = VMwareHost(module) vmware_host.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_acceptance.py b/plugins/modules/vmware_host_acceptance.py index 6e9168e..1171c6a 100644 --- a/plugins/modules/vmware_host_acceptance.py +++ b/plugins/modules/vmware_host_acceptance.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 
'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_acceptance short_description: Manage the host acceptance level of an ESXi host @@ -63,9 +64,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set acceptance level to community for all ESXi Host in given Cluster vmware_host_acceptance: hostname: '{{ vcenter_hostname }}' @@ -97,69 +98,84 @@ state: list delegate_to: localhost register: host_acceptance_level -''' +""" -RETURN = r''' +RETURN = r""" facts: description: - dict with hostname as key and dict with acceptance level facts, error as value returned: facts type: dict sample: { "facts": { "localhost.localdomain": { "error": "NA", "level": "vmware_certified" }}} -''' +""" try: from pyVmomi import vim except ImportError: pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native class VMwareAccpetanceManager(PyVmomi): def __init__(self, module): super(VMwareAccpetanceManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) - self.desired_state = self.params.get('state') + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) + self.desired_state = self.params.get("state") self.hosts_facts = {} - self.acceptance_level = self.params.get('acceptance_level') + self.acceptance_level = self.params.get("acceptance_level") def gather_acceptance_facts(self): for host in self.hosts: - self.hosts_facts[host.name] = dict(level='', error='NA') + self.hosts_facts[host.name] = dict(level="", error="NA") host_image_config_mgr = host.configManager.imageConfigManager if host_image_config_mgr: try: - self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance() + self.hosts_facts[host.name][ + "level" + ] = host_image_config_mgr.HostImageConfigGetAcceptance() except vim.fault.HostConfigFault as e: - self.hosts_facts[host.name]['error'] = to_native(e.msg) + self.hosts_facts[host.name]["error"] = to_native(e.msg) def set_acceptance_level(self): change = [] for host in self.hosts: host_changed = False - if self.hosts_facts[host.name]['level'] != self.acceptance_level: + if self.hosts_facts[host.name]["level"] != self.acceptance_level: host_image_config_mgr = host.configManager.imageConfigManager if host_image_config_mgr: try: if self.module.check_mode: - self.hosts_facts[host.name]['level'] = self.acceptance_level + self.hosts_facts[host.name][ + "level" + ] = self.acceptance_level else: - host_image_config_mgr.UpdateHostImageAcceptanceLevel(newAcceptanceLevel=self.acceptance_level) - self.hosts_facts[host.name]['level'] = host_image_config_mgr.HostImageConfigGetAcceptance() + host_image_config_mgr.UpdateHostImageAcceptanceLevel( + newAcceptanceLevel=self.acceptance_level + ) + self.hosts_facts[host.name][ + "level" + ] = ( 
+ host_image_config_mgr.HostImageConfigGetAcceptance() + ) host_changed = True except vim.fault.HostConfigFault as e: - self.hosts_facts[host.name]['error'] = to_native(e.msg) + self.hosts_facts[host.name]["error"] = to_native(e.msg) change.append(host_changed) self.module.exit_json(changed=any(change), facts=self.hosts_facts) def check_acceptance_state(self): self.gather_acceptance_facts() - if self.desired_state == 'list': + if self.desired_state == "list": self.module.exit_json(changed=False, facts=self.hosts_facts) self.set_acceptance_level() @@ -167,25 +183,25 @@ def check_acceptance_state(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - acceptance_level=dict(type='str', - choices=['community', 'partner', 'vmware_accepted', 'vmware_certified'] - ), - state=dict(type='str', - choices=['list', 'present'], - default='list'), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + acceptance_level=dict( + type="str", + choices=[ + "community", + "partner", + "vmware_accepted", + "vmware_certified", + ], + ), + state=dict(type="str", choices=["list", "present"], default="list"), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - required_if=[ - ['state', 'present', ['acceptance_level']], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + required_if=[["state", "present", ["acceptance_level"]]], + supports_check_mode=True, ) vmware_host_accept_config = VMwareAccpetanceManager(module) diff --git a/plugins/modules/vmware_host_active_directory.py b/plugins/modules/vmware_host_active_directory.py index f77d742..c2bd515 100644 --- a/plugins/modules/vmware_host_active_directory.py +++ b/plugins/modules/vmware_host_active_directory.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_active_directory short_description: Joins an ESXi host system to an Active Directory domain or leaves it @@ -62,9 +63,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Join an AD domain vmware_host_active_directory: hostname: '{{ vcenter_hostname }}' @@ -87,9 +88,9 @@ ad_state: absent validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about host system's AD domain join state returned: always @@ -105,7 +106,7 @@ "ad_state_previous": "absent", }, } -''' +""" try: from pyVmomi import vim @@ -113,7 +114,12 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + TaskError, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native @@ -122,198 +128,284 @@ class VmwareHostAdAuthentication(PyVmomi): def __init__(self, module): 
super(VmwareHostAdAuthentication, self).__init__(module) - cluster_name = self.params.get('cluster_name') - esxi_host_name = self.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name") + esxi_host_name = self.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") def ensure(self): """Manage Active Directory Authentication for an ESXi host system""" results = dict(changed=False, result=dict()) - desired_state = self.params.get('ad_state') - domain = self.params.get('ad_domain') - ad_user = self.params.get('ad_user') - ad_password = self.params.get('ad_password') + desired_state = self.params.get("ad_state") + domain = self.params.get("ad_domain") + ad_user = self.params.get("ad_user") + ad_password = self.params.get("ad_password") host_change_list = [] for host in self.hosts: changed = False - results['result'][host.name] = dict(msg='') + results["result"][host.name] = dict(msg="") active_directory_info = self.get_ad_info(host) - results['result'][host.name]['ad_state'] = desired_state - results['result'][host.name]['ad_domain'] = domain - if desired_state == 'present': + results["result"][host.name]["ad_state"] = desired_state + results["result"][host.name]["ad_domain"] = domain + if desired_state == "present": # Don't do anything if already enabled and joined if active_directory_info.enabled: # Joined and no problems with the domain membership - if active_directory_info.domainMembershipStatus == 'ok': - results['result'][host.name]['changed'] = False - results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus - results['result'][host.name]['joined_domain'] = active_directory_info.joinedDomain - results['result'][host.name]['trusted_domains'] = active_directory_info.trustedDomain - results['result'][host.name]['msg'] = ( - "Host is joined to AD domain and there are no problems with the domain membership" - ) + if active_directory_info.domainMembershipStatus == "ok": + results["result"][host.name]["changed"] = False + results["result"][host.name][ + "membership_state" + ] = active_directory_info.domainMembershipStatus + results["result"][host.name][ + "joined_domain" + ] = active_directory_info.joinedDomain + results["result"][host.name][ + "trusted_domains" + ] = active_directory_info.trustedDomain + results["result"][host.name][ + "msg" + ] = "Host is joined to AD domain and there are no problems with the domain membership" # Joined, but problems with the domain membership else: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus - results['result'][host.name]['joined_domain'] = active_directory_info.joinedDomain - results['result'][host.name]['trusted_domains'] = active_directory_info.trustedDomain + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "membership_state" + ] = active_directory_info.domainMembershipStatus + results["result"][host.name][ + "joined_domain" + ] = active_directory_info.joinedDomain + results["result"][host.name][ + "trusted_domains" + ] = active_directory_info.trustedDomain msg = "Host is joined to AD domain, but " - if active_directory_info.domainMembershipStatus == 'clientTrustBroken': + if ( + 
active_directory_info.domainMembershipStatus + == "clientTrustBroken" + ): msg += "the client side of the trust relationship is broken" - elif active_directory_info.domainMembershipStatus == 'inconsistentTrust': + elif ( + active_directory_info.domainMembershipStatus + == "inconsistentTrust" + ): msg += "unexpected domain controller responded" - elif active_directory_info.domainMembershipStatus == 'noServers': - msg += "the host thinks it's part of a domain and " \ + elif ( + active_directory_info.domainMembershipStatus + == "noServers" + ): + msg += ( + "the host thinks it's part of a domain and " "no domain controllers could be reached to confirm" - elif active_directory_info.domainMembershipStatus == 'serverTrustBroken': + ) + elif ( + active_directory_info.domainMembershipStatus + == "serverTrustBroken" + ): msg += "the server side of the trust relationship is broken (or bad machine password)" - elif active_directory_info.domainMembershipStatus == 'otherProblem': + elif ( + active_directory_info.domainMembershipStatus + == "otherProblem" + ): msg += "there are some problems with the domain membership" - elif active_directory_info.domainMembershipStatus == 'unknown': + elif ( + active_directory_info.domainMembershipStatus + == "unknown" + ): msg += "the Active Directory integration provider does not support domain trust checks" - results['result'][host.name]['msg'] = msg + results["result"][host.name]["msg"] = msg # Enable and join AD domain else: if self.module.check_mode: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['ad_state_previous'] = "absent" - results['result'][host.name]['ad_state_current'] = "present" - results['result'][host.name]['msg'] = "Host would be joined to AD domain" + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "ad_state_previous" + ] = "absent" + results["result"][host.name][ + "ad_state_current" + ] = "present" + results["result"][host.name][ + "msg" + ] = "Host would be joined to AD domain" else: ad_authentication = self.get_ad_auth_object(host) try: try: task = ad_authentication.JoinDomain( - domainName=domain, userName=ad_user, password=ad_password + domainName=domain, + userName=ad_user, + password=ad_password, ) wait_for_task(task) except TaskError as task_err: self.module.fail_json( - msg="Failed to join domain : %s" % to_native(task_err) + msg="Failed to join domain : %s" + % to_native(task_err) ) - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['ad_state_previous'] = "absent" - results['result'][host.name]['ad_state_current'] = "present" - results['result'][host.name]['msg'] = "Host joined to AD domain" + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "ad_state_previous" + ] = "absent" + results["result"][host.name][ + "ad_state_current" + ] = "present" + results["result"][host.name][ + "msg" + ] = "Host joined to AD domain" active_directory_info = self.get_ad_info(host) - results['result'][host.name]['membership_state'] = active_directory_info.domainMembershipStatus + results["result"][host.name][ + "membership_state" + ] = active_directory_info.domainMembershipStatus except vim.fault.InvalidState as invalid_state: self.module.fail_json( - msg="The host has already joined a domain : %s" % to_native(invalid_state.msg) + msg="The host has already joined a domain : %s" + % to_native(invalid_state.msg) ) except vim.fault.HostConfigFault as host_fault: self.module.fail_json( 
- msg="The host configuration prevents the join operation from succeeding : %s" % - to_native(host_fault.msg) + msg="The host configuration prevents the join operation from succeeding : %s" + % to_native(host_fault.msg) ) except vim.fault.InvalidLogin as invalid_login: self.module.fail_json( - msg="Credentials aren't valid : %s" % to_native(invalid_login.msg) + msg="Credentials aren't valid : %s" + % to_native(invalid_login.msg) ) except vim.fault.TaskInProgress as task_in_progress: self.module.fail_json( - msg="The ActiveDirectoryAuthentication object is busy : %s" % - to_native(task_in_progress.msg) + msg="The ActiveDirectoryAuthentication object is busy : %s" + % to_native(task_in_progress.msg) ) except vim.fault.BlockedByFirewall as blocked_by_firewall: self.module.fail_json( - msg="Ports needed by the join operation are blocked by the firewall : %s" % - to_native(blocked_by_firewall.msg) + msg="Ports needed by the join operation are blocked by the firewall : %s" + % to_native(blocked_by_firewall.msg) ) except vim.fault.DomainNotFound as not_found: self.module.fail_json( - msg="The domain controller can't be reached : %s" % to_native(not_found.msg) + msg="The domain controller can't be reached : %s" + % to_native(not_found.msg) ) except vim.fault.NoPermissionOnAD as no_permission: self.module.fail_json( - msg="The specified user has no right to add hosts to the domain : %s" % - to_native(no_permission.msg) + msg="The specified user has no right to add hosts to the domain : %s" + % to_native(no_permission.msg) ) except vim.fault.InvalidHostName as invalid_host: self.module.fail_json( - msg="The domain part of the host's FQDN doesn't match the domain being joined : %s" % - to_native(invalid_host.msg) + msg="The domain part of the host's FQDN doesn't match the domain being joined : %s" + % to_native(invalid_host.msg) ) except vim.fault.ClockSkew as clock_skew: self.module.fail_json( msg="The clocks of the host and the domain controller differ by more " - "than the allowed amount of time : %s" % to_native(clock_skew.msg) + "than the allowed amount of time : %s" + % to_native(clock_skew.msg) ) except vim.fault.ActiveDirectoryFault as ad_fault: self.module.fail_json( - msg="An error occurred during AD join : %s" % - to_native(ad_fault.msg) + msg="An error occurred during AD join : %s" + % to_native(ad_fault.msg) ) - elif desired_state == 'absent': + elif desired_state == "absent": # Don't do anything not joined to any AD domain if not active_directory_info.enabled: - results['result'][host.name]['changed'] = False - results['result'][host.name]['ad_state_current'] = "absent" - results['result'][host.name]['msg'] = "Host isn't joined to an AD domain" + results["result"][host.name]["changed"] = False + results["result"][host.name]["ad_state_current"] = "absent" + results["result"][host.name][ + "msg" + ] = "Host isn't joined to an AD domain" # Disable and leave AD domain else: if self.module.check_mode: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['ad_state_previous'] = "present" - results['result'][host.name]['ad_state_current'] = "absent" - results['result'][host.name]['msg'] = "Host would leave the AD domain '%s'" % \ - active_directory_info.joinedDomain + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "ad_state_previous" + ] = "present" + results["result"][host.name][ + "ad_state_current" + ] = "absent" + results["result"][host.name]["msg"] = ( + "Host would leave the AD domain '%s'" + % 
active_directory_info.joinedDomain + ) else: ad_authentication = self.get_ad_auth_object(host) try: try: - task = ad_authentication.LeaveCurrentDomain(force=True) + task = ad_authentication.LeaveCurrentDomain( + force=True + ) wait_for_task(task) except TaskError as task_err: self.module.fail_json( - msg="Failed to join domain : %s" % to_native(task_err) + msg="Failed to join domain : %s" + % to_native(task_err) ) - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['ad_state_previous'] = "present" - results['result'][host.name]['ad_state_current'] = "absent" - results['result'][host.name]['msg'] = "Host left the AD domain '%s'" % \ - active_directory_info.joinedDomain + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "ad_state_previous" + ] = "present" + results["result"][host.name][ + "ad_state_current" + ] = "absent" + results["result"][host.name]["msg"] = ( + "Host left the AD domain '%s'" + % active_directory_info.joinedDomain + ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( msg="The host is not in a domain or there are active permissions for " - "Active Directory users : %s" % to_native(invalid_state.msg) + "Active Directory users : %s" + % to_native(invalid_state.msg) ) except vim.fault.AuthMinimumAdminPermission as admin_permission: self.module.fail_json( msg="This change would leave the system with no Administrator permission " - "on the root node : %s" % to_native(admin_permission.msg) + "on the root node : %s" + % to_native(admin_permission.msg) ) except vim.fault.TaskInProgress as task_in_progress: self.module.fail_json( - msg="The ActiveDirectoryAuthentication object is busy : %s" % - to_native(task_in_progress.msg) + msg="The ActiveDirectoryAuthentication object is busy : %s" + % to_native(task_in_progress.msg) ) except vim.fault.NonADUserRequired as non_ad_user: self.module.fail_json( - msg="Only non Active Directory users can initiate the leave domain operation : %s" % - to_native(non_ad_user.msg) + msg="Only non Active Directory users can initiate the leave domain operation : %s" + % to_native(non_ad_user.msg) ) except vim.fault.ActiveDirectoryFault as ad_fault: self.module.fail_json( - msg="An error occurred during AD leave : %s" % - to_native(ad_fault.msg) + msg="An error occurred during AD leave : %s" + % to_native(ad_fault.msg) ) host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) def get_ad_info(self, host_object): """Get info about AD membership""" active_directory_info = None - authentication_store_info = host_object.config.authenticationManagerInfo.authConfig + authentication_store_info = ( + host_object.config.authenticationManagerInfo.authConfig + ) for authentication_info in authentication_store_info: if isinstance(authentication_info, vim.host.ActiveDirectoryInfo): active_directory_info = authentication_info @@ -327,7 +419,9 @@ def get_ad_info(self, host_object): def get_ad_auth_object(self, host_object): """Get AD authentication managed object""" ad_authentication = None - authentication_store_info = host_object.configManager.authenticationManager.supportedStore + authentication_store_info = ( + host_object.configManager.authenticationManager.supportedStore + ) for store_info in authentication_store_info: if isinstance(store_info, vim.host.ActiveDirectoryAuthentication): ad_authentication = store_info @@ -343,28 +437,30 @@ def main(): """Main""" 
argument_spec = vmware_argument_spec() argument_spec.update( - ad_domain=dict(type='str', default='', aliases=['domain', 'domain_name']), - ad_user=dict(type='str', default=''), - ad_password=dict(type='str', default='', no_log=True), - ad_state=dict(default='absent', choices=['present', 'absent'], aliases=['state']), - esxi_hostname=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), + ad_domain=dict( + type="str", default="", aliases=["domain", "domain_name"] + ), + ad_user=dict(type="str", default=""), + ad_password=dict(type="str", default="", no_log=True), + ad_state=dict( + default="absent", choices=["present", "absent"], aliases=["state"] + ), + esxi_hostname=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], required_if=[ - ['ad_state', 'present', ['ad_domain', 'ad_user', 'ad_password']], + ["ad_state", "present", ["ad_domain", "ad_user", "ad_password"]] ], - supports_check_mode=True + supports_check_mode=True, ) ad_auth = VmwareHostAdAuthentication(module) ad_auth.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_auto_start.py b/plugins/modules/vmware_host_auto_start.py index ccd47bd..8437c18 100644 --- a/plugins/modules/vmware_host_auto_start.py +++ b/plugins/modules/vmware_host_auto_start.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ module: vmware_host_auto_start short_description: Manage the auto power ON or OFF for vm on ESXi host author: @@ -134,9 +135,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ --- - name: Update for system defaults config. vmware_host_auto_start: @@ -164,9 +165,9 @@ start_order: 1 stop_action: powerOff wait_for_heartbeat: yes -''' +""" -RETURN = ''' +RETURN = """ system_defaults_config: description: Parameter return when system defaults config is changed. 
returned: changed @@ -192,7 +193,7 @@ "stop_delay": -1, "wait_for_heartbeat": "systemDefault" } -''' +""" try: from pyVmomi import vim, vmodl @@ -208,83 +209,108 @@ pass from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils.basic import AnsibleModule class VMwareHostAutoStartManager(PyVmomi): def __init__(self, module): super(VMwareHostAutoStartManager, self).__init__(module) - self.esxi_hostname = self.params['esxi_hostname'] - self.name = self.params['name'] - self.uuid = self.params['uuid'] - self.moid = self.params['moid'] - self.system_defaults = self.params['system_defaults'] - self.power_info = self.params['power_info'] + self.esxi_hostname = self.params["esxi_hostname"] + self.name = self.params["name"] + self.uuid = self.params["uuid"] + self.moid = self.params["moid"] + self.system_defaults = self.params["system_defaults"] + self.power_info = self.params["power_info"] def generate_system_defaults_config(self): system_defaults_config = vim.host.AutoStartManager.SystemDefaults() - system_defaults_config.enabled = self.system_defaults['enabled'] - system_defaults_config.startDelay = self.system_defaults['start_delay'] - system_defaults_config.stopAction = self.system_defaults['stop_action'] - system_defaults_config.stopDelay = self.system_defaults['stop_delay'] - system_defaults_config.waitForHeartbeat = self.system_defaults['wait_for_heartbeat'] + system_defaults_config.enabled = self.system_defaults["enabled"] + system_defaults_config.startDelay = self.system_defaults["start_delay"] + system_defaults_config.stopAction = self.system_defaults["stop_action"] + system_defaults_config.stopDelay = self.system_defaults["stop_delay"] + system_defaults_config.waitForHeartbeat = self.system_defaults[ + "wait_for_heartbeat" + ] return system_defaults_config def generate_power_info_config(self): power_info_config = vim.host.AutoStartManager.AutoPowerInfo() power_info_config.key = self.vm_obj - power_info_config.startAction = self.power_info['start_action'] - power_info_config.startDelay = self.power_info['start_delay'] - power_info_config.startOrder = self.power_info['start_order'] - power_info_config.stopAction = self.power_info['stop_action'] - power_info_config.stopDelay = self.power_info['stop_delay'] - power_info_config.waitForHeartbeat = self.power_info['wait_for_heartbeat'] + power_info_config.startAction = self.power_info["start_action"] + power_info_config.startDelay = self.power_info["start_delay"] + power_info_config.startOrder = self.power_info["start_order"] + power_info_config.stopAction = self.power_info["stop_action"] + power_info_config.stopDelay = self.power_info["stop_delay"] + power_info_config.waitForHeartbeat = self.power_info[ + "wait_for_heartbeat" + ] return power_info_config def execute(self): - result = dict(changed=False, diff={'before': {}, 'after': {}}) + result = dict(changed=False, diff={"before": {}, "after": {}}) host_obj = self.find_hostsystem_by_name(self.esxi_hostname) if not host_obj: - self.module.fail_json(msg="Cannot find the specified ESXi host: %s" % self.esxi_hostname) + self.module.fail_json( + msg="Cannot find the specified ESXi host: %s" + % self.esxi_hostname + ) self.vm_obj = None if self.name or self.uuid or self.moid: self.vm_obj = self.get_vm() if not self.vm_obj: - self.module.fail_json(msg="Cannot 
find the specified VM: %s" % (self.name or self.uuid or self.moid)) + self.module.fail_json( + msg="Cannot find the specified VM: %s" + % (self.name or self.uuid or self.moid) + ) elif self.esxi_hostname != self.vm_obj.runtime.host.name: - self.module.fail_json(msg="%s exists on another host: %s" % (self.name or self.uuid or self.moid, self.vm_obj.runtime.host.name)) + self.module.fail_json( + msg="%s exists on another host: %s" + % ( + self.name or self.uuid or self.moid, + self.vm_obj.runtime.host.name, + ) + ) # Check the existing autoStart setting difference. system_defaults_config_difference = False - existing_system_defaults = self.to_json(host_obj.config.autoStart.defaults) + existing_system_defaults = self.to_json( + host_obj.config.autoStart.defaults + ) system_defaults_for_compare = dict( - enabled=existing_system_defaults['enabled'], - start_delay=existing_system_defaults['startDelay'], - stop_action=existing_system_defaults['stopAction'], - stop_delay=existing_system_defaults['stopDelay'], - wait_for_heartbeat=existing_system_defaults['waitForHeartbeat'] + enabled=existing_system_defaults["enabled"], + start_delay=existing_system_defaults["startDelay"], + stop_action=existing_system_defaults["stopAction"], + stop_delay=existing_system_defaults["stopDelay"], + wait_for_heartbeat=existing_system_defaults["waitForHeartbeat"], ) if self.system_defaults: - if 'guestshutdown' == system_defaults_for_compare['stop_action']: - system_defaults_for_compare['stop_action'] = 'guestShutdown' + if "guestshutdown" == system_defaults_for_compare["stop_action"]: + system_defaults_for_compare["stop_action"] = "guestShutdown" - if 'poweroff' == system_defaults_for_compare['stop_action']: - system_defaults_for_compare['stop_action'] = 'powerOff' + if "poweroff" == system_defaults_for_compare["stop_action"]: + system_defaults_for_compare["stop_action"] = "powerOff" if system_defaults_for_compare != self.system_defaults: - result['diff']['before']['system_defaults'] = OrderedDict(sorted(system_defaults_for_compare.items())) - result['diff']['after']['system_defaults'] = OrderedDict(sorted(self.system_defaults.items())) + result["diff"]["before"]["system_defaults"] = OrderedDict( + sorted(system_defaults_for_compare.items()) + ) + result["diff"]["after"]["system_defaults"] = OrderedDict( + sorted(self.system_defaults.items()) + ) system_defaults_config_difference = True # Check the existing autoStart powerInfo setting difference for VM. 
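For orientation, a condensed sketch of what generate_system_defaults_config(), generate_power_info_config() and the ReconfigureAutostart() call further down amount to. It assumes host_obj and vm_obj are an already-retrieved HostSystem and VirtualMachine managed object (placeholders, obtained elsewhere via the PyVmomi helpers this module uses).

# Illustrative only -- mirrors the pyVmomi objects used by this module.
from pyVmomi import vim

defaults = vim.host.AutoStartManager.SystemDefaults()
defaults.enabled = True
defaults.startDelay = 120
defaults.stopAction = "powerOff"
defaults.stopDelay = 120
defaults.waitForHeartbeat = False

power_info = vim.host.AutoStartManager.AutoPowerInfo()
power_info.key = vm_obj                 # the VM this auto-start rule applies to (assumed)
power_info.startAction = "powerOn"
power_info.startDelay = -1              # -1 means "use the system default"
power_info.startOrder = 1
power_info.stopAction = "systemDefault"
power_info.stopDelay = -1
power_info.waitForHeartbeat = "systemDefault"

spec = vim.host.AutoStartManager.Config()
spec.defaults = defaults
spec.powerInfo = [power_info]
host_obj.configManager.autoStartManager.ReconfigureAutostart(spec=spec)  # host_obj assumed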
vm_power_info_config_difference = False existing_vm_power_info = {} - if system_defaults_for_compare['enabled'] and self.vm_obj: + if system_defaults_for_compare["enabled"] and self.vm_obj: for vm_power_info in host_obj.config.autoStart.powerInfo: if vm_power_info.key == self.vm_obj: existing_vm_power_info = self.to_json(vm_power_info) @@ -292,46 +318,61 @@ def execute(self): if existing_vm_power_info: vm_power_info_for_compare = dict( - start_action=existing_vm_power_info['startAction'], - start_delay=existing_vm_power_info['startDelay'], - start_order=existing_vm_power_info['startOrder'], - stop_action=existing_vm_power_info['stopAction'], - stop_delay=existing_vm_power_info['stopDelay'], - wait_for_heartbeat=existing_vm_power_info['waitForHeartbeat'] + start_action=existing_vm_power_info["startAction"], + start_delay=existing_vm_power_info["startDelay"], + start_order=existing_vm_power_info["startOrder"], + stop_action=existing_vm_power_info["stopAction"], + stop_delay=existing_vm_power_info["stopDelay"], + wait_for_heartbeat=existing_vm_power_info[ + "waitForHeartbeat" + ], ) else: vm_power_info_for_compare = dict( - start_action='none', + start_action="none", start_delay=-1, start_order=-1, - stop_action='systemDefault', + stop_action="systemDefault", stop_delay=-1, - wait_for_heartbeat='systemDefault' + wait_for_heartbeat="systemDefault", ) if vm_power_info_for_compare != self.power_info: - result['diff']['before']['power_info'] = OrderedDict(sorted(vm_power_info_for_compare.items())) - result['diff']['after']['power_info'] = OrderedDict(sorted(self.power_info.items())) + result["diff"]["before"]["power_info"] = OrderedDict( + sorted(vm_power_info_for_compare.items()) + ) + result["diff"]["after"]["power_info"] = OrderedDict( + sorted(self.power_info.items()) + ) vm_power_info_config_difference = True auto_start_manager_config = vim.host.AutoStartManager.Config() auto_start_manager_config.powerInfo = [] - if system_defaults_config_difference or vm_power_info_config_difference: + if ( + system_defaults_config_difference + or vm_power_info_config_difference + ): if system_defaults_config_difference: - auto_start_manager_config.defaults = self.generate_system_defaults_config() - result['system_defaults_config'] = self.system_defaults + auto_start_manager_config.defaults = ( + self.generate_system_defaults_config() + ) + result["system_defaults_config"] = self.system_defaults if vm_power_info_config_difference: - auto_start_manager_config.powerInfo = [self.generate_power_info_config()] - result['power_info_config'] = self.power_info + auto_start_manager_config.powerInfo = [ + self.generate_power_info_config() + ] + result["power_info_config"] = self.power_info if self.module.check_mode: - result['changed'] = True + result["changed"] = True self.module.exit_json(**result) try: - host_obj.configManager.autoStartManager.ReconfigureAutostart(spec=auto_start_manager_config) - result['changed'] = True + host_obj.configManager.autoStartManager.ReconfigureAutostart( + spec=auto_start_manager_config + ) + result["changed"] = True self.module.exit_json(**result) except Exception as e: self.module.fail_json(msg=to_native(e)) @@ -343,42 +384,60 @@ def execute(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(esxi_hostname=dict(type='str', required=True), - name=dict(type='str'), - uuid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - moid=dict(type='str'), - system_defaults=dict(type='dict', - options=dict( - enabled=dict(type='bool', 
default=False), - start_delay=dict(type='int', default=120), - stop_action=dict(type='str', choices=['none', 'guestShutdown', - 'powerOff', 'suspend'], - default='powerOff'), - stop_delay=dict(type='int', default=120), - wait_for_heartbeat=dict(type='bool', default=False)), - ), - power_info=dict(type='dict', - options=dict( - start_action=dict(type='str', choices=['none', 'powerOn'], default='none'), - start_delay=dict(type='int', default=-1), - start_order=dict(type='int', default=-1), - stop_action=dict(type='str', choices=['none', 'systemDefault', 'powerOff', - 'suspend'], default='systemDefault'), - stop_delay=dict(type='int', default=-1), - wait_for_heartbeat=dict(type='str', choices=['no', 'yes', 'systemDefault'], - default='systemDefault')), - default=dict( - start_action='none', - start_delay=-1, - start_order=-1, - stop_action='systemDefault', - stop_delay=-1, - wait_for_heartbeat='systemDefault' - )) - ) - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + argument_spec.update( + esxi_hostname=dict(type="str", required=True), + name=dict(type="str"), + uuid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + moid=dict(type="str"), + system_defaults=dict( + type="dict", + options=dict( + enabled=dict(type="bool", default=False), + start_delay=dict(type="int", default=120), + stop_action=dict( + type="str", + choices=["none", "guestShutdown", "powerOff", "suspend"], + default="powerOff", + ), + stop_delay=dict(type="int", default=120), + wait_for_heartbeat=dict(type="bool", default=False), + ), + ), + power_info=dict( + type="dict", + options=dict( + start_action=dict( + type="str", choices=["none", "powerOn"], default="none" + ), + start_delay=dict(type="int", default=-1), + start_order=dict(type="int", default=-1), + stop_action=dict( + type="str", + choices=["none", "systemDefault", "powerOff", "suspend"], + default="systemDefault", + ), + stop_delay=dict(type="int", default=-1), + wait_for_heartbeat=dict( + type="str", + choices=["no", "yes", "systemDefault"], + default="systemDefault", + ), + ), + default=dict( + start_action="none", + start_delay=-1, + start_order=-1, + stop_action="systemDefault", + stop_delay=-1, + wait_for_heartbeat="systemDefault", + ), + ), + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_host_auto_start = VMwareHostAutoStartManager(module) vmware_host_auto_start.execute() diff --git a/plugins/modules/vmware_host_capability_info.py b/plugins/modules/vmware_host_capability_info.py index 0d4d437..cebf482 100644 --- a/plugins/modules/vmware_host_capability_info.py +++ b/plugins/modules/vmware_host_capability_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_capability_info short_description: Gathers info about an ESXi host's capability information @@ -40,9 +41,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather capability info about all ESXi Host in given Cluster vmware_host_capability_info: hostname: '{{ vcenter_hostname }}' @@ -60,9 +61,9 @@ esxi_hostname: '{{ 
esxi_hostname }}' delegate_to: localhost register: hosts_info -''' +""" -RETURN = r''' +RETURN = r""" hosts_capability_info: description: metadata about host's capability info returned: always @@ -77,18 +78,23 @@ "cpuHwMmuSupported": true, } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class CapabilityInfoManager(PyVmomi): def __init__(self, module): super(CapabilityInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_capability_info(self): hosts_capability_info = dict() @@ -189,12 +195,24 @@ def gather_host_capability_info(self): encryptionVFlashSupported=hc.encryptionVFlashSupported, encryptionCBRCSupported=hc.encryptionCBRCSupported, encryptionHBRSupported=hc.encryptionHBRSupported, - supportedVmfsMajorVersion=[version for version in hc.supportedVmfsMajorVersion], - vmDirectPathGen2UnsupportedReason=[reason for reason in hc.vmDirectPathGen2UnsupportedReason], - ftCompatibilityIssues=[issue for issue in hc.ftCompatibilityIssues], - checkpointFtCompatibilityIssues=[issue for issue in hc.checkpointFtCompatibilityIssues], - smpFtCompatibilityIssues=[issue for issue in hc.smpFtCompatibilityIssues], - replayCompatibilityIssues=[issue for issue in hc.replayCompatibilityIssues], + supportedVmfsMajorVersion=[ + version for version in hc.supportedVmfsMajorVersion + ], + vmDirectPathGen2UnsupportedReason=[ + reason for reason in hc.vmDirectPathGen2UnsupportedReason + ], + ftCompatibilityIssues=[ + issue for issue in hc.ftCompatibilityIssues + ], + checkpointFtCompatibilityIssues=[ + issue for issue in hc.checkpointFtCompatibilityIssues + ], + smpFtCompatibilityIssues=[ + issue for issue in hc.smpFtCompatibilityIssues + ], + replayCompatibilityIssues=[ + issue for issue in hc.replayCompatibilityIssues + ], ) return hosts_capability_info @@ -202,21 +220,21 @@ def gather_host_capability_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_capability_manager = CapabilityInfoManager(module) - module.exit_json(changed=False, - hosts_capability_info=host_capability_manager.gather_host_capability_info()) + module.exit_json( + changed=False, + hosts_capability_info=host_capability_manager.gather_host_capability_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_config_info.py b/plugins/modules/vmware_host_config_info.py index cefbc7a..88750f5 100644 --- a/plugins/modules/vmware_host_config_info.py +++ b/plugins/modules/vmware_host_config_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ 
(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_config_info short_description: Gathers info about an ESXi host's advance configuration information @@ -40,9 +41,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about all ESXi Host in given Cluster vmware_host_config_info: hostname: '{{ vcenter_hostname }}' @@ -58,9 +59,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_info: description: - dict with hostname as key and dict with host config information @@ -75,18 +76,23 @@ "BufferCache.SoftMaxDirty": 15, } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareConfigInfoManager(PyVmomi): def __init__(self, module): super(VmwareConfigInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_info(self): hosts_info = {} @@ -101,20 +107,20 @@ def gather_host_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_host_config = VmwareConfigInfoManager(module) - module.exit_json(changed=False, hosts_info=vmware_host_config.gather_host_info()) + module.exit_json( + changed=False, hosts_info=vmware_host_config.gather_host_info() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_config_manager.py b/plugins/modules/vmware_host_config_manager.py index 50cce37..97a71ab 100644 --- a/plugins/modules/vmware_host_config_manager.py +++ b/plugins/modules/vmware_host_config_manager.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_config_manager short_description: Manage advanced system settings of an ESXi host @@ -49,9 +50,9 @@ 
extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Manage Log level setting for all ESXi hosts in given Cluster vmware_host_config_manager: hostname: '{{ vcenter_hostname }}' @@ -83,10 +84,10 @@ 'Annotations.WelcomeMessage': 'Hello World' 'Config.HostAgent.plugins.solo.enableMob': false delegate_to: localhost -''' +""" -RETURN = r'''# -''' +RETURN = r"""# +""" try: from pyVmomi import vim, vmodl, VmomiSupport @@ -94,7 +95,13 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, is_boolean, is_integer, is_truthy +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + is_boolean, + is_integer, + is_truthy, +) from ansible.module_utils._text import to_native from ansible.module_utils.six import integer_types, string_types @@ -102,75 +109,119 @@ class VmwareConfigManager(PyVmomi): def __init__(self, module): super(VmwareConfigManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.options = self.params.get('options', dict()) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.options = self.params.get("options", dict()) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def set_host_configuration_facts(self): changed_list = [] - message = '' + message = "" for host in self.hosts: option_manager = host.configManager.advancedOption host_facts = {} for s_option in option_manager.supportedOption: - host_facts[s_option.key] = dict(option_type=s_option.optionType, value=None) + host_facts[s_option.key] = dict( + option_type=s_option.optionType, value=None + ) for option in option_manager.QueryOptions(): if option.key in host_facts: - host_facts[option.key].update( - value=option.value, - ) + host_facts[option.key].update(value=option.value) change_option_list = [] for option_key, option_value in self.options.items(): if option_key in host_facts: # We handle all supported types here so we can give meaningful errors. 
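The branch that follows maps each requested value onto the type the host advertises for that advanced option (BoolOption, IntOption, LongOption, FloatOption, StringOption/ChoiceOption) and fails with a descriptive message on a mismatch. A simplified pure-Python sketch of that dispatch, using a plain type name string in place of the vim.option.* classes; coerce_option_value is an illustrative name, not part of the module.

# Illustrative only -- a simplified stand-in for the vim.option.* type checks below.
# 'expected' stands for the option type the host reports for this key.
def coerce_option_value(key, value, expected):
    if expected == "bool" and isinstance(value, bool):
        return value
    if expected in ("int", "long") and isinstance(value, int) and not isinstance(value, bool):
        return value
    if expected == "float" and isinstance(value, float):
        return value
    if expected in ("string", "choice") and isinstance(value, str):
        return value
    raise TypeError(
        "Provided value is of type %s. Option %s expects: %s"
        % (type(value), key, expected)
    )


print(coerce_option_value("Config.HostAgent.log.level", "info", "string"))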
- option_type = host_facts[option_key]['option_type'] - if is_boolean(option_value) and isinstance(option_type, vim.option.BoolOption): + option_type = host_facts[option_key]["option_type"] + if is_boolean(option_value) and isinstance( + option_type, vim.option.BoolOption + ): option_value = is_truthy(option_value) - elif (isinstance(option_value, integer_types) or is_integer(option_value))\ - and isinstance(option_type, vim.option.IntOption): - option_value = VmomiSupport.vmodlTypes['int'](option_value) - elif (isinstance(option_value, integer_types) or is_integer(option_value, 'long'))\ - and isinstance(option_type, vim.option.LongOption): - option_value = VmomiSupport.vmodlTypes['long'](option_value) - elif isinstance(option_value, float) and isinstance(option_type, vim.option.FloatOption): + elif ( + isinstance(option_value, integer_types) + or is_integer(option_value) + ) and isinstance(option_type, vim.option.IntOption): + option_value = VmomiSupport.vmodlTypes["int"]( + option_value + ) + elif ( + isinstance(option_value, integer_types) + or is_integer(option_value, "long") + ) and isinstance(option_type, vim.option.LongOption): + option_value = VmomiSupport.vmodlTypes["long"]( + option_value + ) + elif isinstance(option_value, float) and isinstance( + option_type, vim.option.FloatOption + ): pass - elif isinstance(option_value, string_types) and isinstance(option_type, (vim.option.StringOption, vim.option.ChoiceOption)): + elif isinstance(option_value, string_types) and isinstance( + option_type, + (vim.option.StringOption, vim.option.ChoiceOption), + ): pass else: - self.module.fail_json(msg="Provided value is of type %s." - " Option %s expects: %s" % (type(option_value), option_key, type(option_type))) - - if option_value != host_facts[option_key]['value']: - change_option_list.append(vim.option.OptionValue(key=option_key, value=option_value)) + self.module.fail_json( + msg="Provided value is of type %s." + " Option %s expects: %s" + % ( + type(option_value), + option_key, + type(option_type), + ) + ) + + if option_value != host_facts[option_key]["value"]: + change_option_list.append( + vim.option.OptionValue( + key=option_key, value=option_value + ) + ) changed_list.append(option_key) else: # Don't silently drop unknown options. This prevents typos from falling through the cracks. - self.module.fail_json(msg="Unsupported option %s" % option_key) + self.module.fail_json( + msg="Unsupported option %s" % option_key + ) if change_option_list: if self.module.check_mode: - changed_suffix = ' would be changed.' + changed_suffix = " would be changed." else: - changed_suffix = ' changed.' + changed_suffix = " changed." 
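The message assembly that follows (and that vmware_host_dns.py repeats later in this patch) turns the list of changed settings into an English phrase with an "already configured" fallback. A small standalone sketch of that join logic; describe_changes is an illustrative name only.

# Illustrative only -- reproduces the comma/'and' joining used just below
# and again by vmware_host_dns.py later in this patch.
def describe_changes(changed_list, check_mode=False):
    if not changed_list:
        return "All settings are already configured."
    if len(changed_list) > 2:
        message = ", ".join(changed_list[:-1]) + ", and " + str(changed_list[-1])
    elif len(changed_list) == 2:
        message = " and ".join(changed_list)
    else:
        message = changed_list[0]
    return message + (" would be changed." if check_mode else " changed.")


print(describe_changes(["Config.HostAgent.log.level", "Annotations.WelcomeMessage"]))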
if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix if self.module.check_mode is False: try: - option_manager.UpdateOptions(changedValue=change_option_list) - except (vmodl.fault.SystemError, vmodl.fault.InvalidArgument) as e: - self.module.fail_json(msg="Failed to update option/s as one or more OptionValue " - "contains an invalid value: %s" % to_native(e.msg)) + option_manager.UpdateOptions( + changedValue=change_option_list + ) + except ( + vmodl.fault.SystemError, + vmodl.fault.InvalidArgument, + ) as e: + self.module.fail_json( + msg="Failed to update option/s as one or more OptionValue " + "contains an invalid value: %s" % to_native(e.msg) + ) except vim.fault.InvalidName as e: - self.module.fail_json(msg="Failed to update option/s as one or more OptionValue " - "objects refers to a non-existent option : %s" % to_native(e.msg)) + self.module.fail_json( + msg="Failed to update option/s as one or more OptionValue " + "objects refers to a non-existent option : %s" + % to_native(e.msg) + ) else: - message = 'All settings are already configured.' + message = "All settings are already configured." self.module.exit_json(changed=bool(changed_list), msg=message) @@ -178,17 +229,15 @@ def set_host_configuration_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - options=dict(type='dict', default=dict(), required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + options=dict(type="dict", default=dict(), required=False), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ] + required_one_of=[["cluster_name", "esxi_hostname"]], ) vmware_host_config = VmwareConfigManager(module) diff --git a/plugins/modules/vmware_host_datastore.py b/plugins/modules/vmware_host_datastore.py index 4ae708d..2e83421 100644 --- a/plugins/modules/vmware_host_datastore.py +++ b/plugins/modules/vmware_host_datastore.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_datastore short_description: Manage a datastore on ESXi host @@ -94,9 +95,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Mount VMFS datastores to ESXi vmware_host_datastore: hostname: '{{ vcenter_hostname }}' @@ -152,10 +153,10 @@ datastore_name: NasDS_vol01 state: absent delegate_to: localhost -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" try: from pyVmomi import vim, vmodl @@ -163,7 +164,12 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, 
find_datastore_by_name, find_obj +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_datastore_by_name, + find_obj, +) from ansible.module_utils._text import to_native @@ -172,36 +178,40 @@ def __init__(self, module): super(VMwareHostDatastore, self).__init__(module) # NOTE: The below parameter is deprecated starting from Ansible v2.11 - self.datacenter_name = module.params['datacenter_name'] - self.datastore_name = module.params['datastore_name'] - self.datastore_type = module.params['datastore_type'] - self.nfs_server = module.params['nfs_server'] - self.nfs_path = module.params['nfs_path'] - self.nfs_ro = module.params['nfs_ro'] - self.vmfs_device_name = module.params['vmfs_device_name'] - self.vmfs_version = module.params['vmfs_version'] - self.esxi_hostname = module.params['esxi_hostname'] - self.state = module.params['state'] + self.datacenter_name = module.params["datacenter_name"] + self.datastore_name = module.params["datastore_name"] + self.datastore_type = module.params["datastore_type"] + self.nfs_server = module.params["nfs_server"] + self.nfs_path = module.params["nfs_path"] + self.nfs_ro = module.params["nfs_ro"] + self.vmfs_device_name = module.params["vmfs_device_name"] + self.vmfs_version = module.params["vmfs_version"] + self.esxi_hostname = module.params["esxi_hostname"] + self.state = module.params["state"] if self.is_vcenter(): if not self.esxi_hostname: - self.module.fail_json(msg="esxi_hostname is mandatory with a vcenter") + self.module.fail_json( + msg="esxi_hostname is mandatory with a vcenter" + ) self.esxi = self.find_hostsystem_by_name(self.esxi_hostname) if self.esxi is None: - self.module.fail_json(msg="Failed to find ESXi hostname %s" % self.esxi_hostname) + self.module.fail_json( + msg="Failed to find ESXi hostname %s" % self.esxi_hostname + ) else: self.esxi = find_obj(self.content, [vim.HostSystem], None) def process_state(self): ds_states = { - 'absent': { - 'present': self.umount_datastore_host, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.umount_datastore_host, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_exit_unchanged, + "absent": self.mount_datastore_host, }, - 'present': { - 'present': self.state_exit_unchanged, - 'absent': self.mount_datastore_host, - } } try: ds_states[self.state][self.check_datastore_host_state()]() @@ -215,17 +225,21 @@ def state_exit_unchanged(self): def check_datastore_host_state(self): storage_system = self.esxi.configManager.storageSystem - host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo + host_file_sys_vol_mount_info = ( + storage_system.fileSystemVolumeInfo.mountInfo + ) for host_mount_info in host_file_sys_vol_mount_info: if host_mount_info.volume.name == self.datastore_name: - return 'present' - return 'absent' + return "present" + return "absent" def get_used_disks_names(self): used_disks = [] storage_system = self.esxi.configManager.storageSystem - for each_vol_mount_info in storage_system.fileSystemVolumeInfo.mountInfo: - if hasattr(each_vol_mount_info.volume, 'extent'): + for ( + each_vol_mount_info + ) in storage_system.fileSystemVolumeInfo.mountInfo: + if hasattr(each_vol_mount_info.volume, "extent"): for each_partition in each_vol_mount_info.volume.extent: used_disks.append(each_partition.diskName) return used_disks @@ -233,32 +247,49 @@ def get_used_disks_names(self): def umount_datastore_host(self): ds = find_datastore_by_name(self.content, 
self.datastore_name) if not ds: - self.module.fail_json(msg="No datastore found with name %s" % self.datastore_name) + self.module.fail_json( + msg="No datastore found with name %s" % self.datastore_name + ) if self.module.check_mode is False: - error_message_umount = "Cannot umount datastore %s from host %s" % (self.datastore_name, self.esxi.name) + error_message_umount = ( + "Cannot umount datastore %s from host %s" + % (self.datastore_name, self.esxi.name) + ) try: self.esxi.configManager.datastoreSystem.RemoveDatastore(ds) - except (vim.fault.NotFound, vim.fault.HostConfigFault, vim.fault.ResourceInUse) as fault: - self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(fault.msg))) + except ( + vim.fault.NotFound, + vim.fault.HostConfigFault, + vim.fault.ResourceInUse, + ) as fault: + self.module.fail_json( + msg="%s: %s" % (error_message_umount, to_native(fault.msg)) + ) except Exception as e: - self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(e))) - self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name)) + self.module.fail_json( + msg="%s: %s" % (error_message_umount, to_native(e)) + ) + self.module.exit_json( + changed=True, + result="Datastore %s on host %s" + % (self.datastore_name, self.esxi.name), + ) def mount_datastore_host(self): - if self.datastore_type == 'nfs' or self.datastore_type == 'nfs41': + if self.datastore_type == "nfs" or self.datastore_type == "nfs41": self.mount_nfs_datastore_host() - if self.datastore_type == 'vmfs': + if self.datastore_type == "vmfs": self.mount_vmfs_datastore_host() def mount_nfs_datastore_host(self): if self.module.check_mode is False: mnt_specs = vim.host.NasVolume.Specification() # NFS v3 - if self.datastore_type == 'nfs': + if self.datastore_type == "nfs": mnt_specs.type = "NFS" mnt_specs.remoteHost = self.nfs_server # NFS v4.1 - if self.datastore_type == 'nfs41': + if self.datastore_type == "nfs41": mnt_specs.type = "NFS41" # remoteHost needs to be set to a non-empty string, but the value is not used mnt_specs.remoteHost = "something" @@ -269,19 +300,37 @@ def mount_nfs_datastore_host(self): mnt_specs.accessMode = "readOnly" else: mnt_specs.accessMode = "readWrite" - error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi.name) + error_message_mount = "Cannot mount datastore %s on host %s" % ( + self.datastore_name, + self.esxi.name, + ) try: - ds = self.esxi.configManager.datastoreSystem.CreateNasDatastore(mnt_specs) + ds = self.esxi.configManager.datastoreSystem.CreateNasDatastore( + mnt_specs + ) if not ds: self.module.fail_json(msg=error_message_mount) - except (vim.fault.NotFound, vim.fault.DuplicateName, - vim.fault.AlreadyExists, vim.fault.HostConfigFault, - vmodl.fault.InvalidArgument, vim.fault.NoVirtualNic, - vim.fault.NoGateway) as fault: - self.module.fail_json(msg="%s: %s" % (error_message_mount, to_native(fault.msg))) + except ( + vim.fault.NotFound, + vim.fault.DuplicateName, + vim.fault.AlreadyExists, + vim.fault.HostConfigFault, + vmodl.fault.InvalidArgument, + vim.fault.NoVirtualNic, + vim.fault.NoGateway, + ) as fault: + self.module.fail_json( + msg="%s: %s" % (error_message_mount, to_native(fault.msg)) + ) except Exception as e: - self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e))) - self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name)) + self.module.fail_json( + msg="%s : %s" % (error_message_mount, to_native(e)) 
+ ) + self.module.exit_json( + changed=True, + result="Datastore %s on host %s" + % (self.datastore_name, self.esxi.name), + ) def mount_vmfs_datastore_host(self): if self.module.check_mode is False: @@ -289,58 +338,87 @@ def mount_vmfs_datastore_host(self): host_ds_system = self.esxi.configManager.datastoreSystem ds_system = vim.host.DatastoreSystem if self.vmfs_device_name in self.get_used_disks_names(): - error_message_used_disk = "VMFS disk %s already in use" % self.vmfs_device_name + error_message_used_disk = ( + "VMFS disk %s already in use" % self.vmfs_device_name + ) self.module.fail_json(msg="%s" % error_message_used_disk) - error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi.name) + error_message_mount = "Cannot mount datastore %s on host %s" % ( + self.datastore_name, + self.esxi.name, + ) try: - vmfs_ds_options = ds_system.QueryVmfsDatastoreCreateOptions(host_ds_system, - ds_path, - self.vmfs_version) + vmfs_ds_options = ds_system.QueryVmfsDatastoreCreateOptions( + host_ds_system, ds_path, self.vmfs_version + ) vmfs_ds_options[0].spec.vmfs.volumeName = self.datastore_name - ds = ds_system.CreateVmfsDatastore(host_ds_system, - vmfs_ds_options[0].spec) - except (vim.fault.NotFound, vim.fault.DuplicateName, - vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as fault: - self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(fault.msg))) + ds = ds_system.CreateVmfsDatastore( + host_ds_system, vmfs_ds_options[0].spec + ) + except ( + vim.fault.NotFound, + vim.fault.DuplicateName, + vim.fault.HostConfigFault, + vmodl.fault.InvalidArgument, + ) as fault: + self.module.fail_json( + msg="%s : %s" % (error_message_mount, to_native(fault.msg)) + ) except Exception as e: - self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e))) - self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name)) + self.module.fail_json( + msg="%s : %s" % (error_message_mount, to_native(e)) + ) + self.module.exit_json( + changed=True, + result="Datastore %s on host %s" + % (self.datastore_name, self.esxi.name), + ) def main(): argument_spec = vmware_argument_spec() argument_spec.update( - datacenter_name=dict(type='str', required=False, removed_in_version=2.11), - datastore_name=dict(type='str', required=True), - datastore_type=dict(type='str', choices=['nfs', 'nfs41', 'vmfs']), - nfs_server=dict(type='str'), - nfs_path=dict(type='str'), - nfs_ro=dict(type='bool', default=False), - vmfs_device_name=dict(type='str'), - vmfs_version=dict(type='int'), - esxi_hostname=dict(type='str', required=False), - state=dict(type='str', default='present', choices=['absent', 'present']) + datacenter_name=dict( + type="str", required=False, removed_in_version=2.11 + ), + datastore_name=dict(type="str", required=True), + datastore_type=dict(type="str", choices=["nfs", "nfs41", "vmfs"]), + nfs_server=dict(type="str"), + nfs_path=dict(type="str"), + nfs_ro=dict(type="bool", default=False), + vmfs_device_name=dict(type="str"), + vmfs_version=dict(type="int"), + esxi_hostname=dict(type="str", required=False), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_together=[ - ['nfs_server', 'nfs_path'] - ], + required_together=[["nfs_server", "nfs_path"]], ) # more complex required_if - if module.params['state'] == 'present': - if module.params['datastore_type'] == 'nfs' and not 
module.params['nfs_server']: + if module.params["state"] == "present": + if ( + module.params["datastore_type"] == "nfs" + and not module.params["nfs_server"] + ): msg = "Missing nfs_server with datastore_type = nfs" module.fail_json(msg=msg) - if module.params['datastore_type'] == 'nfs41' and not module.params['nfs_server']: + if ( + module.params["datastore_type"] == "nfs41" + and not module.params["nfs_server"] + ): msg = "Missing nfs_server with datastore_type = nfs41" module.fail_json(msg=msg) - if module.params['datastore_type'] == 'vmfs' and not module.params['vmfs_device_name']: + if ( + module.params["datastore_type"] == "vmfs" + and not module.params["vmfs_device_name"] + ): msg = "Missing vmfs_device_name with datastore_type = vmfs" module.fail_json(msg=msg) @@ -348,5 +426,5 @@ def main(): vmware_host_datastore.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_dns.py b/plugins/modules/vmware_host_dns.py index 570d5ca..a9fee89 100644 --- a/plugins/modules/vmware_host_dns.py +++ b/plugins/modules/vmware_host_dns.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_dns short_description: Manage DNS configuration of an ESXi host system @@ -83,9 +84,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure DNS for an ESXi host vmware_host_dns: hostname: '{{ vcenter_hostname }}' @@ -128,9 +129,9 @@ type: dhcp device: vmk0 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" dns_config_result: description: metadata about host system's DNS configuration returned: always @@ -149,7 +150,7 @@ "search_domains_previous": ["example.local"], }, } -''' +""" try: from pyVmomi import vim, vmodl @@ -157,7 +158,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native @@ -166,8 +170,8 @@ class VmwareHostDNS(PyVmomi): def __init__(self, module): super(VmwareHostDNS, self).__init__(module) - self.cluster_name = self.params.get('cluster_name') - self.esxi_host_name = self.params.get('esxi_hostname') + self.cluster_name = self.params.get("cluster_name") + self.esxi_host_name = self.params.get("esxi_hostname") if self.is_vcenter(): if not self.cluster_name and not self.esxi_host_name: self.module.fail_json( @@ -182,41 +186,47 @@ def __init__(self, module): self.module.warn( "You connected directly to an ESXi host, esxi_host_name will be ignored." 
) - self.hosts = self.get_all_host_objs(cluster_name=self.cluster_name, esxi_host_name=self.esxi_host_name) + self.hosts = self.get_all_host_objs( + cluster_name=self.cluster_name, esxi_host_name=self.esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system(s).") - self.network_type = self.params.get('type') - self.vmkernel_device = self.params.get('device') - self.host_name = self.params.get('host_name') - self.domain = self.params.get('domain') - self.dns_servers = self.params.get('dns_servers') - self.search_domains = self.params.get('search_domains') + self.network_type = self.params.get("type") + self.vmkernel_device = self.params.get("device") + self.host_name = self.params.get("host_name") + self.domain = self.params.get("domain") + self.dns_servers = self.params.get("dns_servers") + self.search_domains = self.params.get("search_domains") def ensure(self): """Function to manage DNS configuration of an ESXi host system""" results = dict(changed=False, dns_config_result=dict()) - verbose = self.module.params.get('verbose', False) + verbose = self.module.params.get("verbose", False) host_change_list = [] for host in self.hosts: initial_name = host.name changed = False changed_list = [] - host_result = {'changed': '', 'msg': '', 'host_name': host.name} + host_result = {"changed": "", "msg": "", "host_name": host.name} host_netstack_config = host.config.network.netStackInstance for instance in host_netstack_config: - if instance.key == 'defaultTcpipStack': + if instance.key == "defaultTcpipStack": netstack_spec = vim.host.NetworkConfig.NetStackSpec() - netstack_spec.operation = 'edit' - netstack_spec.netStackInstance = vim.host.NetStackInstance() - netstack_spec.netStackInstance.key = 'defaultTcpipStack' + netstack_spec.operation = "edit" + netstack_spec.netStackInstance = ( + vim.host.NetStackInstance() + ) + netstack_spec.netStackInstance.key = "defaultTcpipStack" dns_config = vim.host.DnsConfig() - host_result['dns_config'] = self.network_type - host_result['search_domains'] = self.search_domains - if self.network_type == 'static': + host_result["dns_config"] = self.network_type + host_result["search_domains"] = self.search_domains + if self.network_type == "static": if self.host_name: if instance.dnsConfig.hostName != self.host_name: - host_result['host_name_previous'] = instance.dnsConfig.hostName + host_result[ + "host_name_previous" + ] = instance.dnsConfig.hostName changed = True changed_list.append("Host name") dns_config.hostName = self.host_name @@ -224,22 +234,32 @@ def ensure(self): dns_config.hostName = instance.dnsConfig.hostName if self.search_domains: - if instance.dnsConfig.searchDomain != self.search_domains: - host_result['search_domains_previous'] = instance.dnsConfig.searchDomain - host_result['search_domains_changed'] = ( - self.get_differt_entries(instance.dnsConfig.searchDomain, self.search_domains) + if ( + instance.dnsConfig.searchDomain + != self.search_domains + ): + host_result[ + "search_domains_previous" + ] = instance.dnsConfig.searchDomain + host_result[ + "search_domains_changed" + ] = self.get_differt_entries( + instance.dnsConfig.searchDomain, + self.search_domains, ) changed = True changed_list.append("Search domains") dns_config.searchDomain = self.search_domains else: - dns_config.searchDomain = instance.dnsConfig.searchDomain + dns_config.searchDomain = ( + instance.dnsConfig.searchDomain + ) if instance.dnsConfig.dhcp: - host_result['domain'] = self.domain - host_result['dns_servers'] = self.dns_servers - 
host_result['search_domains'] = self.search_domains - host_result['dns_config_previous'] = 'DHCP' + host_result["domain"] = self.domain + host_result["dns_servers"] = self.dns_servers + host_result["search_domains"] = self.search_domains + host_result["dns_config_previous"] = "DHCP" changed = True changed_list.append("DNS configuration") dns_config.dhcp = False @@ -251,30 +271,45 @@ def ensure(self): # Check host name # Check domain - host_result['domain'] = self.domain + host_result["domain"] = self.domain if self.domain: - if instance.dnsConfig.domainName != self.domain: - host_result['domain_previous'] = instance.dnsConfig.domainName + if ( + instance.dnsConfig.domainName + != self.domain + ): + host_result[ + "domain_previous" + ] = instance.dnsConfig.domainName changed = True changed_list.append("Domain") dns_config.domainName = self.domain else: - dns_config.domainName = instance.dnsConfig.domainName + dns_config.domainName = ( + instance.dnsConfig.domainName + ) # Check DNS server(s) - host_result['dns_servers'] = self.dns_servers + host_result["dns_servers"] = self.dns_servers if self.dns_servers: - if instance.dnsConfig.address != self.dns_servers: - host_result['dns_servers_previous'] = instance.dnsConfig.address - host_result['dns_servers_changed'] = ( - self.get_differt_entries(instance.dnsConfig.address, self.dns_servers) + if ( + instance.dnsConfig.address + != self.dns_servers + ): + host_result[ + "dns_servers_previous" + ] = instance.dnsConfig.address + host_result[ + "dns_servers_changed" + ] = self.get_differt_entries( + instance.dnsConfig.address, + self.dns_servers, ) changed = True # build verbose message if verbose: dns_servers_verbose_message = self.build_changed_message( instance.dnsConfig.address, - self.dns_servers + self.dns_servers, ) else: changed_list.append("DNS servers") @@ -282,9 +317,12 @@ def ensure(self): else: dns_config.address = instance.dnsConfig.address - elif self.network_type == 'dhcp' and not instance.dnsConfig.dhcp: - host_result['device'] = self.vmkernel_device - host_result['dns_config_previous'] = 'static' + elif ( + self.network_type == "dhcp" + and not instance.dnsConfig.dhcp + ): + host_result["device"] = self.vmkernel_device + host_result["dns_config_previous"] = "static" changed = True changed_list.append("DNS configuration") dns_config.dhcp = True @@ -295,71 +333,89 @@ def ensure(self): if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] if verbose and dns_servers_verbose_message: if changed_list: - message = message + changed_suffix + '. ' + dns_servers_verbose_message + '.' + message = ( + message + + changed_suffix + + ". " + + dns_servers_verbose_message + + "." 
+ ) else: message = dns_servers_verbose_message else: message += changed_suffix - host_result['changed'] = True + host_result["changed"] = True host_network_system = host.configManager.networkSystem if not self.module.check_mode: try: - host_network_system.UpdateNetworkConfig(config, 'modify') + host_network_system.UpdateNetworkConfig( + config, "modify" + ) except vim.fault.AlreadyExists: self.module.fail_json( - msg="Network entity specified in the configuration already exist on host '%s'" % host.name + msg="Network entity specified in the configuration already exist on host '%s'" + % host.name ) except vim.fault.NotFound: self.module.fail_json( - msg="Network entity specified in the configuration doesn't exist on host '%s'" % host.name + msg="Network entity specified in the configuration doesn't exist on host '%s'" + % host.name ) except vim.fault.ResourceInUse: - self.module.fail_json(msg="Resource is in use on host '%s'" % host.name) + self.module.fail_json( + msg="Resource is in use on host '%s'" % host.name + ) except vmodl.fault.InvalidArgument: self.module.fail_json( - msg="An invalid parameter is passed in for one of the networking objects for host '%s'" % - host.name + msg="An invalid parameter is passed in for one of the networking objects for host '%s'" + % host.name ) except vmodl.fault.NotSupported as not_supported: self.module.fail_json( - msg="Operation isn't supported for the instance on '%s' : %s" % - (host.name, to_native(not_supported.msg)) + msg="Operation isn't supported for the instance on '%s' : %s" + % (host.name, to_native(not_supported.msg)) ) except vim.fault.HostConfigFault as config_fault: self.module.fail_json( - msg="Failed to configure TCP/IP stacks for host '%s' due to : %s" % - (host.name, to_native(config_fault.msg)) + msg="Failed to configure TCP/IP stacks for host '%s' due to : %s" + % (host.name, to_native(config_fault.msg)) ) else: - host_result['changed'] = False - message = 'All settings are already configured' + host_result["changed"] = False + message = "All settings are already configured" - host_result['msg'] = message - results['dns_config_result'][initial_name] = host_result + host_result["msg"] = message + results["dns_config_result"][initial_name] = host_result host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) def build_changed_message(self, dns_servers_configured, dns_servers_new): """Build changed message""" - check_mode = 'would be ' if self.module.check_mode else '' + check_mode = "would be " if self.module.check_mode else "" # get differences add = self.get_not_in_list_one(dns_servers_new, dns_servers_configured) - remove = self.get_not_in_list_one(dns_servers_configured, dns_servers_new) + remove = self.get_not_in_list_one( + dns_servers_configured, dns_servers_new + ) diff_servers = list(dns_servers_configured) if add and remove: for server in add: @@ -368,40 +424,55 @@ def build_changed_message(self, dns_servers_configured, dns_servers_new): diff_servers.remove(server) if dns_servers_new != diff_servers: message = ( - "DNS server %s %sadded and %s %sremoved and the server sequence %schanged as well" % - (self.array_to_string(add), check_mode, self.array_to_string(remove), check_mode, check_mode) + "DNS server %s %sadded and %s %sremoved and the server sequence %schanged as well" + % ( + self.array_to_string(add), + check_mode, + self.array_to_string(remove), + check_mode, + check_mode, + ) ) else: if dns_servers_new != 
dns_servers_configured: - message = ( - "DNS server %s %sreplaced with %s" % - (self.array_to_string(remove), check_mode, self.array_to_string(add)) + message = "DNS server %s %sreplaced with %s" % ( + self.array_to_string(remove), + check_mode, + self.array_to_string(add), ) else: - message = ( - "DNS server %s %sremoved and %s %sadded" % - (self.array_to_string(remove), check_mode, self.array_to_string(add), check_mode) + message = "DNS server %s %sremoved and %s %sadded" % ( + self.array_to_string(remove), + check_mode, + self.array_to_string(add), + check_mode, ) elif add: for server in add: diff_servers.append(server) if dns_servers_new != diff_servers: message = ( - "DNS server %s %sadded and the server sequence %schanged as well" % - (self.array_to_string(add), check_mode, check_mode) + "DNS server %s %sadded and the server sequence %schanged as well" + % (self.array_to_string(add), check_mode, check_mode) ) else: - message = "DNS server %s %sadded" % (self.array_to_string(add), check_mode) + message = "DNS server %s %sadded" % ( + self.array_to_string(add), + check_mode, + ) elif remove: for server in remove: diff_servers.remove(server) if dns_servers_new != diff_servers: message = ( - "DNS server %s %sremoved and the server sequence %schanged as well" % - (self.array_to_string(remove), check_mode, check_mode) + "DNS server %s %sremoved and the server sequence %schanged as well" + % (self.array_to_string(remove), check_mode, check_mode) ) else: - message = "DNS server %s %sremoved" % (self.array_to_string(remove), check_mode) + message = "DNS server %s %sremoved" % ( + self.array_to_string(remove), + check_mode, + ) else: message = "DNS server sequence %schanged" % check_mode @@ -417,11 +488,12 @@ def array_to_string(array): """Return string from array""" if len(array) > 2: string = ( - ', '.join("'{0}'".format(element) for element in array[:-1]) + ', and ' + ", ".join("'{0}'".format(element) for element in array[:-1]) + + ", and " + "'{0}'".format(str(array[-1])) ) elif len(array) == 2: - string = ' and '.join("'{0}'".format(element) for element in array) + string = " and ".join("'{0}'".format(element) for element in array) elif len(array) == 1: string = "'{0}'".format(array[0]) return string @@ -429,44 +501,44 @@ def array_to_string(array): @staticmethod def get_differt_entries(list1, list2): """Return different entries of two lists""" - return [a for a in list1 + list2 if (a not in list1) or (a not in list2)] + return [ + a for a in list1 + list2 if (a not in list1) or (a not in list2) + ] def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - type=dict(required=True, type='str', choices=['dhcp', 'static']), - device=dict(type='str'), - host_name=dict(required=False, type='str'), - domain=dict(required=False, type='str'), - dns_servers=dict(required=False, type='list'), - search_domains=dict(required=False, type='list'), - esxi_hostname=dict(required=False, type='str'), - cluster_name=dict(required=False, type='str'), - verbose=dict(type='bool', default=False, required=False) + type=dict(required=True, type="str", choices=["dhcp", "static"]), + device=dict(type="str"), + host_name=dict(required=False, type="str"), + domain=dict(required=False, type="str"), + dns_servers=dict(required=False, type="list"), + search_domains=dict(required=False, type="list"), + esxi_hostname=dict(required=False, type="str"), + cluster_name=dict(required=False, type="str"), + verbose=dict(type="bool", default=False, required=False), ) module = AnsibleModule( 
argument_spec=argument_spec, - required_if=[ - ['type', 'dhcp', ['device']], - ], + required_if=[["type", "dhcp", ["device"]]], mutually_exclusive=[ - ['cluster_name', 'host_name'], - ['cluster_name', 'esxi_host_name'], - ['static', 'device'], - ['dhcp', 'host_name'], - ['dhcp', 'domain'], - ['dhcp', 'dns_servers'], - ['dhcp', 'search_domains'], + ["cluster_name", "host_name"], + ["cluster_name", "esxi_host_name"], + ["static", "device"], + ["dhcp", "host_name"], + ["dhcp", "domain"], + ["dhcp", "dns_servers"], + ["dhcp", "search_domains"], ], - supports_check_mode=True + supports_check_mode=True, ) dns = VmwareHostDNS(module) dns.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_dns_info.py b/plugins/modules/vmware_host_dns_info.py index 4a327e6..4c440be 100644 --- a/plugins/modules/vmware_host_dns_info.py +++ b/plugins/modules/vmware_host_dns_info.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_dns_info short_description: Gathers info about an ESXi host's DNS configuration information @@ -42,9 +43,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather DNS info about all ESXi Hosts in given Cluster vmware_host_dns_info: hostname: '{{ vcenter_hostname }}' @@ -60,9 +61,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_dns_info: description: metadata about DNS config from given cluster / host system returned: always @@ -81,30 +82,37 @@ "virtual_nic_device": "vmk0" } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareDnsInfoManager(PyVmomi): def __init__(self, module): super(VmwareDnsInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_dns_info(self): hosts_info = {} for host in self.hosts: host_info = {} dns_config = host.config.network.dnsConfig - host_info['dhcp'] = dns_config.dhcp - host_info['virtual_nic_device'] = dns_config.virtualNicDevice - host_info['host_name'] = dns_config.hostName - host_info['domain_name'] = dns_config.domainName - host_info['ip_address'] = [ip for ip in dns_config.address] - host_info['search_domain'] = [domain for domain in dns_config.searchDomain] + host_info["dhcp"] = dns_config.dhcp + host_info["virtual_nic_device"] = dns_config.virtualNicDevice + host_info["host_name"] = dns_config.hostName + host_info["domain_name"] = dns_config.domainName + 
host_info["ip_address"] = [ip for ip in dns_config.address] + host_info["search_domain"] = [ + domain for domain in dns_config.searchDomain + ] hosts_info[host.name] = host_info return hosts_info @@ -112,20 +120,20 @@ def gather_dns_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_dns_config = VmwareDnsInfoManager(module) - module.exit_json(changed=False, hosts_dns_info=vmware_dns_config.gather_dns_info()) + module.exit_json( + changed=False, hosts_dns_info=vmware_dns_config.gather_dns_info() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_facts.py b/plugins/modules/vmware_host_facts.py index 638c1a3..0faa67e 100644 --- a/plugins/modules/vmware_host_facts.py +++ b/plugins/modules/vmware_host_facts.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_facts short_description: Gathers facts about remote ESXi hostsystem @@ -71,9 +72,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather vmware host facts vmware_host_facts: hostname: "{{ esxi_server }}" @@ -123,9 +124,9 @@ - config.product.apiVersion - overallStatus register: host_facts -''' +""" -RETURN = r''' +RETURN = r""" ansible_facts: description: system info about the host machine returned: always @@ -186,30 +187,40 @@ } ], } -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.formatters import bytes_to_human -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_obj, +) try: from pyVmomi import vim except ImportError: pass -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VMwareHostFactManager(PyVmomi): def __init__(self, module): super(VMwareHostFactManager, self).__init__(module) - esxi_host_name = self.params.get('esxi_hostname', None) + esxi_host_name = self.params.get("esxi_hostname", None) if self.is_vcenter(): if esxi_host_name is None: - self.module.fail_json(msg="Connected to a vCenter system without specifying esxi_hostname") + self.module.fail_json( + msg="Connected to a vCenter system without specifying esxi_hostname" + ) self.host = self.get_all_host_objs(esxi_host_name=esxi_host_name) if len(self.host) > 1: - self.module.fail_json(msg="esxi_hostname matched multiple hosts") + self.module.fail_json( + msg="esxi_hostname matched multiple hosts" + ) self.host = 
self.host[0] else: self.host = find_obj(self.content, [vim.HostSystem], None) @@ -226,18 +237,22 @@ def all_facts(self): ansible_facts.update(self.get_system_facts()) ansible_facts.update(self.get_vsan_facts()) ansible_facts.update(self.get_cluster_facts()) - if self.params.get('show_tag'): + if self.params.get("show_tag"): vmware_client = VmwareRestClient(self.module) tag_info = { - 'tags': vmware_client.get_tags_for_hostsystem(hostsystem_mid=self.host._moId) + "tags": vmware_client.get_tags_for_hostsystem( + hostsystem_mid=self.host._moId + ) } ansible_facts.update(tag_info) self.module.exit_json(changed=False, ansible_facts=ansible_facts) def get_cluster_facts(self): - cluster_facts = {'cluster': None} - if self.host.parent and isinstance(self.host.parent, vim.ClusterComputeResource): + cluster_facts = {"cluster": None} + if self.host.parent and isinstance( + self.host.parent, vim.ClusterComputeResource + ): cluster_facts.update(cluster=self.host.parent.name) return cluster_facts @@ -245,92 +260,97 @@ def get_vsan_facts(self): config_mgr = self.host.configManager.vsanSystem if config_mgr is None: return { - 'vsan_cluster_uuid': None, - 'vsan_node_uuid': None, - 'vsan_health': "unknown", + "vsan_cluster_uuid": None, + "vsan_node_uuid": None, + "vsan_health": "unknown", } status = config_mgr.QueryHostStatus() return { - 'vsan_cluster_uuid': status.uuid, - 'vsan_node_uuid': status.nodeUuid, - 'vsan_health': status.health, + "vsan_cluster_uuid": status.uuid, + "vsan_node_uuid": status.nodeUuid, + "vsan_health": status.health, } def get_cpu_facts(self): return { - 'ansible_processor': self.host.summary.hardware.cpuModel, - 'ansible_processor_cores': self.host.summary.hardware.numCpuCores, - 'ansible_processor_count': self.host.summary.hardware.numCpuPkgs, - 'ansible_processor_vcpus': self.host.summary.hardware.numCpuThreads, + "ansible_processor": self.host.summary.hardware.cpuModel, + "ansible_processor_cores": self.host.summary.hardware.numCpuCores, + "ansible_processor_count": self.host.summary.hardware.numCpuPkgs, + "ansible_processor_vcpus": self.host.summary.hardware.numCpuThreads, } def get_memory_facts(self): return { - 'ansible_memfree_mb': self.host.hardware.memorySize // 1024 // 1024 - self.host.summary.quickStats.overallMemoryUsage, - 'ansible_memtotal_mb': self.host.hardware.memorySize // 1024 // 1024, + "ansible_memfree_mb": self.host.hardware.memorySize // 1024 // 1024 + - self.host.summary.quickStats.overallMemoryUsage, + "ansible_memtotal_mb": self.host.hardware.memorySize + // 1024 + // 1024, } def get_datastore_facts(self): facts = dict() - facts['ansible_datastore'] = [] + facts["ansible_datastore"] = [] for store in self.host.datastore: _tmp = { - 'name': store.summary.name, - 'total': bytes_to_human(store.summary.capacity), - 'free': bytes_to_human(store.summary.freeSpace), + "name": store.summary.name, + "total": bytes_to_human(store.summary.capacity), + "free": bytes_to_human(store.summary.freeSpace), } - facts['ansible_datastore'].append(_tmp) + facts["ansible_datastore"].append(_tmp) return facts def get_network_facts(self): facts = dict() - facts['ansible_interfaces'] = [] - facts['ansible_all_ipv4_addresses'] = [] + facts["ansible_interfaces"] = [] + facts["ansible_all_ipv4_addresses"] = [] for nic in self.host.config.network.vnic: device = nic.device - facts['ansible_interfaces'].append(device) - facts['ansible_all_ipv4_addresses'].append(nic.spec.ip.ipAddress) + facts["ansible_interfaces"].append(device) + 
facts["ansible_all_ipv4_addresses"].append(nic.spec.ip.ipAddress) _tmp = { - 'device': device, - 'ipv4': { - 'address': nic.spec.ip.ipAddress, - 'netmask': nic.spec.ip.subnetMask, + "device": device, + "ipv4": { + "address": nic.spec.ip.ipAddress, + "netmask": nic.spec.ip.subnetMask, }, - 'macaddress': nic.spec.mac, - 'mtu': nic.spec.mtu, + "macaddress": nic.spec.mac, + "mtu": nic.spec.mtu, } - facts['ansible_' + device] = _tmp + facts["ansible_" + device] = _tmp return facts def get_system_facts(self): - sn = 'NA' + sn = "NA" for info in self.host.hardware.systemInfo.otherIdentifyingInfo: - if info.identifierType.key == 'ServiceTag': + if info.identifierType.key == "ServiceTag": sn = info.identifierValue facts = { - 'ansible_distribution': self.host.config.product.name, - 'ansible_distribution_version': self.host.config.product.version, - 'ansible_distribution_build': self.host.config.product.build, - 'ansible_os_type': self.host.config.product.osType, - 'ansible_system_vendor': self.host.hardware.systemInfo.vendor, - 'ansible_hostname': self.host.summary.config.name, - 'ansible_product_name': self.host.hardware.systemInfo.model, - 'ansible_product_serial': sn, - 'ansible_bios_date': self.host.hardware.biosInfo.releaseDate, - 'ansible_bios_version': self.host.hardware.biosInfo.biosVersion, - 'ansible_uptime': self.host.summary.quickStats.uptime, - 'ansible_in_maintenance_mode': self.host.runtime.inMaintenanceMode, - 'ansible_uuid': self.host.hardware.systemInfo.uuid, + "ansible_distribution": self.host.config.product.name, + "ansible_distribution_version": self.host.config.product.version, + "ansible_distribution_build": self.host.config.product.build, + "ansible_os_type": self.host.config.product.osType, + "ansible_system_vendor": self.host.hardware.systemInfo.vendor, + "ansible_hostname": self.host.summary.config.name, + "ansible_product_name": self.host.hardware.systemInfo.model, + "ansible_product_serial": sn, + "ansible_bios_date": self.host.hardware.biosInfo.releaseDate, + "ansible_bios_version": self.host.hardware.biosInfo.biosVersion, + "ansible_uptime": self.host.summary.quickStats.uptime, + "ansible_in_maintenance_mode": self.host.runtime.inMaintenanceMode, + "ansible_uuid": self.host.hardware.systemInfo.uuid, } return facts def properties_facts(self): - ansible_facts = self.to_json(self.host, self.params.get('properties')) - if self.params.get('show_tag'): + ansible_facts = self.to_json(self.host, self.params.get("properties")) + if self.params.get("show_tag"): vmware_client = VmwareRestClient(self.module) tag_info = { - 'tags': vmware_client.get_tags_for_hostsystem(hostsystem_mid=self.host._moId) + "tags": vmware_client.get_tags_for_hostsystem( + hostsystem_mid=self.host._moId + ) } ansible_facts.update(tag_info) @@ -340,21 +360,24 @@ def properties_facts(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - esxi_hostname=dict(type='str', required=False), - show_tag=dict(type='bool', default=False), - schema=dict(type='str', choices=['summary', 'vsphere'], default='summary'), - properties=dict(type='list') + esxi_hostname=dict(type="str", required=False), + show_tag=dict(type="bool", default=False), + schema=dict( + type="str", choices=["summary", "vsphere"], default="summary" + ), + properties=dict(type="list"), + ) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) vm_host_manager = VMwareHostFactManager(module) - if 
module.params['schema'] == 'summary': + if module.params["schema"] == "summary": vm_host_manager.all_facts() else: vm_host_manager.properties_facts() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_feature_info.py b/plugins/modules/vmware_host_feature_info.py index 59bb482..9649557 100644 --- a/plugins/modules/vmware_host_feature_info.py +++ b/plugins/modules/vmware_host_feature_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_feature_info short_description: Gathers info about an ESXi host's feature capability information @@ -39,9 +40,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather feature capability info about all ESXi Hosts in given Cluster vmware_host_feature_info: hostname: '{{ vcenter_hostname }}' @@ -68,9 +69,9 @@ that: - ssbd|int == 1 when: ssbd is defined -''' +""" -RETURN = r''' +RETURN = r""" hosts_feature_info: description: metadata about host's feature capability information returned: always @@ -89,18 +90,23 @@ }, ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class FeatureCapabilityInfoManager(PyVmomi): def __init__(self, module): super(FeatureCapabilityInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_feature_info(self): host_feature_info = dict() @@ -109,9 +115,9 @@ def gather_host_feature_info(self): capability = [] for fc in host_feature_capabilities: temp_dict = { - 'key': fc.key, - 'feature_name': fc.featureName, - 'value': fc.value, + "key": fc.key, + "feature_name": fc.featureName, + "value": fc.value, } capability.append(temp_dict) @@ -123,21 +129,21 @@ def gather_host_feature_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_capability_manager = FeatureCapabilityInfoManager(module) - module.exit_json(changed=False, - hosts_feature_info=host_capability_manager.gather_host_feature_info()) + module.exit_json( + changed=False, + hosts_feature_info=host_capability_manager.gather_host_feature_info(), + ) if __name__ == "__main__": diff 
--git a/plugins/modules/vmware_host_firewall_info.py b/plugins/modules/vmware_host_firewall_info.py index 798b80e..c16d221 100644 --- a/plugins/modules/vmware_host_firewall_info.py +++ b/plugins/modules/vmware_host_firewall_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_firewall_info short_description: Gathers info about an ESXi host's firewall configuration information @@ -40,9 +41,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather firewall info about all ESXi Host in given Cluster vmware_host_firewall_info: hostname: '{{ vcenter_hostname }}' @@ -58,9 +59,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_firewall_info: description: metadata about host's firewall configuration returned: on success @@ -93,42 +94,50 @@ }, ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class FirewallInfoManager(PyVmomi): def __init__(self, module): super(FirewallInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) @staticmethod def normalize_rule_set(rule_obj): rule_dict = dict() - rule_dict['key'] = rule_obj.key - rule_dict['service'] = rule_obj.service - rule_dict['enabled'] = rule_obj.enabled - rule_dict['rule'] = [] + rule_dict["key"] = rule_obj.key + rule_dict["service"] = rule_obj.service + rule_dict["enabled"] = rule_obj.enabled + rule_dict["rule"] = [] for rule in rule_obj.rule: rule_set_dict = dict() - rule_set_dict['port'] = rule.port - rule_set_dict['end_port'] = rule.endPort - rule_set_dict['direction'] = rule.direction - rule_set_dict['port_type'] = rule.portType - rule_set_dict['protocol'] = rule.protocol - rule_dict['rule'].append(rule_set_dict) + rule_set_dict["port"] = rule.port + rule_set_dict["end_port"] = rule.endPort + rule_set_dict["direction"] = rule.direction + rule_set_dict["port_type"] = rule.portType + rule_set_dict["protocol"] = rule.protocol + rule_dict["rule"].append(rule_set_dict) allowed_host = rule_obj.allowedHosts rule_allow_host = dict() - rule_allow_host['ip_address'] = [ip for ip in allowed_host.ipAddress] - rule_allow_host['ip_network'] = [ip.network + "/" + str(ip.prefixLength) for ip in allowed_host.ipNetwork] - rule_allow_host['all_ip'] = allowed_host.allIp - rule_dict['allowed_hosts'] = rule_allow_host + rule_allow_host["ip_address"] = [ip for ip in allowed_host.ipAddress] + rule_allow_host["ip_network"] = [ + ip.network + "/" + 
str(ip.prefixLength) + for ip in allowed_host.ipNetwork + ] + rule_allow_host["all_ip"] = allowed_host.allIp + rule_dict["allowed_hosts"] = rule_allow_host return rule_dict def gather_host_firewall_info(self): @@ -138,27 +147,30 @@ def gather_host_firewall_info(self): if firewall_system: hosts_firewall_info[host.name] = [] for rule_set_obj in firewall_system.firewallInfo.ruleset: - hosts_firewall_info[host.name].append(self.normalize_rule_set(rule_obj=rule_set_obj)) + hosts_firewall_info[host.name].append( + self.normalize_rule_set(rule_obj=rule_set_obj) + ) return hosts_firewall_info def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_host_firewall = FirewallInfoManager(module) - module.exit_json(changed=False, hosts_firewall_info=vmware_host_firewall.gather_host_firewall_info()) + module.exit_json( + changed=False, + hosts_firewall_info=vmware_host_firewall.gather_host_firewall_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_firewall_manager.py b/plugins/modules/vmware_host_firewall_manager.py index a400420..b9761c9 100644 --- a/plugins/modules/vmware_host_firewall_manager.py +++ b/plugins/modules/vmware_host_firewall_manager.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_firewall_manager short_description: Manage firewall configurations about an ESXi host @@ -52,9 +53,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enable vvold rule set for all ESXi Host in given Cluster vmware_host_firewall_manager: hostname: '{{ vcenter_hostname }}' @@ -119,9 +120,9 @@ ip_network: - 192.168.200.0/24 delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" rule_set_state: description: - dict with hostname as key and dict with firewall rule set facts as value @@ -165,7 +166,7 @@ } } } -''' +""" try: from pyVmomi import vim @@ -173,18 +174,25 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native -from ansible_collections.community.general.plugins.module_utils.compat import ipaddress +from ansible_collections.community.general.plugins.module_utils.compat import ( + ipaddress, +) class VmwareFirewallManager(PyVmomi): def __init__(self, module): super(VmwareFirewallManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.options = self.params.get('options', dict()) - 
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.options = self.params.get("options", dict()) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) self.firewall_facts = dict() self.rule_options = self.module.params.get("rules") self.gather_rule_set() @@ -196,14 +204,19 @@ def gather_rule_set(self): if firewall_system: for rule_set_obj in firewall_system.firewallInfo.ruleset: temp_rule_dict = dict() - temp_rule_dict['enabled'] = rule_set_obj.enabled + temp_rule_dict["enabled"] = rule_set_obj.enabled allowed_host = rule_set_obj.allowedHosts rule_allow_host = dict() - rule_allow_host['ip_address'] = allowed_host.ipAddress - rule_allow_host['ip_network'] = [ip.network + "/" + str(ip.prefixLength) for ip in allowed_host.ipNetwork] - rule_allow_host['all_ip'] = allowed_host.allIp - temp_rule_dict['allowed_hosts'] = rule_allow_host - self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict + rule_allow_host["ip_address"] = allowed_host.ipAddress + rule_allow_host["ip_network"] = [ + ip.network + "/" + str(ip.prefixLength) + for ip in allowed_host.ipNetwork + ] + rule_allow_host["all_ip"] = allowed_host.allIp + temp_rule_dict["allowed_hosts"] = rule_allow_host + self.firewall_facts[host.name][ + rule_set_obj.key + ] = temp_rule_dict def check_params(self): rules_by_host = {} @@ -211,36 +224,50 @@ def check_params(self): rules_by_host[host.name] = self.firewall_facts[host.name].keys() for rule_option in self.rule_options: - rule_name = rule_option.get('name') + rule_name = rule_option.get("name") if rule_name is None: - self.module.fail_json(msg="Please specify rule.name for rule set" - " as it is required parameter.") - hosts_with_rule_name = [h for h, r in rules_by_host.items() if rule_name in r] - hosts_without_rule_name = set([i.name for i in self.hosts]) - set(hosts_with_rule_name) + self.module.fail_json( + msg="Please specify rule.name for rule set" + " as it is required parameter." + ) + hosts_with_rule_name = [ + h for h, r in rules_by_host.items() if rule_name in r + ] + hosts_without_rule_name = set([i.name for i in self.hosts]) - set( + hosts_with_rule_name + ) if hosts_without_rule_name: - self.module.fail_json(msg="rule named '%s' wasn't found on hosts: %s" % ( - rule_name, hosts_without_rule_name)) - - if 'enabled' not in rule_option: - self.module.fail_json(msg="Please specify rules.enabled for rule set" - " %s as it is required parameter." % rule_name) - - allowed_hosts = rule_option.get('allowed_hosts', {}) - ip_addresses = allowed_hosts.get('ip_address', []) - ip_networks = allowed_hosts.get('ip_network', []) + self.module.fail_json( + msg="rule named '%s' wasn't found on hosts: %s" + % (rule_name, hosts_without_rule_name) + ) + + if "enabled" not in rule_option: + self.module.fail_json( + msg="Please specify rules.enabled for rule set" + " %s as it is required parameter." 
% rule_name + ) + + allowed_hosts = rule_option.get("allowed_hosts", {}) + ip_addresses = allowed_hosts.get("ip_address", []) + ip_networks = allowed_hosts.get("ip_network", []) for ip_address in ip_addresses: try: ipaddress.ip_address(ip_address) except ValueError: - self.module.fail_json(msg="The provided IP address %s is not a valid IP" - " for the rule %s" % (ip_address, rule_name)) + self.module.fail_json( + msg="The provided IP address %s is not a valid IP" + " for the rule %s" % (ip_address, rule_name) + ) for ip_network in ip_networks: try: ipaddress.ip_network(ip_network) except ValueError: - self.module.fail_json(msg="The provided IP network %s is not a valid network" - " for the rule %s" % (ip_network, rule_name)) + self.module.fail_json( + msg="The provided IP network %s is not a valid network" + " for the rule %s" % (ip_network, rule_name) + ) def ensure(self): """ @@ -255,131 +282,179 @@ def ensure(self): firewall_system = host.configManager.firewallSystem if firewall_system is None: continue - results['rule_set_state'][host.name] = {} + results["rule_set_state"][host.name] = {} for rule_option in self.rule_options: - rule_name = rule_option.get('name', None) + rule_name = rule_option.get("name", None) - current_rule_state = self.firewall_facts[host.name][rule_name]['enabled'] - if current_rule_state != rule_option['enabled']: + current_rule_state = self.firewall_facts[host.name][rule_name][ + "enabled" + ] + if current_rule_state != rule_option["enabled"]: try: if not self.module.check_mode: - if rule_option['enabled']: + if rule_option["enabled"]: firewall_system.EnableRuleset(id=rule_name) else: firewall_system.DisableRuleset(id=rule_name) # keep track of changes as we go enable_disable_changed = True except vim.fault.NotFound as not_found: - self.module.fail_json(msg="Failed to enable rule set %s as" - " rule set id is unknown : %s" % ( - rule_name, - to_native(not_found.msg))) + self.module.fail_json( + msg="Failed to enable rule set %s as" + " rule set id is unknown : %s" + % (rule_name, to_native(not_found.msg)) + ) except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to enabled rule set %s as an internal" - " error happened while reconfiguring" - " rule set : %s" % ( - rule_name, - to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to enabled rule set %s as an internal" + " error happened while reconfiguring" + " rule set : %s" + % (rule_name, to_native(host_config_fault.msg)) + ) # save variables here for comparison later and change tracking # also covers cases where inputs may be null - permitted_networking = self.firewall_facts[host.name][rule_name] - rule_allows_all = permitted_networking['allowed_hosts']['all_ip'] - rule_allowed_ips = set(permitted_networking['allowed_hosts']['ip_address']) - rule_allowed_networks = set(permitted_networking['allowed_hosts']['ip_network']) - - allowed_hosts = rule_option.get('allowed_hosts', {}) - playbook_allows_all = allowed_hosts.get('all_ip', False) - playbook_allowed_ips = set(allowed_hosts.get('ip_address', [])) - playbook_allowed_networks = set(allowed_hosts.get('ip_network', [])) + permitted_networking = self.firewall_facts[host.name][ + rule_name + ] + rule_allows_all = permitted_networking["allowed_hosts"][ + "all_ip" + ] + rule_allowed_ips = set( + permitted_networking["allowed_hosts"]["ip_address"] + ) + rule_allowed_networks = set( + permitted_networking["allowed_hosts"]["ip_network"] + ) + + allowed_hosts = rule_option.get("allowed_hosts", {}) + 
playbook_allows_all = allowed_hosts.get("all_ip", False) + playbook_allowed_ips = set(allowed_hosts.get("ip_address", [])) + playbook_allowed_networks = set( + allowed_hosts.get("ip_network", []) + ) # compare what is configured on the firewall rule with what the playbook provides - allowed_all_ips_different = bool(rule_allows_all != playbook_allows_all) - ip_list_different = bool(rule_allowed_ips != playbook_allowed_ips) - ip_network_different = bool(rule_allowed_networks != playbook_allowed_networks) + allowed_all_ips_different = bool( + rule_allows_all != playbook_allows_all + ) + ip_list_different = bool( + rule_allowed_ips != playbook_allowed_ips + ) + ip_network_different = bool( + rule_allowed_networks != playbook_allowed_networks + ) # apply everything here in one function call - if allowed_all_ips_different is True or ip_list_different is True or ip_network_different is True: + if ( + allowed_all_ips_different is True + or ip_list_different is True + or ip_network_different is True + ): try: allowed_ip_changed = True if not self.module.check_mode: # setup spec firewall_spec = vim.host.Ruleset.RulesetSpec() - firewall_spec.allowedHosts = vim.host.Ruleset.IpList() - firewall_spec.allowedHosts.allIp = playbook_allows_all - firewall_spec.allowedHosts.ipAddress = list(playbook_allowed_ips) + firewall_spec.allowedHosts = ( + vim.host.Ruleset.IpList() + ) + firewall_spec.allowedHosts.allIp = ( + playbook_allows_all + ) + firewall_spec.allowedHosts.ipAddress = list( + playbook_allowed_ips + ) firewall_spec.allowedHosts.ipNetwork = [] for i in playbook_allowed_networks: - address, mask = i.split('/') - tmp_ip_network_spec = vim.host.Ruleset.IpNetwork() + address, mask = i.split("/") + tmp_ip_network_spec = ( + vim.host.Ruleset.IpNetwork() + ) tmp_ip_network_spec.network = address tmp_ip_network_spec.prefixLength = int(mask) - firewall_spec.allowedHosts.ipNetwork.append(tmp_ip_network_spec) + firewall_spec.allowedHosts.ipNetwork.append( + tmp_ip_network_spec + ) - firewall_system.UpdateRuleset(id=rule_name, spec=firewall_spec) + firewall_system.UpdateRuleset( + id=rule_name, spec=firewall_spec + ) except vim.fault.NotFound as not_found: - self.module.fail_json(msg="Failed to configure rule set %s as" - " rule set id is unknown : %s" % (rule_name, - to_native(not_found.msg))) + self.module.fail_json( + msg="Failed to configure rule set %s as" + " rule set id is unknown : %s" + % (rule_name, to_native(not_found.msg)) + ) except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to configure rule set %s as an internal" - " error happened while reconfiguring" - " rule set : %s" % (rule_name, - to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to configure rule set %s as an internal" + " error happened while reconfiguring" + " rule set : %s" + % (rule_name, to_native(host_config_fault.msg)) + ) except vim.fault.RuntimeFault as runtime_fault: - self.module.fail_json(msg="Failed to configure the rule set %s as a runtime" - " error happened while applying the reconfiguration:" - " %s" % (rule_name, to_native(runtime_fault.msg))) - - results['rule_set_state'][host.name][rule_name] = { - 'current_state': rule_option['enabled'], - 'previous_state': current_rule_state, - 'desired_state': rule_option['enabled'], - 'allowed_hosts': { - 'current_allowed_all': playbook_allows_all, - 'previous_allowed_all': permitted_networking['allowed_hosts']['all_ip'], - 'desired_allowed_all': playbook_allows_all, - 'current_allowed_ip': playbook_allowed_ips, - 
'previous_allowed_ip': set(permitted_networking['allowed_hosts']['ip_address']), - 'desired_allowed_ip': playbook_allowed_ips, - 'current_allowed_networks': playbook_allowed_networks, - 'previous_allowed_networks': set(permitted_networking['allowed_hosts']['ip_network']), - 'desired_allowed_networks': playbook_allowed_networks, - } + self.module.fail_json( + msg="Failed to configure the rule set %s as a runtime" + " error happened while applying the reconfiguration:" + " %s" % (rule_name, to_native(runtime_fault.msg)) + ) + + results["rule_set_state"][host.name][rule_name] = { + "current_state": rule_option["enabled"], + "previous_state": current_rule_state, + "desired_state": rule_option["enabled"], + "allowed_hosts": { + "current_allowed_all": playbook_allows_all, + "previous_allowed_all": permitted_networking[ + "allowed_hosts" + ]["all_ip"], + "desired_allowed_all": playbook_allows_all, + "current_allowed_ip": playbook_allowed_ips, + "previous_allowed_ip": set( + permitted_networking["allowed_hosts"]["ip_address"] + ), + "desired_allowed_ip": playbook_allowed_ips, + "current_allowed_networks": playbook_allowed_networks, + "previous_allowed_networks": set( + permitted_networking["allowed_hosts"]["ip_network"] + ), + "desired_allowed_networks": playbook_allowed_networks, + }, } if enable_disable_changed or allowed_ip_changed: fw_change_list.append(True) if any(fw_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - rules=dict(type='list', default=list(), required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + rules=dict(type="list", default=list(), required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) for rule_option in module.params.get("rules", []): - if 'allowed_hosts' in rule_option: - if isinstance(rule_option['allowed_hosts'], list): - if len(rule_option['allowed_hosts']) == 1: - allowed_hosts = rule_option['allowed_hosts'][0] - rule_option['allowed_hosts'] = allowed_hosts - module.deprecate('allowed_hosts should be a dict, not a list', '2.13') + if "allowed_hosts" in rule_option: + if isinstance(rule_option["allowed_hosts"], list): + if len(rule_option["allowed_hosts"]) == 1: + allowed_hosts = rule_option["allowed_hosts"][0] + rule_option["allowed_hosts"] = allowed_hosts + module.deprecate( + "allowed_hosts should be a dict, not a list", "2.13" + ) vmware_firewall_manager = VmwareFirewallManager(module) vmware_firewall_manager.check_params() diff --git a/plugins/modules/vmware_host_hyperthreading.py b/plugins/modules/vmware_host_hyperthreading.py index 4c868d2..a9c1e5d 100644 --- a/plugins/modules/vmware_host_hyperthreading.py +++ b/plugins/modules/vmware_host_hyperthreading.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- 
module: vmware_host_hyperthreading short_description: Enables/Disables Hyperthreading optimization for an ESXi host system @@ -52,9 +53,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enable Hyperthreading for an host system vmware_host_hyperthreading: hostname: '{{ vcenter_hostname }}' @@ -84,9 +85,9 @@ state: disabled validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about host system's Hyperthreading configuration returned: always @@ -98,7 +99,7 @@ "state": "enabled", }, } -''' +""" try: from pyVmomi import vim, vmodl @@ -106,56 +107,78 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native class VmwareHostHyperthreading(PyVmomi): """Manage Hyperthreading for an ESXi host system""" + def __init__(self, module): super(VmwareHostHyperthreading, self).__init__(module) - cluster_name = self.params.get('cluster_name') - esxi_host_name = self.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name") + esxi_host_name = self.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") def ensure(self): """Manage Hyperthreading for an ESXi host system""" results = dict(changed=False, result=dict()) - desired_state = self.params.get('state') + desired_state = self.params.get("state") host_change_list = [] for host in self.hosts: changed = False - results['result'][host.name] = dict(msg='') + results["result"][host.name] = dict(msg="") hyperthreading_info = host.config.hyperThread - results['result'][host.name]['state'] = desired_state - if desired_state == 'enabled': + results["result"][host.name]["state"] = desired_state + if desired_state == "enabled": # Don't do anything if Hyperthreading is already enabled if hyperthreading_info.config: if hyperthreading_info.active: - results['result'][host.name]['changed'] = False - results['result'][host.name]['state_current'] = "active" - results['result'][host.name]['msg'] = "Hyperthreading is enabled and active" + results["result"][host.name]["changed"] = False + results["result"][host.name][ + "state_current" + ] = "active" + results["result"][host.name][ + "msg" + ] = "Hyperthreading is enabled and active" if not hyperthreading_info.active: # L1 Terminal Fault (L1TF)/Foreshadow mitigation workaround (https://kb.vmware.com/s/article/55806) option_manager = host.configManager.advancedOption try: - mitigation = option_manager.QueryOptions('VMkernel.Boot.hyperthreadingMitigation') + mitigation = option_manager.QueryOptions( + "VMkernel.Boot.hyperthreadingMitigation" + ) except vim.fault.InvalidName: mitigation = None if mitigation and mitigation[0].value: - results['result'][host.name]['changed'] = False - results['result'][host.name]['state_current'] = "enabled" - results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active because the" - " processor is vulnerable to L1 Terminal Fault (L1TF).") + results["result"][host.name]["changed"] = False + 
results["result"][host.name][ + "state_current" + ] = "enabled" + results["result"][host.name]["msg"] = ( + "Hyperthreading is enabled, but not active because the" + " processor is vulnerable to L1 Terminal Fault (L1TF)." + ) else: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['state_current'] = "enabled" - results['result'][host.name]['msg'] = ("Hyperthreading is enabled, but not active." - " A reboot is required!") + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "state_current" + ] = "enabled" + results["result"][host.name]["msg"] = ( + "Hyperthreading is enabled, but not active." + " A reboot is required!" + ) # Enable Hyperthreading else: # Check if Hyperthreading is available @@ -163,42 +186,72 @@ def ensure(self): if not self.module.check_mode: try: host.configManager.cpuScheduler.EnableHyperThreading() - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['state_previous'] = "disabled" - results['result'][host.name]['state_current'] = "enabled" - results['result'][host.name]['msg'] = ( - "Hyperthreading enabled for host. Reboot the host to activate it." - ) + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "state_previous" + ] = "disabled" + results["result"][host.name][ + "state_current" + ] = "enabled" + results["result"][host.name][ + "msg" + ] = "Hyperthreading enabled for host. Reboot the host to activate it." except vmodl.fault.NotSupported as not_supported: # This should never happen since Hyperthreading is available self.module.fail_json( - msg="Failed to enable Hyperthreading for host '%s' : %s" % - (host.name, to_native(not_supported.msg)) + msg="Failed to enable Hyperthreading for host '%s' : %s" + % (host.name, to_native(not_supported.msg)) ) - except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: + except ( + vmodl.RuntimeFault, + vmodl.MethodFault, + ) as runtime_fault: self.module.fail_json( - msg="Failed to enable Hyperthreading for host '%s' due to : %s" % - (host.name, to_native(runtime_fault.msg)) + msg="Failed to enable Hyperthreading for host '%s' due to : %s" + % (host.name, to_native(runtime_fault.msg)) ) else: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['state_previous'] = "disabled" - results['result'][host.name]['state_current'] = "enabled" - results['result'][host.name]['msg'] = "Hyperthreading will be enabled" + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "state_previous" + ] = "disabled" + results["result"][host.name][ + "state_current" + ] = "enabled" + results["result"][host.name][ + "msg" + ] = "Hyperthreading will be enabled" else: - self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name) - elif desired_state == 'disabled': + self.module.fail_json( + msg="Hyperthreading optimization is not available for host '%s'" + % host.name + ) + elif desired_state == "disabled": # Don't do anything if Hyperthreading is already disabled if not hyperthreading_info.config: if not hyperthreading_info.active: - results['result'][host.name]['changed'] = False - results['result'][host.name]['state_current'] = "inactive" - results['result'][host.name]['msg'] = "Hyperthreading is disabled and inactive" + results["result"][host.name]["changed"] = False + results["result"][host.name][ + "state_current" + ] = "inactive" + 
results["result"][host.name][ + "msg" + ] = "Hyperthreading is disabled and inactive" if hyperthreading_info.active: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['state_current'] = "disabled" - results['result'][host.name]['msg'] = ("Hyperthreading is already disabled" - " but still active. A reboot is required!") + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "state_current" + ] = "disabled" + results["result"][host.name]["msg"] = ( + "Hyperthreading is already disabled" + " but still active. A reboot is required!" + ) # Disable Hyperthreading else: # Check if Hyperthreading is available @@ -206,35 +259,55 @@ def ensure(self): if not self.module.check_mode: try: host.configManager.cpuScheduler.DisableHyperThreading() - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['state_previous'] = "enabled" - results['result'][host.name]['state_current'] = "disabled" - results['result'][host.name]['msg'] = ( - "Hyperthreading disabled. Reboot the host to deactivate it." - ) + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "state_previous" + ] = "enabled" + results["result"][host.name][ + "state_current" + ] = "disabled" + results["result"][host.name][ + "msg" + ] = "Hyperthreading disabled. Reboot the host to deactivate it." except vmodl.fault.NotSupported as not_supported: # This should never happen since Hyperthreading is available self.module.fail_json( - msg="Failed to disable Hyperthreading for host '%s' : %s" % - (host.name, to_native(not_supported.msg)) + msg="Failed to disable Hyperthreading for host '%s' : %s" + % (host.name, to_native(not_supported.msg)) ) - except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: + except ( + vmodl.RuntimeFault, + vmodl.MethodFault, + ) as runtime_fault: self.module.fail_json( - msg="Failed to disable Hyperthreading for host '%s' due to : %s" % - (host.name, to_native(runtime_fault.msg)) + msg="Failed to disable Hyperthreading for host '%s' due to : %s" + % (host.name, to_native(runtime_fault.msg)) ) else: - changed = results['result'][host.name]['changed'] = True - results['result'][host.name]['state_previous'] = "enabled" - results['result'][host.name]['state_current'] = "disabled" - results['result'][host.name]['msg'] = "Hyperthreading will be disabled" + changed = results["result"][host.name][ + "changed" + ] = True + results["result"][host.name][ + "state_previous" + ] = "enabled" + results["result"][host.name][ + "state_current" + ] = "disabled" + results["result"][host.name][ + "msg" + ] = "Hyperthreading will be disabled" else: - self.module.fail_json(msg="Hyperthreading optimization is not available for host '%s'" % host.name) + self.module.fail_json( + msg="Hyperthreading optimization is not available for host '%s'" + % host.name + ) host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) @@ -242,21 +315,20 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(default='enabled', choices=['enabled', 'disabled']), - esxi_hostname=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), + state=dict(default="enabled", choices=["enabled", "disabled"]), + esxi_hostname=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), ) - module = 
AnsibleModule(argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True - ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, + ) hyperthreading = VmwareHostHyperthreading(module) hyperthreading.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_ipv6.py b/plugins/modules/vmware_host_ipv6.py index 00cb403..631354c 100644 --- a/plugins/modules/vmware_host_ipv6.py +++ b/plugins/modules/vmware_host_ipv6.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_ipv6 short_description: Enables/Disables IPv6 support for an ESXi host system @@ -50,9 +51,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enable IPv6 for an host system vmware_host_ipv6: hostname: '{{ vcenter_hostname }}' @@ -82,9 +83,9 @@ state: disabled validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about host system's IPv6 configuration returned: always @@ -95,7 +96,7 @@ "msg": "IPv6 is already enabled and active for host 'esxi01'", }, } -''' +""" try: from pyVmomi import vim, vmodl @@ -103,114 +104,169 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native class VmwareHostIPv6(PyVmomi): """Class to manage IPv6 for an ESXi host system""" + def __init__(self, module): super(VmwareHostIPv6, self).__init__(module) - cluster_name = self.params.get('cluster_name') - esxi_host_name = self.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name") + esxi_host_name = self.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: - self.module.fail_json(msg="Failed to find host system with given configuration.") + self.module.fail_json( + msg="Failed to find host system with given configuration." 
+ ) def ensure(self): """Manage IPv6 for an ESXi host system""" results = dict(changed=False, result=dict()) - desired_state = self.module.params['state'] + desired_state = self.module.params["state"] host_change_list = [] for host in self.hosts: changed = False - results['result'][host.name] = dict(msg='') + results["result"][host.name] = dict(msg="") host_network_system = host.configManager.networkSystem host_network_info = host_network_system.networkInfo - if desired_state == 'enabled': + if desired_state == "enabled": # Don't do anything if IPv6 is already enabled if host_network_info.atBootIpV6Enabled: if host_network_info.ipV6Enabled: - results['result'][host.name]['msg'] = "IPv6 is already enabled and active for host '%s'" % \ - host.name + results["result"][host.name]["msg"] = ( + "IPv6 is already enabled and active for host '%s'" + % host.name + ) if not host_network_info.ipV6Enabled: - results['result'][host.name]['msg'] = ("IPv6 is already enabled for host '%s', but a reboot" - " is required!" % host.name) + results["result"][host.name]["msg"] = ( + "IPv6 is already enabled for host '%s', but a reboot" + " is required!" % host.name + ) # Enable IPv6 else: if not self.module.check_mode: try: config = vim.host.NetworkConfig() config.ipV6Enabled = True - host_network_system.UpdateNetworkConfig(config, "modify") + host_network_system.UpdateNetworkConfig( + config, "modify" + ) changed = True - results['result'][host.name]['changed'] = True - results['result'][host.name]['msg'] = "IPv6 enabled for host '%s'" % host.name + results["result"][host.name]["changed"] = True + results["result"][host.name]["msg"] = ( + "IPv6 enabled for host '%s'" % host.name + ) except (vim.fault.AlreadyExists, vim.fault.NotFound): - self.module.fail_json(msg="Network entity specified in the configuration for host '%s'" - " already exists" % host.name) + self.module.fail_json( + msg="Network entity specified in the configuration for host '%s'" + " already exists" % host.name + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Invalid parameter specified for host '%s' : %s" % - (host.name, to_native(invalid_argument.msg))) + self.module.fail_json( + msg="Invalid parameter specified for host '%s' : %s" + % (host.name, to_native(invalid_argument.msg)) + ) except vim.fault.HostConfigFault as config_fault: - self.module.fail_json(msg="Failed to enable IPv6 for host '%s' due to : %s" % - (host.name, to_native(config_fault.msg))) + self.module.fail_json( + msg="Failed to enable IPv6 for host '%s' due to : %s" + % (host.name, to_native(config_fault.msg)) + ) except vmodl.fault.NotSupported as not_supported: - self.module.fail_json(msg="Failed to enable IPv6 for host '%s' due to : %s" % - (host.name, to_native(not_supported.msg))) - except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: - self.module.fail_json(msg="Failed to enable IPv6 for host '%s' due to : %s" % - (host.name, to_native(runtime_fault.msg))) + self.module.fail_json( + msg="Failed to enable IPv6 for host '%s' due to : %s" + % (host.name, to_native(not_supported.msg)) + ) + except ( + vmodl.RuntimeFault, + vmodl.MethodFault, + ) as runtime_fault: + self.module.fail_json( + msg="Failed to enable IPv6 for host '%s' due to : %s" + % (host.name, to_native(runtime_fault.msg)) + ) else: changed = True - results['result'][host.name]['changed'] = True - results['result'][host.name]['msg'] = "IPv6 will be enabled for host '%s'" % host.name - elif desired_state == 'disabled': + 
results["result"][host.name]["changed"] = True + results["result"][host.name]["msg"] = ( + "IPv6 will be enabled for host '%s'" % host.name + ) + elif desired_state == "disabled": # Don't do anything if IPv6 is already disabled if not host_network_info.atBootIpV6Enabled: if not host_network_info.ipV6Enabled: - results['result'][host.name]['msg'] = "IPv6 is already disabled for host '%s'" % host.name + results["result"][host.name]["msg"] = ( + "IPv6 is already disabled for host '%s'" + % host.name + ) if host_network_info.ipV6Enabled: changed = True - results['result'][host.name]['msg'] = ("IPv6 is already disabled for host '%s'," - " but a reboot is required!" % host.name) + results["result"][host.name]["msg"] = ( + "IPv6 is already disabled for host '%s'," + " but a reboot is required!" % host.name + ) # Disable IPv6 else: if not self.module.check_mode: try: config = vim.host.NetworkConfig() config.ipV6Enabled = False - host_network_system.UpdateNetworkConfig(config, "modify") + host_network_system.UpdateNetworkConfig( + config, "modify" + ) changed = True - results['result'][host.name]['changed'] = True - results['result'][host.name]['msg'] = "IPv6 disabled for host '%s'" % host.name + results["result"][host.name]["changed"] = True + results["result"][host.name]["msg"] = ( + "IPv6 disabled for host '%s'" % host.name + ) except (vim.fault.AlreadyExists, vim.fault.NotFound): - self.module.fail_json(msg="Network entity specified in the configuration for host '%s'" - " already exists" % host.name) + self.module.fail_json( + msg="Network entity specified in the configuration for host '%s'" + " already exists" % host.name + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Invalid parameter specified for host '%s' : %s" % - (host.name, to_native(invalid_argument.msg))) + self.module.fail_json( + msg="Invalid parameter specified for host '%s' : %s" + % (host.name, to_native(invalid_argument.msg)) + ) except vim.fault.HostConfigFault as config_fault: - self.module.fail_json(msg="Failed to disable IPv6 for host '%s' due to : %s" % - (host.name, to_native(config_fault.msg))) + self.module.fail_json( + msg="Failed to disable IPv6 for host '%s' due to : %s" + % (host.name, to_native(config_fault.msg)) + ) except vmodl.fault.NotSupported as not_supported: - self.module.fail_json(msg="Failed to disable IPv6 for host '%s' due to : %s" % - (host.name, to_native(not_supported.msg))) - except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: - self.module.fail_json(msg="Failed to disable IPv6 for host '%s' due to : %s" % - (host.name, to_native(runtime_fault.msg))) + self.module.fail_json( + msg="Failed to disable IPv6 for host '%s' due to : %s" + % (host.name, to_native(not_supported.msg)) + ) + except ( + vmodl.RuntimeFault, + vmodl.MethodFault, + ) as runtime_fault: + self.module.fail_json( + msg="Failed to disable IPv6 for host '%s' due to : %s" + % (host.name, to_native(runtime_fault.msg)) + ) else: changed = True - results['result'][host.name]['changed'] = True - results['result'][host.name]['msg'] = "IPv6 will be disabled for host '%s'" % host.name + results["result"][host.name]["changed"] = True + results["result"][host.name]["msg"] = ( + "IPv6 will be disabled for host '%s'" % host.name + ) host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) @@ -220,21 +276,20 @@ def main(): """ argument_spec = vmware_argument_spec() argument_spec.update( - 
state=dict(default='enabled', choices=['enabled', 'disabled']), - esxi_hostname=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), + state=dict(default="enabled", choices=["enabled", "disabled"]), + esxi_hostname=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), ) - module = AnsibleModule(argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True - ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, + ) ipv6 = VmwareHostIPv6(module) ipv6.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_kernel_manager.py b/plugins/modules/vmware_host_kernel_manager.py index 9f29478..847f8b2 100644 --- a/plugins/modules/vmware_host_kernel_manager.py +++ b/plugins/modules/vmware_host_kernel_manager.py @@ -3,16 +3,17 @@ # Copyright: (c) 2019, Aaron Longchamps, # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_kernel_manager short_description: Manage kernel module options on ESXi hosts @@ -55,9 +56,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure IPv6 to be off via tcpip4 kernel module vmware_host_kernel_manager: hostname: '{{ vcenter_hostname }}' @@ -75,9 +76,9 @@ cluster_name: '{{ virtual_cluster_name }}' kernel_module_name: "vmw_psp_rr" kernel_module_option: "maxPathsPerDevice=2" -''' +""" -RETURN = r''' +RETURN = r""" host_kernel_status: description: - dict with information on what was changed, by ESXi host in scope. 
@@ -93,7 +94,7 @@ } } } -''' +""" try: from pyVmomi import vim @@ -101,7 +102,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native @@ -109,24 +113,33 @@ class VmwareKernelManager(PyVmomi): def __init__(self, module): self.module = module super(VmwareKernelManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) - self.kernel_module_name = self.params.get('kernel_module_name') - self.kernel_module_option = self.params.get('kernel_module_option') + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) + self.kernel_module_name = self.params.get("kernel_module_name") + self.kernel_module_option = self.params.get("kernel_module_option") self.results = {} if not self.hosts: - self.module.fail_json(msg="Failed to find a host system that matches the specified criteria") + self.module.fail_json( + msg="Failed to find a host system that matches the specified criteria" + ) # find kernel module options for a given kmod_name. If the name is not right, this will throw an exception def get_kernel_module_option(self, host, kmod_name): host_kernel_manager = host.configManager.kernelModuleSystem try: - return host_kernel_manager.QueryConfiguredModuleOptionString(self.kernel_module_name) + return host_kernel_manager.QueryConfiguredModuleOptionString( + self.kernel_module_name + ) except vim.fault.NotFound as kernel_fault: - self.module.fail_json(msg="Failed to find kernel module on host '%s'. More information: %s" % (host.name, to_native(kernel_fault.msg))) + self.module.fail_json( + msg="Failed to find kernel module on host '%s'. More information: %s" + % (host.name, to_native(kernel_fault.msg)) + ) # configure the provided kernel module with the specified options def apply_kernel_module_option(self, host, kmod_name, kmod_option): @@ -135,11 +148,19 @@ def apply_kernel_module_option(self, host, kmod_name, kmod_option): if host_kernel_manager: try: if not self.module.check_mode: - host_kernel_manager.UpdateModuleOptionString(kmod_name, kmod_option) + host_kernel_manager.UpdateModuleOptionString( + kmod_name, kmod_option + ) except vim.fault.NotFound as kernel_fault: - self.module.fail_json(msg="Failed to find kernel module on host '%s'. More information: %s" % (host.name, to_native(kernel_fault))) + self.module.fail_json( + msg="Failed to find kernel module on host '%s'. 
More information: %s" + % (host.name, to_native(kernel_fault)) + ) except Exception as kernel_fault: - self.module.fail_json(msg="Failed to configure kernel module for host '%s' due to: %s" % (host.name, to_native(kernel_fault))) + self.module.fail_json( + msg="Failed to configure kernel module for host '%s' due to: %s" + % (host.name, to_native(kernel_fault)) + ) # evaluate our current configuration against desired options and save results def check_host_configuration_state(self): @@ -155,46 +176,64 @@ def check_host_configuration_state(self): if host_kernel_manager: # keep track of original options on the kernel module - original_options = self.get_kernel_module_option(host, self.kernel_module_name) + original_options = self.get_kernel_module_option( + host, self.kernel_module_name + ) desired_options = self.kernel_module_option # apply as needed, also depending on check mode if original_options != desired_options: changed = True if self.module.check_mode: - msg = "Options would be changed on the kernel module" + msg = ( + "Options would be changed on the kernel module" + ) else: - self.apply_kernel_module_option(host, self.kernel_module_name, desired_options) + self.apply_kernel_module_option( + host, self.kernel_module_name, desired_options + ) msg = "Options have been changed on the kernel module" - self.results[host.name]['configured_options'] = desired_options + self.results[host.name][ + "configured_options" + ] = desired_options else: msg = "Options are already the same" change_list.append(changed) - self.results[host.name]['changed'] = changed - self.results[host.name]['msg'] = msg - self.results[host.name]['original_options'] = original_options + self.results[host.name]["changed"] = changed + self.results[host.name]["msg"] = msg + self.results[host.name][ + "original_options" + ] = original_options else: - msg = "No kernel module manager found on host %s - impossible to configure." % host.name - self.results[host.name]['changed'] = changed - self.results[host.name]['msg'] = msg + msg = ( + "No kernel module manager found on host %s - impossible to configure." + % host.name + ) + self.results[host.name]["changed"] = changed + self.results[host.name]["msg"] = msg else: - msg = "Host %s is disconnected and cannot be changed." % host.name - self.results[host.name]['changed'] = changed - self.results[host.name]['msg'] = msg + msg = ( + "Host %s is disconnected and cannot be changed." 
+ % host.name + ) + self.results[host.name]["changed"] = changed + self.results[host.name]["msg"] = msg - self.module.exit_json(changed=any(change_list), host_kernel_status=self.results) + self.module.exit_json( + changed=any(change_list), host_kernel_status=self.results + ) def main(): argument_spec = vmware_argument_spec() # add the arguments we're going to use for this module argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - kernel_module_name=dict(type='str', required=True), - kernel_module_option=dict(type='str', required=True), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + kernel_module_name=dict(type="str", required=True), + kernel_module_option=dict(type="str", required=True), ) # make sure we have a valid target cluster_name or esxi_hostname (not both) @@ -202,17 +241,13 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - mutually_exclusive=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], + mutually_exclusive=[["cluster_name", "esxi_hostname"]], ) vmware_host_config = VmwareKernelManager(module) vmware_host_config.check_host_configuration_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_lockdown.py b/plugins/modules/vmware_host_lockdown.py index d8055a6..ddfc30e 100644 --- a/plugins/modules/vmware_host_lockdown.py +++ b/plugins/modules/vmware_host_lockdown.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_lockdown short_description: Manage administrator permission for the local administrative account for the ESXi host @@ -55,9 +56,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enter host system into lockdown mode vmware_host_lockdown: hostname: '{{ vcenter_hostname }}' @@ -106,9 +107,9 @@ cluster_name: '{{ cluster_name }}' state: present delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about state of Host system lock down returned: always @@ -122,7 +123,7 @@ }, } } -''' +""" try: from pyvmomi import vim @@ -130,7 +131,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native @@ -138,12 +142,16 @@ class VmwareLockdownManager(PyVmomi): def __init__(self, module): super(VmwareLockdownManager, self).__init__(module) if not self.is_vcenter(): - self.module.fail_json(msg="Lockdown operations are performed from vCenter only. " - "hostname %s is an ESXi server. Please specify hostname " - "as vCenter server." 
% self.module.params['hostname']) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + self.module.fail_json( + msg="Lockdown operations are performed from vCenter only. " + "hostname %s is an ESXi server. Please specify hostname " + "as vCenter server." % self.module.params["hostname"] + ) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def ensure(self): """ @@ -151,44 +159,66 @@ def ensure(self): """ results = dict(changed=False, host_lockdown_state=dict()) change_list = [] - desired_state = self.params.get('state') + desired_state = self.params.get("state") for host in self.hosts: - results['host_lockdown_state'][host.name] = dict(current_state='', - desired_state=desired_state, - previous_state='' - ) + results["host_lockdown_state"][host.name] = dict( + current_state="", + desired_state=desired_state, + previous_state="", + ) changed = False try: if host.config.adminDisabled: - results['host_lockdown_state'][host.name]['previous_state'] = 'present' - if desired_state == 'absent': + results["host_lockdown_state"][host.name][ + "previous_state" + ] = "present" + if desired_state == "absent": host.ExitLockdownMode() - results['host_lockdown_state'][host.name]['current_state'] = 'absent' + results["host_lockdown_state"][host.name][ + "current_state" + ] = "absent" changed = True else: - results['host_lockdown_state'][host.name]['current_state'] = 'present' + results["host_lockdown_state"][host.name][ + "current_state" + ] = "present" elif not host.config.adminDisabled: - results['host_lockdown_state'][host.name]['previous_state'] = 'absent' - if desired_state == 'present': + results["host_lockdown_state"][host.name][ + "previous_state" + ] = "absent" + if desired_state == "present": host.EnterLockdownMode() - results['host_lockdown_state'][host.name]['current_state'] = 'present' + results["host_lockdown_state"][host.name][ + "current_state" + ] = "present" changed = True else: - results['host_lockdown_state'][host.name]['current_state'] = 'absent' + results["host_lockdown_state"][host.name][ + "current_state" + ] = "absent" except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to manage lockdown mode for esxi" - " hostname %s : %s" % (host.name, to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to manage lockdown mode for esxi" + " hostname %s : %s" + % (host.name, to_native(host_config_fault.msg)) + ) except vim.fault.AdminDisabled as admin_disabled: - self.module.fail_json(msg="Failed to manage lockdown mode as administrator " - "permission has been disabled for " - "esxi hostname %s : %s" % (host.name, to_native(admin_disabled.msg))) + self.module.fail_json( + msg="Failed to manage lockdown mode as administrator " + "permission has been disabled for " + "esxi hostname %s : %s" + % (host.name, to_native(admin_disabled.msg)) + ) except Exception as generic_exception: - self.module.fail_json(msg="Failed to manage lockdown mode due to generic exception for esxi " - "hostname %s : %s" % (host.name, to_native(generic_exception))) + self.module.fail_json( + msg="Failed to manage lockdown mode due to generic exception for esxi " + "hostname %s : %s" + % (host.name, to_native(generic_exception)) + ) 
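# --- Illustrative sketch (hedged, not part of the diff): the lockdown transition logic
# used by ensure() above, pulled out for readability. `host` stands in for a pyVmomi
# HostSystem; the only attributes/calls used (config.adminDisabled, EnterLockdownMode,
# ExitLockdownMode) are the ones the module itself relies on. The helper name is made up.
def plan_lockdown_transition(host, desired_state):
    """Return (previous_state, current_state, action) without touching the host."""
    previous_state = "present" if host.config.adminDisabled else "absent"
    if previous_state == "present" and desired_state == "absent":
        return previous_state, "absent", host.ExitLockdownMode
    if previous_state == "absent" and desired_state == "present":
        return previous_state, "present", host.EnterLockdownMode
    # Already in the desired state: nothing to call.
    return previous_state, previous_state, None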
change_list.append(changed) if any(change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) @@ -196,16 +226,19 @@ def ensure(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='list', required=False), - state=dict(type='str', default='present', choices=['present', 'absent'], required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="list", required=False), + state=dict( + type="str", + default="present", + choices=["present", "absent"], + required=False, + ), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ] + required_one_of=[["cluster_name", "esxi_hostname"]], ) vmware_lockdown_mgr = VmwareLockdownManager(module) diff --git a/plugins/modules/vmware_host_ntp.py b/plugins/modules/vmware_host_ntp.py index c7e891b..f1ef99b 100644 --- a/plugins/modules/vmware_host_ntp.py +++ b/plugins/modules/vmware_host_ntp.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_ntp short_description: Manage NTP server configuration of an ESXi host @@ -64,9 +65,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure NTP servers for an ESXi Host vmware_host_ntp: hostname: vcenter01.example.local @@ -112,9 +113,9 @@ ntp_servers: - bad.server.ntp.org delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" host_ntp_status: description: metadata about host system's NTP configuration returned: always @@ -132,7 +133,7 @@ "ntp_servers_previous": ["time1.example.local", "time2.example.local"], }, } -''' +""" try: from pyVmomi import vim @@ -140,7 +141,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native @@ -149,28 +153,36 @@ class VmwareNtpConfigManager(PyVmomi): def __init__(self, module): super(VmwareNtpConfigManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.ntp_servers = self.params.get('ntp_servers', list()) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.ntp_servers = self.params.get("ntp_servers", list()) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") self.results = {} - self.desired_state = self.params.get('state', None) - self.verbose = module.params.get('verbose', False) - - def update_ntp_servers(self, host, ntp_servers_configured, ntp_servers_to_change, operation='overwrite'): + self.desired_state = 
self.params.get("state", None) + self.verbose = module.params.get("verbose", False) + + def update_ntp_servers( + self, + host, + ntp_servers_configured, + ntp_servers_to_change, + operation="overwrite", + ): """Update NTP server configuration""" host_date_time_manager = host.configManager.dateTimeSystem if host_date_time_manager: # Prepare new NTP server list - if operation == 'overwrite': + if operation == "overwrite": new_ntp_servers = list(ntp_servers_to_change) else: new_ntp_servers = list(ntp_servers_configured) - if operation == 'add': + if operation == "add": new_ntp_servers = new_ntp_servers + ntp_servers_to_change - elif operation == 'delete': + elif operation == "delete": for server in ntp_servers_to_change: if server in new_ntp_servers: new_ntp_servers.remove(server) @@ -181,7 +193,7 @@ def update_ntp_servers(self, host, ntp_servers_configured, ntp_servers_to_change ntp_servers_configured, new_ntp_servers, ntp_servers_to_change, - operation + operation, ) ntp_config_spec = vim.host.NtpConfig() @@ -190,13 +202,15 @@ def update_ntp_servers(self, host, ntp_servers_configured, ntp_servers_to_change date_config_spec.ntpConfig = ntp_config_spec try: if not self.module.check_mode: - host_date_time_manager.UpdateDateTimeConfig(date_config_spec) + host_date_time_manager.UpdateDateTimeConfig( + date_config_spec + ) if self.verbose: - self.results[host.name]['msg'] = message + self.results[host.name]["msg"] = message except vim.fault.HostConfigFault as config_fault: self.module.fail_json( - msg="Failed to configure NTP for host '%s' due to : %s" % - (host.name, to_native(config_fault.msg)) + msg="Failed to configure NTP for host '%s' due to : %s" + % (host.name, to_native(config_fault.msg)) ) return new_ntp_servers @@ -207,51 +221,65 @@ def check_host_state(self): changed = False for host in self.hosts: self.results[host.name] = dict() - ntp_servers_configured, ntp_servers_to_change = self.check_ntp_servers(host=host) + ntp_servers_configured, ntp_servers_to_change = self.check_ntp_servers( + host=host + ) # add/remove NTP servers if self.desired_state: - self.results[host.name]['state'] = self.desired_state + self.results[host.name]["state"] = self.desired_state if ntp_servers_to_change: - self.results[host.name]['ntp_servers_changed'] = ntp_servers_to_change - operation = 'add' if self.desired_state == 'present' else 'delete' + self.results[host.name][ + "ntp_servers_changed" + ] = ntp_servers_to_change + operation = ( + "add" if self.desired_state == "present" else "delete" + ) new_ntp_servers = self.update_ntp_servers( host=host, ntp_servers_configured=ntp_servers_configured, ntp_servers_to_change=ntp_servers_to_change, - operation=operation + operation=operation, ) - self.results[host.name]['ntp_servers_current'] = new_ntp_servers - self.results[host.name]['changed'] = True + self.results[host.name][ + "ntp_servers_current" + ] = new_ntp_servers + self.results[host.name]["changed"] = True change_list.append(True) else: - self.results[host.name]['ntp_servers_current'] = ntp_servers_configured + self.results[host.name][ + "ntp_servers_current" + ] = ntp_servers_configured if self.verbose: - self.results[host.name]['msg'] = ( - "NTP servers already added" if self.desired_state == 'present' + self.results[host.name]["msg"] = ( + "NTP servers already added" + if self.desired_state == "present" else "NTP servers already removed" ) - self.results[host.name]['changed'] = False + self.results[host.name]["changed"] = False change_list.append(False) # overwrite NTP servers else: - 
self.results[host.name]['ntp_servers'] = self.ntp_servers + self.results[host.name]["ntp_servers"] = self.ntp_servers if ntp_servers_to_change: - self.results[host.name]['ntp_servers_changed'] = self.get_differt_entries( - ntp_servers_configured, - ntp_servers_to_change + self.results[host.name][ + "ntp_servers_changed" + ] = self.get_differt_entries( + ntp_servers_configured, ntp_servers_to_change ) self.update_ntp_servers( host=host, ntp_servers_configured=ntp_servers_configured, ntp_servers_to_change=ntp_servers_to_change, - operation='overwrite' + operation="overwrite", ) - self.results[host.name]['changed'] = True + self.results[host.name]["changed"] = True change_list.append(True) else: if self.verbose: - self.results[host.name]['msg'] = "NTP servers already configured" - self.results[host.name]['changed'] = False + self.results[host.name][ + "msg" + ] = "NTP servers already configured" + self.results[host.name]["changed"] = False change_list.append(False) if any(change_list): @@ -263,13 +291,21 @@ def check_ntp_servers(self, host): update_ntp_list = [] host_datetime_system = host.configManager.dateTimeSystem if host_datetime_system: - ntp_servers_configured = host_datetime_system.dateTimeInfo.ntpConfig.server + ntp_servers_configured = ( + host_datetime_system.dateTimeInfo.ntpConfig.server + ) # add/remove NTP servers if self.desired_state: for ntp_server in self.ntp_servers: - if self.desired_state == 'present' and ntp_server not in ntp_servers_configured: + if ( + self.desired_state == "present" + and ntp_server not in ntp_servers_configured + ): update_ntp_list.append(ntp_server) - if self.desired_state == 'absent' and ntp_server in ntp_servers_configured: + if ( + self.desired_state == "absent" + and ntp_server in ntp_servers_configured + ): update_ntp_list.append(ntp_server) # overwrite NTP servers else: @@ -277,17 +313,29 @@ def check_ntp_servers(self, host): for ntp_server in self.ntp_servers: update_ntp_list.append(ntp_server) if update_ntp_list: - self.results[host.name]['ntp_servers_previous'] = ntp_servers_configured + self.results[host.name][ + "ntp_servers_previous" + ] = ntp_servers_configured return ntp_servers_configured, update_ntp_list - def build_changed_message(self, ntp_servers_configured, new_ntp_servers, ntp_servers_to_change, operation): + def build_changed_message( + self, + ntp_servers_configured, + new_ntp_servers, + ntp_servers_to_change, + operation, + ): """Build changed message""" - check_mode = 'would be ' if self.module.check_mode else '' - if operation == 'overwrite': + check_mode = "would be " if self.module.check_mode else "" + if operation == "overwrite": # get differences - add = self.get_not_in_list_one(new_ntp_servers, ntp_servers_configured) - remove = self.get_not_in_list_one(ntp_servers_configured, new_ntp_servers) + add = self.get_not_in_list_one( + new_ntp_servers, ntp_servers_configured + ) + remove = self.get_not_in_list_one( + ntp_servers_configured, new_ntp_servers + ) diff_servers = list(ntp_servers_configured) if add and remove: for server in add: @@ -296,46 +344,71 @@ def build_changed_message(self, ntp_servers_configured, new_ntp_servers, ntp_ser diff_servers.remove(server) if new_ntp_servers != diff_servers: message = ( - "NTP server %s %sadded and %s %sremoved and the server sequence %schanged as well" % - (self.array_to_string(add), check_mode, self.array_to_string(remove), check_mode, check_mode) + "NTP server %s %sadded and %s %sremoved and the server sequence %schanged as well" + % ( + self.array_to_string(add), + 
check_mode, + self.array_to_string(remove), + check_mode, + check_mode, + ) ) else: if new_ntp_servers != ntp_servers_configured: - message = ( - "NTP server %s %sreplaced with %s" % - (self.array_to_string(remove), check_mode, self.array_to_string(add)) + message = "NTP server %s %sreplaced with %s" % ( + self.array_to_string(remove), + check_mode, + self.array_to_string(add), ) else: - message = ( - "NTP server %s %sremoved and %s %sadded" % - (self.array_to_string(remove), check_mode, self.array_to_string(add), check_mode) + message = "NTP server %s %sremoved and %s %sadded" % ( + self.array_to_string(remove), + check_mode, + self.array_to_string(add), + check_mode, ) elif add: for server in add: diff_servers.append(server) if new_ntp_servers != diff_servers: message = ( - "NTP server %s %sadded and the server sequence %schanged as well" % - (self.array_to_string(add), check_mode, check_mode) + "NTP server %s %sadded and the server sequence %schanged as well" + % (self.array_to_string(add), check_mode, check_mode) ) else: - message = "NTP server %s %sadded" % (self.array_to_string(add), check_mode) + message = "NTP server %s %sadded" % ( + self.array_to_string(add), + check_mode, + ) elif remove: for server in remove: diff_servers.remove(server) if new_ntp_servers != diff_servers: message = ( - "NTP server %s %sremoved and the server sequence %schanged as well" % - (self.array_to_string(remove), check_mode, check_mode) + "NTP server %s %sremoved and the server sequence %schanged as well" + % ( + self.array_to_string(remove), + check_mode, + check_mode, + ) ) else: - message = "NTP server %s %sremoved" % (self.array_to_string(remove), check_mode) + message = "NTP server %s %sremoved" % ( + self.array_to_string(remove), + check_mode, + ) else: message = "NTP server sequence %schanged" % check_mode - elif operation == 'add': - message = "NTP server %s %sadded" % (self.array_to_string(ntp_servers_to_change), check_mode) - elif operation == 'delete': - message = "NTP server %s %sremoved" % (self.array_to_string(ntp_servers_to_change), check_mode) + elif operation == "add": + message = "NTP server %s %sadded" % ( + self.array_to_string(ntp_servers_to_change), + check_mode, + ) + elif operation == "delete": + message = "NTP server %s %sremoved" % ( + self.array_to_string(ntp_servers_to_change), + check_mode, + ) return message @@ -349,11 +422,12 @@ def array_to_string(array): """Return string from array""" if len(array) > 2: string = ( - ', '.join("'{0}'".format(element) for element in array[:-1]) + ', and ' + ", ".join("'{0}'".format(element) for element in array[:-1]) + + ", and " + "'{0}'".format(str(array[-1])) ) elif len(array) == 2: - string = ' and '.join("'{0}'".format(element) for element in array) + string = " and ".join("'{0}'".format(element) for element in array) elif len(array) == 1: string = "'{0}'".format(array[0]) return string @@ -361,26 +435,26 @@ def array_to_string(array): @staticmethod def get_differt_entries(list1, list2): """Return different entries of two lists""" - return [a for a in list1 + list2 if (a not in list1) or (a not in list2)] + return [ + a for a in list1 + list2 if (a not in list1) or (a not in list2) + ] def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - ntp_servers=dict(type='list', required=True), - state=dict(type='str', choices=['absent', 'present']), - verbose=dict(type='bool', default=False, required=False) + 
cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + ntp_servers=dict(type="list", required=True), + state=dict(type="str", choices=["absent", "present"]), + verbose=dict(type="bool", default=False, required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_host_ntp_config = VmwareNtpConfigManager(module) diff --git a/plugins/modules/vmware_host_ntp_info.py b/plugins/modules/vmware_host_ntp_info.py index 1b4981a..229904b 100644 --- a/plugins/modules/vmware_host_ntp_info.py +++ b/plugins/modules/vmware_host_ntp_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_ntp_info short_description: Gathers info about NTP configuration on an ESXi host @@ -42,9 +43,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather NTP info about all ESXi Host in the given Cluster vmware_host_ntp_info: hostname: '{{ vcenter_hostname }}' @@ -62,9 +63,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_ntp -''' +""" -RETURN = r''' +RETURN = r""" hosts_ntp_info: description: - dict with hostname as key and dict with NTP infos as value @@ -81,18 +82,23 @@ } ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareNtpInfoManager(PyVmomi): def __init__(self, module): super(VmwareNtpInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_ntp_info(self): hosts_info = {} @@ -106,7 +112,10 @@ def gather_ntp_info(self): time_zone_name=host_date_time_manager.dateTimeInfo.timeZone.name, time_zone_description=host_date_time_manager.dateTimeInfo.timeZone.description, time_zone_gmt_offset=host_date_time_manager.dateTimeInfo.timeZone.gmtOffset, - ntp_servers=[ntp_server for ntp_server in host_date_time_manager.dateTimeInfo.ntpConfig.server] + ntp_servers=[ + ntp_server + for ntp_server in host_date_time_manager.dateTimeInfo.ntpConfig.server + ], ) ) hosts_info[host.name] = host_ntp_info @@ -116,20 +125,20 @@ def gather_ntp_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( 
argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) vmware_host_ntp_config = VmwareNtpInfoManager(module) - module.exit_json(changed=False, hosts_ntp_info=vmware_host_ntp_config.gather_ntp_info()) + module.exit_json( + changed=False, hosts_ntp_info=vmware_host_ntp_config.gather_ntp_info() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_package_info.py b/plugins/modules/vmware_host_package_info.py index 6619bf5..0852e9d 100644 --- a/plugins/modules/vmware_host_package_info.py +++ b/plugins/modules/vmware_host_package_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_package_info short_description: Gathers info about available packages on an ESXi host @@ -42,9 +43,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about all ESXi Host in given Cluster vmware_host_package_info: hostname: '{{ vcenter_hostname }}' @@ -62,27 +63,32 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_packages -''' +""" -RETURN = r''' +RETURN = r""" hosts_package_info: description: - dict with hostname as key and dict with package information as value returned: hosts_package_info type: dict sample: { "hosts_package_info": { "localhost.localdomain": []}} -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwarePackageManager(PyVmomi): def __init__(self, module): super(VmwarePackageManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_package_info(self): hosts_info = {} @@ -92,16 +98,18 @@ def gather_package_info(self): if host_pkg_mgr: pkgs = host_pkg_mgr.FetchSoftwarePackages() for pkg in pkgs: - host_package_info.append(dict( - name=pkg.name, - version=pkg.version, - vendor=pkg.vendor, - summary=pkg.summary, - description=pkg.description, - acceptance_level=pkg.acceptanceLevel, - maintenance_mode_required=pkg.maintenanceModeRequired, - creation_date=pkg.creationDate, - )) + host_package_info.append( + dict( + name=pkg.name, + version=pkg.version, + vendor=pkg.vendor, + summary=pkg.summary, + description=pkg.description, + acceptance_level=pkg.acceptanceLevel, + maintenance_mode_required=pkg.maintenanceModeRequired, + creation_date=pkg.creationDate, + ) + ) hosts_info[host.name] = host_package_info return hosts_info @@ -109,20 +117,21 @@ def gather_package_info(self): def main(): argument_spec = 
vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) vmware_host_package_config = VmwarePackageManager(module) - module.exit_json(changed=False, hosts_package_info=vmware_host_package_config.gather_package_info()) + module.exit_json( + changed=False, + hosts_package_info=vmware_host_package_config.gather_package_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_powermgmt_policy.py b/plugins/modules/vmware_host_powermgmt_policy.py index df511b5..e1da602 100644 --- a/plugins/modules/vmware_host_powermgmt_policy.py +++ b/plugins/modules/vmware_host_powermgmt_policy.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_powermgmt_policy short_description: Manages the Power Management Policy of an ESXI host system @@ -48,9 +49,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set the Power Management Policy of a host system to high-performance vmware_host_powermgmt_policy: hostname: '{{ vcenter_hostname }}' @@ -70,9 +71,9 @@ policy: high-performance validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about host system's Power Management Policy returned: always @@ -89,7 +90,7 @@ } } } -''' +""" try: from pyVmomi import vim, vmodl @@ -97,7 +98,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native @@ -105,43 +109,36 @@ class VmwareHostPowerManagement(PyVmomi): """ Class to manage power management policy of an ESXi host system """ + def __init__(self, module): super(VmwareHostPowerManagement, self).__init__(module) - cluster_name = self.params.get('cluster_name') - esxi_host_name = self.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name") + esxi_host_name = self.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: - self.module.fail_json(msg="Failed to find host system with given configuration.") + self.module.fail_json( + msg="Failed to find host system with given configuration." 
+ ) def ensure(self): """ Manage power management policy of an ESXi host system """ results = dict(changed=False, result=dict()) - policy = self.params.get('policy') + policy = self.params.get("policy") host_change_list = [] power_policies = { - 'high-performance': { - 'key': 1, - 'short_name': 'static' - }, - 'balanced': { - 'key': 2, - 'short_name': 'dynamic' - }, - 'low-power': { - 'key': 3, - 'short_name': 'low' - }, - 'custom': { - 'key': 4, - 'short_name': 'custom' - } + "high-performance": {"key": 1, "short_name": "static"}, + "balanced": {"key": 2, "short_name": "dynamic"}, + "low-power": {"key": 3, "short_name": "low"}, + "custom": {"key": 4, "short_name": "custom"}, } for host in self.hosts: changed = False - results['result'][host.name] = dict(msg='') + results["result"][host.name] = dict(msg="") power_system = host.configManager.powerSystem @@ -152,60 +149,86 @@ def ensure(self): # the "name" and "description" parameters are pretty useless # they store only strings containing "PowerPolicy..name" and "PowerPolicy..description" if current_host_power_policy.shortName == "static": - current_policy = 'high-performance' + current_policy = "high-performance" elif current_host_power_policy.shortName == "dynamic": - current_policy = 'balanced' + current_policy = "balanced" elif current_host_power_policy.shortName == "low": - current_policy = 'low-power' + current_policy = "low-power" elif current_host_power_policy.shortName == "custom": - current_policy = 'custom' + current_policy = "custom" - results['result'][host.name]['desired_state'] = policy + results["result"][host.name]["desired_state"] = policy # Don't do anything if the power policy is already configured - if current_host_power_policy.key == power_policies[policy]['key']: - results['result'][host.name]['changed'] = changed - results['result'][host.name]['previous_state'] = current_policy - results['result'][host.name]['current_state'] = policy - results['result'][host.name]['msg'] = "Power policy is already configured" + if current_host_power_policy.key == power_policies[policy]["key"]: + results["result"][host.name]["changed"] = changed + results["result"][host.name]["previous_state"] = current_policy + results["result"][host.name]["current_state"] = policy + results["result"][host.name][ + "msg" + ] = "Power policy is already configured" else: # get available power policies and check if policy is included supported_policy = False power_system_capability = power_system.capability - available_host_power_policies = power_system_capability.availablePolicy + available_host_power_policies = ( + power_system_capability.availablePolicy + ) for available_policy in available_host_power_policies: - if available_policy.shortName == power_policies[policy]['short_name']: + if ( + available_policy.shortName + == power_policies[policy]["short_name"] + ): supported_policy = True if supported_policy: if not self.module.check_mode: try: - power_system.ConfigurePowerPolicy(key=power_policies[policy]['key']) + power_system.ConfigurePowerPolicy( + key=power_policies[policy]["key"] + ) changed = True - results['result'][host.name]['changed'] = True - results['result'][host.name]['msg'] = "Power policy changed" + results["result"][host.name]["changed"] = True + results["result"][host.name][ + "msg" + ] = "Power policy changed" except vmodl.fault.InvalidArgument: - self.module.fail_json(msg="Invalid power policy key provided for host '%s'" % host.name) + self.module.fail_json( + msg="Invalid power policy key provided for host '%s'" + % host.name + ) 
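# --- Illustrative sketch (hedged, not part of the diff): how the policy table above is
# used to pick a vSphere power policy key. `power_system` stands in for
# host.configManager.powerSystem; only the calls shown in the hunk
# (capability.availablePolicy and ConfigurePowerPolicy) are used. Helper name is made up.
POWER_POLICIES = {
    "high-performance": {"key": 1, "short_name": "static"},
    "balanced": {"key": 2, "short_name": "dynamic"},
    "low-power": {"key": 3, "short_name": "low"},
    "custom": {"key": 4, "short_name": "custom"},
}

def apply_power_policy(power_system, policy):
    """Apply `policy` if the host advertises it; return True when a change was made."""
    wanted = POWER_POLICIES[policy]
    supported = any(
        available.shortName == wanted["short_name"]
        for available in power_system.capability.availablePolicy
    )
    if not supported:
        raise ValueError("Power policy '%s' is not supported on this host" % policy)
    power_system.ConfigurePowerPolicy(key=wanted["key"])
    return True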
except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to configure power policy for host '%s': %s" % - (host.name, to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to configure power policy for host '%s': %s" + % (host.name, to_native(host_config_fault.msg)) + ) else: changed = True - results['result'][host.name]['changed'] = True - results['result'][host.name]['msg'] = "Power policy will be changed" - results['result'][host.name]['previous_state'] = current_policy - results['result'][host.name]['current_state'] = policy + results["result"][host.name]["changed"] = True + results["result"][host.name][ + "msg" + ] = "Power policy will be changed" + results["result"][host.name][ + "previous_state" + ] = current_policy + results["result"][host.name]["current_state"] = policy else: changed = False - results['result'][host.name]['changed'] = changed - results['result'][host.name]['previous_state'] = current_policy - results['result'][host.name]['current_state'] = current_policy - self.module.fail_json(msg="Power policy '%s' isn't supported for host '%s'" % - (policy, host.name)) + results["result"][host.name]["changed"] = changed + results["result"][host.name][ + "previous_state" + ] = current_policy + results["result"][host.name][ + "current_state" + ] = current_policy + self.module.fail_json( + msg="Power policy '%s' isn't supported for host '%s'" + % (policy, host.name) + ) host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) @@ -215,22 +238,24 @@ def main(): """ argument_spec = vmware_argument_spec() argument_spec.update( - policy=dict(type='str', default='balanced', - choices=['high-performance', 'balanced', 'low-power', 'custom']), - esxi_hostname=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), + policy=dict( + type="str", + default="balanced", + choices=["high-performance", "balanced", "low-power", "custom"], + ), + esxi_hostname=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), ) - module = AnsibleModule(argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True - ) + module = AnsibleModule( + argument_spec=argument_spec, + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, + ) host_power_management = VmwareHostPowerManagement(module) host_power_management.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_powerstate.py b/plugins/modules/vmware_host_powerstate.py index fd6e45a..7dd21df 100644 --- a/plugins/modules/vmware_host_powerstate.py +++ b/plugins/modules/vmware_host_powerstate.py @@ -7,16 +7,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_powerstate short_description: Manages power states of host systems in vCenter @@ -66,9 +67,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set the state of a host system to reboot vmware_host_powerstate: 
hostname: '{{ vcenter_hostname }}' @@ -101,9 +102,9 @@ state: reboot-host delegate_to: localhost register: reboot_host -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about host system's state returned: always @@ -114,21 +115,30 @@ "error": "", }, } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, wait_for_task, TaskError +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + TaskError, +) from ansible.module_utils._text import to_native class VmwareHostPowerManager(PyVmomi): def __init__(self, module): super(VmwareHostPowerManager, self).__init__(module) - cluster_name = self.params.get('cluster_name') - esxi_host_name = self.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name") + esxi_host_name = self.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: - self.module.fail_json(msg="Failed to find host system with given configuration.") + self.module.fail_json( + msg="Failed to find host system with given configuration." + ) def ensure(self): """ @@ -136,40 +146,58 @@ def ensure(self): """ results = dict(changed=False, result=dict()) - state = self.params.get('state') - force = self.params.get('force') - timeout = self.params.get('timeout') + state = self.params.get("state") + force = self.params.get("force") + timeout = self.params.get("timeout") host_change_list = [] for host in self.hosts: changed = False if not host.runtime.inMaintenanceMode and not force: - self.module.fail_json(msg="Current host system '%s' is not in maintenance mode," - " please specify 'force' as True to proceed." % host.name) - if host.runtime.connectionState == 'notResponding': - self.module.fail_json(msg="Current host system '%s' can not be set in '%s'" - " mode as the host system is not responding." % (host.name, state)) - - results['result'][host.name] = dict(msg='', error='') - if state == 'reboot-host' and not host.capability.rebootSupported: - self.module.fail_json(msg="Current host '%s' can not be rebooted as the host system" - " does not have capability to reboot." % host.name) - elif state == 'shutdown-host' and not host.capability.shutdownSupported: - self.module.fail_json(msg="Current host '%s' can not be shut down as the host system" - " does not have capability to shut down." % host.name) - elif state in ['power-down-to-standby', 'power-up-from-standby'] and not host.capability.standbySupported: - self.module.fail_json(msg="Current host '%s' can not be '%s' as the host system" - " does not have capability to standby supported." % (host.name, state)) - - if state == 'reboot-host': + self.module.fail_json( + msg="Current host system '%s' is not in maintenance mode," + " please specify 'force' as True to proceed." % host.name + ) + if host.runtime.connectionState == "notResponding": + self.module.fail_json( + msg="Current host system '%s' can not be set in '%s'" + " mode as the host system is not responding." 
+ % (host.name, state) + ) + + results["result"][host.name] = dict(msg="", error="") + if state == "reboot-host" and not host.capability.rebootSupported: + self.module.fail_json( + msg="Current host '%s' can not be rebooted as the host system" + " does not have capability to reboot." % host.name + ) + elif ( + state == "shutdown-host" + and not host.capability.shutdownSupported + ): + self.module.fail_json( + msg="Current host '%s' can not be shut down as the host system" + " does not have capability to shut down." % host.name + ) + elif ( + state in ["power-down-to-standby", "power-up-from-standby"] + and not host.capability.standbySupported + ): + self.module.fail_json( + msg="Current host '%s' can not be '%s' as the host system" + " does not have capability to standby supported." + % (host.name, state) + ) + + if state == "reboot-host": task = host.RebootHost_Task(force) verb = "reboot '%s'" % host.name - elif state == 'shutdown-host': + elif state == "shutdown-host": task = host.ShutdownHost_Task(force) verb = "shutdown '%s'" % host.name - elif state == 'power-down-to-standby': + elif state == "power-down-to-standby": task = host.PowerDownHostToStandBy_Task(timeout, force) verb = "power down '%s' to standby" % host.name - elif state == 'power-up-from-standby': + elif state == "power-up-from-standby": task = host.PowerUpHostFromStandBy_Task(timeout) verb = "power up '%s' from standby" % host.name @@ -178,49 +206,59 @@ def ensure(self): success, result = wait_for_task(task) if success: changed = True - results['result'][host.name]['msg'] = verb + results["result"][host.name]["msg"] = verb else: - results['result'][host.name]['error'] = result + results["result"][host.name]["error"] = result except TaskError as task_error: - self.module.fail_json(msg="Failed to %s as host system due to : %s" % (verb, - str(task_error))) + self.module.fail_json( + msg="Failed to %s as host system due to : %s" + % (verb, str(task_error)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to %s due to generic exception : %s" % (host.name, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to %s due to generic exception : %s" + % (host.name, to_native(generic_exc)) + ) else: # Check mode changed = True - results['result'][host.name]['msg'] = verb + results["result"][host.name]["msg"] = verb host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) def main(): argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(type='str', default='shutdown-host', - choices=['power-down-to-standby', 'power-up-from-standby', 'shutdown-host', 'reboot-host']), - esxi_hostname=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), - force=dict(type='bool', default=False), - timeout=dict(type='int', default=600), - + state=dict( + type="str", + default="shutdown-host", + choices=[ + "power-down-to-standby", + "power-up-from-standby", + "shutdown-host", + "reboot-host", + ], + ), + esxi_hostname=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), + force=dict(type="bool", default=False), + timeout=dict(type="int", default=600), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ] - ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[["cluster_name", "esxi_hostname"]], + ) 
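# --- Illustrative sketch (hedged, not part of the diff): the state-to-task dispatch that
# ensure() performs above, as a standalone helper. `host` is a pyVmomi HostSystem; the
# four *_Task calls and the verb strings are taken from the hunk, nothing else is assumed.
def start_power_task(host, state, force, timeout):
    """Kick off the vSphere task matching the requested power state."""
    if state == "reboot-host":
        return host.RebootHost_Task(force), "reboot '%s'" % host.name
    if state == "shutdown-host":
        return host.ShutdownHost_Task(force), "shutdown '%s'" % host.name
    if state == "power-down-to-standby":
        return (
            host.PowerDownHostToStandBy_Task(timeout, force),
            "power down '%s' to standby" % host.name,
        )
    # Remaining module choice: power-up-from-standby.
    return (
        host.PowerUpHostFromStandBy_Task(timeout),
        "power up '%s' from standby" % host.name,
    )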
host_power_manager = VmwareHostPowerManager(module) host_power_manager.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_scanhba.py b/plugins/modules/vmware_host_scanhba.py index 637b142..6f7cda9 100644 --- a/plugins/modules/vmware_host_scanhba.py +++ b/plugins/modules/vmware_host_scanhba.py @@ -9,12 +9,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_scanhba short_description: Rescan host HBA's and optionally refresh the storage system @@ -51,9 +51,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Recan HBA's for a given ESXi host and refresh storage system objects vmware_host_scanhba: hostname: '{{ vcenter_hostname }}' @@ -79,9 +79,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ inventory_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: description: return confirmation of requested host and updated / refreshed storage system returned: always @@ -92,7 +92,7 @@ "refreshed_storage": "true" } } -''' +""" try: from pyVmomi import vim @@ -100,7 +100,11 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, find_obj +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_obj, +) from ansible.module_utils._text import to_native @@ -109,23 +113,25 @@ def __init__(self, module): super(VmwareHbaScan, self).__init__(module) def scan(self): - esxi_host_name = self.params.get('esxi_hostname', None) - cluster_name = self.params.get('cluster_name', None) - refresh_storage = self.params.get('refresh_storage', bool) - hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + esxi_host_name = self.params.get("esxi_hostname", None) + cluster_name = self.params.get("cluster_name", None) + refresh_storage = self.params.get("refresh_storage", bool) + hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) results = dict(changed=True, result=dict()) if not hosts: self.module.fail_json(msg="Failed to find any hosts.") for host in hosts: - results['result'][host.name] = dict() + results["result"][host.name] = dict() host.configManager.storageSystem.RescanAllHba() if refresh_storage is True: host.configManager.storageSystem.RefreshStorageSystem() - results['result'][host.name]['rescaned_hba'] = True - results['result'][host.name]['refreshed_storage'] = refresh_storage + results["result"][host.name]["rescaned_hba"] = True + results["result"][host.name]["refreshed_storage"] = refresh_storage self.module.exit_json(**results) @@ -133,21 +139,19 @@ def scan(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - esxi_hostname=dict(type='str', required=False), - cluster_name=dict(type='str', required=False), - refresh_storage=dict(type='bool', default=False, required=False) + esxi_hostname=dict(type="str", required=False), + cluster_name=dict(type="str", required=False), + refresh_storage=dict(type="bool", default=False, required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 
'esxi_hostname'], - ], - supports_check_mode=False + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=False, ) hbascan = VmwareHbaScan(module) hbascan.scan() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_service_info.py b/plugins/modules/vmware_host_service_info.py index de4db45..257006e 100644 --- a/plugins/modules/vmware_host_service_info.py +++ b/plugins/modules/vmware_host_service_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_service_info short_description: Gathers info about an ESXi host's services @@ -43,9 +44,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about all ESXi Host in given Cluster vmware_host_service_info: hostname: '{{ vcenter_hostname }}' @@ -63,9 +64,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_services -''' +""" -RETURN = r''' +RETURN = r""" host_service_info: description: - dict with hostname as key and dict with host service config information @@ -95,18 +96,23 @@ }, ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VmwareServiceManager(PyVmomi): def __init__(self, module): super(VmwareServiceManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) def gather_host_info(self): hosts_info = {} @@ -124,8 +130,12 @@ def gather_host_info(self): uninstallable=service.uninstallable, running=service.running, policy=service.policy, - source_package_name=service.sourcePackage.sourcePackageName if service.sourcePackage else None, - source_package_desc=service.sourcePackage.description if service.sourcePackage else None, + source_package_name=service.sourcePackage.sourcePackageName + if service.sourcePackage + else None, + source_package_desc=service.sourcePackage.description + if service.sourcePackage + else None, ) ) hosts_info[host.name] = host_service_info @@ -135,20 +145,21 @@ def gather_host_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) vmware_host_service_config = VmwareServiceManager(module) 
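# --- Illustrative sketch (hedged, not part of the diff): the per-service dict that
# gather_host_info() builds above, shown for one host. `host` is a pyVmomi HostSystem;
# only attributes visible in the hunks are used (serviceSystem.serviceInfo.service,
# running, policy, uninstallable, sourcePackage). The real module records a few more
# fields that the hunk elides; the helper name is made up.
def describe_services(host):
    """Return a list of plain dicts describing the host's ESXi services."""
    services = []
    service_system = host.configManager.serviceSystem
    if not service_system:
        return services
    for service in service_system.serviceInfo.service:
        source_package = service.sourcePackage  # may be None for built-in services
        services.append(
            dict(
                key=service.key,
                uninstallable=service.uninstallable,
                running=service.running,
                policy=service.policy,
                source_package_name=source_package.sourcePackageName if source_package else None,
                source_package_desc=source_package.description if source_package else None,
            )
        )
    return services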
- module.exit_json(changed=False, host_service_info=vmware_host_service_config.gather_host_info()) + module.exit_json( + changed=False, + host_service_info=vmware_host_service_config.gather_host_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_service_manager.py b/plugins/modules/vmware_host_service_manager.py index ef5ad15..a047c53 100644 --- a/plugins/modules/vmware_host_service_manager.py +++ b/plugins/modules/vmware_host_service_manager.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_service_manager short_description: Manage services on a given ESXi host @@ -66,9 +67,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Start ntpd service setting for all ESXi Host in given Cluster vmware_host_service_manager: hostname: '{{ vcenter_hostname }}' @@ -109,10 +110,10 @@ service_name: ntpd state: absent delegate_to: localhost -''' +""" -RETURN = r'''# -''' +RETURN = r"""# +""" try: from pyVmomi import vim, vmodl @@ -120,70 +121,95 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native class VmwareServiceManager(PyVmomi): def __init__(self, module): super(VmwareServiceManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.options = self.params.get('options', dict()) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) - self.desired_state = self.params.get('state') - self.desired_policy = self.params.get('service_policy', None) - self.service_name = self.params.get('service_name') + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.options = self.params.get("options", dict()) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) + self.desired_state = self.params.get("state") + self.desired_policy = self.params.get("service_policy", None) + self.service_name = self.params.get("service_name") self.results = {} def service_ctrl(self): changed = False host_service_state = [] for host in self.hosts: - actual_service_state, actual_service_policy = self.check_service_state(host=host, service_name=self.service_name) + actual_service_state, actual_service_policy = self.check_service_state( + host=host, service_name=self.service_name + ) host_service_system = host.configManager.serviceSystem if host_service_system: changed_state = False - self.results[host.name] = dict(service_name=self.service_name, - actual_service_state='running' if actual_service_state else 'stopped', - actual_service_policy=actual_service_policy, - desired_service_policy=self.desired_policy, - desired_service_state=self.desired_state, - error='', - ) + self.results[host.name] = dict( + 
service_name=self.service_name, + actual_service_state="running" + if actual_service_state + else "stopped", + actual_service_policy=actual_service_policy, + desired_service_policy=self.desired_policy, + desired_service_state=self.desired_state, + error="", + ) try: - if self.desired_state in ['start', 'present']: + if self.desired_state in ["start", "present"]: if not actual_service_state: if not self.module.check_mode: - host_service_system.StartService(id=self.service_name) + host_service_system.StartService( + id=self.service_name + ) changed_state = True - elif self.desired_state in ['stop', 'absent']: + elif self.desired_state in ["stop", "absent"]: if actual_service_state: if not self.module.check_mode: - host_service_system.StopService(id=self.service_name) + host_service_system.StopService( + id=self.service_name + ) changed_state = True - elif self.desired_state == 'restart': + elif self.desired_state == "restart": if not self.module.check_mode: - host_service_system.RestartService(id=self.service_name) + host_service_system.RestartService( + id=self.service_name + ) changed_state = True if self.desired_policy: if actual_service_policy != self.desired_policy: if not self.module.check_mode: - host_service_system.UpdateServicePolicy(id=self.service_name, - policy=self.desired_policy) + host_service_system.UpdateServicePolicy( + id=self.service_name, + policy=self.desired_policy, + ) changed_state = True host_service_state.append(changed_state) self.results[host.name].update(changed=changed_state) - except (vim.fault.InvalidState, vim.fault.NotFound, - vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as e: - self.results[host.name].update(changed=False, - error=to_native(e.msg)) + except ( + vim.fault.InvalidState, + vim.fault.NotFound, + vim.fault.HostConfigFault, + vmodl.fault.InvalidArgument, + ) as e: + self.results[host.name].update( + changed=False, error=to_native(e.msg) + ) if any(host_service_state): changed = True - self.module.exit_json(changed=changed, host_service_status=self.results) + self.module.exit_json( + changed=changed, host_service_status=self.results + ) def check_service_state(self, host, service_name): host_service_system = host.configManager.serviceSystem @@ -193,30 +219,37 @@ def check_service_state(self, host, service_name): if service.key == service_name: return service.running, service.policy - msg = "Failed to find '%s' service on host system '%s'" % (service_name, host.name) - cluster_name = self.params.get('cluster_name', None) + msg = "Failed to find '%s' service on host system '%s'" % ( + service_name, + host.name, + ) + cluster_name = self.params.get("cluster_name", None) if cluster_name: msg += " located on cluster '%s'" % cluster_name - msg += ", please check if you have specified a valid ESXi service name." + msg += ( + ", please check if you have specified a valid ESXi service name." 
+ ) self.module.fail_json(msg=msg) def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - state=dict(type='str', default='start', choices=['absent', 'present', 'restart', 'start', 'stop']), - service_name=dict(type='str', required=True), - service_policy=dict(type='str', choices=['automatic', 'off', 'on']), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + state=dict( + type="str", + default="start", + choices=["absent", "present", "restart", "start", "stop"], + ), + service_name=dict(type="str", required=True), + service_policy=dict(type="str", choices=["automatic", "off", "on"]), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_host_service = VmwareServiceManager(module) diff --git a/plugins/modules/vmware_host_snmp.py b/plugins/modules/vmware_host_snmp.py index 01833f2..14b4c26 100644 --- a/plugins/modules/vmware_host_snmp.py +++ b/plugins/modules/vmware_host_snmp.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_host_snmp short_description: Configures SNMP on an ESXi host system @@ -79,9 +80,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enable and configure SNMP community vmware_host_snmp: hostname: '{{ esxi_hostname }}' @@ -120,9 +121,9 @@ state: disabled validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about host system's SNMP configuration returned: always @@ -138,7 +139,7 @@ "trap_targets": [] }, } -''' +""" try: from pyVmomi import vim @@ -146,7 +147,11 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_obj, +) from ansible.module_utils._text import to_native @@ -169,9 +174,9 @@ def __init__(self, module): def ensure(self): """Manage SNMP configuration for an ESXi host system""" results = dict(changed=False, result=dict()) - snmp_state = self.params.get('state') - snmp_port = self.params.get('snmp_port') - community = self.params.get('community') + snmp_state = self.params.get("state") + snmp_port = self.params.get("snmp_port") + community = self.params.get("community") desired_trap_targets = self.params.get("trap_targets") hw_source = self.params.get("hw_source") log_level = self.params.get("log_level") @@ -179,48 +184,59 @@ def ensure(self): trap_filter = self.params.get("trap_filter") event_filter = None if trap_filter: - event_filter = ';'.join(trap_filter) + event_filter = ";".join(trap_filter) changed = False reset_hint = None changed_list = [] - results = dict(msg='') + results = dict(msg="") snmp_system = self.host.configManager.snmpSystem if 
snmp_system: if snmp_system.configuration: snmp_config_spec = snmp_system.configuration else: - self.module.fail_json(msg="SNMP agent configuration isn't supported on the ESXi host") + self.module.fail_json( + msg="SNMP agent configuration isn't supported on the ESXi host" + ) else: - self.module.fail_json(msg="SNMP system isn't available on the ESXi host") + self.module.fail_json( + msg="SNMP system isn't available on the ESXi host" + ) # Check state - results['state'] = snmp_state - if snmp_state == 'reset': + results["state"] = snmp_state + if snmp_state == "reset": changed = True # Get previous config if snmp_config_spec.enabled: - results['state_previous'] = 'enabled' + results["state_previous"] = "enabled" else: - results['state_previous'] = 'disabled' - results['port_previous'] = snmp_config_spec.port - results['community_previous'] = snmp_config_spec.readOnlyCommunities - results['trap_targets_previous'] = self.get_previous_targets(snmp_config_spec.trapTargets) + results["state_previous"] = "disabled" + results["port_previous"] = snmp_config_spec.port + results[ + "community_previous" + ] = snmp_config_spec.readOnlyCommunities + results["trap_targets_previous"] = self.get_previous_targets( + snmp_config_spec.trapTargets + ) for option in snmp_config_spec.option: - if option.key == 'EnvEventSource' and option.value != hw_source: - results['hw_source_previous'] = option.value - if option.key == 'loglevel' and option.value != hw_source: - results['log_level_previous'] = option.value - if option.key == 'EventFilter' and option.value != hw_source: - results['trap_filter_previous'] = option.value.split(';') + if ( + option.key == "EnvEventSource" + and option.value != hw_source + ): + results["hw_source_previous"] = option.value + if option.key == "loglevel" and option.value != hw_source: + results["log_level_previous"] = option.value + if option.key == "EventFilter" and option.value != hw_source: + results["trap_filter_previous"] = option.value.split(";") # Build factory default config destination = vim.host.SnmpSystem.SnmpConfigSpec.Destination() destination.hostName = "" destination.port = 0 destination.community = "" options = [] - options.append(self.create_option('EnvEventSource', 'indications')) - options.append(self.create_option('EventFilter', 'reset')) + options.append(self.create_option("EnvEventSource", "indications")) + options.append(self.create_option("EventFilter", "reset")) snmp_config_spec = vim.host.SnmpSystem.SnmpConfigSpec() # Looks like this value is causing the reset snmp_config_spec.readOnlyCommunities = [""] @@ -229,31 +245,33 @@ def ensure(self): snmp_config_spec.enabled = False snmp_config_spec.option = options else: - if snmp_state == 'enabled' and not snmp_config_spec.enabled: + if snmp_state == "enabled" and not snmp_config_spec.enabled: changed = True changed_list.append("state") - results['state_previous'] = 'disabled' + results["state_previous"] = "disabled" snmp_config_spec.enabled = True - elif snmp_state == 'disabled' and snmp_config_spec.enabled: + elif snmp_state == "disabled" and snmp_config_spec.enabled: changed = True changed_list.append("state") - results['state_previous'] = 'enabled' + results["state_previous"] = "enabled" snmp_config_spec.enabled = False # Check port - results['port'] = snmp_port + results["port"] = snmp_port if snmp_config_spec.port != snmp_port: changed = True changed_list.append("port") - results['port_previous'] = snmp_config_spec.port + results["port_previous"] = snmp_config_spec.port snmp_config_spec.port = snmp_port # 
Check read-only community strings - results['community'] = community + results["community"] = community if snmp_config_spec.readOnlyCommunities != community: changed = True changed_list.append("community list") - results['community_previous'] = snmp_config_spec.readOnlyCommunities + results[ + "community_previous" + ] = snmp_config_spec.readOnlyCommunities if community: snmp_config_spec.readOnlyCommunities = community else: @@ -262,45 +280,64 @@ def ensure(self): reset_hint = True # Check trap targets - results['trap_targets'] = desired_trap_targets + results["trap_targets"] = desired_trap_targets if snmp_config_spec.trapTargets: if desired_trap_targets: temp_desired_targets = [] # Loop through desired targets for target in desired_trap_targets: - dest_hostname, dest_port, dest_community = self.check_if_options_are_valid(target) + dest_hostname, dest_port, dest_community = self.check_if_options_are_valid( + target + ) trap_target_found = False for trap_target in snmp_config_spec.trapTargets: if trap_target.hostName == dest_hostname: - if trap_target.port != dest_port or trap_target.community != dest_community: + if ( + trap_target.port != dest_port + or trap_target.community != dest_community + ): changed = True - changed_list.append("trap target '%s'" % dest_hostname) + changed_list.append( + "trap target '%s'" % dest_hostname + ) trap_target_found = True break if not trap_target_found: changed = True - changed_list.append("trap target '%s'" % dest_hostname) + changed_list.append( + "trap target '%s'" % dest_hostname + ) # Build destination and add to temp target list - destination = self.build_destination(dest_hostname, dest_port, dest_community) + destination = self.build_destination( + dest_hostname, dest_port, dest_community + ) temp_desired_targets.append(destination) # Loop through existing targets to find targets that need to be deleted for trap_target in snmp_config_spec.trapTargets: target_found = False for target in desired_trap_targets: - if trap_target.hostName == target.get('hostname'): + if trap_target.hostName == target.get("hostname"): target_found = True break if not target_found: changed = True - changed_list.append("trap target '%s'" % trap_target.hostName) + changed_list.append( + "trap target '%s'" % trap_target.hostName + ) # Configure trap targets if something has changed if changed: - results['trap_targets_previous'] = self.get_previous_targets(snmp_config_spec.trapTargets) + results[ + "trap_targets_previous" + ] = self.get_previous_targets( + snmp_config_spec.trapTargets + ) snmp_config_spec.trapTargets = temp_desired_targets else: changed = True changed_list.append("trap targets") - results['trap_targets_previous'] = self.get_previous_targets(snmp_config_spec.trapTargets) + results[ + "trap_targets_previous" + ] = self.get_previous_targets(snmp_config_spec.trapTargets) # Doesn't work. 
Need to reset config instead # snmp_config_spec.trapTargets = [] reset_hint = True @@ -308,50 +345,63 @@ def ensure(self): if desired_trap_targets: changed = True changed_list.append("trap targets") - results['trap_targets_previous'] = None + results["trap_targets_previous"] = None desired_targets = [] for target in desired_trap_targets: - dest_hostname, dest_port, dest_community = self.check_if_options_are_valid(target) - destination = self.build_destination(dest_hostname, dest_port, dest_community) + dest_hostname, dest_port, dest_community = self.check_if_options_are_valid( + target + ) + destination = self.build_destination( + dest_hostname, dest_port, dest_community + ) desired_targets.append(destination) snmp_config_spec.trapTargets = desired_targets # Check options - results['hw_source'] = hw_source - results['log_level'] = log_level - results['trap_filter'] = trap_filter + results["hw_source"] = hw_source + results["log_level"] = log_level + results["trap_filter"] = trap_filter event_filter_found = False if snmp_config_spec.option: for option in snmp_config_spec.option: - if option.key == 'EnvEventSource' and option.value != hw_source: + if ( + option.key == "EnvEventSource" + and option.value != hw_source + ): changed = True changed_list.append("HW source") - results['hw_source_previous'] = option.value + results["hw_source_previous"] = option.value option.value = hw_source - if option.key == 'loglevel' and option.value != log_level: + if option.key == "loglevel" and option.value != log_level: changed = True changed_list.append("log level") - results['log_level_previous'] = option.value + results["log_level_previous"] = option.value option.value = log_level - if option.key == 'EventFilter': + if option.key == "EventFilter": event_filter_found = True if event_filter and option.value != event_filter: changed = True changed_list.append("trap filter") - results['trap_filter_previous'] = option.value.split(';') + results[ + "trap_filter_previous" + ] = option.value.split(";") option.value = event_filter if trap_filter and not event_filter_found: changed = True changed_list.append("trap filter") - results['trap_filter_previous'] = [] - snmp_config_spec.option.append(self.create_option('EventFilter', event_filter)) + results["trap_filter_previous"] = [] + snmp_config_spec.option.append( + self.create_option("EventFilter", event_filter) + ) elif not trap_filter and event_filter_found: changed = True changed_list.append("trap filter") # options = [] for option in snmp_config_spec.option: - if option.key == 'EventFilter': - results['trap_filter_previous'] = option.value.split(';') + if option.key == "EventFilter": + results["trap_filter_previous"] = option.value.split( + ";" + ) # else: # options.append(option) # Doesn't work. 
Need to reset config instead @@ -359,20 +409,24 @@ def ensure(self): reset_hint = True if changed: - if snmp_state == 'reset': + if snmp_state == "reset": if self.module.check_mode: message = "SNMP agent would be reset to factory defaults" else: message = "SNMP agent config reset to factory defaults" else: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message = "SNMP " + message + changed_suffix @@ -387,11 +441,12 @@ def ensure(self): ) except vim.fault.InsufficientResourcesFault as insufficient_resources: self.module.fail_json( - msg="Insufficient resources : %s" % to_native(insufficient_resources) + msg="Insufficient resources : %s" + % to_native(insufficient_resources) ) else: message = "SNMP already configured properly" - if not snmp_state == 'reset' and send_trap and desired_trap_targets: + if not snmp_state == "reset" and send_trap and desired_trap_targets: # Check if there was a change before if changed: message += " and " @@ -406,14 +461,16 @@ def ensure(self): message = message + "a test trap was sent" except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Error during trap test : Not found : %s" % to_native(not_found) + msg="Error during trap test : Not found : %s" + % to_native(not_found) ) except vim.fault.InsufficientResourcesFault as insufficient_resources: self.module.fail_json( - msg="Error during trap test : Insufficient resources : %s" % to_native(insufficient_resources) + msg="Error during trap test : Insufficient resources : %s" + % to_native(insufficient_resources) ) - results['changed'] = changed - results['msg'] = message + results["changed"] = changed + results["msg"] = message self.module.exit_json(**results) @@ -431,9 +488,9 @@ def get_previous_targets(trap_targets): previous_targets = [] for target in trap_targets: temp = dict() - temp['hostname'] = target.hostName - temp['port'] = target.port - temp['community'] = target.community + temp["hostname"] = target.hostName + temp["port"] = target.port + temp["community"] = target.community previous_targets.append(temp) return previous_targets @@ -448,17 +505,17 @@ def build_destination(dest_hostname, dest_port, dest_community): def check_if_options_are_valid(self, target): """Check if options are valid""" - dest_hostname = target.get('hostname', None) + dest_hostname = target.get("hostname", None) if dest_hostname is None: self.module.fail_json( msg="Please specify hostname for the trap target as it's a required parameter" ) - dest_port = target.get('port', None) + dest_port = target.get("port", None) if dest_port is None: self.module.fail_json( msg="Please specify port for the trap target as it's a required parameter" ) - dest_community = target.get('community', None) + dest_community = target.get("community", None) if dest_community is None: self.module.fail_json( msg="Please specify community for the trap target as it's a required parameter" @@ -470,24 +527,35 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - state=dict(type='str', default='disabled', choices=['enabled', 'disabled', 'reset']), 
- snmp_port=dict(type='int', default=161), - community=dict(type='list', default=[]), - trap_targets=dict(type='list', default=list()), - trap_filter=dict(type='list'), - hw_source=dict(type='str', default='indications', choices=['indications', 'sensors']), - log_level=dict(type='str', default='info', choices=['debug', 'info', 'warning', 'error']), - send_trap=dict(type='bool', default=False), + state=dict( + type="str", + default="disabled", + choices=["enabled", "disabled", "reset"], + ), + snmp_port=dict(type="int", default=161), + community=dict(type="list", default=[]), + trap_targets=dict(type="list", default=list()), + trap_filter=dict(type="list"), + hw_source=dict( + type="str", + default="indications", + choices=["indications", "sensors"], + ), + log_level=dict( + type="str", + default="info", + choices=["debug", "info", "warning", "error"], + ), + send_trap=dict(type="bool", default=False), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + argument_spec=argument_spec, supports_check_mode=True ) host_snmp = VmwareHostSnmp(module) host_snmp.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_host_ssl_info.py b/plugins/modules/vmware_host_ssl_info.py index 5b3bd4d..db8df6d 100644 --- a/plugins/modules/vmware_host_ssl_info.py +++ b/plugins/modules/vmware_host_ssl_info.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_ssl_info short_description: Gather info of ESXi host system about SSL @@ -43,9 +44,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster vmware_host_ssl_info: hostname: '{{ vcenter_hostname }}' @@ -78,9 +79,9 @@ esxi_password: '{{ esxi_password }}' esxi_ssl_thumbprint: '{{ ssl_thumbprint }}' state: present -''' +""" -RETURN = r''' +RETURN = r""" host_ssl_info: description: - dict with hostname as key and dict with SSL thumbprint related info @@ -97,32 +98,42 @@ ] } } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VMwareHostSslManager(PyVmomi): def __init__(self, module): super(VMwareHostSslManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) self.hosts_info = {} def gather_ssl_info(self): for host in self.hosts: self.hosts_info[host.name] = dict( - principal='', - owner_tag='', - ssl_thumbprints=[]) + principal="", owner_tag="", ssl_thumbprints=[] + ) host_ssl_info_mgr = 
host.config.sslThumbprintInfo if host_ssl_info_mgr: - self.hosts_info[host.name]['principal'] = host_ssl_info_mgr.principal - self.hosts_info[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag - self.hosts_info[host.name]['ssl_thumbprints'] = [i for i in host_ssl_info_mgr.sslThumbprints] + self.hosts_info[host.name][ + "principal" + ] = host_ssl_info_mgr.principal + self.hosts_info[host.name][ + "owner_tag" + ] = host_ssl_info_mgr.ownerTag + self.hosts_info[host.name]["ssl_thumbprints"] = [ + i for i in host_ssl_info_mgr.sslThumbprints + ] self.module.exit_json(changed=False, host_ssl_info=self.hosts_info) @@ -130,15 +141,12 @@ def gather_ssl_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str'), - esxi_hostname=dict(type='str'), + cluster_name=dict(type="str"), esxi_hostname=dict(type="str") ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) diff --git a/plugins/modules/vmware_host_vmhba_info.py b/plugins/modules/vmware_host_vmhba_info.py index 908a15c..e2edf54 100644 --- a/plugins/modules/vmware_host_vmhba_info.py +++ b/plugins/modules/vmware_host_vmhba_info.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_vmhba_info short_description: Gathers info about vmhbas available on the given ESXi host @@ -45,9 +46,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about vmhbas of all ESXi Host in the given Cluster vmware_host_vmhba_info: hostname: '{{ vcenter_hostname }}' @@ -65,9 +66,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmhbas -''' +""" -RETURN = r''' +RETURN = r""" hosts_vmhbas_info: description: - dict with hostname as key and dict with vmhbas information as value. 
@@ -119,7 +120,7 @@ ], } } -''' +""" try: from pyVmomi import vim @@ -127,16 +128,22 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class HostVmhbaMgr(PyVmomi): """Class to manage vmhba info""" + def __init__(self, module): super(HostVmhbaMgr, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") @@ -148,53 +155,77 @@ def gather_host_vmhba_info(self): host_st_system = host.configManager.storageSystem if host_st_system: device_info = host_st_system.storageDeviceInfo - host_vmhba_info['vmhba_details'] = [] + host_vmhba_info["vmhba_details"] = [] for hba in device_info.hostBusAdapter: hba_info = dict() if hba.pci: - hba_info['location'] = hba.pci + hba_info["location"] = hba.pci for pci_device in host.hardware.pciDevice: if pci_device.id == hba.pci: - hba_info['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName + hba_info["adapter"] = ( + pci_device.vendorName + + " " + + pci_device.deviceName + ) break else: - hba_info['location'] = 'PCI' - hba_info['device'] = hba.device + hba_info["location"] = "PCI" + hba_info["device"] = hba.device # contains type as string in format of 'key-vim.host.FibreChannelHba-vmhba1' hba_type = hba.key.split(".")[-1].split("-")[0] - if hba_type == 'SerialAttachedHba': - hba_info['type'] = 'SAS' - elif hba_type == 'FibreChannelHba': - hba_info['type'] = 'Fibre Channel' + if hba_type == "SerialAttachedHba": + hba_info["type"] = "SAS" + elif hba_type == "FibreChannelHba": + hba_info["type"] = "Fibre Channel" else: - hba_info['type'] = hba_type - hba_info['bus'] = hba.bus - hba_info['status'] = hba.status - hba_info['model'] = hba.model - hba_info['driver'] = hba.driver + hba_info["type"] = hba_type + hba_info["bus"] = hba.bus + hba_info["status"] = hba.status + hba_info["model"] = hba.model + hba_info["driver"] = hba.driver try: - if isinstance(hba, (vim.host.FibreChannelHba, vim.host.FibreChannelOverEthernetHba)): - hba_info['node_wwn'] = self.format_number('%X' % hba.nodeWorldWideName) + if isinstance( + hba, + ( + vim.host.FibreChannelHba, + vim.host.FibreChannelOverEthernetHba, + ), + ): + hba_info["node_wwn"] = self.format_number( + "%X" % hba.nodeWorldWideName + ) else: - hba_info['node_wwn'] = self.format_number(hba.nodeWorldWideName) + hba_info["node_wwn"] = self.format_number( + hba.nodeWorldWideName + ) except AttributeError: pass try: - if isinstance(hba, (vim.host.FibreChannelHba, vim.host.FibreChannelOverEthernetHba)): - hba_info['port_wwn'] = self.format_number('%X' % hba.portWorldWideName) + if isinstance( + hba, + ( + vim.host.FibreChannelHba, + vim.host.FibreChannelOverEthernetHba, + ), + ): + hba_info["port_wwn"] = self.format_number( + "%X" % hba.portWorldWideName + ) else: - hba_info['port_wwn'] = self.format_number(hba.portWorldWideName) + hba_info["port_wwn"] = self.format_number( + hba.portWorldWideName + ) except AttributeError: pass try: 
- hba_info['port_type'] = hba.portType + hba_info["port_type"] = hba.portType except AttributeError: pass try: - hba_info['speed'] = hba.speed + hba_info["speed"] = hba.speed except AttributeError: pass - host_vmhba_info['vmhba_details'].append(hba_info) + host_vmhba_info["vmhba_details"].append(hba_info) hosts_vmhba_info[host.name] = host_vmhba_info return hosts_vmhba_info @@ -203,27 +234,28 @@ def gather_host_vmhba_info(self): def format_number(number): """Format number""" string = str(number) - return ':'.join(a + b for a, b in zip(string[::2], string[1::2])) + return ":".join(a + b for a, b in zip(string[::2], string[1::2])) def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_vmhba_mgr = HostVmhbaMgr(module) - module.exit_json(changed=False, hosts_vmhbas_info=host_vmhba_mgr.gather_host_vmhba_info()) + module.exit_json( + changed=False, + hosts_vmhbas_info=host_vmhba_mgr.gather_host_vmhba_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_host_vmnic_info.py b/plugins/modules/vmware_host_vmnic_info.py index fae35bc..bd78cc7 100644 --- a/plugins/modules/vmware_host_vmnic_info.py +++ b/plugins/modules/vmware_host_vmnic_info.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_host_vmnic_info short_description: Gathers info about vmnics available on the given ESXi host @@ -63,9 +64,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about vmnics of all ESXi Host in the given Cluster vmware_host_vmnic_info: hostname: '{{ vcenter_hostname }}' @@ -83,9 +84,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmnics -''' +""" -RETURN = r''' +RETURN = r""" hosts_vmnics_info: description: - dict with hostname as key and dict with vmnics information as value. 
@@ -145,7 +146,7 @@ } } } -''' +""" try: from pyVmomi import vim @@ -153,19 +154,26 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + get_all_objs, +) class HostVmnicMgr(PyVmomi): """Class to manage vmnic info""" + def __init__(self, module): super(HostVmnicMgr, self).__init__(module) - self.capabilities = self.params.get('capabilities') - self.directpath_io = self.params.get('directpath_io') - self.sriov = self.params.get('sriov') - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + self.capabilities = self.params.get("capabilities") + self.directpath_io = self.params.get("directpath_io") + self.sriov = self.params.get("sriov") + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") @@ -187,89 +195,151 @@ def gather_host_vmnic_info(self): """Gather vmnic info""" hosts_vmnic_info = {} for host in self.hosts: - host_vmnic_info = dict(all=[], available=[], used=[], vswitch=dict(), dvswitch=dict()) + host_vmnic_info = dict( + all=[], available=[], used=[], vswitch=dict(), dvswitch=dict() + ) host_nw_system = host.configManager.networkSystem if host_nw_system: nw_config = host_nw_system.networkConfig - vmnics = [pnic.device for pnic in nw_config.pnic if pnic.device.startswith('vmnic')] - host_vmnic_info['all'] = [pnic.device for pnic in nw_config.pnic] - host_vmnic_info['num_vmnics'] = len(vmnics) - host_vmnic_info['vmnic_details'] = [] + vmnics = [ + pnic.device + for pnic in nw_config.pnic + if pnic.device.startswith("vmnic") + ] + host_vmnic_info["all"] = [ + pnic.device for pnic in nw_config.pnic + ] + host_vmnic_info["num_vmnics"] = len(vmnics) + host_vmnic_info["vmnic_details"] = [] for pnic in host.config.network.pnic: pnic_info = dict() - if pnic.device.startswith('vmnic'): + if pnic.device.startswith("vmnic"): if pnic.pci: - pnic_info['location'] = pnic.pci + pnic_info["location"] = pnic.pci for pci_device in host.hardware.pciDevice: if pci_device.id == pnic.pci: - pnic_info['adapter'] = pci_device.vendorName + ' ' + pci_device.deviceName + pnic_info["adapter"] = ( + pci_device.vendorName + + " " + + pci_device.deviceName + ) break else: - pnic_info['location'] = 'PCI' - pnic_info['device'] = pnic.device - pnic_info['driver'] = pnic.driver + pnic_info["location"] = "PCI" + pnic_info["device"] = pnic.device + pnic_info["driver"] = pnic.driver if pnic.linkSpeed: - pnic_info['status'] = 'Connected' - pnic_info['actual_speed'] = pnic.linkSpeed.speedMb - pnic_info['actual_duplex'] = 'Full Duplex' if pnic.linkSpeed.duplex else 'Half Duplex' + pnic_info["status"] = "Connected" + pnic_info["actual_speed"] = pnic.linkSpeed.speedMb + pnic_info["actual_duplex"] = ( + "Full Duplex" + if pnic.linkSpeed.duplex + else "Half Duplex" + ) else: - pnic_info['status'] = 'Disconnected' - pnic_info['actual_speed'] = 'N/A' - pnic_info['actual_duplex'] = 'N/A' + pnic_info["status"] = "Disconnected" + pnic_info["actual_speed"] = "N/A" + pnic_info["actual_duplex"] = "N/A" if 
pnic.spec.linkSpeed: - pnic_info['configured_speed'] = pnic.spec.linkSpeed.speedMb - pnic_info['configured_duplex'] = 'Full Duplex' if pnic.spec.linkSpeed.duplex else 'Half Duplex' + pnic_info[ + "configured_speed" + ] = pnic.spec.linkSpeed.speedMb + pnic_info["configured_duplex"] = ( + "Full Duplex" + if pnic.spec.linkSpeed.duplex + else "Half Duplex" + ) else: - pnic_info['configured_speed'] = 'Auto negotiate' - pnic_info['configured_duplex'] = 'Auto negotiate' - pnic_info['mac'] = pnic.mac + pnic_info["configured_speed"] = "Auto negotiate" + pnic_info["configured_duplex"] = "Auto negotiate" + pnic_info["mac"] = pnic.mac # General NIC capabilities if self.capabilities: - pnic_info['nioc_status'] = 'Allowed' if pnic.resourcePoolSchedulerAllowed else 'Not allowed' - pnic_info['auto_negotiation_supported'] = pnic.autoNegotiateSupported - pnic_info['wake_on_lan_supported'] = pnic.wakeOnLanSupported + pnic_info["nioc_status"] = ( + "Allowed" + if pnic.resourcePoolSchedulerAllowed + else "Not allowed" + ) + pnic_info[ + "auto_negotiation_supported" + ] = pnic.autoNegotiateSupported + pnic_info[ + "wake_on_lan_supported" + ] = pnic.wakeOnLanSupported # DirectPath I/O and SR-IOV capabilities and configuration if self.directpath_io: - pnic_info['directpath_io_supported'] = pnic.vmDirectPathGen2Supported + pnic_info[ + "directpath_io_supported" + ] = pnic.vmDirectPathGen2Supported if self.directpath_io or self.sriov: if pnic.pci: - for pci_device in host.configManager.pciPassthruSystem.pciPassthruInfo: + for ( + pci_device + ) in ( + host.configManager.pciPassthruSystem.pciPassthruInfo + ): if pci_device.id == pnic.pci: if self.directpath_io: - pnic_info['passthru_enabled'] = pci_device.passthruEnabled - pnic_info['passthru_capable'] = pci_device.passthruCapable - pnic_info['passthru_active'] = pci_device.passthruActive + pnic_info[ + "passthru_enabled" + ] = pci_device.passthruEnabled + pnic_info[ + "passthru_capable" + ] = pci_device.passthruCapable + pnic_info[ + "passthru_active" + ] = pci_device.passthruActive if self.sriov: try: if pci_device.sriovCapable: - pnic_info['sriov_status'] = ( - 'Enabled' if pci_device.sriovEnabled else 'Disabled' + pnic_info[ + "sriov_status" + ] = ( + "Enabled" + if pci_device.sriovEnabled + else "Disabled" ) - pnic_info['sriov_active'] = \ - pci_device.sriovActive - pnic_info['sriov_virt_functions'] = \ + pnic_info[ + "sriov_active" + ] = pci_device.sriovActive + pnic_info[ + "sriov_virt_functions" + ] = ( pci_device.numVirtualFunction - pnic_info['sriov_virt_functions_requested'] = \ + ) + pnic_info[ + "sriov_virt_functions_requested" + ] = ( pci_device.numVirtualFunctionRequested - pnic_info['sriov_virt_functions_supported'] = \ + ) + pnic_info[ + "sriov_virt_functions_supported" + ] = ( pci_device.maxVirtualFunctionSupported + ) else: - pnic_info['sriov_status'] = 'Not supported' + pnic_info[ + "sriov_status" + ] = "Not supported" except AttributeError: - pnic_info['sriov_status'] = 'Not supported' - host_vmnic_info['vmnic_details'].append(pnic_info) + pnic_info[ + "sriov_status" + ] = "Not supported" + host_vmnic_info["vmnic_details"].append(pnic_info) vswitch_vmnics = [] proxy_switch_vmnics = [] if nw_config.vswitch: for vswitch in nw_config.vswitch: - host_vmnic_info['vswitch'][vswitch.name] = [] + host_vmnic_info["vswitch"][vswitch.name] = [] # Workaround for "AttributeError: 'NoneType' object has no attribute 'nicDevice'" # this issue doesn't happen every time; vswitch.spec.bridge.nicDevice exists! 
try: for vnic in vswitch.spec.bridge.nicDevice: vswitch_vmnics.append(vnic) - host_vmnic_info['vswitch'][vswitch.name].append(vnic) + host_vmnic_info["vswitch"][ + vswitch.name + ].append(vnic) except AttributeError: pass @@ -277,15 +347,21 @@ def gather_host_vmnic_info(self): for proxy_config in nw_config.proxySwitch: dvs_obj = self.find_dvs_by_uuid(uuid=proxy_config.uuid) if dvs_obj: - host_vmnic_info['dvswitch'][dvs_obj.name] = [] + host_vmnic_info["dvswitch"][dvs_obj.name] = [] for proxy_nic in proxy_config.spec.backing.pnicSpec: proxy_switch_vmnics.append(proxy_nic.pnicDevice) if dvs_obj: - host_vmnic_info['dvswitch'][dvs_obj.name].append(proxy_nic.pnicDevice) + host_vmnic_info["dvswitch"][ + dvs_obj.name + ].append(proxy_nic.pnicDevice) used_vmics = proxy_switch_vmnics + vswitch_vmnics - host_vmnic_info['used'] = used_vmics - host_vmnic_info['available'] = [pnic.device for pnic in nw_config.pnic if pnic.device not in used_vmics] + host_vmnic_info["used"] = used_vmics + host_vmnic_info["available"] = [ + pnic.device + for pnic in nw_config.pnic + if pnic.device not in used_vmics + ] hosts_vmnic_info[host.name] = host_vmnic_info return hosts_vmnic_info @@ -295,23 +371,24 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - capabilities=dict(type='bool', required=False, default=False), - directpath_io=dict(type='bool', required=False, default=False), - sriov=dict(type='bool', required=False, default=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + capabilities=dict(type="bool", required=False, default=False), + directpath_io=dict(type="bool", required=False, default=False), + sriov=dict(type="bool", required=False, default=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) host_vmnic_mgr = HostVmnicMgr(module) - module.exit_json(changed=False, hosts_vmnics_info=host_vmnic_mgr.gather_host_vmnic_info()) + module.exit_json( + changed=False, + hosts_vmnics_info=host_vmnic_mgr.gather_host_vmnic_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_local_role_info.py b/plugins/modules/vmware_local_role_info.py index bc0e3cb..491bea3 100644 --- a/plugins/modules/vmware_local_role_info.py +++ b/plugins/modules/vmware_local_role_info.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_local_role_info short_description: Gather info about local roles on an ESXi host @@ -33,9 +34,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather info about local role from an ESXi vmware_local_role_info: hostname: '{{ esxi_hostname }}' @@ -48,9 +49,9 @@ admin_priv: "{{ fact_details.local_role_info['Admin']['privileges'] }}" - debug: msg: "{{ admin_priv }}" -''' +""" -RETURN = r''' +RETURN = r""" local_role_info: description: Info about role present on ESXi host returned: 
always @@ -89,14 +90,18 @@ "role_system": true } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VMwareLocalRoleInfo(PyVmomi): """Class to manage local role info""" + def __init__(self, module): super(VMwareLocalRoleInfo, self).__init__(module) self.module = module @@ -105,7 +110,8 @@ def __init__(self, module): if self.content.authorizationManager is None: self.module.fail_json( msg="Failed to get local authorization manager settings.", - details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.params['hostname'] + details="It seems that '%s' is a vCenter server instead of an ESXi server" + % self.params["hostname"], ) def gather_local_role_info(self): @@ -129,12 +135,13 @@ def gather_local_role_info(self): def main(): """Main""" argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_local_role_info = VMwareLocalRoleInfo(module) vmware_local_role_info.gather_local_role_info() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_local_role_manager.py b/plugins/modules/vmware_local_role_manager.py index 41be9ea..09c6fe8 100644 --- a/plugins/modules/vmware_local_role_manager.py +++ b/plugins/modules/vmware_local_role_manager.py @@ -6,17 +6,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_local_role_manager short_description: Manage local roles on an ESXi host @@ -67,9 +68,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add local role to ESXi vmware_local_role_manager: hostname: '{{ esxi_hostname }}' @@ -127,9 +128,9 @@ local_privilege_ids: [ 'Folder.Create' ] action: set delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" role_name: description: Name of local role returned: always @@ -159,7 +160,7 @@ description: List of privileges of role before the update returned: on update type: list -''' +""" try: from pyVmomi import vim, vmodl @@ -167,7 +168,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VMwareLocalRoleManager(PyVmomi): @@ -177,33 +181,36 @@ def __init__(self, module): super(VMwareLocalRoleManager, self).__init__(module) self.module = module self.params = module.params - self.role_name = self.params['local_role_name'] - self.state = self.params['state'] - self.priv_ids = self.params['local_privilege_ids'] - self.force = not self.params['force_remove'] + self.role_name = self.params["local_role_name"] + self.state = self.params["state"] + self.priv_ids = self.params["local_privilege_ids"] + self.force = not 
self.params["force_remove"] self.current_role = None - self.action = self.params['action'] + self.action = self.params["action"] if self.content.authorizationManager is None: self.module.fail_json( msg="Failed to get local authorization manager settings.", - details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.params['hostname'] + details="It seems that '%s' is a vCenter server instead of an ESXi server" + % self.params["hostname"], ) def process_state(self): """Process the state of the local role""" local_role_manager_states = { - 'absent': { - 'present': self.state_remove_role, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_remove_role, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_update_role, + "absent": self.state_create_role, }, - 'present': { - 'present': self.state_update_role, - 'absent': self.state_create_role, - } } try: - local_role_manager_states[self.state][self.check_local_role_manager_state()]() + local_role_manager_states[self.state][ + self.check_local_role_manager_state() + ]() except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: @@ -216,8 +223,8 @@ def check_local_role_manager_state(self): auth_role = self.find_authorization_role() if auth_role: self.current_role = auth_role - return 'present' - return 'absent' + return "present" + return "absent" def find_authorization_role(self): """Find local role""" @@ -231,79 +238,85 @@ def state_create_role(self): """Create local role""" role_id = None results = dict() - results['role_name'] = self.role_name - results['privileges'] = self.priv_ids + results["role_name"] = self.role_name + results["privileges"] = self.priv_ids # NOTE: the following code is deprecated from 2.11 onwards - results['local_role_name'] = self.role_name - results['new_privileges'] = self.priv_ids + results["local_role_name"] = self.role_name + results["new_privileges"] = self.priv_ids if self.module.check_mode: - results['msg'] = "Role would be created" + results["msg"] = "Role would be created" else: try: role_id = self.content.authorizationManager.AddAuthorizationRole( - name=self.role_name, - privIds=self.priv_ids + name=self.role_name, privIds=self.priv_ids ) - results['role_id'] = role_id - results['msg'] = "Role created" + results["role_id"] = role_id + results["msg"] = "Role created" except vim.fault.AlreadyExists as already_exists: self.module.fail_json( - msg="Failed to create role '%s' as the user specified role name already exists." % - self.role_name, details=already_exists.msg + msg="Failed to create role '%s' as the user specified role name already exists." 
+                    % self.role_name,
+                    details=already_exists.msg,
                 )
             except vim.fault.InvalidName as invalid_name:
                 self.module.fail_json(
-                    msg="Failed to create a role %s as the user specified role name is empty" %
-                    self.role_name, details=invalid_name.msg
+                    msg="Failed to create a role %s as the user specified role name is empty"
+                    % self.role_name,
+                    details=invalid_name.msg,
                 )
             except vmodl.fault.InvalidArgument as invalid_argument:
                 self.module.fail_json(
-                    msg="Failed to create a role %s as the user specified privileges are unknown" %
-                    self.role_name, etails=invalid_argument.msg
+                    msg="Failed to create a role %s as the user specified privileges are unknown"
+                    % self.role_name,
+                    details=invalid_argument.msg,
                 )
         self.module.exit_json(changed=True, result=results)
 
     def state_remove_role(self):
         """Remove local role"""
         results = dict()
-        results['role_name'] = self.role_name
-        results['role_id'] = self.current_role.roleId
+        results["role_name"] = self.role_name
+        results["role_id"] = self.current_role.roleId
         # NOTE: the following code is deprecated from 2.11 onwards
-        results['local_role_name'] = self.role_name
+        results["local_role_name"] = self.role_name
         if self.module.check_mode:
-            results['msg'] = "Role would be deleted"
+            results["msg"] = "Role would be deleted"
         else:
             try:
                 self.content.authorizationManager.RemoveAuthorizationRole(
-                    roleId=self.current_role.roleId,
-                    failIfUsed=self.force
+                    roleId=self.current_role.roleId, failIfUsed=self.force
                 )
-                results['msg'] = "Role deleted"
+                results["msg"] = "Role deleted"
             except vim.fault.NotFound as not_found:
                 self.module.fail_json(
-                    msg="Failed to remove a role %s as the user specified role name does not exist." %
-                    self.role_name, details=not_found.msg
+                    msg="Failed to remove a role %s as the user specified role name does not exist."
+                    % self.role_name,
+                    details=not_found.msg,
                 )
             except vim.fault.RemoveFailed as remove_failed:
-                msg = "Failed to remove role '%s' as the user specified role name." % self.role_name
+                msg = (
+                    "Failed to remove role '%s' as the user specified role name."
+                    % self.role_name
+                )
                 if self.force:
                     msg += " Use force_remove as True."
self.module.fail_json(msg=msg, details=remove_failed.msg) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( - msg="Failed to remove a role %s as the user specified role is a system role" % - self.role_name, details=invalid_argument.msg + msg="Failed to remove a role %s as the user specified role is a system role" + % self.role_name, + details=invalid_argument.msg, ) self.module.exit_json(changed=True, result=results) def state_exit_unchanged(self): """Don't do anything""" results = dict() - results['role_name'] = self.role_name + results["role_name"] = self.role_name # NOTE: the following code is deprecated from 2.11 onwards - results['local_role_name'] = self.role_name - results['msg'] = "Role not present" + results["local_role_name"] = self.role_name + results["msg"] = "Role not present" self.module.exit_json(changed=False, result=results) def state_update_role(self): @@ -311,63 +324,69 @@ def state_update_role(self): changed = False changed_privileges = [] results = dict() - results['role_name'] = self.role_name - results['role_id'] = self.current_role.roleId + results["role_name"] = self.role_name + results["role_id"] = self.current_role.roleId # NOTE: the following code is deprecated from 2.11 onwards - results['local_role_name'] = self.role_name + results["local_role_name"] = self.role_name current_privileges = self.current_role.privilege - results['privileges'] = current_privileges + results["privileges"] = current_privileges # NOTE: the following code is deprecated from 2.11 onwards - results['new_privileges'] = current_privileges + results["new_privileges"] = current_privileges - if self.action == 'add': + if self.action == "add": # Add to existing privileges - for priv in self.params['local_privilege_ids']: + for priv in self.params["local_privilege_ids"]: if priv not in current_privileges: changed_privileges.append(priv) changed = True if changed: changed_privileges.extend(current_privileges) - elif self.action == 'set': + elif self.action == "set": # Set given privileges # Add system-defined privileges, "System.Anonymous", "System.View", and "System.Read". 
- self.params['local_privilege_ids'].extend(['System.Anonymous', 'System.Read', 'System.View']) - changed_privileges = self.params['local_privilege_ids'] - changes_applied = list(set(current_privileges) ^ set(changed_privileges)) + self.params["local_privilege_ids"].extend( + ["System.Anonymous", "System.Read", "System.View"] + ) + changed_privileges = self.params["local_privilege_ids"] + changes_applied = list( + set(current_privileges) ^ set(changed_privileges) + ) if changes_applied: changed = True - elif self.action == 'remove': + elif self.action == "remove": changed_privileges = list(current_privileges) # Remove given privileges from existing privileges - for priv in self.params['local_privilege_ids']: + for priv in self.params["local_privilege_ids"]: if priv in current_privileges: changed = True changed_privileges.remove(priv) if changed: - results['privileges'] = changed_privileges - results['privileges_previous'] = current_privileges + results["privileges"] = changed_privileges + results["privileges_previous"] = current_privileges # NOTE: the following code is deprecated from 2.11 onwards - results['new_privileges'] = changed_privileges - results['old_privileges'] = current_privileges + results["new_privileges"] = changed_privileges + results["old_privileges"] = current_privileges if self.module.check_mode: - results['msg'] = "Role privileges would be updated" + results["msg"] = "Role privileges would be updated" else: try: self.content.authorizationManager.UpdateAuthorizationRole( roleId=self.current_role.roleId, newName=self.current_role.name, - privIds=changed_privileges + privIds=changed_privileges, ) - results['msg'] = "Role privileges updated" + results["msg"] = "Role privileges updated" except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Failed to update role. Please check privileges provided for update", details=not_found.msg + msg="Failed to update role. 
Please check privileges provided for update", + details=not_found.msg, ) except vim.fault.InvalidName as invalid_name: self.module.fail_json( - msg="Failed to update role as role name is empty", details=invalid_name.msg + msg="Failed to update role as role name is empty", + details=invalid_name.msg, ) except vim.fault.AlreadyExists as already_exists: self.module.fail_json( @@ -376,37 +395,42 @@ def state_update_role(self): except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( msg="Failed to update role as user specified role is system role which can not be changed", - details=invalid_argument.msg + details=invalid_argument.msg, ) except vim.fault.NoPermission as no_permission: self.module.fail_json( msg="Failed to update role as current session doesn't have any privilege to update specified role", - details=no_permission.msg + details=no_permission.msg, ) else: - results['msg'] = "Role privileges are properly configured" + results["msg"] = "Role privileges are properly configured" self.module.exit_json(changed=changed, result=results) def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(local_role_name=dict(required=True, type='str'), - local_privilege_ids=dict(default=[], type='list'), - force_remove=dict(default=False, type='bool'), - action=dict(type='str', default='set', choices=[ - 'add', - 'set', - 'remove', - ]), - state=dict(default='present', choices=['present', 'absent'], type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + argument_spec.update( + dict( + local_role_name=dict(required=True, type="str"), + local_privilege_ids=dict(default=[], type="list"), + force_remove=dict(default=False, type="bool"), + action=dict( + type="str", default="set", choices=["add", "set", "remove"] + ), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_local_role_manager = VMwareLocalRoleManager(module) vmware_local_role_manager.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_local_user_info.py b/plugins/modules/vmware_local_user_info.py index e7b8f0a..7e883f6 100644 --- a/plugins/modules/vmware_local_user_info.py +++ b/plugins/modules/vmware_local_user_info.py @@ -5,17 +5,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_local_user_info short_description: Gather info about users on the given ESXi host @@ -34,9 +35,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather info about all Users on given ESXi host system vmware_local_user_info: hostname: '{{ esxi_hostname }}' @@ -44,9 +45,9 @@ password: '{{ esxi_password }}' delegate_to: localhost register: all_user_info -''' +""" -RETURN = r''' +RETURN = r""" local_user_info: description: metadata about all local users returned: always @@ -69,7 +70,7 @@ "shell_access": false }, ] -''' +""" try: from pyVmomi import vmodl @@ -77,44 +78,54 @@ pass from ansible.module_utils.basic import AnsibleModule 
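# --- Illustrative sketch (not part of the patch): the module-specific option
# table declared in main() above, written out as a plain dict so the accepted
# parameters of vmware_local_role_manager are easy to scan. vmware_argument_spec()
# additionally contributes the collection's shared connection options (typically
# hostname, username, password, port, validate_certs); those are not repeated
# here. validate_action() is a hypothetical helper standing in for the choice
# check AnsibleModule performs, included only to show how the spec is consumed.

ROLE_MANAGER_OPTIONS = dict(
    local_role_name=dict(required=True, type="str"),
    local_privilege_ids=dict(default=[], type="list"),
    force_remove=dict(default=False, type="bool"),
    action=dict(type="str", default="set", choices=["add", "set", "remove"]),
    state=dict(default="present", choices=["present", "absent"], type="str"),
)

def validate_action(params, spec=ROLE_MANAGER_OPTIONS):
    """Minimal stand-in for AnsibleModule's choices validation of 'action'."""
    action = params.get("action", spec["action"]["default"])
    if action not in spec["action"]["choices"]:
        raise ValueError("action must be one of %s" % (spec["action"]["choices"],))
    return action

# Example: when 'action' is omitted, the declared default "set" is used.
assert validate_action({"local_role_name": "vmware_qa"}) == "set"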
-from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native class VMwareUserInfoManager(PyVmomi): """Class to manage local user info""" + def __init__(self, module): super(VMwareUserInfoManager, self).__init__(module) if self.is_vcenter(): self.module.fail_json( msg="Failed to get local account manager settings.", - details="It seems that '%s' is a vCenter server instead of an ESXi server" % self.module.params['hostname'] + details="It seems that '%s' is a vCenter server instead of an ESXi server" + % self.module.params["hostname"], ) def gather_user_info(self): """Gather info about local users""" results = dict(changed=False, local_user_info=[]) - search_string = '' + search_string = "" exact_match = False find_users = True find_groups = False user_accounts = self.content.userDirectory.RetrieveUserGroups( - None, search_string, None, None, exact_match, find_users, find_groups + None, + search_string, + None, + None, + exact_match, + find_users, + find_groups, ) if user_accounts: for user in user_accounts: temp_user = dict() - temp_user['user_name'] = user.principal - temp_user['description'] = user.fullName - temp_user['group'] = user.group - temp_user['user_id'] = user.id - temp_user['shell_access'] = user.shellAccess - temp_user['role'] = None + temp_user["user_name"] = user.principal + temp_user["description"] = user.fullName + temp_user["group"] = user.group + temp_user["user_id"] = user.id + temp_user["shell_access"] = user.shellAccess + temp_user["role"] = None try: permissions = self.content.authorizationManager.RetrieveEntityPermissions( - entity=self.content.rootFolder, - inherited=False + entity=self.content.rootFolder, inherited=False ) except vmodl.fault.ManagedObjectNotFound as not_found: self.module.fail_json( @@ -122,10 +133,13 @@ def gather_user_info(self): ) for permission in permissions: if permission.principal == user.principal: - temp_user['role'] = self.get_role_name(permission.roleId, self.content.authorizationManager.roleList) + temp_user["role"] = self.get_role_name( + permission.roleId, + self.content.authorizationManager.roleList, + ) break - results['local_user_info'].append(temp_user) + results["local_user_info"].append(temp_user) self.module.exit_json(**results) @staticmethod @@ -134,13 +148,13 @@ def get_role_name(role_id, role_list): role_name = None # Default role: No access if role_id == -5: - role_name = 'no-access' + role_name = "no-access" # Default role: Read-only elif role_id == -2: - role_name = 'read-only' + role_name = "read-only" # Default role: Administrator elif role_id == -1: - role_name = 'admin' + role_name = "admin" # Custom roles else: for role in role_list: @@ -153,12 +167,13 @@ def get_role_name(role_id, role_list): def main(): """Main""" argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_local_user_info = VMwareUserInfoManager(module) vmware_local_user_info.gather_user_info() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_local_user_manager.py b/plugins/modules/vmware_local_user_manager.py index 412ae80..9c7ce99 100644 --- a/plugins/modules/vmware_local_user_manager.py +++ b/plugins/modules/vmware_local_user_manager.py 
@@ -7,17 +7,18 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_local_user_manager short_description: Manage local users on an ESXi host @@ -56,9 +57,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add local user to ESXi vmware_local_user_manager: hostname: esxi_hostname @@ -66,9 +67,9 @@ password: vmware local_user_name: foo delegate_to: localhost -''' +""" -RETURN = '''# ''' +RETURN = """# """ try: from pyVmomi import vim, vmodl @@ -76,38 +77,46 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class VMwareLocalUserManager(PyVmomi): - def __init__(self, module): super(VMwareLocalUserManager, self).__init__(module) - self.local_user_name = self.module.params['local_user_name'] - self.local_user_password = self.module.params['local_user_password'] - self.local_user_description = self.module.params['local_user_description'] - self.state = self.module.params['state'] + self.local_user_name = self.module.params["local_user_name"] + self.local_user_password = self.module.params["local_user_password"] + self.local_user_description = self.module.params[ + "local_user_description" + ] + self.state = self.module.params["state"] if self.is_vcenter(): - self.module.fail_json(msg="Failed to get local account manager settings " - "from ESXi server: %s" % self.module.params['hostname'], - details="It seems that %s is a vCenter server instead of an " - "ESXi server" % self.module.params['hostname']) + self.module.fail_json( + msg="Failed to get local account manager settings " + "from ESXi server: %s" % self.module.params["hostname"], + details="It seems that %s is a vCenter server instead of an " + "ESXi server" % self.module.params["hostname"], + ) def process_state(self): try: local_account_manager_states = { - 'absent': { - 'present': self.state_remove_user, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_remove_user, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_update_user, + "absent": self.state_create_user, }, - 'present': { - 'present': self.state_update_user, - 'absent': self.state_create_user, - } } - local_account_manager_states[self.state][self.check_local_user_manager_state()]() + local_account_manager_states[self.state][ + self.check_local_user_manager_state() + ]() except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: @@ -118,16 +127,18 @@ def process_state(self): def check_local_user_manager_state(self): user_account = self.find_user_account() if not user_account: - return 'absent' + return "absent" else: - return 'present' + return "present" def find_user_account(self): searchStr = self.local_user_name exactMatch = True findUsers = True findGroups = False - user_account = self.content.userDirectory.RetrieveUserGroups(None, searchStr, None, None, exactMatch, 
findUsers, findGroups) + user_account = self.content.userDirectory.RetrieveUserGroups( + None, searchStr, None, None, exactMatch, findUsers, findGroups + ) return user_account def create_account_spec(self): @@ -174,17 +185,24 @@ def state_exit_unchanged(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(local_user_name=dict(required=True, type='str'), - local_user_password=dict(type='str', no_log=True), - local_user_description=dict(type='str'), - state=dict(default='present', choices=['present', 'absent'], type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=False) + argument_spec.update( + dict( + local_user_name=dict(required=True, type="str"), + local_user_password=dict(type="str", no_log=True), + local_user_description=dict(type="str"), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) vmware_local_user_manager = VMwareLocalUserManager(module) vmware_local_user_manager.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_maintenancemode.py b/plugins/modules/vmware_maintenancemode.py index 76d42e2..19fa125 100644 --- a/plugins/modules/vmware_maintenancemode.py +++ b/plugins/modules/vmware_maintenancemode.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_maintenancemode short_description: Place a host into maintenance mode @@ -70,9 +71,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Enter VSAN-Compliant Maintenance Mode vmware_maintenancemode: hostname: "{{ vcenter_hostname }}" @@ -84,9 +85,9 @@ timeout: 3600 state: present delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = """ hostsystem: description: Name of vim reference returned: always @@ -102,7 +103,7 @@ returned: always type: str sample: "ENTER" -''' +""" try: from pyVmomi import vim @@ -110,26 +111,35 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, TaskError, vmware_argument_spec, wait_for_task +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + TaskError, + vmware_argument_spec, + wait_for_task, +) from ansible.module_utils._text import to_native class VmwareMaintenanceMgr(PyVmomi): def __init__(self, module): super(VmwareMaintenanceMgr, self).__init__(module) - self.esxi_hostname = self.module.params.get('esxi_hostname') - self.vsan = self.module.params.get('vsan', None) + self.esxi_hostname = self.module.params.get("esxi_hostname") + self.vsan = self.module.params.get("vsan", None) self.host = self.find_hostsystem_by_name(host_name=self.esxi_hostname) if not self.host: - self.module.fail_json(msg='Host %s not found in vCenter' % self.esxi_hostname) + self.module.fail_json( + msg="Host %s not found in vCenter" % self.esxi_hostname + ) def EnterMaintenanceMode(self): if self.host.runtime.inMaintenanceMode: - self.module.exit_json(changed=False, - 
hostsystem=str(self.host), - hostname=self.esxi_hostname, - status='NO_ACTION', - msg='Host %s already in maintenance mode' % self.esxi_hostname) + self.module.exit_json( + changed=False, + hostsystem=str(self.host), + hostname=self.esxi_hostname, + status="NO_ACTION", + msg="Host %s already in maintenance mode" % self.esxi_hostname, + ) spec = vim.host.MaintenanceSpec() @@ -138,67 +148,92 @@ def EnterMaintenanceMode(self): spec.vsanMode.objectAction = self.vsan try: - task = self.host.EnterMaintenanceMode_Task(self.module.params['timeout'], - self.module.params['evacuate'], - spec) + task = self.host.EnterMaintenanceMode_Task( + self.module.params["timeout"], + self.module.params["evacuate"], + spec, + ) success, result = wait_for_task(task) - self.module.exit_json(changed=success, - hostsystem=str(self.host), - hostname=self.esxi_hostname, - status='ENTER', - msg='Host %s entered maintenance mode' % self.esxi_hostname) + self.module.exit_json( + changed=success, + hostsystem=str(self.host), + hostname=self.esxi_hostname, + status="ENTER", + msg="Host %s entered maintenance mode" % self.esxi_hostname, + ) except TaskError as e: - self.module.fail_json(msg='Host %s failed to enter maintenance mode due to %s' % (self.esxi_hostname, to_native(e))) + self.module.fail_json( + msg="Host %s failed to enter maintenance mode due to %s" + % (self.esxi_hostname, to_native(e)) + ) def ExitMaintenanceMode(self): if not self.host.runtime.inMaintenanceMode: - self.module.exit_json(changed=False, - hostsystem=str(self.host), - hostname=self.esxi_hostname, - status='NO_ACTION', - msg='Host %s not in maintenance mode' % self.esxi_hostname) + self.module.exit_json( + changed=False, + hostsystem=str(self.host), + hostname=self.esxi_hostname, + status="NO_ACTION", + msg="Host %s not in maintenance mode" % self.esxi_hostname, + ) try: - task = self.host.ExitMaintenanceMode_Task(self.module.params['timeout']) + task = self.host.ExitMaintenanceMode_Task( + self.module.params["timeout"] + ) success, result = wait_for_task(task) - self.module.exit_json(changed=success, - hostsystem=str(self.host), - hostname=self.esxi_hostname, - status='EXIT', - msg='Host %s exited maintenance mode' % self.esxi_hostname) + self.module.exit_json( + changed=success, + hostsystem=str(self.host), + hostname=self.esxi_hostname, + status="EXIT", + msg="Host %s exited maintenance mode" % self.esxi_hostname, + ) except TaskError as e: - self.module.fail_json(msg='Host %s failed to exit maintenance mode due to %s' % (self.esxi_hostname, to_native(e))) + self.module.fail_json( + msg="Host %s failed to exit maintenance mode due to %s" + % (self.esxi_hostname, to_native(e)) + ) def main(): spec = vmware_argument_spec() - spec.update(dict(esxi_hostname=dict(type='str', required=True), - vsan=dict(type='str', - choices=['ensureObjectAccessibility', - 'evacuateAllData', - 'noAction'], - aliases=['vsan_mode'], - ), - evacuate=dict(type='bool', default=False), - timeout=dict(default=0, type='int'), - state=dict(required=False, default='present', choices=['present', 'absent']) - ) - ) + spec.update( + dict( + esxi_hostname=dict(type="str", required=True), + vsan=dict( + type="str", + choices=[ + "ensureObjectAccessibility", + "evacuateAllData", + "noAction", + ], + aliases=["vsan_mode"], + ), + evacuate=dict(type="bool", default=False), + timeout=dict(default=0, type="int"), + state=dict( + required=False, + default="present", + choices=["present", "absent"], + ), + ) + ) module = AnsibleModule(argument_spec=spec) host_maintenance_mgr = 
VmwareMaintenanceMgr(module=module) - if module.params['state'] == 'present': + if module.params["state"] == "present": host_maintenance_mgr.EnterMaintenanceMode() - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": host_maintenance_mgr.ExitMaintenanceMode() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_migrate_vmk.py b/plugins/modules/vmware_migrate_vmk.py index 934c4fd..fbfcf71 100644 --- a/plugins/modules/vmware_migrate_vmk.py +++ b/plugins/modules/vmware_migrate_vmk.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_migrate_vmk short_description: Migrate a VMK interface from VSS to VDS @@ -59,9 +62,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Migrate Management vmk vmware_migrate_vmk: hostname: "{{ vcenter_hostname }}" @@ -74,37 +77,46 @@ migrate_switch_name: dvSwitch migrate_portgroup_name: Management delegate_to: localhost -''' +""" try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name, - connect_to_api, find_dvspg_by_name) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + find_dvs_by_name, + find_hostsystem_by_name, + connect_to_api, + find_dvspg_by_name, +) class VMwareMigrateVmk(object): - def __init__(self, module): self.module = module self.host_system = None - self.migrate_switch_name = self.module.params['migrate_switch_name'] - self.migrate_portgroup_name = self.module.params['migrate_portgroup_name'] - self.device = self.module.params['device'] - self.esxi_hostname = self.module.params['esxi_hostname'] - self.current_portgroup_name = self.module.params['current_portgroup_name'] - self.current_switch_name = self.module.params['current_switch_name'] + self.migrate_switch_name = self.module.params["migrate_switch_name"] + self.migrate_portgroup_name = self.module.params[ + "migrate_portgroup_name" + ] + self.device = self.module.params["device"] + self.esxi_hostname = self.module.params["esxi_hostname"] + self.current_portgroup_name = self.module.params[ + "current_portgroup_name" + ] + self.current_switch_name = self.module.params["current_switch_name"] self.content = connect_to_api(module) def process_state(self): try: vmk_migration_states = { - 'migrate_vss_vds': self.state_migrate_vss_vds, - 'migrate_vds_vss': self.state_migrate_vds_vss, - 'migrated': self.state_exit_unchanged + "migrate_vss_vds": self.state_migrate_vss_vds, + "migrate_vds_vss": self.state_migrate_vds_vss, + "migrated": self.state_exit_unchanged, } vmk_migration_states[self.check_vmk_current_state()]() @@ -130,8 +142,12 @@ def create_host_vnic_config(self, dv_switch_uuid, portgroup_key): host_vnic_config.device = self.device host_vnic_config.portgroup = "" host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection() - 
host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid - host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key + host_vnic_config.spec.distributedVirtualPort.switchUuid = ( + dv_switch_uuid + ) + host_vnic_config.spec.distributedVirtualPort.portgroupKey = ( + portgroup_key + ) return host_vnic_config @@ -160,16 +176,22 @@ def state_migrate_vss_vds(self): self.module.exit_json(changed=True) def check_vmk_current_state(self): - self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname) + self.host_system = find_hostsystem_by_name( + self.content, self.esxi_hostname + ) - for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic: + for ( + vnic + ) in self.host_system.configManager.networkSystem.networkInfo.vnic: if vnic.device == self.device: # self.vnic = vnic if vnic.spec.distributedVirtualPort is None: if vnic.portgroup == self.current_portgroup_name: return "migrate_vss_vds" else: - dvs = find_dvs_by_name(self.content, self.current_switch_name) + dvs = find_dvs_by_name( + self.content, self.current_switch_name + ) if dvs is None: return "migrated" if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid: @@ -179,21 +201,27 @@ def check_vmk_current_state(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'), - device=dict(required=True, type='str'), - current_switch_name=dict(required=True, type='str'), - current_portgroup_name=dict(required=True, type='str'), - migrate_switch_name=dict(required=True, type='str'), - migrate_portgroup_name=dict(required=True, type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + argument_spec.update( + dict( + esxi_hostname=dict(required=True, type="str"), + device=dict(required=True, type="str"), + current_switch_name=dict(required=True, type="str"), + current_portgroup_name=dict(required=True, type="str"), + migrate_switch_name=dict(required=True, type="str"), + migrate_portgroup_name=dict(required=True, type="str"), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi required for this module') + module.fail_json(msg="pyvmomi required for this module") vmware_migrate_vmk = VMwareMigrateVmk(module) vmware_migrate_vmk.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_object_role_permission.py b/plugins/modules/vmware_object_role_permission.py index 9d75ddd..51c2dc5 100644 --- a/plugins/modules/vmware_object_role_permission.py +++ b/plugins/modules/vmware_object_role_permission.py @@ -11,12 +11,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_object_role_permission short_description: Manage local roles on an ESXi host @@ -76,9 +76,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Assign user to VM folder vmware_object_role_permission: role: Admin @@ -110,14 +110,14 @@ object_name: rootFolder state: present delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" changed: description: whether or not a change was made to the object's role returned: always type: bool -''' +""" try: from pyVmomi import 
vim, vmodl @@ -126,7 +126,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec, find_obj +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + find_obj, +) class VMwareObjectRolePermission(PyVmomi): @@ -136,41 +140,45 @@ def __init__(self, module): self.params = module.params self.is_group = False - if self.params.get('principal', None) is not None: - self.applied_to = self.params['principal'] - elif self.params.get('group', None) is not None: - self.applied_to = self.params['group'] + if self.params.get("principal", None) is not None: + self.applied_to = self.params["principal"] + elif self.params.get("group", None) is not None: + self.applied_to = self.params["group"] self.is_group = True self.get_role() self.get_object() self.get_perms() self.perm = self.setup_permission() - self.state = self.params['state'] + self.state = self.params["state"] def get_perms(self): - self.current_perms = self.content.authorizationManager.RetrieveEntityPermissions(self.current_obj, False) + self.current_perms = self.content.authorizationManager.RetrieveEntityPermissions( + self.current_obj, False + ) def same_permission(self, perm_one, perm_two): - return perm_one.principal.lower() == perm_two.principal.lower() \ + return ( + perm_one.principal.lower() == perm_two.principal.lower() and perm_one.roleId == perm_two.roleId + ) def get_state(self): for perm in self.current_perms: if self.same_permission(self.perm, perm): - return 'present' - return 'absent' + return "present" + return "absent" def process_state(self): local_permission_states = { - 'absent': { - 'present': self.remove_permission, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.remove_permission, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_exit_unchanged, + "absent": self.add_permission, }, - 'present': { - 'present': self.state_exit_unchanged, - 'absent': self.add_permission, - } } try: local_permission_states[self.state][self.get_state()]() @@ -190,49 +198,64 @@ def setup_permission(self): perm.group = self.is_group perm.principal = self.applied_to perm.roleId = self.role.roleId - perm.propagate = self.params['recursive'] + perm.propagate = self.params["recursive"] return perm def add_permission(self): if not self.module.check_mode: - self.content.authorizationManager.SetEntityPermissions(self.current_obj, [self.perm]) + self.content.authorizationManager.SetEntityPermissions( + self.current_obj, [self.perm] + ) self.module.exit_json(changed=True) def remove_permission(self): if not self.module.check_mode: - self.content.authorizationManager.RemoveEntityPermission(self.current_obj, self.applied_to, self.is_group) + self.content.authorizationManager.RemoveEntityPermission( + self.current_obj, self.applied_to, self.is_group + ) self.module.exit_json(changed=True) def get_role(self): for role in self.content.authorizationManager.roleList: - if role.name == self.params['role']: + if role.name == self.params["role"]: self.role = role return - self.module.fail_json(msg="Specified role (%s) was not found" % self.params['role']) + self.module.fail_json( + msg="Specified role (%s) was not found" % self.params["role"] + ) def get_object(self): # find_obj doesn't include rootFolder - if self.params['object_type'] == 'Folder' and self.params['object_name'] == 'rootFolder': + 
if ( + self.params["object_type"] == "Folder" + and self.params["object_name"] == "rootFolder" + ): self.current_obj = self.content.rootFolder return try: - object_type = getattr(vim, self.params['object_type']) + object_type = getattr(vim, self.params["object_type"]) except AttributeError: - self.module.fail_json(msg="Object type %s is not valid." % self.params['object_type']) - self.current_obj = find_obj(content=self.content, - vimtype=[getattr(vim, self.params['object_type'])], - name=self.params['object_name']) + self.module.fail_json( + msg="Object type %s is not valid." % self.params["object_type"] + ) + self.current_obj = find_obj( + content=self.content, + vimtype=[getattr(vim, self.params["object_type"])], + name=self.params["object_name"], + ) if self.current_obj is None: self.module.fail_json( msg="Specified object %s of type %s was not found." - % (self.params['object_name'], self.params['object_type']) + % (self.params["object_name"], self.params["object_type"]) + ) + if self.params["object_type"] == "DistributedVirtualSwitch": + msg = ( + "You are applying permissions to a Distributed vSwitch. " + "This will probably fail, since Distributed vSwitches inherits permissions " + "from the datacenter or a folder level. " + "Define permissions on the datacenter or the folder containing the switch." ) - if self.params['object_type'] == 'DistributedVirtualSwitch': - msg = "You are applying permissions to a Distributed vSwitch. " \ - "This will probably fail, since Distributed vSwitches inherits permissions " \ - "from the datacenter or a folder level. " \ - "Define permissions on the datacenter or the folder containing the switch." self.module.warn(msg) @@ -240,41 +263,43 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - role=dict(required=True, type='str'), - object_name=dict(required=True, type='str'), + role=dict(required=True, type="str"), + object_name=dict(required=True, type="str"), object_type=dict( - type='str', - default='Folder', + type="str", + default="Folder", choices=[ - 'Folder', - 'VirtualMachine', - 'Datacenter', - 'ResourcePool', - 'Datastore', - 'Network', - 'HostSystem', - 'ComputeResource', - 'ClusterComputeResource', - 'DistributedVirtualSwitch', + "Folder", + "VirtualMachine", + "Datacenter", + "ResourcePool", + "Datastore", + "Network", + "HostSystem", + "ComputeResource", + "ClusterComputeResource", + "DistributedVirtualSwitch", ], ), - principal=dict(type='str'), - group=dict(type='str'), - recursive=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent'], type='str'), + principal=dict(type="str"), + group=dict(type="str"), + recursive=dict(type="bool", default=True), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['principal', 'group']], - required_one_of=[['principal', 'group']], + mutually_exclusive=[["principal", "group"]], + required_one_of=[["principal", "group"]], ) vmware_object_permission = VMwareObjectRolePermission(module) vmware_object_permission.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_portgroup.py b/plugins/modules/vmware_portgroup.py index dcbe73a..5d4bf9b 100644 --- a/plugins/modules/vmware_portgroup.py +++ b/plugins/modules/vmware_portgroup.py @@ -8,15 +8,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from 
__future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_portgroup short_description: Create a VMware portgroup @@ -121,9 +122,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add Management Network VM Portgroup vmware_portgroup: hostname: "{{ esxi_hostname }}" @@ -208,9 +209,9 @@ - vmnic1 delegate_to: localhost register: teaming_result -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about the portgroup returned: always @@ -234,7 +235,7 @@ "vswitch": "vSwitch1" } } -''' +""" try: @@ -243,7 +244,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native @@ -254,46 +258,73 @@ def __init__(self, module): super(VMwareHostPortGroup, self).__init__(module) self.switch_object = None self.portgroup_object = None - hosts = self.params['hosts'] - cluster = self.params['cluster_name'] - self.portgroup = self.params['portgroup'] - self.switch = self.params['switch'] - self.vlan_id = self.params['vlan_id'] - if self.params['security']: - self.sec_promiscuous_mode = self.params['security'].get('promiscuous_mode') - self.sec_forged_transmits = self.params['security'].get('forged_transmits') - self.sec_mac_changes = self.params['security'].get('mac_changes') + hosts = self.params["hosts"] + cluster = self.params["cluster_name"] + self.portgroup = self.params["portgroup"] + self.switch = self.params["switch"] + self.vlan_id = self.params["vlan_id"] + if self.params["security"]: + self.sec_promiscuous_mode = self.params["security"].get( + "promiscuous_mode" + ) + self.sec_forged_transmits = self.params["security"].get( + "forged_transmits" + ) + self.sec_mac_changes = self.params["security"].get("mac_changes") else: self.sec_promiscuous_mode = None self.sec_forged_transmits = None self.sec_mac_changes = None - if self.params['traffic_shaping']: - self.ts_enabled = self.params['traffic_shaping'].get('enabled') - for value in ['average_bandwidth', 'peak_bandwidth', 'burst_size']: - if not self.params['traffic_shaping'].get(value): - self.module.fail_json(msg="traffic_shaping.%s is a required parameter if traffic_shaping is enabled." % value) - self.ts_average_bandwidth = self.params['traffic_shaping'].get('average_bandwidth') - self.ts_peak_bandwidth = self.params['traffic_shaping'].get('peak_bandwidth') - self.ts_burst_size = self.params['traffic_shaping'].get('burst_size') + if self.params["traffic_shaping"]: + self.ts_enabled = self.params["traffic_shaping"].get("enabled") + for value in ["average_bandwidth", "peak_bandwidth", "burst_size"]: + if not self.params["traffic_shaping"].get(value): + self.module.fail_json( + msg="traffic_shaping.%s is a required parameter if traffic_shaping is enabled." 
+ % value + ) + self.ts_average_bandwidth = self.params["traffic_shaping"].get( + "average_bandwidth" + ) + self.ts_peak_bandwidth = self.params["traffic_shaping"].get( + "peak_bandwidth" + ) + self.ts_burst_size = self.params["traffic_shaping"].get( + "burst_size" + ) else: self.ts_enabled = None self.ts_average_bandwidth = None self.ts_peak_bandwidth = None self.ts_burst_size = None - if self.params['teaming']: - self.teaming_load_balancing = self.params['teaming'].get('load_balancing') - self.teaming_failure_detection = self.params['teaming'].get('network_failure_detection') - self.teaming_notify_switches = self.params['teaming'].get('notify_switches') - self.teaming_failback = self.params['teaming'].get('failback') - self.teaming_failover_order_active = self.params['teaming'].get('active_adapters') - self.teaming_failover_order_standby = self.params['teaming'].get('standby_adapters') + if self.params["teaming"]: + self.teaming_load_balancing = self.params["teaming"].get( + "load_balancing" + ) + self.teaming_failure_detection = self.params["teaming"].get( + "network_failure_detection" + ) + self.teaming_notify_switches = self.params["teaming"].get( + "notify_switches" + ) + self.teaming_failback = self.params["teaming"].get("failback") + self.teaming_failover_order_active = self.params["teaming"].get( + "active_adapters" + ) + self.teaming_failover_order_standby = self.params["teaming"].get( + "standby_adapters" + ) if self.teaming_failover_order_active is None: self.teaming_failover_order_active = [] if self.teaming_failover_order_standby is None: self.teaming_failover_order_standby = [] # NOTE: the following options are deprecated and should be removed in 2.11 - self.teaming_inbound_policy = self.module.params['teaming']['inbound_policy'] - self.teaming_rolling_order = self.module.params['teaming']['rolling_order'] + self.teaming_inbound_policy = self.module.params["teaming"][ + "inbound_policy" + ] + self.teaming_rolling_order = self.module.params["teaming"][ + "rolling_order" + ] else: self.teaming_load_balancing = None self.teaming_failure_detection = None @@ -304,11 +335,15 @@ def __init__(self, module): # NOTE: the following options are deprecated and should be removed in 2.11 self.teaming_inbound_policy = None self.teaming_rolling_order = None - self.state = self.params['state'] + self.state = self.params["state"] - self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=hosts) + self.hosts = self.get_all_host_objs( + cluster_name=cluster, esxi_host_name=hosts + ) if not self.hosts: - self.module.fail_json(msg="Failed to find host system with given configuration.") + self.module.fail_json( + msg="Failed to find host system with given configuration." 
+ ) def process_state(self): """Manage internal state of the portgroup""" @@ -316,31 +351,37 @@ def process_state(self): host_change_list = [] for host in self.hosts: changed = False - results['result'][host.name] = dict() + results["result"][host.name] = dict() switch_state = self.check_if_vswitch_exists(host_system=host) - if switch_state == 'absent': - self.module.fail_json(msg="The vSwitch '%s' doesn't exist on host '%s'" % (self.switch, host.name)) + if switch_state == "absent": + self.module.fail_json( + msg="The vSwitch '%s' doesn't exist on host '%s'" + % (self.switch, host.name) + ) portgroup_state = self.check_if_portgroup_exists(host_system=host) - if self.state == 'present' and portgroup_state == 'present': + if self.state == "present" and portgroup_state == "present": changed, host_results = self.update_host_port_group( - host_system=host, - portgroup_object=self.portgroup_object + host_system=host, portgroup_object=self.portgroup_object + ) + elif self.state == "present" and portgroup_state == "absent": + changed, host_results = self.create_host_port_group( + host_system=host + ) + elif self.state == "absent" and portgroup_state == "present": + changed, host_results = self.remove_host_port_group( + host_system=host ) - elif self.state == 'present' and portgroup_state == 'absent': - changed, host_results = self.create_host_port_group(host_system=host) - elif self.state == 'absent' and portgroup_state == 'present': - changed, host_results = self.remove_host_port_group(host_system=host) else: host_results = dict() - host_results['changed'] = False - host_results['msg'] = "Port Group already deleted" - host_results['portgroup'] = self.portgroup - results['result'][host.name] = host_results + host_results["changed"] = False + host_results["msg"] = "Port Group already deleted" + host_results["portgroup"] = self.portgroup + results["result"][host.name] = host_results host_change_list.append(changed) if any(host_change_list): - results['changed'] = True + results["changed"] = True self.module.exit_json(**results) def check_if_portgroup_exists(self, host_system): @@ -351,13 +392,15 @@ def check_if_portgroup_exists(self, host_system): self.portgroup_object = self.find_portgroup_by_name( host_system=host_system, portgroup_name=self.portgroup, - vswitch_name=self.switch + vswitch_name=self.switch, ) if self.portgroup_object is None: - return 'absent' - return 'present' + return "absent" + return "present" - def find_portgroup_by_name(self, host_system, portgroup_name, vswitch_name): + def find_portgroup_by_name( + self, host_system, portgroup_name, vswitch_name + ): """ Find and return port group managed object Args: @@ -369,10 +412,19 @@ def find_portgroup_by_name(self, host_system, portgroup_name, vswitch_name): """ portgroups = self.get_all_port_groups_by_host(host_system=host_system) for portgroup in portgroups: - if portgroup.spec.name == portgroup_name and portgroup.spec.vswitchName != vswitch_name: + if ( + portgroup.spec.name == portgroup_name + and portgroup.spec.vswitchName != vswitch_name + ): # portgroup names are unique; there can be only one portgroup with the same name per host - self.module.fail_json(msg="The portgroup already exists on vSwitch '%s'" % portgroup.spec.vswitchName) - if portgroup.spec.name == portgroup_name and portgroup.spec.vswitchName == vswitch_name: + self.module.fail_json( + msg="The portgroup already exists on vSwitch '%s'" + % portgroup.spec.vswitchName + ) + if ( + portgroup.spec.name == portgroup_name + and portgroup.spec.vswitchName == 
vswitch_name + ): return portgroup return None @@ -382,12 +434,11 @@ def check_if_vswitch_exists(self, host_system): Returns: 'present' if vSwitch exists or 'absent' if not """ self.switch_object = self.find_vswitch_by_name( - host_system=host_system, - vswitch_name=self.switch + host_system=host_system, vswitch_name=self.switch ) if self.switch_object is None: - return 'absent' - return 'present' + return "absent" + return "present" @staticmethod def find_vswitch_by_name(host_system, vswitch_name): @@ -413,26 +464,31 @@ def remove_host_port_group(self, host_system): host_results = dict(changed=False, msg="") if self.module.check_mode: - host_results['msg'] = "Port Group would be removed" + host_results["msg"] = "Port Group would be removed" else: try: - host_system.configManager.networkSystem.RemovePortGroup(pgName=self.portgroup) - host_results['msg'] = "Port Group removed" + host_system.configManager.networkSystem.RemovePortGroup( + pgName=self.portgroup + ) + host_results["msg"] = "Port Group removed" except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Failed to remove Portgroup as it was not found: %s" % to_native(not_found.msg) + msg="Failed to remove Portgroup as it was not found: %s" + % to_native(not_found.msg) ) except vim.fault.ResourceInUse as resource_in_use: self.module.fail_json( - msg="Failed to remove Portgroup as it is in use: %s" % to_native(resource_in_use.msg) + msg="Failed to remove Portgroup as it is in use: %s" + % to_native(resource_in_use.msg) ) except vim.fault.HostConfigFault as host_config_fault: self.module.fail_json( - msg="Failed to remove Portgroup due to configuration failures: %s" % to_native(host_config_fault.msg) + msg="Failed to remove Portgroup due to configuration failures: %s" + % to_native(host_config_fault.msg) ) - host_results['changed'] = True - host_results['portgroup'] = self.portgroup - host_results['vswitch'] = self.switch + host_results["changed"] = True + host_results["portgroup"] = self.portgroup + host_results["vswitch"] = self.switch return True, host_results @@ -444,7 +500,7 @@ def create_host_port_group(self, host_system): host_results = dict(changed=False, msg="") if self.module.check_mode: - host_results['msg'] = "Port Group would be added" + host_results["msg"] = "Port Group would be added" else: port_group = vim.host.PortGroup.Config() port_group.spec = vim.host.PortGroup.Specification() @@ -454,55 +510,80 @@ def create_host_port_group(self, host_system): port_group.spec.policy = self.create_network_policy() try: - host_system.configManager.networkSystem.AddPortGroup(portgrp=port_group.spec) - host_results['changed'] = True - host_results['msg'] = "Port Group added" + host_system.configManager.networkSystem.AddPortGroup( + portgrp=port_group.spec + ) + host_results["changed"] = True + host_results["msg"] = "Port Group added" except vim.fault.AlreadyExists as already_exists: self.module.fail_json( - msg="Failed to add Portgroup as it already exists: %s" % to_native(already_exists.msg) + msg="Failed to add Portgroup as it already exists: %s" + % to_native(already_exists.msg) ) except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Failed to add Portgroup as vSwitch was not found: %s" % to_native(not_found.msg) + msg="Failed to add Portgroup as vSwitch was not found: %s" + % to_native(not_found.msg) ) except vim.fault.HostConfigFault as host_config_fault: self.module.fail_json( - msg="Failed to add Portgroup due to host system configuration failure : %s" % - to_native(host_config_fault.msg) + 
msg="Failed to add Portgroup due to host system configuration failure : %s" + % to_native(host_config_fault.msg) ) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( - msg="Failed to add Portgroup as VLAN id was not correct as per specifications: %s" % - to_native(invalid_argument.msg) + msg="Failed to add Portgroup as VLAN id was not correct as per specifications: %s" + % to_native(invalid_argument.msg) ) - host_results['changed'] = True - host_results['portgroup'] = self.portgroup - host_results['vswitch'] = self.switch - host_results['vlan_id'] = self.vlan_id + host_results["changed"] = True + host_results["portgroup"] = self.portgroup + host_results["vswitch"] = self.switch + host_results["vlan_id"] = self.vlan_id if self.sec_promiscuous_mode is None: - host_results['sec_promiscuous_mode'] = "No override" + host_results["sec_promiscuous_mode"] = "No override" else: - host_results['sec_promiscuous_mode'] = self.sec_promiscuous_mode + host_results["sec_promiscuous_mode"] = self.sec_promiscuous_mode if self.sec_mac_changes is None: - host_results['sec_mac_changes'] = "No override" + host_results["sec_mac_changes"] = "No override" else: - host_results['sec_mac_changes'] = self.sec_mac_changes + host_results["sec_mac_changes"] = self.sec_mac_changes if self.sec_forged_transmits is None: - host_results['sec_forged_transmits'] = "No override" + host_results["sec_forged_transmits"] = "No override" else: - host_results['sec_forged_transmits'] = self.sec_forged_transmits - host_results['traffic_shaping'] = "No override" if self.ts_enabled is None else self.ts_enabled - host_results['load_balancing'] = "No override" if self.teaming_load_balancing is None \ + host_results["sec_forged_transmits"] = self.sec_forged_transmits + host_results["traffic_shaping"] = ( + "No override" if self.ts_enabled is None else self.ts_enabled + ) + host_results["load_balancing"] = ( + "No override" + if self.teaming_load_balancing is None else self.teaming_load_balancing - host_results['notify_switches'] = "No override" if self.teaming_notify_switches is None \ + ) + host_results["notify_switches"] = ( + "No override" + if self.teaming_notify_switches is None else self.teaming_notify_switches - host_results['failback'] = "No override" if self.teaming_failback is None else self.teaming_failback - host_results['failover_active'] = "No override" if self.teaming_failover_order_active is None \ + ) + host_results["failback"] = ( + "No override" + if self.teaming_failback is None + else self.teaming_failback + ) + host_results["failover_active"] = ( + "No override" + if self.teaming_failover_order_active is None else self.teaming_failover_order_active - host_results['failover_standby'] = "No override" if self.teaming_failover_order_standby is None \ + ) + host_results["failover_standby"] = ( + "No override" + if self.teaming_failover_order_standby is None else self.teaming_failover_order_standby - host_results['failure_detection'] = "No override" if self.teaming_failure_detection is None \ + ) + host_results["failure_detection"] = ( + "No override" + if self.teaming_failure_detection is None else self.teaming_failure_detection + ) return True, host_results @@ -517,32 +598,34 @@ def update_host_port_group(self, host_system, portgroup_object): spec = portgroup_object.spec # Check VLAN ID - host_results['vlan_id'] = self.vlan_id + host_results["vlan_id"] = self.vlan_id if spec.vlanId != self.vlan_id: changed = True changed_list.append("VLAN ID") - host_results['vlan_id_previous'] = spec.vlanId + 
host_results["vlan_id_previous"] = spec.vlanId spec.vlanId = self.vlan_id # Check security settings if self.sec_promiscuous_mode is None: - host_results['sec_promiscuous_mode'] = "No override" + host_results["sec_promiscuous_mode"] = "No override" else: - host_results['sec_promiscuous_mode'] = self.sec_promiscuous_mode + host_results["sec_promiscuous_mode"] = self.sec_promiscuous_mode if self.sec_mac_changes is None: - host_results['sec_mac_changes'] = "No override" + host_results["sec_mac_changes"] = "No override" else: - host_results['sec_mac_changes'] = self.sec_mac_changes + host_results["sec_mac_changes"] = self.sec_mac_changes if self.sec_forged_transmits is None: - host_results['sec_forged_transmits'] = "No override" + host_results["sec_forged_transmits"] = "No override" else: - host_results['sec_forged_transmits'] = self.sec_forged_transmits + host_results["sec_forged_transmits"] = self.sec_forged_transmits if spec.policy.security: promiscuous_mode_previous = spec.policy.security.allowPromiscuous mac_changes_previous = spec.policy.security.macChanges forged_transmits_previous = spec.policy.security.forgedTransmits if promiscuous_mode_previous is not self.sec_promiscuous_mode: - spec.policy.security.allowPromiscuous = self.sec_promiscuous_mode + spec.policy.security.allowPromiscuous = ( + self.sec_promiscuous_mode + ) changed = changed_security = True changed_list.append("Promiscuous mode") if mac_changes_previous is not self.sec_mac_changes: @@ -550,289 +633,410 @@ def update_host_port_group(self, host_system, portgroup_object): changed = changed_security = True changed_list.append("MAC address changes") if forged_transmits_previous is not self.sec_forged_transmits: - spec.policy.security.forgedTransmits = self.sec_forged_transmits + spec.policy.security.forgedTransmits = ( + self.sec_forged_transmits + ) changed = changed_security = True changed_list.append("Forged transmits") if changed_security: if self.sec_promiscuous_mode is None: - host_results['sec_promiscuous_mode_previous'] = "No override" + host_results[ + "sec_promiscuous_mode_previous" + ] = "No override" else: - host_results['sec_promiscuous_mode_previous'] = promiscuous_mode_previous + host_results[ + "sec_promiscuous_mode_previous" + ] = promiscuous_mode_previous if self.sec_mac_changes is None: - host_results['sec_mac_changes_previous'] = "No override" + host_results["sec_mac_changes_previous"] = "No override" else: - host_results['sec_mac_changes'] = mac_changes_previous + host_results["sec_mac_changes"] = mac_changes_previous if self.sec_forged_transmits is None: - host_results['sec_forged_transmits_previous'] = "No override" + host_results[ + "sec_forged_transmits_previous" + ] = "No override" else: - host_results['sec_forged_transmits_previous'] = forged_transmits_previous + host_results[ + "sec_forged_transmits_previous" + ] = forged_transmits_previous else: spec.policy.security = self.create_security_policy() changed = True changed_list.append("Security") - host_results['sec_promiscuous_mode_previous'] = "No override" - host_results['sec_mac_changes_previous'] = "No override" - host_results['sec_forged_transmits_previous'] = "No override" + host_results["sec_promiscuous_mode_previous"] = "No override" + host_results["sec_mac_changes_previous"] = "No override" + host_results["sec_forged_transmits_previous"] = "No override" # Check traffic shaping if self.ts_enabled is None: - host_results['traffic_shaping'] = "No override" + host_results["traffic_shaping"] = "No override" else: - host_results['traffic_shaping'] 
= self.ts_enabled + host_results["traffic_shaping"] = self.ts_enabled if self.ts_enabled: ts_average_bandwidth = self.ts_average_bandwidth * 1000 ts_peak_bandwidth = self.ts_peak_bandwidth * 1000 ts_burst_size = self.ts_burst_size * 1024 - host_results['traffic_shaping_avg_bandw'] = ts_average_bandwidth - host_results['traffic_shaping_peak_bandw'] = ts_peak_bandwidth - host_results['traffic_shaping_burst'] = ts_burst_size - if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None: + host_results[ + "traffic_shaping_avg_bandw" + ] = ts_average_bandwidth + host_results["traffic_shaping_peak_bandw"] = ts_peak_bandwidth + host_results["traffic_shaping_burst"] = ts_burst_size + if ( + spec.policy.shapingPolicy + and spec.policy.shapingPolicy.enabled is not None + ): if spec.policy.shapingPolicy.enabled: if self.ts_enabled: - if spec.policy.shapingPolicy.averageBandwidth != ts_average_bandwidth: + if ( + spec.policy.shapingPolicy.averageBandwidth + != ts_average_bandwidth + ): changed = True changed_list.append("Average bandwidth") - host_results['traffic_shaping_avg_bandw_previous'] = spec.policy.shapingPolicy.averageBandwidth - spec.policy.shapingPolicy.averageBandwidth = ts_average_bandwidth - if spec.policy.shapingPolicy.peakBandwidth != ts_peak_bandwidth: + host_results[ + "traffic_shaping_avg_bandw_previous" + ] = spec.policy.shapingPolicy.averageBandwidth + spec.policy.shapingPolicy.averageBandwidth = ( + ts_average_bandwidth + ) + if ( + spec.policy.shapingPolicy.peakBandwidth + != ts_peak_bandwidth + ): changed = True changed_list.append("Peak bandwidth") - host_results['traffic_shaping_peak_bandw_previous'] = spec.policy.shapingPolicy.peakBandwidth - spec.policy.shapingPolicy.peakBandwidth = ts_peak_bandwidth + host_results[ + "traffic_shaping_peak_bandw_previous" + ] = spec.policy.shapingPolicy.peakBandwidth + spec.policy.shapingPolicy.peakBandwidth = ( + ts_peak_bandwidth + ) if spec.policy.shapingPolicy.burstSize != ts_burst_size: changed = True changed_list.append("Burst size") - host_results['traffic_shaping_burst_previous'] = spec.policy.shapingPolicy.burstSize + host_results[ + "traffic_shaping_burst_previous" + ] = spec.policy.shapingPolicy.burstSize spec.policy.shapingPolicy.burstSize = ts_burst_size elif self.ts_enabled is False: changed = True changed_list.append("Traffic shaping") - host_results['traffic_shaping_previous'] = True + host_results["traffic_shaping_previous"] = True spec.policy.shapingPolicy.enabled = False elif self.ts_enabled is None: spec.policy.shapingPolicy = None changed = True changed_list.append("Traffic shaping") - host_results['traffic_shaping_previous'] = True + host_results["traffic_shaping_previous"] = True else: if self.ts_enabled: spec.policy.shapingPolicy = self.create_shaping_policy() changed = True changed_list.append("Traffic shaping") - host_results['traffic_shaping_previous'] = False + host_results["traffic_shaping_previous"] = False elif self.ts_enabled is False: changed = True changed_list.append("Traffic shaping") - host_results['traffic_shaping_previous'] = True + host_results["traffic_shaping_previous"] = True spec.policy.shapingPolicy.enabled = False elif self.ts_enabled is None: spec.policy.shapingPolicy = None changed = True changed_list.append("Traffic shaping") - host_results['traffic_shaping_previous'] = True + host_results["traffic_shaping_previous"] = True else: if self.ts_enabled: spec.policy.shapingPolicy = self.create_shaping_policy() changed = True changed_list.append("Traffic shaping") - 
host_results['traffic_shaping_previous'] = "No override" + host_results["traffic_shaping_previous"] = "No override" elif self.ts_enabled is False: changed = True changed_list.append("Traffic shaping") - host_results['traffic_shaping_previous'] = "No override" + host_results["traffic_shaping_previous"] = "No override" spec.policy.shapingPolicy.enabled = False # Check teaming if spec.policy.nicTeaming: # Check teaming policy if self.teaming_load_balancing is None: - host_results['load_balancing'] = "No override" + host_results["load_balancing"] = "No override" else: - host_results['load_balancing'] = self.teaming_load_balancing + host_results["load_balancing"] = self.teaming_load_balancing if spec.policy.nicTeaming.policy: - if spec.policy.nicTeaming.policy != self.teaming_load_balancing: + if ( + spec.policy.nicTeaming.policy + != self.teaming_load_balancing + ): changed = True changed_list.append("Load balancing") - host_results['load_balancing_previous'] = spec.policy.nicTeaming.policy + host_results[ + "load_balancing_previous" + ] = spec.policy.nicTeaming.policy spec.policy.nicTeaming.policy = self.teaming_load_balancing else: if self.teaming_load_balancing: changed = True changed_list.append("Load balancing") - host_results['load_balancing_previous'] = "No override" + host_results["load_balancing_previous"] = "No override" spec.policy.nicTeaming.policy = self.teaming_load_balancing # Check teaming notify switches if spec.policy.nicTeaming.notifySwitches is None: - host_results['notify_switches'] = "No override" + host_results["notify_switches"] = "No override" else: - host_results['notify_switches'] = self.teaming_notify_switches + host_results["notify_switches"] = self.teaming_notify_switches if spec.policy.nicTeaming.notifySwitches is not None: if self.teaming_notify_switches is not None: - if spec.policy.nicTeaming.notifySwitches is not self.teaming_notify_switches: + if ( + spec.policy.nicTeaming.notifySwitches + is not self.teaming_notify_switches + ): changed = True changed_list.append("Notify switches") - host_results['notify_switches_previous'] = spec.policy.nicTeaming.notifySwitches - spec.policy.nicTeaming.notifySwitches = self.teaming_notify_switches + host_results[ + "notify_switches_previous" + ] = spec.policy.nicTeaming.notifySwitches + spec.policy.nicTeaming.notifySwitches = ( + self.teaming_notify_switches + ) else: changed = True changed_list.append("Notify switches") - host_results['notify_switches_previous'] = spec.policy.nicTeaming.notifySwitches + host_results[ + "notify_switches_previous" + ] = spec.policy.nicTeaming.notifySwitches spec.policy.nicTeaming.notifySwitches = None else: if self.teaming_notify_switches is not None: changed = True changed_list.append("Notify switches") - host_results['notify_switches_previous'] = "No override" - spec.policy.nicTeaming.notifySwitches = self.teaming_notify_switches + host_results["notify_switches_previous"] = "No override" + spec.policy.nicTeaming.notifySwitches = ( + self.teaming_notify_switches + ) # Check failback if spec.policy.nicTeaming.rollingOrder is None: - host_results['failback'] = "No override" + host_results["failback"] = "No override" else: - host_results['failback'] = self.teaming_failback + host_results["failback"] = self.teaming_failback if spec.policy.nicTeaming.rollingOrder is not None: if self.teaming_failback is not None: # this option is called 'failback' in the vSphere Client # rollingOrder also uses the opposite value displayed in the client - if spec.policy.nicTeaming.rollingOrder is 
self.teaming_failback: + if ( + spec.policy.nicTeaming.rollingOrder + is self.teaming_failback + ): changed = True changed_list.append("Failback") - host_results['failback_previous'] = not spec.policy.nicTeaming.rollingOrder - spec.policy.nicTeaming.rollingOrder = not self.teaming_failback + host_results[ + "failback_previous" + ] = not spec.policy.nicTeaming.rollingOrder + spec.policy.nicTeaming.rollingOrder = ( + not self.teaming_failback + ) else: changed = True changed_list.append("Failback") - host_results['failback_previous'] = spec.policy.nicTeaming.rollingOrder + host_results[ + "failback_previous" + ] = spec.policy.nicTeaming.rollingOrder spec.policy.nicTeaming.rollingOrder = None else: if self.teaming_failback is not None: changed = True changed_list.append("Failback") - host_results['failback_previous'] = "No override" - spec.policy.nicTeaming.rollingOrder = not self.teaming_failback + host_results["failback_previous"] = "No override" + spec.policy.nicTeaming.rollingOrder = ( + not self.teaming_failback + ) # Check teaming failover order - if self.teaming_failover_order_active is None and self.teaming_failover_order_standby is None: - host_results['failover_active'] = "No override" - host_results['failover_standby'] = "No override" + if ( + self.teaming_failover_order_active is None + and self.teaming_failover_order_standby is None + ): + host_results["failover_active"] = "No override" + host_results["failover_standby"] = "No override" else: - host_results['failover_active'] = self.teaming_failover_order_active - host_results['failover_standby'] = self.teaming_failover_order_standby + host_results[ + "failover_active" + ] = self.teaming_failover_order_active + host_results[ + "failover_standby" + ] = self.teaming_failover_order_standby if spec.policy.nicTeaming.nicOrder: - if self.teaming_failover_order_active or self.teaming_failover_order_standby: - if spec.policy.nicTeaming.nicOrder.activeNic != self.teaming_failover_order_active: + if ( + self.teaming_failover_order_active + or self.teaming_failover_order_standby + ): + if ( + spec.policy.nicTeaming.nicOrder.activeNic + != self.teaming_failover_order_active + ): changed = True changed_list.append("Failover order active") - host_results['failover_active_previous'] = spec.policy.nicTeaming.nicOrder.activeNic - spec.policy.nicTeaming.nicOrder.activeNic = self.teaming_failover_order_active - if spec.policy.nicTeaming.nicOrder.standbyNic != self.teaming_failover_order_standby: + host_results[ + "failover_active_previous" + ] = spec.policy.nicTeaming.nicOrder.activeNic + spec.policy.nicTeaming.nicOrder.activeNic = ( + self.teaming_failover_order_active + ) + if ( + spec.policy.nicTeaming.nicOrder.standbyNic + != self.teaming_failover_order_standby + ): changed = True changed_list.append("Failover order standby") - host_results['failover_standby_previous'] = spec.policy.nicTeaming.nicOrder.standbyNic - spec.policy.nicTeaming.nicOrder.standbyNic = self.teaming_failover_order_standby + host_results[ + "failover_standby_previous" + ] = spec.policy.nicTeaming.nicOrder.standbyNic + spec.policy.nicTeaming.nicOrder.standbyNic = ( + self.teaming_failover_order_standby + ) else: spec.policy.nicTeaming.nicOrder = None changed = True changed_list.append("Failover order") - if hasattr(spec.policy.nicTeaming.nicOrder, 'activeNic'): - host_results['failover_active_previous'] = spec.policy.nicTeaming.nicOrder.activeNic + if hasattr(spec.policy.nicTeaming.nicOrder, "activeNic"): + host_results[ + "failover_active_previous" + ] = 
spec.policy.nicTeaming.nicOrder.activeNic else: - host_results['failover_active_previous'] = [] - if hasattr(spec.policy.nicTeaming.nicOrder, 'standbyNic'): - host_results['failover_standby_previous'] = spec.policy.nicTeaming.nicOrder.standbyNic + host_results["failover_active_previous"] = [] + if hasattr(spec.policy.nicTeaming.nicOrder, "standbyNic"): + host_results[ + "failover_standby_previous" + ] = spec.policy.nicTeaming.nicOrder.standbyNic else: - host_results['failover_standby_previous'] = [] + host_results["failover_standby_previous"] = [] else: - if self.teaming_failover_order_active or self.teaming_failover_order_standby: + if ( + self.teaming_failover_order_active + or self.teaming_failover_order_standby + ): changed = True changed_list.append("Failover order") - host_results['failover_active_previous'] = "No override" - host_results['failover_standby_previous'] = "No override" - spec.policy.nicTeaming.nicOrder = self.create_nic_order_policy() + host_results["failover_active_previous"] = "No override" + host_results["failover_standby_previous"] = "No override" + spec.policy.nicTeaming.nicOrder = ( + self.create_nic_order_policy() + ) # Check teaming failure detection if self.teaming_failure_detection is None: - host_results['failure_detection'] = "No override" + host_results["failure_detection"] = "No override" else: - host_results['failure_detection'] = self.teaming_failure_detection - if spec.policy.nicTeaming.failureCriteria and spec.policy.nicTeaming.failureCriteria.checkBeacon is not None: + host_results[ + "failure_detection" + ] = self.teaming_failure_detection + if ( + spec.policy.nicTeaming.failureCriteria + and spec.policy.nicTeaming.failureCriteria.checkBeacon + is not None + ): if self.teaming_failure_detection == "link_status_only": - if spec.policy.nicTeaming.failureCriteria.checkBeacon is True: + if ( + spec.policy.nicTeaming.failureCriteria.checkBeacon + is True + ): changed = True changed_list.append("Network failure detection") - host_results['failure_detection_previous'] = "beacon_probing" - spec.policy.nicTeaming.failureCriteria.checkBeacon = False + host_results[ + "failure_detection_previous" + ] = "beacon_probing" + spec.policy.nicTeaming.failureCriteria.checkBeacon = ( + False + ) elif self.teaming_failure_detection == "beacon_probing": - if spec.policy.nicTeaming.failureCriteria.checkBeacon is False: + if ( + spec.policy.nicTeaming.failureCriteria.checkBeacon + is False + ): changed = True changed_list.append("Network failure detection") - host_results['failure_detection_previous'] = "link_status_only" - spec.policy.nicTeaming.failureCriteria.checkBeacon = True - elif spec.policy.nicTeaming.failureCriteria.checkBeacon is not None: + host_results[ + "failure_detection_previous" + ] = "link_status_only" + spec.policy.nicTeaming.failureCriteria.checkBeacon = ( + True + ) + elif ( + spec.policy.nicTeaming.failureCriteria.checkBeacon + is not None + ): changed = True changed_list.append("Network failure detection") - host_results['failure_detection_previous'] = spec.policy.nicTeaming.failureCriteria.checkBeacon + host_results[ + "failure_detection_previous" + ] = spec.policy.nicTeaming.failureCriteria.checkBeacon spec.policy.nicTeaming.failureCriteria = None else: if self.teaming_failure_detection: - spec.policy.nicTeaming.failureCriteria = self.create_nic_failure_policy() + spec.policy.nicTeaming.failureCriteria = ( + self.create_nic_failure_policy() + ) changed = True changed_list.append("Network failure detection") - 
host_results['failure_detection_previous'] = "No override" + host_results["failure_detection_previous"] = "No override" else: spec.policy.nicTeaming = self.create_teaming_policy() if spec.policy.nicTeaming: changed = True changed_list.append("Teaming and failover") - host_results['load_balancing_previous'] = "No override" - host_results['notify_switches_previous'] = "No override" - host_results['failback_previous'] = "No override" - host_results['failover_active_previous'] = "No override" - host_results['failover_standby_previous'] = "No override" - host_results['failure_detection_previous'] = "No override" + host_results["load_balancing_previous"] = "No override" + host_results["notify_switches_previous"] = "No override" + host_results["failback_previous"] = "No override" + host_results["failover_active_previous"] = "No override" + host_results["failover_standby_previous"] = "No override" + host_results["failure_detection_previous"] = "No override" if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix if not self.module.check_mode: try: host_system.configManager.networkSystem.UpdatePortGroup( - pgName=self.portgroup, - portgrp=spec + pgName=self.portgroup, portgrp=spec ) except vim.fault.AlreadyExists as already_exists: self.module.fail_json( - msg="Failed to update Portgroup as it would conflict with an existing port group: %s" % - to_native(already_exists.msg) + msg="Failed to update Portgroup as it would conflict with an existing port group: %s" + % to_native(already_exists.msg) ) except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Failed to update Portgroup as vSwitch was not found: %s" % - to_native(not_found.msg) + msg="Failed to update Portgroup as vSwitch was not found: %s" + % to_native(not_found.msg) ) except vim.fault.HostConfigFault as host_config_fault: self.module.fail_json( - msg="Failed to update Portgroup due to host system configuration failure : %s" % - to_native(host_config_fault.msg) + msg="Failed to update Portgroup due to host system configuration failure : %s" + % to_native(host_config_fault.msg) ) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( msg="Failed to update Port Group '%s', this can be due to either of following :" - " 1. VLAN id was not correct as per specifications, 2. Network policy is invalid : %s" % - (self.portgroup, to_native(invalid_argument.msg)) + " 1. VLAN id was not correct as per specifications, 2. 
Network policy is invalid : %s" + % (self.portgroup, to_native(invalid_argument.msg)) ) else: message = "Port Group already configured properly" - host_results['changed'] = changed - host_results['msg'] = message - host_results['portgroup'] = self.portgroup - host_results['vswitch'] = self.switch + host_results["changed"] = changed + host_results["msg"] = message + host_results["portgroup"] = self.portgroup + host_results["vswitch"] = self.switch return changed, host_results @@ -846,9 +1050,14 @@ def create_network_policy(self): teaming_policy = None # Only configure security policy if an option is defined - if not all(option is None for option in [self.sec_promiscuous_mode, - self.sec_mac_changes, - self.sec_forged_transmits]): + if not all( + option is None + for option in [ + self.sec_promiscuous_mode, + self.sec_mac_changes, + self.sec_forged_transmits, + ] + ): security_policy = self.create_security_policy() if self.ts_enabled: shaping_policy = self.create_shaping_policy() @@ -857,7 +1066,7 @@ def create_network_policy(self): network_policy = vim.host.NetworkPolicy( security=security_policy, nicTeaming=teaming_policy, - shapingPolicy=shaping_policy + shapingPolicy=shaping_policy, ) return network_policy @@ -891,12 +1100,17 @@ def create_teaming_policy(self): Returns: NIC Teaming Policy object """ # Only configure teaming policy if an option is defined - if not all(option is None for option in [self.teaming_load_balancing, - self.teaming_failure_detection, - self.teaming_notify_switches, - self.teaming_failback, - self.teaming_failover_order_active, - self.teaming_failover_order_standby]): + if not all( + option is None + for option in [ + self.teaming_load_balancing, + self.teaming_failure_detection, + self.teaming_notify_switches, + self.teaming_failback, + self.teaming_failover_order_active, + self.teaming_failover_order_standby, + ] + ): teaming_policy = vim.host.NetworkPolicy.NicTeamingPolicy() teaming_policy.policy = self.teaming_load_balancing # NOTE: 'teaming_inbound_policy' is deprecated and the following if statement should be removed in 2.11 @@ -916,14 +1130,19 @@ def create_teaming_policy(self): teaming_policy.rollingOrder = None else: teaming_policy.rollingOrder = not self.teaming_failback - if self.teaming_failover_order_active is None and self.teaming_failover_order_standby is None: + if ( + self.teaming_failover_order_active is None + and self.teaming_failover_order_standby is None + ): teaming_policy.nicOrder = None else: teaming_policy.nicOrder = self.create_nic_order_policy() if self.teaming_failure_detection is None: teaming_policy.failureCriteria = None else: - teaming_policy.failureCriteria = self.create_nic_failure_policy() + teaming_policy.failureCriteria = ( + self.create_nic_failure_policy() + ) return teaming_policy return None @@ -935,12 +1154,14 @@ def create_nic_order_policy(self): for active_nic in self.teaming_failover_order_active: if active_nic not in self.switch_object.spec.bridge.nicDevice: self.module.fail_json( - msg="NIC '%s' (active) is not configured on vSwitch '%s'" % (active_nic, self.switch) + msg="NIC '%s' (active) is not configured on vSwitch '%s'" + % (active_nic, self.switch) ) for standby_nic in self.teaming_failover_order_standby: if standby_nic not in self.switch_object.spec.bridge.nicDevice: self.module.fail_json( - msg="NIC '%s' (standby) is not configured on vSwitch '%s'" % (standby_nic, self.switch) + msg="NIC '%s' (standby) is not configured on vSwitch '%s'" + % (standby_nic, self.switch) ) nic_order = 
vim.host.NetworkPolicy.NicOrderPolicy() nic_order.activeNic = self.teaming_failover_order_active @@ -965,74 +1186,82 @@ def create_nic_failure_policy(self): failure_criteria.checkErrorPercent = False failure_criteria.checkDuplex = False failure_criteria.speed = 10 - failure_criteria.checkSpeed = 'minimum' + failure_criteria.checkSpeed = "minimum" return failure_criteria def main(): """Main""" argument_spec = vmware_argument_spec() - argument_spec.update(dict( - portgroup=dict(type='str', required=True, aliases=['portgroup_name']), - switch=dict(type='str', required=True, aliases=['switch_name', 'vswitch']), - vlan_id=dict(type='int', required=False, default=0, aliases=['vlan']), - hosts=dict(type='list', aliases=['esxi_hostname']), - cluster_name=dict(type='str', aliases=['cluster']), - state=dict(type='str', choices=['present', 'absent'], default='present'), - security=dict( - type='dict', - options=dict( - promiscuous_mode=dict(type='bool'), - forged_transmits=dict(type='bool'), - mac_changes=dict(type='bool'), + argument_spec.update( + dict( + portgroup=dict( + type="str", required=True, aliases=["portgroup_name"] + ), + switch=dict( + type="str", required=True, aliases=["switch_name", "vswitch"] + ), + vlan_id=dict( + type="int", required=False, default=0, aliases=["vlan"] + ), + hosts=dict(type="list", aliases=["esxi_hostname"]), + cluster_name=dict(type="str", aliases=["cluster"]), + state=dict( + type="str", choices=["present", "absent"], default="present" ), - aliases=['security_policy', 'network_policy'] - ), - traffic_shaping=dict( - type='dict', - options=dict( - enabled=dict(type='bool'), - average_bandwidth=dict(type='int'), - peak_bandwidth=dict(type='int'), - burst_size=dict(type='int'), + security=dict( + type="dict", + options=dict( + promiscuous_mode=dict(type="bool"), + forged_transmits=dict(type="bool"), + mac_changes=dict(type="bool"), + ), + aliases=["security_policy", "network_policy"], ), - ), - teaming=dict( - type='dict', - options=dict( - load_balancing=dict( - type='str', - choices=[ - None, - 'loadbalance_ip', - 'loadbalance_srcmac', - 'loadbalance_srcid', - 'failover_explicit', - ], - aliases=['load_balance_policy'], + traffic_shaping=dict( + type="dict", + options=dict( + enabled=dict(type="bool"), + average_bandwidth=dict(type="int"), + peak_bandwidth=dict(type="int"), + burst_size=dict(type="int"), ), - network_failure_detection=dict( - type='str', - choices=['link_status_only', 'beacon_probing'] + ), + teaming=dict( + type="dict", + options=dict( + load_balancing=dict( + type="str", + choices=[ + None, + "loadbalance_ip", + "loadbalance_srcmac", + "loadbalance_srcid", + "failover_explicit", + ], + aliases=["load_balance_policy"], + ), + network_failure_detection=dict( + type="str", + choices=["link_status_only", "beacon_probing"], + ), + notify_switches=dict(type="bool"), + failback=dict(type="bool"), + active_adapters=dict(type="list"), + standby_adapters=dict(type="list"), + # NOTE: Deprecated from 2.11 onwards + inbound_policy=dict(type="bool"), + rolling_order=dict(type="bool"), ), - notify_switches=dict(type='bool'), - failback=dict(type='bool'), - active_adapters=dict(type='list'), - standby_adapters=dict(type='list'), - # NOTE: Deprecated from 2.11 onwards - inbound_policy=dict(type='bool'), - rolling_order=dict(type='bool'), + aliases=["teaming_policy"], ), - aliases=['teaming_policy'] - ), - )) + ) + ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'hosts'], - ], - supports_check_mode=True + 
required_one_of=[["cluster_name", "hosts"]], + supports_check_mode=True, ) try: @@ -1046,5 +1275,5 @@ def main(): module.fail_json(msg=to_native(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_portgroup_info.py b/plugins/modules/vmware_portgroup_info.py index eb07b84..5e7a6a4 100644 --- a/plugins/modules/vmware_portgroup_info.py +++ b/plugins/modules/vmware_portgroup_info.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_portgroup_info short_description: Gathers info about an ESXi host's Port Group configuration @@ -49,9 +50,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather portgroup info about all ESXi Host in given Cluster vmware_portgroup_info: hostname: '{{ vcenter_hostname }}' @@ -67,9 +68,9 @@ password: '{{ vcenter_password }}' esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" hosts_portgroup_info: description: metadata about host's portgroup configuration returned: on success @@ -104,31 +105,37 @@ } ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class PortgroupInfoManager(PyVmomi): """Class to manage Port Group info""" + def __init__(self, module): super(PortgroupInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") - self.policies = self.params.get('policies') + self.policies = self.params.get("policies") @staticmethod def normalize_pg_info(portgroup_obj, policy_info): """Create Port Group information""" pg_info_dict = dict() spec = portgroup_obj.spec - pg_info_dict['portgroup'] = spec.name - pg_info_dict['vlan_id'] = spec.vlanId - pg_info_dict['vswitch'] = spec.vswitchName + pg_info_dict["portgroup"] = spec.name + pg_info_dict["vlan_id"] = spec.vlanId + pg_info_dict["vswitch"] = spec.vswitchName if policy_info: # Security info @@ -136,54 +143,79 @@ def normalize_pg_info(portgroup_obj, policy_info): promiscuous_mode = spec.policy.security.allowPromiscuous mac_changes = spec.policy.security.macChanges forged_transmits = spec.policy.security.forgedTransmits - pg_info_dict['security'] = ( - ["No override" if promiscuous_mode is None else promiscuous_mode, - "No override" if mac_changes is None else mac_changes, - "No override" if forged_transmits is None else forged_transmits] - ) + pg_info_dict["security"] = [ + "No override" + if promiscuous_mode is None + else promiscuous_mode, + "No 
override" if mac_changes is None else mac_changes, + "No override" + if forged_transmits is None + else forged_transmits, + ] else: - pg_info_dict['security'] = ["No override", "No override", "No override"] + pg_info_dict["security"] = [ + "No override", + "No override", + "No override", + ] # Traffic Shaping info - if spec.policy.shapingPolicy and spec.policy.shapingPolicy.enabled is not None: - pg_info_dict['ts'] = portgroup_obj.spec.policy.shapingPolicy.enabled + if ( + spec.policy.shapingPolicy + and spec.policy.shapingPolicy.enabled is not None + ): + pg_info_dict[ + "ts" + ] = portgroup_obj.spec.policy.shapingPolicy.enabled else: - pg_info_dict['ts'] = "No override" + pg_info_dict["ts"] = "No override" # Teaming and failover info if spec.policy.nicTeaming: if spec.policy.nicTeaming.policy is None: - pg_info_dict['lb'] = "No override" + pg_info_dict["lb"] = "No override" else: - pg_info_dict['lb'] = spec.policy.nicTeaming.policy + pg_info_dict["lb"] = spec.policy.nicTeaming.policy if spec.policy.nicTeaming.notifySwitches is None: - pg_info_dict['notify'] = "No override" + pg_info_dict["notify"] = "No override" else: - pg_info_dict['notify'] = spec.policy.nicTeaming.notifySwitches + pg_info_dict[ + "notify" + ] = spec.policy.nicTeaming.notifySwitches if spec.policy.nicTeaming.rollingOrder is None: - pg_info_dict['failback'] = "No override" + pg_info_dict["failback"] = "No override" else: - pg_info_dict['failback'] = not spec.policy.nicTeaming.rollingOrder + pg_info_dict[ + "failback" + ] = not spec.policy.nicTeaming.rollingOrder if spec.policy.nicTeaming.nicOrder is None: - pg_info_dict['failover_active'] = "No override" - pg_info_dict['failover_standby'] = "No override" + pg_info_dict["failover_active"] = "No override" + pg_info_dict["failover_standby"] = "No override" else: - pg_info_dict['failover_active'] = spec.policy.nicTeaming.nicOrder.activeNic - pg_info_dict['failover_standby'] = spec.policy.nicTeaming.nicOrder.standbyNic - if spec.policy.nicTeaming.failureCriteria and spec.policy.nicTeaming.failureCriteria.checkBeacon is None: - pg_info_dict['failure_detection'] = "No override" + pg_info_dict[ + "failover_active" + ] = spec.policy.nicTeaming.nicOrder.activeNic + pg_info_dict[ + "failover_standby" + ] = spec.policy.nicTeaming.nicOrder.standbyNic + if ( + spec.policy.nicTeaming.failureCriteria + and spec.policy.nicTeaming.failureCriteria.checkBeacon + is None + ): + pg_info_dict["failure_detection"] = "No override" else: if spec.policy.nicTeaming.failureCriteria.checkBeacon: - pg_info_dict['failure_detection'] = "beacon_probing" + pg_info_dict["failure_detection"] = "beacon_probing" else: - pg_info_dict['failure_detection'] = "link_status_only" + pg_info_dict["failure_detection"] = "link_status_only" else: - pg_info_dict['lb'] = "No override" - pg_info_dict['notify'] = "No override" - pg_info_dict['failback'] = "No override" - pg_info_dict['failover_active'] = "No override" - pg_info_dict['failover_standby'] = "No override" - pg_info_dict['failure_detection'] = "No override" + pg_info_dict["lb"] = "No override" + pg_info_dict["notify"] = "No override" + pg_info_dict["failback"] = "No override" + pg_info_dict["failover_active"] = "No override" + pg_info_dict["failover_standby"] = "No override" + pg_info_dict["failure_detection"] = "No override" return pg_info_dict @@ -195,7 +227,9 @@ def gather_host_portgroup_info(self): hosts_pg_info[host.name] = [] for portgroup in pgs: hosts_pg_info[host.name].append( - self.normalize_pg_info(portgroup_obj=portgroup, 
policy_info=self.policies) + self.normalize_pg_info( + portgroup_obj=portgroup, policy_info=self.policies + ) ) return hosts_pg_info @@ -204,21 +238,22 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), - policies=dict(type='bool', required=False, default=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), + policies=dict(type="bool", required=False, default=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) host_pg_mgr = PortgroupInfoManager(module) - module.exit_json(changed=False, hosts_portgroup_info=host_pg_mgr.gather_host_portgroup_info()) + module.exit_json( + changed=False, + hosts_portgroup_info=host_pg_mgr.gather_host_portgroup_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_resource_pool.py b/plugins/modules/vmware_resource_pool.py index 1a5c45c..5521635 100644 --- a/plugins/modules/vmware_resource_pool.py +++ b/plugins/modules/vmware_resource_pool.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_resource_pool short_description: Add/remove resource pools to/from vCenter @@ -103,9 +106,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add resource pool to vCenter vmware_resource_pool: hostname: '{{ vcenter_hostname }}' @@ -124,7 +127,7 @@ cpu_expandable_reservations: yes state: present delegate_to: localhost -''' +""" RETURN = """ instance: @@ -136,36 +139,45 @@ try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False -from ansible_collections.vmware.general.plugins.module_utils.vmware import get_all_objs, connect_to_api, vmware_argument_spec, find_datacenter_by_name, \ - find_cluster_by_name, wait_for_task, find_host_by_cluster_datacenter +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + get_all_objs, + connect_to_api, + vmware_argument_spec, + find_datacenter_by_name, + find_cluster_by_name, + wait_for_task, + find_host_by_cluster_datacenter, +) from ansible.module_utils.basic import AnsibleModule class VMwareResourcePool(object): - def __init__(self, module): self.module = module - self.datacenter = module.params['datacenter'] - self.cluster = module.params['cluster'] - self.resource_pool = module.params['resource_pool'] - self.hostname = module.params['hostname'] - self.username = module.params['username'] - self.password = module.params['password'] - self.state = module.params['state'] - self.mem_shares = module.params['mem_shares'] - self.mem_limit = module.params['mem_limit'] - self.mem_reservation = module.params['mem_reservation'] + self.datacenter = module.params["datacenter"] + self.cluster = module.params["cluster"] + self.resource_pool = module.params["resource_pool"] + self.hostname = 
module.params["hostname"] + self.username = module.params["username"] + self.password = module.params["password"] + self.state = module.params["state"] + self.mem_shares = module.params["mem_shares"] + self.mem_limit = module.params["mem_limit"] + self.mem_reservation = module.params["mem_reservation"] self.mem_expandable_reservations = module.params[ - 'cpu_expandable_reservations'] - self.cpu_shares = module.params['cpu_shares'] - self.cpu_limit = module.params['cpu_limit'] - self.cpu_reservation = module.params['cpu_reservation'] + "cpu_expandable_reservations" + ] + self.cpu_shares = module.params["cpu_shares"] + self.cpu_limit = module.params["cpu_limit"] + self.cpu_reservation = module.params["cpu_reservation"] self.cpu_expandable_reservations = module.params[ - 'cpu_expandable_reservations'] + "cpu_expandable_reservations" + ] self.dc_obj = None self.cluster_obj = None self.host_obj = None @@ -178,9 +190,7 @@ def select_resource_pool(self, host): resource_pools = get_all_objs(self.content, [vim.ResourcePool]) pool_selections = self.get_obj( - [vim.ResourcePool], - self.resource_pool, - return_all=True + [vim.ResourcePool], self.resource_pool, return_all=True ) if pool_selections: for p in pool_selections: @@ -192,7 +202,8 @@ def select_resource_pool(self, host): def get_obj(self, vimtype, name, return_all=False): obj = list() container = self.content.viewManager.CreateContainerView( - self.content.rootFolder, vimtype, True) + self.content.rootFolder, vimtype, True + ) for c in container.view: if name in [c.name, c._GetMoId()]: @@ -210,14 +221,14 @@ def get_obj(self, vimtype, name, return_all=False): def process_state(self): try: rp_states = { - 'absent': { - 'present': self.state_remove_rp, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_remove_rp, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_exit_unchanged, + "absent": self.state_add_rp, }, - 'present': { - 'present': self.state_exit_unchanged, - 'absent': self.state_add_rp, - } } rp_states[self.state][self.check_rp_state()]() @@ -241,8 +252,10 @@ def state_remove_rp(self): success, result = wait_for_task(task) except Exception: - self.module.fail_json(msg="Failed to remove resource pool '%s' '%s'" % ( - self.resource_pool, resource_pool)) + self.module.fail_json( + msg="Failed to remove resource pool '%s' '%s'" + % (self.resource_pool, resource_pool) + ) self.module.exit_json(changed=changed, result=str(result)) def state_add_rp(self): @@ -268,11 +281,17 @@ def state_add_rp(self): self.dc_obj = find_datacenter_by_name(self.content, self.datacenter) if self.dc_obj is None: - self.module.fail_json(msg="Unable to find datacenter with name %s" % self.datacenter) + self.module.fail_json( + msg="Unable to find datacenter with name %s" % self.datacenter + ) - self.cluster_obj = find_cluster_by_name(self.content, self.cluster, datacenter=self.dc_obj) + self.cluster_obj = find_cluster_by_name( + self.content, self.cluster, datacenter=self.dc_obj + ) if self.cluster_obj is None: - self.module.fail_json(msg="Unable to find cluster with name %s" % self.cluster) + self.module.fail_json( + msg="Unable to find cluster with name %s" % self.cluster + ) rootResourcePool = self.cluster_obj.resourcePool rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec) @@ -280,44 +299,60 @@ def state_add_rp(self): def check_rp_state(self): - self.host_obj, self.cluster_obj = find_host_by_cluster_datacenter(self.module, self.content, self.datacenter, - self.cluster, self.hostname) + 
self.host_obj, self.cluster_obj = find_host_by_cluster_datacenter( + self.module, + self.content, + self.datacenter, + self.cluster, + self.hostname, + ) self.resource_pool_obj = self.select_resource_pool(self.host_obj) if self.resource_pool_obj is None: - return 'absent' + return "absent" else: - return 'present' + return "present" def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(datacenter=dict(required=True, type='str'), - cluster=dict(required=True, type='str'), - resource_pool=dict(required=True, type='str'), - mem_shares=dict(type='str', default="normal", choices=[ - 'high', 'custom', 'normal', 'low']), - mem_limit=dict(type='int', default=-1), - mem_reservation=dict(type='int', default=0), - mem_expandable_reservations=dict( - type='bool', default="True"), - cpu_shares=dict(type='str', default="normal", choices=[ - 'high', 'custom', 'normal', 'low']), - cpu_limit=dict(type='int', default=-1), - cpu_reservation=dict(type='int', default=0), - cpu_expandable_reservations=dict( - type='bool', default="True"), - state=dict(default='present', choices=['present', 'absent'], type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) + argument_spec.update( + dict( + datacenter=dict(required=True, type="str"), + cluster=dict(required=True, type="str"), + resource_pool=dict(required=True, type="str"), + mem_shares=dict( + type="str", + default="normal", + choices=["high", "custom", "normal", "low"], + ), + mem_limit=dict(type="int", default=-1), + mem_reservation=dict(type="int", default=0), + mem_expandable_reservations=dict(type="bool", default="True"), + cpu_shares=dict( + type="str", + default="normal", + choices=["high", "custom", "normal", "low"], + ), + cpu_limit=dict(type="int", default=-1), + cpu_reservation=dict(type="int", default=0), + cpu_expandable_reservations=dict(type="bool", default="True"), + state=dict( + default="present", choices=["present", "absent"], type="str" + ), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi is required for this module') + module.fail_json(msg="pyvmomi is required for this module") vmware_rp = VMwareResourcePool(module) vmware_rp.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_resource_pool_info.py b/plugins/modules/vmware_resource_pool_info.py index 43aa2bf..f7dceff 100644 --- a/plugins/modules/vmware_resource_pool_info.py +++ b/plugins/modules/vmware_resource_pool_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_resource_pool_info short_description: Gathers info about resource pool information @@ -28,9 +29,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather resource pool info about all resource pools available vmware_resource_pool_info: hostname: '{{ vcenter_hostname }}' @@ -38,9 +39,9 @@ password: '{{ vcenter_password }}' register: rp_info delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" resource_pool_info: 
description: metadata about resource pool configuration returned: on success @@ -76,7 +77,7 @@ "runtime_memory_unreserved_for_vm": 1007681536 }, ] -''' +""" try: from pyVmomi import vim @@ -84,7 +85,11 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi, get_all_objs +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + get_all_objs, +) class ResourcePoolInfoManager(PyVmomi): @@ -131,10 +136,14 @@ def gather_rp_info(self): def main(): argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) vmware_rp_mgr = ResourcePoolInfoManager(module) - module.exit_json(changed=False, resource_pool_info=vmware_rp_mgr.gather_rp_info()) + module.exit_json( + changed=False, resource_pool_info=vmware_rp_mgr.gather_rp_info() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_tag.py b/plugins/modules/vmware_tag.py index 74af6d8..2abfc3d 100644 --- a/plugins/modules/vmware_tag.py +++ b/plugins/modules/vmware_tag.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_tag short_description: Manage VMware tags @@ -64,9 +65,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a tag vmware_tag: hostname: '{{ vcenter_hostname }}' @@ -97,9 +98,9 @@ tag_name: Sample_Tag_0002 state: absent delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" tag_status: description: dictionary of tag metadata returned: on success @@ -108,10 +109,13 @@ "msg": "Tag 'Sample_Tag_0002' created.", "tag_id": "urn:vmomi:InventoryServiceTag:bff91819-f529-43c9-80ca-1c9dfda09441:GLOBAL" } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) + try: from com.vmware.vapi.std.errors_client import Error except ImportError: @@ -124,7 +128,7 @@ def __init__(self, module): self.global_tags = dict() # api_client to call APIs instead of individual service self.tag_service = self.api_client.tagging.Tag - self.tag_name = self.params.get('tag_name') + self.tag_name = self.params.get("tag_name") self.get_all_tags() self.category_service = self.api_client.tagging.Category @@ -133,16 +137,16 @@ def ensure_state(self): Manage internal states of tags """ - desired_state = self.params.get('state') + desired_state = self.params.get("state") states = { - 'present': { - 'present': self.state_update_tag, - 'absent': self.state_create_tag, + "present": { + "present": self.state_update_tag, + "absent": self.state_create_tag, + }, + "absent": { + "present": self.state_delete_tag, + "absent": self.state_unchanged, }, - 'absent': { - 'present': self.state_delete_tag, - 'absent': self.state_unchanged, - } } 
states[desired_state][self.check_tag_status()]() @@ -153,10 +157,12 @@ def state_create_tag(self): """ tag_spec = self.tag_service.CreateSpec() tag_spec.name = self.tag_name - tag_spec.description = self.params.get('tag_description') - category_id = self.params.get('category_id', None) + tag_spec.description = self.params.get("tag_description") + category_id = self.params.get("category_id", None) if category_id is None: - self.module.fail_json(msg="'category_id' is required parameter while creating tag.") + self.module.fail_json( + msg="'category_id' is required parameter while creating tag." + ) category_found = False for category in self.category_service.list(): @@ -166,20 +172,28 @@ def state_create_tag(self): break if not category_found: - self.module.fail_json(msg="Unable to find category specified using 'category_id' - %s" % category_id) + self.module.fail_json( + msg="Unable to find category specified using 'category_id' - %s" + % category_id + ) tag_spec.category_id = category_id - tag_id = '' + tag_id = "" try: tag_id = self.tag_service.create(tag_spec) except Error as error: self.module.fail_json(msg="%s" % self.get_error_message(error)) if tag_id: - self.module.exit_json(changed=True, - tag_status=dict(msg="Tag '%s' created." % tag_spec.name, tag_id=tag_id)) - self.module.exit_json(changed=False, - tag_status=dict(msg="No tag created", tag_id=tag_id)) + self.module.exit_json( + changed=True, + tag_status=dict( + msg="Tag '%s' created." % tag_spec.name, tag_id=tag_id + ), + ) + self.module.exit_json( + changed=False, tag_status=dict(msg="No tag created", tag_id=tag_id) + ) def state_unchanged(self): """ @@ -194,12 +208,13 @@ def state_update_tag(self): """ changed = False - tag_id = self.global_tags[self.tag_name]['tag_id'] - results = dict(msg="Tag %s is unchanged." % self.tag_name, - tag_id=tag_id) + tag_id = self.global_tags[self.tag_name]["tag_id"] + results = dict( + msg="Tag %s is unchanged." % self.tag_name, tag_id=tag_id + ) tag_update_spec = self.tag_service.UpdateSpec() - tag_desc = self.global_tags[self.tag_name]['tag_description'] - desired_tag_desc = self.params.get('tag_description') + tag_desc = self.global_tags[self.tag_name]["tag_description"] + desired_tag_desc = self.params.get("tag_description") if tag_desc != desired_tag_desc: tag_update_spec.description = desired_tag_desc try: @@ -207,7 +222,7 @@ def state_update_tag(self): except Error as error: self.module.fail_json(msg="%s" % self.get_error_message(error)) - results['msg'] = 'Tag %s updated.' % self.tag_name + results["msg"] = "Tag %s updated." % self.tag_name changed = True self.module.exit_json(changed=changed, tag_status=results) @@ -217,13 +232,17 @@ def state_delete_tag(self): Delete tag """ - tag_id = self.global_tags[self.tag_name]['tag_id'] + tag_id = self.global_tags[self.tag_name]["tag_id"] try: self.tag_service.delete(tag_id=tag_id) except Error as error: self.module.fail_json(msg="%s" % self.get_error_message(error)) - self.module.exit_json(changed=True, - tag_status=dict(msg="Tag '%s' deleted." % self.tag_name, tag_id=tag_id)) + self.module.exit_json( + changed=True, + tag_status=dict( + msg="Tag '%s' deleted." 
% self.tag_name, tag_id=tag_id + ), + ) def check_tag_status(self): """ @@ -231,13 +250,17 @@ def check_tag_status(self): Returns: 'present' if tag found, else 'absent' """ - if 'category_id' in self.params: - if self.tag_name in self.global_tags and self.params['category_id'] == self.global_tags[self.tag_name]['tag_category_id']: - ret = 'present' + if "category_id" in self.params: + if ( + self.tag_name in self.global_tags + and self.params["category_id"] + == self.global_tags[self.tag_name]["tag_category_id"] + ): + ret = "present" else: - ret = 'absent' + ret = "absent" else: - ret = 'present' if self.tag_name in self.global_tags else 'absent' + ret = "present" if self.tag_name in self.global_tags else "absent" return ret def get_all_tags(self): @@ -251,17 +274,22 @@ def get_all_tags(self): tag_description=tag_obj.description, tag_used_by=tag_obj.used_by, tag_category_id=tag_obj.category_id, - tag_id=tag_obj.id + tag_id=tag_obj.id, ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() argument_spec.update( - tag_name=dict(type='str', required=True), - tag_description=dict(type='str', default='', required=False), - category_id=dict(type='str', required=False), - state=dict(type='str', choices=['present', 'absent'], default='present', required=False), + tag_name=dict(type="str", required=True), + tag_description=dict(type="str", default="", required=False), + category_id=dict(type="str", required=False), + state=dict( + type="str", + choices=["present", "absent"], + default="present", + required=False, + ), ) module = AnsibleModule(argument_spec=argument_spec) @@ -269,5 +297,5 @@ def main(): vmware_tag.ensure_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_tag_info.py b/plugins/modules/vmware_tag_info.py index 585586c..41da502 100644 --- a/plugins/modules/vmware_tag_info.py +++ b/plugins/modules/vmware_tag_info.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_tag_info short_description: Manage VMware tag info @@ -37,9 +38,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get info about tag vmware_tag_info: hostname: '{{ vcenter_hostname }}' @@ -72,9 +73,9 @@ vars: query: "[?tag_name==`tag0001`]" - debug: var=tag_id -''' +""" -RETURN = r''' +RETURN = r""" tag_facts: description: dictionary of tag metadata returned: on success @@ -124,10 +125,12 @@ "tag_used_by": [] } ] -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VmTagInfoManager(VmwareRestClient): @@ -149,33 +152,37 @@ def get_all_tags(self): tag_description=tag_obj.description, tag_used_by=tag_obj.used_by, tag_category_id=tag_obj.category_id, - tag_id=tag_obj.id + tag_id=tag_obj.id, + ) + global_tag_info.append( + dict( + tag_name=tag_obj.name, + tag_description=tag_obj.description, + tag_used_by=tag_obj.used_by, + 
tag_category_id=tag_obj.category_id, + tag_id=tag_obj.id, + ) ) - global_tag_info.append(dict( - tag_name=tag_obj.name, - tag_description=tag_obj.description, - tag_used_by=tag_obj.used_by, - tag_category_id=tag_obj.category_id, - tag_id=tag_obj.id - )) self.module.exit_json( - changed=False, - tag_facts=global_tags, - tag_info=global_tag_info + changed=False, tag_facts=global_tags, tag_info=global_tag_info ) def main(): argument_spec = VmwareRestClient.vmware_client_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=True) - if module._name == 'vmware_tag_facts': - module.deprecate("The 'vmware_tag_facts' module has been renamed to 'vmware_tag_info'", version='2.13') + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) + if module._name == "vmware_tag_facts": + module.deprecate( + "The 'vmware_tag_facts' module has been renamed to 'vmware_tag_info'", + version="2.13", + ) vmware_tag_info = VmTagInfoManager(module) vmware_tag_info.get_all_tags() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_tag_manager.py b/plugins/modules/vmware_tag_manager.py index 5cc2cd3..08e692f 100644 --- a/plugins/modules/vmware_tag_manager.py +++ b/plugins/modules/vmware_tag_manager.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_tag_manager short_description: Manage association of VMware tags with VMware objects @@ -61,9 +62,9 @@ extends_documentation_fragment: - vmware.general.vmware_rest_client.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add tags to a virtual machine vmware_tag_manager: hostname: '{{ vcenter_hostname }}' @@ -116,9 +117,9 @@ object_type: DistributedVirtualPortgroup state: add delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" tag_status: description: metadata about tags related to object configuration returned: on success @@ -136,10 +137,17 @@ "security" ] } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_dvspg_by_name) +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_dvs_by_name, + find_dvspg_by_name, +) + try: from com.vmware.vapi.std_client import DynamicID from com.vmware.vapi.std.errors_client import Error @@ -155,65 +163,88 @@ def __init__(self, module): super(VmwareTagManager, self).__init__(module) self.pyv = PyVmomi(module=module) - self.object_type = self.params.get('object_type') - self.object_name = self.params.get('object_name') + self.object_type = self.params.get("object_type") + self.object_name = self.params.get("object_name") self.managed_object = None - if self.object_type == 'VirtualMachine': + if self.object_type == "VirtualMachine": self.managed_object = self.pyv.get_vm_or_template(self.object_name) - if self.object_type == 
'Datacenter': - self.managed_object = self.pyv.find_datacenter_by_name(self.object_name) + if self.object_type == "Datacenter": + self.managed_object = self.pyv.find_datacenter_by_name( + self.object_name + ) - if self.object_type == 'ClusterComputeResource': - self.managed_object = self.pyv.find_cluster_by_name(self.object_name) + if self.object_type == "ClusterComputeResource": + self.managed_object = self.pyv.find_cluster_by_name( + self.object_name + ) - if self.object_type == 'HostSystem': - self.managed_object = self.pyv.find_hostsystem_by_name(self.object_name) + if self.object_type == "HostSystem": + self.managed_object = self.pyv.find_hostsystem_by_name( + self.object_name + ) - if self.object_type == 'DistributedVirtualSwitch': - self.managed_object = find_dvs_by_name(self.pyv.content, self.object_name) - self.object_type = 'VmwareDistributedVirtualSwitch' + if self.object_type == "DistributedVirtualSwitch": + self.managed_object = find_dvs_by_name( + self.pyv.content, self.object_name + ) + self.object_type = "VmwareDistributedVirtualSwitch" - if self.object_type == 'DistributedVirtualPortgroup': + if self.object_type == "DistributedVirtualPortgroup": dvs_name, pg_name = self.object_name.split(":", 1) dv_switch = find_dvs_by_name(self.pyv.content, dvs_name) if dv_switch is None: - self.module.fail_json(msg="A distributed virtual switch with name %s does not exist" % dvs_name) + self.module.fail_json( + msg="A distributed virtual switch with name %s does not exist" + % dvs_name + ) self.managed_object = find_dvspg_by_name(dv_switch, pg_name) if self.managed_object is None: - self.module.fail_json(msg="Failed to find the managed object for %s with type %s" % (self.object_name, self.object_type)) - - if not hasattr(self.managed_object, '_moId'): - self.module.fail_json(msg="Unable to find managed object id for %s managed object" % self.object_name) - - self.dynamic_managed_object = DynamicID(type=self.object_type, id=self.managed_object._moId) + self.module.fail_json( + msg="Failed to find the managed object for %s with type %s" + % (self.object_name, self.object_type) + ) + + if not hasattr(self.managed_object, "_moId"): + self.module.fail_json( + msg="Unable to find managed object id for %s managed object" + % self.object_name + ) + + self.dynamic_managed_object = DynamicID( + type=self.object_type, id=self.managed_object._moId + ) self.tag_service = self.api_client.tagging.Tag self.category_service = self.api_client.tagging.Category self.tag_association_svc = self.api_client.tagging.TagAssociation - self.tag_names = self.params.get('tag_names') + self.tag_names = self.params.get("tag_names") def ensure_state(self): """ Manage the internal state of tags """ - results = dict( - changed=False, - tag_status=dict(), - ) + results = dict(changed=False, tag_status=dict()) changed = False - action = self.params.get('state') - available_tag_obj = self.get_tags_for_object(tag_service=self.tag_service, - tag_assoc_svc=self.tag_association_svc, - dobj=self.dynamic_managed_object) + action = self.params.get("state") + available_tag_obj = self.get_tags_for_object( + tag_service=self.tag_service, + tag_assoc_svc=self.tag_association_svc, + dobj=self.dynamic_managed_object, + ) - _temp_prev_tags = ["%s:%s" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)] - results['tag_status']['previous_tags'] = _temp_prev_tags - results['tag_status']['desired_tags'] = self.tag_names + _temp_prev_tags = [ + "%s:%s" % (tag["category_name"], 
tag["name"]) + for tag in self.get_tags_for_dynamic_obj( + self.dynamic_managed_object + ) + ] + results["tag_status"]["previous_tags"] = _temp_prev_tags + results["tag_status"]["desired_tags"] = self.tag_names # Check if category and tag combination exists as per user request removed_tags_for_set = False @@ -222,65 +253,109 @@ def ensure_state(self): if ":" in tag: # User specified category category_name, tag_name = tag.split(":", 1) - category_obj = self.search_svc_object_by_name(self.category_service, category_name) + category_obj = self.search_svc_object_by_name( + self.category_service, category_name + ) if not category_obj: - self.module.fail_json(msg="Unable to find the category %s" % category_name) + self.module.fail_json( + msg="Unable to find the category %s" % category_name + ) else: # User specified only tag tag_name = tag if category_name: - tag_obj = self.get_tag_by_category(tag_name=tag_name, category_name=category_name) + tag_obj = self.get_tag_by_category( + tag_name=tag_name, category_name=category_name + ) else: tag_obj = self.get_tag_by_name(tag_name=tag_name) if not tag_obj: - self.module.fail_json(msg="Unable to find the tag %s" % tag_name) + self.module.fail_json( + msg="Unable to find the tag %s" % tag_name + ) - if action in ('add', 'present'): + if action in ("add", "present"): if tag_obj not in available_tag_obj: # Tag is not already applied try: - self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object) + self.tag_association_svc.attach( + tag_id=tag_obj.id, + object_id=self.dynamic_managed_object, + ) changed = True except Error as error: - self.module.fail_json(msg="%s" % self.get_error_message(error)) + self.module.fail_json( + msg="%s" % self.get_error_message(error) + ) - elif action == 'set': + elif action == "set": # Remove all tags first try: if not removed_tags_for_set: for av_tag in available_tag_obj: - self.tag_association_svc.detach(tag_id=av_tag.id, object_id=self.dynamic_managed_object) + self.tag_association_svc.detach( + tag_id=av_tag.id, + object_id=self.dynamic_managed_object, + ) removed_tags_for_set = True - self.tag_association_svc.attach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object) + self.tag_association_svc.attach( + tag_id=tag_obj.id, + object_id=self.dynamic_managed_object, + ) changed = True except Error as error: - self.module.fail_json(msg="%s" % self.get_error_message(error)) + self.module.fail_json( + msg="%s" % self.get_error_message(error) + ) - elif action in ('remove', 'absent'): + elif action in ("remove", "absent"): if tag_obj in available_tag_obj: try: - self.tag_association_svc.detach(tag_id=tag_obj.id, object_id=self.dynamic_managed_object) + self.tag_association_svc.detach( + tag_id=tag_obj.id, + object_id=self.dynamic_managed_object, + ) changed = True except Error as error: - self.module.fail_json(msg="%s" % self.get_error_message(error)) - - _temp_curr_tags = ["%s:%s" % (tag['category_name'], tag['name']) for tag in self.get_tags_for_dynamic_obj(self.dynamic_managed_object)] - results['tag_status']['current_tags'] = _temp_curr_tags - results['changed'] = changed + self.module.fail_json( + msg="%s" % self.get_error_message(error) + ) + + _temp_curr_tags = [ + "%s:%s" % (tag["category_name"], tag["name"]) + for tag in self.get_tags_for_dynamic_obj( + self.dynamic_managed_object + ) + ] + results["tag_status"]["current_tags"] = _temp_curr_tags + results["changed"] = changed self.module.exit_json(**results) def main(): argument_spec = 
VmwareRestClient.vmware_client_argument_spec() argument_spec.update( - tag_names=dict(type='list', required=True), - state=dict(type='str', choices=['absent', 'add', 'present', 'remove', 'set'], default='add'), - object_name=dict(type='str', required=True), - object_type=dict(type='str', required=True, choices=['VirtualMachine', 'Datacenter', 'ClusterComputeResource', - 'HostSystem', 'DistributedVirtualSwitch', - 'DistributedVirtualPortgroup']), + tag_names=dict(type="list", required=True), + state=dict( + type="str", + choices=["absent", "add", "present", "remove", "set"], + default="add", + ), + object_name=dict(type="str", required=True), + object_type=dict( + type="str", + required=True, + choices=[ + "VirtualMachine", + "Datacenter", + "ClusterComputeResource", + "HostSystem", + "DistributedVirtualSwitch", + "DistributedVirtualPortgroup", + ], + ), ) module = AnsibleModule(argument_spec=argument_spec) @@ -288,5 +363,5 @@ def main(): vmware_tag_manager.ensure_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_target_canonical_info.py b/plugins/modules/vmware_target_canonical_info.py index f1105af..b726837 100644 --- a/plugins/modules/vmware_target_canonical_info.py +++ b/plugins/modules/vmware_target_canonical_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_target_canonical_info short_description: Return canonical (NAA) from an ESXi host system @@ -48,9 +49,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Get Canonical name of particular target on particular ESXi host system vmware_target_canonical_info: hostname: '{{ vcenter_hostname }}' @@ -75,7 +76,7 @@ password: '{{ vcenter_password }}' cluster_name: '{{ cluster_name }}' delegate_to: localhost -''' +""" RETURN = r""" canonical: @@ -111,15 +112,20 @@ """ from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) class ScsiTargetInfoManager(PyVmomi): def __init__(self, module): super(ScsiTargetInfoManager, self).__init__(module) - cluster_name = self.module.params.get('cluster_name') - self.esxi_hostname = self.module.params.get('esxi_hostname') - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=self.esxi_hostname) + cluster_name = self.module.params.get("cluster_name") + self.esxi_hostname = self.module.params.get("esxi_hostname") + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=self.esxi_hostname + ) def gather_scsi_device_info(self): """ @@ -129,7 +135,7 @@ def gather_scsi_device_info(self): scsi_tgt_info = {} target_lun_uuid = {} scsilun_canonical = {} - target_id = self.module.params['target_id'] + target_id = self.module.params["target_id"] for host in self.hosts: # Associate the scsiLun key with the canonicalName (NAA) @@ -137,20 +143,29 @@ def gather_scsi_device_info(self): scsilun_canonical[scsilun.key] = 
scsilun.canonicalName # Associate target number with LUN uuid - for target in host.config.storageDevice.scsiTopology.adapter[0].target: + for target in host.config.storageDevice.scsiTopology.adapter[ + 0 + ].target: for lun in target.lun: target_lun_uuid[target.target] = lun.scsiLun scsi_tgt_info[host.name] = dict( scsilun_canonical=scsilun_canonical, - target_lun_uuid=target_lun_uuid) + target_lun_uuid=target_lun_uuid, + ) if target_id is not None and self.esxi_hostname is not None: - canonical = '' - temp_lun_data = scsi_tgt_info[self.esxi_hostname]['target_lun_uuid'] - if self.esxi_hostname in scsi_tgt_info and \ - target_id in temp_lun_data: - temp_scsi_data = scsi_tgt_info[self.esxi_hostname]['scsilun_canonical'] + canonical = "" + temp_lun_data = scsi_tgt_info[self.esxi_hostname][ + "target_lun_uuid" + ] + if ( + self.esxi_hostname in scsi_tgt_info + and target_id in temp_lun_data + ): + temp_scsi_data = scsi_tgt_info[self.esxi_hostname][ + "scsilun_canonical" + ] temp_target = temp_lun_data[target_id] canonical = temp_scsi_data[temp_target] self.module.exit_json(changed=False, canonical=canonical) @@ -162,17 +177,15 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - target_id=dict(required=False, type='int'), - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + target_id=dict(required=False, type="int"), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], + required_one_of=[["cluster_name", "esxi_hostname"]], supports_check_mode=True, ) @@ -180,5 +193,5 @@ def main(): scsi_tgt_manager.gather_scsi_device_info() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vcenter_settings.py b/plugins/modules/vmware_vcenter_settings.py index 4bc3b7f..4182f7f 100644 --- a/plugins/modules/vmware_vcenter_settings.py +++ b/plugins/modules/vmware_vcenter_settings.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vcenter_settings short_description: Configures general settings on a vCenter server @@ -142,9 +143,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure vCenter general settings vmware_vcenter_settings: hostname: '{{ vcenter_hostname }}' @@ -180,9 +181,9 @@ logging_options: info validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about vCenter settings returned: always @@ -209,7 +210,7 @@ "timeout_long_operations": 120, "timeout_normal_operations": 30 } -''' +""" try: from pyVmomi import vim, vmodl @@ -217,7 +218,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native @@ -228,348 +232,585 
@@ def __init__(self, module): super(VmwareVcenterSettings, self).__init__(module) if not self.is_vcenter(): - self.module.fail_json(msg="You have to connect to a vCenter server!") + self.module.fail_json( + msg="You have to connect to a vCenter server!" + ) def ensure(self): """Manage settings for a vCenter server""" - result = dict(changed=False, msg='') - db_max_connections = self.params['database'].get('max_connections') - db_task_cleanup = self.params['database'].get('task_cleanup') - db_task_retention = self.params['database'].get('task_retention') - db_event_cleanup = self.params['database'].get('event_cleanup') - db_event_retention = self.params['database'].get('event_retention') - runtime_unique_id = self.params['runtime_settings'].get('unique_id') - runtime_managed_address = self.params['runtime_settings'].get('managed_address') - runtime_server_name = self.params['runtime_settings'].get('vcenter_server_name') - directory_timeout = self.params['user_directory'].get('timeout') - directory_query_limit = self.params['user_directory'].get('query_limit') - directory_query_limit_size = self.params['user_directory'].get('query_limit_size') - directory_validation = self.params['user_directory'].get('validation') - directory_validation_period = self.params['user_directory'].get('validation_period') - mail_server = self.params['mail'].get('server') - mail_sender = self.params['mail'].get('sender') - snmp_receiver_1_url = self.params['snmp_receivers'].get('snmp_receiver_1_url') - snmp_receiver_1_enabled = self.params['snmp_receivers'].get('snmp_receiver_1_enabled') - snmp_receiver_1_port = self.params['snmp_receivers'].get('snmp_receiver_1_port') - snmp_receiver_1_community = self.params['snmp_receivers'].get('snmp_receiver_1_community') - snmp_receiver_2_url = self.params['snmp_receivers'].get('snmp_receiver_2_url') - snmp_receiver_2_enabled = self.params['snmp_receivers'].get('snmp_receiver_2_enabled') - snmp_receiver_2_port = self.params['snmp_receivers'].get('snmp_receiver_2_port') - snmp_receiver_2_community = self.params['snmp_receivers'].get('snmp_receiver_2_community') - snmp_receiver_3_url = self.params['snmp_receivers'].get('snmp_receiver_3_url') - snmp_receiver_3_enabled = self.params['snmp_receivers'].get('snmp_receiver_3_enabled') - snmp_receiver_3_port = self.params['snmp_receivers'].get('snmp_receiver_3_port') - snmp_receiver_3_community = self.params['snmp_receivers'].get('snmp_receiver_3_community') - snmp_receiver_4_url = self.params['snmp_receivers'].get('snmp_receiver_4_url') - snmp_receiver_4_enabled = self.params['snmp_receivers'].get('snmp_receiver_4_enabled') - snmp_receiver_4_port = self.params['snmp_receivers'].get('snmp_receiver_4_port') - snmp_receiver_4_community = self.params['snmp_receivers'].get('snmp_receiver_4_community') - timeout_normal_operations = self.params['timeout_settings'].get('normal_operations') - timeout_long_operations = self.params['timeout_settings'].get('long_operations') - logging_options = self.params.get('logging_options') + result = dict(changed=False, msg="") + db_max_connections = self.params["database"].get("max_connections") + db_task_cleanup = self.params["database"].get("task_cleanup") + db_task_retention = self.params["database"].get("task_retention") + db_event_cleanup = self.params["database"].get("event_cleanup") + db_event_retention = self.params["database"].get("event_retention") + runtime_unique_id = self.params["runtime_settings"].get("unique_id") + runtime_managed_address = self.params["runtime_settings"].get( + 
"managed_address" + ) + runtime_server_name = self.params["runtime_settings"].get( + "vcenter_server_name" + ) + directory_timeout = self.params["user_directory"].get("timeout") + directory_query_limit = self.params["user_directory"].get( + "query_limit" + ) + directory_query_limit_size = self.params["user_directory"].get( + "query_limit_size" + ) + directory_validation = self.params["user_directory"].get("validation") + directory_validation_period = self.params["user_directory"].get( + "validation_period" + ) + mail_server = self.params["mail"].get("server") + mail_sender = self.params["mail"].get("sender") + snmp_receiver_1_url = self.params["snmp_receivers"].get( + "snmp_receiver_1_url" + ) + snmp_receiver_1_enabled = self.params["snmp_receivers"].get( + "snmp_receiver_1_enabled" + ) + snmp_receiver_1_port = self.params["snmp_receivers"].get( + "snmp_receiver_1_port" + ) + snmp_receiver_1_community = self.params["snmp_receivers"].get( + "snmp_receiver_1_community" + ) + snmp_receiver_2_url = self.params["snmp_receivers"].get( + "snmp_receiver_2_url" + ) + snmp_receiver_2_enabled = self.params["snmp_receivers"].get( + "snmp_receiver_2_enabled" + ) + snmp_receiver_2_port = self.params["snmp_receivers"].get( + "snmp_receiver_2_port" + ) + snmp_receiver_2_community = self.params["snmp_receivers"].get( + "snmp_receiver_2_community" + ) + snmp_receiver_3_url = self.params["snmp_receivers"].get( + "snmp_receiver_3_url" + ) + snmp_receiver_3_enabled = self.params["snmp_receivers"].get( + "snmp_receiver_3_enabled" + ) + snmp_receiver_3_port = self.params["snmp_receivers"].get( + "snmp_receiver_3_port" + ) + snmp_receiver_3_community = self.params["snmp_receivers"].get( + "snmp_receiver_3_community" + ) + snmp_receiver_4_url = self.params["snmp_receivers"].get( + "snmp_receiver_4_url" + ) + snmp_receiver_4_enabled = self.params["snmp_receivers"].get( + "snmp_receiver_4_enabled" + ) + snmp_receiver_4_port = self.params["snmp_receivers"].get( + "snmp_receiver_4_port" + ) + snmp_receiver_4_community = self.params["snmp_receivers"].get( + "snmp_receiver_4_community" + ) + timeout_normal_operations = self.params["timeout_settings"].get( + "normal_operations" + ) + timeout_long_operations = self.params["timeout_settings"].get( + "long_operations" + ) + logging_options = self.params.get("logging_options") changed = False changed_list = [] # Check all general settings, except statistics - result['db_max_connections'] = db_max_connections - result['db_task_cleanup'] = db_task_cleanup - result['db_task_retention'] = db_task_retention - result['db_event_cleanup'] = db_event_cleanup - result['db_event_retention'] = db_event_retention - result['runtime_unique_id'] = runtime_unique_id - result['runtime_managed_address'] = runtime_managed_address - result['runtime_server_name'] = runtime_server_name - result['directory_timeout'] = directory_timeout - result['directory_query_limit'] = directory_query_limit - result['directory_query_limit_size'] = directory_query_limit_size - result['directory_validation'] = directory_validation - result['directory_validation_period'] = directory_validation_period - result['mail_server'] = mail_server - result['mail_sender'] = mail_sender - result['timeout_normal_operations'] = timeout_normal_operations - result['timeout_long_operations'] = timeout_long_operations - result['logging_options'] = logging_options + result["db_max_connections"] = db_max_connections + result["db_task_cleanup"] = db_task_cleanup + result["db_task_retention"] = db_task_retention + 
result["db_event_cleanup"] = db_event_cleanup + result["db_event_retention"] = db_event_retention + result["runtime_unique_id"] = runtime_unique_id + result["runtime_managed_address"] = runtime_managed_address + result["runtime_server_name"] = runtime_server_name + result["directory_timeout"] = directory_timeout + result["directory_query_limit"] = directory_query_limit + result["directory_query_limit_size"] = directory_query_limit_size + result["directory_validation"] = directory_validation + result["directory_validation_period"] = directory_validation_period + result["mail_server"] = mail_server + result["mail_sender"] = mail_sender + result["timeout_normal_operations"] = timeout_normal_operations + result["timeout_long_operations"] = timeout_long_operations + result["logging_options"] = logging_options change_option_list = [] option_manager = self.content.setting for setting in option_manager.setting: # Database - if setting.key == 'VirtualCenter.MaxDBConnection' and setting.value != db_max_connections: + if ( + setting.key == "VirtualCenter.MaxDBConnection" + and setting.value != db_max_connections + ): changed = True changed_list.append("DB max connections") - result['db_max_connections_previous'] = setting.value + result["db_max_connections_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='VirtualCenter.MaxDBConnection', value=db_max_connections) + vim.option.OptionValue( + key="VirtualCenter.MaxDBConnection", + value=db_max_connections, + ) ) - if setting.key == 'task.maxAgeEnabled' and setting.value != db_task_cleanup: + if ( + setting.key == "task.maxAgeEnabled" + and setting.value != db_task_cleanup + ): changed = True changed_list.append("DB task cleanup") - result['db_task_cleanup_previous'] = setting.value + result["db_task_cleanup_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='task.maxAgeEnabled', value=db_task_cleanup) + vim.option.OptionValue( + key="task.maxAgeEnabled", value=db_task_cleanup + ) ) - if setting.key == 'task.maxAge' and setting.value != db_task_retention: + if ( + setting.key == "task.maxAge" + and setting.value != db_task_retention + ): changed = True changed_list.append("DB task retention") - result['db_task_retention_previous'] = setting.value + result["db_task_retention_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='task.maxAge', value=db_task_retention) + vim.option.OptionValue( + key="task.maxAge", value=db_task_retention + ) ) - if setting.key == 'event.maxAgeEnabled' and setting.value != db_event_cleanup: + if ( + setting.key == "event.maxAgeEnabled" + and setting.value != db_event_cleanup + ): changed = True changed_list.append("DB event cleanup") - result['db_event_cleanup_previous'] = setting.value + result["db_event_cleanup_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='event.maxAgeEnabled', value=db_event_cleanup) + vim.option.OptionValue( + key="event.maxAgeEnabled", value=db_event_cleanup + ) ) - if setting.key == 'event.maxAge' and setting.value != db_event_retention: + if ( + setting.key == "event.maxAge" + and setting.value != db_event_retention + ): changed = True changed_list.append("DB event retention") - result['db_event_retention_previous'] = setting.value + result["db_event_retention_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='event.maxAge', value=db_event_retention) + vim.option.OptionValue( + key="event.maxAge", value=db_event_retention + ) ) # Runtime 
settings - if setting.key == 'instance.id' and setting.value != runtime_unique_id: + if ( + setting.key == "instance.id" + and setting.value != runtime_unique_id + ): changed = True changed_list.append("Instance ID") - result['runtime_unique_id_previous'] = setting.value + result["runtime_unique_id_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='instance.id', value=runtime_unique_id) + vim.option.OptionValue( + key="instance.id", value=runtime_unique_id + ) ) - if setting.key == 'VirtualCenter.ManagedIP' and setting.value != runtime_managed_address: + if ( + setting.key == "VirtualCenter.ManagedIP" + and setting.value != runtime_managed_address + ): changed = True changed_list.append("Managed IP") - result['runtime_managed_address_previous'] = setting.value + result["runtime_managed_address_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='VirtualCenter.ManagedIP', value=runtime_managed_address) + vim.option.OptionValue( + key="VirtualCenter.ManagedIP", + value=runtime_managed_address, + ) ) - if setting.key == 'VirtualCenter.InstanceName' and setting.value != runtime_server_name: + if ( + setting.key == "VirtualCenter.InstanceName" + and setting.value != runtime_server_name + ): changed = True changed_list.append("Server name") - result['runtime_server_name_previous'] = setting.value + result["runtime_server_name_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='VirtualCenter.InstanceName', value=runtime_server_name) + vim.option.OptionValue( + key="VirtualCenter.InstanceName", + value=runtime_server_name, + ) ) # User directory - if setting.key == 'ads.timeout' and setting.value != directory_timeout: + if ( + setting.key == "ads.timeout" + and setting.value != directory_timeout + ): changed = True changed_list.append("Directory timeout") - result['directory_timeout_previous'] = setting.value + result["directory_timeout_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='ads.timeout', value=directory_timeout) + vim.option.OptionValue( + key="ads.timeout", value=directory_timeout + ) ) - if setting.key == 'ads.maxFetchEnabled' and setting.value != directory_query_limit: + if ( + setting.key == "ads.maxFetchEnabled" + and setting.value != directory_query_limit + ): changed = True changed_list.append("Query limit") - result['directory_query_limit_previous'] = setting.value + result["directory_query_limit_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='ads.maxFetchEnabled', value=directory_query_limit) + vim.option.OptionValue( + key="ads.maxFetchEnabled", value=directory_query_limit + ) ) - if setting.key == 'ads.maxFetch' and setting.value != directory_query_limit_size: + if ( + setting.key == "ads.maxFetch" + and setting.value != directory_query_limit_size + ): changed = True changed_list.append("Query limit size") - result['directory_query_limit_size_previous'] = setting.value + result["directory_query_limit_size_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='ads.maxFetch', value=directory_query_limit_size) + vim.option.OptionValue( + key="ads.maxFetch", value=directory_query_limit_size + ) ) - if setting.key == 'ads.checkIntervalEnabled' and setting.value != directory_validation: + if ( + setting.key == "ads.checkIntervalEnabled" + and setting.value != directory_validation + ): changed = True changed_list.append("Validation") - result['directory_validation_previous'] = setting.value 
+ result["directory_validation_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='ads.checkIntervalEnabled', value=directory_validation) + vim.option.OptionValue( + key="ads.checkIntervalEnabled", + value=directory_validation, + ) ) - if setting.key == 'ads.checkInterval' and setting.value != directory_validation_period: + if ( + setting.key == "ads.checkInterval" + and setting.value != directory_validation_period + ): changed = True changed_list.append("Validation period") - result['directory_validation_period_previous'] = setting.value + result["directory_validation_period_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='ads.checkInterval', value=directory_validation_period) + vim.option.OptionValue( + key="ads.checkInterval", + value=directory_validation_period, + ) ) # Mail - if setting.key == 'mail.smtp.server' and setting.value != mail_server: + if ( + setting.key == "mail.smtp.server" + and setting.value != mail_server + ): changed = True changed_list.append("Mail server") - result['mail_server_previous'] = setting.value + result["mail_server_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='mail.smtp.server', value=mail_server) + vim.option.OptionValue( + key="mail.smtp.server", value=mail_server + ) ) - if setting.key == 'mail.sender' and setting.value != mail_sender: + if setting.key == "mail.sender" and setting.value != mail_sender: changed = True changed_list.append("Mail sender") - result['mail_sender_previous'] = setting.value + result["mail_sender_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='mail.sender', value=mail_sender) + vim.option.OptionValue( + key="mail.sender", value=mail_sender + ) ) # SNMP receivers - SNMP receiver #1 - if setting.key == 'snmp.receiver.1.enabled' and setting.value != snmp_receiver_1_enabled: + if ( + setting.key == "snmp.receiver.1.enabled" + and setting.value != snmp_receiver_1_enabled + ): changed = True changed_list.append("SNMP-1-enabled") - result['snmp_1_enabled_previous'] = setting.value + result["snmp_1_enabled_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.1.enabled', value=snmp_receiver_1_enabled) + vim.option.OptionValue( + key="snmp.receiver.1.enabled", + value=snmp_receiver_1_enabled, + ) ) - if setting.key == 'snmp.receiver.1.name' and setting.value != snmp_receiver_1_url: + if ( + setting.key == "snmp.receiver.1.name" + and setting.value != snmp_receiver_1_url + ): changed = True changed_list.append("SNMP-1-name") - result['snmp_1_url_previous'] = setting.value + result["snmp_1_url_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.1.name', value=snmp_receiver_1_url) + vim.option.OptionValue( + key="snmp.receiver.1.name", value=snmp_receiver_1_url + ) ) - if setting.key == 'snmp.receiver.1.port' and setting.value != snmp_receiver_1_port: + if ( + setting.key == "snmp.receiver.1.port" + and setting.value != snmp_receiver_1_port + ): changed = True changed_list.append("SNMP-1-port") - result['snmp_receiver_1_port_previous'] = setting.value + result["snmp_receiver_1_port_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.1.port', value=snmp_receiver_1_port) + vim.option.OptionValue( + key="snmp.receiver.1.port", value=snmp_receiver_1_port + ) ) - if setting.key == 'snmp.receiver.1.community' and setting.value != snmp_receiver_1_community: + if ( + 
setting.key == "snmp.receiver.1.community" + and setting.value != snmp_receiver_1_community + ): changed = True changed_list.append("SNMP-1-community") - result['snmp_1_community_previous'] = setting.value + result["snmp_1_community_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.1.community', value=snmp_receiver_1_community) + vim.option.OptionValue( + key="snmp.receiver.1.community", + value=snmp_receiver_1_community, + ) ) # SNMP receivers - SNMP receiver #2 - if setting.key == 'snmp.receiver.2.enabled' and setting.value != snmp_receiver_2_enabled: + if ( + setting.key == "snmp.receiver.2.enabled" + and setting.value != snmp_receiver_2_enabled + ): changed = True changed_list.append("SNMP-2-enabled") - result['snmp_2_enabled_previous'] = setting.value + result["snmp_2_enabled_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.2.enabled', value=snmp_receiver_2_enabled) + vim.option.OptionValue( + key="snmp.receiver.2.enabled", + value=snmp_receiver_2_enabled, + ) ) - if setting.key == 'snmp.receiver.2.name' and setting.value != snmp_receiver_2_url: + if ( + setting.key == "snmp.receiver.2.name" + and setting.value != snmp_receiver_2_url + ): changed = True changed_list.append("SNMP-2-name") - result['snmp_2_url_previous'] = setting.value + result["snmp_2_url_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.2.name', value=snmp_receiver_2_url) + vim.option.OptionValue( + key="snmp.receiver.2.name", value=snmp_receiver_2_url + ) ) - if setting.key == 'snmp.receiver.2.port' and setting.value != snmp_receiver_2_port: + if ( + setting.key == "snmp.receiver.2.port" + and setting.value != snmp_receiver_2_port + ): changed = True changed_list.append("SNMP-2-port") - result['snmp_receiver_2_port_previous'] = setting.value + result["snmp_receiver_2_port_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.2.port', value=snmp_receiver_2_port) + vim.option.OptionValue( + key="snmp.receiver.2.port", value=snmp_receiver_2_port + ) ) - if setting.key == 'snmp.receiver.2.community' and setting.value != snmp_receiver_2_community: + if ( + setting.key == "snmp.receiver.2.community" + and setting.value != snmp_receiver_2_community + ): changed = True changed_list.append("SNMP-2-community") - result['snmp_2_community_previous'] = setting.value + result["snmp_2_community_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.2.community', value=snmp_receiver_2_community) + vim.option.OptionValue( + key="snmp.receiver.2.community", + value=snmp_receiver_2_community, + ) ) # SNMP receivers - SNMP receiver #3 - if setting.key == 'snmp.receiver.3.enabled' and setting.value != snmp_receiver_3_enabled: + if ( + setting.key == "snmp.receiver.3.enabled" + and setting.value != snmp_receiver_3_enabled + ): changed = True changed_list.append("SNMP-3-enabled") - result['snmp_3_enabled_previous'] = setting.value + result["snmp_3_enabled_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.3.enabled', value=snmp_receiver_3_enabled) + vim.option.OptionValue( + key="snmp.receiver.3.enabled", + value=snmp_receiver_3_enabled, + ) ) - if setting.key == 'snmp.receiver.3.name' and setting.value != snmp_receiver_3_url: + if ( + setting.key == "snmp.receiver.3.name" + and setting.value != snmp_receiver_3_url + ): changed = True 
changed_list.append("SNMP-3-name") - result['snmp_3_url_previous'] = setting.value + result["snmp_3_url_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.3.name', value=snmp_receiver_3_url) + vim.option.OptionValue( + key="snmp.receiver.3.name", value=snmp_receiver_3_url + ) ) - if setting.key == 'snmp.receiver.3.port' and setting.value != snmp_receiver_3_port: + if ( + setting.key == "snmp.receiver.3.port" + and setting.value != snmp_receiver_3_port + ): changed = True changed_list.append("SNMP-3-port") - result['snmp_receiver_3_port_previous'] = setting.value + result["snmp_receiver_3_port_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.3.port', value=snmp_receiver_3_port) + vim.option.OptionValue( + key="snmp.receiver.3.port", value=snmp_receiver_3_port + ) ) - if setting.key == 'snmp.receiver.3.community' and setting.value != snmp_receiver_3_community: + if ( + setting.key == "snmp.receiver.3.community" + and setting.value != snmp_receiver_3_community + ): changed = True changed_list.append("SNMP-3-community") - result['snmp_3_community_previous'] = setting.value + result["snmp_3_community_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.3.community', value=snmp_receiver_3_community) + vim.option.OptionValue( + key="snmp.receiver.3.community", + value=snmp_receiver_3_community, + ) ) # SNMP receivers - SNMP receiver #4 - if setting.key == 'snmp.receiver.4.enabled' and setting.value != snmp_receiver_4_enabled: + if ( + setting.key == "snmp.receiver.4.enabled" + and setting.value != snmp_receiver_4_enabled + ): changed = True changed_list.append("SNMP-4-enabled") - result['snmp_4_enabled_previous'] = setting.value + result["snmp_4_enabled_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.4.enabled', value=snmp_receiver_4_enabled) + vim.option.OptionValue( + key="snmp.receiver.4.enabled", + value=snmp_receiver_4_enabled, + ) ) - if setting.key == 'snmp.receiver.4.name' and setting.value != snmp_receiver_4_url: + if ( + setting.key == "snmp.receiver.4.name" + and setting.value != snmp_receiver_4_url + ): changed = True changed_list.append("SNMP-4-name") - result['snmp_4_url_previous'] = setting.value + result["snmp_4_url_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.4.name', value=snmp_receiver_4_url) + vim.option.OptionValue( + key="snmp.receiver.4.name", value=snmp_receiver_4_url + ) ) - if setting.key == 'snmp.receiver.4.port' and setting.value != snmp_receiver_4_port: + if ( + setting.key == "snmp.receiver.4.port" + and setting.value != snmp_receiver_4_port + ): changed = True changed_list.append("SNMP-4-port") - result['snmp_receiver_4_port_previous'] = setting.value + result["snmp_receiver_4_port_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='snmp.receiver.4.port', value=snmp_receiver_4_port) + vim.option.OptionValue( + key="snmp.receiver.4.port", value=snmp_receiver_4_port + ) ) - if setting.key == 'snmp.receiver.4.community' and setting.value != snmp_receiver_4_community: + if ( + setting.key == "snmp.receiver.4.community" + and setting.value != snmp_receiver_4_community + ): changed = True changed_list.append("SNMP-4-community") - result['snmp_4_community_previous'] = setting.value + result["snmp_4_community_previous"] = setting.value change_option_list.append( - 
vim.option.OptionValue(key='snmp.receiver.4.community', value=snmp_receiver_4_community) + vim.option.OptionValue( + key="snmp.receiver.4.community", + value=snmp_receiver_4_community, + ) ) # Timeout settings - if setting.key == 'client.timeout.normal' and setting.value != timeout_normal_operations: + if ( + setting.key == "client.timeout.normal" + and setting.value != timeout_normal_operations + ): changed = True changed_list.append("Timeout normal") - result['timeout_normal_operations_previous'] = setting.value + result["timeout_normal_operations_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='client.timeout.normal', value=timeout_normal_operations) + vim.option.OptionValue( + key="client.timeout.normal", + value=timeout_normal_operations, + ) ) - if setting.key == 'client.timeout.long' and setting.value != timeout_long_operations: + if ( + setting.key == "client.timeout.long" + and setting.value != timeout_long_operations + ): changed = True changed_list.append("Timout long") - result['timeout_long_operations_previous'] = setting.value + result["timeout_long_operations_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='client.timeout.long', value=timeout_long_operations) + vim.option.OptionValue( + key="client.timeout.long", + value=timeout_long_operations, + ) ) # Logging settings - if setting.key == 'log.level' and setting.value != logging_options: + if setting.key == "log.level" and setting.value != logging_options: changed = True changed_list.append("Logging") - result['logging_options_previous'] = setting.value + result["logging_options_previous"] = setting.value change_option_list.append( - vim.option.OptionValue(key='log.level', value=logging_options) + vim.option.OptionValue( + key="log.level", value=logging_options + ) ) if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix if not self.module.check_mode: try: - option_manager.UpdateOptions(changedValue=change_option_list) - except (vmodl.fault.SystemError, vmodl.fault.InvalidArgument) as invalid_argument: + option_manager.UpdateOptions( + changedValue=change_option_list + ) + except ( + vmodl.fault.SystemError, + vmodl.fault.InvalidArgument, + ) as invalid_argument: self.module.fail_json( - msg="Failed to update option(s) as one or more OptionValue contains an invalid value: %s" % - to_native(invalid_argument.msg) + msg="Failed to update option(s) as one or more OptionValue contains an invalid value: %s" + % to_native(invalid_argument.msg) ) except vim.fault.InvalidName as invalid_name: self.module.fail_json( msg="Failed to update option(s) as one or more OptionValue objects refers to a " - "non-existent option : %s" % to_native(invalid_name.msg) + "non-existent option : %s" + % to_native(invalid_name.msg) ) else: message = "vCenter settings already configured properly" - result['changed'] = changed - result['msg'] = message + result["changed"] = changed + result["msg"] = message self.module.exit_json(**result) @@ -579,13 +820,13 @@ def main(): argument_spec = 
vmware_argument_spec() argument_spec.update( database=dict( - type='dict', + type="dict", options=dict( - max_connections=dict(type='int', default=50), - task_cleanup=dict(type='bool', default=True), - task_retention=dict(type='int', default=30), - event_cleanup=dict(type='bool', default=True), - event_retention=dict(type='int', default=30), + max_connections=dict(type="int", default=50), + task_cleanup=dict(type="bool", default=True), + task_retention=dict(type="int", default=30), + event_cleanup=dict(type="bool", default=True), + event_retention=dict(type="int", default=30), ), default=dict( max_connections=50, @@ -596,21 +837,21 @@ def main(): ), ), runtime_settings=dict( - type='dict', + type="dict", options=dict( - unique_id=dict(type='int'), - managed_address=dict(type='str'), - vcenter_server_name=dict(type='str'), + unique_id=dict(type="int"), + managed_address=dict(type="str"), + vcenter_server_name=dict(type="str"), ), ), user_directory=dict( - type='dict', + type="dict", options=dict( - timeout=dict(type='int', default=60), - query_limit=dict(type='bool', default=True), - query_limit_size=dict(type='int', default=5000), - validation=dict(type='bool', default=True), - validation_period=dict(type='int', default=1440), + timeout=dict(type="int", default=60), + query_limit=dict(type="bool", default=True), + query_limit_size=dict(type="int", default=5000), + validation=dict(type="bool", default=True), + validation_period=dict(type="int", default=1440), ), default=dict( timeout=60, @@ -621,77 +862,70 @@ def main(): ), ), mail=dict( - type='dict', - options=dict( - server=dict(type='str'), - sender=dict(type='str'), - ), - default=dict( - server='', - sender='', - ), + type="dict", + options=dict(server=dict(type="str"), sender=dict(type="str")), + default=dict(server="", sender=""), ), snmp_receivers=dict( - type='dict', + type="dict", options=dict( - snmp_receiver_1_url=dict(type='str', default='localhost'), - snmp_receiver_1_enabled=dict(type='bool', default=True), - snmp_receiver_1_port=dict(type='int', default=162), - snmp_receiver_1_community=dict(type='str', default='public'), - snmp_receiver_2_url=dict(type='str', default=''), - snmp_receiver_2_enabled=dict(type='bool', default=False), - snmp_receiver_2_port=dict(type='int', default=162), - snmp_receiver_2_community=dict(type='str', default=''), - snmp_receiver_3_url=dict(type='str', default=''), - snmp_receiver_3_enabled=dict(type='bool', default=False), - snmp_receiver_3_port=dict(type='int', default=162), - snmp_receiver_3_community=dict(type='str', default=''), - snmp_receiver_4_url=dict(type='str', default=''), - snmp_receiver_4_enabled=dict(type='bool', default=False), - snmp_receiver_4_port=dict(type='int', default=162), - snmp_receiver_4_community=dict(type='str', default=''), + snmp_receiver_1_url=dict(type="str", default="localhost"), + snmp_receiver_1_enabled=dict(type="bool", default=True), + snmp_receiver_1_port=dict(type="int", default=162), + snmp_receiver_1_community=dict(type="str", default="public"), + snmp_receiver_2_url=dict(type="str", default=""), + snmp_receiver_2_enabled=dict(type="bool", default=False), + snmp_receiver_2_port=dict(type="int", default=162), + snmp_receiver_2_community=dict(type="str", default=""), + snmp_receiver_3_url=dict(type="str", default=""), + snmp_receiver_3_enabled=dict(type="bool", default=False), + snmp_receiver_3_port=dict(type="int", default=162), + snmp_receiver_3_community=dict(type="str", default=""), + snmp_receiver_4_url=dict(type="str", default=""), + 
snmp_receiver_4_enabled=dict(type="bool", default=False), + snmp_receiver_4_port=dict(type="int", default=162), + snmp_receiver_4_community=dict(type="str", default=""), ), default=dict( - snmp_receiver_1_url='localhost', + snmp_receiver_1_url="localhost", snmp_receiver_1_enabled=True, snmp_receiver_1_port=162, - snmp_receiver_1_community='public', - snmp_receiver_2_url='', + snmp_receiver_1_community="public", + snmp_receiver_2_url="", snmp_receiver_2_enabled=False, snmp_receiver_2_port=162, - snmp_receiver_2_community='', - snmp_receiver_3_url='', + snmp_receiver_2_community="", + snmp_receiver_3_url="", snmp_receiver_3_enabled=False, snmp_receiver_3_port=162, - snmp_receiver_3_community='', - snmp_receiver_4_url='', + snmp_receiver_3_community="", + snmp_receiver_4_url="", snmp_receiver_4_enabled=False, snmp_receiver_4_port=162, - snmp_receiver_4_community='', + snmp_receiver_4_community="", ), ), timeout_settings=dict( - type='dict', + type="dict", options=dict( - normal_operations=dict(type='int', default=30), - long_operations=dict(type='int', default=120), - ), - default=dict( - normal_operations=30, - long_operations=120, + normal_operations=dict(type="int", default=30), + long_operations=dict(type="int", default=120), ), + default=dict(normal_operations=30, long_operations=120), + ), + logging_options=dict( + default="info", + choices=["none", "error", "warning", "info", "verbose", "trivia"], ), - logging_options=dict(default='info', choices=['none', 'error', 'warning', 'info', 'verbose', 'trivia']), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + argument_spec=argument_spec, supports_check_mode=True ) host_snmp = VmwareVcenterSettings(module) host_snmp.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vcenter_statistics.py b/plugins/modules/vmware_vcenter_statistics.py index 61cb691..6bdd1aa 100644 --- a/plugins/modules/vmware_vcenter_statistics.py +++ b/plugins/modules/vmware_vcenter_statistics.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vcenter_statistics short_description: Configures statistics on a vCenter server @@ -69,9 +70,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Configure vCenter statistics vmware_vcenter_statistics: hostname: '{{ vcenter_hostname }}' @@ -94,9 +95,9 @@ level: 1 validate_certs: no delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about vCenter statistics settings returned: always @@ -121,7 +122,7 @@ "past_year_level": 1, "past_year_save_for": 1 } -''' +""" try: from pyVmomi import vim, vmodl @@ -129,7 +130,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native @@ -143,8 +147,11 @@ def __init__(self, old, new): self.new = new def __eq__(self, other): - 
return ((self.key, self.new.enabled, self.new.level) == - (other.key, other.new.enabled, other.new.level)) + return (self.key, self.new.enabled, self.new.level) == ( + other.key, + other.new.enabled, + other.new.level, + ) def __gt__(self, other): if self.key < other.key: @@ -177,7 +184,9 @@ def __init__(self, module): super(VmwareVcenterStatistics, self).__init__(module) if not self.is_vcenter(): - self.module.fail_json(msg="You have to connect to a vCenter server!") + self.module.fail_json( + msg="You have to connect to a vCenter server!" + ) def ensure(self): """Manage statistics for a vCenter server""" @@ -187,86 +196,150 @@ def ensure(self): MONTHLY_COUNTER = 3 YEARLY_COUNTER = 4 - result = dict(changed=False, msg='') - past_day_enabled = self.params['interval_past_day'].get('enabled', True) - past_day_seconds = self.params['interval_past_day'].get('interval_minutes', 5) * 60 - past_day_save_for_seconds = self.params['interval_past_day'].get('save_for_days', 1) * 86400 - past_day_level = self.params['interval_past_day'].get('level', 1) - past_week_enabled = self.params['interval_past_week'].get('enabled', True) - past_week_seconds = self.params['interval_past_week'].get('interval_minutes', 30) * 60 - past_week_save_for_seconds = self.params['interval_past_week'].get('save_for_weeks', 1) * 604800 - past_week_level = self.params['interval_past_week'].get('level', 1) - past_month_enabled = self.params['interval_past_month'].get('enabled', True) - past_month_seconds = self.params['interval_past_month'].get('interval_hours', 2) * 3600 - past_month_save_for_seconds = self.params['interval_past_month'].get('save_for_months', 1) * 2592000 - past_month_level = self.params['interval_past_month'].get('level', 1) - past_year_enabled = self.params['interval_past_year'].get('enabled', True) - past_year_seconds = self.params['interval_past_year'].get('interval_days', 1) * 86400 - past_year_save_for_seconds = self.params['interval_past_year'].get('save_for_years', 1) * 31536000 - past_year_level = self.params['interval_past_year'].get('level', 1) + result = dict(changed=False, msg="") + past_day_enabled = self.params["interval_past_day"].get( + "enabled", True + ) + past_day_seconds = ( + self.params["interval_past_day"].get("interval_minutes", 5) * 60 + ) + past_day_save_for_seconds = ( + self.params["interval_past_day"].get("save_for_days", 1) * 86400 + ) + past_day_level = self.params["interval_past_day"].get("level", 1) + past_week_enabled = self.params["interval_past_week"].get( + "enabled", True + ) + past_week_seconds = ( + self.params["interval_past_week"].get("interval_minutes", 30) * 60 + ) + past_week_save_for_seconds = ( + self.params["interval_past_week"].get("save_for_weeks", 1) * 604800 + ) + past_week_level = self.params["interval_past_week"].get("level", 1) + past_month_enabled = self.params["interval_past_month"].get( + "enabled", True + ) + past_month_seconds = ( + self.params["interval_past_month"].get("interval_hours", 2) * 3600 + ) + past_month_save_for_seconds = ( + self.params["interval_past_month"].get("save_for_months", 1) + * 2592000 + ) + past_month_level = self.params["interval_past_month"].get("level", 1) + past_year_enabled = self.params["interval_past_year"].get( + "enabled", True + ) + past_year_seconds = ( + self.params["interval_past_year"].get("interval_days", 1) * 86400 + ) + past_year_save_for_seconds = ( + self.params["interval_past_year"].get("save_for_years", 1) + * 31536000 + ) + past_year_level = self.params["interval_past_year"].get("level", 1) # Check if 
level options are valid if past_year_level > past_month_level: - self.module.fail_json(msg="The statistics level for past year can't be higher than past month!") + self.module.fail_json( + msg="The statistics level for past year can't be higher than past month!" + ) if past_month_level > past_week_level: - self.module.fail_json(msg="The statistics level for past month can't be higher than past week!") + self.module.fail_json( + msg="The statistics level for past month can't be higher than past week!" + ) if past_week_level > past_day_level: - self.module.fail_json(msg="The statistics level for past week can't be higher than past day!") + self.module.fail_json( + msg="The statistics level for past week can't be higher than past day!" + ) # Check if state options are valid - if not past_day_enabled and (past_week_enabled or past_month_enabled or past_year_enabled): - self.module.fail_json(msg="The intervals past week, month, and year need to be disabled as well!") + if not past_day_enabled and ( + past_week_enabled or past_month_enabled or past_year_enabled + ): + self.module.fail_json( + msg="The intervals past week, month, and year need to be disabled as well!" + ) if not past_week_enabled and (past_month_enabled or past_year_enabled): - self.module.fail_json(msg="The intervals past month, and year need to be disabled as well!") + self.module.fail_json( + msg="The intervals past month, and year need to be disabled as well!" + ) if not past_month_enabled and past_year_enabled: - self.module.fail_json(msg="The interval past year need to be disabled as well!") - if past_year_enabled and (not past_day_enabled or not past_week_enabled or not past_month_enabled): - self.module.fail_json(msg="The intervals past day, week, and month need to be enabled as well!") - if past_month_enabled and (not past_day_enabled or not past_week_enabled): - self.module.fail_json(msg="The intervals past day, and week need to be enabled as well!") + self.module.fail_json( + msg="The interval past year need to be disabled as well!" + ) + if past_year_enabled and ( + not past_day_enabled + or not past_week_enabled + or not past_month_enabled + ): + self.module.fail_json( + msg="The intervals past day, week, and month need to be enabled as well!" + ) + if past_month_enabled and ( + not past_day_enabled or not past_week_enabled + ): + self.module.fail_json( + msg="The intervals past day, and week need to be enabled as well!" + ) if past_week_enabled and (not past_day_enabled): - self.module.fail_json(msg="The intervals past day need to be enabled as well!") + self.module.fail_json( + msg="The intervals past day need to be enabled as well!" 
+ ) changed = False changed_list = [] # Check statistics - result['past_day_enabled'] = past_day_enabled - result['past_day_interval'] = int(past_day_seconds / 60) - result['past_day_save_for'] = int(past_day_save_for_seconds / 86400) - result['past_day_level'] = past_day_level - result['past_week_enabled'] = past_week_enabled - result['past_week_interval'] = int(past_week_seconds / 60) - result['past_week_save_for'] = int(past_week_save_for_seconds / 604800) - result['past_week_level'] = past_week_level - result['past_month_enabled'] = past_month_enabled - result['past_month_interval'] = int(past_month_seconds / 3600) - result['past_month_save_for'] = int(past_month_save_for_seconds / 2592000) - result['past_month_level'] = past_month_level - result['past_year_enabled'] = past_year_enabled - result['past_year_interval'] = int(past_year_seconds / 86400) - result['past_year_save_for'] = int(past_year_save_for_seconds / 31536000) - result['past_year_level'] = past_year_level + result["past_day_enabled"] = past_day_enabled + result["past_day_interval"] = int(past_day_seconds / 60) + result["past_day_save_for"] = int(past_day_save_for_seconds / 86400) + result["past_day_level"] = past_day_level + result["past_week_enabled"] = past_week_enabled + result["past_week_interval"] = int(past_week_seconds / 60) + result["past_week_save_for"] = int(past_week_save_for_seconds / 604800) + result["past_week_level"] = past_week_level + result["past_month_enabled"] = past_month_enabled + result["past_month_interval"] = int(past_month_seconds / 3600) + result["past_month_save_for"] = int( + past_month_save_for_seconds / 2592000 + ) + result["past_month_level"] = past_month_level + result["past_year_enabled"] = past_year_enabled + result["past_year_interval"] = int(past_year_seconds / 86400) + result["past_year_save_for"] = int( + past_year_save_for_seconds / 31536000 + ) + result["past_year_level"] = past_year_level change_statistics_list = [] perf_manager = self.content.perfManager for historical_interval in perf_manager.historicalInterval: # Statistics for past day - if historical_interval.name == 'Past day' and ( - historical_interval.samplingPeriod != past_day_seconds - or historical_interval.length != past_day_save_for_seconds - or historical_interval.level != past_day_level - or historical_interval.enabled != past_day_enabled + if historical_interval.name == "Past day" and ( + historical_interval.samplingPeriod != past_day_seconds + or historical_interval.length != past_day_save_for_seconds + or historical_interval.level != past_day_level + or historical_interval.enabled != past_day_enabled ): changed = True changed_list.append("Past day interval") if historical_interval.enabled != past_day_enabled: - result['past_day_enabled_previous'] = historical_interval.enabled + result[ + "past_day_enabled_previous" + ] = historical_interval.enabled if historical_interval.samplingPeriod != past_day_seconds: - result['past_day_interval_previous'] = int(historical_interval.samplingPeriod / 60) + result["past_day_interval_previous"] = int( + historical_interval.samplingPeriod / 60 + ) if historical_interval.length != past_day_save_for_seconds: - result['past_day_save_for_previous'] = int(historical_interval.length / 86400) + result["past_day_save_for_previous"] = int( + historical_interval.length / 86400 + ) if historical_interval.level != past_day_level: - result['past_day_level_previous'] = historical_interval.level + result[ + "past_day_level_previous" + ] = historical_interval.level 
change_statistics_list.append( ChangeHelper( @@ -274,31 +347,39 @@ def ensure(self): vim.HistoricalInterval( key=DAILY_COUNTER, samplingPeriod=past_day_seconds, - name='Past day', + name="Past day", length=past_day_save_for_seconds, level=past_day_level, - enabled=past_day_enabled - ) + enabled=past_day_enabled, + ), ) ) # Statistics for past week - if historical_interval.name == 'Past week' and ( - historical_interval.samplingPeriod != past_week_seconds - or historical_interval.length != past_week_save_for_seconds - or historical_interval.level != past_week_level - or historical_interval.enabled != past_week_enabled + if historical_interval.name == "Past week" and ( + historical_interval.samplingPeriod != past_week_seconds + or historical_interval.length != past_week_save_for_seconds + or historical_interval.level != past_week_level + or historical_interval.enabled != past_week_enabled ): changed = True changed_list.append("Past week interval") if historical_interval.enabled != past_week_enabled: - result['past_week_enabled_previous'] = historical_interval.enabled + result[ + "past_week_enabled_previous" + ] = historical_interval.enabled if historical_interval.samplingPeriod != past_week_seconds: - result['past_week_interval_previous'] = int(historical_interval.samplingPeriod / 60) + result["past_week_interval_previous"] = int( + historical_interval.samplingPeriod / 60 + ) if historical_interval.length != past_week_save_for_seconds: - result['past_week_save_for_previous'] = int(historical_interval.length / 604800) + result["past_week_save_for_previous"] = int( + historical_interval.length / 604800 + ) if historical_interval.level != past_week_level: - result['past_week_level_previous'] = historical_interval.level + result[ + "past_week_level_previous" + ] = historical_interval.level change_statistics_list.append( ChangeHelper( @@ -306,31 +387,39 @@ def ensure(self): vim.HistoricalInterval( key=WEEKLY_COUNTER, samplingPeriod=past_week_seconds, - name='Past week', + name="Past week", length=past_week_save_for_seconds, level=past_week_level, - enabled=past_week_enabled - ) + enabled=past_week_enabled, + ), ) ) # Statistics for past month - if historical_interval.name == 'Past month' and ( - historical_interval.samplingPeriod != past_month_seconds - or historical_interval.length != past_month_save_for_seconds - or historical_interval.level != past_month_level - or historical_interval.enabled != past_month_enabled + if historical_interval.name == "Past month" and ( + historical_interval.samplingPeriod != past_month_seconds + or historical_interval.length != past_month_save_for_seconds + or historical_interval.level != past_month_level + or historical_interval.enabled != past_month_enabled ): changed = True changed_list.append("Past month interval") if historical_interval.enabled != past_month_enabled: - result['past_month_enabled_previous'] = historical_interval.enabled + result[ + "past_month_enabled_previous" + ] = historical_interval.enabled if historical_interval.samplingPeriod != past_month_seconds: - result['past_month_interval_previous'] = int(historical_interval.samplingPeriod / 3600) + result["past_month_interval_previous"] = int( + historical_interval.samplingPeriod / 3600 + ) if historical_interval.length != past_month_save_for_seconds: - result['past_month_save_for_previous'] = int(historical_interval.length / 2592000) + result["past_month_save_for_previous"] = int( + historical_interval.length / 2592000 + ) if historical_interval.level != past_month_level: - 
result['past_month_level_previous'] = historical_interval.level + result[ + "past_month_level_previous" + ] = historical_interval.level change_statistics_list.append( ChangeHelper( @@ -338,31 +427,39 @@ def ensure(self): vim.HistoricalInterval( key=MONTHLY_COUNTER, samplingPeriod=past_month_seconds, - name='Past month', + name="Past month", length=past_month_save_for_seconds, level=past_month_level, - enabled=past_month_enabled - ) + enabled=past_month_enabled, + ), ) ) # Statistics for past year - if historical_interval.name == 'Past year' and ( - historical_interval.samplingPeriod != past_year_seconds - or historical_interval.length != past_year_save_for_seconds - or historical_interval.level != past_year_level - or historical_interval.enabled != past_year_enabled + if historical_interval.name == "Past year" and ( + historical_interval.samplingPeriod != past_year_seconds + or historical_interval.length != past_year_save_for_seconds + or historical_interval.level != past_year_level + or historical_interval.enabled != past_year_enabled ): changed = True changed_list.append("Past year interval") if historical_interval.enabled != past_year_enabled: - result['past_year_enabled_previous'] = historical_interval.enabled + result[ + "past_year_enabled_previous" + ] = historical_interval.enabled if historical_interval.samplingPeriod != past_year_seconds: - result['past_year_interval_previous'] = int(historical_interval.samplingPeriod / 86400) + result["past_year_interval_previous"] = int( + historical_interval.samplingPeriod / 86400 + ) if historical_interval.length != past_year_save_for_seconds: - result['past_year_save_for_previous'] = int(historical_interval.length / 31536000) + result["past_year_save_for_previous"] = int( + historical_interval.length / 31536000 + ) if historical_interval.level != past_year_level: - result['past_year_level_previous'] = historical_interval.level + result[ + "past_year_level_previous" + ] = historical_interval.level change_statistics_list.append( ChangeHelper( @@ -370,23 +467,27 @@ def ensure(self): vim.HistoricalInterval( key=YEARLY_COUNTER, samplingPeriod=past_year_seconds, - name='Past year', + name="Past year", length=past_year_save_for_seconds, level=past_year_level, - enabled=past_year_enabled - ) + enabled=past_year_enabled, + ), ) ) if changed: if self.module.check_mode: - changed_suffix = ' would be changed' + changed_suffix = " would be changed" else: - changed_suffix = ' changed' + changed_suffix = " changed" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message += changed_suffix @@ -396,8 +497,8 @@ def ensure(self): self.update_perf_interval(perf_manager, statistic.new) else: message = "vCenter statistics already configured properly" - result['changed'] = changed - result['msg'] = message + result["changed"] = changed + result["msg"] = message self.module.exit_json(**result) @@ -408,7 +509,8 @@ def update_perf_interval(self, perf_manager, statistic): except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( msg="The set of arguments passed to the function is not specified correctly or " - "the update does not conform to the rules: %s" % to_native(invalid_argument.msg) + "the update does not conform to the rules: %s" + % 
to_native(invalid_argument.msg) ) @@ -417,51 +519,56 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( interval_past_day=dict( - type='dict', + type="dict", options=dict( - enabled=dict(type='bool', default=True), - interval_minutes=dict(type='int', choices=[1, 2, 3, 4, 5], default=5), - save_for_days=dict(type='int', choices=[1, 2, 3, 4, 5], default=1), - level=dict(type='int', choices=[1, 2, 3, 4], default=1), + enabled=dict(type="bool", default=True), + interval_minutes=dict( + type="int", choices=[1, 2, 3, 4, 5], default=5 + ), + save_for_days=dict( + type="int", choices=[1, 2, 3, 4, 5], default=1 + ), + level=dict(type="int", choices=[1, 2, 3, 4], default=1), ), ), interval_past_week=dict( - type='dict', + type="dict", options=dict( - enabled=dict(type='bool', default=True), - interval_minutes=dict(type='int', choices=[30], default=30), - save_for_weeks=dict(type='int', choices=[1], default=1), - level=dict(type='int', choices=[1, 2, 3, 4], default=1), + enabled=dict(type="bool", default=True), + interval_minutes=dict(type="int", choices=[30], default=30), + save_for_weeks=dict(type="int", choices=[1], default=1), + level=dict(type="int", choices=[1, 2, 3, 4], default=1), ), ), interval_past_month=dict( - type='dict', + type="dict", options=dict( - enabled=dict(type='bool', default=True), - interval_hours=dict(type='int', choices=[2], default=2), - save_for_months=dict(type='int', choices=[1], default=1), - level=dict(type='int', choices=[1, 2, 3, 4], default=1), + enabled=dict(type="bool", default=True), + interval_hours=dict(type="int", choices=[2], default=2), + save_for_months=dict(type="int", choices=[1], default=1), + level=dict(type="int", choices=[1, 2, 3, 4], default=1), ), ), interval_past_year=dict( - type='dict', + type="dict", options=dict( - enabled=dict(type='bool', default=True), - interval_days=dict(type='int', choices=[1], default=1), - save_for_years=dict(type='int', choices=[1, 2, 3, 4, 5], default=1), - level=dict(type='int', choices=[1, 2, 3, 4], default=1), + enabled=dict(type="bool", default=True), + interval_days=dict(type="int", choices=[1], default=1), + save_for_years=dict( + type="int", choices=[1, 2, 3, 4, 5], default=1 + ), + level=dict(type="int", choices=[1, 2, 3, 4], default=1), ), ), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + argument_spec=argument_spec, supports_check_mode=True ) host_snmp = VmwareVcenterStatistics(module) host_snmp.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vm_host_drs_rule.py b/plugins/modules/vmware_vm_host_drs_rule.py index 4257295..68c479a 100644 --- a/plugins/modules/vmware_vm_host_drs_rule.py +++ b/plugins/modules/vmware_vm_host_drs_rule.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- author: - "Karsten Kaj Jakobsen (@karstenjakobsen)" @@ -89,9 +90,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" --- - name: "Create mandatory DRS Affinity rule for VM/Host" vmware_vm_host_drs_rule: @@ -106,11 +107,11 @@ mandatory: True enabled: True 
affinity_rule: True -''' +""" -RETURN = r''' +RETURN = r""" -''' +""" try: from pyVmomi import vim @@ -119,8 +120,13 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task, find_cluster_by_name, - find_datacenter_by_name) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + find_cluster_by_name, + find_datacenter_by_name, +) class VmwareVmHostRuleDrs(PyVmomi): @@ -135,41 +141,51 @@ def __init__(self, module): super(VmwareVmHostRuleDrs, self).__init__(module) - self.__datacenter_name = module.params.get('datacenter', None) + self.__datacenter_name = module.params.get("datacenter", None) self.__datacenter_obj = None - self.__cluster_name = module.params['cluster_name'] + self.__cluster_name = module.params["cluster_name"] self.__cluster_obj = None - self.__vm_group_name = module.params.get('vm_group_name', None) - self.__host_group_name = module.params.get('host_group_name', None) - self.__rule_name = module.params['drs_rule_name'] - self.__enabled = module.params['enabled'] - self.__mandatory = module.params['mandatory'] - self.__affinity_rule = module.params['affinity_rule'] - self.__state = module.params['state'] - self.__msg = 'Nothing to see here...' + self.__vm_group_name = module.params.get("vm_group_name", None) + self.__host_group_name = module.params.get("host_group_name", None) + self.__rule_name = module.params["drs_rule_name"] + self.__enabled = module.params["enabled"] + self.__mandatory = module.params["mandatory"] + self.__affinity_rule = module.params["affinity_rule"] + self.__state = module.params["state"] + self.__msg = "Nothing to see here..." 
self.__result = dict() self.__changed = False if self.__datacenter_name is not None: - self.__datacenter_obj = find_datacenter_by_name(self.content, self.__datacenter_name) + self.__datacenter_obj = find_datacenter_by_name( + self.content, self.__datacenter_name + ) if self.__datacenter_obj is None and module.check_mode is False: - raise Exception("Datacenter '%s' not found" % self.__datacenter_name) + raise Exception( + "Datacenter '%s' not found" % self.__datacenter_name + ) - self.__cluster_obj = find_cluster_by_name(content=self.content, - cluster_name=self.__cluster_name, - datacenter=self.__datacenter_obj) + self.__cluster_obj = find_cluster_by_name( + content=self.content, + cluster_name=self.__cluster_name, + datacenter=self.__datacenter_obj, + ) # Throw error if cluster does not exist if self.__cluster_obj is None and module.check_mode is False: raise Exception("Cluster '%s' not found" % self.__cluster_name) # Dont populate lists if we are deleting group - if self.__state == 'present': + if self.__state == "present": # Get list of vm groups only if state is present - self.__vm_group_obj = self.__get_group_by_name(group_name=self.__vm_group_name) - self.__host_group_obj = self.__get_group_by_name(group_name=self.__host_group_name, host_group=True) + self.__vm_group_obj = self.__get_group_by_name( + group_name=self.__vm_group_name + ) + self.__host_group_obj = self.__get_group_by_name( + group_name=self.__host_group_name, host_group=True + ) def get_msg(self): """ @@ -198,7 +214,9 @@ def get_changed(self): """ return self.__changed - def __get_group_by_name(self, group_name, cluster_obj=None, host_group=False): + def __get_group_by_name( + self, group_name, cluster_obj=None, host_group=False + ): """ Return group Args: @@ -220,7 +238,10 @@ def __get_group_by_name(self, group_name, cluster_obj=None, host_group=False): if group.name == group_name: return group - raise Exception("Failed to find the group %s in given cluster %s" % (group_name, cluster_obj.name)) + raise Exception( + "Failed to find the group %s in given cluster %s" + % (group_name, cluster_obj.name) + ) def __get_rule_key_by_name(self, cluster_obj=None, rule_name=None): """ @@ -240,7 +261,11 @@ def __get_rule_key_by_name(self, cluster_obj=None, rule_name=None): rule_name = self.__rule_name if rule_name: - rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name] + rules_list = [ + rule + for rule in cluster_obj.configuration.rule + if rule.name == rule_name + ] if rules_list: return rules_list[0] @@ -263,26 +288,34 @@ def __normalize_vm_host_rule_spec(self, rule_obj, cluster_obj=None): if not all([rule_obj, cluster_obj]): return {} - return dict(rule_key=rule_obj.key, - rule_enabled=rule_obj.enabled, - rule_name=rule_obj.name, - rule_mandatory=rule_obj.mandatory, - rule_uuid=rule_obj.ruleUuid, - rule_vm_group_name=rule_obj.vmGroupName, - rule_affine_host_group_name=rule_obj.affineHostGroupName, - rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName, - rule_vms=self.__get_all_from_group(group_name=rule_obj.vmGroupName, - cluster_obj=cluster_obj), - rule_affine_hosts=self.__get_all_from_group(group_name=rule_obj.affineHostGroupName, - cluster_obj=cluster_obj, - host_group=True), - rule_anti_affine_hosts=self.__get_all_from_group(group_name=rule_obj.antiAffineHostGroupName, - cluster_obj=cluster_obj, - host_group=True), - rule_type="vm_host_rule" - ) - - def __get_all_from_group(self, group_name=None, cluster_obj=None, host_group=False): + return dict( + rule_key=rule_obj.key, + 
rule_enabled=rule_obj.enabled, + rule_name=rule_obj.name, + rule_mandatory=rule_obj.mandatory, + rule_uuid=rule_obj.ruleUuid, + rule_vm_group_name=rule_obj.vmGroupName, + rule_affine_host_group_name=rule_obj.affineHostGroupName, + rule_anti_affine_host_group_name=rule_obj.antiAffineHostGroupName, + rule_vms=self.__get_all_from_group( + group_name=rule_obj.vmGroupName, cluster_obj=cluster_obj + ), + rule_affine_hosts=self.__get_all_from_group( + group_name=rule_obj.affineHostGroupName, + cluster_obj=cluster_obj, + host_group=True, + ), + rule_anti_affine_hosts=self.__get_all_from_group( + group_name=rule_obj.antiAffineHostGroupName, + cluster_obj=cluster_obj, + host_group=True, + ), + rule_type="vm_host_rule", + ) + + def __get_all_from_group( + self, group_name=None, cluster_obj=None, host_group=False + ): """ Return all VM / Host names using given group name Args: @@ -317,14 +350,22 @@ def __check_rule_has_changed(self, rule_obj, cluster_obj=None): if cluster_obj is None: cluster_obj = self.__cluster_obj - existing_rule = self.__normalize_vm_host_rule_spec(rule_obj=rule_obj, cluster_obj=cluster_obj) + existing_rule = self.__normalize_vm_host_rule_spec( + rule_obj=rule_obj, cluster_obj=cluster_obj + ) # Check if anything has changed - if ((existing_rule['rule_enabled'] == self.__enabled) and - (existing_rule['rule_mandatory'] == self.__mandatory) and - (existing_rule['rule_vm_group_name'] == self.__vm_group_name) and - (existing_rule['rule_affine_host_group_name'] == self.__host_group_name or - existing_rule['rule_anti_affine_host_group_name'] == self.__host_group_name)): + if ( + (existing_rule["rule_enabled"] == self.__enabled) + and (existing_rule["rule_mandatory"] == self.__mandatory) + and (existing_rule["rule_vm_group_name"] == self.__vm_group_name) + and ( + existing_rule["rule_affine_host_group_name"] + == self.__host_group_name + or existing_rule["rule_anti_affine_host_group_name"] + == self.__host_group_name + ) + ): return False else: @@ -339,14 +380,16 @@ def create(self): # Check if rule exists if rule_obj: - operation = 'edit' + operation = "edit" rule_changed = self.__check_rule_has_changed(rule_obj) else: - operation = 'add' + operation = "add" # Check if anything has changed when editing - if operation == 'add' or (operation == 'edit' and rule_changed is True): + if operation == "add" or ( + operation == "edit" and rule_changed is True + ): rule = vim.cluster.VmHostRuleInfo() @@ -371,7 +414,9 @@ def create(self): if not self.module.check_mode: - task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True) + task = self.__cluster_obj.ReconfigureEx( + config_spec, modify=True + ) wait_for_task(task) self.__changed = True @@ -379,10 +424,14 @@ def create(self): rule_obj = self.__get_rule_key_by_name(rule_name=self.__rule_name) self.__result = self.__normalize_vm_host_rule_spec(rule_obj) - if operation == 'edit': - self.__msg = "Updated DRS rule `%s` successfully" % (self.__rule_name) + if operation == "edit": + self.__msg = "Updated DRS rule `%s` successfully" % ( + self.__rule_name + ) else: - self.__msg = "Created DRS rule `%s` successfully" % (self.__rule_name) + self.__msg = "Created DRS rule `%s` successfully" % ( + self.__rule_name + ) # Delete def delete(self, rule_name=None): @@ -399,69 +448,87 @@ def delete(self, rule_name=None): if rule_obj is not None: rule_key = int(rule_obj.key) - rule_spec = vim.cluster.RuleSpec(removeKey=rule_key, operation='remove') + rule_spec = vim.cluster.RuleSpec( + removeKey=rule_key, operation="remove" + ) config_spec = 
vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec]) if not self.module.check_mode: - task = self.__cluster_obj.ReconfigureEx(config_spec, modify=True) + task = self.__cluster_obj.ReconfigureEx( + config_spec, modify=True + ) wait_for_task(task) self.__changed = True if self.__changed: - self.__msg = "Deleted DRS rule `%s` successfully" % (self.__rule_name) + self.__msg = "Deleted DRS rule `%s` successfully" % ( + self.__rule_name + ) else: - self.__msg = "DRS Rule `%s` does not exists or already deleted" % (self.__rule_name) + self.__msg = "DRS Rule `%s` does not exists or already deleted" % ( + self.__rule_name + ) def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - vm_group_name=dict(type='str', required=True), - host_group_name=dict(type='str', required=True), - cluster_name=dict(type='str', required=True), - datacenter=dict(type='str', required=False, aliases=['datacenter_name']), - drs_rule_name=dict(type='str', required=True), - enabled=dict(type='bool', default=False), - mandatory=dict(type='bool', default=False), - affinity_rule=dict(type='bool', default=True)) + argument_spec.update( + dict( + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + vm_group_name=dict(type="str", required=True), + host_group_name=dict(type="str", required=True), + cluster_name=dict(type="str", required=True), + datacenter=dict( + type="str", required=False, aliases=["datacenter_name"] + ), + drs_rule_name=dict(type="str", required=True), + enabled=dict(type="bool", default=False), + mandatory=dict(type="bool", default=False), + affinity_rule=dict(type="bool", default=True), + ) ) required_if = [ - ['state', 'present', ['vm_group_name'], ['host_group_name']], + ["state", "present", ["vm_group_name"], ["host_group_name"]] ] - module = AnsibleModule(argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + ) try: # Create instance of VmwareDrsGroupManager vm_host_drs = VmwareVmHostRuleDrs(module=module) - if module.params['state'] == 'present': + if module.params["state"] == "present": vm_host_drs.create() - elif module.params['state'] == 'absent': + elif module.params["state"] == "absent": vm_host_drs.delete() # Set results - results = dict(msg=vm_host_drs.get_msg(), - failed=False, - changed=vm_host_drs.get_changed(), - result=vm_host_drs.get_result()) + results = dict( + msg=vm_host_drs.get_msg(), + failed=False, + changed=vm_host_drs.get_changed(), + result=vm_host_drs.get_result(), + ) except Exception as error: results = dict(failed=True, msg="Error: `%s`" % error) - if results['failed']: + if results["failed"]: module.fail_json(**results) else: module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vm_info.py b/plugins/modules/vmware_vm_info.py index 9a09da9..40f0261 100644 --- a/plugins/modules/vmware_vm_info.py +++ b/plugins/modules/vmware_vm_info.py @@ -7,15 +7,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' 
+DOCUMENTATION = r""" --- module: vmware_vm_info short_description: Return basic info pertaining to a VMware machine guest @@ -70,9 +71,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather all registered virtual machines vmware_vm_info: hostname: '{{ vcenter_hostname }}' @@ -152,9 +153,9 @@ folder: "/Asia-Datacenter1/vm/prod" delegate_to: localhost register: vm_info -''' +""" -RETURN = r''' +RETURN = r""" virtual_machines: description: list of dictionary of virtual machines and their information returned: success @@ -193,7 +194,7 @@ ] } ] -''' +""" try: from pyVmomi import vim @@ -201,8 +202,15 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, get_all_objs, vmware_argument_spec, _get_vm_prop -from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import VmwareRestClient +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + get_all_objs, + vmware_argument_spec, + _get_vm_prop, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware_rest_client import ( + VmwareRestClient, +) class VmwareVmInfo(PyVmomi): @@ -214,22 +222,31 @@ def get_tag_info(self, vm_dynamic_obj): return vmware_client.get_tags_for_vm(vm_mid=vm_dynamic_obj._moId) def get_vm_attributes(self, vm): - return dict((x.name, v.value) for x in self.custom_field_mgr - for v in vm.customValue if x.key == v.key) + return dict( + (x.name, v.value) + for x in self.custom_field_mgr + for v in vm.customValue + if x.key == v.key + ) # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py def get_all_virtual_machines(self): """ Get all virtual machines and related configurations information """ - folder = self.params.get('folder') + folder = self.params.get("folder") folder_obj = None if folder: folder_obj = self.content.searchIndex.FindByInventoryPath(folder) if not folder_obj: - self.module.fail_json(msg="Failed to find folder specified by %(folder)s" % self.params) - - virtual_machines = get_all_objs(self.content, [vim.VirtualMachine], folder=folder_obj) + self.module.fail_json( + msg="Failed to find folder specified by %(folder)s" + % self.params + ) + + virtual_machines = get_all_objs( + self.content, [vim.VirtualMachine], folder=folder_obj + ) _virtual_machines = [] for vm in virtual_machines: @@ -240,24 +257,24 @@ def get_all_virtual_machines(self): if _ip_address is None: _ip_address = "" _mac_address = [] - all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device')) + all_devices = _get_vm_prop(vm, ("config", "hardware", "device")) if all_devices: for dev in all_devices: if isinstance(dev, vim.vm.device.VirtualEthernetCard): _mac_address.append(dev.macAddress) net_dict = {} - vmnet = _get_vm_prop(vm, ('guest', 'net')) + vmnet = _get_vm_prop(vm, ("guest", "net")) if vmnet: for device in vmnet: net_dict[device.macAddress] = dict() - net_dict[device.macAddress]['ipv4'] = [] - net_dict[device.macAddress]['ipv6'] = [] + net_dict[device.macAddress]["ipv4"] = [] + net_dict[device.macAddress]["ipv6"] = [] for ip_addr in device.ipAddress: if "::" in ip_addr: - net_dict[device.macAddress]['ipv6'].append(ip_addr) + net_dict[device.macAddress]["ipv6"].append(ip_addr) else: - net_dict[device.macAddress]['ipv4'].append(ip_addr) + net_dict[device.macAddress]["ipv4"].append(ip_addr) esxi_hostname = None esxi_parent = None @@ -266,15 +283,17 @@ def 
get_all_virtual_machines(self): esxi_parent = summary.runtime.host.parent cluster_name = None - if esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource): + if esxi_parent and isinstance( + esxi_parent, vim.ClusterComputeResource + ): cluster_name = summary.runtime.host.parent.name vm_attributes = dict() - if self.module.params.get('show_attribute'): + if self.module.params.get("show_attribute"): vm_attributes = self.get_vm_attributes(vm) vm_tags = list() - if self.module.params.get('show_tag'): + if self.module.params.get("show_tag"): vm_tags = self.get_tag_info(vm) virtual_machine = { @@ -288,16 +307,16 @@ def get_all_virtual_machines(self): "esxi_hostname": esxi_hostname, "cluster": cluster_name, "attributes": vm_attributes, - "tags": vm_tags + "tags": vm_tags, } - vm_type = self.module.params.get('vm_type') - is_template = _get_vm_prop(vm, ('config', 'template')) - if vm_type == 'vm' and not is_template: + vm_type = self.module.params.get("vm_type") + is_template = _get_vm_prop(vm, ("config", "template")) + if vm_type == "vm" and not is_template: _virtual_machines.append(virtual_machine) - elif vm_type == 'template' and is_template: + elif vm_type == "template" and is_template: _virtual_machines.append(virtual_machine) - elif vm_type == 'all': + elif vm_type == "all": _virtual_machines.append(virtual_machine) return _virtual_machines @@ -305,18 +324,22 @@ def get_all_virtual_machines(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - vm_type=dict(type='str', choices=['vm', 'all', 'template'], default='all'), - show_attribute=dict(type='bool', default='no'), - show_tag=dict(type='bool', default=False), - folder=dict(type='str'), + vm_type=dict( + type="str", choices=["vm", "all", "template"], default="all" + ), + show_attribute=dict(type="bool", default="no"), + show_tag=dict(type="bool", default=False), + folder=dict(type="str"), ) module = AnsibleModule( - argument_spec=argument_spec, - supports_check_mode=True + argument_spec=argument_spec, supports_check_mode=True ) - if module._name == 'vmware_vm_facts': - module.deprecate("The 'vmware_vm_facts' module has been renamed to 'vmware_vm_info'", version='2.13') + if module._name == "vmware_vm_facts": + module.deprecate( + "The 'vmware_vm_facts' module has been renamed to 'vmware_vm_info'", + version="2.13", + ) vmware_vm_info = VmwareVmInfo(module) _virtual_machines = vmware_vm_info.get_all_virtual_machines() @@ -324,5 +347,5 @@ def main(): module.exit_json(changed=False, virtual_machines=_virtual_machines) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vm_shell.py b/plugins/modules/vmware_vm_shell.py index 95782c1..5f17846 100644 --- a/plugins/modules/vmware_vm_shell.py +++ b/plugins/modules/vmware_vm_shell.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vm_shell short_description: Run commands in a VMware guest operating system @@ -110,9 +111,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Run command inside a virtual machine vmware_vm_shell: hostname: "{{ 
vcenter_hostname }}" @@ -177,9 +178,9 @@ vm_shell: "/usr/bin/hostnamectl" vm_shell_args: "set-hostname new_hostname > /tmp/$$.txt 2>&1" delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" results: description: metadata about the new process after completion with wait_for_process returned: on success @@ -194,9 +195,10 @@ "start_time": "2018-04-26T05:03:19+00:00", "uuid": "564db1e2-a3ff-3b0e-8b77-49c25570bb66", } -''' +""" import time + try: from pyVmomi import vim, vmodl except ImportError: @@ -204,52 +206,71 @@ from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_cluster_by_name, - find_datacenter_by_name, find_vm_by_id, - vmware_argument_spec) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_cluster_by_name, + find_datacenter_by_name, + find_vm_by_id, + vmware_argument_spec, +) class VMwareShellManager(PyVmomi): def __init__(self, module): super(VMwareShellManager, self).__init__(module) - datacenter_name = module.params['datacenter'] - cluster_name = module.params['cluster'] - folder = module.params['folder'] + datacenter_name = module.params["datacenter"] + cluster_name = module.params["cluster"] + folder = module.params["folder"] self.pm = self.content.guestOperationsManager.processManager - self.timeout = self.params.get('timeout', 3600) - self.wait_for_pid = self.params.get('wait_for_process', False) + self.timeout = self.params.get("timeout", 3600) + self.wait_for_pid = self.params.get("wait_for_process", False) datacenter = None if datacenter_name: datacenter = find_datacenter_by_name(self.content, datacenter_name) if not datacenter: - module.fail_json(changed=False, msg="Unable to find %(datacenter)s datacenter" % module.params) + module.fail_json( + changed=False, + msg="Unable to find %(datacenter)s datacenter" + % module.params, + ) cluster = None if cluster_name: - cluster = find_cluster_by_name(self.content, cluster_name, datacenter) + cluster = find_cluster_by_name( + self.content, cluster_name, datacenter + ) if not cluster: - module.fail_json(changed=False, msg="Unable to find %(cluster)s cluster" % module.params) + module.fail_json( + changed=False, + msg="Unable to find %(cluster)s cluster" % module.params, + ) - if module.params['vm_id_type'] == 'inventory_path': - vm = find_vm_by_id(self.content, - vm_id=module.params['vm_id'], - vm_id_type="inventory_path", - folder=folder) + if module.params["vm_id_type"] == "inventory_path": + vm = find_vm_by_id( + self.content, + vm_id=module.params["vm_id"], + vm_id_type="inventory_path", + folder=folder, + ) else: - vm = find_vm_by_id(self.content, - vm_id=module.params['vm_id'], - vm_id_type=module.params['vm_id_type'], - datacenter=datacenter, - cluster=cluster) + vm = find_vm_by_id( + self.content, + vm_id=module.params["vm_id"], + vm_id_type=module.params["vm_id_type"], + datacenter=datacenter, + cluster=cluster, + ) if not vm: - module.fail_json(msg='Unable to find virtual machine.') + module.fail_json(msg="Unable to find virtual machine.") tools_status = vm.guest.toolsStatus - if tools_status in ['toolsNotInstalled', 'toolsNotRunning']: - self.module.fail_json(msg="VMwareTools is not installed or is not running in the guest." 
- " VMware Tools are necessary to run this module.") + if tools_status in ["toolsNotInstalled", "toolsNotRunning"]: + self.module.fail_json( + msg="VMwareTools is not installed or is not running in the guest." + " VMware Tools are necessary to run this module." + ) try: self.execute_command(vm, module.params) @@ -262,48 +283,58 @@ def __init__(self, module): def execute_command(self, vm, params): # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py - vm_username = params['vm_username'] - vm_password = params['vm_password'] - program_path = params['vm_shell'] - args = params['vm_shell_args'] - env = params['vm_shell_env'] - cwd = params['vm_shell_cwd'] - - credentials = vim.vm.guest.NamePasswordAuthentication(username=vm_username, - password=vm_password) - cmd_spec = vim.vm.guest.ProcessManager.ProgramSpec(arguments=args, - envVariables=env, - programPath=program_path, - workingDirectory=cwd) - - res = self.pm.StartProgramInGuest(vm=vm, auth=credentials, spec=cmd_spec) + vm_username = params["vm_username"] + vm_password = params["vm_password"] + program_path = params["vm_shell"] + args = params["vm_shell_args"] + env = params["vm_shell_env"] + cwd = params["vm_shell_cwd"] + + credentials = vim.vm.guest.NamePasswordAuthentication( + username=vm_username, password=vm_password + ) + cmd_spec = vim.vm.guest.ProcessManager.ProgramSpec( + arguments=args, + envVariables=env, + programPath=program_path, + workingDirectory=cwd, + ) + + res = self.pm.StartProgramInGuest( + vm=vm, auth=credentials, spec=cmd_spec + ) if self.wait_for_pid: res_data = self.wait_for_process(vm, res, credentials) - results = dict(uuid=vm.summary.config.uuid, - owner=res_data.owner, - start_time=res_data.startTime.isoformat(), - end_time=res_data.endTime.isoformat(), - exit_code=res_data.exitCode, - name=res_data.name, - cmd_line=res_data.cmdLine) + results = dict( + uuid=vm.summary.config.uuid, + owner=res_data.owner, + start_time=res_data.startTime.isoformat(), + end_time=res_data.endTime.isoformat(), + exit_code=res_data.exitCode, + name=res_data.name, + cmd_line=res_data.cmdLine, + ) if res_data.exitCode != 0: - results['msg'] = "Failed to execute command" - results['changed'] = False - results['failed'] = True + results["msg"] = "Failed to execute command" + results["changed"] = False + results["failed"] = True self.module.fail_json(**results) else: - results['changed'] = True - results['failed'] = False + results["changed"] = True + results["failed"] = False self.module.exit_json(**results) else: - self.module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=res) + self.module.exit_json( + changed=True, uuid=vm.summary.config.uuid, msg=res + ) def process_exists_in_guest(self, vm, pid, creds): res = self.pm.ListProcessesInGuest(vm, creds, pids=[pid]) if not res: self.module.fail_json( - changed=False, msg='ListProcessesInGuest: None (unexpected)') + changed=False, msg="ListProcessesInGuest: None (unexpected)" + ) res = res[0] if res.exitCode is None: return True, None @@ -314,7 +345,9 @@ def wait_for_process(self, vm, pid, creds): start_time = time.time() while True: current_time = time.time() - process_status, res_data = self.process_exists_in_guest(vm, pid, creds) + process_status, res_data = self.process_exists_in_guest( + vm, pid, creds + ) if not process_status: return res_data elif current_time - start_time >= self.timeout: @@ -324,7 +357,8 @@ def wait_for_process(self, vm, pid, creds): pid=pid, start_time=start_time, current_time=current_time, - 
timeout=self.timeout) + timeout=self.timeout, + ) else: time.sleep(5) @@ -333,37 +367,40 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - datacenter=dict(type='str'), - cluster=dict(type='str'), - folder=dict(type='str'), - vm_id=dict(type='str', required=True), - vm_id_type=dict(default='vm_name', type='str', - choices=['inventory_path', - 'uuid', - 'instance_uuid', - 'dns_name', - 'vm_name']), - vm_username=dict(type='str', required=True), - vm_password=dict(type='str', no_log=True, required=True), - vm_shell=dict(type='str', required=True), - vm_shell_args=dict(default=" ", type='str'), - vm_shell_env=dict(type='list'), - vm_shell_cwd=dict(type='str'), - wait_for_process=dict(type='bool', default=False), - timeout=dict(type='int', default=3600), + datacenter=dict(type="str"), + cluster=dict(type="str"), + folder=dict(type="str"), + vm_id=dict(type="str", required=True), + vm_id_type=dict( + default="vm_name", + type="str", + choices=[ + "inventory_path", + "uuid", + "instance_uuid", + "dns_name", + "vm_name", + ], + ), + vm_username=dict(type="str", required=True), + vm_password=dict(type="str", no_log=True, required=True), + vm_shell=dict(type="str", required=True), + vm_shell_args=dict(default=" ", type="str"), + vm_shell_env=dict(type="list"), + vm_shell_cwd=dict(type="str"), + wait_for_process=dict(type="bool", default=False), + timeout=dict(type="int", default=3600), ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False, - required_if=[ - ['vm_id_type', 'inventory_path', ['folder']] - ], + required_if=[["vm_id_type", "inventory_path", ["folder"]]], ) vm_shell_mgr = VMwareShellManager(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vm_storage_policy_info.py b/plugins/modules/vmware_vm_storage_policy_info.py index 3abd1aa..74e8108 100644 --- a/plugins/modules/vmware_vm_storage_policy_info.py +++ b/plugins/modules/vmware_vm_storage_policy_info.py @@ -6,15 +6,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vm_storage_policy_info short_description: Gather information about vSphere storage profile defined storage policy information. 
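# A minimal, self-contained sketch of the capability flattening performed by
# SPBMClient.show_capabilities() below: each SPBM rule constraint is reduced to
# a list of {"id": ..., "value": ...} dicts.  The Fake* classes here are plain
# stand-ins for live pyVmomi pbm objects (illustrative only, not the module's API).

class FakeProperty:
    def __init__(self, id_, value):
        self.id = id_
        self.value = value

class FakeConstraint:
    def __init__(self, props):
        self.propertyInstance = props

class FakeCapability:
    def __init__(self, constraints):
        self.constraint = constraints

def show_capabilities(capabilities):
    # Same walk as the module method: capability -> constraint -> propertyInstance,
    # collecting id/value pairs.
    info = []
    for capability in capabilities:
        for constraint in capability.constraint:
            if hasattr(constraint, "propertyInstance"):
                for prop in constraint.propertyInstance:
                    info.append({"id": prop.id, "value": prop.value})
    return info

caps = [FakeCapability([FakeConstraint([FakeProperty("hostFailuresToTolerate", 1)])])]
print(show_capabilities(caps))
# -> [{'id': 'hostFailuresToTolerate', 'value': 1}]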
@@ -32,9 +33,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get SPBM info vmware_vm_storage_policy_info: hostname: '{{ vcenter_hostname }}' @@ -43,9 +44,9 @@ validate_certs: no delegate_to: localhost register: profiles -''' +""" -RETURN = r''' +RETURN = r""" spbm_profiles: description: list of dictionary of SPBM info returned: success @@ -84,7 +85,7 @@ "name": "vSAN Default Storage Policy" }, ] -''' +""" try: from pyVmomi import pbm @@ -92,8 +93,12 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware_spbm import SPBM -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware_spbm import ( + SPBM, +) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, +) class SPBMClient(SPBM): @@ -107,12 +112,12 @@ def show_capabilities(self, capabilities): capabilities_info = [] for capability in capabilities: for constraint in capability.constraint: - if hasattr(constraint, 'propertyInstance'): + if hasattr(constraint, "propertyInstance"): for propertyInstance in constraint.propertyInstance: capabilities_info.append( { - 'id': propertyInstance.id, - 'value': propertyInstance.value + "id": propertyInstance.id, + "value": propertyInstance.value, } ) return capabilities_info @@ -124,30 +129,38 @@ def get_storage_policy_info(self): profile_manager = self.spbm_content.profileManager profile_ids = profile_manager.PbmQueryProfile( resourceType=pbm.profile.ResourceType(resourceType="STORAGE"), - profileCategory="REQUIREMENT" + profileCategory="REQUIREMENT", ) profiles = [] if profile_ids: - profiles = profile_manager.PbmRetrieveContent(profileIds=profile_ids) + profiles = profile_manager.PbmRetrieveContent( + profileIds=profile_ids + ) for profile in profiles: temp_profile_info = { - 'name': profile.name, - 'id': profile.profileId.uniqueId, - 'description': profile.description, - 'constraints_sub_profiles': [] + "name": profile.name, + "id": profile.profileId.uniqueId, + "description": profile.description, + "constraints_sub_profiles": [], } - if hasattr(profile.constraints, 'subProfiles'): + if hasattr(profile.constraints, "subProfiles"): subprofiles = profile.constraints.subProfiles temp_sub_profiles = [] for subprofile in subprofiles: - temp_sub_profiles.append({ - 'rule_set_name': subprofile.name, - 'rule_set_info': self.show_capabilities(subprofile.capability), - }) - temp_profile_info['constraints_sub_profiles'] = temp_sub_profiles + temp_sub_profiles.append( + { + "rule_set_name": subprofile.name, + "rule_set_info": self.show_capabilities( + subprofile.capability + ), + } + ) + temp_profile_info[ + "constraints_sub_profiles" + ] = temp_sub_profiles - results['spbm_profiles'].append(temp_profile_info) + results["spbm_profiles"].append(temp_profile_info) self.module.exit_json(**results) @@ -155,11 +168,13 @@ def get_storage_policy_info(self): def main(): argument_spec = vmware_argument_spec() - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) spbm_client = SPBMClient(module) spbm_client.get_storage_policy_info() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vm_vm_drs_rule.py b/plugins/modules/vmware_vm_vm_drs_rule.py index 
f1636b8..4a70208 100644 --- a/plugins/modules/vmware_vm_vm_drs_rule.py +++ b/plugins/modules/vmware_vm_vm_drs_rule.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vm_vm_drs_rule short_description: Configure VMware DRS Affinity rule for virtual machine in given cluster @@ -73,9 +74,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create DRS Affinity Rule for VM-VM vmware_vm_vm_drs_rule: hostname: "{{ esxi_server }}" @@ -118,9 +119,9 @@ drs_rule_name: vm1-vm2-affinity-rule-001 state: absent delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about DRS VM and VM rule returned: when state is present @@ -136,7 +137,7 @@ "VM_146" ] } -''' +""" try: from pyVmomi import vim, vmodl @@ -145,29 +146,37 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, vmware_argument_spec, wait_for_task, - find_vm_by_id, find_cluster_by_name) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, + wait_for_task, + find_vm_by_id, + find_cluster_by_name, +) class VmwareDrs(PyVmomi): def __init__(self, module): super(VmwareDrs, self).__init__(module) - self.vm_list = module.params['vms'] - self.cluster_name = module.params['cluster_name'] - self.rule_name = module.params['drs_rule_name'] - self.enabled = module.params['enabled'] - self.mandatory = module.params['mandatory'] - self.affinity_rule = module.params['affinity_rule'] - self.state = module.params['state'] + self.vm_list = module.params["vms"] + self.cluster_name = module.params["cluster_name"] + self.rule_name = module.params["drs_rule_name"] + self.enabled = module.params["enabled"] + self.mandatory = module.params["mandatory"] + self.affinity_rule = module.params["affinity_rule"] + self.state = module.params["state"] # Sanity check for cluster - self.cluster_obj = find_cluster_by_name(content=self.content, - cluster_name=self.cluster_name) + self.cluster_obj = find_cluster_by_name( + content=self.content, cluster_name=self.cluster_name + ) if self.cluster_obj is None: - self.module.fail_json(msg="Failed to find the cluster %s" % self.cluster_name) + self.module.fail_json( + msg="Failed to find the cluster %s" % self.cluster_name + ) # Sanity check for virtual machines self.vm_obj_list = [] - if self.state == 'present': + if self.state == "present": # Get list of VMs only if state is present self.vm_obj_list = self.get_all_vms_info() @@ -186,12 +195,17 @@ def get_all_vms_info(self, vms_list=None): vms_list = self.vm_list for vm_name in vms_list: - vm_obj = find_vm_by_id(content=self.content, vm_id=vm_name, - vm_id_type='vm_name', cluster=self.cluster_obj) + vm_obj = find_vm_by_id( + content=self.content, + vm_id=vm_name, + vm_id_type="vm_name", + cluster=self.cluster_obj, + ) if vm_obj is None: - self.module.fail_json(msg="Failed to find the virtual machine %s " - "in given cluster %s" % (vm_name, - self.cluster_name)) + self.module.fail_json( + 
msg="Failed to find the virtual machine %s " + "in given cluster %s" % (vm_name, self.cluster_name) + ) vm_obj_list.append(vm_obj) return vm_obj_list @@ -209,7 +223,11 @@ def get_rule_key_by_name(self, cluster_obj=None, rule_name=None): cluster_obj = self.cluster_obj if rule_name: - rules_list = [rule for rule in cluster_obj.configuration.rule if rule.name == rule_name] + rules_list = [ + rule + for rule in cluster_obj.configuration.rule + if rule.name == rule_name + ] if rules_list: return rules_list[0] # No rule found @@ -227,14 +245,17 @@ def normalize_rule_spec(rule_obj=None): """ if rule_obj is None: return {} - return dict(rule_key=rule_obj.key, - rule_enabled=rule_obj.enabled, - rule_name=rule_obj.name, - rule_mandatory=rule_obj.mandatory, - rule_uuid=rule_obj.ruleUuid, - rule_vms=[vm.name for vm in rule_obj.vm], - rule_affinity=True if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) else False, - ) + return dict( + rule_key=rule_obj.key, + rule_enabled=rule_obj.enabled, + rule_name=rule_obj.name, + rule_mandatory=rule_obj.mandatory, + rule_uuid=rule_obj.ruleUuid, + rule_vms=[vm.name for vm in rule_obj.vm], + rule_affinity=True + if isinstance(rule_obj, vim.cluster.AffinityRuleSpec) + else False, + ) # Create def create(self): @@ -244,11 +265,17 @@ def create(self): rule_obj = self.get_rule_key_by_name(rule_name=self.rule_name) if rule_obj is not None: existing_rule = self.normalize_rule_spec(rule_obj=rule_obj) - if ((sorted(existing_rule['rule_vms']) == sorted(self.vm_list)) and - (existing_rule['rule_enabled'] == self.enabled) and - (existing_rule['rule_mandatory'] == self.mandatory) and - (existing_rule['rule_affinity'] == self.affinity_rule)): - self.module.exit_json(changed=False, result=existing_rule, msg="Rule already exists with the same configuration") + if ( + (sorted(existing_rule["rule_vms"]) == sorted(self.vm_list)) + and (existing_rule["rule_enabled"] == self.enabled) + and (existing_rule["rule_mandatory"] == self.mandatory) + and (existing_rule["rule_affinity"] == self.affinity_rule) + ): + self.module.exit_json( + changed=False, + result=existing_rule, + msg="Rule already exists with the same configuration", + ) else: changed, result = self.update_rule_spec(rule_obj) return changed, result @@ -271,7 +298,7 @@ def create_rule_spec(self): rule.mandatory = self.mandatory rule.name = self.rule_name - rule_spec = vim.cluster.RuleSpec(info=rule, operation='add') + rule_spec = vim.cluster.RuleSpec(info=rule, operation="add") config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec]) try: @@ -296,17 +323,19 @@ def update_rule_spec(self, rule_obj=None): rule_obj.vm = self.vm_obj_list - if (rule_obj.mandatory != self.mandatory): + if rule_obj.mandatory != self.mandatory: rule_obj.mandatory = self.mandatory - if (rule_obj.enabled != self.enabled): + if rule_obj.enabled != self.enabled: rule_obj.enabled = self.enabled - rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation='edit') + rule_spec = vim.cluster.RuleSpec(info=rule_obj, operation="edit") config_spec = vim.cluster.ConfigSpec(rulesSpec=[rule_spec]) try: - task = self.cluster_obj.ReconfigureCluster_Task(config_spec, modify=True) + task = self.cluster_obj.ReconfigureCluster_Task( + config_spec, modify=True + ) changed, result = wait_for_task(task) except vmodl.fault.InvalidRequest as e: result = to_native(e.msg) @@ -331,7 +360,9 @@ def delete(self, rule_name=None): rule = self.get_rule_key_by_name(rule_name=rule_name) if rule is not None: rule_key = int(rule.key) - rule_spec = 
vim.cluster.RuleSpec(removeKey=rule_key, operation='remove') + rule_spec = vim.cluster.RuleSpec( + removeKey=rule_key, operation="remove" + ) config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec]) try: task = self.cluster_obj.ReconfigureEx(config_spec, modify=True) @@ -341,69 +372,74 @@ def delete(self, rule_name=None): except Exception as e: result = to_native(e) else: - result = 'No rule named %s exists' % self.rule_name + result = "No rule named %s exists" % self.rule_name return changed, result def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - state=dict(type='str', default='present', choices=['absent', 'present']), - vms=dict(type='list'), - cluster_name=dict(type='str', required=True), - drs_rule_name=dict(type='str', required=True), - enabled=dict(type='bool', default=False), - mandatory=dict(type='bool', default=False), - affinity_rule=dict(type='bool', default=True), - ) + argument_spec.update( + dict( + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + vms=dict(type="list"), + cluster_name=dict(type="str", required=True), + drs_rule_name=dict(type="str", required=True), + enabled=dict(type="bool", default=False), + mandatory=dict(type="bool", default=False), + affinity_rule=dict(type="bool", default=True), + ) ) - required_if = [ - ['state', 'present', ['vms']] - ] - module = AnsibleModule(argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True) + required_if = [["state", "present", ["vms"]]] + module = AnsibleModule( + argument_spec=argument_spec, + required_if=required_if, + supports_check_mode=True, + ) results = dict(failed=False, changed=False) - state = module.params['state'] + state = module.params["state"] vm_drs = VmwareDrs(module) - if state == 'present': + if state == "present": # Add Rule if module.check_mode: - results['changed'] = True + results["changed"] = True module.exit_json(**results) changed, result = vm_drs.create() if changed: - results['changed'] = changed + results["changed"] = changed else: - results['failed'] = True - results['msg'] = "Failed to create DRS rule %s" % vm_drs.rule_name - results['result'] = result - elif state == 'absent': + results["failed"] = True + results["msg"] = "Failed to create DRS rule %s" % vm_drs.rule_name + results["result"] = result + elif state == "absent": # Delete Rule if module.check_mode: - results['changed'] = True + results["changed"] = True module.exit_json(**results) changed, result = vm_drs.delete() if changed: - results['changed'] = changed - results['msg'] = "DRS rule %s deleted successfully." % vm_drs.rule_name + results["changed"] = changed + results["msg"] = ( + "DRS rule %s deleted successfully." 
% vm_drs.rule_name + ) else: if "No rule named" in result: - results['msg'] = result + results["msg"] = result module.exit_json(**results) - results['failed'] = True - results['msg'] = "Failed to delete DRS rule %s" % vm_drs.rule_name - results['result'] = result + results["failed"] = True + results["msg"] = "Failed to delete DRS rule %s" % vm_drs.rule_name + results["result"] = result - if results['changed']: + if results["changed"]: module.exit_json(**results) - if results['failed']: + if results["failed"]: module.fail_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vm_vss_dvs_migrate.py b/plugins/modules/vmware_vm_vss_dvs_migrate.py index 996bf6b..81db4ec 100644 --- a/plugins/modules/vmware_vm_vss_dvs_migrate.py +++ b/plugins/modules/vmware_vm_vss_dvs_migrate.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vm_vss_dvs_migrate short_description: Migrates a virtual machine from a standard vswitch to distributed @@ -38,9 +41,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Migrate VCSA to vDS vmware_vm_vss_dvs_migrate: hostname: '{{ vcenter_hostname }}' @@ -49,17 +52,23 @@ vm_name: '{{ vm_name }}' dvportgroup_name: '{{ distributed_portgroup_name }}' delegate_to: localhost -''' +""" try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (HAS_PYVMOMI, connect_to_api, get_all_objs, - vmware_argument_spec, wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + HAS_PYVMOMI, + connect_to_api, + get_all_objs, + vmware_argument_spec, + wait_for_task, +) class VMwareVmVssDvsMigrate(object): @@ -67,19 +76,21 @@ def __init__(self, module): self.module = module self.content = connect_to_api(module) self.vm = None - self.vm_name = module.params['vm_name'] - self.dvportgroup_name = module.params['dvportgroup_name'] + self.vm_name = module.params["vm_name"] + self.dvportgroup_name = module.params["dvportgroup_name"] def process_state(self): vm_nic_states = { - 'absent': self.migrate_network_adapter_vds, - 'present': self.state_exit_unchanged, + "absent": self.migrate_network_adapter_vds, + "present": self.state_exit_unchanged, } vm_nic_states[self.check_vm_network_state()]() def find_dvspg_by_name(self): - vmware_distributed_port_group = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup]) + vmware_distributed_port_group = get_all_objs( + self.content, [vim.dvs.DistributedVirtualPortgroup] + ) for dvspg in vmware_distributed_port_group: if dvspg.name == self.dvportgroup_name: return dvspg @@ -94,7 +105,9 @@ def find_vm_by_name(self): def migrate_network_adapter_vds(self): vm_configspec = vim.vm.ConfigSpec() - nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + nic = ( + vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo() + ) port = vim.dvs.PortConnection() devicespec = 
vim.vm.device.VirtualDeviceSpec() @@ -111,7 +124,9 @@ def migrate_network_adapter_vds(self): for device in self.vm.config.hardware.device: if isinstance(device, vim.vm.device.VirtualEthernetCard): devicespec.device = device - devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit + devicespec.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.edit + ) devicespec.device.backing = nic vm_configspec.deviceChange.append(devicespec) @@ -127,12 +142,18 @@ def check_vm_network_state(self): self.vm = self.find_vm_by_name() if self.vm is None: - self.module.fail_json(msg="A virtual machine with name %s does not exist" % self.vm_name) + self.module.fail_json( + msg="A virtual machine with name %s does not exist" + % self.vm_name + ) for device in self.vm.config.hardware.device: if isinstance(device, vim.vm.device.VirtualEthernetCard): - if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): - return 'present' - return 'absent' + if isinstance( + device.backing, + vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo, + ): + return "present" + return "absent" except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: @@ -142,16 +163,22 @@ def check_vm_network_state(self): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(vm_name=dict(required=True, type='str'), - dvportgroup_name=dict(required=True, type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + argument_spec.update( + dict( + vm_name=dict(required=True, type="str"), + dvportgroup_name=dict(required=True, type="str"), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi is required for this module') + module.fail_json(msg="pyvmomi is required for this module") vmware_vmnic_migrate = VMwareVmVssDvsMigrate(module) vmware_vmnic_migrate.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vmkernel.py b/plugins/modules/vmware_vmkernel.py index d0a5c3c..5adf5ce 100644 --- a/plugins/modules/vmware_vmkernel.py +++ b/plugins/modules/vmware_vmkernel.py @@ -8,15 +8,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vmkernel short_description: Manages a VMware VMkernel Adapter of an ESXi host. 
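# ensure() in this module (reformatted further below) picks its action from a
# nested dict keyed first by the requested state and then by the detected state.
# A minimal, self-contained sketch of that dispatch pattern with stub handlers;
# the handler names here are placeholders, not the module's real methods.

def vmk_create():
    return "create VMkernel adapter"

def vmk_update():
    return "update existing VMkernel adapter"

def vmk_delete():
    return "remove VMkernel adapter"

def vmk_unchanged():
    return "nothing to do"

host_vmk_states = {
    "absent": {"present": vmk_delete, "absent": vmk_unchanged},
    "present": {"present": vmk_update, "absent": vmk_create},
}

def ensure(requested_state, current_state):
    # Same lookup shape as host_vmk_states[self.module.params["state"]][self.check_state()]()
    return host_vmk_states[requested_state][current_state]()

print(ensure("present", "absent"))  # -> create VMkernel adapter
print(ensure("absent", "present"))  # -> remove VMkernel adapter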
@@ -152,9 +153,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add Management vmkernel port using static network type vmware_vmkernel: hostname: '{{ esxi_hostname }}' @@ -242,9 +243,9 @@ tcpip_stack: vmotion state: present delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" result: description: metadata about VMKernel name returned: always @@ -261,7 +262,7 @@ "services": "vMotion", "switch": "vDS" } -''' +""" try: from pyVmomi import vim, vmodl @@ -270,8 +271,13 @@ from ansible.module_utils.basic import AnsibleModule from ansible_collections.vmware.general.plugins.module_utils.vmware import ( - PyVmomi, TaskError, vmware_argument_spec, wait_for_task, - find_dvspg_by_name, find_dvs_by_name, get_all_objs + PyVmomi, + TaskError, + vmware_argument_spec, + wait_for_task, + find_dvspg_by_name, + find_dvs_by_name, + get_all_objs, ) from ansible.module_utils._text import to_native @@ -281,29 +287,33 @@ class PyVmomiHelper(PyVmomi): def __init__(self, module): super(PyVmomiHelper, self).__init__(module) - if self.params['network']: - self.network_type = self.params['network'].get('type') - self.ip_address = self.params['network'].get('ip_address', None) - self.subnet_mask = self.params['network'].get('subnet_mask', None) - self.default_gateway = self.params['network'].get('default_gateway', None) - self.tcpip_stack = self.params['network'].get('tcpip_stack') - self.device = self.params['device'] - if self.network_type == 'dhcp' and not self.device: - module.fail_json(msg="device is a required parameter when network type is set to 'dhcp'") - self.mtu = self.params['mtu'] - self.enable_vsan = self.params['enable_vsan'] - self.enable_vmotion = self.params['enable_vmotion'] - self.enable_mgmt = self.params['enable_mgmt'] - self.enable_ft = self.params['enable_ft'] - self.enable_provisioning = self.params['enable_provisioning'] - self.enable_replication = self.params['enable_replication'] - self.enable_replication_nfc = self.params['enable_replication_nfc'] - - self.vswitch_name = self.params['vswitch_name'] - self.vds_name = self.params['dvswitch_name'] - self.port_group_name = self.params['portgroup_name'] - - self.esxi_host_name = self.params['esxi_hostname'] + if self.params["network"]: + self.network_type = self.params["network"].get("type") + self.ip_address = self.params["network"].get("ip_address", None) + self.subnet_mask = self.params["network"].get("subnet_mask", None) + self.default_gateway = self.params["network"].get( + "default_gateway", None + ) + self.tcpip_stack = self.params["network"].get("tcpip_stack") + self.device = self.params["device"] + if self.network_type == "dhcp" and not self.device: + module.fail_json( + msg="device is a required parameter when network type is set to 'dhcp'" + ) + self.mtu = self.params["mtu"] + self.enable_vsan = self.params["enable_vsan"] + self.enable_vmotion = self.params["enable_vmotion"] + self.enable_mgmt = self.params["enable_mgmt"] + self.enable_ft = self.params["enable_ft"] + self.enable_provisioning = self.params["enable_provisioning"] + self.enable_replication = self.params["enable_replication"] + self.enable_replication_nfc = self.params["enable_replication_nfc"] + + self.vswitch_name = self.params["vswitch_name"] + self.vds_name = self.params["dvswitch_name"] + self.port_group_name = self.params["portgroup_name"] + + self.esxi_host_name = self.params["esxi_hostname"] hosts = self.get_all_host_objs(esxi_host_name=self.esxi_host_name) if hosts: 
self.esxi_host_obj = hosts[0] @@ -312,42 +322,58 @@ def __init__(self, module): msg="Failed to get details of ESXi server. Please specify esxi_hostname." ) - if self.network_type == 'static': - if self.module.params['state'] == 'absent': + if self.network_type == "static": + if self.module.params["state"] == "absent": pass elif not self.ip_address: - module.fail_json(msg="ip_address is a required parameter when network type is set to 'static'") + module.fail_json( + msg="ip_address is a required parameter when network type is set to 'static'" + ) elif not self.subnet_mask: - module.fail_json(msg="subnet_mask is a required parameter when network type is set to 'static'") + module.fail_json( + msg="subnet_mask is a required parameter when network type is set to 'static'" + ) # find Port Group if self.vswitch_name: self.port_group_obj = self.get_port_group_by_name( host_system=self.esxi_host_obj, portgroup_name=self.port_group_name, - vswitch_name=self.vswitch_name + vswitch_name=self.vswitch_name, ) if not self.port_group_obj: - module.fail_json(msg="Portgroup '%s' not found on vSS '%s'" % (self.port_group_name, self.vswitch_name)) + module.fail_json( + msg="Portgroup '%s' not found on vSS '%s'" + % (self.port_group_name, self.vswitch_name) + ) elif self.vds_name: self.dv_switch_obj = find_dvs_by_name(self.content, self.vds_name) if not self.dv_switch_obj: module.fail_json(msg="vDS '%s' not found" % self.vds_name) - self.port_group_obj = find_dvspg_by_name(self.dv_switch_obj, self.port_group_name) + self.port_group_obj = find_dvspg_by_name( + self.dv_switch_obj, self.port_group_name + ) if not self.port_group_obj: - module.fail_json(msg="Portgroup '%s' not found on vDS '%s'" % (self.port_group_name, self.vds_name)) + module.fail_json( + msg="Portgroup '%s' not found on vDS '%s'" + % (self.port_group_name, self.vds_name) + ) # find VMkernel Adapter if self.device: self.vnic = self.get_vmkernel_by_device(device_name=self.device) else: # config change (e.g. 
DHCP to static, or vice versa); doesn't work with virtual port change - self.vnic = self.get_vmkernel_by_portgroup_new(port_group_name=self.port_group_name) - if not self.vnic and self.network_type == 'static': + self.vnic = self.get_vmkernel_by_portgroup_new( + port_group_name=self.port_group_name + ) + if not self.vnic and self.network_type == "static": # vDS to vSS or vSS to vSS (static IP) self.vnic = self.get_vmkernel_by_ip(ip_address=self.ip_address) - def get_port_group_by_name(self, host_system, portgroup_name, vswitch_name): + def get_port_group_by_name( + self, host_system, portgroup_name, vswitch_name + ): """ Get specific port group by given name Args: @@ -361,7 +387,10 @@ def get_port_group_by_name(self, host_system, portgroup_name, vswitch_name): portgroups = self.get_all_port_groups_by_host(host_system=host_system) for portgroup in portgroups: - if portgroup.spec.vswitchName == vswitch_name and portgroup.spec.name == portgroup_name: + if ( + portgroup.spec.vswitchName == vswitch_name + and portgroup.spec.name == portgroup_name + ): return portgroup return None @@ -372,18 +401,18 @@ def ensure(self): """ host_vmk_states = { - 'absent': { - 'present': self.host_vmk_delete, - 'absent': self.host_vmk_unchange, + "absent": { + "present": self.host_vmk_delete, + "absent": self.host_vmk_unchange, + }, + "present": { + "present": self.host_vmk_update, + "absent": self.host_vmk_create, }, - 'present': { - 'present': self.host_vmk_update, - 'absent': self.host_vmk_create, - } } try: - host_vmk_states[self.module.params['state']][self.check_state()]() + host_vmk_states[self.module.params["state"]][self.check_state()]() except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=to_native(runtime_fault.msg)) except vmodl.MethodFault as method_fault: @@ -404,7 +433,10 @@ def get_vmkernel_by_portgroup_new(self, port_group_name=None): return vnic # check if it's a vDS Port Group try: - if vnic.spec.distributedVirtualPort.portgroupKey == self.port_group_obj.key: + if ( + vnic.spec.distributedVirtualPort.portgroupKey + == self.port_group_obj.key + ): return vnic except AttributeError: pass @@ -444,7 +476,7 @@ def check_state(self): Returns: Present if found and absent if not found """ - return 'present' if self.vnic else 'absent' + return "present" if self.vnic else "absent" def host_vmk_delete(self): """ @@ -452,25 +484,27 @@ def host_vmk_delete(self): Returns: NA """ - results = dict(changed=False, msg='') + results = dict(changed=False, msg="") vmk_device = self.vnic.device try: if self.module.check_mode: - results['msg'] = "VMkernel Adapter would be deleted" + results["msg"] = "VMkernel Adapter would be deleted" else: - self.esxi_host_obj.configManager.networkSystem.RemoveVirtualNic(vmk_device) - results['msg'] = "VMkernel Adapter deleted" - results['changed'] = True - results['device'] = vmk_device + self.esxi_host_obj.configManager.networkSystem.RemoveVirtualNic( + vmk_device + ) + results["msg"] = "VMkernel Adapter deleted" + results["changed"] = True + results["device"] = vmk_device except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Failed to find vmk to delete due to %s" % - to_native(not_found.msg) + msg="Failed to find vmk to delete due to %s" + % to_native(not_found.msg) ) except vim.fault.HostConfigFault as host_config_fault: self.module.fail_json( - msg="Failed to delete vmk due host config issues : %s" % - to_native(host_config_fault.msg) + msg="Failed to delete vmk due host config issues : %s" + % to_native(host_config_fault.msg) ) 
self.module.exit_json(**results) @@ -489,75 +523,106 @@ def host_vmk_update(self): Returns: NA """ - changed = changed_settings = changed_vds = changed_services = \ - changed_service_vmotion = changed_service_mgmt = changed_service_ft = \ - changed_service_vsan = changed_service_prov = changed_service_rep = changed_service_rep_nfc = False + changed = ( + changed_settings + ) = ( + changed_vds + ) = ( + changed_services + ) = ( + changed_service_vmotion + ) = ( + changed_service_mgmt + ) = ( + changed_service_ft + ) = ( + changed_service_vsan + ) = ( + changed_service_prov + ) = changed_service_rep = changed_service_rep_nfc = False changed_list = [] - results = dict(changed=False, msg='') + results = dict(changed=False, msg="") - results['tcpip_stack'] = self.tcpip_stack - net_stack_instance_key = self.get_api_net_stack_instance(self.tcpip_stack) + results["tcpip_stack"] = self.tcpip_stack + net_stack_instance_key = self.get_api_net_stack_instance( + self.tcpip_stack + ) if self.vnic.spec.netStackInstanceKey != net_stack_instance_key: - self.module.fail_json(msg="The TCP/IP stack cannot be changed on an existing VMkernel adapter!") + self.module.fail_json( + msg="The TCP/IP stack cannot be changed on an existing VMkernel adapter!" + ) # Check MTU - results['mtu'] = self.mtu + results["mtu"] = self.mtu if self.vnic.spec.mtu != self.mtu: changed_settings = True changed_list.append("MTU") - results['mtu_previous'] = self.vnic.spec.mtu + results["mtu_previous"] = self.vnic.spec.mtu # Check IPv4 settings - results['ipv4'] = self.network_type - results['ipv4_ip'] = self.ip_address - results['ipv4_sm'] = self.subnet_mask + results["ipv4"] = self.network_type + results["ipv4_ip"] = self.ip_address + results["ipv4_sm"] = self.subnet_mask if self.default_gateway: - results['ipv4_gw'] = self.default_gateway + results["ipv4_gw"] = self.default_gateway else: - results['ipv4_gw'] = "No override" + results["ipv4_gw"] = "No override" if self.vnic.spec.ip.dhcp: - if self.network_type == 'static': + if self.network_type == "static": changed_settings = True changed_list.append("IPv4 settings") - results['ipv4_previous'] = "DHCP" + results["ipv4_previous"] = "DHCP" if not self.vnic.spec.ip.dhcp: - if self.network_type == 'dhcp': + if self.network_type == "dhcp": changed_settings = True changed_list.append("IPv4 settings") - results['ipv4_previous'] = "static" - elif self.network_type == 'static': + results["ipv4_previous"] = "static" + elif self.network_type == "static": if self.ip_address != self.vnic.spec.ip.ipAddress: changed_settings = True changed_list.append("IP") - results['ipv4_ip_previous'] = self.vnic.spec.ip.ipAddress + results["ipv4_ip_previous"] = self.vnic.spec.ip.ipAddress if self.subnet_mask != self.vnic.spec.ip.subnetMask: changed_settings = True changed_list.append("SM") - results['ipv4_sm_previous'] = self.vnic.spec.ip.subnetMask + results["ipv4_sm_previous"] = self.vnic.spec.ip.subnetMask if self.default_gateway: try: - if self.default_gateway != self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway: + if ( + self.default_gateway + != self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway + ): changed_settings = True changed_list.append("GW override") - results['ipv4_gw_previous'] = self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway + results[ + "ipv4_gw_previous" + ] = ( + self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway + ) except AttributeError: changed_settings = True changed_list.append("GW override") - results['ipv4_gw_previous'] = "No override" + 
results["ipv4_gw_previous"] = "No override" else: try: - if self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway: + if ( + self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway + ): changed_settings = True changed_list.append("GW override") - results['ipv4_gw_previous'] = self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway + results[ + "ipv4_gw_previous" + ] = ( + self.vnic.spec.ipRouteSpec.ipRouteConfig.defaultGateway + ) except AttributeError: pass # Check virtual port (vSS or vDS) - results['portgroup'] = self.port_group_name + results["portgroup"] = self.port_group_name dvs_uuid = None if self.vswitch_name: - results['switch'] = self.vswitch_name + results["switch"] = self.vswitch_name try: if self.vnic.spec.distributedVirtualPort.switchUuid: changed_vds = True @@ -566,15 +631,21 @@ def host_vmk_update(self): except AttributeError: pass if changed_vds: - results['switch_previous'] = self.find_dvs_by_uuid(dvs_uuid) - self.dv_switch_obj = find_dvs_by_name(self.content, results['switch_previous']) - results['portgroup_previous'] = self.find_dvspg_by_key( - self.dv_switch_obj, self.vnic.spec.distributedVirtualPort.portgroupKey + results["switch_previous"] = self.find_dvs_by_uuid(dvs_uuid) + self.dv_switch_obj = find_dvs_by_name( + self.content, results["switch_previous"] + ) + results["portgroup_previous"] = self.find_dvspg_by_key( + self.dv_switch_obj, + self.vnic.spec.distributedVirtualPort.portgroupKey, ) elif self.vds_name: - results['switch'] = self.vds_name + results["switch"] = self.vds_name try: - if self.vnic.spec.distributedVirtualPort.switchUuid != self.dv_switch_obj.uuid: + if ( + self.vnic.spec.distributedVirtualPort.switchUuid + != self.dv_switch_obj.uuid + ): changed_vds = True changed_list.append("Virtual Port") dvs_uuid = self.vnic.spec.distributedVirtualPort.switchUuid @@ -582,43 +653,87 @@ def host_vmk_update(self): changed_vds = True changed_list.append("Virtual Port") if changed_vds: - results['switch_previous'] = self.find_dvs_by_uuid(dvs_uuid) - results['portgroup_previous'] = self.vnic.spec.portgroup - portgroups = self.get_all_port_groups_by_host(host_system=self.esxi_host_obj) + results["switch_previous"] = self.find_dvs_by_uuid(dvs_uuid) + results["portgroup_previous"] = self.vnic.spec.portgroup + portgroups = self.get_all_port_groups_by_host( + host_system=self.esxi_host_obj + ) for portgroup in portgroups: if portgroup.spec.name == self.vnic.spec.portgroup: - results['switch_previous'] = portgroup.spec.vswitchName + results["switch_previous"] = portgroup.spec.vswitchName - results['services'] = self.create_enabled_services_string() + results["services"] = self.create_enabled_services_string() # Check configuration of service types (only if default TCP/IP stack is used) - if self.vnic.spec.netStackInstanceKey == 'defaultTcpipStack': + if self.vnic.spec.netStackInstanceKey == "defaultTcpipStack": service_type_vmks = self.get_all_vmks_by_service_type() - if (self.enable_vmotion and self.vnic.device not in service_type_vmks['vmotion']) or \ - (not self.enable_vmotion and self.vnic.device in service_type_vmks['vmotion']): + if ( + self.enable_vmotion + and self.vnic.device not in service_type_vmks["vmotion"] + ) or ( + not self.enable_vmotion + and self.vnic.device in service_type_vmks["vmotion"] + ): changed_services = changed_service_vmotion = True - if (self.enable_mgmt and self.vnic.device not in service_type_vmks['management']) or \ - (not self.enable_mgmt and self.vnic.device in service_type_vmks['management']): + if ( + self.enable_mgmt + and 
self.vnic.device not in service_type_vmks["management"] + ) or ( + not self.enable_mgmt + and self.vnic.device in service_type_vmks["management"] + ): changed_services = changed_service_mgmt = True - if (self.enable_ft and self.vnic.device not in service_type_vmks['faultToleranceLogging']) or \ - (not self.enable_ft and self.vnic.device in service_type_vmks['faultToleranceLogging']): + if ( + self.enable_ft + and self.vnic.device + not in service_type_vmks["faultToleranceLogging"] + ) or ( + not self.enable_ft + and self.vnic.device + in service_type_vmks["faultToleranceLogging"] + ): changed_services = changed_service_ft = True - if (self.enable_vsan and self.vnic.device not in service_type_vmks['vsan']) or \ - (not self.enable_vsan and self.vnic.device in service_type_vmks['vsan']): + if ( + self.enable_vsan + and self.vnic.device not in service_type_vmks["vsan"] + ) or ( + not self.enable_vsan + and self.vnic.device in service_type_vmks["vsan"] + ): changed_services = changed_service_vsan = True - if (self.enable_provisioning and self.vnic.device not in service_type_vmks['vSphereProvisioning']) or \ - (not self.enable_provisioning and self.vnic.device in service_type_vmks['vSphereProvisioning']): + if ( + self.enable_provisioning + and self.vnic.device + not in service_type_vmks["vSphereProvisioning"] + ) or ( + not self.enable_provisioning + and self.vnic.device + in service_type_vmks["vSphereProvisioning"] + ): changed_services = changed_service_prov = True - if (self.enable_replication and self.vnic.device not in service_type_vmks['vSphereReplication']) or \ - (not self.enable_provisioning and self.vnic.device in service_type_vmks['vSphereReplication']): + if ( + self.enable_replication + and self.vnic.device + not in service_type_vmks["vSphereReplication"] + ) or ( + not self.enable_provisioning + and self.vnic.device in service_type_vmks["vSphereReplication"] + ): changed_services = changed_service_rep = True - if (self.enable_replication_nfc and self.vnic.device not in service_type_vmks['vSphereReplicationNFC']) or \ - (not self.enable_provisioning and self.vnic.device in service_type_vmks['vSphereReplicationNFC']): + if ( + self.enable_replication_nfc + and self.vnic.device + not in service_type_vmks["vSphereReplicationNFC"] + ) or ( + not self.enable_provisioning + and self.vnic.device + in service_type_vmks["vSphereReplicationNFC"] + ): changed_services = changed_service_rep_nfc = True if changed_services: changed_list.append("services") @@ -626,32 +741,46 @@ def host_vmk_update(self): if changed_settings or changed_vds or changed_services: changed = True if self.module.check_mode: - changed_suffix = ' would be updated' + changed_suffix = " would be updated" else: - changed_suffix = ' updated' + changed_suffix = " updated" if len(changed_list) > 2: - message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1]) + message = ( + ", ".join(changed_list[:-1]) + + ", and " + + str(changed_list[-1]) + ) elif len(changed_list) == 2: - message = ' and '.join(changed_list) + message = " and ".join(changed_list) elif len(changed_list) == 1: message = changed_list[0] message = "VMkernel Adapter " + message + changed_suffix if changed_settings or changed_vds: vnic_config = vim.host.VirtualNic.Specification() ip_spec = vim.host.IpConfig() - if self.network_type == 'dhcp': + if self.network_type == "dhcp": ip_spec.dhcp = True else: ip_spec.dhcp = False ip_spec.ipAddress = self.ip_address ip_spec.subnetMask = self.subnet_mask if self.default_gateway: - vnic_config.ipRouteSpec = 
vim.host.VirtualNic.IpRouteSpec() - vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig() - vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = self.default_gateway + vnic_config.ipRouteSpec = ( + vim.host.VirtualNic.IpRouteSpec() + ) + vnic_config.ipRouteSpec.ipRouteConfig = ( + vim.host.IpRouteConfig() + ) + vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = ( + self.default_gateway + ) else: - vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec() - vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig() + vnic_config.ipRouteSpec = ( + vim.host.VirtualNic.IpRouteSpec() + ) + vnic_config.ipRouteSpec.ipRouteConfig = ( + vim.host.IpRouteConfig() + ) vnic_config.ip = ip_spec vnic_config.mtu = self.mtu @@ -660,104 +789,154 @@ def host_vmk_update(self): if self.vswitch_name: vnic_config.portgroup = self.port_group_name elif self.vds_name: - vnic_config.distributedVirtualPort = vim.dvs.PortConnection() - vnic_config.distributedVirtualPort.switchUuid = self.dv_switch_obj.uuid - vnic_config.distributedVirtualPort.portgroupKey = self.port_group_obj.key + vnic_config.distributedVirtualPort = ( + vim.dvs.PortConnection() + ) + vnic_config.distributedVirtualPort.switchUuid = ( + self.dv_switch_obj.uuid + ) + vnic_config.distributedVirtualPort.portgroupKey = ( + self.port_group_obj.key + ) try: if not self.module.check_mode: - self.esxi_host_obj.configManager.networkSystem.UpdateVirtualNic(self.vnic.device, vnic_config) + self.esxi_host_obj.configManager.networkSystem.UpdateVirtualNic( + self.vnic.device, vnic_config + ) except vim.fault.NotFound as not_found: self.module.fail_json( - msg="Failed to update vmk as virtual network adapter cannot be found %s" % - to_native(not_found.msg) + msg="Failed to update vmk as virtual network adapter cannot be found %s" + % to_native(not_found.msg) ) except vim.fault.HostConfigFault as host_config_fault: self.module.fail_json( - msg="Failed to update vmk due to host config issues : %s" % - to_native(host_config_fault.msg) + msg="Failed to update vmk due to host config issues : %s" + % to_native(host_config_fault.msg) ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( - msg="Failed to update vmk as ipv6 address is specified in an ipv4 only system : %s" % - to_native(invalid_state.msg) + msg="Failed to update vmk as ipv6 address is specified in an ipv4 only system : %s" + % to_native(invalid_state.msg) ) except vmodl.fault.InvalidArgument as invalid_arg: self.module.fail_json( msg="Failed to update vmk as IP address or Subnet Mask in the IP configuration" - "are invalid or PortGroup does not exist : %s" % to_native(invalid_arg.msg) + "are invalid or PortGroup does not exist : %s" + % to_native(invalid_arg.msg) ) if changed_services: changed_list.append("Services") services_previous = [] - vnic_manager = self.esxi_host_obj.configManager.virtualNicManager + vnic_manager = ( + self.esxi_host_obj.configManager.virtualNicManager + ) if changed_service_mgmt: - if self.vnic.device in service_type_vmks['management']: - services_previous.append('Mgmt') - operation = 'select' if self.enable_mgmt else 'deselect' + if self.vnic.device in service_type_vmks["management"]: + services_previous.append("Mgmt") + operation = "select" if self.enable_mgmt else "deselect" self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='management', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="management", + operation=operation, ) if changed_service_vmotion: - if self.vnic.device 
in service_type_vmks['vmotion']: - services_previous.append('vMotion') - operation = 'select' if self.enable_vmotion else 'deselect' + if self.vnic.device in service_type_vmks["vmotion"]: + services_previous.append("vMotion") + operation = "select" if self.enable_vmotion else "deselect" self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='vmotion', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="vmotion", + operation=operation, ) if changed_service_ft: - if self.vnic.device in service_type_vmks['faultToleranceLogging']: - services_previous.append('FT') - operation = 'select' if self.enable_ft else 'deselect' + if ( + self.vnic.device + in service_type_vmks["faultToleranceLogging"] + ): + services_previous.append("FT") + operation = "select" if self.enable_ft else "deselect" self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='faultToleranceLogging', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="faultToleranceLogging", + operation=operation, ) if changed_service_prov: - if self.vnic.device in service_type_vmks['vSphereProvisioning']: - services_previous.append('Prov') - operation = 'select' if self.enable_provisioning else 'deselect' + if ( + self.vnic.device + in service_type_vmks["vSphereProvisioning"] + ): + services_previous.append("Prov") + operation = ( + "select" if self.enable_provisioning else "deselect" + ) self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereProvisioning', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="vSphereProvisioning", + operation=operation, ) if changed_service_rep: - if self.vnic.device in service_type_vmks['vSphereReplication']: - services_previous.append('Repl') - operation = 'select' if self.enable_replication else 'deselect' + if ( + self.vnic.device + in service_type_vmks["vSphereReplication"] + ): + services_previous.append("Repl") + operation = ( + "select" if self.enable_replication else "deselect" + ) self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereReplication', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="vSphereReplication", + operation=operation, ) if changed_service_rep_nfc: - if self.vnic.device in service_type_vmks['vSphereReplicationNFC']: - services_previous.append('Repl_NFC') - operation = 'select' if self.enable_replication_nfc else 'deselect' + if ( + self.vnic.device + in service_type_vmks["vSphereReplicationNFC"] + ): + services_previous.append("Repl_NFC") + operation = ( + "select" if self.enable_replication_nfc else "deselect" + ) self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='vSphereReplicationNFC', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="vSphereReplicationNFC", + operation=operation, ) if changed_service_vsan: - if self.vnic.device in service_type_vmks['vsan']: - services_previous.append('VSAN') + if self.vnic.device in service_type_vmks["vsan"]: + services_previous.append("VSAN") if self.enable_vsan: - results['vsan'] = self.set_vsan_service_type() + results["vsan"] = self.set_vsan_service_type() else: self.set_service_type( - vnic_manager=vnic_manager, vmk=self.vnic, service_type='vsan', operation=operation + vnic_manager=vnic_manager, + vmk=self.vnic, + service_type="vsan", + operation=operation, ) - results['services_previous'] = ', '.join(services_previous) + 
results["services_previous"] = ", ".join(services_previous) else: message = "VMkernel Adapter already configured properly" - results['changed'] = changed - results['msg'] = message - results['device'] = self.vnic.device + results["changed"] = changed + results["msg"] = message + results["device"] = self.vnic.device self.module.exit_json(**results) def find_dvs_by_uuid(self, uuid): @@ -806,7 +985,8 @@ def set_vsan_service_type(self): wait_for_task(vsan_task) except TaskError as task_err: self.module.fail_json( - msg="Failed to set service type to vsan for %s : %s" % (self.vnic.device, to_native(task_err)) + msg="Failed to set service type to vsan for %s : %s" + % (self.vnic.device, to_native(task_err)) ) return result @@ -816,117 +996,152 @@ def host_vmk_create(self): Returns: NA """ - results = dict(changed=False, message='') + results = dict(changed=False, message="") if self.vswitch_name: - results['switch'] = self.vswitch_name + results["switch"] = self.vswitch_name elif self.vds_name: - results['switch'] = self.vds_name - results['portgroup'] = self.port_group_name + results["switch"] = self.vds_name + results["portgroup"] = self.port_group_name vnic_config = vim.host.VirtualNic.Specification() ip_spec = vim.host.IpConfig() - results['ipv4'] = self.network_type - if self.network_type == 'dhcp': + results["ipv4"] = self.network_type + if self.network_type == "dhcp": ip_spec.dhcp = True else: ip_spec.dhcp = False - results['ipv4_ip'] = self.ip_address - results['ipv4_sm'] = self.subnet_mask + results["ipv4_ip"] = self.ip_address + results["ipv4_sm"] = self.subnet_mask ip_spec.ipAddress = self.ip_address ip_spec.subnetMask = self.subnet_mask if self.default_gateway: vnic_config.ipRouteSpec = vim.host.VirtualNic.IpRouteSpec() - vnic_config.ipRouteSpec.ipRouteConfig = vim.host.IpRouteConfig() - vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = self.default_gateway + vnic_config.ipRouteSpec.ipRouteConfig = ( + vim.host.IpRouteConfig() + ) + vnic_config.ipRouteSpec.ipRouteConfig.defaultGateway = ( + self.default_gateway + ) vnic_config.ip = ip_spec - results['mtu'] = self.mtu + results["mtu"] = self.mtu vnic_config.mtu = self.mtu - results['tcpip_stack'] = self.tcpip_stack - vnic_config.netStackInstanceKey = self.get_api_net_stack_instance(self.tcpip_stack) + results["tcpip_stack"] = self.tcpip_stack + vnic_config.netStackInstanceKey = self.get_api_net_stack_instance( + self.tcpip_stack + ) vmk_device = None try: if self.module.check_mode: - results['msg'] = "VMkernel Adapter would be created" + results["msg"] = "VMkernel Adapter would be created" else: if self.vswitch_name: vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic( - self.port_group_name, - vnic_config + self.port_group_name, vnic_config ) elif self.vds_name: - vnic_config.distributedVirtualPort = vim.dvs.PortConnection() - vnic_config.distributedVirtualPort.switchUuid = self.dv_switch_obj.uuid - vnic_config.distributedVirtualPort.portgroupKey = self.port_group_obj.key - vmk_device = self.esxi_host_obj.configManager.networkSystem.AddVirtualNic(portgroup="", nic=vnic_config) - results['msg'] = "VMkernel Adapter created" - results['changed'] = True - results['device'] = vmk_device - if self.network_type != 'dhcp': + vnic_config.distributedVirtualPort = ( + vim.dvs.PortConnection() + ) + vnic_config.distributedVirtualPort.switchUuid = ( + self.dv_switch_obj.uuid + ) + vnic_config.distributedVirtualPort.portgroupKey = ( + self.port_group_obj.key + ) + vmk_device = 
self.esxi_host_obj.configManager.networkSystem.AddVirtualNic( + portgroup="", nic=vnic_config + ) + results["msg"] = "VMkernel Adapter created" + results["changed"] = True + results["device"] = vmk_device + if self.network_type != "dhcp": if self.default_gateway: - results['ipv4_gw'] = self.default_gateway + results["ipv4_gw"] = self.default_gateway else: - results['ipv4_gw'] = "No override" - results['services'] = self.create_enabled_services_string() + results["ipv4_gw"] = "No override" + results["services"] = self.create_enabled_services_string() except vim.fault.AlreadyExists as already_exists: self.module.fail_json( - msg="Failed to add vmk as portgroup already has a virtual network adapter %s" % - to_native(already_exists.msg) + msg="Failed to add vmk as portgroup already has a virtual network adapter %s" + % to_native(already_exists.msg) ) except vim.fault.HostConfigFault as host_config_fault: self.module.fail_json( - msg="Failed to add vmk due to host config issues : %s" % - to_native(host_config_fault.msg) + msg="Failed to add vmk due to host config issues : %s" + % to_native(host_config_fault.msg) ) except vim.fault.InvalidState as invalid_state: self.module.fail_json( - msg="Failed to add vmk as ipv6 address is specified in an ipv4 only system : %s" % - to_native(invalid_state.msg) + msg="Failed to add vmk as ipv6 address is specified in an ipv4 only system : %s" + % to_native(invalid_state.msg) ) except vmodl.fault.InvalidArgument as invalid_arg: self.module.fail_json( msg="Failed to add vmk as IP address or Subnet Mask in the IP configuration " - "are invalid or PortGroup does not exist : %s" % to_native(invalid_arg.msg) + "are invalid or PortGroup does not exist : %s" + % to_native(invalid_arg.msg) ) # do service type configuration - if self.tcpip_stack == 'default' and not all( - option is False for option in [self.enable_vsan, self.enable_vmotion, - self.enable_mgmt, self.enable_ft, - self.enable_provisioning, self.enable_replication, - self.enable_replication_nfc]): + if self.tcpip_stack == "default" and not all( + option is False + for option in [ + self.enable_vsan, + self.enable_vmotion, + self.enable_mgmt, + self.enable_ft, + self.enable_provisioning, + self.enable_replication, + self.enable_replication_nfc, + ] + ): self.vnic = self.get_vmkernel_by_device(device_name=vmk_device) # VSAN if self.enable_vsan: - results['vsan'] = self.set_vsan_service_type() + results["vsan"] = self.set_vsan_service_type() # Other service type - host_vnic_manager = self.esxi_host_obj.configManager.virtualNicManager + host_vnic_manager = ( + self.esxi_host_obj.configManager.virtualNicManager + ) if self.enable_vmotion: - self.set_service_type(host_vnic_manager, self.vnic, 'vmotion') + self.set_service_type(host_vnic_manager, self.vnic, "vmotion") if self.enable_mgmt: - self.set_service_type(host_vnic_manager, self.vnic, 'management') + self.set_service_type( + host_vnic_manager, self.vnic, "management" + ) if self.enable_ft: - self.set_service_type(host_vnic_manager, self.vnic, 'faultToleranceLogging') + self.set_service_type( + host_vnic_manager, self.vnic, "faultToleranceLogging" + ) if self.enable_provisioning: - self.set_service_type(host_vnic_manager, self.vnic, 'vSphereProvisioning') + self.set_service_type( + host_vnic_manager, self.vnic, "vSphereProvisioning" + ) if self.enable_replication: - self.set_service_type(host_vnic_manager, self.vnic, 'vSphereReplication') + self.set_service_type( + host_vnic_manager, self.vnic, "vSphereReplication" + ) if self.enable_replication_nfc: - 
self.set_service_type(host_vnic_manager, self.vnic, 'vSphereReplicationNFC') + self.set_service_type( + host_vnic_manager, self.vnic, "vSphereReplicationNFC" + ) self.module.exit_json(**results) - def set_service_type(self, vnic_manager, vmk, service_type, operation='select'): + def set_service_type( + self, vnic_manager, vmk, service_type, operation="select" + ): """ Set service type to given VMKernel Args: @@ -937,16 +1152,23 @@ def set_service_type(self, vnic_manager, vmk, service_type, operation='select'): """ try: - if operation == 'select': + if operation == "select": if not self.module.check_mode: vnic_manager.SelectVnicForNicType(service_type, vmk.device) - elif operation == 'deselect': + elif operation == "deselect": if not self.module.check_mode: - vnic_manager.DeselectVnicForNicType(service_type, vmk.device) + vnic_manager.DeselectVnicForNicType( + service_type, vmk.device + ) except vmodl.fault.InvalidArgument as invalid_arg: self.module.fail_json( - msg="Failed to %s VMK service type '%s' on '%s' due to : %s" % - (operation, service_type, vmk.device, to_native(invalid_arg.msg)) + msg="Failed to %s VMK service type '%s' on '%s' due to : %s" + % ( + operation, + service_type, + vmk.device, + to_native(invalid_arg.msg), + ) ) def get_all_vmks_by_service_type(self): @@ -982,132 +1204,154 @@ def query_service_type_for_vmks(self, service_type): vmks_list = [] query = None try: - query = self.esxi_host_obj.configManager.virtualNicManager.QueryNetConfig(service_type) + query = self.esxi_host_obj.configManager.virtualNicManager.QueryNetConfig( + service_type + ) except vim.fault.HostConfigFault as config_fault: self.module.fail_json( - msg="Failed to get all VMKs for service type %s due to host config fault : %s" % - (service_type, to_native(config_fault.msg)) + msg="Failed to get all VMKs for service type %s due to host config fault : %s" + % (service_type, to_native(config_fault.msg)) ) except vmodl.fault.InvalidArgument as invalid_argument: self.module.fail_json( - msg="Failed to get all VMKs for service type %s due to invalid arguments : %s" % - (service_type, to_native(invalid_argument.msg)) + msg="Failed to get all VMKs for service type %s due to invalid arguments : %s" + % (service_type, to_native(invalid_argument.msg)) ) if not query.selectedVnic: return vmks_list selected_vnics = [vnic for vnic in query.selectedVnic] - vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics] + vnics_with_service_type = [ + vnic.device + for vnic in query.candidateVnic + if vnic.key in selected_vnics + ] return vnics_with_service_type def create_enabled_services_string(self): """Create services list""" services = [] if self.enable_mgmt: - services.append('Mgmt') + services.append("Mgmt") if self.enable_vmotion: - services.append('vMotion') + services.append("vMotion") if self.enable_ft: - services.append('FT') + services.append("FT") if self.enable_vsan: - services.append('VSAN') + services.append("VSAN") if self.enable_provisioning: - services.append('Prov') + services.append("Prov") if self.enable_replication: - services.append('Repl') + services.append("Repl") if self.enable_replication_nfc: - services.append('Repl_NFC') - return ', '.join(services) + services.append("Repl_NFC") + return ", ".join(services) @staticmethod def get_api_net_stack_instance(tcpip_stack): """Get TCP/IP stack instance name or key""" net_stack_instance = None - if tcpip_stack == 'default': - net_stack_instance = 'defaultTcpipStack' - elif tcpip_stack == 'provisioning': - 
net_stack_instance = 'vSphereProvisioning' + if tcpip_stack == "default": + net_stack_instance = "defaultTcpipStack" + elif tcpip_stack == "provisioning": + net_stack_instance = "vSphereProvisioning" # vmotion and vxlan stay the same - elif tcpip_stack == 'vmotion': - net_stack_instance = 'vmotion' - elif tcpip_stack == 'vxlan': - net_stack_instance = 'vxlan' - elif tcpip_stack == 'defaultTcpipStack': - net_stack_instance = 'default' - elif tcpip_stack == 'vSphereProvisioning': - net_stack_instance = 'provisioning' + elif tcpip_stack == "vmotion": + net_stack_instance = "vmotion" + elif tcpip_stack == "vxlan": + net_stack_instance = "vxlan" + elif tcpip_stack == "defaultTcpipStack": + net_stack_instance = "default" + elif tcpip_stack == "vSphereProvisioning": + net_stack_instance = "provisioning" # vmotion and vxlan stay the same - elif tcpip_stack == 'vmotion': - net_stack_instance = 'vmotion' - elif tcpip_stack == 'vxlan': - net_stack_instance = 'vxlan' + elif tcpip_stack == "vmotion": + net_stack_instance = "vmotion" + elif tcpip_stack == "vxlan": + net_stack_instance = "vxlan" return net_stack_instance def main(): """Main""" argument_spec = vmware_argument_spec() - argument_spec.update(dict( - esxi_hostname=dict(required=True, type='str'), - portgroup_name=dict(required=True, type='str', aliases=['portgroup']), - ip_address=dict(removed_in_version=2.9, type='str'), - subnet_mask=dict(removed_in_version=2.9, type='str'), - mtu=dict(required=False, type='int', default=1500), - device=dict(type='str'), - enable_vsan=dict(required=False, type='bool', default=False), - enable_vmotion=dict(required=False, type='bool', default=False), - enable_mgmt=dict(required=False, type='bool', default=False), - enable_ft=dict(required=False, type='bool', default=False), - enable_provisioning=dict(type='bool', default=False), - enable_replication=dict(type='bool', default=False), - enable_replication_nfc=dict(type='bool', default=False), - vswitch_name=dict(required=False, type='str', aliases=['vswitch']), - dvswitch_name=dict(required=False, type='str', aliases=['dvswitch']), - network=dict( - type='dict', - options=dict( - type=dict(type='str', default='static', choices=['static', 'dhcp']), - ip_address=dict(type='str'), - subnet_mask=dict(type='str'), - default_gateway=dict(type='str'), - tcpip_stack=dict(type='str', default='default', choices=['default', 'provisioning', 'vmotion', 'vxlan']), + argument_spec.update( + dict( + esxi_hostname=dict(required=True, type="str"), + portgroup_name=dict( + required=True, type="str", aliases=["portgroup"] ), - default=dict( - type='static', - tcpip_stack='default', + ip_address=dict(removed_in_version=2.9, type="str"), + subnet_mask=dict(removed_in_version=2.9, type="str"), + mtu=dict(required=False, type="int", default=1500), + device=dict(type="str"), + enable_vsan=dict(required=False, type="bool", default=False), + enable_vmotion=dict(required=False, type="bool", default=False), + enable_mgmt=dict(required=False, type="bool", default=False), + enable_ft=dict(required=False, type="bool", default=False), + enable_provisioning=dict(type="bool", default=False), + enable_replication=dict(type="bool", default=False), + enable_replication_nfc=dict(type="bool", default=False), + vswitch_name=dict(required=False, type="str", aliases=["vswitch"]), + dvswitch_name=dict( + required=False, type="str", aliases=["dvswitch"] ), - ), - state=dict( - type='str', - default='present', - choices=['absent', 'present'] - ), - )) - - module = 
AnsibleModule(argument_spec=argument_spec, - mutually_exclusive=[ - ['vswitch_name', 'dvswitch_name'], - ['tcpip_stack', 'enable_vsan'], - ['tcpip_stack', 'enable_vmotion'], - ['tcpip_stack', 'enable_mgmt'], - ['tcpip_stack', 'enable_ft'], - ['tcpip_stack', 'enable_provisioning'], - ['tcpip_stack', 'enable_replication'], - ['tcpip_stack', 'enable_replication_nfc'], - ], - required_one_of=[ - ['vswitch_name', 'dvswitch_name'], - ['portgroup_name', 'device'], - ], - required_if=[ - ['state', 'present', ['portgroup_name']], - ['state', 'absent', ['device']] - ], - supports_check_mode=True) + network=dict( + type="dict", + options=dict( + type=dict( + type="str", + default="static", + choices=["static", "dhcp"], + ), + ip_address=dict(type="str"), + subnet_mask=dict(type="str"), + default_gateway=dict(type="str"), + tcpip_stack=dict( + type="str", + default="default", + choices=[ + "default", + "provisioning", + "vmotion", + "vxlan", + ], + ), + ), + default=dict(type="static", tcpip_stack="default"), + ), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ["vswitch_name", "dvswitch_name"], + ["tcpip_stack", "enable_vsan"], + ["tcpip_stack", "enable_vmotion"], + ["tcpip_stack", "enable_mgmt"], + ["tcpip_stack", "enable_ft"], + ["tcpip_stack", "enable_provisioning"], + ["tcpip_stack", "enable_replication"], + ["tcpip_stack", "enable_replication_nfc"], + ], + required_one_of=[ + ["vswitch_name", "dvswitch_name"], + ["portgroup_name", "device"], + ], + required_if=[ + ["state", "present", ["portgroup_name"]], + ["state", "absent", ["device"]], + ], + supports_check_mode=True, + ) pyv = PyVmomiHelper(module) pyv.ensure() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vmkernel_info.py b/plugins/modules/vmware_vmkernel_info.py index 8740587..c5fff21 100644 --- a/plugins/modules/vmware_vmkernel_info.py +++ b/plugins/modules/vmware_vmkernel_info.py @@ -5,15 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vmkernel_info short_description: Gathers VMKernel info about an ESXi host @@ -42,9 +43,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather VMKernel info about all ESXi Host in given Cluster vmware_vmkernel_info: hostname: '{{ vcenter_hostname }}' @@ -62,9 +63,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: host_vmks -''' +""" -RETURN = r''' +RETURN = r""" host_vmk_info: description: metadata about VMKernel present on given host system returned: success @@ -90,7 +91,7 @@ ] } -''' +""" try: from pyVmomi import vim, vmodl @@ -98,16 +99,21 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) from ansible.module_utils._text import to_native class VmkernelInfoManager(PyVmomi): def __init__(self, module): 
super(VmkernelInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) self.service_type_vmks = dict() self.get_all_vmks_by_service_type() @@ -117,9 +123,13 @@ def get_all_vmks_by_service_type(self): """ for host in self.hosts: - self.service_type_vmks[host.name] = dict(vmotion=[], vsan=[], management=[], faultToleranceLogging=[]) + self.service_type_vmks[host.name] = dict( + vmotion=[], vsan=[], management=[], faultToleranceLogging=[] + ) for service_type in self.service_type_vmks[host.name].keys(): - vmks_list = self.query_service_type_for_vmks(host, service_type) + vmks_list = self.query_service_type_for_vmks( + host, service_type + ) self.service_type_vmks[host.name][service_type] = vmks_list def query_service_type_for_vmks(self, host_system, service_type): @@ -135,21 +145,35 @@ def query_service_type_for_vmks(self, host_system, service_type): vmks_list = [] query = None try: - query = host_system.configManager.virtualNicManager.QueryNetConfig(service_type) + query = host_system.configManager.virtualNicManager.QueryNetConfig( + service_type + ) except vim.fault.HostConfigFault as config_fault: - self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" - " host config fault : %s" % (service_type, to_native(config_fault.msg))) + self.module.fail_json( + msg="Failed to get all VMKs for service type %s due to" + " host config fault : %s" + % (service_type, to_native(config_fault.msg)) + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" - " invalid arguments : %s" % (service_type, to_native(invalid_argument.msg))) + self.module.fail_json( + msg="Failed to get all VMKs for service type %s due to" + " invalid arguments : %s" + % (service_type, to_native(invalid_argument.msg)) + ) except Exception as e: - self.module.fail_json(msg="Failed to get all VMKs for service type %s due to" - "%s" % (service_type, to_native(e))) + self.module.fail_json( + msg="Failed to get all VMKs for service type %s due to" + "%s" % (service_type, to_native(e)) + ) if not query or not query.selectedVnic: return vmks_list selected_vnics = [vnic for vnic in query.selectedVnic] - vnics_with_service_type = [vnic.device for vnic in query.candidateVnic if vnic.key in selected_vnics] + vnics_with_service_type = [ + vnic.device + for vnic in query.candidateVnic + if vnic.key in selected_vnics + ] return vnics_with_service_type def gather_host_vmk_info(self): @@ -161,21 +185,28 @@ def gather_host_vmk_info(self): if host_network_system: vmks_config = host.config.network.vnic for vmk in vmks_config: - host_vmk_info.append(dict( - device=vmk.device, - key=vmk.key, - portgroup=vmk.portgroup, - ipv4_address=vmk.spec.ip.ipAddress, - ipv4_subnet_mask=vmk.spec.ip.subnetMask, - dhcp=vmk.spec.ip.dhcp, - mac=vmk.spec.mac, - mtu=vmk.spec.mtu, - stack=vmk.spec.netStackInstanceKey, - enable_vsan=vmk.device in self.service_type_vmks[host.name]['vsan'], - enable_vmotion=vmk.device in self.service_type_vmks[host.name]['vmotion'], - enable_management=vmk.device in self.service_type_vmks[host.name]['management'], - enable_ft=vmk.device in 
self.service_type_vmks[host.name]['faultToleranceLogging'], - ) + host_vmk_info.append( + dict( + device=vmk.device, + key=vmk.key, + portgroup=vmk.portgroup, + ipv4_address=vmk.spec.ip.ipAddress, + ipv4_subnet_mask=vmk.spec.ip.subnetMask, + dhcp=vmk.spec.ip.dhcp, + mac=vmk.spec.mac, + mtu=vmk.spec.mtu, + stack=vmk.spec.netStackInstanceKey, + enable_vsan=vmk.device + in self.service_type_vmks[host.name]["vsan"], + enable_vmotion=vmk.device + in self.service_type_vmks[host.name]["vmotion"], + enable_management=vmk.device + in self.service_type_vmks[host.name]["management"], + enable_ft=vmk.device + in self.service_type_vmks[host.name][ + "faultToleranceLogging" + ], + ) ) hosts_info[host.name] = host_vmk_info return hosts_info @@ -184,20 +215,20 @@ def gather_host_vmk_info(self): def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_vmk_config = VmkernelInfoManager(module) - module.exit_json(changed=False, host_vmk_info=vmware_vmk_config.gather_host_vmk_info()) + module.exit_json( + changed=False, host_vmk_info=vmware_vmk_config.gather_host_vmk_info() + ) if __name__ == "__main__": diff --git a/plugins/modules/vmware_vmkernel_ip_config.py b/plugins/modules/vmware_vmkernel_ip_config.py index 49b09eb..cca55d0 100644 --- a/plugins/modules/vmware_vmkernel_ip_config.py +++ b/plugins/modules/vmware_vmkernel_ip_config.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vmkernel_ip_config short_description: Configure the VMkernel IP Address @@ -44,9 +47,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ # Example command from Ansible Playbook - name: Configure IP address on ESX host @@ -58,19 +61,27 @@ ip_address: 10.0.0.10 subnet_mask: 255.255.255.0 delegate_to: localhost -''' +""" try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + HAS_PYVMOMI, + connect_to_api, + get_all_objs, + vmware_argument_spec, +) -def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask): +def configure_vmkernel_ip_address( + host_system, vmk_name, ip_address, subnet_mask +): host_config_manager = host_system.configManager host_network_system = host_config_manager.networkSystem @@ -90,18 +101,24 @@ def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask def main(): argument_spec = vmware_argument_spec() - 
argument_spec.update(dict(vmk_name=dict(required=True, type='str'), - ip_address=dict(required=True, type='str'), - subnet_mask=dict(required=True, type='str'))) - - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + argument_spec.update( + dict( + vmk_name=dict(required=True, type="str"), + ip_address=dict(required=True, type="str"), + subnet_mask=dict(required=True, type="str"), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) if not HAS_PYVMOMI: - module.fail_json(msg='pyvmomi is required for this module') + module.fail_json(msg="pyvmomi is required for this module") - vmk_name = module.params['vmk_name'] - ip_address = module.params['ip_address'] - subnet_mask = module.params['subnet_mask'] + vmk_name = module.params["vmk_name"] + ip_address = module.params["ip_address"] + subnet_mask = module.params["subnet_mask"] try: content = connect_to_api(module, False) @@ -109,7 +126,9 @@ def main(): if not host: module.fail_json(msg="Unable to locate Physical Host.") host_system = list(host)[0] - changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask) + changed = configure_vmkernel_ip_address( + host_system, vmk_name, ip_address, subnet_mask + ) module.exit_json(changed=changed) except vmodl.RuntimeFault as runtime_fault: module.fail_json(msg=runtime_fault.msg) @@ -119,5 +138,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vmotion.py b/plugins/modules/vmware_vmotion.py index a3cda2a..922a9cd 100644 --- a/plugins/modules/vmware_vmotion.py +++ b/plugins/modules/vmware_vmotion.py @@ -9,16 +9,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vmotion short_description: Move a virtual machine using vMotion, and/or its vmdks using storage vMotion. 
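The vmware_vmotion hunks further down are re-wrapping only; the migration itself remains a single RelocateSpec built from whichever destination objects were resolved and handed to the VM's Relocate method. A rough pyVmomi sketch, assuming vm, host_obj, datastore_obj and pool_obj are managed objects looked up elsewhere (leaving a destination as None generally keeps that aspect of placement unchanged):

# Rough sketch of the relocation call the module wraps (pyVmomi).
# vm, host_obj, datastore_obj and pool_obj are assumed to be resolved
# beforehand; the module itself then waits on and inspects the task.
from pyVmomi import vim

def migrate(vm, host_obj=None, datastore_obj=None, pool_obj=None):
    spec = vim.vm.RelocateSpec(
        host=host_obj,            # vMotion target host
        datastore=datastore_obj,  # storage vMotion target datastore
        pool=pool_obj,            # target resource pool
    )
    return vm.Relocate(spec)      # returns a vCenter task object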
@@ -78,9 +79,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Perform vMotion of virtual machine vmware_vmotion: hostname: '{{ vcenter_hostname }}' @@ -132,15 +133,15 @@ destination_host: 'destination_host_as_per_vcenter' destination_datastore: 'destination_datastore_as_per_vcenter' delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = """ running_host: description: List the host the virtual machine is registered to returned: changed or success type: str sample: 'host1.example.com' -''' +""" try: from pyVmomi import vim, VmomiSupport @@ -149,73 +150,98 @@ from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (PyVmomi, find_hostsystem_by_name, - find_vm_by_id, find_datastore_by_name, - find_resource_pool_by_name, - vmware_argument_spec, wait_for_task, TaskError) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + find_hostsystem_by_name, + find_vm_by_id, + find_datastore_by_name, + find_resource_pool_by_name, + vmware_argument_spec, + wait_for_task, + TaskError, +) class VmotionManager(PyVmomi): def __init__(self, module): super(VmotionManager, self).__init__(module) self.vm = None - self.vm_uuid = self.params.get('vm_uuid', None) - self.use_instance_uuid = self.params.get('use_instance_uuid', False) - self.vm_name = self.params.get('vm_name', None) - self.moid = self.params.get('moid') or None + self.vm_uuid = self.params.get("vm_uuid", None) + self.use_instance_uuid = self.params.get("use_instance_uuid", False) + self.vm_name = self.params.get("vm_name", None) + self.moid = self.params.get("moid") or None result = dict() self.get_vm() if self.vm is None: vm_id = self.vm_uuid or self.vm_name or self.moid - self.module.fail_json(msg="Failed to find the virtual machine with %s" % vm_id) + self.module.fail_json( + msg="Failed to find the virtual machine with %s" % vm_id + ) # Get Destination Host System if specified by user - dest_host_name = self.params.get('destination_host', None) + dest_host_name = self.params.get("destination_host", None) self.host_object = None if dest_host_name is not None: - self.host_object = find_hostsystem_by_name(content=self.content, - hostname=dest_host_name) + self.host_object = find_hostsystem_by_name( + content=self.content, hostname=dest_host_name + ) # Get Destination Datastore if specified by user - dest_datastore = self.params.get('destination_datastore', None) + dest_datastore = self.params.get("destination_datastore", None) self.datastore_object = None if dest_datastore is not None: - self.datastore_object = find_datastore_by_name(content=self.content, - datastore_name=dest_datastore) + self.datastore_object = find_datastore_by_name( + content=self.content, datastore_name=dest_datastore + ) # At-least one of datastore, host system is required to migrate if self.datastore_object is None and self.host_object is None: - self.module.fail_json(msg="Unable to find destination datastore" - " and destination host system.") + self.module.fail_json( + msg="Unable to find destination datastore" + " and destination host system." 
+ ) # Get Destination resourcepool - dest_resourcepool = self.params.get('destination_resourcepool', None) + dest_resourcepool = self.params.get("destination_resourcepool", None) self.resourcepool_object = None if dest_resourcepool: - self.resourcepool_object = find_resource_pool_by_name(content=self.content, - resource_pool_name=dest_resourcepool) + self.resourcepool_object = find_resource_pool_by_name( + content=self.content, resource_pool_name=dest_resourcepool + ) elif not dest_resourcepool and dest_host_name: self.resourcepool_object = self.host_object.parent.resourcePool # Fail if resourcePool object is not found if self.resourcepool_object is None: - self.module.fail_json(msg="Unable to destination resource pool object which is required") + self.module.fail_json( + msg="Unable to destination resource pool object which is required" + ) # Check if datastore is required, this check is required if destination # and source host system does not share same datastore. host_datastore_required = [] for vm_datastore in self.vm.datastore: - if self.host_object and vm_datastore not in self.host_object.datastore: + if ( + self.host_object + and vm_datastore not in self.host_object.datastore + ): host_datastore_required.append(True) else: host_datastore_required.append(False) if any(host_datastore_required) and dest_datastore is None: - msg = "Destination host system does not share" \ - " datastore ['%s'] with source host system ['%s'] on which" \ - " virtual machine is located. Please specify destination_datastore" \ - " to rectify this problem." % ("', '".join([ds.name for ds in self.host_object.datastore]), - "', '".join([ds.name for ds in self.vm.datastore])) + msg = ( + "Destination host system does not share" + " datastore ['%s'] with source host system ['%s'] on which" + " virtual machine is located. Please specify destination_datastore" + " to rectify this problem." + % ( + "', '".join( + [ds.name for ds in self.host_object.datastore] + ), + "', '".join([ds.name for ds in self.vm.datastore]), + ) + ) self.module.fail_json(msg=msg) @@ -226,20 +252,32 @@ def __init__(self, module): # We have both host system and datastore object if not self.datastore_object.summary.accessible: # Datastore is not accessible - self.module.fail_json(msg='Destination datastore %s is' - ' not accessible.' % dest_datastore) + self.module.fail_json( + msg="Destination datastore %s is" + " not accessible." % dest_datastore + ) if self.datastore_object not in self.host_object.datastore: # Datastore is not associated with host system - self.module.fail_json(msg="Destination datastore %s provided" - " is not associated with destination" - " host system %s. Please specify" - " datastore value ['%s'] associated with" - " the given host system." % (dest_datastore, - dest_host_name, - "', '".join([ds.name for ds in self.host_object.datastore]))) - - if self.vm.runtime.host.name == dest_host_name and dest_datastore in [ds.name for ds in self.vm.datastore]: + self.module.fail_json( + msg="Destination datastore %s provided" + " is not associated with destination" + " host system %s. Please specify" + " datastore value ['%s'] associated with" + " the given host system." 
+ % ( + dest_datastore, + dest_host_name, + "', '".join( + [ds.name for ds in self.host_object.datastore] + ), + ) + ) + + if ( + self.vm.runtime.host.name == dest_host_name + and dest_datastore in [ds.name for ds in self.vm.datastore] + ): change_required = False if self.host_object and self.datastore_object is None: @@ -256,12 +294,14 @@ def __init__(self, module): if not self.datastore_object.summary.accessible: # Datastore is not accessible - self.module.fail_json(msg='Destination datastore %s is' - ' not accessible.' % dest_datastore) + self.module.fail_json( + msg="Destination datastore %s is" + " not accessible." % dest_datastore + ) if module.check_mode: - result['running_host'] = module.params['destination_host'] - result['changed'] = True + result["running_host"] = module.params["destination_host"] + result["changed"] = True module.exit_json(**result) if change_required: @@ -277,30 +317,32 @@ def __init__(self, module): # The storage layout is not automatically refreshed, so we trigger it to get coherent module return values if storage_vmotion_needed: self.vm.RefreshStorageInfo() - result['running_host'] = module.params['destination_host'] - result['changed'] = True + result["running_host"] = module.params["destination_host"] + result["changed"] = True module.exit_json(**result) else: - msg = 'Unable to migrate virtual machine due to an error, please check vCenter' + msg = "Unable to migrate virtual machine due to an error, please check vCenter" if task_object.info.error is not None: msg += " : %s" % task_object.info.error module.fail_json(msg=msg) else: try: host = self.vm.summary.runtime.host - result['running_host'] = host.summary.config.name + result["running_host"] = host.summary.config.name except vim.fault.NoPermission: - result['running_host'] = 'NA' - result['changed'] = False + result["running_host"] = "NA" + result["changed"] = False module.exit_json(**result) def migrate_vm(self): """ Migrate virtual machine and return the task. """ - relocate_spec = vim.vm.RelocateSpec(host=self.host_object, - datastore=self.datastore_object, - pool=self.resourcepool_object) + relocate_spec = vim.vm.RelocateSpec( + host=self.host_object, + datastore=self.datastore_object, + pool=self.resourcepool_object, + ) task_object = self.vm.Relocate(relocate_spec) return task_object @@ -313,12 +355,22 @@ def get_vm(self): vms = [] if self.vm_uuid: if not self.use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.params['vm_uuid'], vm_id_type="uuid") + vm_obj = find_vm_by_id( + self.content, + vm_id=self.params["vm_uuid"], + vm_id_type="uuid", + ) elif self.use_instance_uuid: - vm_obj = find_vm_by_id(self.content, vm_id=self.params['vm_uuid'], vm_id_type="instance_uuid") + vm_obj = find_vm_by_id( + self.content, + vm_id=self.params["vm_uuid"], + vm_id_type="instance_uuid", + ) vms = [vm_obj] elif self.vm_name: - objects = self.get_managed_objects_properties(vim_type=vim.VirtualMachine, properties=['name']) + objects = self.get_managed_objects_properties( + vim_type=vim.VirtualMachine, properties=["name"] + ) for temp_vm_object in objects: if len(temp_vm_object.propSet) != 1: continue @@ -326,13 +378,17 @@ def get_vm(self): vms.append(temp_vm_object.obj) break elif self.moid: - vm_obj = VmomiSupport.templateOf('VirtualMachine')(self.moid, self.si._stub) + vm_obj = VmomiSupport.templateOf("VirtualMachine")( + self.moid, self.si._stub + ) if vm_obj: vms.append(vm_obj) if len(vms) > 1: - self.module.fail_json(msg="Multiple virtual machines with same name %s found." 
- " Please specify vm_uuid instead of vm_name." % self.vm_name) + self.module.fail_json( + msg="Multiple virtual machines with same name %s found." + " Please specify vm_uuid instead of vm_name." % self.vm_name + ) if vms: self.vm = vms[0] @@ -342,13 +398,13 @@ def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( - vm_name=dict(aliases=['vm']), - vm_uuid=dict(aliases=['uuid']), - moid=dict(type='str'), - use_instance_uuid=dict(type='bool', default=False), - destination_host=dict(aliases=['destination']), - destination_resourcepool=dict(aliases=['resource_pool']), - destination_datastore=dict(aliases=['datastore']) + vm_name=dict(aliases=["vm"]), + vm_uuid=dict(aliases=["uuid"]), + moid=dict(type="str"), + use_instance_uuid=dict(type="bool", default=False), + destination_host=dict(aliases=["destination"]), + destination_resourcepool=dict(aliases=["resource_pool"]), + destination_datastore=dict(aliases=["datastore"]), ) ) @@ -356,16 +412,14 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, required_one_of=[ - ['destination_host', 'destination_datastore'], - ['vm_uuid', 'vm_name', 'moid'], - ], - mutually_exclusive=[ - ['vm_uuid', 'vm_name', 'moid'], + ["destination_host", "destination_datastore"], + ["vm_uuid", "vm_name", "moid"], ], + mutually_exclusive=[["vm_uuid", "vm_name", "moid"]], ) vmotion_manager = VmotionManager(module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vsan_cluster.py b/plugins/modules/vmware_vsan_cluster.py index 6e70234..9eb2f34 100644 --- a/plugins/modules/vmware_vsan_cluster.py +++ b/plugins/modules/vmware_vsan_cluster.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vsan_cluster short_description: Configure VSAN clustering on an ESXi host @@ -33,9 +36,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Configure VMware VSAN Cluster hosts: deploy_node tags: @@ -57,17 +60,23 @@ cluster_uuid: "{{ vsan_cluster.cluster_uuid }}" delegate_to: localhost loop: "{{ groups['esxi'][1:] }}" -''' +""" try: from pyVmomi import vim, vmodl + HAS_PYVMOMI = True except ImportError: HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec, - wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + HAS_PYVMOMI, + connect_to_api, + get_all_objs, + vmware_argument_spec, + wait_for_task, +) def create_vsan_cluster(host_system, new_cluster_uuid): @@ -96,14 +105,16 @@ def create_vsan_cluster(host_system, new_cluster_uuid): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict(cluster_uuid=dict(required=False, type='str'))) + argument_spec.update(dict(cluster_uuid=dict(required=False, type="str"))) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) if not HAS_PYVMOMI: - 
module.fail_json(msg='pyvmomi is required for this module') + module.fail_json(msg="pyvmomi is required for this module") - new_cluster_uuid = module.params['cluster_uuid'] + new_cluster_uuid = module.params["cluster_uuid"] try: content = connect_to_api(module, False) @@ -111,8 +122,12 @@ def main(): if not host: module.fail_json(msg="Unable to locate Physical Host.") host_system = list(host)[0] - changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid) - module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid) + changed, result, cluster_uuid = create_vsan_cluster( + host_system, new_cluster_uuid + ) + module.exit_json( + changed=changed, result=result, cluster_uuid=cluster_uuid + ) except vmodl.RuntimeFault as runtime_fault: module.fail_json(msg=runtime_fault.msg) @@ -122,5 +137,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vsan_health_info.py b/plugins/modules/vmware_vsan_health_info.py index daaceb2..45282a1 100644 --- a/plugins/modules/vmware_vsan_health_info.py +++ b/plugins/modules/vmware_vsan_health_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vsan_health_info @@ -45,9 +46,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Gather health info from a vSAN's cluster hosts: localhost gather_facts: 'no' @@ -57,9 +58,9 @@ password: "{{ vcenter_password }}" cluster_name: 'vSAN01' fetch_from_cache: False -''' +""" -RETURN = ''' +RETURN = """ vsan_health_info: description: vSAN cluster health info returned: on success @@ -100,15 +101,16 @@ "untrackedHosts": [] } } -''' +""" import json import traceback try: from pyVmomi import vim, vmodl, VmomiSupport + HAS_PYVMOMI = True - HAS_PYVMOMIJSON = hasattr(VmomiSupport, 'VmomiJSONEncoder') + HAS_PYVMOMIJSON = hasattr(VmomiSupport, "VmomiJSONEncoder") except ImportError: PYVMOMI_IMP_ERR = traceback.format_exc() HAS_PYVMOMI = False @@ -117,32 +119,46 @@ try: import vsanapiutils import vsanmgmtObjects + HAS_VSANPYTHONSDK = True except ImportError: VSANPYTHONSDK_IMP_ERR = traceback.format_exc() HAS_VSANPYTHONSDK = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible_collections.vmware.general.plugins.module_utils.vmware import connect_to_api, vmware_argument_spec, find_cluster_by_name +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + connect_to_api, + vmware_argument_spec, + find_cluster_by_name, +) def main(): argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(required=True, type='str'), - fetch_from_cache=dict(required=False, type='bool') + cluster_name=dict(required=True, type="str"), + fetch_from_cache=dict(required=False, type="bool"), ) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=True + ) if not HAS_PYVMOMI: - module.fail_json(msg=missing_required_lib('PyVmomi'), exception=PYVMOMI_IMP_ERR) + module.fail_json( + 
msg=missing_required_lib("PyVmomi"), exception=PYVMOMI_IMP_ERR + ) if not HAS_VSANPYTHONSDK: - module.fail_json(msg=missing_required_lib('vSAN Management SDK for Python'), exception=VSANPYTHONSDK_IMP_ERR) + module.fail_json( + msg=missing_required_lib("vSAN Management SDK for Python"), + exception=VSANPYTHONSDK_IMP_ERR, + ) if not HAS_PYVMOMIJSON: - module.fail_json(msg='The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1') + module.fail_json( + msg="The installed version of pyvmomi lacks JSON output support; need pyvmomi>6.7.1" + ) try: si, content = connect_to_api(module, True, True) @@ -150,32 +166,40 @@ def main(): module.fail_json(msg=e.msg) client_stub = si._GetStub() - ssl_context = client_stub.schemeArgs.get('context') + ssl_context = client_stub.schemeArgs.get("context") - cluster = find_cluster_by_name(content, module.params['cluster_name']) + cluster = find_cluster_by_name(content, module.params["cluster_name"]) if not cluster: - module.fail_json(msg="Failed to find cluster %s" % module.params['cluster_name']) + module.fail_json( + msg="Failed to find cluster %s" % module.params["cluster_name"] + ) - apiVersion = vsanapiutils.GetLatestVmodlVersion(module.params['hostname']) - vcMos = vsanapiutils.GetVsanVcMos(client_stub, context=ssl_context, version=apiVersion) + apiVersion = vsanapiutils.GetLatestVmodlVersion(module.params["hostname"]) + vcMos = vsanapiutils.GetVsanVcMos( + client_stub, context=ssl_context, version=apiVersion + ) - vsanClusterHealthSystem = vcMos['vsan-cluster-health-system'] + vsanClusterHealthSystem = vcMos["vsan-cluster-health-system"] try: clusterHealth = vsanClusterHealthSystem.VsanQueryVcClusterHealthSummary( - cluster=cluster, - fetchFromCache=module.params['fetch_from_cache'] + cluster=cluster, fetchFromCache=module.params["fetch_from_cache"] ) except vmodl.fault.NotFound as not_found: module.fail_json(msg=not_found.msg) except vmodl.fault.RuntimeFault as runtime_fault: module.fail_json(msg=runtime_fault.msg) - health = json.dumps(clusterHealth, cls=VmomiSupport.VmomiJSONEncoder, sort_keys=True, strip_dynamic=True) + health = json.dumps( + clusterHealth, + cls=VmomiSupport.VmomiJSONEncoder, + sort_keys=True, + strip_dynamic=True, + ) module.exit_json(changed=False, vsan_health_info=health) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vspan_session.py b/plugins/modules/vmware_vspan_session.py index 6c7d6d4..5173eb6 100644 --- a/plugins/modules/vmware_vspan_session.py +++ b/plugins/modules/vmware_vspan_session.py @@ -10,12 +10,12 @@ __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vspan_session short_description: Create or remove a Port Mirroring session. @@ -149,9 +149,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Create distributed mirroring session. 
vmware_vspan_session: hostname: '{{ vcenter_hostname }}' @@ -191,7 +191,7 @@ state: absent name: Remote Session delegate_to: localhost -''' +""" RETURN = """# """ @@ -202,60 +202,91 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import (vmware_argument_spec, PyVmomi, find_dvs_by_name, - find_vm_by_name, wait_for_task) +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, + find_dvs_by_name, + find_vm_by_name, + wait_for_task, +) class VMwareVspanSession(PyVmomi): def __init__(self, module): super(VMwareVspanSession, self).__init__(module) - self.switch = module.params['switch'] - self.name = module.params['name'] - self.session_type = module.params['session_type'] - self.enabled = module.params['enabled'] - self.state = module.params['state'] - self.description = module.params['description'] - self.source_port_transmitted = module.params['source_port_transmitted'] - self.source_port_received = module.params['source_port_received'] - self.destination_port = module.params['destination_port'] - self.encapsulation_vlan_id = module.params['encapsulation_vlan_id'] - self.strip_original_vlan = module.params['strip_original_vlan'] - self.mirrored_packet_length = module.params['mirrored_packet_length'] - self.normal_traffic_allowed = module.params['normal_traffic_allowed'] - self.sampling_rate = module.params['sampling_rate'] + self.switch = module.params["switch"] + self.name = module.params["name"] + self.session_type = module.params["session_type"] + self.enabled = module.params["enabled"] + self.state = module.params["state"] + self.description = module.params["description"] + self.source_port_transmitted = module.params["source_port_transmitted"] + self.source_port_received = module.params["source_port_received"] + self.destination_port = module.params["destination_port"] + self.encapsulation_vlan_id = module.params["encapsulation_vlan_id"] + self.strip_original_vlan = module.params["strip_original_vlan"] + self.mirrored_packet_length = module.params["mirrored_packet_length"] + self.normal_traffic_allowed = module.params["normal_traffic_allowed"] + self.sampling_rate = module.params["sampling_rate"] self.dv_switch = find_dvs_by_name(self.content, self.switch) if self.dv_switch is None: - self.module.fail_json(msg="There is no dvSwitch with the name: {0:s}.".format(self.switch)) + self.module.fail_json( + msg="There is no dvSwitch with the name: {0:s}.".format( + self.switch + ) + ) self.operation = None self.modified_ports = dict() self.deleted_session = None - if module.params['source_vm_transmitted'] is not None: - if (module.params['source_vm_transmitted']['name'] is None or - module.params['source_vm_transmitted']['nic_label'] is None): - self.module.fail_json(msg="Please provide both VM name and NIC Label") - self.source_vm_transmitted_name = module.params['source_vm_transmitted']['name'] - self.source_vm_transmitted_nic_label = module.params['source_vm_transmitted']['nic_label'] - if module.params['source_vm_received'] is not None: - if (module.params['source_vm_received']['name'] is None or - module.params['source_vm_received']['nic_label'] is None): - self.module.fail_json(msg="Please provide both VM name and NIC Label") - self.source_vm_received_name = module.params['source_vm_received']['name'] - self.source_vm_received_nic_label = module.params['source_vm_received']['nic_label'] - if module.params['destination_vm'] is not None: - if 
(module.params['destination_vm']['name'] is None or - module.params['destination_vm']['nic_label'] is None): - self.module.fail_json(msg="Please provide both VM name and NIC Label") - self.destination_vm_name = module.params['destination_vm']['name'] - self.destination_vm_nic_label = module.params['destination_vm']['nic_label'] + if module.params["source_vm_transmitted"] is not None: + if ( + module.params["source_vm_transmitted"]["name"] is None + or module.params["source_vm_transmitted"]["nic_label"] is None + ): + self.module.fail_json( + msg="Please provide both VM name and NIC Label" + ) + self.source_vm_transmitted_name = module.params[ + "source_vm_transmitted" + ]["name"] + self.source_vm_transmitted_nic_label = module.params[ + "source_vm_transmitted" + ]["nic_label"] + if module.params["source_vm_received"] is not None: + if ( + module.params["source_vm_received"]["name"] is None + or module.params["source_vm_received"]["nic_label"] is None + ): + self.module.fail_json( + msg="Please provide both VM name and NIC Label" + ) + self.source_vm_received_name = module.params["source_vm_received"][ + "name" + ] + self.source_vm_received_nic_label = module.params[ + "source_vm_received" + ]["nic_label"] + if module.params["destination_vm"] is not None: + if ( + module.params["destination_vm"]["name"] is None + or module.params["destination_vm"]["nic_label"] is None + ): + self.module.fail_json( + msg="Please provide both VM name and NIC Label" + ) + self.destination_vm_name = module.params["destination_vm"]["name"] + self.destination_vm_nic_label = module.params["destination_vm"][ + "nic_label" + ] def set_operation(self): """Sets the operation according to state""" - if self.state == 'absent': - self.operation = 'remove' - elif self.state == 'present' and self.find_session_by_name() is None: - self.operation = 'add' + if self.state == "absent": + self.operation = "remove" + elif self.state == "present" and self.find_session_by_name() is None: + self.operation = "add" else: - self.operation = 'edit' + self.operation = "edit" def find_session_by_name(self): """Finds a session by name @@ -278,7 +309,9 @@ def get_vm_port(self, vm_name, nic_label): """ vm = find_vm_by_name(self.content, vm_name) if vm is None: - self.module.fail_json(msg="There is no VM with the name: {0:s}.".format(vm_name)) + self.module.fail_json( + msg="There is no VM with the name: {0:s}.".format(vm_name) + ) for hardware in vm.config.hardware.device: if isinstance(hardware, vim.vm.device.VirtualVmxnet3): if hardware.deviceInfo.label == nic_label: @@ -287,42 +320,63 @@ def get_vm_port(self, vm_name, nic_label): def set_port_for_vm(self): """Sets the ports, to the VM's specified port.""" - if hasattr(self, 'source_vm_transmitted_name') and hasattr(self, 'source_vm_transmitted_nic_label'): - port = self.get_vm_port(self.source_vm_transmitted_name, self.source_vm_transmitted_nic_label) + if hasattr(self, "source_vm_transmitted_name") and hasattr( + self, "source_vm_transmitted_nic_label" + ): + port = self.get_vm_port( + self.source_vm_transmitted_name, + self.source_vm_transmitted_nic_label, + ) if port is not None: self.source_port_transmitted = port else: self.module.fail_json( - msg="No port could be found for VM: {0:s} NIC: {1:s}".format(self.source_vm_transmitted_name, - self.source_vm_transmitted_nic_label)) - if hasattr(self, 'source_vm_received_name') and hasattr(self, 'source_vm_received_nic_label'): - port = self.get_vm_port(self.source_vm_received_name, self.source_vm_received_nic_label) + msg="No port could be 
found for VM: {0:s} NIC: {1:s}".format( + self.source_vm_transmitted_name, + self.source_vm_transmitted_nic_label, + ) + ) + if hasattr(self, "source_vm_received_name") and hasattr( + self, "source_vm_received_nic_label" + ): + port = self.get_vm_port( + self.source_vm_received_name, self.source_vm_received_nic_label + ) if port is not None: self.source_port_received = port else: self.module.fail_json( - msg="No port could be found for VM: {0:s} NIC: {1:s}".format(self.source_vm_received_name, - self.source_vm_received_nic_label)) - if hasattr(self, 'destination_vm_name') and hasattr(self, 'destination_vm_nic_label'): - port = self.get_vm_port(self.destination_vm_name, self.destination_vm_nic_label) + msg="No port could be found for VM: {0:s} NIC: {1:s}".format( + self.source_vm_received_name, + self.source_vm_received_nic_label, + ) + ) + if hasattr(self, "destination_vm_name") and hasattr( + self, "destination_vm_nic_label" + ): + port = self.get_vm_port( + self.destination_vm_name, self.destination_vm_nic_label + ) if port is not None: self.destination_port = port else: self.module.fail_json( - msg="No port could be found for VM: {0:s} NIC: {1:s}".format(self.destination_vm_name, - self.destination_vm_nic_label)) + msg="No port could be found for VM: {0:s} NIC: {1:s}".format( + self.destination_vm_name, self.destination_vm_nic_label + ) + ) def process_operation(self): """Calls the create or delete function based on the operation""" self.set_operation() - if self.operation == 'remove': + if self.operation == "remove": results = self.remove_vspan_session() self.module.exit_json(**results) - if self.operation == 'add': + if self.operation == "add": self.set_port_for_vm() results = self.add_vspan_session() self.module.exit_json(**results) - if self.operation == 'edit': + if self.operation == "edit": self.remove_vspan_session() self.set_port_for_vm() results = self.add_vspan_session() @@ -340,13 +394,15 @@ def set_port_security_promiscuous(self, ports, state): # Creating the new port policy port_spec = [] vim_bool = vim.BoolPolicy(value=state) - port_policy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy(allowPromiscuous=vim_bool) - port_settings = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy(securityPolicy=port_policy) + port_policy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy( + allowPromiscuous=vim_bool + ) + port_settings = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy( + securityPolicy=port_policy + ) for port in ports: temp_port_spec = vim.dvs.DistributedVirtualPort.ConfigSpec( - operation="edit", - key=port, - setting=port_settings + operation="edit", key=port, setting=port_settings ) port_spec.append(temp_port_spec) @@ -373,35 +429,43 @@ def turn_off_promiscuous(self): for port in session_ports: if vspan_session.name == self.name: ports_of_selected_session.append(port) - elif not(port in ports): + elif not (port in ports): ports.append(port) if vspan_session.sourcePortTransmitted is not None: session_ports = vspan_session.sourcePortTransmitted.portKey for port in session_ports: if vspan_session.name == self.name: ports_of_selected_session.append(port) - elif not(port in ports): + elif not (port in ports): ports.append(port) if vspan_session.destinationPort is not None: session_ports = vspan_session.destinationPort.portKey for port in session_ports: if vspan_session.name == self.name: ports_of_selected_session.append(port) - elif not(port in ports): + elif not (port in ports): ports.append(port) promiscuous_ports = [] if ports: - 
dv_ports = self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=ports)) + dv_ports = self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=ports) + ) # If a port is promiscuous set disable it, and add it to the array to enable it after the changes are made. for dv_port in dv_ports: - if dv_port.config.setting.securityPolicy.allowPromiscuous.value: + if ( + dv_port.config.setting.securityPolicy.allowPromiscuous.value + ): self.set_port_security_promiscuous([dv_port.key], False) self.modified_ports.update({dv_port.key: True}) promiscuous_ports.append(dv_port.key) if ports_of_selected_session: - current_dv_ports = self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=ports_of_selected_session)) + current_dv_ports = self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=ports_of_selected_session) + ) for dv_port in current_dv_ports: - if dv_port.config.setting.securityPolicy.allowPromiscuous.value: + if ( + dv_port.config.setting.securityPolicy.allowPromiscuous.value + ): self.set_port_security_promiscuous([dv_port.key], False) self.modified_ports.update({dv_port.key: True}) # Return the promiscuous ports array, to set them back after the config is finished. @@ -414,12 +478,14 @@ def delete_mirroring_session(self, key): key : str Key of the Session """ - session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession( - key=key - ) + session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession(key=key) config_version = self.dv_switch.config.configVersion - s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="remove") - c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version) + s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec( + vspanSession=session, operation="remove" + ) + c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec( + vspanConfigSpec=[s_spec], configVersion=config_version + ) task = self.dv_switch.ReconfigureDvs_Task(c_spec) try: wait_for_task(task) @@ -434,8 +500,12 @@ def restore_original_state(self): if self.deleted_session is not None: session = self.deleted_session config_version = self.dv_switch.config.configVersion - s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add") - c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version) + s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec( + vspanSession=session, operation="add" + ) + c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec( + vspanConfigSpec=[s_spec], configVersion=config_version + ) # Revert the delete task = self.dv_switch.ReconfigureDvs_Task(c_spec) try: @@ -449,7 +519,11 @@ def remove_vspan_session(self): results = dict(changed=False, result="") mirror_session = self.find_session_by_name() if mirror_session is None: - results['result'] = "There is no VSpanSession with the name: {0:s}.".format(self.name) + results[ + "result" + ] = "There is no VSpanSession with the name: {0:s}.".format( + self.name + ) return results promiscuous_ports = self.turn_off_promiscuous() session_key = mirror_session.key @@ -460,8 +534,8 @@ def remove_vspan_session(self): # Set back the promiscuous ports if promiscuous_ports: self.set_port_security_promiscuous(promiscuous_ports, True) - results['changed'] = True - results['result'] = 'VSpan Session has been deleted' + results["changed"] = True + results["result"] = "VSpan Session has been deleted" return results def 
check_if_session_name_is_free(self): @@ -480,65 +554,141 @@ def create_vspan_session(self): """Builds up the session, adds the parameters that we specified, then creates it on the vSwitch""" session = vim.dvs.VmwareDistributedVirtualSwitch.VspanSession( - name=self.name, - enabled=True + name=self.name, enabled=True ) if self.session_type is not None: session.sessionType = self.session_type - if self.session_type == 'encapsulatedRemoteMirrorSource': + if self.session_type == "encapsulatedRemoteMirrorSource": if self.source_port_received is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_received)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_received)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.source_port_received) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.source_port_received + ) + ) session.sourcePortReceived = port if self.source_port_transmitted is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_transmitted)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_transmitted)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.source_port_transmitted) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.source_port_transmitted + ) + ) session.sourcePortTransmitted = port if self.destination_port is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(ipAddress=str(self.destination_port)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + ipAddress=str(self.destination_port) + ) session.destinationPort = port - if self.session_type == 'remoteMirrorSource': + if self.session_type == "remoteMirrorSource": if self.source_port_received is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_received)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_received)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.source_port_received) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.source_port_received + ) + ) session.sourcePortReceived = port if self.source_port_transmitted is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_transmitted)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_transmitted)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.source_port_transmitted) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.source_port_transmitted + ) + ) session.sourcePortTransmitted = port if self.destination_port is not None: - port = 
vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(uplinkPortName=str(self.destination_port)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + uplinkPortName=str(self.destination_port) + ) session.destinationPort = port - if self.session_type == 'remoteMirrorDest': + if self.session_type == "remoteMirrorDest": if self.source_port_received is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(vlans=[int(self.source_port_received)]) - if int(self.source_port_received) not in self.dv_switch.QueryUsedVlanIdInDvs(): - self.module.fail_json(msg="Couldn't find vlan: {0:s}".format(self.source_port_received)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + vlans=[int(self.source_port_received)] + ) + if ( + int(self.source_port_received) + not in self.dv_switch.QueryUsedVlanIdInDvs() + ): + self.module.fail_json( + msg="Couldn't find vlan: {0:s}".format( + self.source_port_received + ) + ) session.sourcePortReceived = port if self.destination_port is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.destination_port)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.destination_port)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.destination_port) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.destination_port + ) + ) session.destinationPort = port - if self.session_type == 'dvPortMirror': + if self.session_type == "dvPortMirror": if self.source_port_received is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_received)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_received)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.source_port_received) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.source_port_received + ) + ) session.sourcePortReceived = port if self.source_port_transmitted is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.source_port_transmitted)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.source_port_transmitted)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.source_port_transmitted) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + self.source_port_transmitted + ) + ) session.sourcePortTransmitted = port if self.destination_port is not None: - port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts(portKey=str(self.destination_port)) - if not self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=port.portKey)): - self.module.fail_json(msg="Couldn't find port: {0:s}".format(self.destination_port)) + port = vim.dvs.VmwareDistributedVirtualSwitch.VspanPorts( + portKey=str(self.destination_port) + ) + if not self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=port.portKey) + ): + self.module.fail_json( + msg="Couldn't find port: {0:s}".format( + 
self.destination_port + ) + ) session.destinationPort = port if self.description is not None: session.description = self.description @@ -553,8 +703,12 @@ def create_vspan_session(self): if self.sampling_rate is not None: session.samplingRate = self.sampling_rate config_version = self.dv_switch.config.configVersion - s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec(vspanSession=session, operation="add") - c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(vspanConfigSpec=[s_spec], configVersion=config_version) + s_spec = vim.dvs.VmwareDistributedVirtualSwitch.VspanConfigSpec( + vspanSession=session, operation="add" + ) + c_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec( + vspanConfigSpec=[s_spec], configVersion=config_version + ) task = self.dv_switch.ReconfigureDvs_Task(c_spec) try: wait_for_task(task) @@ -567,12 +721,22 @@ def add_vspan_session(self): results = dict(changed=False, result="") promiscous_ports = self.turn_off_promiscuous() if not self.check_if_session_name_is_free(): - self.module.fail_json(msg="There is another VSpan Session with the name: {0:s}.".format(self.name)) + self.module.fail_json( + msg="There is another VSpan Session with the name: {0:s}.".format( + self.name + ) + ) # Locate the ports, we want to use dv_ports = None - ports = [str(self.source_port_received), str(self.source_port_transmitted), str(self.destination_port)] + ports = [ + str(self.source_port_received), + str(self.source_port_transmitted), + str(self.destination_port), + ] if ports: - dv_ports = self.dv_switch.FetchDVPorts(vim.dvs.PortCriteria(portKey=ports)) + dv_ports = self.dv_switch.FetchDVPorts( + vim.dvs.PortCriteria(portKey=ports) + ) for dv_port in dv_ports: if dv_port.config.setting.securityPolicy.allowPromiscuous.value: self.set_port_security_promiscuous([dv_port.key], False) @@ -580,53 +744,76 @@ def add_vspan_session(self): # Now we can create the VspanSession self.create_vspan_session() # Finally we can set the destination port to promiscuous mode - if self.session_type == 'dvPortMirror' or self.session_type == 'remoteMirrorDest': - self.set_port_security_promiscuous([str(self.destination_port)], True) + if ( + self.session_type == "dvPortMirror" + or self.session_type == "remoteMirrorDest" + ): + self.set_port_security_promiscuous( + [str(self.destination_port)], True + ) # Set Back the Promiscuous ports if promiscous_ports: self.set_port_security_promiscuous(promiscous_ports, True) - results['changed'] = True - results['result'] = 'Mirroring session has been created.' + results["changed"] = True + results["result"] = "Mirroring session has been created." 
return results def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - switch=dict(type='str', required=True, aliases=['switch_name']), - name=dict(type='str', required=True), - state=dict(type='str', required=True, choices=['present', 'absent']), - session_type=dict(type='str', default='dvPortMirror', choices=['dvPortMirror', - 'encapsulatedRemoteMirrorSource', - 'remoteMirrorDest', - 'remoteMirrorSource']), - enabled=dict(type='bool', default=True), - description=dict(type='str'), - source_port_transmitted=dict(type='str'), - source_port_received=dict(type='str'), - destination_port=dict(type='str'), - encapsulation_vlan_id=dict(type='int'), - strip_original_vlan=dict(type='bool'), - mirrored_packet_length=dict(type='int'), - normal_traffic_allowed=dict(type='bool'), - sampling_rate=dict(type='int'), - source_vm_transmitted=dict(type='dict', - options=dict( - name=dict(type='str'), - nic_label=dict(type='str'))), - source_vm_received=dict(type='dict', - options=dict( - name=dict(type='str'), - nic_label=dict(type='str'))), - destination_vm=dict(type='dict', - options=dict( - name=dict(type='str'), - nic_label=dict(type='str'))), - )) - module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) + argument_spec.update( + dict( + switch=dict(type="str", required=True, aliases=["switch_name"]), + name=dict(type="str", required=True), + state=dict( + type="str", required=True, choices=["present", "absent"] + ), + session_type=dict( + type="str", + default="dvPortMirror", + choices=[ + "dvPortMirror", + "encapsulatedRemoteMirrorSource", + "remoteMirrorDest", + "remoteMirrorSource", + ], + ), + enabled=dict(type="bool", default=True), + description=dict(type="str"), + source_port_transmitted=dict(type="str"), + source_port_received=dict(type="str"), + destination_port=dict(type="str"), + encapsulation_vlan_id=dict(type="int"), + strip_original_vlan=dict(type="bool"), + mirrored_packet_length=dict(type="int"), + normal_traffic_allowed=dict(type="bool"), + sampling_rate=dict(type="int"), + source_vm_transmitted=dict( + type="dict", + options=dict( + name=dict(type="str"), nic_label=dict(type="str") + ), + ), + source_vm_received=dict( + type="dict", + options=dict( + name=dict(type="str"), nic_label=dict(type="str") + ), + ), + destination_vm=dict( + type="dict", + options=dict( + name=dict(type="str"), nic_label=dict(type="str") + ), + ), + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) session = VMwareVspanSession(module) session.process_operation() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vswitch.py b/plugins/modules/vmware_vswitch.py index 5691269..5871f1f 100644 --- a/plugins/modules/vmware_vswitch.py +++ b/plugins/modules/vmware_vswitch.py @@ -6,16 +6,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vmware_vswitch short_description: Manage a VMware Standard Switch to an ESXi host. 
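Nearly every hunk in this patch is a mechanical Black-style change: single quotes become double quotes and long calls are wrapped, with the logic left alone. One quick way to convince yourself of that for any of the touched modules is to compare the syntax tree of the file before and after the reformat. The sketch below is editorial and not part of the patch; the file path and git ref are illustrative assumptions.

# Minimal sketch: confirm a quote-style/wrapping reformat leaves the AST unchanged.
import ast
import subprocess

def ast_unchanged(path, ref="HEAD~1"):
    # Old revision of the file from git; new revision from the working tree.
    old_src = subprocess.check_output(["git", "show", "%s:%s" % (ref, path)])
    with open(path, "rb") as handle:
        new_src = handle.read()
    # ast.dump() ignores formatting, so equal dumps mean identical syntax trees.
    return ast.dump(ast.parse(old_src)) == ast.dump(ast.parse(new_src))

if __name__ == "__main__":
    print(ast_unchanged("plugins/modules/vmware_vswitch.py"))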
@@ -69,9 +70,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Add a VMware vSwitch vmware_vswitch: hostname: '{{ esxi_hostname }}' @@ -113,7 +114,7 @@ nic_name: vmnic0 mtu: 9000 delegate_to: localhost -''' +""" RETURN = """ result: @@ -129,7 +130,10 @@ pass from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import PyVmomi, vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + PyVmomi, + vmware_argument_spec, +) from ansible.module_utils._text import to_native @@ -138,24 +142,28 @@ def __init__(self, module): super(VMwareHostVirtualSwitch, self).__init__(module) self.host_system = None self.vss = None - self.switch = module.params['switch'] - self.number_of_ports = module.params['number_of_ports'] - self.nics = module.params['nics'] - self.mtu = module.params['mtu'] - self.state = module.params['state'] - esxi_hostname = module.params['esxi_hostname'] + self.switch = module.params["switch"] + self.number_of_ports = module.params["number_of_ports"] + self.nics = module.params["nics"] + self.mtu = module.params["mtu"] + self.state = module.params["state"] + esxi_hostname = module.params["esxi_hostname"] hosts = self.get_all_host_objs(esxi_host_name=esxi_hostname) if hosts: self.host_system = hosts[0] else: - self.module.fail_json(msg="Failed to get details of ESXi server." - " Please specify esxi_hostname.") + self.module.fail_json( + msg="Failed to get details of ESXi server." + " Please specify esxi_hostname." + ) - if self.params.get('state') == 'present': + if self.params.get("state") == "present": # Gather information about all vSwitches and Physical NICs network_manager = self.host_system.configManager.networkSystem - available_pnic = [pnic.device for pnic in network_manager.networkInfo.pnic] + available_pnic = [ + pnic.device for pnic in network_manager.networkInfo.pnic + ] self.available_vswitches = dict() for available_vswitch in network_manager.networkInfo.vswitch: used_pnic = [] @@ -163,35 +171,44 @@ def __init__(self, module): # vSwitch contains all PNICs as string in format of 'key-vim.host.PhysicalNic-vmnic0' m_pnic = pnic.split("-", 3)[-1] used_pnic.append(m_pnic) - self.available_vswitches[available_vswitch.name] = dict(pnic=used_pnic, - mtu=available_vswitch.mtu, - num_ports=available_vswitch.spec.numPorts, - ) + self.available_vswitches[available_vswitch.name] = dict( + pnic=used_pnic, + mtu=available_vswitch.mtu, + num_ports=available_vswitch.spec.numPorts, + ) for desired_pnic in self.nics: if desired_pnic not in available_pnic: # Check if pnic does not exists - self.module.fail_json(msg="Specified Physical NIC '%s' does not" - " exists on given ESXi '%s'." % (desired_pnic, - self.host_system.name)) + self.module.fail_json( + msg="Specified Physical NIC '%s' does not" + " exists on given ESXi '%s'." + % (desired_pnic, self.host_system.name) + ) for vswitch in self.available_vswitches: - if desired_pnic in self.available_vswitches[vswitch]['pnic'] and vswitch != self.switch: + if ( + desired_pnic + in self.available_vswitches[vswitch]["pnic"] + and vswitch != self.switch + ): # Check if pnic is already part of some other vSwitch - self.module.fail_json(msg="Specified Physical NIC '%s' is already used" - " by vSwitch '%s'." % (desired_pnic, vswitch)) + self.module.fail_json( + msg="Specified Physical NIC '%s' is already used" + " by vSwitch '%s'." 
% (desired_pnic, vswitch) + ) def process_state(self): """ Manage internal state of vSwitch """ vswitch_states = { - 'absent': { - 'present': self.state_destroy_vswitch, - 'absent': self.state_exit_unchanged, + "absent": { + "present": self.state_destroy_vswitch, + "absent": self.state_exit_unchanged, + }, + "present": { + "present": self.state_update_vswitch, + "absent": self.state_create_vswitch, }, - 'present': { - 'present': self.state_update_vswitch, - 'absent': self.state_create_vswitch, - } } try: @@ -217,41 +234,60 @@ def state_create_vswitch(self): vss_spec.numPorts = self.number_of_ports vss_spec.mtu = self.mtu if self.nics: - vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=self.nics) + vss_spec.bridge = vim.host.VirtualSwitch.BondBridge( + nicDevice=self.nics + ) try: network_mgr = self.host_system.configManager.networkSystem if network_mgr: - network_mgr.AddVirtualSwitch(vswitchName=self.switch, - spec=vss_spec) - results['changed'] = True - results['result'] = "vSwitch '%s' is created successfully" % self.switch + network_mgr.AddVirtualSwitch( + vswitchName=self.switch, spec=vss_spec + ) + results["changed"] = True + results["result"] = ( + "vSwitch '%s' is created successfully" % self.switch + ) else: - self.module.fail_json(msg="Failed to find network manager for ESXi system") + self.module.fail_json( + msg="Failed to find network manager for ESXi system" + ) except vim.fault.AlreadyExists as already_exists: - results['result'] = "vSwitch with name %s already exists: %s" % (self.switch, - to_native(already_exists.msg)) + results["result"] = "vSwitch with name %s already exists: %s" % ( + self.switch, + to_native(already_exists.msg), + ) except vim.fault.ResourceInUse as resource_used: - self.module.fail_json(msg="Failed to add vSwitch '%s' as physical network adapter" - " being bridged is already in use: %s" % (self.switch, - to_native(resource_used.msg))) + self.module.fail_json( + msg="Failed to add vSwitch '%s' as physical network adapter" + " being bridged is already in use: %s" + % (self.switch, to_native(resource_used.msg)) + ) except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to add vSwitch '%s' due to host" - " configuration fault : %s" % (self.switch, - to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to add vSwitch '%s' due to host" + " configuration fault : %s" + % (self.switch, to_native(host_config_fault.msg)) + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Failed to add vSwitch '%s', this can be due to either of following :" - " 1. vSwitch Name exceeds the maximum allowed length," - " 2. Number of ports specified falls out of valid range," - " 3. Network policy is invalid," - " 4. Beacon configuration is invalid : %s" % (self.switch, - to_native(invalid_argument.msg))) + self.module.fail_json( + msg="Failed to add vSwitch '%s', this can be due to either of following :" + " 1. vSwitch Name exceeds the maximum allowed length," + " 2. Number of ports specified falls out of valid range," + " 3. Network policy is invalid," + " 4. 
Beacon configuration is invalid : %s" + % (self.switch, to_native(invalid_argument.msg)) + ) except vmodl.fault.SystemError as system_error: - self.module.fail_json(msg="Failed to add vSwitch '%s' due to : %s" % (self.switch, - to_native(system_error.msg))) + self.module.fail_json( + msg="Failed to add vSwitch '%s' due to : %s" + % (self.switch, to_native(system_error.msg)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to add vSwitch '%s' due to" - " generic exception : %s" % (self.switch, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to add vSwitch '%s' due to" + " generic exception : %s" + % (self.switch, to_native(generic_exc)) + ) self.module.exit_json(**results) def state_exit_unchanged(self): @@ -268,25 +304,36 @@ def state_destroy_vswitch(self): results = dict(changed=False, result="") try: - self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name) - results['changed'] = True - results['result'] = "vSwitch '%s' removed successfully." % self.vss.name + self.host_system.configManager.networkSystem.RemoveVirtualSwitch( + self.vss.name + ) + results["changed"] = True + results["result"] = ( + "vSwitch '%s' removed successfully." % self.vss.name + ) except vim.fault.NotFound as vswitch_not_found: - results['result'] = "vSwitch '%s' not available. %s" % (self.switch, - to_native(vswitch_not_found.msg)) + results["result"] = "vSwitch '%s' not available. %s" % ( + self.switch, + to_native(vswitch_not_found.msg), + ) except vim.fault.ResourceInUse as vswitch_in_use: - self.module.fail_json(msg="Failed to remove vSwitch '%s' as vSwitch" - " is used by several virtual" - " network adapters: %s" % (self.switch, - to_native(vswitch_in_use.msg))) + self.module.fail_json( + msg="Failed to remove vSwitch '%s' as vSwitch" + " is used by several virtual" + " network adapters: %s" + % (self.switch, to_native(vswitch_in_use.msg)) + ) except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to remove vSwitch '%s' due to host" - " configuration fault : %s" % (self.switch, - to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to remove vSwitch '%s' due to host" + " configuration fault : %s" + % (self.switch, to_native(host_config_fault.msg)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to remove vSwitch '%s' due to generic" - " exception : %s" % (self.switch, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to remove vSwitch '%s' due to generic" + " exception : %s" % (self.switch, to_native(generic_exc)) + ) self.module.exit_json(**results) @@ -295,71 +342,96 @@ def state_update_vswitch(self): Update vSwitch """ - results = dict(changed=False, result="No change in vSwitch '%s'" % self.switch) + results = dict( + changed=False, result="No change in vSwitch '%s'" % self.switch + ) vswitch_pnic_info = self.available_vswitches[self.switch] remain_pnic = [] for desired_pnic in self.nics: - if desired_pnic not in vswitch_pnic_info['pnic']: + if desired_pnic not in vswitch_pnic_info["pnic"]: remain_pnic.append(desired_pnic) diff = False # Update all nics - all_nics = vswitch_pnic_info['pnic'] + all_nics = vswitch_pnic_info["pnic"] if remain_pnic: all_nics += remain_pnic diff = True - if vswitch_pnic_info['mtu'] != self.mtu or \ - vswitch_pnic_info['num_ports'] != self.number_of_ports: + if ( + vswitch_pnic_info["mtu"] != self.mtu + or vswitch_pnic_info["num_ports"] != self.number_of_ports + ): diff = True try: if diff: vss_spec = 
vim.host.VirtualSwitch.Specification() if all_nics: - vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=all_nics) + vss_spec.bridge = vim.host.VirtualSwitch.BondBridge( + nicDevice=all_nics + ) vss_spec.numPorts = self.number_of_ports vss_spec.mtu = self.mtu network_mgr = self.host_system.configManager.networkSystem if network_mgr: - network_mgr.UpdateVirtualSwitch(vswitchName=self.switch, - spec=vss_spec) - results['changed'] = True - results['result'] = "vSwitch '%s' is updated successfully" % self.switch + network_mgr.UpdateVirtualSwitch( + vswitchName=self.switch, spec=vss_spec + ) + results["changed"] = True + results["result"] = ( + "vSwitch '%s' is updated successfully" % self.switch + ) else: - self.module.fail_json(msg="Failed to find network manager for ESXi system.") + self.module.fail_json( + msg="Failed to find network manager for ESXi system." + ) except vim.fault.ResourceInUse as resource_used: - self.module.fail_json(msg="Failed to update vSwitch '%s' as physical network adapter" - " being bridged is already in use: %s" % (self.switch, - to_native(resource_used.msg))) + self.module.fail_json( + msg="Failed to update vSwitch '%s' as physical network adapter" + " being bridged is already in use: %s" + % (self.switch, to_native(resource_used.msg)) + ) except vim.fault.NotFound as not_found: - self.module.fail_json(msg="Failed to update vSwitch with name '%s'" - " as it does not exists: %s" % (self.switch, - to_native(not_found.msg))) + self.module.fail_json( + msg="Failed to update vSwitch with name '%s'" + " as it does not exists: %s" + % (self.switch, to_native(not_found.msg)) + ) except vim.fault.HostConfigFault as host_config_fault: - self.module.fail_json(msg="Failed to update vSwitch '%s' due to host" - " configuration fault : %s" % (self.switch, - to_native(host_config_fault.msg))) + self.module.fail_json( + msg="Failed to update vSwitch '%s' due to host" + " configuration fault : %s" + % (self.switch, to_native(host_config_fault.msg)) + ) except vmodl.fault.InvalidArgument as invalid_argument: - self.module.fail_json(msg="Failed to update vSwitch '%s', this can be due to either of following :" - " 1. vSwitch Name exceeds the maximum allowed length," - " 2. Number of ports specified falls out of valid range," - " 3. Network policy is invalid," - " 4. Beacon configuration is invalid : %s" % (self.switch, - to_native(invalid_argument.msg))) + self.module.fail_json( + msg="Failed to update vSwitch '%s', this can be due to either of following :" + " 1. vSwitch Name exceeds the maximum allowed length," + " 2. Number of ports specified falls out of valid range," + " 3. Network policy is invalid," + " 4. 
Beacon configuration is invalid : %s" + % (self.switch, to_native(invalid_argument.msg)) + ) except vmodl.fault.SystemError as system_error: - self.module.fail_json(msg="Failed to update vSwitch '%s' due to : %s" % (self.switch, - to_native(system_error.msg))) + self.module.fail_json( + msg="Failed to update vSwitch '%s' due to : %s" + % (self.switch, to_native(system_error.msg)) + ) except vmodl.fault.NotSupported as not_supported: - self.module.fail_json(msg="Failed to update vSwitch '%s' as network adapter teaming policy" - " is set but is not supported : %s" % (self.switch, - to_native(not_supported.msg))) + self.module.fail_json( + msg="Failed to update vSwitch '%s' as network adapter teaming policy" + " is set but is not supported : %s" + % (self.switch, to_native(not_supported.msg)) + ) except Exception as generic_exc: - self.module.fail_json(msg="Failed to update vSwitch '%s' due to" - " generic exception : %s" % (self.switch, - to_native(generic_exc))) + self.module.fail_json( + msg="Failed to update vSwitch '%s' due to" + " generic exception : %s" + % (self.switch, to_native(generic_exc)) + ) self.module.exit_json(**results) def check_vswitch_configuration(self): @@ -370,9 +442,9 @@ def check_vswitch_configuration(self): """ self.vss = self.find_vswitch_by_name(self.host_system, self.switch) if self.vss is None: - return 'absent' + return "absent" else: - return 'present' + return "present" @staticmethod def find_vswitch_by_name(host, vswitch_name): @@ -393,21 +465,26 @@ def find_vswitch_by_name(host, vswitch_name): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - switch=dict(type='str', required=True, aliases=['switch_name']), - nics=dict(type='list', aliases=['nic_name'], default=[]), - number_of_ports=dict(type='int', default=128), - mtu=dict(type='int', default=1500), - state=dict(type='str', default='present', choices=['absent', 'present'])), - esxi_hostname=dict(type='str', aliases=['host']), + argument_spec.update( + dict( + switch=dict(type="str", required=True, aliases=["switch_name"]), + nics=dict(type="list", aliases=["nic_name"], default=[]), + number_of_ports=dict(type="int", default=128), + mtu=dict(type="int", default=1500), + state=dict( + type="str", default="present", choices=["absent", "present"] + ), + ), + esxi_hostname=dict(type="str", aliases=["host"]), ) - module = AnsibleModule(argument_spec=argument_spec, - supports_check_mode=False) + module = AnsibleModule( + argument_spec=argument_spec, supports_check_mode=False + ) host_virtual_switch = VMwareHostVirtualSwitch(module) host_virtual_switch.process_state() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vmware_vswitch_info.py b/plugins/modules/vmware_vswitch_info.py index 7eff797..8bf5bb8 100644 --- a/plugins/modules/vmware_vswitch_info.py +++ b/plugins/modules/vmware_vswitch_info.py @@ -4,15 +4,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vmware_vswitch_info short_description: Gathers info about an ESXi host's vswitch configurations @@ -43,9 +44,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = 
r''' +EXAMPLES = r""" - name: Gather vswitch info about all ESXi Host in given Cluster vmware_vswitch_info: hostname: '{{ vcenter_hostname }}' @@ -63,9 +64,9 @@ esxi_hostname: '{{ esxi_hostname }}' delegate_to: localhost register: all_vswitch_info -''' +""" -RETURN = r''' +RETURN = r""" hosts_vswitch_info: description: metadata about host's vswitch configuration returned: on success @@ -89,19 +90,25 @@ }, }, } -''' +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, + PyVmomi, +) class VswitchInfoManager(PyVmomi): """Class to gather vSwitch info""" + def __init__(self, module): super(VswitchInfoManager, self).__init__(module) - cluster_name = self.params.get('cluster_name', None) - esxi_host_name = self.params.get('esxi_hostname', None) - self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) + cluster_name = self.params.get("cluster_name", None) + esxi_host_name = self.params.get("esxi_hostname", None) + self.hosts = self.get_all_host_objs( + cluster_name=cluster_name, esxi_host_name=esxi_host_name + ) if not self.hosts: self.module.fail_json(msg="Failed to find host system.") @@ -128,7 +135,7 @@ def gather_vswitch_info(self): # we need to use the spec to get the ports # otherwise, the output might be different compared to the vswitch config module # (e.g. 5632 ports instead of 128) - num_ports=available_vswitch.spec.numPorts + num_ports=available_vswitch.spec.numPorts, ) hosts_vswitch_info[host.name] = temp_switch_dict return hosts_vswitch_info @@ -138,20 +145,21 @@ def main(): """Main""" argument_spec = vmware_argument_spec() argument_spec.update( - cluster_name=dict(type='str', required=False), - esxi_hostname=dict(type='str', required=False), + cluster_name=dict(type="str", required=False), + esxi_hostname=dict(type="str", required=False), ) module = AnsibleModule( argument_spec=argument_spec, - required_one_of=[ - ['cluster_name', 'esxi_hostname'], - ], - supports_check_mode=True + required_one_of=[["cluster_name", "esxi_hostname"]], + supports_check_mode=True, ) vmware_vswitch_mgr = VswitchInfoManager(module) - module.exit_json(changed=False, hosts_vswitch_info=vmware_vswitch_mgr.gather_vswitch_info()) + module.exit_json( + changed=False, + hosts_vswitch_info=vmware_vswitch_mgr.gather_vswitch_info(), + ) if __name__ == "__main__": diff --git a/plugins/modules/vsphere_copy.py b/plugins/modules/vsphere_copy.py index 5c161e2..2d147c6 100644 --- a/plugins/modules/vsphere_copy.py +++ b/plugins/modules/vsphere_copy.py @@ -5,16 +5,17 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", } -DOCUMENTATION = ''' +DOCUMENTATION = """ --- module: vsphere_copy short_description: Copy a file to a VMware datastore @@ -61,9 +62,9 @@ extends_documentation_fragment: - vmware.general.vmware.documentation -''' +""" -EXAMPLES = ''' +EXAMPLES = """ - name: Copy file to datastore using delegate_to vsphere_copy: hostname: '{{ vcenter_hostname }}' @@ -96,7 +97,7 @@ datastore: datastore2 path: other/remote/file delegate_to: other_system -''' +""" 
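The vsphere_copy examples above boil down to a single HTTP PUT against the vCenter/ESXi datastore browser, using the URL built by the vmware_path() helper that appears further down in this file's diff. The sketch below is editorial, not part of the patch; the host, datacenter, datastore, credentials and file paths are illustrative assumptions, and it uses the requests library rather than Ansible's open_url.

# Editorial sketch: the raw upload request behind vsphere_copy.
import requests
from urllib.parse import quote, urlencode

def datastore_url(host, datacenter, datastore, path):
    # Same shape as vmware_path(): /folder/<path>?dcPath=<datacenter>&dsName=<datastore>
    params = urlencode({"dcPath": datacenter, "dsName": datastore})
    return "https://%s/folder/%s?%s" % (host, quote(path.lstrip("/")), params)

url = datastore_url("vcenter.example.com", "DC1", "datastore1", "tmp/boot.iso")
with open("/tmp/boot.iso", "rb") as src:
    response = requests.put(
        url,
        data=src,
        auth=("administrator@vsphere.local", "secret"),
        verify=False,  # equivalent to the module's validate_certs option
    )
response.raise_for_status()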
import atexit import errno @@ -109,11 +110,13 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode, quote from ansible.module_utils._text import to_native from ansible.module_utils.urls import open_url -from ansible_collections.vmware.general.plugins.module_utils.vmware import vmware_argument_spec +from ansible_collections.vmware.general.plugins.module_utils.vmware import ( + vmware_argument_spec, +) def vmware_path(datastore, datacenter, path): - ''' Constructs a URL path that vSphere accepts reliably ''' + """ Constructs a URL path that vSphere accepts reliably """ path = "/folder/%s" % quote(path.lstrip("/")) # Due to a software bug in vSphere, it fails to handle ampersand in datacenter names # The solution is to do what vSphere does (when browsing) and double-encode ampersands, maybe others ? @@ -121,7 +124,7 @@ def vmware_path(datastore, datacenter, path): path = "/" + path params = dict(dsName=datastore) if datacenter: - datacenter = datacenter.replace('&', '%26') + datacenter = datacenter.replace("&", "%26") params["dcPath"] = datacenter params = urlencode(params) return "%s?%s" % (path, params) @@ -129,14 +132,16 @@ def vmware_path(datastore, datacenter, path): def main(): argument_spec = vmware_argument_spec() - argument_spec.update(dict( - hostname=dict(required=False, aliases=['host']), - username=dict(required=False, aliases=['login']), - src=dict(required=True, aliases=['name']), - datacenter=dict(required=False), - datastore=dict(required=True), - dest=dict(required=True, aliases=['path']), - timeout=dict(default=10, type='int')) + argument_spec.update( + dict( + hostname=dict(required=False, aliases=["host"]), + username=dict(required=False, aliases=["login"]), + src=dict(required=True, aliases=["name"]), + datacenter=dict(required=False), + datastore=dict(required=True), + dest=dict(required=True, aliases=["path"]), + timeout=dict(default=10, type="int"), + ) ) module = AnsibleModule( @@ -145,20 +150,25 @@ def main(): supports_check_mode=False, ) - if module.params.get('host'): - module.deprecate("The 'host' option is being replaced by 'hostname'", version='2.12') - if module.params.get('login'): - module.deprecate("The 'login' option is being replaced by 'username'", version='2.12') - - hostname = module.params['hostname'] - username = module.params['username'] - password = module.params.get('password') - src = module.params.get('src') - datacenter = module.params.get('datacenter') - datastore = module.params.get('datastore') - dest = module.params.get('dest') - validate_certs = module.params.get('validate_certs') - timeout = module.params.get('timeout') + if module.params.get("host"): + module.deprecate( + "The 'host' option is being replaced by 'hostname'", version="2.12" + ) + if module.params.get("login"): + module.deprecate( + "The 'login' option is being replaced by 'username'", + version="2.12", + ) + + hostname = module.params["hostname"] + username = module.params["username"] + password = module.params.get("password") + src = module.params.get("src") + datacenter = module.params.get("datacenter") + datastore = module.params.get("datastore") + dest = module.params.get("dest") + validate_certs = module.params.get("validate_certs") + timeout = module.params.get("timeout") try: fd = open(src, "rb") @@ -167,7 +177,7 @@ def main(): module.fail_json(msg="Failed to open src file %s" % to_native(e)) if os.stat(src).st_size == 0: - data = '' + data = "" else: data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ) atexit.register(data.close) @@ -175,8 
+185,10 @@ def main(): remote_path = vmware_path(datastore, datacenter, dest) if not all([hostname, username, password]): - module.fail_json(msg="One of following parameter is missing - hostname, username, password") - url = 'https://%s%s' % (hostname, remote_path) + module.fail_json( + msg="One of following parameter is missing - hostname, username, password" + ) + url = "https://%s%s" % (hostname, remote_path) headers = { "Content-Type": "application/octet-stream", @@ -184,20 +196,40 @@ def main(): } try: - r = open_url(url, data=data, headers=headers, method='PUT', timeout=timeout, - url_username=username, url_password=password, validate_certs=validate_certs, - force_basic_auth=True) + r = open_url( + url, + data=data, + headers=headers, + method="PUT", + timeout=timeout, + url_username=username, + url_password=password, + validate_certs=validate_certs, + force_basic_auth=True, + ) except socket.error as e: if isinstance(e.args, tuple): if len(e.args) > 0: if e[0] == errno.ECONNRESET: # vSphere resets connection if the file is in use and cannot be replaced - module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=to_native(e), url=url) + module.fail_json( + msg="Failed to upload, image probably in use", + status=None, + errno=e[0], + reason=to_native(e), + url=url, + ) else: module.fail_json(msg=to_native(e)) else: - module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e), - url=url, exception=traceback.format_exc()) + module.fail_json( + msg=str(e), + status=None, + errno=e[0], + reason=str(e), + url=url, + exception=traceback.format_exc(), + ) except Exception as e: error_code = -1 try: @@ -205,21 +237,36 @@ def main(): error_code = e[0] except (KeyError, TypeError): pass - module.fail_json(msg=to_native(e), status=None, errno=error_code, - reason=to_native(e), url=url, exception=traceback.format_exc()) + module.fail_json( + msg=to_native(e), + status=None, + errno=error_code, + reason=to_native(e), + url=url, + exception=traceback.format_exc(), + ) status = r.getcode() if 200 <= status < 300: module.exit_json(changed=True, status=status, reason=r.msg, url=url) else: - length = r.headers.get('content-length', None) - if r.headers.get('transfer-encoding', '').lower() == 'chunked': + length = r.headers.get("content-length", None) + if r.headers.get("transfer-encoding", "").lower() == "chunked": chunked = 1 else: chunked = 0 - module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url) + module.fail_json( + msg="Failed to upload", + errno=None, + status=status, + reason=r.msg, + length=length, + headers=dict(r.headers), + chunked=chunked, + url=url, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/plugins/modules/vsphere_file.py b/plugins/modules/vsphere_file.py index 1ed891b..673f88f 100644 --- a/plugins/modules/vsphere_file.py +++ b/plugins/modules/vsphere_file.py @@ -5,13 +5,16 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type -ANSIBLE_METADATA = {'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community'} +ANSIBLE_METADATA = { + "metadata_version": "1.1", + "status": ["preview"], + "supported_by": "community", +} -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: vsphere_file short_description: Manage files on a vCenter datastore @@ -75,9 +78,9 @@ default: 
file notes: - The vSphere folder API does not allow to remove directory objects. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create an empty file on a datastore vsphere_file: host: '{{ vhost }}' @@ -123,10 +126,10 @@ path: other/remote/file state: absent delegate_to: localhost -''' +""" -RETURN = r''' -''' +RETURN = r""" +""" import socket import sys @@ -140,69 +143,77 @@ def vmware_path(datastore, datacenter, path): - ''' Constructs a URL path that VSphere accepts reliably ''' - path = '/folder/{path}'.format(path=quote(path.strip('/'))) + """ Constructs a URL path that VSphere accepts reliably """ + path = "/folder/{path}".format(path=quote(path.strip("/"))) # Due to a software bug in vSphere, it fails to handle ampersand in datacenter names # The solution is to do what vSphere does (when browsing) and double-encode ampersands, maybe others ? - datacenter = datacenter.replace('&', '%26') - if not path.startswith('/'): - path = '/' + path + datacenter = datacenter.replace("&", "%26") + if not path.startswith("/"): + path = "/" + path params = dict(dsName=datastore) if datacenter: - params['dcPath'] = datacenter - return '{0}?{1}'.format(path, urlencode(params)) + params["dcPath"] = datacenter + return "{0}?{1}".format(path, urlencode(params)) def main(): module = AnsibleModule( argument_spec=dict( - host=dict(type='str', required=True, aliases=['hostname']), - username=dict(type='str', required=True), - password=dict(type='str', required=True, no_log=True), - datacenter=dict(type='str', required=True), - datastore=dict(type='str', required=True), - path=dict(type='str', required=True, aliases=['dest']), - state=dict(type='str', default='file', choices=['absent', 'directory', 'file', 'touch']), - timeout=dict(type='int', default=10), - validate_certs=dict(type='bool', default=True), + host=dict(type="str", required=True, aliases=["hostname"]), + username=dict(type="str", required=True), + password=dict(type="str", required=True, no_log=True), + datacenter=dict(type="str", required=True), + datastore=dict(type="str", required=True), + path=dict(type="str", required=True, aliases=["dest"]), + state=dict( + type="str", + default="file", + choices=["absent", "directory", "file", "touch"], + ), + timeout=dict(type="int", default=10), + validate_certs=dict(type="bool", default=True), ), supports_check_mode=True, ) - host = module.params.get('host') - username = module.params.get('username') - password = module.params.get('password') - src = module.params.get('src') - datacenter = module.params.get('datacenter') - datastore = module.params.get('datastore') - path = module.params.get('path') - validate_certs = module.params.get('validate_certs') - timeout = module.params.get('timeout') - state = module.params.get('state') + host = module.params.get("host") + username = module.params.get("username") + password = module.params.get("password") + src = module.params.get("src") + datacenter = module.params.get("datacenter") + datastore = module.params.get("datastore") + path = module.params.get("path") + validate_certs = module.params.get("validate_certs") + timeout = module.params.get("timeout") + state = module.params.get("state") remote_path = vmware_path(datastore, datacenter, path) - url = 'https://%s%s' % (host, remote_path) - - result = dict( - path=path, - size=None, - state=state, - status=None, - url=url, - ) + url = "https://%s%s" % (host, remote_path) + + result = dict(path=path, size=None, state=state, status=None, url=url) # Check if the file/directory exists try: - r = 
open_url(url, method='HEAD', timeout=timeout, - url_username=username, url_password=password, - validate_certs=validate_certs, force_basic_auth=True) + r = open_url( + url, + method="HEAD", + timeout=timeout, + url_username=username, + url_password=password, + validate_certs=validate_certs, + force_basic_auth=True, + ) except HTTPError as e: r = e except socket.error as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) except Exception as e: - module.fail_json(msg=to_native(e), errno=dir(e), reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=dir(e), reason=to_native(e), **result + ) if PY2: sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2 @@ -210,145 +221,218 @@ def main(): status = r.getcode() if status == 200: exists = True - result['size'] = int(r.headers.get('content-length', None)) + result["size"] = int(r.headers.get("content-length", None)) elif status == 404: exists = False else: - result['reason'] = r.msg - result['status'] = status - module.fail_json(msg="Failed to query for file '%s'" % path, errno=None, headers=dict(r.headers), **result) - - if state == 'absent': + result["reason"] = r.msg + result["status"] = status + module.fail_json( + msg="Failed to query for file '%s'" % path, + errno=None, + headers=dict(r.headers), + **result + ) + + if state == "absent": if not exists: module.exit_json(changed=False, **result) if module.check_mode: - result['reason'] = 'No Content' - result['status'] = 204 + result["reason"] = "No Content" + result["status"] = 204 else: try: - r = open_url(url, method='DELETE', timeout=timeout, - url_username=username, url_password=password, - validate_certs=validate_certs, force_basic_auth=True) + r = open_url( + url, + method="DELETE", + timeout=timeout, + url_username=username, + url_password=password, + validate_certs=validate_certs, + force_basic_auth=True, + ) except HTTPError as e: r = e except socket.error as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) except Exception as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) if PY2: sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2 - result['reason'] = r.msg - result['status'] = r.getcode() - - if result['status'] == 405: - result['state'] = 'directory' - module.fail_json(msg='Directories cannot be removed with this module', errno=None, headers=dict(r.headers), **result) - elif result['status'] != 204: - module.fail_json(msg="Failed to remove '%s'" % path, errno=None, headers=dict(r.headers), **result) - - result['size'] = None + result["reason"] = r.msg + result["status"] = r.getcode() + + if result["status"] == 405: + result["state"] = "directory" + module.fail_json( + msg="Directories cannot be removed with this module", + errno=None, + headers=dict(r.headers), + **result + ) + elif result["status"] != 204: + module.fail_json( + msg="Failed to remove '%s'" % path, + errno=None, + headers=dict(r.headers), + **result + ) + + result["size"] = None module.exit_json(changed=True, **result) # NOTE: Creating a file in a non-existing directory, then remove the file - elif state == 'directory': + elif state == "directory": if exists: module.exit_json(changed=False, 
**result) if module.check_mode: - result['reason'] = 'Created' - result['status'] = 201 + result["reason"] = "Created" + result["status"] = 201 else: # Create a temporary file in the new directory - remote_path = vmware_path(datastore, datacenter, path + '/foobar.tmp') - temp_url = 'https://%s%s' % (host, remote_path) + remote_path = vmware_path( + datastore, datacenter, path + "/foobar.tmp" + ) + temp_url = "https://%s%s" % (host, remote_path) try: - r = open_url(temp_url, method='PUT', timeout=timeout, - url_username=username, url_password=password, - validate_certs=validate_certs, force_basic_auth=True) + r = open_url( + temp_url, + method="PUT", + timeout=timeout, + url_username=username, + url_password=password, + validate_certs=validate_certs, + force_basic_auth=True, + ) except HTTPError as e: r = e except socket.error as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) except Exception as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) if PY2: sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2 - result['reason'] = r.msg - result['status'] = r.getcode() - if result['status'] != 201: - result['url'] = temp_url - module.fail_json(msg='Failed to create temporary file', errno=None, headers=dict(r.headers), **result) + result["reason"] = r.msg + result["status"] = r.getcode() + if result["status"] != 201: + result["url"] = temp_url + module.fail_json( + msg="Failed to create temporary file", + errno=None, + headers=dict(r.headers), + **result + ) try: - r = open_url(temp_url, method='DELETE', timeout=timeout, - url_username=username, url_password=password, - validate_certs=validate_certs, force_basic_auth=True) + r = open_url( + temp_url, + method="DELETE", + timeout=timeout, + url_username=username, + url_password=password, + validate_certs=validate_certs, + force_basic_auth=True, + ) except HTTPError as e: r = e except socket.error as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) except Exception as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) if PY2: sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2 status = r.getcode() if status != 204: - result['reason'] = r.msg - result['status'] = status - module.warn('Failed to remove temporary file ({reason})'.format(**result)) + result["reason"] = r.msg + result["status"] = status + module.warn( + "Failed to remove temporary file ({reason})".format( + **result + ) + ) module.exit_json(changed=True, **result) - elif state == 'file': + elif state == "file": if not exists: - result['state'] = 'absent' - result['status'] = status - module.fail_json(msg="File '%s' is absent, cannot continue" % path, **result) + result["state"] = "absent" + result["status"] = status + module.fail_json( + msg="File '%s' is absent, cannot continue" % path, **result + ) - result['status'] = status + result["status"] = status module.exit_json(changed=False, **result) - elif state == 'touch': + elif state == "touch": if exists: - result['state'] = 'file' + result["state"] = "file" module.exit_json(changed=False, **result) if module.check_mode: 
- result['reason'] = 'Created' - result['status'] = 201 + result["reason"] = "Created" + result["status"] = 201 else: try: - r = open_url(url, method='PUT', timeout=timeout, - url_username=username, url_password=password, - validate_certs=validate_certs, force_basic_auth=True) + r = open_url( + url, + method="PUT", + timeout=timeout, + url_username=username, + url_password=password, + validate_certs=validate_certs, + force_basic_auth=True, + ) except HTTPError as e: r = e except socket.error as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) except Exception as e: - module.fail_json(msg=to_native(e), errno=e[0], reason=to_native(e), **result) + module.fail_json( + msg=to_native(e), errno=e[0], reason=to_native(e), **result + ) if PY2: sys.exc_clear() # Avoid false positive traceback in fail_json() on Python 2 - result['reason'] = r.msg - result['status'] = r.getcode() - if result['status'] != 201: - module.fail_json(msg="Failed to touch '%s'" % path, errno=None, headers=dict(r.headers), **result) - - result['size'] = 0 - result['state'] = 'file' + result["reason"] = r.msg + result["status"] = r.getcode() + if result["status"] != 201: + module.fail_json( + msg="Failed to touch '%s'" % path, + errno=None, + headers=dict(r.headers), + **result + ) + + result["size"] = 0 + result["state"] = "file" module.exit_json(changed=True, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..90d4055 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +ansible diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..b7659ab --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,3 @@ +black==19.3b0 +flake8 +yamllint diff --git a/tests/unit/compat/builtins.py b/tests/unit/compat/builtins.py index f60ee67..bfc8adf 100644 --- a/tests/unit/compat/builtins.py +++ b/tests/unit/compat/builtins.py @@ -16,7 +16,8 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type # @@ -28,6 +29,6 @@ try: import __builtin__ except ImportError: - BUILTINS = 'builtins' + BUILTINS = "builtins" else: - BUILTINS = '__builtin__' + BUILTINS = "__builtin__" diff --git a/tests/unit/compat/mock.py b/tests/unit/compat/mock.py index 0972cd2..b45d6b5 100644 --- a/tests/unit/compat/mock.py +++ b/tests/unit/compat/mock.py @@ -16,12 +16,13 @@ # along with Ansible. If not, see . 
# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -''' +""" Compat module for Python3.x's unittest.mock module -''' +""" import sys # Python 2.7 @@ -40,7 +41,7 @@ try: from mock import * except ImportError: - print('You need the mock library installed on python2.x to run tests') + print("You need the mock library installed on python2.x to run tests") # Prior to 3.4.4, mock_open cannot handle binary read_data @@ -51,7 +52,7 @@ def _iterate_read_data(read_data): # Helper for mock_open: # Retrieve lines from read_data via a generator so that separate calls to # readline, read, and readlines are properly interleaved - sep = b'\n' if isinstance(read_data, bytes) else '\n' + sep = b"\n" if isinstance(read_data, bytes) else "\n" data_as_list = [l + sep for l in read_data.split(sep)] if data_as_list[-1] == sep: @@ -67,7 +68,7 @@ def _iterate_read_data(read_data): for line in data_as_list: yield line - def mock_open(mock=None, read_data=''): + def mock_open(mock=None, read_data=""): """ A helper function to create a mock to replace the use of `open`. It works for `open` called directly or used as a context manager. @@ -79,6 +80,7 @@ def mock_open(mock=None, read_data=''): `read_data` is a string for the `read` methoddline`, and `readlines` of the file handle to return. This is an empty string by default. """ + def _readlines_side_effect(*args, **kwargs): if handle.readlines.return_value is not None: return handle.readlines.return_value @@ -99,10 +101,13 @@ def _readline_side_effect(): global file_spec if file_spec is None: import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) + + file_spec = list( + set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))) + ) if mock is None: - mock = MagicMock(name='open', spec=open) + mock = MagicMock(name="open", spec=open) handle = MagicMock(spec=file_spec) handle.__enter__.return_value = handle diff --git a/tests/unit/compat/unittest.py b/tests/unit/compat/unittest.py index 98f08ad..df3379b 100644 --- a/tests/unit/compat/unittest.py +++ b/tests/unit/compat/unittest.py @@ -16,12 +16,13 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type -''' +""" Compat module for Python2.7's unittest module -''' +""" import sys @@ -33,6 +34,6 @@ # Need unittest2 on python2.6 from unittest2 import * except ImportError: - print('You need unittest2 installed on python2.6.x to run tests') + print("You need unittest2 installed on python2.6.x to run tests") else: from unittest import * diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 0ee47fb..c21188e 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -16,7 +16,8 @@ # along with Ansible. If not, see . 
# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import os @@ -27,7 +28,6 @@ class DictDataLoader(DataLoader): - def __init__(self, file_mapping=None): file_mapping = {} if file_mapping is None else file_mapping assert type(file_mapping) == dict @@ -68,7 +68,7 @@ def is_directory(self, path): def list_directory(self, path): ret = [] path = to_text(path) - for x in (list(self._file_mapping.keys()) + self._known_directories): + for x in list(self._file_mapping.keys()) + self._known_directories: if x.startswith(path): if os.path.dirname(x) == path: ret.append(os.path.basename(x)) @@ -86,7 +86,7 @@ def _build_known_directories(self): self._known_directories = [] for path in self._file_mapping: dirname = os.path.dirname(path) - while dirname not in ('/', ''): + while dirname not in ("/", ""): self._add_known_directory(dirname) dirname = os.path.dirname(dirname) diff --git a/tests/unit/mock/path.py b/tests/unit/mock/path.py index 42942e8..0e35675 100644 --- a/tests/unit/mock/path.py +++ b/tests/unit/mock/path.py @@ -2,4 +2,6 @@ from ansible.utils.path import unfrackpath -mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x) +mock_unfrackpath_noop = MagicMock( + spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x +) diff --git a/tests/unit/mock/procenv.py b/tests/unit/mock/procenv.py index fa088bd..6eba943 100644 --- a/tests/unit/mock/procenv.py +++ b/tests/unit/mock/procenv.py @@ -17,7 +17,8 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type import sys @@ -31,7 +32,7 @@ @contextmanager -def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): +def swap_stdin_and_argv(stdin_data="", argv_data=tuple()): """ context manager that temporarily masks the test runner's values for stdin and argv """ @@ -77,7 +78,10 @@ def swap_stdout(): class ModuleTestCase(unittest.TestCase): def setUp(self, module_args=None): if module_args is None: - module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} + module_args = { + "_ansible_remote_tmp": "/tmp", + "_ansible_keep_remote_files": False, + } args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) diff --git a/tests/unit/mock/vault_helper.py b/tests/unit/mock/vault_helper.py index dcce9c7..b34ae13 100644 --- a/tests/unit/mock/vault_helper.py +++ b/tests/unit/mock/vault_helper.py @@ -12,7 +12,8 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import absolute_import, division, print_function + __metaclass__ = type from ansible.module_utils._text import to_bytes @@ -21,19 +22,21 @@ class TextVaultSecret(VaultSecret): - '''A secret piece of text. ie, a password. Tracks text encoding. + """A secret piece of text. ie, a password. Tracks text encoding. 
The text encoding of the text may not be the default text encoding so - we keep track of the encoding so we encode it to the same bytes.''' + we keep track of the encoding so we encode it to the same bytes.""" def __init__(self, text, encoding=None, errors=None, _bytes=None): super(TextVaultSecret, self).__init__() self.text = text - self.encoding = encoding or 'utf-8' + self.encoding = encoding or "utf-8" self._bytes = _bytes - self.errors = errors or 'strict' + self.errors = errors or "strict" @property def bytes(self): - '''The text encoded with encoding, unless we specifically set _bytes.''' - return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) + """The text encoded with encoding, unless we specifically set _bytes.""" + return self._bytes or to_bytes( + self.text, encoding=self.encoding, errors=self.errors + ) diff --git a/tests/unit/mock/yaml_helper.py b/tests/unit/mock/yaml_helper.py index cc095fe..1a945f1 100644 --- a/tests/unit/mock/yaml_helper.py +++ b/tests/unit/mock/yaml_helper.py @@ -8,6 +8,7 @@ class YamlTestUtils(object): """Mixin class to combine with a unittest.TestCase subclass.""" + def _loader(self, stream): """Vault related tests will want to override this. @@ -40,8 +41,9 @@ def _dump_load_cycle(self, obj): obj_2 = loader.get_data() # dump the gen 2 objects directory to strings - string_from_object_dump_2 = self._dump_string(obj_2, - dumper=AnsibleDumper) + string_from_object_dump_2 = self._dump_string( + obj_2, dumper=AnsibleDumper + ) # The gen 1 and gen 2 yaml strings self.assertEqual(string_from_object_dump, string_from_object_dump_2) @@ -53,7 +55,9 @@ def _dump_load_cycle(self, obj): loader_3 = self._loader(stream_3) obj_3 = loader_3.get_data() - string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper) + string_from_object_dump_3 = self._dump_string( + obj_3, dumper=AnsibleDumper + ) self.assertEqual(obj, obj_3) # should be transitive, but... 
@@ -61,7 +65,7 @@ def _dump_load_cycle(self, obj): self.assertEqual(string_from_object_dump, string_from_object_dump_3) def _old_dump_load_cycle(self, obj): - '''Dump the passed in object to yaml, load it back up, dump again, compare.''' + """Dump the passed in object to yaml, load it back up, dump again, compare.""" stream = io.StringIO() yaml_string = self._dump_string(obj, dumper=AnsibleDumper) @@ -85,11 +89,25 @@ def _old_dump_load_cycle(self, obj): stream_obj_from_string = io.StringIO() if PY3: - yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper) - yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper) + yaml.dump( + obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper + ) + yaml.dump( + obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper + ) else: - yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None) - yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None) + yaml.dump( + obj_from_stream, + stream_obj_from_stream, + Dumper=AnsibleDumper, + encoding=None, + ) + yaml.dump( + obj_from_stream, + stream_obj_from_string, + Dumper=AnsibleDumper, + encoding=None, + ) yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue() yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue() @@ -98,24 +116,49 @@ def _old_dump_load_cycle(self, obj): stream_obj_from_string.seek(0) if PY3: - yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper) - yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper) + yaml_string_obj_from_stream = yaml.dump( + obj_from_stream, Dumper=AnsibleDumper + ) + yaml_string_obj_from_string = yaml.dump( + obj_from_string, Dumper=AnsibleDumper + ) else: - yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None) - yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None) + yaml_string_obj_from_stream = yaml.dump( + obj_from_stream, Dumper=AnsibleDumper, encoding=None + ) + yaml_string_obj_from_string = yaml.dump( + obj_from_string, Dumper=AnsibleDumper, encoding=None + ) assert yaml_string == yaml_string_obj_from_stream - assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == - yaml_string_stream_obj_from_string) + assert ( + yaml_string + == yaml_string_obj_from_stream + == yaml_string_obj_from_string + ) + assert ( + yaml_string + == yaml_string_obj_from_stream + == yaml_string_obj_from_string + == yaml_string_stream_obj_from_stream + == yaml_string_stream_obj_from_string + ) assert obj == obj_from_stream assert obj == obj_from_string assert obj == yaml_string_obj_from_stream assert obj == yaml_string_obj_from_string - assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - return {'obj': obj, - 'yaml_string': yaml_string, - 'yaml_string_from_stream': yaml_string_from_stream, - 'obj_from_stream': obj_from_stream, - 'obj_from_string': obj_from_string, - 'yaml_string_obj_from_string': yaml_string_obj_from_string} + assert ( + obj + == obj_from_stream + == obj_from_string + == yaml_string_obj_from_stream + == yaml_string_obj_from_string + ) + return { + "obj": obj, + "yaml_string": yaml_string, + "yaml_string_from_stream": yaml_string_from_stream, + "obj_from_stream": obj_from_stream, + "obj_from_string": 
obj_from_string, + "yaml_string_obj_from_string": yaml_string_obj_from_string, + } diff --git a/tests/unit/module_utils/test_vmware.py b/tests/unit/module_utils/test_vmware.py index 56fb43e..dbe24b0 100644 --- a/tests/unit/module_utils/test_vmware.py +++ b/tests/unit/module_utils/test_vmware.py @@ -4,13 +4,14 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function + __metaclass__ = type import ssl import sys import pytest -pyvmomi = pytest.importorskip('pyVmomi') +pyvmomi = pytest.importorskip("pyVmomi") from ansible_collections.vmware.general.tests.unit.compat import mock @@ -20,62 +21,62 @@ test_data = [ ( dict( - username='Administrator@vsphere.local', - password='Esxi@123$%', + username="Administrator@vsphere.local", + password="Esxi@123$%", hostname=False, validate_certs=False, ), "Hostname parameter is missing. Please specify this parameter in task or" - " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'" + " export environment variable like 'export VMWARE_HOST=ESXI_HOSTNAME'", ), ( dict( username=False, - password='Esxi@123$%', - hostname='esxi1', + password="Esxi@123$%", + hostname="esxi1", validate_certs=False, ), "Username parameter is missing. Please specify this parameter in task or" - " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'" + " export environment variable like 'export VMWARE_USER=ESXI_USERNAME'", ), ( dict( - username='Administrator@vsphere.local', + username="Administrator@vsphere.local", password=False, - hostname='esxi1', + hostname="esxi1", validate_certs=False, ), "Password parameter is missing. Please specify this parameter in task or" - " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'" + " export environment variable like 'export VMWARE_PASSWORD=ESXI_PASSWORD'", ), ( dict( - username='Administrator@vsphere.local', - password='Esxi@123$%', - hostname='esxi1', + username="Administrator@vsphere.local", + password="Esxi@123$%", + hostname="esxi1", validate_certs=True, ), - "Unknown error while connecting to vCenter or ESXi API at esxi1:443" + "Unknown error while connecting to vCenter or ESXi API at esxi1:443", ), ( dict( - username='Administrator@vsphere.local', - password='Esxi@123$%', - hostname='esxi1', - proxy_host='myproxyserver.com', + username="Administrator@vsphere.local", + password="Esxi@123$%", + hostname="esxi1", + proxy_host="myproxyserver.com", proxy_port=80, validate_certs=False, ), - " [proxy: myproxyserver.com:80]" + " [proxy: myproxyserver.com:80]", ), ] test_ids = [ - 'hostname', - 'username', - 'password', - 'validate_certs', - 'valid_http_proxy', + "hostname", + "username", + "password", + "validate_certs", + "valid_http_proxy", ] @@ -93,13 +94,10 @@ def fake_ansible_module(): def fake_connect_to_api(module, return_si=None): - return None, mock.Mock(), + return None, mock.Mock() -testdata = [ - ('HAS_PYVMOMI', 'PyVmomi'), - ('HAS_REQUESTS', 'requests'), -] +testdata = [("HAS_PYVMOMI", "PyVmomi"), ("HAS_REQUESTS", "requests")] @pytest.mark.parametrize("key,libname", testdata) @@ -108,12 +106,14 @@ def test_lib_loading_failure(monkeypatch, fake_ansible_module, key, libname): monkeypatch.setattr(vmware_module_utils, key, False) with pytest.raises(FailJsonException): vmware_module_utils.PyVmomi(fake_ansible_module) - error_str = 'Failed to import the required Python library (%s)' % libname + error_str = "Failed to import the required Python library (%s)" % libname assert 
fake_ansible_module.fail_json.called_once() - assert error_str in fake_ansible_module.fail_json.call_args[1]['msg'] + assert error_str in fake_ansible_module.fail_json.call_args[1]["msg"] -@pytest.mark.skipif(sys.version_info < (2, 7), reason="requires python2.7 and greater") +@pytest.mark.skipif( + sys.version_info < (2, 7), reason="requires python2.7 and greater" +) @pytest.mark.parametrize("params, msg", test_data, ids=test_ids) def test_required_params(request, params, msg, fake_ansible_module): """ Test if required params are correct or not""" @@ -121,47 +121,55 @@ def test_required_params(request, params, msg, fake_ansible_module): with pytest.raises(FailJsonException): vmware_module_utils.connect_to_api(fake_ansible_module) assert fake_ansible_module.fail_json.called_once() - assert msg in fake_ansible_module.fail_json.call_args[1]['msg'] + assert msg in fake_ansible_module.fail_json.call_args[1]["msg"] def test_validate_certs(monkeypatch, fake_ansible_module): """ Test if SSL is required or not""" fake_ansible_module.params = test_data[3][0] - monkeypatch.setattr(vmware_module_utils, 'ssl', None) + monkeypatch.setattr(vmware_module_utils, "ssl", None) with pytest.raises(FailJsonException): vmware_module_utils.PyVmomi(fake_ansible_module) - msg = 'pyVim does not support changing verification mode with python < 2.7.9.' \ - ' Either update python or use validate_certs=false.' + msg = ( + "pyVim does not support changing verification mode with python < 2.7.9." + " Either update python or use validate_certs=false." + ) assert fake_ansible_module.fail_json.called_once() - assert msg in fake_ansible_module.fail_json.call_args[1]['msg'] + assert msg in fake_ansible_module.fail_json.call_args[1]["msg"] def test_vmdk_disk_path_split(monkeypatch, fake_ansible_module): """ Test vmdk_disk_path_split function""" fake_ansible_module.params = test_data[0][0] - monkeypatch.setattr(vmware_module_utils, 'connect_to_api', fake_connect_to_api) + monkeypatch.setattr( + vmware_module_utils, "connect_to_api", fake_connect_to_api + ) pyv = vmware_module_utils.PyVmomi(fake_ansible_module) - v = pyv.vmdk_disk_path_split('[ds1] VM_0001/VM0001_0.vmdk') - assert v == ('ds1', 'VM_0001/VM0001_0.vmdk', 'VM0001_0.vmdk', 'VM_0001') + v = pyv.vmdk_disk_path_split("[ds1] VM_0001/VM0001_0.vmdk") + assert v == ("ds1", "VM_0001/VM0001_0.vmdk", "VM0001_0.vmdk", "VM_0001") def test_vmdk_disk_path_split_negative(monkeypatch, fake_ansible_module): """ Test vmdk_disk_path_split function""" fake_ansible_module.params = test_data[0][0] - monkeypatch.setattr(vmware_module_utils, 'connect_to_api', fake_connect_to_api) + monkeypatch.setattr( + vmware_module_utils, "connect_to_api", fake_connect_to_api + ) with pytest.raises(FailJsonException): pyv = vmware_module_utils.PyVmomi(fake_ansible_module) - pyv.vmdk_disk_path_split('[ds1]') + pyv.vmdk_disk_path_split("[ds1]") assert fake_ansible_module.fail_json.called_once() - assert 'Bad path' in fake_ansible_module.fail_json.call_args[1]['msg'] + assert "Bad path" in fake_ansible_module.fail_json.call_args[1]["msg"] -@pytest.mark.skipif(sys.version_info < (2, 7), reason="requires python2.7 and greater") +@pytest.mark.skipif( + sys.version_info < (2, 7), reason="requires python2.7 and greater" +) def test_connect_to_api_validate_certs(monkeypatch, fake_ansible_module): - monkeypatch.setattr(vmware_module_utils, 'connect', mock.Mock()) + monkeypatch.setattr(vmware_module_utils, "connect", mock.Mock()) def MockSSLContext(proto): ssl_context.proto = proto @@ -170,59 +178,65 @@ def 
MockSSLContext(proto): # New Python with SSLContext + validate_certs=True vmware_module_utils.connect.reset_mock() ssl_context = mock.Mock() - monkeypatch.setattr(vmware_module_utils.ssl, 'SSLContext', MockSSLContext) - fake_ansible_module.params['validate_certs'] = True + monkeypatch.setattr(vmware_module_utils.ssl, "SSLContext", MockSSLContext) + fake_ansible_module.params["validate_certs"] = True vmware_module_utils.connect_to_api(fake_ansible_module) assert ssl_context.proto == ssl.PROTOCOL_SSLv23 assert ssl_context.verify_mode == ssl.CERT_REQUIRED assert ssl_context.check_hostname is True vmware_module_utils.connect.SmartConnect.assert_called_once_with( - host='esxi1', + host="esxi1", port=443, - pwd='Esxi@123$%', - user='Administrator@vsphere.local', - sslContext=ssl_context) + pwd="Esxi@123$%", + user="Administrator@vsphere.local", + sslContext=ssl_context, + ) # New Python with SSLContext + validate_certs=False vmware_module_utils.connect.reset_mock() ssl_context = mock.Mock() - monkeypatch.setattr(vmware_module_utils.ssl, 'SSLContext', MockSSLContext) - fake_ansible_module.params['validate_certs'] = False + monkeypatch.setattr(vmware_module_utils.ssl, "SSLContext", MockSSLContext) + fake_ansible_module.params["validate_certs"] = False vmware_module_utils.connect_to_api(fake_ansible_module) assert ssl_context.proto == ssl.PROTOCOL_SSLv23 assert ssl_context.verify_mode == ssl.CERT_NONE assert ssl_context.check_hostname is False vmware_module_utils.connect.SmartConnect.assert_called_once_with( - host='esxi1', + host="esxi1", port=443, - pwd='Esxi@123$%', - user='Administrator@vsphere.local', - sslContext=ssl_context) + pwd="Esxi@123$%", + user="Administrator@vsphere.local", + sslContext=ssl_context, + ) # Old Python with no SSLContext + validate_certs=True vmware_module_utils.connect.reset_mock() ssl_context = mock.Mock() ssl_context.proto = None - monkeypatch.delattr(vmware_module_utils.ssl, 'SSLContext') - fake_ansible_module.params['validate_certs'] = True + monkeypatch.delattr(vmware_module_utils.ssl, "SSLContext") + fake_ansible_module.params["validate_certs"] = True with pytest.raises(FailJsonException): vmware_module_utils.connect_to_api(fake_ansible_module) assert ssl_context.proto is None - fake_ansible_module.fail_json.assert_called_once_with(msg=( - 'pyVim does not support changing verification mode with python ' - '< 2.7.9. Either update python or use validate_certs=false.')) + fake_ansible_module.fail_json.assert_called_once_with( + msg=( + "pyVim does not support changing verification mode with python " + "< 2.7.9. Either update python or use validate_certs=false." 
+ ) + ) assert not vmware_module_utils.connect.SmartConnect.called # Old Python with no SSLContext + validate_certs=False vmware_module_utils.connect.reset_mock() ssl_context = mock.Mock() ssl_context.proto = None - monkeypatch.delattr(vmware_module_utils.ssl, 'SSLContext', raising=False) - fake_ansible_module.params['validate_certs'] = False + monkeypatch.delattr(vmware_module_utils.ssl, "SSLContext", raising=False) + fake_ansible_module.params["validate_certs"] = False vmware_module_utils.connect_to_api(fake_ansible_module) assert ssl_context.proto is None vmware_module_utils.connect.SmartConnect.assert_called_once_with( - host='esxi1', + host="esxi1", port=443, - pwd='Esxi@123$%', - user='Administrator@vsphere.local') + pwd="Esxi@123$%", + user="Administrator@vsphere.local", + ) diff --git a/tests/unit/modules/utils.py b/tests/unit/modules/utils.py index ee96ce4..acf52a7 100644 --- a/tests/unit/modules/utils.py +++ b/tests/unit/modules/utils.py @@ -7,12 +7,12 @@ def set_module_args(args): - if '_ansible_remote_tmp' not in args: - args['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in args: - args['_ansible_keep_remote_files'] = False + if "_ansible_remote_tmp" not in args: + args["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in args: + args["_ansible_keep_remote_files"] = False - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) basic._ANSIBLE_ARGS = to_bytes(args) @@ -25,22 +25,23 @@ class AnsibleFailJson(Exception): def exit_json(*args, **kwargs): - if 'changed' not in kwargs: - kwargs['changed'] = False + if "changed" not in kwargs: + kwargs["changed"] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): - kwargs['failed'] = True + kwargs["failed"] = True raise AnsibleFailJson(kwargs) class ModuleTestCase(unittest.TestCase): - def setUp(self): - self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) + self.mock_module = patch.multiple( + basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json + ) self.mock_module.start() - self.mock_sleep = patch('time.sleep') + self.mock_sleep = patch("time.sleep") self.mock_sleep.start() set_module_args({}) self.addCleanup(self.mock_module.stop) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..1ef2ba2 --- /dev/null +++ b/tox.ini @@ -0,0 +1,32 @@ +[tox] +minversion = 1.4.2 +envlist = linters +skipsdist = True + +[testenv] +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:black] +install_command = pip install {opts} {packages} +commands = + black -v -l79 {toxinidir} + +[testenv:linters] +install_command = pip install {opts} {packages} +commands = + black -v -l79 --check {toxinidir} + flake8 {posargs} + yamllint -s . + +[testenv:venv] +commands = {posargs} + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. + +show-source = True +ignore = E123,E125,E203,E402,E501,E741,W503 +max-line-length = 160 +builtins = _ +exclude = .git,.tox,tests/unit/compat/