diff --git a/Makefile b/Makefile
index 9c522d2..ad6ef7f 100644
--- a/Makefile
+++ b/Makefile
@@ -29,3 +29,6 @@ test: deps
 	${activate} && ${python} -m pytest test
 .PHONY: test
 
+black: deps
+	${activate} && ${python} -m black src
+.PHONY: black
diff --git a/requirements.txt b/requirements.txt
index 2e205b4..b7d298b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,3 +10,4 @@ openstacksdk==3.3.0
 pytest==7.4.0
 mypy==1.13.0
 flake8==6.1.0
+black>=24.4.2
diff --git a/src/openstack_workload_generator/__main__.py b/src/openstack_workload_generator/__main__.py
index 13704d1..83bd2ec 100644
--- a/src/openstack_workload_generator/__main__.py
+++ b/src/openstack_workload_generator/__main__.py
@@ -31,62 +31,114 @@
 LOGGER = logging.getLogger()
 
-parser = argparse.ArgumentParser(
-    prog='Create workloads on openstack installations')
-
-parser.add_argument('--log_level', metavar='loglevel', type=str,
-                    default="INFO", help='The loglevel')
-
-parser.add_argument('--os_cloud', type=cloud_checker,
-                    default=os.environ.get("OS_CLOUD", "admin"),
-                    help='The openstack config to use, defaults to the value of the OS_CLOUD '
-                         'environment variable or "admin" if the variable is not set')
-
-parser.add_argument('--ansible_inventory', type=str, nargs="?",
-                    help="Dump the created servers as an ansible inventory to the specified directory, "
-                         "adds a ssh proxy jump for the hosts without a floating ip")
-
-parser.add_argument('--clouds_yaml', type=str, nargs="?",
-                    help="Generate a openstack clouds.yaml file")
-
-parser.add_argument('--wait_for_machines', action="store_true",
-                    help="Wait for every machine to be created "
-                         "(normally the provisioning only waits for machines which use floating ips)")
-
-parser.add_argument('--config', type=str,
-                    default="default.yaml",
-                    help='The config file for environment creation, define a path to the'
-                         ' yaml file or a subpath in the profiles folder')
+parser = argparse.ArgumentParser(prog="Create workloads on openstack installations")
+
+parser.add_argument(
+    "--log_level", metavar="loglevel", type=str, default="INFO", help="The loglevel"
+)
+
+parser.add_argument(
+    "--os_cloud",
+    type=cloud_checker,
+    default=os.environ.get("OS_CLOUD", "admin"),
+    help="The openstack config to use, defaults to the value of the OS_CLOUD "
+    'environment variable or "admin" if the variable is not set',
+)
+
+parser.add_argument(
+    "--ansible_inventory",
+    type=str,
+    nargs="?",
+    help="Dump the created servers as an ansible inventory to the specified directory, "
+    "adds a ssh proxy jump for the hosts without a floating ip",
+)
+
+parser.add_argument(
+    "--clouds_yaml", type=str, nargs="?", help="Use a specific clouds.yaml file"
+)
+
+parser.add_argument(
+    "--wait_for_machines",
+    action="store_true",
+    help="Wait for every machine to be created "
+    "(normally the provisioning only waits for machines which use floating ips)",
+)
+
+parser.add_argument(
+    "--generate_clouds_yaml",
+    type=str,
+    nargs="?",
+    help="Generate an OpenStack clouds.yaml file",
+)
+
+
+parser.add_argument(
+    "--config",
+    type=str,
+    default="default.yaml",
+    help="The config file for environment creation, define a path to the"
+    " yaml file or a subpath in the profiles folder",
+)
 
 exclusive_group_domain = parser.add_mutually_exclusive_group(required=True)
 
-exclusive_group_domain.add_argument('--create_domains', type=item_checker, nargs="+", default=None,
-                                    metavar="DOMAINNAME",
-                                    help='A list of domains to be created')
-
-exclusive_group_domain.add_argument('--delete_domains', type=item_checker, nargs="+", default=None,
-                                    metavar="DOMAINNAME",
-                                    help='A list of domains to be deleted, all child elements are recursively deleted')
+exclusive_group_domain.add_argument(
+    "--create_domains",
+    type=item_checker,
+    nargs="+",
+    default=None,
+    metavar="DOMAINNAME",
+    help="A list of domains to be created",
+)
+
+exclusive_group_domain.add_argument(
+    "--delete_domains",
+    type=item_checker,
+    nargs="+",
+    default=None,
+    metavar="DOMAINNAME",
+    help="A list of domains to be deleted, all child elements are recursively deleted",
+)
 
 exclusive_group_project = parser.add_mutually_exclusive_group(required=False)
 
-exclusive_group_project.add_argument('--create_projects', type=item_checker, nargs="+", default=None,
-                                     metavar="PROJECTNAME",
-                                     help='A list of projects to be created in the created domains')
-
-exclusive_group_project.add_argument('--delete_projects', type=item_checker, nargs="+", default=None,
-                                     metavar="PROJECTNAME",
-                                     help='A list of projects to be deleted in the created '
-                                          'domains, all child elements are recursively deleted')
+exclusive_group_project.add_argument(
+    "--create_projects",
+    type=item_checker,
+    nargs="+",
+    default=None,
+    metavar="PROJECTNAME",
+    help="A list of projects to be created in the created domains",
+)
+
+exclusive_group_project.add_argument(
+    "--delete_projects",
+    type=item_checker,
+    nargs="+",
+    default=None,
+    metavar="PROJECTNAME",
+    help="A list of projects to be deleted in the created "
+    "domains, all child elements are recursively deleted",
+)
 
 exclusive_group_machines = parser.add_mutually_exclusive_group(required=False)
 
-exclusive_group_machines.add_argument('--create_machines', type=item_checker, nargs="+", default=None,
-                                      metavar="SERVERNAME",
-                                      help='A list of vms to be created in the created domains')
-
-exclusive_group_machines.add_argument('--delete_machines', type=item_checker, nargs="+", default=None,
-                                      metavar="SERVERNAME",
-                                      help='A list of vms to be deleted in the created projects')
+exclusive_group_machines.add_argument(
+    "--create_machines",
+    type=item_checker,
+    nargs="+",
+    default=None,
+    metavar="SERVERNAME",
+    help="A list of vms to be created in the created domains",
+)
+
+exclusive_group_machines.add_argument(
+    "--delete_machines",
+    type=item_checker,
+    nargs="+",
+    default=None,
+    metavar="SERVERNAME",
+    help="A list of vms to be deleted in the created projects",
+)
 
 args = parser.parse_args()
 
@@ -97,7 +149,11 @@ def establish_connection():
-    config = loader.OpenStackConfig()
+    if args.clouds_yaml is None:
+        config = loader.OpenStackConfig()
+    else:
+        LOGGER.info(f"Loading connection configuration from {args.clouds_yaml}")
+        config = loader.OpenStackConfig(config_files=[args.clouds_yaml])
     cloud_config = config.get_one(args.os_cloud)
     return Connection(config=cloud_config)
 
@@ -123,20 +179,30 @@ def establish_connection():
     for workload_domain in workload_domains.values():
         for workload_project in workload_domain.get_projects(args.create_projects):
            if args.create_machines:
-                workload_project.get_and_create_machines(args.create_machines, args.wait_for_machines)
+                workload_project.get_and_create_machines(
+                    args.create_machines, args.wait_for_machines
+                )
             if args.ansible_inventory:
                 workload_project.dump_inventory_hosts(args.ansible_inventory)
-            if args.clouds_yaml:
-                clouds_yaml_data[f"{workload_domain.domain_name}-{workload_project.project_name}"] \
-                    = workload_project.get_clouds_yaml_data()
+            if args.generate_clouds_yaml:
+                clouds_yaml_data[
+                    f"{workload_domain.domain_name}-{workload_project.project_name}"
+                ] = workload_project.get_clouds_yaml_data()
             elif args.delete_machines:
-                for machine_obj in workload_project.get_machines(args.delete_machines):
+                for machine_obj in workload_project.get_machines(
+                    args.delete_machines
+                ):
                     machine_obj.delete_machine()
 
-    if args.clouds_yaml:
-        LOGGER.info(f"Creating a a clouds yaml : {args.clouds_yaml}")
+    if args.generate_clouds_yaml:
+        LOGGER.info(f"Creating a clouds.yaml: {args.generate_clouds_yaml}")
         clouds_yaml_data = {"clouds": clouds_yaml_data}
-        with open(args.clouds_yaml, 'w') as file:
-            yaml.dump(clouds_yaml_data, file, default_flow_style=False, explicit_start=True)
+        with open(args.generate_clouds_yaml, "w") as file:
+            yaml.dump(
+                clouds_yaml_data,
+                file,
+                default_flow_style=False,
+                explicit_start=True,
+            )
         sys.exit(0)
 elif args.delete_projects:
     conn = establish_connection()
diff --git a/src/openstack_workload_generator/entities/domain.py b/src/openstack_workload_generator/entities/domain.py
index 641e1ef..5ed3488 100644
--- a/src/openstack_workload_generator/entities/domain.py
+++ b/src/openstack_workload_generator/entities/domain.py
@@ -19,9 +19,12 @@ def __init__(self, conn: Connection, domain_name: str):
         self.obj: Domain = self.conn.identity.find_domain(domain_name)
         if self.obj:
             DomainCache.add(self.obj.id, self.obj.name)
-        self.workload_user = WorkloadGeneratorDomain._get_user(conn, domain_name, self.obj)
-        self.workload_projects: dict[str, WorkloadGeneratorProject] = WorkloadGeneratorDomain._get_projects(
-            conn, self.obj, self.workload_user)
+        self.workload_user = WorkloadGeneratorDomain._get_user(
+            conn, domain_name, self.obj
+        )
+        self.workload_projects: dict[str, WorkloadGeneratorProject] = (
+            WorkloadGeneratorDomain._get_projects(conn, self.obj, self.workload_user)
+        )
 
     @staticmethod
     def _get_user(conn: Connection, domain_name: str, obj: Domain):
@@ -30,13 +33,16 @@ def _get_user(conn: Connection, domain_name: str, obj: Domain):
         return WorkloadGeneratorUser(conn, f"{domain_name}-admin", obj)
 
     @staticmethod
-    def _get_projects(conn: Connection, domain: Domain | None, user: WorkloadGeneratorUser | None) \
-            -> dict[str, WorkloadGeneratorProject]:
+    def _get_projects(
+        conn: Connection, domain: Domain | None, user: WorkloadGeneratorUser | None
+    ) -> dict[str, WorkloadGeneratorProject]:
         if not domain or not user:
             return dict()
         result: dict[str, WorkloadGeneratorProject] = dict()
         for project in conn.identity.projects(domain_id=domain.id):
-            result[project.name] = WorkloadGeneratorProject(conn, project.name, domain, user)
+            result[project.name] = WorkloadGeneratorProject(
+                conn, project.name, domain, user
+            )
         return result
 
@@ -44,14 +50,14 @@ def create_and_get_domain(self) -> Domain:
             return self.obj
 
         self.obj = self.conn.identity.create_domain(
-            name=self.domain_name,
-            description="Automated creation",
-            enabled=True
+            name=self.domain_name, description="Automated creation", enabled=True
         )
         DomainCache.add(self.obj.id, self.obj.name)
         LOGGER.info(f"Created {DomainCache.ident_by_id(self.obj.id)}")
 
-        self.workload_user = WorkloadGeneratorDomain._get_user(self.conn, self.domain_name, self.obj)
+        self.workload_user = WorkloadGeneratorDomain._get_user(
+            self.conn, self.domain_name, self.obj
+        )
         return self.obj
 
     def disable_domain(self):
@@ -92,7 +98,9 @@ def create_and_get_projects(self, create_projects: list[str]):
         for project_name in create_projects:
             if project_name in self.workload_projects:
                 continue
-            project = WorkloadGeneratorProject(self.conn, project_name, self.obj, self.workload_user)
+            project = WorkloadGeneratorProject(
+                self.conn, project_name, self.obj, self.workload_user
+            )
             project.create_and_get_project()
             project.get_or_create_ssh_key()
             self.workload_projects[project_name] = project
diff --git a/src/openstack_workload_generator/entities/helpers.py b/src/openstack_workload_generator/entities/helpers.py
index 05f690a..e576e9e 100644
--- a/src/openstack_workload_generator/entities/helpers.py
+++ b/src/openstack_workload_generator/entities/helpers.py
@@ -14,22 +14,22 @@ class Config:
-    _config: dict[str, str | dict[str, str] | None] = \
-        {
-            'admin_domain_password': "",
-            'admin_vm_password': "",
-            'admin_vm_ssh_key': "",
-            'admin_vm_ssh_keypair_name': 'my_ssh_public_key',
-            'project_ipv4_subnet': '192.168.200.0/24',
-            'public_network': "public",
-            'network_mtu': '1500',
-            'number_of_floating_ips_per_project': "1",
-            'vm_flavor': 'SCS-1L-1',
-            'vm_image': 'Ubuntu 24.04',
-            'vm_volume_size_gb': "10",
-            'cloud_init_extra_script': """#!/bin/bash\necho "HELLO WORLD"; date > READY; whoami >> READY""",
-            'wait_for_server_timeout': "300",
-        }
+    _config: dict[str, str | dict[str, str] | None] = {
+        "admin_domain_password": "",
+        "admin_vm_password": "",
+        "admin_vm_ssh_key": "",
+        "admin_vm_ssh_keypair_name": "my_ssh_public_key",
+        "project_ipv4_subnet": "192.168.200.0/24",
+        "public_network": "public",
+        "network_mtu": "0",
+        "number_of_floating_ips_per_project": "1",
+        "vm_flavor": "SCS-1L-1",
+        "vm_image": "Ubuntu 24.04",
+        "vm_volume_size_gb": "10",
+        "verify_ssl_certificate": "false",
+        "cloud_init_extra_script": """#!/bin/bash\necho "HELLO WORLD"; date > READY; whoami >> READY""",
+        "wait_for_server_timeout": "300",
+    }
 
     _file: str | None = None
 
@@ -48,7 +48,9 @@ def get(key: str, regex: str = ".+", multi_line: bool = False) -> str:
         for value in values:
             matcher = re.compile(regex, re.MULTILINE | re.DOTALL)
             if not matcher.fullmatch(value):
-                LOGGER.error(f"{key} : >>>{value}<<< : does not match to regex >>>{regex}<<<")
+                LOGGER.error(
+                    f"{key} : >>>{value}<<< : does not match to regex >>>{regex}<<<"
+                )
                 sys.exit(1)
 
         if len(values) > 1:
@@ -58,23 +60,28 @@ def load_config(config_file: str):
-        potential_profile_file = \
-            str(os.path.realpath(
-                os.path.dirname(os.path.realpath(__file__))) + f"/../../../profiles/{config_file}")
+        potential_profile_file = str(
+            os.path.realpath(os.path.dirname(os.path.realpath(__file__)))
+            + f"/../../../profiles/{config_file}"
+        )
 
         if os.path.exists(config_file):
             Config._file = config_file
-        elif not str(config_file).startswith("/") and os.path.exists(potential_profile_file):
+        elif not str(config_file).startswith("/") and os.path.exists(
+            potential_profile_file
+        ):
             Config._file = potential_profile_file
         else:
-            LOGGER.error(f"Cannot find a profile at {config_file} or {potential_profile_file}")
+            LOGGER.error(
+                f"Cannot find a profile at {config_file} or {potential_profile_file}"
+            )
             sys.exit(1)
 
         Config._file = os.path.realpath(Config._file)
 
         try:
             LOGGER.info(f"Reading {Config._file}")
-            with open(str(Config._file), 'r') as file_fd:
+            with open(str(Config._file), "r") as file_fd:
                 Config._config.update(yaml.safe_load(file_fd))
 
         except Exception as e:
@@ -97,7 +104,8 @@ def check_config(cls):
     def show_effective_config():
         Config.check_config(Config)
         LOGGER.info(
-            "The effective configuration from %s : \n>>>\n---\n%s\n<<<" % (
+            "The effective configuration from %s : \n>>>\n---\n%s\n<<<"
+            % (
                 Config._file,
                 yaml.dump(Config._config, default_flow_style=False, width=10000),
             )
@@ -151,6 +159,14 @@ def get_admin_vm_ssh_key() -> str:
     def get_admin_domain_password() -> str:
         return Config.get("admin_domain_password", regex=r".{5,}")
 
+    @staticmethod
+    def get_verify_ssl_certificate() -> bool:
+        value = Config.get("verify_ssl_certificate", regex=r"true|false|True|False")
+        if value.lower() == "false":
+            return False
+        else:
+            return True
+
     @staticmethod
     def configured_quota_names(quota_category: str) -> list[str]:
         if quota_category in Config._config:
@@ -166,7 +182,9 @@ def quota(quota_name: str, quota_category: str, default_value: int) -> int:
             if isinstance(value, int):
                 return value
             else:
-                LOGGER.error(f"Quota {quota_category} -> {quota_name} is not an integer")
+                LOGGER.error(
+                    f"Quota {quota_category} -> {quota_name} is not an integer"
+                )
                 sys.exit(1)
         else:
             return default_value
@@ -198,7 +216,9 @@ def ident_by_id(project_id: str) -> str:
         if project_id not in ProjectCache.PROJECT_CACHE:
             raise RuntimeError(f"There is no project with id {project_id}")
         project = f'{ProjectCache.PROJECT_CACHE[project_id]["name"]}/{project_id}'
-        domain = DomainCache.ident_by_id(ProjectCache.PROJECT_CACHE[project_id]["domain_id"])
+        domain = DomainCache.ident_by_id(
+            ProjectCache.PROJECT_CACHE[project_id]["domain_id"]
+        )
         return f"project '{project}' in {domain}"
 
     @staticmethod
@@ -207,26 +227,26 @@ def add(project_id: str, data: dict[str, str]):
 
 def setup_logging(log_level: str) -> Tuple[logging.Logger, str]:
-    log_format_string = \
-        '%(asctime)-10s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s'
+    log_format_string = (
+        "%(asctime)-10s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
+    )
 
     logger = logging.getLogger()
     log_file = "STDOUT"
 
-    logging.basicConfig(format=log_format_string,
-                        level=log_level)
+    logging.basicConfig(format=log_format_string, level=log_level)
 
-    coloredlogs.DEFAULT_FIELD_STYLES["levelname"] = {'bold': True, 'color': ''}
+    coloredlogs.DEFAULT_FIELD_STYLES["levelname"] = {"bold": True, "color": ""}
     coloredlogs.install(fmt=log_format_string, level=log_level.upper())
 
     return logger, log_file
 
 
 def cloud_checker(value: str) -> str:
-    if not re.fullmatch("[a-zA-Z0-9]+", value):
-        raise argparse.ArgumentTypeError('specify a value for os_cloud')
+    if not re.fullmatch("[a-zA-Z0-9-]+", value):
+        raise argparse.ArgumentTypeError("specify a value for os_cloud")
     return value
 
 
 def item_checker(value: str) -> str:
     if not re.fullmatch(r"[a-zA-Z0-9]+[a-zA-Z0-9\-]*[a-zA-Z0-9]+", value):
-        raise argparse.ArgumentTypeError('specify a valid name for an item')
+        raise argparse.ArgumentTypeError("specify a valid name for an item")
     return value
diff --git a/src/openstack_workload_generator/entities/machine.py b/src/openstack_workload_generator/entities/machine.py
index 263dd12..7c17d3d 100644
--- a/src/openstack_workload_generator/entities/machine.py
+++ b/src/openstack_workload_generator/entities/machine.py
@@ -13,10 +13,14 @@ class WorkloadGeneratorMachine:
 
-    def __init__(self, conn: Connection, project: Project, machine_name: str,
-                 security_group_name_ingress: str,
-                 security_group_name_egress: str
-                 ):
+    def __init__(
+        self,
+        conn: Connection,
+        project: Project,
+        machine_name: str,
+        security_group_name_ingress: str,
+        security_group_name_egress: str,
+    ):
         self.conn = conn
         self.machine_name = machine_name
         self.root_password = Config.get_admin_vm_password()
@@ -46,18 +50,23 @@ def get_flavor_id_by_name(self, flavor_name):
         return None
 
     def delete_machine(self):
-        LOGGER.warning(f"Deleting machine {self.machine_name} in {ProjectCache.ident_by_id(self.project.id)}")
+        LOGGER.warning(
+            f"Deleting machine {self.machine_name} in {ProjectCache.ident_by_id(self.project.id)}"
+        )
         self.conn.delete_server(self.obj.id)
 
     def wait_for_delete(self):
         self.conn.compute.wait_for_delete(self.obj)
-        LOGGER.warning(f"Machine {self.machine_name} in {self.obj.project_id} is deleted now")
+        LOGGER.warning(
+            f"Machine {self.machine_name} in {self.obj.project_id} is deleted now"
+        )
 
     def create_or_get_server(self, network: Network, wait_for_machine: bool):
         if self.obj:
             LOGGER.info(
-                f"Server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(self.obj.project_id)} already exists")
+                f"Server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(self.obj.project_id)} already exists"
+            )
             return
 
         # https://docs.openstack.org/openstacksdk/latest/user/resources/compute/v2/server.html#openstack.compute.v2.server.Server
@@ -67,14 +76,16 @@ def create_or_get_server(self, network: Network, wait_for_machine: bool):
             networks=[{"uuid": network.id}],
             admin_password=self.root_password,
             description="automatically created",
-            block_device_mapping_v2=[{
-                "boot_index": 0,
-                "uuid": self.get_image_id_by_name(Config.get_vm_image()),
-                "source_type": "image",
-                "destination_type": "volume",
-                "volume_size": Config.get_vm_volume_size_gb(),
-                "delete_on_termination": True,
-            }],
+            block_device_mapping_v2=[
+                {
+                    "boot_index": 0,
+                    "uuid": self.get_image_id_by_name(Config.get_vm_image()),
+                    "source_type": "image",
+                    "destination_type": "volume",
+                    "volume_size": Config.get_vm_volume_size_gb(),
+                    "delete_on_termination": True,
+                }
+            ],
             user_data=WorkloadGeneratorMachine._get_user_script(),
             security_groups=[
                 {"name": self.security_group_name_ingress},
@@ -85,28 +96,36 @@ def create_or_get_server(self, network: Network, wait_for_machine: bool):
         if wait_for_machine:
             self.wait_for_server()
             if self.obj:
-                LOGGER.info(f"Created server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(network.project_id)}")
+                LOGGER.info(
+                    f"Created server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(network.project_id)}"
+                )
             else:
-                raise RuntimeError(f"Unable to create server {self.machine_name} in {ProjectCache.ident_by_id(network.project_id)}")
+                raise RuntimeError(
+                    f"Unable to create server {self.machine_name} in {ProjectCache.ident_by_id(network.project_id)}"
+                )
 
     @staticmethod
     def _get_user_script() -> str:
         cloud_init_script = "\n".join(Config.get_cloud_init_extra_script())
-        cloud_init_script = base64.b64encode(cloud_init_script.encode('utf-8')).decode('utf-8')
+        cloud_init_script = base64.b64encode(cloud_init_script.encode("utf-8")).decode(
+            "utf-8"
+        )
         return cloud_init_script
 
     def update_assigned_ips(self):
         if self.obj.addresses:
             for network_name, addresses in self.obj.addresses.items():
                 for address in addresses:
-                    if address['OS-EXT-IPS:type'] == 'floating':
-                        if self.floating_ip and self.floating_ip != address['addr']:
-                            raise RuntimeError("More than one address of type 'floating'")
-                        self.floating_ip = address['addr']
-                    elif address['OS-EXT-IPS:type'] == 'fixed':
-                        if self.internal_ip and self.internal_ip != address['addr']:
+                    if address["OS-EXT-IPS:type"] == "floating":
+                        if self.floating_ip and self.floating_ip != address["addr"]:
+                            raise RuntimeError(
+                                "More than one address of type 'floating'"
+                            )
+                        self.floating_ip = address["addr"]
+                    elif address["OS-EXT-IPS:type"] == "fixed":
+                        if self.internal_ip and self.internal_ip != address["addr"]:
                             raise RuntimeError("More than one address of type 'fixed'")
-                        self.internal_ip = address['addr']
+                        self.internal_ip = address["addr"]
                     else:
                         raise NotImplementedError(f"{address} not implemented")
 
@@ -120,11 +139,16 @@ def add_floating_ip(self):
 
         if self.floating_ip:
             LOGGER.info(
-                f"Floating ip is already added to {self.obj.name}/{self.obj.id} in domain {self.project.domain_id}")
+                f"Floating ip is already added to {self.obj.name}/{self.obj.id} in domain {self.project.domain_id}"
+            )
         else:
-            LOGGER.info(f"Add floating ip {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(self.project.id)}")
+            LOGGER.info(
+                f"Add floating ip {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(self.project.id)}"
+            )
             self.wait_for_server()
-            new_floating_ip = self.conn.network.create_ip(floating_network_id=public_network.id)
+            new_floating_ip = self.conn.network.create_ip(
+                floating_network_id=public_network.id
+            )
             server_port = list(self.conn.network.ports(device_id=self.obj.id))[0]
             self.conn.network.add_ip_to_port(server_port, new_floating_ip)
             self.floating_ip = new_floating_ip.floating_ip_address
@@ -136,14 +160,14 @@ def wait_for_server(self):
         )
 
     def start_server(self):
-        if self.obj.status != 'ACTIVE':
+        if self.obj.status != "ACTIVE":
             self.conn.compute.start_server(self.obj.id)
             LOGGER.info(f"Server '{self.obj.name}' started successfully.")
         else:
             LOGGER.info(f"Server '{self.obj.name}' is already running.")
 
     def stop_server(self):
-        if self.obj.status == 'ACTIVE':
+        if self.obj.status == "ACTIVE":
             self.conn.compute.stop_server(self.obj.id)
             LOGGER.info(f"Server '{self.obj.name}' stopped successfully.")
         else:
diff --git a/src/openstack_workload_generator/entities/network.py b/src/openstack_workload_generator/entities/network.py
index 5dabc34..4d32d52 100644
--- a/src/openstack_workload_generator/entities/network.py
+++ b/src/openstack_workload_generator/entities/network.py
@@ -15,8 +15,13 @@ class WorkloadGeneratorNetwork:
 
-    def __init__(self, conn: Connection, project: Project,
-                 security_group_name_ingress: str, security_group_name_egress: str):
+    def __init__(
+        self,
+        conn: Connection,
+        project: Project,
+        security_group_name_ingress: str,
+        security_group_name_egress: str,
+    ):
         self.project: Project = project
         self.conn = conn
         self.network_name = f"localnet-{self.project.name}"
@@ -24,54 +29,87 @@ def __init__(self, conn: Connection, project: Project,
         self.router_name = f"localrouter-{self.project.name}"
         self.security_group_name_ingress = security_group_name_ingress
         self.security_group_name_egress = security_group_name_egress
-        self.obj_network: Network | None = WorkloadGeneratorNetwork._find_network(self.network_name, conn, project)
-        self.obj_subnet: Subnet | None = WorkloadGeneratorNetwork._find_subnet(self.network_name, conn, project)
-        self.obj_router: Router | None = WorkloadGeneratorNetwork._find_router(self.router_name, conn, project)
-        self.obj_ingress_security_group: SecurityGroup | None = WorkloadGeneratorNetwork._find_security_group(
-            self.security_group_name_ingress, conn, project)
-        self.obj_egress_security_group: SecurityGroup | None = WorkloadGeneratorNetwork._find_security_group(
-            self.security_group_name_egress, conn, project)
+        self.obj_network: Network | None = WorkloadGeneratorNetwork._find_network(
+            self.network_name, conn, project
+        )
+        self.obj_subnet: Subnet | None = WorkloadGeneratorNetwork._find_subnet(
+            self.network_name, conn, project
+        )
+        self.obj_router: Router | None = WorkloadGeneratorNetwork._find_router(
+            self.router_name, conn, project
+        )
+        self.obj_ingress_security_group: SecurityGroup | None = (
+            WorkloadGeneratorNetwork._find_security_group(
+                self.security_group_name_ingress, conn, project
+            )
+        )
+        self.obj_egress_security_group: SecurityGroup | None = (
+            WorkloadGeneratorNetwork._find_security_group(
+                self.security_group_name_egress, conn, project
+            )
+        )
 
     @staticmethod
-    def _find_security_group(name, conn: Connection, project: Project) -> SecurityGroup | None:
-        security_groups = [group for group in conn.network.security_groups(name=name,
-                                                                           project_id=project.id,
-                                                                           domain_id=project.domain_id)]
+    def _find_security_group(
+        name, conn: Connection, project: Project
+    ) -> SecurityGroup | None:
+        security_groups = [
+            group
+            for group in conn.network.security_groups(
+                name=name, project_id=project.id, domain_id=project.domain_id
+            )
+        ]
         if len(security_groups) > 1:
-            raise RuntimeError(f"Error fetching security group for project {project.name}/{project.domain_id}")
+            raise RuntimeError(
+                f"Error fetching security group for project {project.name}/{project.domain_id}"
+            )
         elif len(security_groups) == 1:
             return security_groups[0]
         return None
 
     @staticmethod
     def _find_router(name, conn: Connection, project: Project) -> Router | None:
-        routers = [router for router in conn.network.routers(name=name, project_id=project.id)]
+        routers = [
+            router for router in conn.network.routers(name=name, project_id=project.id)
+        ]
         if len(routers) == 0:
             return None
         elif len(routers) == 1:
             return routers[0]
         else:
-            raise RuntimeError(f"More than one router with the name {name} in {project.name}")
+            raise RuntimeError(
+                f"More than one router with the name {name} in {project.name}"
+            )
 
     @staticmethod
     def _find_network(name, conn: Connection, project: Project) -> Network | None:
-        networks = [network for network in conn.network.networks(name=name, project_id=project.id)]
+        networks = [
+            network
+            for network in conn.network.networks(name=name, project_id=project.id)
+        ]
         if len(networks) == 0:
             return None
         elif len(networks) == 1:
             return networks[0]
         else:
-            raise RuntimeError(f"More the one network with the name {name} in {project.name}")
+            raise RuntimeError(
+                f"More than one network with the name {name} in {project.name}"
+            )
 
     @staticmethod
     def _find_subnet(name, conn, project) -> Subnet | None:
-        subnet = [network for network in conn.network.subnets(name=name, project_id=project.id)]
+        subnet = [
+            network
+            for network in conn.network.subnets(name=name, project_id=project.id)
+        ]
         if len(subnet) == 0:
             return None
         elif len(subnet) == 1:
             return subnet[0]
         else:
-            raise RuntimeError(f"More the one subnet with the name {name} in {project.name}")
+            raise RuntimeError(
+                f"More than one subnet with the name {name} in {project.name}"
+            )
 
     def create_and_get_network_setup(self) -> Network:
         network = self.create_and_get_network()
@@ -85,26 +123,33 @@ def create_and_get_network_setup(self) -> Network:
     def create_and_get_router(self, subnet: Subnet) -> Router | None:
         public_network = self.conn.network.find_network(Config.get_public_network())
         if not public_network:
-            LOGGER.error(f"There is no '{Config.get_public_network()}' network, not adding floating ips")
+            LOGGER.error(
+                f"There is no '{Config.get_public_network()}' network, not adding floating ips"
+            )
             return None
 
         if self.obj_router:
             return self.obj_router
 
         self.obj_router = self.conn.network.create_router(
-            name=self.router_name,
-            admin_state_up=True
+            name=self.router_name, admin_state_up=True
         )
 
         if not self.obj_router:
             raise RuntimeError(f"Unable to create Router '{self.router_name}'")
 
-        LOGGER.info(f"Router '{self.obj_router.name}' created with ID: {self.obj_router.id}")
-        self.conn.network.update_router(self.obj_router, external_gateway_info={
-            'network_id': public_network.id
-        })
-        LOGGER.info(f"Router '{self.obj_router.name}' gateway set to external network: {public_network.name}")
+        LOGGER.info(
+            f"Router '{self.obj_router.name}' created with ID: {self.obj_router.id}"
+        )
+        self.conn.network.update_router(
+            self.obj_router, external_gateway_info={"network_id": public_network.id}
+        )
+        LOGGER.info(
+            f"Router '{self.obj_router.name}' gateway set to external network: {public_network.name}"
+        )
 
         self.conn.network.add_interface_to_router(self.obj_router, subnet_id=subnet.id)
-        LOGGER.info(f"Subnet '{subnet.name}' added to router '{self.obj_router.name}' as an interface")
+        LOGGER.info(
+            f"Subnet '{subnet.name}' added to router '{self.obj_router.name}' as an interface"
+        )
 
         return self.obj_router
 
@@ -112,16 +157,21 @@ def create_and_get_network(self) -> Network:
         if self.obj_network:
             return self.obj_network
 
+        mtu_size = Config.get_network_mtu()
+        if mtu_size == 0:
+            mtu_size = None
+
         self.obj_network = self.conn.network.create_network(
             name=self.network_name,
             project_id=self.project.id,
-            mtu=Config.get_network_mtu(),
+            mtu=mtu_size,
         )
 
         if not self.obj_network:
             raise RuntimeError(f"Unable to create network {self.network_name}")
 
         LOGGER.info(
-            f"Created network {self.obj_network.name}/{self.obj_network.id} in {self.project.name}/{self.project.id}")
+            f"Created network {self.obj_network.name}/{self.obj_network.id} in {self.project.name}/{self.project.id}"
+        )
         return self.obj_network
 
@@ -138,14 +188,15 @@ def create_and_get_subnet(self) -> Subnet:
             cidr=Config.get_project_ipv4_subnet(),
             ip_version="4",
             enable_dhcp=True,
-            dns_nameservers=["8.8.8.8", "9.9.9.9"]
+            dns_nameservers=["8.8.8.8", "9.9.9.9"],
         )
 
         if not self.obj_subnet:
             raise RuntimeError(f"No subnet created {self.network_name}")
 
         LOGGER.info(
-            f"Created subnet {self.obj_subnet.name}/{self.obj_subnet.id} in {self.project.name}/{self.project.id}")
+            f"Created subnet {self.obj_subnet.name}/{self.obj_subnet.id} in {self.project.name}/{self.project.id}"
+        )
 
         return self.obj_subnet
 
@@ -154,14 +205,19 @@ def delete_network(self):
         if self.obj_router:
             ports = self.conn.network.ports(device_id=self.obj_router.id)
             for port in ports:
-                if port.device_owner == 'network:router_interface':
-                    self.conn.network.remove_interface_from_router(self.obj_router,
-                                                                   subnet_id=port.fixed_ips[0]['subnet_id'])
-                    LOGGER.warning(f"Removed interface from subnet: {port.fixed_ips[0]['subnet_id']}")
+                if port.device_owner == "network:router_interface":
+                    self.conn.network.remove_interface_from_router(
+                        self.obj_router, subnet_id=port.fixed_ips[0]["subnet_id"]
+                    )
+                    LOGGER.warning(
+                        f"Removed interface from subnet: {port.fixed_ips[0]['subnet_id']}"
+                    )
             self.conn.network.update_router(self.obj_router, external_gateway_info=None)
             LOGGER.warning(f"Removed gateway from router {self.obj_router.id}")
             self.conn.delete_router(self.obj_router)
-            LOGGER.warning(f"Deleted router {self.obj_router.id}/{self.obj_router.name}")
+            LOGGER.warning(
+                f"Deleted router {self.obj_router.id}/{self.obj_router.name}"
+            )
 
         if self.obj_network:
             for subnet_id in self.obj_network.subnet_ids:
@@ -174,28 +230,36 @@ def delete_network(self):
                         if subnet_obj.id in port_subnet_ids:
                             LOGGER.warning(f"Delete port {port.id}")
                             if port.device_owner == "network:router_interface":
-                                self.conn.network.remove_interface_from_router(port.device_id, port_id=port.id)
+                                self.conn.network.remove_interface_from_router(
+                                    port.device_id, port_id=port.id
+                                )
                                 self.conn.network.delete_router(port.device_id)
                             else:
                                 self.conn.network.delete_port(port.id)
                     LOGGER.warning(
-                        f"Delete subnet {subnet_obj.name} of {ProjectCache.ident_by_id(self.obj_subnet.project_id)}")
-                    self.conn.network.delete_subnet(subnet_obj, ignore_missing=False)
+                        f"Delete subnet {subnet_obj.name} of {ProjectCache.ident_by_id(self.obj_subnet.project_id)}"
+                    )
+                    self.conn.network.delete_subnet(
+                        subnet_obj, ignore_missing=False
+                    )
                 except ResourceNotFound:
                     LOGGER.warning(f"Already deleted subnet {subnet_id}")
 
             self.conn.network.delete_network(self.obj_network, ignore_missing=False)
-            LOGGER.warning(f"Deleted network {self.obj_network.name} / {self.obj_network.id}")
+            LOGGER.warning(
+                f"Deleted network {self.obj_network.name} / {self.obj_network.id}"
+            )
 
     def create_and_get_ingress_security_group(self) -> SecurityGroup:
         if self.obj_ingress_security_group:
             return self.obj_ingress_security_group
 
         LOGGER.info(
-            f"Creating ingress security group {self.security_group_name_ingress} for {ProjectCache.ident_by_id(self.project.id)}")
+            f"Creating ingress security group {self.security_group_name_ingress} for {ProjectCache.ident_by_id(self.project.id)}"
+        )
         self.obj_ingress_security_group = self.conn.network.create_security_group(
             name=self.security_group_name_ingress,
-            description="Security group to allow SSH access to instances"
+            description="Security group to allow SSH access to instances",
         )
 
         if not self.obj_ingress_security_group:
@@ -203,20 +267,20 @@ def create_and_get_ingress_security_group(self) -> SecurityGroup:
 
         self.conn.network.create_security_group_rule(
             security_group_id=self.obj_ingress_security_group.id,
-            direction='ingress',
-            ethertype='IPv4',
-            protocol='icmp',
-            remote_ip_prefix='0.0.0.0/0'
+            direction="ingress",
+            ethertype="IPv4",
+            protocol="icmp",
+            remote_ip_prefix="0.0.0.0/0",
         )
 
         self.conn.network.create_security_group_rule(
             security_group_id=self.obj_ingress_security_group.id,
-            direction='ingress',
-            ethertype='IPv4',
-            protocol='tcp',
+            direction="ingress",
+            ethertype="IPv4",
+            protocol="tcp",
             port_range_min=22,
             port_range_max=22,
-            remote_ip_prefix='0.0.0.0/0'
+            remote_ip_prefix="0.0.0.0/0",
         )
 
         return self.obj_ingress_security_group
 
@@ -224,11 +288,13 @@ def create_and_get_egress_security_group(self) -> SecurityGroup:
         if self.obj_egress_security_group:
             return self.obj_egress_security_group
 
-        LOGGER.info(f"Creating egress security group {self.security_group_name_egress} for "
-                    f"project {self.project.name}/{self.project.domain_id}")
+        LOGGER.info(
+            f"Creating egress security group {self.security_group_name_egress} for "
+            f"project {self.project.name}/{self.project.domain_id}"
+        )
         self.obj_egress_security_group = self.conn.network.create_security_group(
             name=self.security_group_name_egress,
-            description="Security group to allow outgoing access"
+            description="Security group to allow outgoing access",
         )
 
         if not self.obj_egress_security_group:
@@ -236,19 +302,19 @@ def create_and_get_egress_security_group(self) -> SecurityGroup:
 
         self.conn.network.create_security_group_rule(
             security_group_id=self.obj_egress_security_group.id,
-            direction='egress',
-            ethertype='IPv4',
-            protocol='tcp',
+            direction="egress",
+            ethertype="IPv4",
+            protocol="tcp",
             port_range_min=None,
             port_range_max=None,
-            remote_ip_prefix='0.0.0.0/0'
+            remote_ip_prefix="0.0.0.0/0",
         )
 
         self.conn.network.create_security_group_rule(
             security_group_id=self.obj_egress_security_group.id,
-            direction='egress',
-            ethertype='IPv4',
-            protocol='icmp',
-            remote_ip_prefix='0.0.0.0/0'
+            direction="egress",
+            ethertype="IPv4",
+            protocol="icmp",
+            remote_ip_prefix="0.0.0.0/0",
         )
 
         return self.obj_egress_security_group
diff --git a/src/openstack_workload_generator/entities/project.py b/src/openstack_workload_generator/entities/project.py
index 2e3ef46..b0ae498 100644
--- a/src/openstack_workload_generator/entities/project.py
+++ b/src/openstack_workload_generator/entities/project.py
@@ -18,8 +18,13 @@ class WorkloadGeneratorProject:
 
-    def __init__(self, admin_conn: Connection, project_name: str, domain: Domain,
-                 user: WorkloadGeneratorUser):
+    def __init__(
+        self,
+        admin_conn: Connection,
+        project_name: str,
+        domain: Domain,
+        user: WorkloadGeneratorUser,
+    ):
         self._admin_conn: Connection = admin_conn
         self._project_conn: Connection | None = None
         self.project_name: str = project_name
@@ -28,19 +33,29 @@ def __init__(self, admin_conn: Connection, project_name: str, domain: Domain,
         self.domain: Domain = domain
         self.ssh_proxy_jump: str | None = None
         self.user: WorkloadGeneratorUser = user
-        self.obj: Project = self._admin_conn.identity.find_project(project_name, domain_id=self.domain.id)
+        self.obj: Project = self._admin_conn.identity.find_project(
+            project_name, domain_id=self.domain.id
+        )
         if self.obj:
-            ProjectCache.add(self.obj.id, {"name": self.obj.name, "domain_id": self.domain.id})
-        self.workload_network: WorkloadGeneratorNetwork | None = \
-            WorkloadGeneratorProject._get_network(admin_conn, self.obj,
-                                                  self.security_group_name_ingress,
-                                                  self.security_group_name_egress
-                                                  )
-        self.workload_machines: dict[str, WorkloadGeneratorMachine] = \
-            WorkloadGeneratorProject._get_machines(admin_conn, self.obj,
-                                                   self.security_group_name_ingress,
-                                                   self.security_group_name_egress
-                                                   )
+            ProjectCache.add(
+                self.obj.id, {"name": self.obj.name, "domain_id": self.domain.id}
+            )
+        self.workload_network: WorkloadGeneratorNetwork | None = (
+            WorkloadGeneratorProject._get_network(
+                admin_conn,
+                self.obj,
+                self.security_group_name_ingress,
+                self.security_group_name_egress,
+            )
+        )
+        self.workload_machines: dict[str, WorkloadGeneratorMachine] = (
+            WorkloadGeneratorProject._get_machines(
+                admin_conn,
+                self.obj,
+                self.security_group_name_ingress,
+                self.security_group_name_egress,
+            )
+        )
         self.ssh_key: Keypair | None = None
 
     @property
@@ -48,7 +63,9 @@ def project_conn(self) -> Connection:
         if self._project_conn:
             return self._project_conn
 
-        LOGGER.info(f"Establishing a connection for {ProjectCache.ident_by_id(self.obj.id)}")
+        LOGGER.info(
+            f"Establishing a connection for {ProjectCache.ident_by_id(self.obj.id)}"
+        )
         self._project_conn = self._admin_conn.connect_as(
             domain_id=self.obj.domain_id,
             project_id=self.obj.id,
@@ -56,30 +73,43 @@ def project_conn(self) -> Connection:
             password=self.user.user_password,
         )
         if not self._project_conn:
-            raise RuntimeError(f"Unable to create a project connection {ProjectCache.ident_by_id(self.obj.id)}")
+            raise RuntimeError(
+                f"Unable to create a project connection {ProjectCache.ident_by_id(self.obj.id)}"
+            )
         return self._project_conn
 
     @staticmethod
-    def _get_network(conn: Connection, obj: Project,
-                     security_group_name_ingress: str,
-                     security_group_name_egress: str,
-                     ) -> None | WorkloadGeneratorNetwork:
+    def _get_network(
+        conn: Connection,
+        obj: Project,
+        security_group_name_ingress: str,
+        security_group_name_egress: str,
+    ) -> None | WorkloadGeneratorNetwork:
         if not obj:
             return None
-        return WorkloadGeneratorNetwork(conn, obj, security_group_name_ingress, security_group_name_egress)
+        return WorkloadGeneratorNetwork(
+            conn, obj, security_group_name_ingress, security_group_name_egress
+        )
 
     @staticmethod
-    def _get_machines(conn: Connection, obj: Project,
-                      security_group_name_ingress: str,
-                      security_group_name_egress: str,
-                      ) -> dict[str, WorkloadGeneratorMachine]:
+    def _get_machines(
+        conn: Connection,
+        obj: Project,
+        security_group_name_ingress: str,
+        security_group_name_egress: str,
+    ) -> dict[str, WorkloadGeneratorMachine]:
         result: dict[str, WorkloadGeneratorMachine] = dict()
         if not obj:
             return result
 
         for server in conn.compute.servers(all_projects=True, project_id=obj.id):
-            workload_server = WorkloadGeneratorMachine(conn, obj, server.name, security_group_name_ingress,
-                                                       security_group_name_egress)
+            workload_server = WorkloadGeneratorMachine(
+                conn,
+                obj,
+                server.name,
+                security_group_name_ingress,
+                security_group_name_egress,
+            )
             workload_server.obj = server
             result[workload_server.machine_name] = workload_server
         return result
@@ -101,14 +131,22 @@ def get_role_id_by_name(self, role_name) -> str:
 
     def assign_role_to_user_for_project(self, role_name: str):
         self._admin_conn.identity.assign_project_role_to_user(
-            user=self.user.obj.id, project=self.obj.id, role=self.get_role_id_by_name(role_name))
-        LOGGER.info(f"Assigned {role_name} to {self.user.obj.id} for {ProjectCache.ident_by_id(self.obj.id)}")
+            user=self.user.obj.id,
+            project=self.obj.id,
+            role=self.get_role_id_by_name(role_name),
+        )
+        LOGGER.info(
+            f"Assigned {role_name} to {self.user.obj.id} for {ProjectCache.ident_by_id(self.obj.id)}"
+        )
 
     def assign_role_to_global_admin_for_project(self, role_name: str):
         user_id = self._admin_conn.session.get_user_id()
         self._admin_conn.identity.assign_project_role_to_user(
-            user=user_id, project=self.obj.id, role=self.get_role_id_by_name(role_name))
-        LOGGER.info(f"Assigned global admin {role_name} to {user_id} for {ProjectCache.ident_by_id(self.obj.id)}")
+            user=user_id, project=self.obj.id, role=self.get_role_id_by_name(role_name)
+        )
+        LOGGER.info(
+            f"Assigned global admin {role_name} to {user_id} for {ProjectCache.ident_by_id(self.obj.id)}"
+        )
 
     def _set_quota(self, quota_category: str):
         if quota_category == "compute_quotas":
@@ -132,21 +170,31 @@ def _set_quota(self, quota_category: str):
             try:
                 current_value = getattr(current_quota, key_name)
             except AttributeError:
-                LOGGER.error(f"No such {api_area} quota field {key_name} in {current_quota}")
+                LOGGER.error(
+                    f"No such {api_area} quota field {key_name} in {current_quota}"
+                )
                 sys.exit()
-            new_value = Config.quota(key_name, quota_category, getattr(current_quota, key_name))
+            new_value = Config.quota(
+                key_name, quota_category, getattr(current_quota, key_name)
+            )
             if current_value != new_value:
-                LOGGER.info(f"New {api_area} quota for {ProjectCache.ident_by_id(self.obj.id)}"
-                            f": {key_name} : {current_value} -> {new_value}")
+                LOGGER.info(
+                    f"New {api_area} quota for {ProjectCache.ident_by_id(self.obj.id)}"
+                    f": {key_name} : {current_value} -> {new_value}"
+                )
                 new_quota[key_name] = new_value
 
         if len(new_quota):
             set_quota_method = getattr(self._admin_conn, f"set_{api_area}_quotas")
             set_quota_method(self.obj.id, **new_quota)
-            LOGGER.info(f"Configured {api_area} quotas for {ProjectCache.ident_by_id(self.obj.id)}")
+            LOGGER.info(
+                f"Configured {api_area} quotas for {ProjectCache.ident_by_id(self.obj.id)}"
+            )
         else:
-            LOGGER.info(f"{api_area.capitalize()} quotas for {ProjectCache.ident_by_id(self.obj.id)} not changed")
+            LOGGER.info(
+                f"{api_area.capitalize()} quotas for {ProjectCache.ident_by_id(self.obj.id)} not changed"
+            )
 
     def adapt_quota(self):
         self._set_quota("compute_quotas")
@@ -156,9 +204,12 @@ def adapt_quota(self):
     def create_and_get_project(self) -> Project:
         if self.obj:
             self.adapt_quota()
-            self.workload_network = WorkloadGeneratorNetwork(self._admin_conn, self.obj,
-                                                             self.security_group_name_ingress,
-                                                             self.security_group_name_egress)
+            self.workload_network = WorkloadGeneratorNetwork(
+                self._admin_conn,
+                self.obj,
+                self.security_group_name_ingress,
+                self.security_group_name_egress,
+            )
             self.workload_network.create_and_get_network_setup()
             return self.obj
 
@@ -166,9 +217,11 @@ def create_and_get_project(self) -> Project:
             name=self.project_name,
             domain_id=self.domain.id,
             description="Auto generated",
-            enabled=True
+            enabled=True,
+        )
+        ProjectCache.add(
+            self.obj.id, {"name": self.obj.name, "domain_id": self.obj.domain_id}
         )
-        ProjectCache.add(self.obj.id, {"name": self.obj.name, "domain_id": self.obj.domain_id})
         LOGGER.info(f"Created {ProjectCache.ident_by_id(self.obj.id)}")
 
         self.adapt_quota()
@@ -176,8 +229,12 @@ def create_and_get_project(self) -> Project:
         self.assign_role_to_user_for_project("load-balancer_member")
         self.assign_role_to_user_for_project("member")
 
-        self.workload_network = WorkloadGeneratorNetwork(self.project_conn, self.obj, self.security_group_name_ingress,
-                                                         self.security_group_name_egress)
+        self.workload_network = WorkloadGeneratorNetwork(
+            self.project_conn,
+            self.obj,
+            self.security_group_name_ingress,
+            self.security_group_name_egress,
+        )
         self.workload_network.create_and_get_network_setup()
         return self.obj
 
@@ -217,7 +274,9 @@ def delete_project(self):
 
     def get_and_create_machines(self, machines: list[str], wait_for_machines: bool):
         if "none" in machines:
-            LOGGER.warning("Not creating a virtual machine, because 'none' was in the list")
+            LOGGER.warning(
+                "Not creating a virtual machine, because 'none' was in the list"
+            )
             self.close_connection()
             return
 
@@ -225,13 +284,23 @@ def get_and_create_machines(self, machines: list[str], wait_for_machines: bool):
 
         for nr, machine_name in enumerate(sorted(machines)):
             if machine_name not in self.workload_machines:
-                machine = WorkloadGeneratorMachine(self.project_conn, self.obj, machine_name,
-                                                   self.security_group_name_ingress, self.security_group_name_egress)
-
-                if self.workload_network is None or self.workload_network.obj_network is None:
+                machine = WorkloadGeneratorMachine(
+                    self.project_conn,
+                    self.obj,
+                    machine_name,
+                    self.security_group_name_ingress,
+                    self.security_group_name_egress,
+                )
+
+                if (
+                    self.workload_network is None
+                    or self.workload_network.obj_network is None
+                ):
                     raise RuntimeError("No Workload network object")
 
-                machine.create_or_get_server(self.workload_network.obj_network, wait_for_machines)
+                machine.create_or_get_server(
+                    self.workload_network.obj_network, wait_for_machines
+                )
 
                 if machine.floating_ip:
                     self.ssh_proxy_jump = machine.floating_ip
@@ -248,23 +317,30 @@ def get_and_create_machines(self, machines: list[str], wait_for_machines: bool):
     def dump_inventory_hosts(self, directory_location: str):
         for name, workload_machine in self.workload_machines.items():
             if workload_machine.obj is None:
-                raise RuntimeError(f"Invalid reference to server for {workload_machine.machine_name}")
+                raise RuntimeError(
+                    f"Invalid reference to server for {workload_machine.machine_name}"
+                )
 
             workload_machine.update_assigned_ips()
 
             if not workload_machine.internal_ip:
-                raise RuntimeError(f"Unable to get associated ip address for {workload_machine.machine_name}")
+                raise RuntimeError(
+                    f"Unable to get associated ip address for {workload_machine.machine_name}"
+                )
 
             data: dict[str, str | dict[str, str]] = {
                 "openstack": {
                     "machine_id": workload_machine.obj.id,
                     "machine_status": workload_machine.obj.status,
-                    "hypervisor": workload_machine.obj['OS-EXT-SRV-ATTR:hypervisor_hostname'],
+                    "hypervisor": workload_machine.obj[
+                        "OS-EXT-SRV-ATTR:hypervisor_hostname"
+                    ],
                     "domain": self.domain.name,
                     "project": workload_machine.project.name,
                 },
                 "hostname": workload_machine.machine_name,
-                "ansible_host": workload_machine.floating_ip or workload_machine.internal_ip,
+                "ansible_host": workload_machine.floating_ip
+                or workload_machine.internal_ip,
                 "internal_ip": workload_machine.internal_ip,
             }
 
@@ -273,17 +349,22 @@ def dump_inventory_hosts(self, directory_location: str):
 
             base_dir = f"{directory_location}/{self.domain.name}-{workload_machine.project.name}-{workload_machine.machine_name}"
 
-            filename = f'{base_dir}/data.yml'
+            filename = f"{base_dir}/data.yml"
             os.makedirs(base_dir, exist_ok=True)
 
-            with open(filename, 'w') as file:
-                LOGGER.info(f"Creating ansible_inventory_file {filename} for host {data['hostname']}")
+            with open(filename, "w") as file:
+                LOGGER.info(
+                    f"Creating ansible_inventory_file {filename} for host {data['hostname']}"
+                )
                 yaml.dump(data, file, default_flow_style=False, explicit_start=True)
 
     def get_or_create_ssh_key(self):
-        self.ssh_key = self.project_conn.compute.find_keypair(Config.get_admin_vm_ssh_keypair_name())
+        self.ssh_key = self.project_conn.compute.find_keypair(
+            Config.get_admin_vm_ssh_keypair_name()
+        )
         if not self.ssh_key:
             LOGGER.info(
-                f"Create SSH keypair '{Config.get_admin_vm_ssh_keypair_name()} in {ProjectCache.ident_by_id(self.obj.id)}")
+                f"Create SSH keypair '{Config.get_admin_vm_ssh_keypair_name()}' in {ProjectCache.ident_by_id(self.obj.id)}"
+            )
             self.ssh_key = self.project_conn.compute.create_keypair(
                 name=Config.get_admin_vm_ssh_keypair_name(),
                 public_key=Config.get_admin_vm_ssh_key(),
@@ -295,8 +376,8 @@ def close_connection(self):
             self._project_conn.close()
             self._project_conn = None
 
-    def get_clouds_yaml_data(self) -> dict[str, str | dict[str, str]]:
-        data: dict[str, str | dict[str, str]] = {
+    def get_clouds_yaml_data(self) -> dict[str, str | bool | dict[str, str]]:
+        data: dict[str, bool | str | dict[str, str]] = {
             "auth": {
                 "username": self.user.user_name,
                 "project_name": self.project_name,
@@ -305,8 +386,8 @@ def get_clouds_yaml_data(self) -> dict[str, str | dict[str, str]]:
                 "user_domain_name": self.domain.name,
                 "password": self.user.user_password,
             },
+            "verify": Config.get_verify_ssl_certificate(),
             "cacert": self.project_conn.verify,
             "identity_api_version": "3",
-            "endpoint_type": "internalURL"
         }
         return data
diff --git a/src/openstack_workload_generator/entities/user.py b/src/openstack_workload_generator/entities/user.py
index 617abf4..6e45aec 100644
--- a/src/openstack_workload_generator/entities/user.py
+++ b/src/openstack_workload_generator/entities/user.py
@@ -16,28 +16,36 @@ def __init__(self, conn: Connection, user_name: str, domain: Domain):
         self.user_name = user_name
         self.user_password = Config.get_admin_domain_password()
         self.domain: Domain = domain
-        self.obj = self.conn.identity.find_user(user_name, query={"domain_id": self.domain.id})
+        self.obj = self.conn.identity.find_user(
+            user_name, query={"domain_id": self.domain.id}
+        )
 
     def assign_role_to_user(self, role_name: str):
-        self.conn.identity.assign_project_role_to_user(self.obj.id, self.domain.id, self.get_role_id_by_name(role_name))
+        self.conn.identity.assign_project_role_to_user(
+            self.obj.id, self.domain.id, self.get_role_id_by_name(role_name)
+        )
         LOGGER.info(
-            f"Assigned role '{role_name}' to user '{self.obj.name}' in {DomainCache.ident_by_id(self.domain.id)}")
+            f"Assigned role '{role_name}' to user '{self.obj.name}' in {DomainCache.ident_by_id(self.domain.id)}"
+        )
 
     def create_and_get_user(self) -> User:
         if self.obj:
-            LOGGER.info(f"User {self.user_name} already exists in {DomainCache.ident_by_id(self.domain.id)}")
+            LOGGER.info(
+                f"User {self.user_name} already exists in {DomainCache.ident_by_id(self.domain.id)}"
+            )
             return self.obj
 
         self.obj = self.conn.identity.create_user(
             name=self.user_name,
             password=self.user_password,
             domain_id=self.domain.id,
-            enabled=True
+            enabled=True,
        )
 
         self.assign_role_to_user("manager")
         LOGGER.info(
-            f"Created user {self.obj.name} / {self.obj.id} with password {self.obj.password} in {DomainCache.ident_by_id(self.domain.id)}")
+            f"Created user {self.obj.name} / {self.obj.id} with password {self.obj.password} in {DomainCache.ident_by_id(self.domain.id)}"
+        )
        return self.obj
 
     def delete_user(self):
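
Editor's addendum, not part of the patch: the relaxed validator regexes changed above (cloud_checker now accepts hyphens, matching the "<domain>-<project>" cloud names written into the generated clouds.yaml) are cheap to pin down with a pytest module. A minimal sketch, assuming the package is importable from src/ as the Makefile's pytest target implies; the file name test/test_checkers.py is hypothetical.

# test/test_checkers.py -- hypothetical file exercising the validators from entities/helpers.py
import argparse

import pytest

from openstack_workload_generator.entities.helpers import cloud_checker, item_checker


def test_cloud_checker_accepts_hyphenated_cloud_names():
    # The patch widens the pattern from "[a-zA-Z0-9]+" to "[a-zA-Z0-9-]+".
    assert cloud_checker("demo1-project1") == "demo1-project1"


def test_cloud_checker_rejects_other_characters():
    # Underscores were not valid before and are still rejected.
    with pytest.raises(argparse.ArgumentTypeError):
        cloud_checker("demo_1")


def test_item_checker_requires_alphanumeric_edges():
    # item_checker still demands names that start and end with [a-zA-Z0-9].
    with pytest.raises(argparse.ArgumentTypeError):
        item_checker("-demo1")
    with pytest.raises(argparse.ArgumentTypeError):
        item_checker("demo1-")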