Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,6 @@ test: deps
${activate} && ${python} -m pytest test
.PHONY: test

black: deps
${activate} && ${python} -m black src
.PHONY: black
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ openstacksdk==3.3.0
pytest==7.4.0
mypy==1.13.0
flake8==6.1.0
black>=24.4.2
180 changes: 123 additions & 57 deletions src/openstack_workload_generator/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,62 +31,114 @@

LOGGER = logging.getLogger()

# Command-line interface. The parser is built at module level so that the
# parsed `args` namespace is available to the provisioning logic further down
# in this script.
parser = argparse.ArgumentParser(prog="Create workloads on openstack installations")

parser.add_argument(
    "--log_level", metavar="loglevel", type=str, default="INFO", help="The loglevel"
)

parser.add_argument(
    "--os_cloud",
    type=cloud_checker,
    default=os.environ.get("OS_CLOUD", "admin"),
    help="The openstack config to use, defaults to the value of the OS_CLOUD "
    'environment variable or "admin" if the variable is not set',
)

parser.add_argument(
    "--ansible_inventory",
    type=str,
    nargs="?",
    help="Dump the created servers as an ansible inventory to the specified directory, "
    "adds a ssh proxy jump for the hosts without a floating ip",
)

# --clouds_yaml selects an *input* configuration file; the separate
# --generate_clouds_yaml option below writes an *output* clouds.yaml.
parser.add_argument(
    "--clouds_yaml", type=str, nargs="?", help="Use a specific clouds.yaml file"
)

parser.add_argument(
    "--wait_for_machines",
    action="store_true",
    help="Wait for every machine to be created "
    "(normally the provisioning only waits for machines which use floating ips)",
)

parser.add_argument(
    "--generate_clouds_yaml",
    type=str,
    nargs="?",
    help="Generate an OpenStack clouds.yaml file",
)


parser.add_argument(
    "--config",
    type=str,
    default="default.yaml",
    help="The config file for environment creation, define a path to the"
    " yaml file or a subpath in the profiles folder",
)

# Exactly one of --create_domains / --delete_domains must be given.
exclusive_group_domain = parser.add_mutually_exclusive_group(required=True)

exclusive_group_domain.add_argument(
    "--create_domains",
    type=item_checker,
    nargs="+",
    default=None,
    metavar="DOMAINNAME",
    help="A list of domains to be created",
)

exclusive_group_domain.add_argument(
    "--delete_domains",
    type=item_checker,
    nargs="+",
    default=None,
    metavar="DOMAINNAME",
    help="A list of domains to be deleted, all child elements are recursively deleted",
)

# Project creation/deletion is optional, but the two options are mutually
# exclusive.
exclusive_group_project = parser.add_mutually_exclusive_group(required=False)

exclusive_group_project.add_argument(
    "--create_projects",
    type=item_checker,
    nargs="+",
    default=None,
    metavar="PROJECTNAME",
    help="A list of projects to be created in the created domains",
)

exclusive_group_project.add_argument(
    "--delete_projects",
    type=item_checker,
    nargs="+",
    default=None,
    metavar="PROJECTNAME",
    help="A list of projects to be deleted in the created "
    "domains, all child elements are recursively deleted",
)

# Machine creation/deletion is optional as well, and likewise exclusive.
exclusive_group_machines = parser.add_mutually_exclusive_group(required=False)

exclusive_group_machines.add_argument(
    "--create_machines",
    type=item_checker,
    nargs="+",
    default=None,
    metavar="SERVERNAME",
    help="A list of vms to be created in the created domains",
)

exclusive_group_machines.add_argument(
    "--delete_machines",
    type=item_checker,
    nargs="+",
    default=None,
    metavar="SERVERNAME",
    help="A list of vms to be deleted in the created projects",
)

args = parser.parse_args()

Expand All @@ -97,7 +149,11 @@


def establish_connection():
    """Build an OpenStack SDK connection for the cloud named by --os_cloud.

    When --clouds_yaml is given, configuration is loaded exclusively from
    that file; otherwise the SDK's default config discovery is used.
    """
    if args.clouds_yaml is None:
        config = loader.OpenStackConfig()
    else:
        LOGGER.info(f"Loading connection configuration from {args.clouds_yaml}")
        config = loader.OpenStackConfig(config_files=[args.clouds_yaml])
    cloud_config = config.get_one(args.os_cloud)
    return Connection(config=cloud_config)

Expand All @@ -123,20 +179,30 @@ def establish_connection():
for workload_domain in workload_domains.values():
for workload_project in workload_domain.get_projects(args.create_projects):
if args.create_machines:
workload_project.get_and_create_machines(args.create_machines, args.wait_for_machines)
workload_project.get_and_create_machines(
args.create_machines, args.wait_for_machines
)
if args.ansible_inventory:
workload_project.dump_inventory_hosts(args.ansible_inventory)
if args.clouds_yaml:
clouds_yaml_data[f"{workload_domain.domain_name}-{workload_project.project_name}"] \
= workload_project.get_clouds_yaml_data()
clouds_yaml_data[
f"{workload_domain.domain_name}-{workload_project.project_name}"
] = workload_project.get_clouds_yaml_data()
elif args.delete_machines:
for machine_obj in workload_project.get_machines(args.delete_machines):
for machine_obj in workload_project.get_machines(
args.delete_machines
):
machine_obj.delete_machine()
if args.generate_clouds_yaml:
LOGGER.info(f"Creating a clouds.yaml: {args.generate_clouds_yaml}")
clouds_yaml_data = {"clouds": clouds_yaml_data}
with open(args.clouds_yaml, 'w') as file:
yaml.dump(clouds_yaml_data, file, default_flow_style=False, explicit_start=True)
with open(args.generate_clouds_yaml, "w") as file:
yaml.dump(
clouds_yaml_data,
file,
default_flow_style=False,
explicit_start=True,
)
sys.exit(0)
elif args.delete_projects:
conn = establish_connection()
Expand Down
30 changes: 19 additions & 11 deletions src/openstack_workload_generator/entities/domain.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,12 @@ def __init__(self, conn: Connection, domain_name: str):
self.obj: Domain = self.conn.identity.find_domain(domain_name)
if self.obj:
DomainCache.add(self.obj.id, self.obj.name)
self.workload_user = WorkloadGeneratorDomain._get_user(conn, domain_name, self.obj)
self.workload_projects: dict[str, WorkloadGeneratorProject] = WorkloadGeneratorDomain._get_projects(
conn, self.obj, self.workload_user)
self.workload_user = WorkloadGeneratorDomain._get_user(
conn, domain_name, self.obj
)
self.workload_projects: dict[str, WorkloadGeneratorProject] = (
WorkloadGeneratorDomain._get_projects(conn, self.obj, self.workload_user)
)

@staticmethod
def _get_user(conn: Connection, domain_name: str, obj: Domain):
Expand All @@ -30,28 +33,31 @@ def _get_user(conn: Connection, domain_name: str, obj: Domain):
return WorkloadGeneratorUser(conn, f"{domain_name}-admin", obj)

@staticmethod
def _get_projects(
    conn: Connection, domain: Domain | None, user: WorkloadGeneratorUser | None
) -> dict[str, WorkloadGeneratorProject]:
    """Return the domain's existing projects keyed by project name.

    Returns an empty dict when the domain or its workload user does not
    exist yet (i.e. the domain has not been created).
    """
    if not domain or not user:
        return dict()
    result: dict[str, WorkloadGeneratorProject] = dict()
    for project in conn.identity.projects(domain_id=domain.id):
        result[project.name] = WorkloadGeneratorProject(
            conn, project.name, domain, user
        )
    return result

def create_and_get_domain(self) -> Domain:
    """Create the Keystone domain if it does not exist and return it.

    Idempotent: if the domain object was already resolved, it is returned
    unchanged. On creation the domain is registered in the DomainCache and
    the domain's workload admin user handle is (re)initialized.
    """
    if self.obj:
        return self.obj

    self.obj = self.conn.identity.create_domain(
        name=self.domain_name, description="Automated creation", enabled=True
    )
    DomainCache.add(self.obj.id, self.obj.name)
    LOGGER.info(f"Created {DomainCache.ident_by_id(self.obj.id)}")

    # The workload user could not exist before the domain did; create the
    # handle now that the domain object is available.
    self.workload_user = WorkloadGeneratorDomain._get_user(
        self.conn, self.domain_name, self.obj
    )
    return self.obj

def disable_domain(self):
Expand Down Expand Up @@ -92,7 +98,9 @@ def create_and_get_projects(self, create_projects: list[str]):
for project_name in create_projects:
if project_name in self.workload_projects:
continue
project = WorkloadGeneratorProject(self.conn, project_name, self.obj, self.workload_user)
project = WorkloadGeneratorProject(
self.conn, project_name, self.obj, self.workload_user
)
project.create_and_get_project()
project.get_or_create_ssh_key()
self.workload_projects[project_name] = project
Expand Down
Loading