diff --git a/ci/chargeback_tests.yml b/ci/chargeback_tests.yml
new file mode 100644
index 00000000..bfe01a97
--- /dev/null
+++ b/ci/chargeback_tests.yml
@@ -0,0 +1,219 @@
+---
+- name: "Verify logging projects, endpoints, credentials, nodes, pods, services, manifests, and subscriptions"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars_files:
+    - vars/osp18_env.yml
+  vars:
+    common_pod_test_id: ""
+    common_pod_status_str: "Running"
+    common_pod_nspace: openstack-operators
+    common_pod_list:
+      - telemetry-operator-controller-manager
+      - openstack-operator-controller-manager
+
+    common_subscription_test_id: ""
+    common_subscription_nspace: openshift-operators-redhat
+    common_subscription_list:
+      - loki-operator
+
+    common_project_test_id: ""
+    common_project_list:
+      - openshift-openstack-infra
+      - openshift
+      - openstack-operators
+      - openshift-logging
+
+    common_endpoint_test_id: ""
+    common_endpoint_list:
+      - [nova, compute, public]
+      - [nova, compute, internal]
+      - [placement, placement, public]
+      - [placement, placement, internal]
+      - [swift, object-store, public]
+      - [swift, object-store, internal]
+      - [cinderv3, volumev3, public]
+      - [cinderv3, volumev3, internal]
+      - [barbican, key-manager, public]
+      - [barbican, key-manager, internal]
+      - [keystone, identity, public]
+      - [keystone, identity, internal]
+      - [glance, image, public]
+      - [glance, image, internal]
+      - [neutron, network, public]
+      - [neutron, network, internal]
+
+    common_manifest_test_id: ""
+    common_manifest_list:
+      - "loki-operator 2"
+      - "loki-helm-operator 1"
+
+    common_service_test_id: ""
+    common_service_nspace: openshift-logging
+    common_service_list:
+      - cluster-logging-operator-metrics
+      - logging-loki-compactor-grpc
+      - logging-loki-compactor-http
+      - logging-loki-distributor-grpc
+      - logging-loki-distributor-http
+      - logging-loki-gateway-http
+      - logging-loki-gossip-ring
+      - logging-loki-index-gateway-grpc
+      - logging-loki-index-gateway-http
+      - logging-loki-ingester-grpc
+      - logging-loki-ingester-http
+      - logging-loki-querier-grpc
+      - logging-loki-querier-http
+      - logging-loki-query-frontend-grpc
+      - logging-loki-query-frontend-http
+      - openstack-logging
+
+  tasks:
+    - name: "Verify logging infrastructure components"
+      ansible.builtin.import_role:
+        name: common
+
+- name: "Verify logging pods are running in openstack"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars:
+    common_pod_test_id: ""
+    common_pod_status_str: "Running"
+    common_pod_nspace: openstack
+    common_pod_list:
+      - openstackclient
+  tasks:
+    - name: "Verify running pods"
+      ansible.builtin.import_role:
+        name: common
+
+- name: "Verify logging pods are running in openshift-operators-redhat"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars:
+    common_pod_test_id: ""
+    common_pod_status_str: "Running"
+    common_pod_nspace: openshift-operators-redhat
+    common_pod_list:
+      - loki-operator-controller-manager
+  tasks:
+    - name: "Verify running pods"
+      ansible.builtin.import_role:
+        name: common
+
+- name: "Verify logging pods are running in openshift-logging"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars:
+    common_pod_test_id: ""
+    common_pod_status_str: "Running"
+    common_pod_nspace: openshift-logging
+    common_pod_list:
+      - cluster-logging-operator
+      - logging-loki-compactor
+      - logging-loki-distributor
+      - logging-loki-index-gateway
+      - logging-loki-ingester
+      - logging-loki-querier
+      - logging-loki-query-frontend
+      - collector
+
+  tasks:
+    - name: "Verify running pods"
+      ansible.builtin.import_role:
+        name: common
+
+- name: "Verify logging pods are running in minio-dev"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars:
+    common_pod_test_id: ""
+    common_pod_status_str: "Running"
+    common_pod_nspace: minio-dev
+    common_pod_list:
+      - minio
+  tasks:
+    - name: "Verify running pods"
+      ansible.builtin.import_role:
+        name: common
+
+- name: "Verify logging pods have Completed status in openstack"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars:
+    common_pod_test_id: ""
+    common_pod_nspace: openstack
+    common_pod_status_str: "Completed"
+    common_pod_list:
+      - bootstrap-edpm-deployment-openstack-edpm-ipam
+      - configure-network-edpm-deployment-openstack-edpm-ipam
+      - configure-os-edpm-deployment-openstack-edpm-ipam
+      - install-certs-edpm-deployment-openstack-edpm-ipam
+      - install-os-edpm-deployment-openstack-edpm-ipam
+      - libvirt-edpm-deployment-openstack-edpm-ipam
+      - logging-edpm-deployment-openstack-edpm-ipam
+      - neutron-metadata-edpm-deployment-openstack-edpm-ipam
+      - ovn-edpm-deployment-openstack-edpm-ipam
+      - reboot-os-edpm-deployment-openstack-edpm-ipam
+      - run-os-edpm-deployment-openstack-edpm-ipam
+      - ssh-known-hosts-edpm-deployment
+      - telemetry-edpm-deployment-openstack-edpm-ipam
+      - validate-network-edpm-deployment-openstack-edpm-ipam
+  tasks:
+    - name: "Verify completed pods"
+      ansible.builtin.import_role:
+        name: common
+
+- name: "Verify the services and CRDs exist"
+  hosts: controller
+  gather_facts: no
+  ignore_errors: true
+  environment:
+    KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}"
+    PATH: "{{ cifmw_path }}"
+  vars:
+    common_service_test_id: ""
+    common_service_nspace: openstack
+    common_service_list:
+      - nova-internal
+      - nova-metadata-internal
+      - nova-novncproxy-cell1-public
+      - nova-public
+    common_crd_test_id: ""
+    common_crd_list:
+      - alertingrules.loki.grafana.com
+      - lokistacks.loki.grafana.com
+      - recordingrules.loki.grafana.com
+      - rulerconfigs.loki.grafana.com
+  tasks:
+    - name: "Run Services and CRD tests"
+      ansible.builtin.import_role:
+        name: common
diff --git a/roles/observe_chargeback/README.md b/roles/observe_chargeback/README.md
new file mode 100644
index 00000000..6ef3d20a
--- /dev/null
+++ b/roles/observe_chargeback/README.md
@@ -0,0 +1,51 @@
+Ansible Role: CloudKitty ChargeBack Validation
+=========
+
+This Ansible role validates and enforces the configuration of the OpenStack CloudKitty (chargeback) service. It performs a series of tests to ensure that the CloudKitty rating modules are in the correct state and that the `hashmap` module has the correct priority.
+
+Role Task Files
+---------
+
+* **`main.yml`**: The main entry point. It includes the `chargeback_tests.yml` file to execute the validation logic.
+* **`chargeback_tests.yml`**: This task file contains the sequence of steps used to validate and configure the CloudKitty service.
+
+Workflow
+---------
+
+The `chargeback_tests.yml` task file executes the following steps:
+
+1. **Get Module Status**: It first runs the `{{ openstack_cmd }} rating module list` command to fetch the current status of all CloudKitty rating modules (a sample of the expected output is shown after this list).
+
+2. **Validate Module States**: It uses an `assert` task to test for specific, expected conditions. The run will **fail** if these are not met:
+    * The `noop` module must be **enabled** (`True`).
+    * The `hashmap` module must be **enabled** (`True`).
+    * The `pyscripts` module must be **disabled** (`False`).
+
+3. **Check Hashmap Priority**: It runs a shell command to find the current priority value of the `hashmap` module.
+
+4. **Set Hashmap Priority**: It idempotently sets the `hashmap` module's priority to `100`. This task is skipped if the priority is already set to `100`.
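+
+A sample of the `rating module list` output that the assertions parse (illustrative only; exact columns, ordering, and priority values depend on the CloudKitty client version and your deployment):
+
+    +-----------+---------+----------+
+    | Module    | Enabled | Priority |
+    +-----------+---------+----------+
+    | hashmap   | True    | 100      |
+    | noop      | True    | 1        |
+    | pyscripts | False   | 1        |
+    +-----------+---------+----------+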
+
+
+Requirements
+---------
+
+This role relies on an Ansible variable, `openstack_cmd`, which must be defined when running the playbook.
+
+This variable must contain the full command necessary to execute OpenStack CLI commands (e.g., `/usr/bin/openstack`, or simply `openstack` if it is in the system's PATH).
+The host running the playbook must have access to the OpenStack environment and have the necessary credentials loaded for the CLI to function.
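+
+For example, when the OpenStack control plane runs on OpenShift (as in the CI playbook above), the variable might point at the CLI inside the client pod. This value is hypothetical; adjust the namespace and pod name to your deployment:
+
+    openstack_cmd="oc rsh -n openstack openstackclient openstack"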
+
+
+Usage
+---------
+
+You can run the validation using the `ansible-playbook` command. You must pass the `openstack_cmd` variable as an extra argument.
+
+Example of running the playbook from a bash shell:
+
+    ansible-playbook main.yml -e "openstack_cmd=/usr/bin/openstack"
+
+
+Author Information
+------------------
+
+Alex Yefimov
diff --git a/roles/observe_chargeback/defaults/main.yml b/roles/observe_chargeback/defaults/main.yml
new file mode 100644
index 00000000..d6d730bf
--- /dev/null
+++ b/roles/observe_chargeback/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+chargeback_test_id: ""
+
diff --git a/roles/observe_chargeback/files/gen_synth_loki_data.py b/roles/observe_chargeback/files/gen_synth_loki_data.py
new file mode 100755
index 00000000..ddec78d5
--- /dev/null
+++ b/roles/observe_chargeback/files/gen_synth_loki_data.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+import logging
+import argparse
+from datetime import datetime, timezone, timedelta
+from pathlib import Path
+from jinja2 import Template
+
+# --- Configure logging with a default level that can be changed ---
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s',
+    datefmt='%Y-%m-%d %H:%M:%S'
+)
+logger = logging.getLogger()
+
+def _format_timestamp(epoch_seconds: float) -> str:
+    """
+    Converts an epoch timestamp into a human-readable UTC string.
+
+    Args:
+        epoch_seconds (float): The timestamp in seconds since the epoch.
+
+    Returns:
+        str: The formatted datetime string (e.g., "2023-10-26T14:30:00 UTC").
+    """
+    try:
+        dt_object = datetime.fromtimestamp(epoch_seconds, tz=timezone.utc)
+        return dt_object.strftime("%Y-%m-%dT%H:%M:%S %Z")
+    except (ValueError, TypeError):
+        logger.warning(f"Invalid epoch value provided: {epoch_seconds}")
+        return "INVALID_TIMESTAMP"
+
+def generate_loki_data(
+    template_path: Path,
+    output_path: Path,
+    start_time: datetime,
+    end_time: datetime,
+    time_step_seconds: int
+):
+    """
+    Generates synthetic Loki log data by first preparing a data list
+    and then rendering it with a single template.
+
+    Args:
+        template_path (Path): Path to the main log template file.
+        output_path (Path): Path for the generated output JSON file.
+        start_time (datetime): The start time for data generation.
+        end_time (datetime): The end time for data generation.
+        time_step_seconds (int): The duration of each log entry in seconds.
+    """
+
+    # --- Step 1: Generate the data structure first ---
+    logger.info(
+        f"Generating data from {start_time.strftime('%Y-%m-%d')} to "
+        f"{end_time.strftime('%Y-%m-%d')} with a {time_step_seconds}s step."
+    )
+    start_epoch = int(start_time.timestamp())
+    end_epoch = int(end_time.timestamp())
+    logger.debug(f"Time range in epoch seconds: {start_epoch} to {end_epoch}")
+
+    log_data_list = []  # This list will hold all our data points
+
+    # Loop through the time range and generate data points
+    for current_epoch in range(start_epoch, end_epoch, time_step_seconds):
+        end_of_step_epoch = current_epoch + time_step_seconds - 1
+
+        # Prepare replacement values
+        nanoseconds = int(current_epoch * 1_000_000_000)
+        start_str = _format_timestamp(current_epoch)
+        end_str = _format_timestamp(end_of_step_epoch)
+
+        logger.debug(f"Processing epoch: {current_epoch} -> nanoseconds: {nanoseconds}")
+
+        # Create a dictionary for this time step and add it to the list
+        log_data_list.append({
+            "nanoseconds": nanoseconds,
+            "start_time": start_str,
+            "end_time": end_str
+        })
+
+    logger.info(f"Generated {len(log_data_list)} data points to be rendered.")
+
+    # --- Step 2: Load template and render ---
+    try:
+        logger.info(f"Loading main template from: {template_path}")
+        template_content = template_path.read_text()
+        template = Template(template_content, trim_blocks=True, lstrip_blocks=True)
+
+    except FileNotFoundError as e:
+        logger.error(f"Error loading template file: {e}. Aborting.")
+        raise  # Re-raise the exception to be caught in main()
+
+    # --- Render the template in one pass with all the data ---
+    logger.info("Rendering final output...")
+    # The template expects a variable named 'log_data'
+    final_output = template.render(log_data=log_data_list)
+
+    # --- Step 3: Write the final string to the file ---
+    try:
+        with output_path.open('w') as f_out:
+            f_out.write(final_output)
+        logger.info(f"Successfully generated synthetic data to '{output_path}'")
+    except IOError as e:
+        logger.error(f"Failed to write to output file '{output_path}': {e}")
+    except Exception as e:
+        logger.error(f"An unexpected error occurred during file write: {e}")
+
+def main():
+    """Main entry point for the script."""
+    parser = argparse.ArgumentParser(
+        description="Generate synthetic Loki log data from a single main template.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    # --- Required File Path Arguments ---
+    parser.add_argument("-o", "--output", required=True, help="Path to the output file.")
+    # --- Only one template argument is needed now ---
+    parser.add_argument("--template", required=True, help="Path to the main log template file (e.g., loki_main.tmpl).")
+
+    # --- Optional Generation Arguments ---
+    parser.add_argument("--days", type=int, default=30, help="How many days of data to generate, ending today.")
+    parser.add_argument("--step", type=int, default=300, help="Time step in seconds for each log entry.")
+
+    # --- Optional Utility Arguments ---
+    parser.add_argument("--debug", action="store_true", help="Enable debug level logging for verbose output.")
+
+    args = parser.parse_args()
+
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+        logger.debug("Debug mode enabled.")
+
+    # Define the time range for data generation
+    end_time_utc = datetime.now(timezone.utc)
+    start_time_utc = end_time_utc - timedelta(days=args.days)
+    logger.debug(f"Time range calculated: {start_time_utc} to {end_time_utc}")
+
+    # Run the generator
+    try:
+        generate_loki_data(
+            template_path=Path(args.template),
+            output_path=Path(args.output),
+            start_time=start_time_utc,
+            end_time=end_time_utc,
+            time_step_seconds=args.step
+        )
+    except FileNotFoundError:
+        logger.error("Process aborted because the template file was not found.")
+    except Exception as e:
+        logger.critical(f"A critical, unhandled error stopped the script: {e}")
+
+
+if __name__ == "__main__":
+    main()
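+
+# Example invocation (a sketch; paths are illustrative and assume the role's
+# files/ directory as the working directory):
+#
+#   ./gen_synth_loki_data.py --template loki_data_templ.j2 \
+#       --output /tmp/synthetic_loki_data.json --days 7 --step 300
+#
+# With --days 7 and --step 300 the loop above yields 7*24*3600/300 = 2016
+# data points, each rendered as two Loki stream entries by the template.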
diff --git a/roles/observe_chargeback/files/loki_data_templ.j2 b/roles/observe_chargeback/files/loki_data_templ.j2
new file mode 100644
index 00000000..e9132393
--- /dev/null
+++ b/roles/observe_chargeback/files/loki_data_templ.j2
@@ -0,0 +1,19 @@
+{# This is the static header from loki_header.tmpl #}
+{"streams": [{ "stream": { "service": "cloudkitty" }, "values": [
+{%- for item in log_data %}
+{#- This is the content from loki_step.tmpl, using 'item' from the loop #}
+[
+"{{ item.nanoseconds }}",
+"{\"start\": \"{{ item.start_time }}\", \"end\": \"{{ item.end_time }}\", \"type\": \"image.size\", \"unit\": \"MiB\", \"description\": null, \"qty\": 20.6875, \"price\": 0.0206875, \"groupby\": {\"id\": \"cd65d30f-8b94-4fa3-95dc-e3b429f479b2\", \"project_id\": \"0030775de80e4d84a4fd0d73e0a1b3a7\", \"user_id\": null, \"week_of_the_year\": \"37\", \"day_of_the_year\": \"258\", \"month\": \"9\", \"year\": \"2025\"}, \"metadata\": {\"container_format\": \"bare\", \"disk_format\": \"qcow2\"}}"
+],
+[
+"{{ item.nanoseconds }}",
+"{\"start\": \"{{ item.start_time }}\", \"end\": \"{{ item.end_time }}\", \"type\": \"instance\", \"unit\": \"instance\", \"description\": null, \"qty\": 1.0, \"price\": 0.3, \"groupby\": {\"id\": \"de168c31-ed44-4a1a-a079-51bd238a91d6\", \"project_id\": \"9cf5bcfc61a24682acc448af2d062ad2\", \"user_id\": \"c29ab6e886354bbd88ee9899e62d1d40\", \"week_of_the_year\": \"37\", \"day_of_the_year\": \"258\", \"month\": \"9\", \"year\": \"2025\"}, \"metadata\": {\"flavor_name\": \"m1.tiny\", \"flavor_id\": \"1\", \"vcpus\": \"\"}}"
+]
+{#- This logic adds a comma after every pair, *except* for the very last one. #}
+{%- if not loop.last -%}
+,
+{%- endif -%}
+{%- endfor %}
+{#- This is the static footer from loki_footer.tmpl #}
+]}]}
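+{#- Illustrative shape of the rendered result (truncated), which matches the
+    JSON body accepted by Loki's push API (POST /loki/api/v1/push):
+    {"streams": [{ "stream": { "service": "cloudkitty" }, "values": [
+      ["<ts ns>", "<escaped CloudKitty rating record: image.size>"],
+      ["<ts ns>", "<escaped CloudKitty rating record: instance>"],
+      ...
+    ]}]} -#}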
diff --git a/roles/observe_chargeback/meta/main.yml b/roles/observe_chargeback/meta/main.yml
new file mode 100644
index 00000000..98b3fe5d
--- /dev/null
+++ b/roles/observe_chargeback/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+  author: Alex Yefimov
+  description: Tests that the chargeback feature is set up in OpenStack running on OpenShift
+  company: Red Hat
+
+  license: Apache-2.0
+
+  min_ansible_version: "2.1"
+
+  galaxy_tags: []
+
+dependencies: []
diff --git a/roles/observe_chargeback/tasks/chargeback_tests.yml b/roles/observe_chargeback/tasks/chargeback_tests.yml
new file mode 100644
index 00000000..86f6dacc
--- /dev/null
+++ b/roles/observe_chargeback/tasks/chargeback_tests.yml
@@ -0,0 +1,34 @@
+---
+- name: Get status of all CloudKitty rating modules
+  ansible.builtin.command:
+    cmd: "{{ openstack_cmd }} rating module list"
+  changed_when: false
+  register: module_list
+
+- name: TEST Validate CloudKitty module states
+  ansible.builtin.assert:
+    that:
+      - "'noop' in module_list.stdout and 'True' in (module_list.stdout_lines | select('search', 'noop') | first)"
+      - "'hashmap' in module_list.stdout and 'True' in (module_list.stdout_lines | select('search', 'hashmap') | first)"
+      - "'pyscripts' in module_list.stdout and 'False' in (module_list.stdout_lines | select('search', 'pyscripts') | first)"
+    fail_msg: "CloudKitty module validation FAILED. Module states are not as expected."
+    success_msg: "SUCCESS: CloudKitty modules (noop=True, hashmap=True, pyscripts=False) are configured correctly."
+
+- name: Get the current priority of the hashmap module
+  ansible.builtin.shell:
+    cmd: "{{ openstack_cmd }} rating module get hashmap -c Priority -f csv | tail -n +2"
+  register: start_hashmap_priority
+  # This task only reads state, so it should never report a change.
+  changed_when: false
+
+- name: TEST Set priority for CloudKitty hashmap module
+  ansible.builtin.command:
+    cmd: "{{ openstack_cmd }} rating module set priority hashmap 100"
+  register: set_hashmap_priority
+  when: start_hashmap_priority.stdout | trim != '100'
+  # A non-zero exit code indicates failure; the CLI may print nothing on success.
+  failed_when: set_hashmap_priority.rc != 0
+  changed_when: set_hashmap_priority.rc == 0
+
+- name: Print result of hashmap priority value change
+  ansible.builtin.debug:
+    msg: "The hashmap priority is set to 100"
+  when: (start_hashmap_priority.stdout | trim == '100') or (set_hashmap_priority.rc is defined and set_hashmap_priority.rc == 0)
diff --git a/roles/observe_chargeback/tasks/main.yml b/roles/observe_chargeback/tasks/main.yml
new file mode 100644
index 00000000..0b44bad3
--- /dev/null
+++ b/roles/observe_chargeback/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: "Validate ChargeBack Feature"
+  ansible.builtin.include_tasks: "chargeback_tests.yml"