diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index abf5b36a1..8450c2625 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -9,10 +9,9 @@ inputs: description: 'The root directory of the gmt repository' required: false default: '.' - tests-command: - description: 'The command to run the tests' - required: false - default: 'pytest' + run-examples-directory-tests: + description: 'Run tests for examples directory instead of regular gmt tests' + default: false github-token: description: 'pass in your secrets.GITHUB_TOKEN' required: true @@ -86,15 +85,22 @@ runs: run: sleep 10s shell: bash - # - name: Setup upterm session - # uses: lhotari/action-upterm@v1 - - name: Run Tests + if: inputs.run-examples-directory-tests == 'false' + shell: bash + working-directory: ${{ inputs.gmt-directory }}/tests + run: | + source ../venv/bin/activate + python3 -m pytest -n auto -m "not serial" -rA | tee -a /tmp/test-results.txt + python3 -m pytest -m "serial" -rA | tee -a /tmp/test-results.txt + + - name: Run Tests (examples directory) + if: inputs.run-examples-directory-tests == 'true' shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | source ../venv/bin/activate - python3 -m ${{ inputs.tests-command }} -rA | tee /tmp/test-results.txt + python3 -m pytest ../../examples-directory/test/smoke_test.py -k "test_all_directories" -rA | tee -a /tmp/test-results.txt - name: Display Results shell: bash diff --git a/.github/workflows/tests-bare-metal-main.yml b/.github/workflows/tests-bare-metal-main.yml index cb3bd2c6a..a0512d262 100644 --- a/.github/workflows/tests-bare-metal-main.yml +++ b/.github/workflows/tests-bare-metal-main.yml @@ -38,6 +38,7 @@ jobs: with: metrics-to-turn-off: 'Machine Sensors Debug MacOS' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-solutions/eco-ci-energy-estimation@v2 diff --git a/.github/workflows/tests-eco-ci-energy-estimation.yaml b/.github/workflows/tests-eco-ci-energy-estimation.yaml index 4066d5e0b..d2b501f2e 100644 --- a/.github/workflows/tests-eco-ci-energy-estimation.yaml +++ b/.github/workflows/tests-eco-ci-energy-estimation.yaml @@ -32,6 +32,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-solutions/eco-ci-energy-estimation@testing diff --git a/.github/workflows/tests-vm-main.yml b/.github/workflows/tests-vm-main.yml index e67d90974..887fbc781 100644 --- a/.github/workflows/tests-vm-main.yml +++ b/.github/workflows/tests-vm-main.yml @@ -36,6 +36,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-solutions/eco-ci-energy-estimation@v2 diff --git a/.github/workflows/tests-vm-pr.yml b/.github/workflows/tests-vm-pr.yml index 4c710c210..3f494123c 100644 --- a/.github/workflows/tests-vm-pr.yml +++ b/.github/workflows/tests-vm-pr.yml @@ -27,6 +27,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: 
${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-solutions/eco-ci-energy-estimation@v2 diff --git a/lib/utils.py b/lib/utils.py index c98230928..e41ca0c78 100644 --- a/lib/utils.py +++ b/lib/utils.py @@ -2,6 +2,8 @@ import string import subprocess import psycopg +import os +from pathlib import Path from lib.db import DB @@ -61,3 +63,41 @@ def get_architecture(): if output == 'darwin': return 'macos' return output + +# This function takes a path and a file and joins them while making sure that no one is trying to escape the +# path with `..`, symbolic links or similar. +# We always return the same error message including the path and file parameter, never `filename` as +# otherwise we might disclose if certain files exist or not. +def join_paths(path, path2, mode='file'): + filename = os.path.realpath(os.path.join(path, path2)) + + # If the original path is a symlink we need to resolve it. + path = os.path.realpath(path) + + # This is a special case in which the file is '.' + if filename == path.rstrip('/'): + return filename + + if not filename.startswith(path): + raise ValueError(f"{path2} must not be in folder above {path}") + + # To double check we also check if it is in the files allow list + + if mode == 'file': + folder_content = [str(item) for item in Path(path).rglob("*") if item.is_file()] + elif mode == 'directory': + folder_content = [str(item) for item in Path(path).rglob("*") if item.is_dir()] + else: + raise RuntimeError(f"Unknown mode supplied for join_paths: {mode}") + + if filename not in folder_content: + raise ValueError(f"{mode.capitalize()} '{path2}' not in '{path}'") + + # Another way to implement this. This is checking the third time but we want to be extra secure 👾 + if Path(path).resolve(strict=True) not in Path(path, path2).resolve(strict=True).parents: + raise ValueError(f"{mode.capitalize()} '{path2}' not in folder '{path}'") + + if os.path.exists(filename): + return filename + + raise FileNotFoundError(f"{path2} in {path} not found") diff --git a/lib/yml_helpers.py b/lib/yml_helpers.py new file mode 100644 index 000000000..c7e04cffc --- /dev/null +++ b/lib/yml_helpers.py @@ -0,0 +1,44 @@ +#pylint: disable=too-many-ancestors + +import yaml +import os +from lib import utils + +class Loader(yaml.SafeLoader): + def __init__(self, stream): + # We need to find our own root as the Loader is instantiated in PyYaml + self._root = os.path.split(stream.name)[0] + super().__init__(stream) + + def include(self, node): + # We allow two types of includes + # !include => ScalarNode + # and + # !include => SequenceNode + if isinstance(node, yaml.nodes.ScalarNode): + nodes = [self.construct_scalar(node)] + elif isinstance(node, yaml.nodes.SequenceNode): + nodes = self.construct_sequence(node) + else: + raise ValueError("We don't support Mapping Nodes to date") + + filename = utils.join_paths(self._root, nodes[0], 'file') + + with open(filename, 'r', encoding='utf-8') as f: + # We want to enable a deep search for keys + def recursive_lookup(k, d): + if k in d: + return d[k] + for v in d.values(): + if isinstance(v, dict): + return recursive_lookup(k, v) + return None + + # We can use load here as the Loader extends SafeLoader + if len(nodes) == 1: + # There is no selector specified + return yaml.load(f, Loader) + + return recursive_lookup(nodes[1], yaml.load(f, Loader)) + +Loader.add_constructor('!include', Loader.include) diff --git a/requirements-dev.txt b/requirements-dev.txt index 
95791b718..a9f7b6cdc 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -6,6 +6,7 @@ pylint==3.0.3 fastapi==0.109.2 starlette>=0.32 anybadge==1.14.0 +pytest-xdist==3.5.0 # just to clear the pylint errors for the files in /api scipy==1.12.0 diff --git a/runner.py b/runner.py index 9f6899a8a..85f9631e1 100755 --- a/runner.py +++ b/runner.py @@ -38,52 +38,13 @@ from lib.global_config import GlobalConfig from lib.notes import Notes from lib import system_checks +from lib.yml_helpers import Loader from tools.machine import Machine def arrows(text): return f"\n\n>>>> {text} <<<<\n\n" -# This function takes a path and a file and joins them while making sure that no one is trying to escape the -# path with `..`, symbolic links or similar. -# We always return the same error message including the path and file parameter, never `filename` as -# otherwise we might disclose if certain files exist or not. -def join_paths(path, path2, mode='file'): - filename = os.path.realpath(os.path.join(path, path2)) - - # If the original path is a symlink we need to resolve it. - path = os.path.realpath(path) - - # This is a special case in which the file is '.' - if filename == path.rstrip('/'): - return filename - - if not filename.startswith(path): - raise ValueError(f"{path2} must not be in folder above {path}") - - # To double check we also check if it is in the files allow list - - if mode == 'file': - folder_content = [str(item) for item in Path(path).rglob("*") if item.is_file()] - elif mode == 'directory': - folder_content = [str(item) for item in Path(path).rglob("*") if item.is_dir()] - else: - raise RuntimeError(f"Unknown mode supplied for join_paths: {mode}") - - if filename not in folder_content: - raise ValueError(f"{mode.capitalize()} '{path2}' not in '{path}'") - - # Another way to implement this. 
This is checking the third time but we want to be extra secure 👾 - if Path(path).resolve(strict=True) not in Path(path, path2).resolve(strict=True).parents: - raise ValueError(f"{mode.capitalize()} '{path2}' not in folder '{path}'") - - if os.path.exists(filename): - return filename - - raise FileNotFoundError(f"{path2} in {path} not found") - - - class Runner: def __init__(self, name, uri, uri_type, filename='usage_scenario.yml', branch=None, @@ -241,47 +202,7 @@ def checkout_repository(self): # Inspiration from https://github.com/tanbro/pyyaml-include which we can't use as it doesn't # do security checking and has no option to select when imported def load_yml_file(self): - #pylint: disable=too-many-ancestors - class Loader(yaml.SafeLoader): - def __init__(self, stream): - # We need to find our own root as the Loader is instantiated in PyYaml - self._root = os.path.split(stream.name)[0] - super().__init__(stream) - - def include(self, node): - # We allow two types of includes - # !include => ScalarNode - # and - # !include => SequenceNode - if isinstance(node, yaml.nodes.ScalarNode): - nodes = [self.construct_scalar(node)] - elif isinstance(node, yaml.nodes.SequenceNode): - nodes = self.construct_sequence(node) - else: - raise ValueError("We don't support Mapping Nodes to date") - - filename = join_paths(self._root, nodes[0], 'file') - - with open(filename, 'r', encoding='utf-8') as f: - # We want to enable a deep search for keys - def recursive_lookup(k, d): - if k in d: - return d[k] - for v in d.values(): - if isinstance(v, dict): - return recursive_lookup(k, v) - return None - - # We can use load here as the Loader extends SafeLoader - if len(nodes) == 1: - # There is no selector specified - return yaml.load(f, Loader) - - return recursive_lookup(nodes[1], yaml.load(f, Loader)) - - Loader.add_constructor('!include', Loader.include) - - usage_scenario_file = join_paths(self._folder, self._original_filename, 'file') + usage_scenario_file = utils.join_paths(self._folder, self._original_filename, 'file') # We set the working folder now to the actual location of the usage_scenario if '/' in self._original_filename: @@ -563,8 +484,8 @@ def build_docker_images(self): self.__notes_helper.add_note({'note': f"Building {service['image']}", 'detail_name': '[NOTES]', 'timestamp': int(time.time_ns() / 1_000)}) # Make sure the context docker file exists and is not trying to escape some root. We don't need the returns - context_path = join_paths(self._folder, context, 'directory') - join_paths(context_path, dockerfile, 'file') + context_path = utils.join_paths(self._folder, context, 'directory') + utils.join_paths(context_path, dockerfile, 'file') docker_build_command = ['docker', 'run', '--rm', '-v', f"{self._folder}:/workspace:ro", # this is the folder where the usage_scenario is! @@ -671,7 +592,6 @@ def setup_services(self): # If so, change the order of the services accordingly. 
services_ordered = self.order_services(services) for service_name, service in services_ordered.items(): - if 'container_name' in service: container_name = service['container_name'] else: @@ -817,7 +737,6 @@ def setup_services(self): docker_run_string.append('--net') docker_run_string.append(self.__networks[0]) - if 'pause-after-phase' in service: self.__services_to_pause_phase[service['pause-after-phase']] = self.__services_to_pause_phase.get(service['pause-after-phase'], []) + [container_name] diff --git a/tests/README.MD b/tests/README.MD index fabdaf9e6..a5657f2e5 100644 --- a/tests/README.MD +++ b/tests/README.MD @@ -23,12 +23,9 @@ run: `python3 setup-test-env.py` -from the test directory. This will create a copy of the `config.yml` and docker `compose.yml` files that will be used in +from the test directory. This will create a copy of the docker `compose.yml` file that will be used in the test containers. Please make sure that you have compiled all the metric providers and source code in lib. You can do -this automatically by using the `install.sh` command. - -You will need to re-run this setup script if new metric providers are added or the config.yml is otherwise changed in a -significant way. +this automatically by using the `install_linux.sh` or `install_mac.sh` command. ## Running @@ -42,9 +39,25 @@ There are a few scripts to make this easy. `./run-tests.sh` will do everything - start the containers, run pytest, and then stop the containers. The recommended workflow is to start the containers with the `./start-test-containers.sh` script, then in another shell -window run the pytest suite using `pytest`, and then stop the containers when your test run has finished. +window run the pytest suite using: + +`pytest -n auto -m "not serial" --dist loadgroup && pytest -m "serial"`, and then stop the containers when your test run has finished. Running a subset of tests using pytest is better explained within the documentation here: https://docs.pytest.org/en/7.2.x/how-to/usage.html You can also do everything in one command using the `./run-tests.sh` script. + + +## Parallelization +We now support running our test suite in parallel using pytest-xdist. When writing tests it is important to note that not all tests can be parallelized, and the ones that cannot must be marked accordingly. For parallelization, we use functions in test_functions.py to set up the environment with unique container names, as well as to set up the runner via `setup_runner` so that its tmp folders are also unique. If you bypass `setup_runner`, you will still need to use the `parallelize_runner_folders` function to make sure the runner's internal directories are correct. + +Any test that cannot be parallelized should be marked with: +`@pytest.mark.serial` + +This includes any test that runs the runner through a subprocess, or otherwise creates a Runner class without using either `test_functions.setup_runner` or `test_functions.parallelize_runner_folders`. + +- Tests that do not skip system checks can be parallelized, but only if they are marked with +`@pytest.mark.xdist_group(name="systems_checks")` + +This will make all tests that use this group name run sequentially on the same thread (but in parallel to the rest of the suite). This is needed because we have a system check which makes sure the metric providers are not already running during setup.
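+
+For illustration, here is a minimal sketch of how these markers might be applied (the test names and the scenario file used below are placeholders, not actual tests from the suite):
+
+```python
+import pytest
+from tests import test_functions as Tests
+
+# Runs runner.py through a subprocess and therefore bypasses setup_runner,
+# so it cannot be parallelized.
+@pytest.mark.serial
+def test_example_via_subprocess():
+    ...
+
+# Does not skip the system checks, so it must join the "systems_checks" group
+# and will run sequentially with the other tests of that group.
+@pytest.mark.xdist_group(name="systems_checks")
+def test_example_with_system_checks():
+    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_system_checks=False)
+    try:
+        runner.run()
+    finally:
+        runner.cleanup()
+```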
\ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index 111c76f9d..3cb5717b2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,29 +1,56 @@ import pytest - +import os +import shutil from lib.db import DB +import subprocess ## VERY IMPORTANT to override the config file here ## otherwise it will automatically connect to non-test DB and delete all your real data from lib.global_config import GlobalConfig GlobalConfig().override_config(config_name='test-config.yml') +CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) + def pytest_collection_modifyitems(items): for item in items: if item.fspath.basename == 'test_functions.py': item.add_marker(pytest.mark.skip(reason='Skipping this file')) -# should we hardcode test-db here? -@pytest.fixture(autouse=True) -def cleanup_after_test(): - yield +def cleanup_tables(): tables = DB().fetch_all("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'") for table in tables: table_name = table[0] DB().query(f'TRUNCATE TABLE "{table_name}" RESTART IDENTITY CASCADE') -### If you wish to turn off the above auto-cleanup per test, include the following in your ### test module: # from conftest import cleanup_after_test # @pytest.fixture(autouse=False) # Set autouse to False to override the fixture # def cleanup_after_test(): # pass +def cleanup_temp_directories(): + tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') + if os.path.exists(tmp_dir): + for item in os.listdir(tmp_dir): + item_path = os.path.join(tmp_dir, item) + if os.path.isfile(item_path): + os.remove(item_path) + elif os.path.isdir(item_path): + shutil.rmtree(item_path) + if os.path.exists("/tmp/gmt-test-data/"): + shutil.rmtree("/tmp/gmt-test-data/") + + # remove all files/folders under gmt_tests-* in /tmp + for item in os.listdir("/tmp"): + if item.startswith('gmt_tests-'): + item_path = os.path.join("/tmp", item) + if os.path.isdir(item_path): + shutil.rmtree(item_path) + +def build_image_fixture(): + uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) + subprocess.run(['docker', 'compose', '-f', uri+'/compose_gmt_run_tmp.yml', 'build'], check=True) + GlobalConfig().override_config(config_name='test-config.yml') + +def pytest_sessionstart(session): + if not hasattr(session.config, 'workerinput'): + build_image_fixture() + +def pytest_sessionfinish(session): + if not hasattr(session.config, 'workerinput'): + cleanup_tables() + cleanup_temp_directories() diff --git a/tests/data/usage_scenarios/env_vars_stress_allowed.yml b/tests/data/usage_scenarios/env_vars_stress_allowed.yml index e82aa8952..2aadd59b5 100644 --- a/tests/data/usage_scenarios/env_vars_stress_allowed.yml +++ b/tests/data/usage_scenarios/env_vars_stress_allowed.yml @@ -2,7 +2,6 @@ name: Test Stress author: Dan Mateas description: test -description: test services: test-container: diff --git a/tests/data/usage_scenarios/network_stress.yml b/tests/data/usage_scenarios/network_stress.yml index 9f90b4e9a..7251a22e7 100644 --- a/tests/data/usage_scenarios/network_stress.yml +++ b/tests/data/usage_scenarios/network_stress.yml @@ -6,7 +6,7 @@ description: test networks: gmt-test-network: - + services: test-container: type: container diff --git a/tests/data/usage_scenarios/stress_application.yml b/tests/data/usage_scenarios/stress_application.yml new file mode 100644 index 000000000..0cbe010e3 --- /dev/null +++ b/tests/data/usage_scenarios/stress_application.yml @@ -0,0 +1,40 @@ +--- +# Important +# Please remember that any structural change to this file should
+# also be reflected in the simple example we provide in the documentation: +# https://docs.green-coding.io/docs/measuring/measuring-locally/ + + +name: Stress Container One Core 5 Seconds +author: Arne Tarara +description: test + +networks: + network-for-pytests: + +services: + ubuntu-stress: + type: container + image: gcb_stress + networks: + - network-for-pytests + build: + context: . + dockerfile: Dockerfile + + ubuntu-stress-2: + type: container + image: gcb_stress # this will reuse the image earlier built + networks: + - network-for-pytests + +flow: + - name: Stress + container: ubuntu-stress + commands: +# Alpine does not have stress, so we use stress-ng +# We need the -q flag because otherwise it will write debug to STDERR + - type: console + command: stress-ng -c 1 -t 1 -q + note: Starting Stress diff --git a/tests/pytest.ini b/tests/pytest.ini new file mode 100644 index 000000000..fc4babc07 --- /dev/null +++ b/tests/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +markers = + serial: tests that cannot be run during parallelization (requires DB to be in specific states) \ No newline at end of file diff --git a/tests/run-tests.sh b/tests/run-tests.sh index f7e643b12..95696e1d6 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -3,7 +3,8 @@ echo "Starting test containers..." ./start-test-containers.sh &>/dev/null & sleep 2 echo "Running pytest..." -pytest +pytest -n auto -m "not serial" --dist loadgroup +pytest -m "serial" echo "Stopping test containers..." ./stop-test-containers.sh &>/dev/null & -echo "fin" \ No newline at end of file +echo "fin" diff --git a/tests/session_config.txt b/tests/session_config.txt new file mode 100644 index 000000000..fdd05d143 --- /dev/null +++ b/tests/session_config.txt @@ -0,0 +1 @@ +<_pytest.config.Config object at 0x7f75c1ae49d0> \ No newline at end of file diff --git a/tests/smoke_test.py b/tests/smoke_test.py index c02577685..46d7f1ccc 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -2,48 +2,40 @@ import os import subprocess import re +import pytest +import shutil CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) from contextlib import redirect_stdout, redirect_stderr -import pytest from lib.db import DB from lib import utils from lib.global_config import GlobalConfig -from runner import Runner +from tests import test_functions as Tests run_stderr = None run_stdout = None RUN_NAME = 'test_' + utils.randomword(12) - -# override per test cleanup, as the module setup requires writing to DB -@pytest.fixture(autouse=False) -def cleanup_after_test(): - pass - -#pylint: disable=unused-argument # unused arguement off for now - because there are no running tests in this file -def cleanup_after_module(autouse=True, scope="module"): - yield - tables = DB().fetch_all("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'") - for table in tables: - table_name = table[0] - DB().query(f'TRUNCATE TABLE "{table_name}" RESTART IDENTITY CASCADE') - # Runs once per file before any test( #pylint: disable=expression-not-assigned -def setup_module(module): +def setup_module(): + parallel_id = utils.randomword(12) + test_case_path=os.path.join(CURRENT_DIR, 'stress-application/') + tmp_dir_path=os.path.join(CURRENT_DIR, 'tmp', parallel_id) + shutil.copytree(test_case_path, tmp_dir_path) + + out = io.StringIO() err = io.StringIO() GlobalConfig(config_name='test-config.yml').config with redirect_stdout(out), redirect_stderr(err): - uri = os.path.abspath(os.path.join(CURRENT_DIR,
'stress-application/')) + uri = os.path.abspath(tmp_dir_path) subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) # Run the application - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=False, skip_system_checks=False) + runner = Tests.setup_runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_no_metrics=False, skip_system_checks=False, create_tmp_directory=False, parallel_id=parallel_id) runner.run() #pylint: disable=global-statement @@ -51,15 +43,18 @@ def setup_module(module): run_stderr = err.getvalue() run_stdout = out.getvalue() +@pytest.mark.xdist_group(name="systems_checks") def test_no_errors(): # Assert that there is no std.err output assert run_stderr == '' +@pytest.mark.xdist_group(name="systems_checks") def test_cleanup_success(): # Assert that Cleanup has run assert re.search( 'MEASUREMENT SUCCESSFULLY COMPLETED', run_stdout) +@pytest.mark.xdist_group(name="systems_checks") def test_db_rows_are_written_and_presented(): # for every metric provider, check that there were rows written in the DB with info for that provider # also check (in the same test, to save on a DB call) that the output to STD.OUT diff --git a/tests/stress-application/compose_gmt_run_tmp.yml b/tests/stress-application/compose_gmt_run_tmp.yml new file mode 100644 index 000000000..6cbc53596 --- /dev/null +++ b/tests/stress-application/compose_gmt_run_tmp.yml @@ -0,0 +1,7 @@ +version: '2' +services: + stress: + build: . + image: gcb_stress_gmt_run_tmps + container_name: gcb_stress + restart: always diff --git a/tests/stress-application/usage_scenario.yml b/tests/stress-application/usage_scenario.yml index 0cbe010e3..f4e898f5f 100644 --- a/tests/stress-application/usage_scenario.yml +++ b/tests/stress-application/usage_scenario.yml @@ -8,7 +8,6 @@ name: Stress Container One Core 5 Seconds author: Arne Tarara description: test -description: test networks: network-for-pytests: diff --git a/tests/test_config_opts.py b/tests/test_config_opts.py index 85e46185e..4cfd9993e 100644 --- a/tests/test_config_opts.py +++ b/tests/test_config_opts.py @@ -1,5 +1,4 @@ import os -import subprocess import pytest CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -24,11 +23,6 @@ def reset_config_fixture(): config['measurement']['idle-time-end'] = idle_time_end config['measurement']['flow-process-runtime'] = flow_process_runtime -@pytest.fixture(autouse=True, scope="module", name="build_image") -def build_image_fixture(): - uri = os.path.abspath(os.path.join( - CURRENT_DIR, 'stress-application/')) - subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) #pylint: disable=expression-not-assigned def run_runner(): @@ -36,8 +30,8 @@ def run_runner(): CURRENT_DIR, 'stress-application/')) # Run the application - RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', verbose_provider_boot=True, dev_repeat_run=True, skip_system_checks=True) + name = 'test_' + utils.randomword(12) + runner = Runner(name=name, uri=uri, uri_type='folder', verbose_provider_boot=True, skip_system_checks=True) return runner.run() # Rethink how to do this test entirely diff --git a/tests/test_functions.py b/tests/test_functions.py index 8ce135d4b..ce5f39ebc 100644 --- a/tests/test_functions.py +++ b/tests/test_functions.py @@ -1,12 +1,14 @@ import os import re import shutil +import yaml CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) from pathlib import Path from 
lib.global_config import GlobalConfig +from lib.yml_helpers import Loader from lib import utils from runner import Runner @@ -17,13 +19,14 @@ def make_proj_dir(dir_name, usage_scenario_path, docker_compose_path=None): if not os.path.exists('tmp/' + dir_name): os.mkdir('tmp/' + dir_name) - shutil.copy2(usage_scenario_path, os.path.join(CURRENT_DIR, 'tmp' ,dir_name)) + dir_path = os.path.join(CURRENT_DIR, 'tmp' ,dir_name) + shutil.copy2(usage_scenario_path, dir_path) # copy over compose.yml and Dockerfile (from stress for now) if docker_compose_path is not None: shutil.copy2(docker_compose_path, os.path.join(CURRENT_DIR, 'tmp' ,dir_name)) dockerfile = os.path.join(CURRENT_DIR, 'stress-application/Dockerfile') shutil.copy2(dockerfile, os.path.join(CURRENT_DIR, 'tmp' ,dir_name)) - return dir_name + return dir_path def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filename): with open(usage_scenario_path, 'r', encoding='utf-8') as file: @@ -32,30 +35,117 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena with open(usage_scenario_path, 'w', encoding='utf-8') as file: file.write(data) +def parallelize_runner_folders(runner, parallel_id): + runner._tmp_folder = f"/tmp/gmt_tests-{parallel_id}/green-metrics-tool/" + runner._folder = f"{runner._tmp_folder}/repo" -def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder', branch=None, - debug_mode=False, allow_unsafe=False, no_file_cleanup=False, - skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=False, skip_system_checks=True, - dev_no_sleeps=True, dev_no_metrics=True): - usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario) - if docker_compose is not None: - docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose) - else: - docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/compose.yml') +def edit_yml_with_id(yml_path, parallel_id): + with open(yml_path, 'r', encoding='utf-8') as fp: + yml_data = yaml.load(fp, Loader=Loader) + + # Update services + services_copy = dict(yml_data.get('services', {})) + for service_name, service_info in services_copy.items(): + new_service_name = f"{service_name}-{parallel_id}" + yml_data['services'][new_service_name] = service_info + del yml_data['services'][service_name] + + # Update networks within service + service_networks = service_info.get('networks') + if service_networks: + if isinstance(service_networks, list): + service_info['networks'] = [f"{network}-{parallel_id}" for network in service_networks] + elif isinstance(service_networks, dict): + service_info['networks'] = {f"{key}-{parallel_id}": value for key, value in service_networks.items()} + + if 'container_name' in service_info: + service_info['container_name'] = f"{service_info['container_name']}-{parallel_id}" + + if 'depends_on' in service_info: + if isinstance(service_info['depends_on'], list): + service_info['depends_on'] = [f"{dep}-{parallel_id}" for dep in service_info['depends_on']] + elif isinstance(service_info['depends_on'], dict): + service_info['depends_on'] = {f"{key}-{parallel_id}": value for key, value in service_info['depends_on'].items()} + else: + service_info['depends_on'] = f"{service_info['depends_on']}-{parallel_id}" + + + # top level networks + networks = yml_data.get('networks') + if networks: + if isinstance(networks, list): + yml_data['networks'] = [f"{network}-{parallel_id}" for network in networks] + elif 
isinstance(networks, dict): + yml_data['networks'] = {f"{key}-{parallel_id}": value for key, value in networks.items()} + + # Update container names in the flow section + for item in yml_data.get('flow', []): + if 'container' in item: + item['container'] = f"{item['container']}-{parallel_id}" + + # Save the updated YAML file + with open(yml_path, 'w', encoding='utf-8') as fp: + yaml.dump(yml_data, fp, sort_keys=False) #sort_keys=False preserves the original order + +def parallelize_files(proj_dir, usage_scenario_file, docker_compose='compose.yml', parallel_id=None): + if parallel_id is None: + parallel_id = utils.randomword(12) + if docker_compose is None: + docker_compose = 'compose.yml' + usage_scenario_path = os.path.join(proj_dir, usage_scenario_file) + docker_compose_path = os.path.join(proj_dir, docker_compose) + + # need to do docker compose first, in case its loaded by the usage_scenario + edit_yml_with_id(docker_compose_path, parallel_id) + edit_yml_with_id(usage_scenario_path, parallel_id) - if uri == 'default': + +def setup_runner(name=None, usage_scenario="usage_scenario.yml", docker_compose=None, uri='default', + uri_type='folder', branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False, + skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=True, skip_system_checks=True, + dev_no_sleeps=True, dev_no_metrics=True, parallel_id=None, create_tmp_directory=True, do_parallelize_files=True): + + if parallel_id is None: + parallel_id = utils.randomword(12) + + # parallelization of files only for uri_type folders, so far + # because url type means we are checking out a repo, and that happens already too late + if uri_type == 'folder': if dir_name is None: - dir_name = utils.randomword(12) - make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) - uri = os.path.join(CURRENT_DIR, 'tmp/', dir_name) + dir_name = parallel_id + + if create_tmp_directory: + if docker_compose is not None: + docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose) + else: + docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/compose.yml') + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario) + make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + + tmp_dir_path = os.path.join(CURRENT_DIR, 'tmp/', dir_name) + if uri == 'default': + uri = tmp_dir_path + if do_parallelize_files: + parallelize_files(tmp_dir_path, usage_scenario, docker_compose, parallel_id) + elif uri_type == 'URL': + if uri[0:8] != 'https://' and uri[0:7] != 'http://': + raise ValueError("Invalid uri for URL") + else: + raise ValueError("Invalid uri_type") - RUN_NAME = 'test_' + utils.randomword(12) + if name is None: + name = f'test-{parallel_id}' - return Runner(name=RUN_NAME, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, + runner = Runner(name=name, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, debug_mode=debug_mode, allow_unsafe=allow_unsafe, no_file_cleanup=no_file_cleanup, skip_unsafe=skip_unsafe, verbose_provider_boot=verbose_provider_boot, dev_no_build=dev_no_build, skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics) + parallelize_runner_folders(runner, parallel_id) + + return runner + + # This function runs the runner up to and *including* the specified step # remember to catch in try:finally 
and do cleanup when calling this! #pylint: disable=redefined-argument-from-local @@ -157,7 +247,6 @@ def cleanup(runner): finally: runner.cleanup() # always run cleanup automatically after each run - def assertion_info(expected, actual): return f"Expected: {expected}, Actual: {actual}" @@ -165,3 +254,7 @@ def create_test_file(path): if not os.path.exists(path): os.mkdir(path) Path(f"{path}/test-file").touch() + +# test this file +if __name__ == '__main__': + setup_runner('import_error.yml', parallel_id=123) diff --git a/tests/test_runner.py b/tests/test_runner.py index 7ecc03a7d..9a46d5a0e 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -35,10 +35,21 @@ def test_check_system(skip_system_checks, expectation): del GlobalConfig().config['measurement']['metric-providers']['common']['psu.energy.ac.foo.machine.provider.SomeProvider'] del GlobalConfig().config['measurement']['metric-providers']['common']['psu.energy.ac.bar.machine.provider.SomeOtherProvider'] +@pytest.mark.xdist_group(name="systems_checks") def test_reporters_still_running(): - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_sleeps=True, dev_no_build=True, dev_no_metrics=False) + if GlobalConfig().config['measurement']['metric-providers']['linux'] is None: + GlobalConfig().config['measurement']['metric-providers']['linux'] = {} - runner2 = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_sleeps=True, dev_no_build=True, dev_no_metrics=False) + real_provider = { + 'cpu.utilization.procfs.system.provider.CpuUtilizationProcfsSystemProvider': { + 'resolution': 99 + } + } + GlobalConfig().config['measurement']['metric-providers']['linux'].update(real_provider) + + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_metrics=False) + + runner2 = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_metrics=False) runner.check_system('start') # should not fail @@ -54,3 +65,4 @@ def test_reporters_still_running(): finally: Tests.cleanup(runner) Tests.cleanup(runner2) + del GlobalConfig().config['measurement']['metric-providers']['linux']['cpu.utilization.procfs.system.provider.CpuUtilizationProcfsSystemProvider'] diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index cacdfa9fe..d81edae57 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -5,7 +5,6 @@ import io import os import re -import shutil import subprocess CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -28,23 +27,6 @@ # Always do asserts after try:finally: blocks # otherwise failing Tests will not run the runner.cleanup() properly -# This should be done once per module -@pytest.fixture(autouse=True, scope="module", name="build_image") -def build_image_fixture(): - uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) - GlobalConfig().override_config(config_name='test-config.yml') - -# cleanup test/tmp directory after every test run -@pytest.fixture(autouse=True, name="cleanup_tmp_directories") -def cleanup_tmp_directories_fixture(): - yield - tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') - if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) - if os.path.exists('/tmp/gmt-test-data'): - shutil.rmtree('/tmp/gmt-test-data') - # This function runs the runner up to and 
*including* the specified step #pylint: disable=redefined-argument-from-local ### The Tests for usage_scenario configurations @@ -52,12 +34,12 @@ def cleanup_tmp_directories_fixture(): # environment: [object] (optional) # Key-Value pairs for ENV variables inside the container -def get_env_vars(runner): +def get_env_vars(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', 'test-container', '/bin/sh', + ['docker', 'exec', f"test-container-{parallel_id}", '/bin/sh', '-c', 'env'], check=True, stderr=subprocess.PIPE, @@ -71,8 +53,9 @@ def get_env_vars(runner): # Test allowed characters def test_env_variable_allowed_characters(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - env_var_output = get_env_vars(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, parallel_id=parallel_id) + env_var_output = get_env_vars(runner, parallel_id) assert 'TESTALLOWED=alpha-num123_' in env_var_output, Tests.assertion_info('TESTALLOWED=alpha-num123_', env_var_output) assert 'TEST1_ALLOWED=alpha-key-num123_' in env_var_output, Tests.assertion_info('TEST1_ALLOWED=alpha-key-num123_', env_var_output) @@ -81,16 +64,18 @@ def test_env_variable_allowed_characters(): # Test too long values def test_env_variable_too_long(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: - get_env_vars(runner) + get_env_vars(runner, parallel_id) assert 'TEST_TOO_LONG' in str(e.value), Tests.assertion_info("Env var value is too long", str(e.value)) # Test skip_unsafe=true def test_env_variable_skip_unsafe_true(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - env_var_output = get_env_vars(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, parallel_id=parallel_id) + env_var_output = get_env_vars(runner, parallel_id) # Only allowed values should be in env vars, forbidden ones should be skipped assert 'TEST_ALLOWED' in env_var_output, Tests.assertion_info('TEST_ALLOWED in env vars', env_var_output) @@ -98,8 +83,9 @@ def test_env_variable_skip_unsafe_true(): # Test allow_unsafe=true def test_env_variable_allow_unsafe_true(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - env_var_output = get_env_vars(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', allow_unsafe=True, parallel_id=parallel_id) + env_var_output = get_env_vars(runner, parallel_id) # Both allowed and forbidden values should be in env vars assert 'TEST_ALLOWED' in env_var_output, Tests.assertion_info('TEST_ALLOWED in env vars', env_var_output) @@ -108,11 +94,11 @@ def test_env_variable_allow_unsafe_true(): # ports: [int:int] (optional) # Docker container portmapping on host OS to be used with --allow-unsafe flag. 
-def get_port_bindings(runner): +def get_port_bindings(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'port', 'test-container', '9018'], + ['docker', 'port', f"test-container-{parallel_id}", '9018'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -125,20 +111,22 @@ def get_port_bindings(runner): return port, err def test_port_bindings_allow_unsafe_true(): - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - port, _ = get_port_bindings(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, parallel_id=parallel_id) + port, _ = get_port_bindings(runner, parallel_id) assert port.startswith('0.0.0.0:9017'), Tests.assertion_info('0.0.0.0:9017', port) def test_port_bindings_skip_unsafe_true(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, parallel_id=parallel_id) # need to catch exception here as otherwise the subprocess returning an error will # fail the test with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception): - _, docker_port_err = get_port_bindings(runner) - expected_container_error = 'Error: No public port \'9018/tcp\' published for test-container\n' + _, docker_port_err = get_port_bindings(runner, parallel_id) + expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container-{parallel_id}\n" assert docker_port_err == expected_container_error, \ Tests.assertion_info(f"Container Error: {expected_container_error}", docker_port_err) expected_warning = 'Found ports entry but not running in unsafe mode. 
Skipping' @@ -146,10 +134,11 @@ def test_port_bindings_skip_unsafe_true(): Tests.assertion_info(f"Warning: {expected_warning}", 'no/different warning') def test_port_bindings_no_skip_or_allow(): - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', parallel_id=parallel_id) with pytest.raises(Exception) as e: - _, docker_port_err = get_port_bindings(runner) - expected_container_error = 'Error: No public port \'9018/tcp\' published for test-container\n' + _, docker_port_err = get_port_bindings(runner, parallel_id) + expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container-{parallel_id}\n" assert docker_port_err == expected_container_error, \ Tests.assertion_info(f"Container Error: {expected_container_error}", docker_port_err) expected_error = 'Found "ports" but neither --skip-unsafe nor --allow-unsafe is set' @@ -162,14 +151,15 @@ def test_port_bindings_no_skip_or_allow(): def test_setup_commands_one_command(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - assert 'Running command: docker exec test-container sh -c ps -a' in out.getvalue(), \ + assert f"Running command: docker exec test-container-{parallel_id} sh -c ps -a" in out.getvalue(), \ Tests.assertion_info('stdout message: Running command: docker exec ps -a', out.getvalue()) assert '1 root 0:00 /bin/sh' in out.getvalue(), \ Tests.assertion_info('container stdout showing /bin/sh as process 1', 'different message in container stdout') @@ -177,7 +167,8 @@ def test_setup_commands_one_command(): def test_setup_commands_multiple_commands(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -185,17 +176,17 @@ def test_setup_commands_multiple_commands(): finally: runner.cleanup() - expected_pattern = re.compile(r'Running command: docker exec test-container echo hello world.*\ + expected_pattern = re.compile(fr"Running command: docker exec test-container-{parallel_id} echo hello world.*\ \s*Stdout: hello world.*\ \s*Stderr:.*\ -\s*Running command: docker exec test-container ps -a.*\ +\s*Running command: docker exec test-container-{parallel_id} ps -a.*\ \s*Stdout:\s+PID\s+USER\s+TIME\s+COMMAND.*\ \s*1\s+root\s+\d:\d\d\s+/bin/sh.*\ \s*1\d+\s+root\s+\d:\d\d\s+ps -a.*\ \s*Stderr:.*\ -\s*Running command: docker exec test-container echo goodbye world.*\ +\s*Running command: docker exec test-container-{parallel_id} echo goodbye world.*\ \s*Stdout: goodbye world.*\ -', re.MULTILINE) +", re.MULTILINE) assert re.search(expected_pattern, out.getvalue()), \ Tests.assertion_info('container stdout showing 3 commands run in sequence',\ @@ -206,11 +197,11 @@ def create_test_file(path): os.mkdir(path) 
Path(f"{path}/test-file").touch() -def get_contents_of_bound_volume(runner): +def get_contents_of_bound_volume(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', 'test-container', 'ls', '/tmp/test-data'], + ['docker', 'exec', f"test-container-{parallel_id}", 'ls', '/tmp/test-data'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -237,7 +228,8 @@ def assert_order(text, first, second): def test_depends_on_order(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='depends_on.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -246,15 +238,16 @@ def test_depends_on_order(): runner.cleanup() # Expected order: test-container-2, test-container-4, test-container-3, test-container-1 - assert_order(out.getvalue(), 'test-container-2', 'test-container-4') - assert_order(out.getvalue(), 'test-container-4', 'test-container-3') - assert_order(out.getvalue(), 'test-container-3', 'test-container-1') + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-4-{parallel_id}") + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-3-{parallel_id}") + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-1-{parallel_id}") def test_depends_on_huge(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -263,105 +256,109 @@ def test_depends_on_huge(): runner.cleanup() # For test-container-20 - assert_order(out.getvalue(), 'test-container-16', 'test-container-20') - assert_order(out.getvalue(), 'test-container-15', 'test-container-20') + assert_order(out.getvalue(), f"test-container-16-{parallel_id}", f"test-container-20-{parallel_id}") + assert_order(out.getvalue(), f"test-container-15-{parallel_id}", f"test-container-20-{parallel_id}") # For test-container-19 - assert_order(out.getvalue(), 'test-container-14', 'test-container-19') - assert_order(out.getvalue(), 'test-container-13', 'test-container-19') + assert_order(out.getvalue(), f"test-container-14-{parallel_id}", f"test-container-19-{parallel_id}") + assert_order(out.getvalue(), f"test-container-13-{parallel_id}", f"test-container-19-{parallel_id}") # For test-container-18 - assert_order(out.getvalue(), 'test-container-12', 'test-container-18') - assert_order(out.getvalue(), 'test-container-11', 'test-container-18') + assert_order(out.getvalue(), f"test-container-12-{parallel_id}", f"test-container-18-{parallel_id}") + assert_order(out.getvalue(), f"test-container-11-{parallel_id}", f"test-container-18-{parallel_id}") # For test-container-17 - assert_order(out.getvalue(), 'test-container-10', 'test-container-17') - assert_order(out.getvalue(), 'test-container-9', 'test-container-17') + assert_order(out.getvalue(), f"test-container-10-{parallel_id}", f"test-container-17-{parallel_id}") + assert_order(out.getvalue(), f"test-container-9-{parallel_id}", f"test-container-17-{parallel_id}") # For test-container-16 - assert_order(out.getvalue(), 'test-container-8', 'test-container-16') 
- assert_order(out.getvalue(), 'test-container-7', 'test-container-16') + assert_order(out.getvalue(), f"test-container-8-{parallel_id}", f"test-container-16-{parallel_id}") + assert_order(out.getvalue(), f"test-container-7-{parallel_id}", f"test-container-16-{parallel_id}") # For test-container-15 - assert_order(out.getvalue(), 'test-container-6', 'test-container-15') - assert_order(out.getvalue(), 'test-container-5', 'test-container-15') + assert_order(out.getvalue(), f"test-container-6-{parallel_id}", f"test-container-15-{parallel_id}") + assert_order(out.getvalue(), f"test-container-5-{parallel_id}", f"test-container-15-{parallel_id}") # For test-container-14 - assert_order(out.getvalue(), 'test-container-4', 'test-container-14') + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-14-{parallel_id}") # For test-container-13 - assert_order(out.getvalue(), 'test-container-3', 'test-container-13') + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-13-{parallel_id}") # For test-container-12 - assert_order(out.getvalue(), 'test-container-2', 'test-container-12') + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-12-{parallel_id}") # For test-container-11 - assert_order(out.getvalue(), 'test-container-1', 'test-container-11') + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-11-{parallel_id}") # For test-container-10 - assert_order(out.getvalue(), 'test-container-4', 'test-container-10') + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-10-{parallel_id}") # For test-container-9 - assert_order(out.getvalue(), 'test-container-3', 'test-container-9') + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-9-{parallel_id}") # For test-container-8 - assert_order(out.getvalue(), 'test-container-2', 'test-container-8') + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-8-{parallel_id}") # For test-container-7 - assert_order(out.getvalue(), 'test-container-1', 'test-container-7') + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-7-{parallel_id}") # For test-container-6 - assert_order(out.getvalue(), 'test-container-4', 'test-container-6') + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-6-{parallel_id}") # For test-container-5 - assert_order(out.getvalue(), 'test-container-3', 'test-container-5') + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-5-{parallel_id}") # For test-container-4 - assert_order(out.getvalue(), 'test-container-2', 'test-container-4') + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-4-{parallel_id}") # For test-container-3 - assert_order(out.getvalue(), 'test-container-1', 'test-container-3') + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-3-{parallel_id}") # For test-container-2 - assert_order(out.getvalue(), 'test-container-1', 'test-container-2') + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-2-{parallel_id}") def test_depends_on_error_not_running(): - runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', parallel_id=parallel_id) try: with 
pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - assert "Dependent container 'test-container-2' of 'test-container-1' is not running" in str(e.value) , \ - Tests.assertion_info('test-container-2 is not running', str(e.value)) + assert f"Dependent container 'test-container-2-{parallel_id}' of 'test-container-1-{parallel_id}' is not running" in str(e.value) , \ + Tests.assertion_info(f"test-container-2-{parallel_id} is not running", str(e.value)) def test_depends_on_error_cyclic_dependency(): - runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id=utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - - assert "Cycle found in depends_on definition with service 'test-container-1'" in str(e.value) , \ - Tests.assertion_info('cycle in depends_on with test-container-1', str(e.value)) + container_name=f"test-container-1-{parallel_id}" + assert f"Cycle found in depends_on definition with service '{container_name}'" in str(e.value) , \ + Tests.assertion_info(f"cycle in depends_on with {container_name}", str(e.value)) def test_depends_on_error_unsupported_condition(): - runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - message = 'Unsupported condition in healthcheck for service \'test-container-1\': service_completed_successfully' + container_name=f"test-container-1-{parallel_id}" + message = f"Unsupported condition in healthcheck for service \'{container_name}\': service_completed_successfully" assert message in str(e.value) , \ Tests.assertion_info(message, str(e.value)) def test_depends_on_long_form(): - runner = Tests.setup_runner(usage_scenario='depends_on_long_form.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='depends_on_long_form.yml') out = io.StringIO() err = io.StringIO() @@ -375,23 +372,25 @@ def test_depends_on_long_form(): runner.cleanup() def test_depends_on_healthcheck(): - runner = Tests.setup_runner(usage_scenario='healthcheck.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='healthcheck.yml', parallel_id=parallel_id) out = io.StringIO() err = io.StringIO() try: with redirect_stdout(out), redirect_stderr(err): runner.run() - message = 'Health of container \'test-container-2\': starting' + message = f"Health of container \'test-container-2-{parallel_id}\': starting" assert message in out.getvalue(), Tests.assertion_info(message, out.getvalue()) - message2 = 'Health of container \'test-container-2\': healthy' + message2 = f"Health of container \'test-container-2-{parallel_id}\': healthy" assert message2 in out.getvalue(), Tests.assertion_info(message, out.getvalue()) finally: runner.cleanup() def test_depends_on_healthcheck_error_missing(): - runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', dev_no_metrics=True, 
dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: @@ -399,43 +398,48 @@ def test_depends_on_healthcheck_error_missing(): finally: runner.cleanup() - expected_exception = "Health check for dependent_container 'test-container-2' was requested, but container has no healthcheck implemented!" + expected_exception = f"Health check for dependent_container 'test-container-2-{parallel_id}' was requested, but container has no healthcheck implemented!" assert str(e.value).startswith(expected_exception),\ Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) #volumes: [array] (optional) #Array of volumes to be mapped. Only read of runner.py is executed with --allow-unsafe flag def test_volume_bindings_allow_unsafe_true(): - create_test_file('/tmp/gmt-test-data') - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - ls = get_contents_of_bound_volume(runner) + parallel_id = utils.randomword(12) + create_test_file("/tmp/gmt-test-data") + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, parallel_id=parallel_id) + ls = get_contents_of_bound_volume(runner, parallel_id) assert 'test-file' in ls, Tests.assertion_info('test-file', ls) def test_volumes_bindings_skip_unsafe_true(): - create_test_file('/tmp/gmt-test-data') + parallel_id = utils.randomword(12) + create_test_file("/tmp/gmt-test-data") out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception): - ls = get_contents_of_bound_volume(runner) + ls = get_contents_of_bound_volume(runner, parallel_id) assert ls == '', Tests.assertion_info('empty list', ls) expected_warning = '' # expecting no warning for safe volumes assert expected_warning in out.getvalue(), \ Tests.assertion_info(f"Warning: {expected_warning}", 'no/different warning') def test_volumes_bindings_no_skip_or_allow(): - create_test_file('/tmp/gmt-test-data') - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + create_test_file("/tmp/gmt-test-data") + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: - ls = get_contents_of_bound_volume(runner) + ls = get_contents_of_bound_volume(runner, parallel_id) assert ls == '', Tests.assertion_info('empty list', ls) expected_exception = '' # Expecting no error for safe volumes assert expected_exception in str(e.value) ,\ Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) def test_network_created(): - runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='network_stress.yml') try: Tests.run_until(runner, 'setup_networks') ps = subprocess.run( @@ -451,11 +455,12 @@ def test_network_created(): assert 
'gmt-test-network' in ls, Tests.assertion_info('gmt-test-network', ls) def test_container_is_in_network(): - runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='network_stress.yml', parallel_id=parallel_id) try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'network', 'inspect', 'gmt-test-network'], + ['docker', 'network', 'inspect', f"gmt-test-network-{parallel_id}"], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -464,18 +469,19 @@ def test_container_is_in_network(): inspect = ps.stdout finally: Tests.cleanup(runner) - assert 'test-container' in inspect, Tests.assertion_info('test-container', inspect) + assert f"test-container-{parallel_id}" in inspect, Tests.assertion_info(f"test-container-{parallel_id}", inspect) # command: [str] (optional) # Command to be executed when container is started. # When container does not have a daemon running typically a shell # is started here to have the container running like bash or sh def test_cmd_ran(): - runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', parallel_id=parallel_id) try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', 'test-container', 'ps', '-a'], + ['docker', 'exec', f"test-container-{parallel_id}", 'ps', '-a'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -492,9 +498,9 @@ def test_cmd_ran(): # / or a remote git repository starting with http(s):// def test_uri_local_dir(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', + ['python3', '../runner.py', '--name', name, '--uri', uri ,'--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, stderr=subprocess.PIPE, @@ -502,12 +508,12 @@ def test_uri_local_dir(): encoding='UTF-8' ) - uri_in_db = utils.get_run_data(RUN_NAME)['uri'] + uri_in_db = utils.get_run_data(name)['uri'] assert uri_in_db == uri, Tests.assertion_info(f"uri: {uri}", uri_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) def test_uri_local_dir_missing(): - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='/tmp/missing', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='/tmp/missing') try: with pytest.raises(FileNotFoundError) as e: runner.run() @@ -518,11 +524,12 @@ def test_uri_local_dir_missing(): Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) # basic positive case +@pytest.mark.serial def test_uri_github_repo(): uri = 'https://github.com/green-coding-berlin/pytest-dummy-repo' - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', + ['python3', '../runner.py', '--name', name, '--uri', uri ,'--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', 
'--dev-no-metrics'], check=True, stderr=subprocess.PIPE, @@ -530,14 +537,14 @@ def test_uri_github_repo(): encoding='UTF-8' ) - uri_in_db = utils.get_run_data(RUN_NAME)['uri'] + uri_in_db = utils.get_run_data(name)['uri'] assert uri_in_db == uri, Tests.assertion_info(f"uri: {uri}", uri_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) ## --branch BRANCH # Optionally specify the git branch when targeting a git repository def test_uri_local_branch(): - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', branch='test-branch', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', branch='test-branch') out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err), pytest.raises(RuntimeError) as e: @@ -549,11 +556,12 @@ def test_uri_local_branch(): # basic positive case, branch prepped ahead of time # this branch has a different usage_scenario file name - basic_stress # that makes sure that it really is pulling a different branch +@pytest.mark.serial def test_uri_github_repo_branch(): uri = 'https://github.com/green-coding-berlin/pytest-dummy-repo' - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--branch', 'test-branch' , '--filename', 'basic_stress.yml', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -562,7 +570,7 @@ def test_uri_github_repo_branch(): encoding='UTF-8' ) - branch_in_db = utils.get_run_data(RUN_NAME)['branch'] + branch_in_db = utils.get_run_data(name)['branch'] assert branch_in_db == 'test-branch', Tests.assertion_info('branch: test-branch', branch_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) @@ -570,6 +578,7 @@ def test_uri_github_repo_branch(): # give incorrect branch name ## Is the expected_exception OK or should it have a more graceful error? 
## ATM this is just the default console error of a failed git command +@pytest.mark.serial def test_uri_github_repo_branch_missing(): runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='https://github.com/green-coding-berlin/pytest-dummy-repo', @@ -587,33 +596,35 @@ def test_uri_github_repo_branch_missing(): # # --name NAME # # A name which will be stored to the database to discern this run from others +@pytest.mark.serial def test_name_is_in_db(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', + ['python3', '../runner.py', '--name', name, '--uri', uri ,'--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-metrics', '--dev-no-sleeps', '--dev-no-build'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='UTF-8' ) - name_in_db = utils.get_run_data(RUN_NAME)['name'] - assert name_in_db == RUN_NAME, Tests.assertion_info(f"name: {RUN_NAME}", name_in_db) + name_in_db = utils.get_run_data(name)['name'] + assert name_in_db == name, Tests.assertion_info(f"name: {name}", name_in_db) # --filename FILENAME # An optional alternative filename if you do not want to use "usage_scenario.yml" # basic positive case +@pytest.mark.serial def test_different_filename(): usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', 'basic_stress.yml') dir_name = utils.randomword(12) compose_path = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/compose.yml')) Tests.make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=compose_path) uri = os.path.join(CURRENT_DIR, 'tmp/', dir_name) - RUN_NAME = 'test_' + utils.randomword(12) - + name = 'test_' + dir_name + print(name) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--filename', 'basic_stress.yml', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -622,9 +633,10 @@ def test_different_filename(): encoding='UTF-8' ) + print(ps.stdout) with open(usage_scenario_path, 'r', encoding='utf-8') as f: usage_scenario_contents = yaml.safe_load(f) - usage_scenario_in_db = utils.get_run_data(RUN_NAME)['usage_scenario'] + usage_scenario_in_db = utils.get_run_data(name)['usage_scenario'] assert usage_scenario_in_db == usage_scenario_contents,\ Tests.assertion_info(usage_scenario_contents, usage_scenario_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) @@ -632,9 +644,9 @@ def test_different_filename(): # if that filename is missing... 
def test_different_filename_missing(): uri = os.path.abspath(os.path.join(CURRENT_DIR, '..', 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename='basic_stress.yml', skip_system_checks=True, dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=True) + runner = Runner(name=name, uri=uri, uri_type='folder', filename='basic_stress.yml', skip_system_checks=True, dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=True) with pytest.raises(FileNotFoundError) as e: runner.run() @@ -644,11 +656,12 @@ def test_different_filename_missing(): # --no-file-cleanup # Do not delete files in /tmp/green-metrics-tool +@pytest.mark.serial def test_no_file_cleanup(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--no-file-cleanup', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, stderr=subprocess.PIPE, @@ -661,16 +674,17 @@ def test_no_file_cleanup(): #pylint: disable=unused-variable def test_skip_and_allow_unsafe_both_true(): with pytest.raises(RuntimeError) as e: - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True) expected_exception = 'Cannot specify both --skip-unsafe and --allow-unsafe' assert str(e.value) == expected_exception, Tests.assertion_info('', str(e.value)) +@pytest.mark.serial def test_debug(monkeypatch): monkeypatch.setattr('sys.stdin', io.StringIO('Enter')) uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--debug', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -687,7 +701,7 @@ def test_debug(monkeypatch): # can check for this note in the DB and the notes are about 2s apart def test_read_detached_process_no_exit(): - runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml') out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err): @@ -701,7 +715,7 @@ def test_read_detached_process_no_exit(): Tests.assertion_info('NOT successful run completed', out.getvalue()) def test_read_detached_process_after_exit(): - runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml') out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err): @@ -713,7 +727,7 @@ def test_read_detached_process_after_exit(): Tests.assertion_info('successful run completed', out.getvalue()) def test_read_detached_process_failure(): - runner = 
Tests.setup_runner(usage_scenario='stress_detached_failure.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_failure.yml') out = io.StringIO() err = io.StringIO() @@ -722,6 +736,7 @@ def test_read_detached_process_failure(): runner.run() finally: runner.cleanup() + print (out.getvalue()) assert '\'g4jiorejf\']\' had bad returncode: 126' in str(e.value), \ Tests.assertion_info('\'g4jiorejf\']\' had bad returncode: 126', str(e.value)) @@ -729,9 +744,9 @@ def test_read_detached_process_failure(): ## rethink this one def wip_test_verbose_provider_boot(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--verbose-provider-boot', '--config-override', 'test-config.yml', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -739,7 +754,7 @@ def wip_test_verbose_provider_boot(): stdout=subprocess.PIPE, encoding='UTF-8' ) - run_id = utils.get_run_data(RUN_NAME)['id'] + run_id = utils.get_run_data(name)['id'] query = """ SELECT time, note diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index b90b1dca4..8998fa5ad 100644 --- a/tests/test_volume_loading.py +++ b/tests/test_volume_loading.py @@ -10,20 +10,11 @@ import pytest from tests import test_functions as Tests - from lib import utils from lib.global_config import GlobalConfig -from runner import Runner GlobalConfig().override_config(config_name='test-config.yml') -@pytest.fixture(autouse=True, name="cleanup_tmp_directories") -def cleanup_tmp_directories_fixture(): - yield - tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') - if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) - def check_if_container_running(container_name): ps = subprocess.run( ['docker', 'container', 'inspect', '-f', '{{.State.Running}}', container_name], @@ -37,35 +28,35 @@ def check_if_container_running(container_name): return True def test_volume_load_no_escape(): - tmp_dir_name = utils.randomword(12) - tmp_dir = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml') - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) - Tests.replace_include_in_usage_scenario(tmp_dir, 'volume_load_etc_passwords.yml') + parallel_id = utils.randomword(12) + + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_etc_passwords.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) + + Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, 'volume_load_etc_passwords.yml') + + runner = Tests.setup_runner( usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = 
check_if_container_running('test-container') + container_running = check_if_container_running(f"test-container-{parallel_id}") runner.cleanup() - expected_error = 'Service \'test-container\' volume path (/etc/passwd) is outside allowed folder:' + container_name = f'test-container-{parallel_id}' + expected_error = f'Service \'{container_name}\' volume path (/etc/passwd) is outside allowed folder:' assert str(e.value).startswith(expected_error), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info('test-container stopped', 'test-container was still running!') - -def create_tmp_dir(): - tmp_dir_name = utils.randomword(12) - if not os.path.exists(os.path.join(CURRENT_DIR, 'tmp/')): - os.mkdir(os.path.join(CURRENT_DIR, 'tmp/')) - os.mkdir('tmp/' + tmp_dir_name) - tmp_dir = os.path.join(CURRENT_DIR, f'tmp/{tmp_dir_name}') - return tmp_dir, tmp_dir_name - -def copy_compose_and_edit_directory(compose_file, tmp_dir): - tmp_compose_file = os.path.join(tmp_dir, 'docker-compose.yml') - shutil.copyfile( - os.path.join(CURRENT_DIR, f'data/docker-compose-files/{compose_file}'), - tmp_compose_file) + assert container_running is False, Tests.assertion_info(f'{container_name} stopped', f'{container_name} was still running!') + +def edit_compose_file(compose_file, tmp_dir): + tmp_compose_file = os.path.join(tmp_dir, compose_file) #regex replace CURRENT_DIR in docker-compose.yml with temp proj directory where test-file exists with open(tmp_compose_file, 'r', encoding='utf-8') as file: @@ -75,21 +66,27 @@ def copy_compose_and_edit_directory(compose_file, tmp_dir): file.write(data) def test_load_files_from_within_gmt(): - tmp_dir, tmp_dir_name = create_tmp_dir() - Tests.create_test_file(tmp_dir) + parallel_id = utils.randomword(12) + + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_within_proj.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) - # copy compose file over so that we can edit it safely - copy_compose_and_edit_directory('volume_load_within_proj.yml', tmp_dir) + dir_path = Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + edit_compose_file(docker_compose_file, dir_path) + Tests.create_test_file(dir_path) - # setup runner and run test - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) - Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') + runner = Tests.setup_runner(usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) try: Tests.run_until(runner, 'setup_services') # check that the volume was loaded ps = subprocess.run( - ['docker', 'exec', 'test-container', '/bin/sh', + ['docker', 'exec', f"test-container-{parallel_id}", '/bin/sh', '-c', 'test -f /tmp/test-file && echo "File mounted"'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -103,58 +100,87 @@ def test_load_files_from_within_gmt(): assert "File mounted" in out, Tests.assertion_info('/tmp/test-file mounted', f"out: 
{out} | err: {err}") def test_symlinks_should_fail(): - tmp_dir, tmp_dir_name = create_tmp_dir() - # make a symlink to /etc/passwords in tmp_dir - symlink = os.path.join(tmp_dir, 'symlink') - os.symlink('/etc/passwd', os.path.join(tmp_dir, 'symlink')) + parallel_id = utils.randomword(12) + + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_symlinks_negative.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) - copy_compose_and_edit_directory('volume_load_symlinks_negative.yml', tmp_dir) + dir_path = Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + edit_compose_file(docker_compose_file, dir_path) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) - Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') + # make a symlink to /etc/passwords in tmp_dir + symlink = os.path.join(dir_path, 'symlink') + os.symlink('/etc/passwd', os.path.join(dir_path, 'symlink')) + + runner = Tests.setup_runner( usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) + container_name = f'test-container-{parallel_id}' try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running('test-container') + container_running = check_if_container_running(container_name) runner.cleanup() - expected_error = f"Service 'test-container' volume path ({symlink}) is outside allowed folder:" + expected_error = f"Service '{container_name}' volume path ({symlink}) is outside allowed folder:" assert str(e.value).startswith(expected_error), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info('test-container stopped', 'test-container was still running!') + assert container_running is False, Tests.assertion_info(f"{container_name} stopped", f"{container_name} was still running!") def test_non_bind_mounts_should_fail(): - tmp_dir_name = create_tmp_dir()[1] - tmp_dir_usage = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml') - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_non_bind_mounts.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) - Tests.replace_include_in_usage_scenario(tmp_dir_usage, 'volume_load_non_bind_mounts.yml') + parallel_id = utils.randomword(12) + + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_non_bind_mounts.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) + + Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + 
Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + + runner = Tests.setup_runner(usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) + container_name=f'test-container-{parallel_id}' try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running('test-container') + container_running = check_if_container_running(container_name) runner.cleanup() expected_error = 'volume path does not exist' assert expected_error in str(e.value), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info('test-container stopped', 'test-container was still running!') + assert container_running is False, Tests.assertion_info(f"{container_name} stopped", f"{container_name} was still running!") def test_load_volume_references(): - tmp_dir, tmp_dir_name = create_tmp_dir() - Tests.create_test_file(tmp_dir) + parallel_id = utils.randomword(12) - copy_compose_and_edit_directory('volume_load_references.yml', tmp_dir) + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_references.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) - Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') + dir_path = Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + edit_compose_file(docker_compose_file, dir_path) + + Tests.create_test_file(dir_path) + runner = Tests.setup_runner( + usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, dir_name=parallel_id, + parallel_id=parallel_id, create_tmp_directory=False) try: Tests.run_until(runner, 'setup_services') # check that the volume was loaded ps = subprocess.run( - ['docker', 'exec', 'test-container-2', '/bin/sh', + ['docker', 'exec', f"test-container-2-{parallel_id}", '/bin/sh', '-c', 'test -f /tmp/test-file && echo "File mounted"'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -167,10 +193,28 @@ def test_load_volume_references(): Tests.cleanup(runner) assert "File mounted" in out, Tests.assertion_info('/tmp/test-file mounted', f"out: {out} | err: {err}") +def prepare_subdir_tmp_directory(parallel_id): + test_case_path=os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') + tmp_dir_path=os.path.join(CURRENT_DIR, 'tmp', parallel_id) + shutil.copytree(test_case_path, tmp_dir_path) + + usage_scenario_path=os.path.join(tmp_dir_path, 'usage_scenario.yml') + compose_yaml_path=os.path.join(tmp_dir_path, 'compose.yaml') + subdir_usage_scenario_path=os.path.join(tmp_dir_path, 'subdir/', 'usage_scenario_subdir.yml') + subdir2_usage_scenario_path=os.path.join(tmp_dir_path, 'subdir/subdir2', 'usage_scenario_subdir2.yml') + + Tests.edit_yml_with_id(usage_scenario_path, parallel_id) + Tests.edit_yml_with_id(compose_yaml_path, parallel_id) + Tests.edit_yml_with_id(subdir_usage_scenario_path, 
parallel_id) + Tests.edit_yml_with_id(subdir2_usage_scenario_path, parallel_id) + + return tmp_dir_path + +@pytest.mark.serial def test_volume_loading_subdirectories_root(): - uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') - RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + prepare_subdir_tmp_directory(parallel_id) + runner = Tests.setup_runner(do_parallelize_files=False, parallel_id=parallel_id, create_tmp_directory=False) out = io.StringIO() err = io.StringIO() @@ -180,25 +224,26 @@ def test_volume_loading_subdirectories_root(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_content_testfile_root = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile-root-content', '/tmp/testfile-root'] testfile-root-content" + expect_content_testfile_root = f"stdout from process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root'] testfile-root-content" assert expect_content_testfile_root in run_stdout, Tests.assertion_info(expect_content_testfile_root, f"expected output not in {run_stdout}") - expect_extra_testfile_root = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile-root-content', '/tmp/testfile-root-extra-copied'] testfile-root-content" + expect_extra_testfile_root = f"stdout from process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root-extra-copied'] testfile-root-content" assert expect_extra_testfile_root in run_stdout, Tests.assertion_info(expect_extra_testfile_root, f"expected output not in {run_stdout}") - expect_mounted_testfile = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" + expect_mounted_testfile = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" assert expect_mounted_testfile in run_stdout, Tests.assertion_info(expect_mounted_testfile, f"expected output not in {run_stdout}") - expect_mounted_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir(): - uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') - RUN_NAME = 'test_' + utils.randomword(12) - runner = 
Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/usage_scenario_subdir.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + prepare_subdir_tmp_directory(parallel_id) + runner = Tests.setup_runner(usage_scenario='subdir/usage_scenario_subdir.yml', + do_parallelize_files=False, parallel_id=parallel_id, create_tmp_directory=False) out = io.StringIO() err = io.StringIO() @@ -208,16 +253,17 @@ def test_volume_loading_subdirectories_subdir(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_mounted_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir2(): - uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') - RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/subdir2/usage_scenario_subdir2.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + prepare_subdir_tmp_directory(parallel_id) + runner = Tests.setup_runner(usage_scenario='subdir/subdir2/usage_scenario_subdir2.yml', + do_parallelize_files=False, parallel_id=parallel_id, create_tmp_directory=False) out = io.StringIO() err = io.StringIO() @@ -227,14 +273,14 @@ def test_volume_loading_subdirectories_subdir2(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_mounted_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, "expected output not in {run_stdout}") - expect_copied_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-copied'] testfile2-content" + expect_copied_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-copied'] testfile2-content" assert expect_copied_testfile_2 in run_stdout, Tests.assertion_info(expect_copied_testfile_2, f"expected output not in {run_stdout}") - expect_copied_testfile_3 = "stdout from process: ['docker', 'exec', 
'test-container', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content"
+    expect_copied_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content"
     assert expect_copied_testfile_3 in run_stdout, Tests.assertion_info(expect_copied_testfile_3, f"expected output not in {run_stdout}")

-    expect_copied_testfile_4 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile4-content', '/tmp/testfile4-correctly-copied'] testfile4-content"
+    expect_copied_testfile_4 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile4-content', '/tmp/testfile4-correctly-copied'] testfile4-content"
     assert expect_copied_testfile_4 in run_stdout, Tests.assertion_info(expect_copied_testfile_4, f"expected output not in {run_stdout}")
diff --git a/tests/test_yml_parsing.py b/tests/test_yml_parsing.py
index f72378c2c..127e7229e 100644
--- a/tests/test_yml_parsing.py
+++ b/tests/test_yml_parsing.py
@@ -1,5 +1,6 @@
 import os
 import unittest
+import pytest

 CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -11,6 +12,7 @@ class TestYML(unittest.TestCase):

+    @pytest.mark.serial # the editing of the yml files makes the result_obj too different, this can be fixed later
     def test_includes(self):
         test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/')
         test_root_file = 'import_one_root.yml'
@@ -27,6 +29,7 @@ def test_includes(self):

         self.assertEqual(result_obj, runner._usage_scenario)

+    @pytest.mark.serial # the editing of the yml files makes the result_obj too different, this can be fixed later
     def test_(self):
         test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/')
         test_root_file = 'import_two_root.yml'
@@ -48,7 +51,7 @@ def test_(self):
         print(f"expect: {result_obj}")
         self.assertEqual(result_obj, runner._usage_scenario)

-
+    @pytest.mark.serial # the parallelization uses the yml loader, so this test will always fail before the assert
     def test_invalid_path(self):
         name = 'test_' + utils.randomword(12)
         test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/')
diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py
index 555e37450..a2b72eac3 100644
--- a/tests/tools/test_jobs.py
+++ b/tests/tools/test_jobs.py
@@ -21,12 +21,6 @@ def register_machine_fixture():
     machine = Machine(machine_id=1, description='test-machine')
     machine.register()
-
-# This should be done once per module
-@pytest.fixture(autouse=True, scope="module", name="build_image")
-def build_image_fixture():
-    subprocess.run(['docker', 'compose', '-f', f"{CURRENT_DIR}/../stress-application/compose.yml", 'build'], check=True)
-
 def get_job(job_id):
     query = """
         SELECT
@@ -41,6 +35,7 @@ def get_job(job_id):
     return data

+@pytest.mark.serial
 def test_no_run_job():
     ps = subprocess.run(
         ['python3', '../tools/jobs.py', 'run', '--config-override', 'test-config.yml'],
@@ -53,6 +48,7 @@ def test_no_run_job():
     assert 'No job to process. Exiting' in ps.stdout,\
         Tests.assertion_info('No job to process. Exiting', ps.stdout)

+@pytest.mark.serial
 def test_no_email_job():
     ps = subprocess.run(
         ['python3', '../tools/jobs.py', 'email', '--config-override', 'test-config.yml'],
@@ -64,15 +60,19 @@ def test_no_email_job():
     assert 'No job to process. Exiting' in ps.stdout,\
         Tests.assertion_info('No job to process. Exiting', ps.stdout)

+@pytest.mark.serial
 def test_insert_job():
     job_id = Job.insert('Test Name', 'Test URL', 'Test Email', 'Test Branch', 'Test filename', 1)
     assert job_id is not None
     job = Job.get_job('run')
     assert job._state == 'WAITING'
+    ## cleanup
+    DB().query('TRUNCATE TABLE jobs RESTART IDENTITY CASCADE')

+@pytest.mark.serial
 def test_simple_run_job():
     name = utils.randomword(12)
-    url = 'https://github.com/green-coding-berlin/pytest-dummy-repo'
+    url = 'https://github.com/green-coding-solutions/pytest-dummy-repo'
     filename = 'usage_scenario.yml'

     Job.insert(name, url, 'Test Email', 'main', filename, 1)
@@ -94,6 +94,7 @@ def test_simple_run_job():
 #pylint: disable=unused-variable # for the time being, until I get the mocking to work
 ## This test doesn't really make sense anymore as is, since we don't have "email jobs" in the same way,
 ## more that we send an email after a run job is finished.
+@pytest.mark.serial
 def todo_test_simple_email_job():
     name = utils.randomword(12)
     url = 'https://github.com/green-coding-berlin/pytest-dummy-repo'
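
The tests in this diff rely on two isolation mechanisms: a random parallel_id suffix appended to Docker container and network names, and a "serial" pytest marker on tests that touch shared state (the jobs table, fixed run names, captured stdout) and therefore cannot run concurrently. As a minimal sketch, assuming pytest-xdist supplies the parallel execution, the snippet below shows how such a marker could be registered and how a unique suffix could be handed to each test. The conftest.py content is hypothetical and not taken from this repository; utils.randomword and the test-container-{parallel_id} naming pattern are the ones used in the diff itself.

# conftest.py (illustrative sketch only; not part of this patch)
import pytest

from lib import utils


def pytest_configure(config):
    # Register the marker so selecting tests with -m "serial" or
    # -m "not serial" does not trigger unknown-marker warnings.
    config.addinivalue_line(
        "markers",
        "serial: must run in isolation because it mutates shared state",
    )


@pytest.fixture
def parallel_id():
    # One random 12-character suffix per test keeps Docker containers and
    # networks unique, e.g. f"test-container-{parallel_id}" or
    # f"gmt-test-network-{parallel_id}" as asserted in the tests above.
    # The tests in this diff call utils.randomword(12) directly instead of
    # using a fixture; this is just one way to centralise that call.
    return utils.randomword(12)

Tests marked "serial" are the ones that truncate shared database tables or assert on fixed, globally visible names, so they are meant to be excluded from any concurrent scheduling.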