diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml
index abf5b36a1..638499c3a 100644
--- a/.github/actions/gmt-pytest/action.yml
+++ b/.github/actions/gmt-pytest/action.yml
@@ -1,10 +1,6 @@
 name: 'GMT Pytest'
 description: 'Run Pytest with setup and teardown'
 inputs:
-  metrics-to-turn-off:
-    description: 'a list of metrics to turn off that is passed to the disable metrics script'
-    required: false
-    default: ''
   gmt-directory:
     description: 'The root directory of the gmt repository'
    required: false
@@ -25,82 +21,33 @@ runs:
       with:
         python-version: '3.10'

-    - id: python_cache
-      uses: actions/cache@v3
-      with:
-        path: venv
-        key: pip-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}-${{ hashFiles('metric_providers/psu/energy/ac/xgboost/machine/model/requirements.txt') }}
+    # - id: python_cache
+    #   uses: actions/cache@v3
+    #   with:
+    #     path: venv
+    #     key: pip-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements-dev.txt') }}-${{ hashFiles('metric_providers/psu/energy/ac/xgboost/machine/model/requirements.txt') }}

-    - name: install script and packages
-      shell: bash
-      working-directory: ${{ inputs.gmt-directory }}
-      run: |
-        ./install_linux.sh -p testpw -a http://api.green-coding.internal:9142 -m http://metrics.green-coding.internal:9142 -n -t
-        source venv/bin/activate
-        python3 -m pip install -r requirements-dev.txt
-        python3 -m pip install -r metric_providers/psu/energy/ac/xgboost/machine/model/requirements.txt
-
-    - name: disable unneeded metric providers and run test setup script
+    - name: Install docker for MacOS
+      if: runner.os == 'macOS'
       shell: bash
-      working-directory: ${{ inputs.gmt-directory }}
       run: |
-        source venv/bin/activate
-        python3 disable_metric_providers.py ${{ inputs.metrics-to-turn-off }}
-        cd tests && python3 setup-test-env.py --no-docker-build
+        bash -x ./install_mac.sh -p testpw -a http://api.green-coding.internal:9142 -m http://metrics.green-coding.internal:9142
+        source venv/bin/activate && pip install -r requirements-dev.txt

-    - name: Set up Docker Buildx
-      id: buildx
-      uses: docker/setup-buildx-action@master
-
-    - name: Login to Docker Registry
-      uses: docker/login-action@v2
-      with:
-        registry: ghcr.io
-        username: ${{ github.actor }}
-        password: ${{ inputs.github-token }}
-
-    - name: Build docker-compose
-      id: build-docker-compose
-      shell: bash
-      working-directory: ${{ inputs.gmt-directory }}/docker
-      run: |
-        { DOCKER_OUTPUT=$(docker buildx bake --file test-compose.yml --file docker-compose-cache.json 2>&1); DOCKER_EXIT_CODE=$?; } || true
-        if [ "$DOCKER_EXIT_CODE" -ne 0 ]; then
-          echo "Docker build failed with exit code $DOCKER_EXIT_CODE"
-          echo "buildx output:"
-          echo $DOCKER_OUTPUT
-          if echo "$DOCKER_OUTPUT" | grep -q "403 Forbidden"; then
-            echo "Docker build failed due to permissions issue. Continuing..."
-          else
-            exit 1
-          fi
-        fi

-    - name: Start Test container
+    - name: Build test image and start test containers
       shell: bash
-      working-directory: ${{ inputs.gmt-directory }}/tests
+      working-directory: ${{ inputs.gmt-directory }}
       run: |
+        source ../venv/bin/activate && python3 setup-test-env.py
         source ../venv/bin/activate && ./start-test-containers.sh -d

-    - name: Sleep for 10 seconds
-      run: sleep 10s
-      shell: bash
-
-    # - name: Setup upterm session
-    #   uses: lhotari/action-upterm@v1
-
     - name: Run Tests
+      continue-on-error: true
       shell: bash
-      working-directory: ${{ inputs.gmt-directory }}/tests
-      run: |
-        source ../venv/bin/activate
-        python3 -m ${{ inputs.tests-command }} -rA | tee /tmp/test-results.txt
-
-    - name: Display Results
-      shell: bash
-      if: always()
+      working-directory: ${{ inputs.gmt-directory }}
       run: |
-        cat /tmp/test-results.txt | grep -oPz '(=*) short test summary(.*\n)*' >> $GITHUB_STEP_SUMMARY
+        source ../venv/bin/activate && python3 -m pytest -rA

     - name: Stop Containers
       shell: bash
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
deleted file mode 100644
index 38a577c01..000000000
--- a/.github/workflows/codeql.yml
+++ /dev/null
@@ -1,79 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ "main" ]
-  pull_request:
-    branches: [ "main" ]
-
-jobs:
-  analyze:
-    name: Analyze
-    # Runner size impacts CodeQL analysis time. To learn more, please see:
-    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
-    #   - https://gh.io/supported-runners-and-hardware-resources
-    #   - https://gh.io/using-larger-runners
-    # Consider using larger runners for possible analysis time improvements.
-    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
-    timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'javascript-typescript', 'python' ]
-        # CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ]
-        # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both
-        # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
-        # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      # Initializes the CodeQL tools for scanning.
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
-        with:
-          languages: ${{ matrix.language }}
-          # If you wish to specify custom queries, you can do so here or in a config file.
-          # By default, queries listed here will override any specified in a config file.
-          # Prefix the list here with "+" to use these queries and those in the config file.
-
-          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
-          queries: security-extended
-
-
-      # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
-      # If this step fails, then you should remove it and run the build manually (see below)
-      - name: Autobuild
-        uses: github/codeql-action/autobuild@v3
-
-      # ℹ️ Command-line programs to run using the OS shell.
-      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
-
-      # If the Autobuild fails above, remove it and uncomment the following three lines.
-      # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
-
-      # - run: |
-      #     echo "Run, Build Application using script"
-      #     ./location_of_script_within_repo/buildscript.sh
-
-      - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3
-        with:
-          category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/docker-test.yml b/.github/workflows/docker-test.yml
new file mode 100644
index 000000000..9a2a46382
--- /dev/null
+++ b/.github/workflows/docker-test.yml
@@ -0,0 +1,24 @@
+name: Install Docker on macOS
+
+on: [push] # or any other event
+
+jobs:
+  install-docker:
+    runs-on: macos-13
+
+    steps:
+      - name: 'Checkout repository'
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.ref }}
+          submodules: 'true'
+
+      - name: Setup Docker on macOS
+        uses: douglascamata/setup-docker-macos-action@v1-alpha
+        with:
+          colima-network-address: 'true'
+
+      - name: 'Setup, Run, and Teardown Tests'
+        uses: ./.github/actions/gmt-pytest
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/tests-vm-pr.yml b/.github/workflows/tests-vm-pr.yml
index 4c710c210..da88df7a5 100644
--- a/.github/workflows/tests-vm-pr.yml
+++ b/.github/workflows/tests-vm-pr.yml
@@ -25,7 +25,6 @@ jobs:
       - name: 'Setup, Run, and Teardown Tests'
         uses: ./.github/actions/gmt-pytest
         with:
-          metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider'
           github-token: ${{ secrets.GITHUB_TOKEN }}

       - name: Eco CI Energy Estimation - Get Measurement
diff --git a/config.yml.example b/config.yml.example
index aa9704375..9b0d1ca67 100644
--- a/config.yml.example
+++ b/config.yml.example
@@ -123,8 +123,6 @@ measurement:
 #--- Architecture - MacOS
        macos:
 #--- MacOS: On Mac you only need this provider. Please remove all others!
-           powermetrics.provider.PowermetricsProvider:
-               resolution: 99
            cpu.utilization.mach.system.provider.CpuUtilizationMachSystemProvider:
                resolution: 99
 #--- Architecture - Common
@@ -137,16 +135,16 @@
 #-- This is a default configuration. Please change this to your system!
 #           CPUChips: 1
 #           TDP: 65
-#           psu.energy.ac.xgboost.machine.provider.PsuEnergyAcXgboostMachineProvider:
-#               resolution: 99
+           psu.energy.ac.xgboost.machine.provider.PsuEnergyAcXgboostMachineProvider:
+               resolution: 99
 #-- This is a default configuration. Please change this to your system!
-#               CPUChips: 1
-#               HW_CPUFreq: 3200
-#               CPUCores: 4
-#               CPUThreads: 4
-#               TDP: 65
-#               HW_MemAmountGB: 16
-#               Hardware_Availability_Year: 2011
+               CPUChips: 1
+               HW_CPUFreq: 3200
+               CPUCores: 4
+               CPUThreads: 4
+               TDP: 65
+               HW_MemAmountGB: 16
+               Hardware_Availability_Year: 2011
 #--- END
diff --git a/install_linux.sh b/install_linux.sh
index 38c3adaa8..b938d45aa 100755
--- a/install_linux.sh
+++ b/install_linux.sh
@@ -59,13 +59,11 @@ if [[ -z $metrics_url ]] ; then
     metrics_url=${metrics_url:-"http://metrics.green-coding.internal:9142"}
 fi

-if [[ -f config.yml ]]; then
-    password_from_file=$(awk '/postgresql:/ {flag=1; next} flag && /password:/ {print $2; exit}' config.yml)
-fi
-
-default_password=${password_from_file:-$(generate_random_password 12)}
-
 if [[ -z "$db_pw" ]] ; then
+    if [[ -f config.yml ]]; then
+        password_from_file=$(awk '/postgresql:/ {flag=1; next} flag && /password:/ {print $2; exit}' config.yml)
+    fi
+    default_password=${password_from_file:-$(generate_random_password 12)}
     read -sp "Please enter the new password to be set for the PostgreSQL DB (default: $default_password): " db_pw
     echo "" # force a newline, because read -sp will consume it
     db_pw=${db_pw:-"$default_password"}
diff --git a/install_mac.sh b/install_mac.sh
index 2d306dc1c..9df3e4fd1 100755
--- a/install_mac.sh
+++ b/install_mac.sh
@@ -10,16 +10,15 @@ function print_message {
 }

 function generate_random_password() {
-    local length=$1
-    LC_ALL=C tr -dc 'A-Za-z0-9' < /dev/urandom | head -c "$length"
-    echo
+    echo "yourfixedinputstring" | tr -dc 'A-Za-z0-9' | head -c "$length"
 }

 db_pw=''
 api_url=''
 metrics_url=''
+no_build=false

-while getopts "p:a:m:" o; do
+while getopts "p:a:m:n" o; do
     case "$o" in
         p)
             db_pw=${OPTARG}
@@ -30,6 +29,9 @@ while getopts "p:a:m:n" o; do
         m)
             metrics_url=${OPTARG}
             ;;
+        n)
+            no_build=true
+            ;;
     esac
 done
@@ -43,13 +45,13 @@ if [[ -z $metrics_url ]] ; then
     metrics_url=${metrics_url:-"http://metrics.green-coding.internal:9142"}
 fi

-if [[ -f config.yml ]]; then
-    password_from_file=$(awk '/postgresql:/ {flag=1; next} flag && /password:/ {print $2; exit}' config.yml)
-fi
+if [[ -z "$db_pw" ]] ; then
+    if [[ -f config.yml ]]; then
+        password_from_file=$(awk '/postgresql:/ {flag=1; next} flag && /password:/ {print $2; exit}' config.yml)
+    fi

-default_password=${password_from_file:-$(generate_random_password 12)}
+    default_password=${password_from_file:-$(generate_random_password 12)}

-if [[ -z "$db_pw" ]] ; then
     read -sp "Please enter the new password to be set for the PostgreSQL DB (default: $default_password): " db_pw
     echo "" # force a newline, because read -sp will consume it
     db_pw=${db_pw:-"$default_password"}
@@ -109,8 +111,8 @@ echo "ALL ALL=(ALL) NOPASSWD:/usr/bin/killall powermetrics" | sudo tee /etc/sudoers.d/green_coding_kill_powermetrics
 echo "ALL ALL=(ALL) NOPASSWD:/usr/bin/killall -9 powermetrics" | sudo tee /etc/sudoers.d/green_coding_kill_powermetrics_sigkill

 print_message "Writing to /etc/hosts file..."
-etc_hosts_line_1="127.0.0.1 green-coding-postgres-container"
-etc_hosts_line_2="127.0.0.1 ${host_api_url} ${host_metrics_url}"
+etc_hosts_line_1="192.168.106.2 green-coding-postgres-container"
+etc_hosts_line_2="192.168.106.2 ${host_api_url} ${host_metrics_url}"

 # Entry 1 is needed for the local resolution of the containers through the jobs.py and runner.py
 if ! sudo grep -Fxq "$etc_hosts_line_1" /etc/hosts; then
@@ -129,7 +131,7 @@ if [[ ${host_metrics_url} == *".green-coding.internal"* ]];then
 fi

 if ! command -v stdbuf &> /dev/null; then
-    print_message "Trying to install 'coreutils' via homebew. If this fails (because you do not have brew or use another package manager), please install it manually ..."
+    print_message "Trying to install 'coreutils' via homebrew. If this fails (because you do not have brew or use another package manager), please install it manually ..."
     brew install coreutils
 fi
@@ -147,16 +149,17 @@ while IFS= read -r subdir; do
     fi
 done

-print_message "Building / Updating docker containers"
-docker compose -f docker/compose.yml down
-docker compose -f docker/compose.yml build
-docker compose -f docker/compose.yml pull
-
-print_message "Updating python requirements"
-python3 -m pip install --upgrade pip
-python3 -m pip install -r requirements.txt
-python3 -m pip install -r metric_providers/psu/energy/ac/xgboost/machine/model/requirements.txt
+if [[ $no_build != true ]] ; then
+    print_message "Building / Updating docker containers"
+    docker compose -f docker/compose.yml down
+    docker compose -f docker/compose.yml build
+    docker compose -f docker/compose.yml pull
+    print_message "Updating python requirements"
+    python3 -m pip install --upgrade pip
+    python3 -m pip install -r requirements.txt
+    python3 -m pip install -r metric_providers/psu/energy/ac/xgboost/machine/model/requirements.txt
+fi

 echo ""
 echo -e "${GREEN}Successfully installed Green Metrics Tool!${NC}"
diff --git a/runner.py b/runner.py
index 9f6899a8a..0b7db35ab 100755
--- a/runner.py
+++ b/runner.py
@@ -113,7 +113,7 @@ def __init__(self,
         self._uri_type = uri_type
         self._original_filename = filename
         self._branch = branch
-        self._tmp_folder = '/tmp/green-metrics-tool'
+        self._tmp_folder = '/Users/runner/tmp/green-metrics-tool'
         self._usage_scenario = {}
         self._architecture = utils.get_architecture()
         self._sci = {'R_d': None, 'R': 0}
diff --git a/tests/edit-etc-hosts.sh b/tests/edit-etc-hosts.sh
index 74780a18e..054c1ead5 100755
--- a/tests/edit-etc-hosts.sh
+++ b/tests/edit-etc-hosts.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -euo pipefail

-etc_hosts_line_1="127.0.0.1 test-green-coding-postgres-container"
+etc_hosts_line_1="192.168.106.2 test-green-coding-postgres-container"

 echo "Writing to /etc/hosts file..."
 if ! grep -Fxq "$etc_hosts_line_1" /etc/hosts; then
diff --git a/tests/pytest.ini b/tests/pytest.ini
new file mode 100644
index 000000000..693cd3ea8
--- /dev/null
+++ b/tests/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+markers =
+    no_mac_workflow: tests that cannot be run in our mac workflows
\ No newline at end of file
diff --git a/tests/test_functions.py b/tests/test_functions.py
index 8ce135d4b..bff234502 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -35,7 +35,7 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filename

 def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder', branch=None,
                  debug_mode=False, allow_unsafe=False, no_file_cleanup=False,
-                 skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=False, skip_system_checks=True,
+                 skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=True, skip_system_checks=True,
                  dev_no_sleeps=True, dev_no_metrics=True):
     usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario)
     if docker_compose is not None:
diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py
index cacdfa9fe..e8619a3e9 100644
--- a/tests/test_usage_scenario.py
+++ b/tests/test_usage_scenario.py
@@ -71,7 +71,7 @@ def get_env_vars(runner):

 # Test allowed characters
 def test_env_variable_allowed_characters():
-    runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False)
     env_var_output = get_env_vars(runner)

     assert 'TESTALLOWED=alpha-num123_' in env_var_output, Tests.assertion_info('TESTALLOWED=alpha-num123_', env_var_output)
@@ -81,7 +81,7 @@ def test_env_variable_allowed_characters():

 # Test too long values
 def test_env_variable_too_long():
-    runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml')

     with pytest.raises(RuntimeError) as e:
         get_env_vars(runner)
@@ -89,7 +89,7 @@ def test_env_variable_too_long():

 # Test skip_unsafe=true
 def test_env_variable_skip_unsafe_true():
-    runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True)
     env_var_output = get_env_vars(runner)

     # Only allowed values should be in env vars, forbidden ones should be skipped
@@ -125,14 +125,14 @@ def get_port_bindings(runner):
     return port, err

 def test_port_bindings_allow_unsafe_true():
-    runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True)
     port, _ = get_port_bindings(runner)
     assert port.startswith('0.0.0.0:9017'), Tests.assertion_info('0.0.0.0:9017', port)

 def test_port_bindings_skip_unsafe_true():
     out = io.StringIO()
     err = io.StringIO()
-    runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True)

     # need to catch exception here as otherwise the subprocess returning an error will
     # fail the test
@@ -146,7 +146,7 @@ def test_port_bindings_skip_unsafe_true():
         Tests.assertion_info(f"Warning: {expected_warning}", 'no/different warning')

 def test_port_bindings_no_skip_or_allow():
-    runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml')
     with pytest.raises(Exception) as e:
         _, docker_port_err = get_port_bindings(runner)
         expected_container_error = 'Error: No public port \'9018/tcp\' published for test-container\n'
@@ -162,7 +162,7 @@ def test_port_bindings_no_skip_or_allow():
 def test_setup_commands_one_command():
     out = io.StringIO()
     err = io.StringIO()
-    runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml')

     with redirect_stdout(out), redirect_stderr(err):
         try:
@@ -177,7 +177,7 @@ def test_setup_commands_one_command():
 def test_setup_commands_multiple_commands():
     out = io.StringIO()
     err = io.StringIO()
-    runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml')

     with redirect_stdout(out), redirect_stderr(err):
         try:
@@ -237,7 +237,7 @@ def assert_order(text, first, second):
 def test_depends_on_order():
     out = io.StringIO()
     err = io.StringIO()
-    runner = Tests.setup_runner(usage_scenario='depends_on.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='depends_on.yml')

     with redirect_stdout(out), redirect_stderr(err):
         try:
@@ -254,7 +254,7 @@ def test_depends_on_order():
 def test_depends_on_huge():
     out = io.StringIO()
     err = io.StringIO()
-    runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml')

     with redirect_stdout(out), redirect_stderr(err):
         try:
@@ -327,7 +327,7 @@ def test_depends_on_huge():

 def test_depends_on_error_not_running():
-    runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml')
     try:
         with pytest.raises(RuntimeError) as e:
             Tests.run_until(runner, 'setup_services')
@@ -338,7 +338,7 @@ def test_depends_on_error_not_running():
         Tests.assertion_info('test-container-2 is not running', str(e.value))

 def test_depends_on_error_cyclic_dependency():
-    runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml')
     try:
         with pytest.raises(RuntimeError) as e:
             Tests.run_until(runner, 'setup_services')
@@ -349,7 +349,7 @@ def test_depends_on_error_cyclic_dependency():
         Tests.assertion_info('cycle in depends_on with test-container-1', str(e.value))

 def test_depends_on_error_unsupported_condition():
-    runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml')
     try:
         with pytest.raises(RuntimeError) as e:
             Tests.run_until(runner, 'setup_services')
@@ -361,7 +361,7 @@ def test_depends_on_error_unsupported_condition():
         Tests.assertion_info(message, str(e.value))

 def test_depends_on_long_form():
-    runner = Tests.setup_runner(usage_scenario='depends_on_long_form.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='depends_on_long_form.yml')
     out = io.StringIO()
     err = io.StringIO()

@@ -375,7 +375,7 @@ def test_depends_on_long_form():
     runner.cleanup()

 def test_depends_on_healthcheck():
-    runner = Tests.setup_runner(usage_scenario='healthcheck.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='healthcheck.yml')
     out = io.StringIO()
     err = io.StringIO()

@@ -391,7 +391,7 @@ def test_depends_on_healthcheck():
     runner.cleanup()

 def test_depends_on_healthcheck_error_missing():
-    runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml')

     try:
         with pytest.raises(RuntimeError) as e:
@@ -405,9 +405,10 @@ def test_depends_on_healthcheck_error_missing():

 #volumes: [array] (optional)
 #Array of volumes to be mapped. Only read of runner.py is executed with --allow-unsafe flag
+@pytest.mark.no_mac_workflow
 def test_volume_bindings_allow_unsafe_true():
     create_test_file('/tmp/gmt-test-data')
-    runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True)
     ls = get_contents_of_bound_volume(runner)
     assert 'test-file' in ls, Tests.assertion_info('test-file', ls)

@@ -415,7 +416,7 @@ def test_volumes_bindings_skip_unsafe_true():
     create_test_file('/tmp/gmt-test-data')
     out = io.StringIO()
     err = io.StringIO()
-    runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True)

     with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception):
         ls = get_contents_of_bound_volume(runner)
@@ -426,7 +427,7 @@ def test_volumes_bindings_skip_unsafe_true():

 def test_volumes_bindings_no_skip_or_allow():
     create_test_file('/tmp/gmt-test-data')
-    runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml')
     with pytest.raises(RuntimeError) as e:
         ls = get_contents_of_bound_volume(runner)
         assert ls == '', Tests.assertion_info('empty list', ls)
@@ -435,7 +436,7 @@ def test_volumes_bindings_no_skip_or_allow():
         Tests.assertion_info(f"Exception: {expected_exception}", str(e.value))

 def test_network_created():
-    runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='network_stress.yml')
     try:
         Tests.run_until(runner, 'setup_networks')
         ps = subprocess.run(
@@ -451,7 +452,7 @@ def test_network_created():
     assert 'gmt-test-network' in ls, Tests.assertion_info('gmt-test-network', ls)

 def test_container_is_in_network():
-    runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='network_stress.yml')
     try:
         Tests.run_until(runner, 'setup_services')
         ps = subprocess.run(
@@ -471,7 +472,7 @@ def test_container_is_in_network():
 # When container does not have a daemon running typically a shell
 # is started here to have the container running like bash or sh
 def test_cmd_ran():
-    runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='cmd_stress.yml')
     try:
         Tests.run_until(runner, 'setup_services')
         ps = subprocess.run(
@@ -507,7 +508,7 @@ def test_uri_local_dir():
     assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr)

 def test_uri_local_dir_missing():
-    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='/tmp/missing', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='/tmp/missing')
     try:
         with pytest.raises(FileNotFoundError) as e:
             runner.run()
@@ -537,7 +538,7 @@ def test_uri_github_repo():
 ## --branch BRANCH
 # Optionally specify the git branch when targeting a git repository
 def test_uri_local_branch():
-    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', branch='test-branch', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', branch='test-branch')
     out = io.StringIO()
     err = io.StringIO()
     with redirect_stdout(out), redirect_stderr(err), pytest.raises(RuntimeError) as e:
@@ -574,10 +575,7 @@ def test_uri_github_repo_branch_missing():
     runner = Tests.setup_runner(usage_scenario='basic_stress.yml',
                                 uri='https://github.com/green-coding-berlin/pytest-dummy-repo',
                                 uri_type='URL',
-                                branch='missing-branch',
-                                dev_no_sleeps=True,
-                                dev_no_build=True,
-                                dev_no_metrics=True,
+                                branch='missing-branch'
     )
     with pytest.raises(subprocess.CalledProcessError) as e:
         runner.run()
@@ -661,7 +659,7 @@ def test_no_file_cleanup():
 #pylint: disable=unused-variable
 def test_skip_and_allow_unsafe_both_true():
     with pytest.raises(RuntimeError) as e:
-        runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+        runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True)

     expected_exception = 'Cannot specify both --skip-unsafe and --allow-unsafe'
     assert str(e.value) == expected_exception, Tests.assertion_info('', str(e.value))
@@ -687,7 +685,7 @@ def test_debug(monkeypatch):

 # can check for this note in the DB and the notes are about 2s apart
 def test_read_detached_process_no_exit():
-    runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml')
     out = io.StringIO()
     err = io.StringIO()
     with redirect_stdout(out), redirect_stderr(err):
@@ -701,7 +699,7 @@ def test_read_detached_process_no_exit():
         Tests.assertion_info('NOT successful run completed', out.getvalue())

 def test_read_detached_process_after_exit():
-    runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml')
     out = io.StringIO()
     err = io.StringIO()
     with redirect_stdout(out), redirect_stderr(err):
@@ -713,7 +711,7 @@
         Tests.assertion_info('successful run completed', out.getvalue())

 def test_read_detached_process_failure():
-    runner = Tests.setup_runner(usage_scenario='stress_detached_failure.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True)
+    runner = Tests.setup_runner(usage_scenario='stress_detached_failure.yml')
     out = io.StringIO()
     err = io.StringIO()

diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py
index b90b1dca4..de3bafde8 100644
--- a/tests/test_volume_loading.py
+++ b/tests/test_volume_loading.py
@@ -39,7 +39,7 @@ def check_if_container_running(container_name):
 def test_volume_load_no_escape():
     tmp_dir_name = utils.randomword(12)
     tmp_dir = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml')
-    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False)
+    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=tmp_dir_name)
     Tests.replace_include_in_usage_scenario(tmp_dir, 'volume_load_etc_passwords.yml')

     try:
@@ -82,7 +82,7 @@ def test_load_files_from_within_gmt():
     copy_compose_and_edit_directory('volume_load_within_proj.yml', tmp_dir)

     # setup runner and run test
-    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False)
+    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name)
     Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml')

     try:
@@ -110,7 +110,7 @@ def test_symlinks_should_fail():

     copy_compose_and_edit_directory('volume_load_symlinks_negative.yml', tmp_dir)

-    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False)
+    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name)
     Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml')

     try:
@@ -127,7 +127,7 @@ def test_symlinks_should_fail():
 def test_non_bind_mounts_should_fail():
     tmp_dir_name = create_tmp_dir()[1]
     tmp_dir_usage = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml')
-    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_non_bind_mounts.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False)
+    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_non_bind_mounts.yml', dir_name=tmp_dir_name)
     Tests.replace_include_in_usage_scenario(tmp_dir_usage, 'volume_load_non_bind_mounts.yml')

     try:
@@ -147,7 +147,7 @@ def test_load_volume_references():

     copy_compose_and_edit_directory('volume_load_references.yml', tmp_dir)

-    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False)
+    runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name)
     Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml')

     try:
@@ -167,6 +167,7 @@ def test_load_volume_references():
         Tests.cleanup(runner)
     assert "File mounted" in out, Tests.assertion_info('/tmp/test-file mounted', f"out: {out} | err: {err}")

+@pytest.mark.no_mac_workflow
 def test_volume_loading_subdirectories_root():
     uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading')
     RUN_NAME = 'test_' + utils.randomword(12)
@@ -195,6 +196,7 @@ def test_volume_loading_subdirectories_root():
     expect_mounted_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content"
     assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}")

+@pytest.mark.no_mac_workflow
 def test_volume_loading_subdirectories_subdir():
     uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading')
     RUN_NAME = 'test_' + utils.randomword(12)
@@ -214,6 +216,7 @@ def test_volume_loading_subdirectories_subdir():
     expect_mounted_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content"
     assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}")

+@pytest.mark.no_mac_workflow
 def test_volume_loading_subdirectories_subdir2():
     uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading')
     RUN_NAME = 'test_' + utils.randomword(12)
diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py
index 555e37450..75207df5b 100644
--- a/tests/tools/test_jobs.py
+++ b/tests/tools/test_jobs.py
@@ -70,6 +70,7 @@ def test_insert_job():
     job = Job.get_job('run')
     assert job._state == 'WAITING'

+@pytest.mark.no_mac_workflow
 def test_simple_run_job():
     name = utils.randomword(12)
     url = 'https://github.com/green-coding-berlin/pytest-dummy-repo'