Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions .github/workflows/pull_request.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ jobs:
run: |
TAG_FORMAT=$(yq '.TAG_FORMAT' .github/config/settings.yml)
echo "TAG_FORMAT=$TAG_FORMAT" >> "$GITHUB_OUTPUT"

quality_checks:
uses: NHSDigital/eps-common-workflows/.github/workflows/quality-checks.yml@2b3ddfd1e59daf9905522d0140c6cd08e2547432
needs: [get_asdf_version]
Expand All @@ -50,7 +50,6 @@ jobs:
tags: "@ping"
environment: INTERNAL-DEV
product: EPS-FHIR
id: "Pull Request Basic Regression Tests"
tag_release:
needs: [get_asdf_version]
uses: NHSDigital/eps-common-workflows/.github/workflows/tag-release.yml@2b3ddfd1e59daf9905522d0140c6cd08e2547432
Expand Down
24 changes: 0 additions & 24 deletions .github/workflows/regression_tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,6 @@ on:
- EPS-ASSIST-ME
required: false
default: EPS-FHIR
id:
description: "Unique run identifier (Do not change this)"
required: false
default: "Manually Triggered Run"
pull_request_id:
description: "The ID of the pull request. This should be in the format pr-xxxx where xxxx is the pull request id"
required: false
Expand All @@ -58,11 +54,6 @@ on:
description: "The product we are testing"
type: string
default: EPS-FHIR
id:
description: "Unique run identifier (Do not change this)"
required: false
type: string
default: "Manually Triggered Run"
pull_request_id:
description: "The ID of the pull request. This should be in the format pr-xxxx where xxxx is the pull request id"
required: false
Expand All @@ -83,42 +74,27 @@ jobs:
runs-on: ubuntu-22.04
environment: ${{ inputs.environment }}
steps:
# we need to leave this step in, as the run_regression_tests script expects the third step in the job to be named after the input id
# (the first step is the auto-generated 'Set up job' step)
- name: show_input_parameters
env:
tags: ${{ inputs.tags }}
environment: ${{ inputs.environment }}
product: ${{ inputs.product }}
id: ${{ inputs.id }}
pull_request_id: ${{ inputs.pull_request_id }}
github_tag: ${{ inputs.github_tag }}
run: |
echo "tags: ${tags}"
echo "environment: ${environment}"
echo "product: ${product}"
echo "id: ${id}"
echo "pull_request_id: ${pull_request_id}"
echo "github_tag: ${github_tag}"
# output to summary
# shellcheck disable=SC2129
echo "tags: ${tags}" >> "$GITHUB_STEP_SUMMARY"
echo "environment: ${environment}" >> "$GITHUB_STEP_SUMMARY"
echo "product: ${product}" >> "$GITHUB_STEP_SUMMARY"
echo "id: ${id}" >> "$GITHUB_STEP_SUMMARY"
echo "pull_request_id: ${pull_request_id}" >> "$GITHUB_STEP_SUMMARY"
echo "github_tag: ${github_tag}" >> "$GITHUB_STEP_SUMMARY"

- name: ${{github.event.inputs.id}}
env:
ID: ${{github.event.inputs.id}}
ENV: ${{ inputs.environment }}
PRODUCT: ${{ inputs.product }}
PULL_REQUEST_ID: ${{ inputs.pull_request_id }}
run: |
echo run identifier "$ID"-"$PRODUCT"-"$ENV"-"$PULL_REQUEST_ID"
echo run identifier "$ID"-"$PRODUCT"-"$ENV"-"$PULL_REQUEST_ID" >> "$GITHUB_STEP_SUMMARY"

run_regression_tests:
runs-on: ubuntu-22.04
environment: ${{ inputs.environment }}
Expand Down
87 changes: 4 additions & 83 deletions scripts/run_regression_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,6 @@
check the status of the regression test run to be reported to the CI.
"""
import argparse
from datetime import datetime, timedelta, timezone
import random
import string
import time
import requests
from requests.auth import HTTPBasicAuth, AuthBase
Expand Down Expand Up @@ -43,30 +40,17 @@ def get_headers():
}


def generate_unique_run_id(length=15):
    """Return a random run identifier of *length* uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))


def generate_timestamp():
    """Return a UTC timestamp string (YYYY-MM-DDTHH:MM) for two minutes ago.

    The value is used as a lower-bound "created after" filter when listing
    workflow runs, so a run dispatched just now is always newer than it.
    """
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=2)
    stamp = cutoff.strftime("%Y-%m-%dT%H:%M")
    print(f"Generated Date as: {stamp}")
    return stamp


def trigger_test_run(
env,
pr_label,
product,
auth_header,
run_id,
regression_test_repo_tag,
regression_test_workflow_tag,
):
body = {
"ref": regression_test_workflow_tag,
"inputs": {
"id": run_id,
"tags": "@regression",
"environment": ENVIRONMENT_NAMES[env],
"pull_request_id": pr_label,
Expand All @@ -82,69 +66,10 @@ def trigger_test_run(
json=body,
timeout=120,
)

print(f"Dispatch workflow. Unique workflow identifier: {run_id}")
assert (
response.status_code == 204 or response.status_code == 200
), f"Failed to trigger test run. Expected 204, got {response.status_code}. Response: {response.text}"


def get_workflow_runs(auth_header, run_date_filter):
    """List workflow runs created after *run_date_filter* via the GitHub API.

    The ``%3E`` in the query string is a URL-encoded ``>``, i.e. "created
    strictly after" the given timestamp.

    Args:
        auth_header: requests auth object for the GitHub API.
        run_date_filter: timestamp lower bound for the ``created`` filter.

    Returns:
        The ``workflow_runs`` list from the API response body.
    """
    print(f"Getting workflow runs after date: {run_date_filter}")
    url = f"{GITHUB_API_URL}/runs?created=%3E{run_date_filter}"
    response = requests.get(
        url,
        headers=get_headers(),
        auth=auth_header,
        timeout=120,
    )
    status = response.status_code
    assert status == 200, f"Unable to get workflow runs. Expected 200, got {status}"
    payload = response.json()
    return payload["workflow_runs"]


def get_jobs_for_workflow(jobs_url, auth_header):
    """Fetch the jobs of a single workflow run.

    Args:
        jobs_url: the run's ``jobs_url`` as returned by the GitHub API.
        auth_header: requests auth object for the GitHub API.

    Returns:
        The ``jobs`` list from the API response body.
    """
    print("Getting jobs for workflow...")
    response = requests.get(jobs_url, auth=auth_header, timeout=120)
    status = response.status_code
    assert status == 200, f"Unable to get workflow jobs. Expected 200, got {status}"
    payload = response.json()
    return payload["jobs"]


def find_workflow(auth_header, run_id, run_date_filter):
    """Poll the GitHub API until the workflow run dispatched with *run_id* is found.

    A workflow-dispatch call does not return the created run's id, so we list
    recent runs (created after *run_date_filter*) and match on the name of the
    third step of the first job — the workflow names that step after its
    unique ``id`` input (the first step is the auto-generated 'Set up job').

    Args:
        auth_header: requests auth object for the GitHub API.
        run_id: the unique identifier passed as the workflow's ``id`` input.
        run_date_filter: only runs created after this timestamp are scanned.

    Returns:
        The matching workflow run id, or None if no match after all attempts.
    """
    max_attempts = 5
    current_attempt = 0

    while current_attempt < max_attempts:
        time.sleep(10)  # give GitHub time to materialise the dispatched run
        current_attempt = current_attempt + 1
        print(f"Attempt {current_attempt}")

        workflow_runs = get_workflow_runs(auth_header, run_date_filter)
        for workflow in workflow_runs:
            time.sleep(3)  # stay under the API rate limit while scanning runs
            current_workflow_id = workflow["id"]
            jobs_url = workflow["jobs_url"]

            list_of_jobs = get_jobs_for_workflow(jobs_url, auth_header)

            if list_of_jobs:
                job = list_of_jobs[0]  # this is fine to get the first job
                steps = job["steps"]

                # Fix: accessing steps[2] needs at least 3 steps; the previous
                # check (len(steps) >= 2) raised IndexError when exactly two
                # steps had been reported so far.
                if len(steps) >= 3:
                    third_step = steps[2]
                    if third_step["name"] == run_id:
                        print(f"Workflow Job found! Using ID: {current_workflow_id}")
                        return current_workflow_id
                else:
                    print("Not enough steps have been executed for this run yet...")
            else:
                print("Jobs for this workflow run haven't populated yet...")
    print(
        "Processed all available workflows but no jobs matching the Unique ID were found!"
    )
), f"Failed to trigger test run. Expected 200, got {response.status_code}. Response: {response.text}"
return response.json()["workflow_run_id"]


def get_auth_header(is_called_from_github, token, user):
Expand Down Expand Up @@ -246,25 +171,21 @@ def main():
print(f"regression_tests_repo_tag: {arguments.regression_test_repo_tag}")
print(f"regression_test_workflow_tag: {arguments.regression_test_workflow_tag}")

run_id = generate_unique_run_id()
run_date_filter = generate_timestamp()
auth_header = get_auth_header(
arguments.is_called_from_github, arguments.token, arguments.user
)

pr_label = arguments.pr_label.lower()
trigger_test_run(
workflow_id = trigger_test_run(
arguments.env,
pr_label,
arguments.product,
auth_header,
run_id,
arguments.regression_test_repo_tag,
arguments.regression_test_workflow_tag,
)

workflow_id = find_workflow(auth_header, run_id, run_date_filter)
print(f"See {GITHUB_RUN_URL}/{workflow_id}/ for run details")

job_status = check_job(auth_header, workflow_id)
if job_status != "success":
if arguments.pr_label:
Expand Down
Loading