diff --git a/.github/actions/make/action.yml b/.github/actions/make/action.yml
index 9b08cf2c4..ea8a7eece 100644
--- a/.github/actions/make/action.yml
+++ b/.github/actions/make/action.yml
@@ -12,6 +12,18 @@ inputs:
     description: "[Optional] Cache suffix (e.g. 'base')"
     required: false
     default: ""
+  coverage-cache-suffix:
+    description: "[Optional] Coverage cache suffix (e.g. 'coverage')"
+    required: false
+    default: ""
+  restore-coverage:
+    description: "[Optional] Restore coverage? If true, please provide coverage-cache-suffix"
+    required: false
+    default: "false"
+  cache-coverage:
+    description: "[Optional] Cache coverage? If true, please provide coverage-cache-suffix, and paths-to-cache as the coverage file"
+    required: false
+    default: "false"
   restore-from-cache:
     description: "[Optional] restore from cache?"
     required: false
@@ -20,6 +32,9 @@ inputs:
     description: "[Optional] save to cache?"
     required: false
     default: "false"
+  paths-to-cache:
+    description: "[Optional] Paths to cache"
+    required: false
 runs:
   using: "composite"
   steps:
     - ...
       shell: bash

    - name: Restore from the cache
-      if: ${{ inputs.restore-from-cache == 'true' }}
+      if: ${{ inputs.restore-from-cache == 'true' && inputs.restore-coverage != 'true' }}
      uses: ./.github/actions/components/caching/
      with:
        save-or-restore: "restore"
        cache-suffix: ${{ inputs.cache-suffix }}

+    - name: Restore coverage from the cache
+      if: ${{ inputs.restore-from-cache == 'true' && inputs.restore-coverage == 'true' }}
+      uses: ./.github/actions/components/caching/
+      with:
+        save-or-restore: "restore"
+        cache-suffix: ${{ inputs.coverage-cache-suffix }}
+
    - name: Configure AWS Credentials
      id: configure-aws-credentials
      if: inputs.requires-aws
@@ -66,8 +88,16 @@
          AWS_SECRET_ACCESS_KEY: ${{ steps.configure-aws-credentials.outputs.aws-secret-access-key }}
          AWS_SESSION_TOKEN: ${{ steps.configure-aws-credentials.outputs.aws-session-token }}

-    - name: Save to the cache
-      if: ${{ inputs.save-to-cache == 'true' }}
+    - name: Save to the cache with path (for coverage)
+      if: ${{ inputs.save-to-cache == 'true' && inputs.cache-coverage == 'true' }}
+      uses: ./.github/actions/components/caching/
+      with:
+        save-or-restore: "save"
+        cache-suffix: ${{ inputs.coverage-cache-suffix }}
+        paths-to-cache: ${{ inputs.paths-to-cache }}
+
+    - name: Save to the cache default paths
+      if: ${{ inputs.save-to-cache == 'true' && inputs.cache-coverage != 'true' }}
      uses: ./.github/actions/components/caching/
      with:
        save-or-restore: "save"
diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index 546c4a8c2..b4b795ca7 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -23,6 +23,25 @@ jobs:
           branch_name=${branch_name#*refs/tags/}
           echo "branch_name=${branch_name}" >> $GITHUB_OUTPUT

+  test--coverage-sonar:
+    runs-on: [self-hosted, ci]
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: main
+      - name: Restore coverage from the cache
+        uses: ./.github/actions/components/caching/
+        with:
+          save-or-restore: "restore"
+          cache-suffix: "coverage"
+          paths-to-cache: "sonarcloud-coverage.xml"
+
+      - name: SonarQube Scan
+        uses: SonarSource/sonarqube-scan-action@v5
+        env:
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+          SONAR_HOST_URL: https://sonarcloud.io
+
   make-tag:
     runs-on: [self-hosted, ci]
     permissions: write-all
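For orientation, this is how a caller might combine the new composite-action inputs — a minimal sketch mirroring the `test--coverage-sonar` job added to `pull-requests.yml` below (the `command` value is the `test--coverage` make target added later in this PR):

```yaml
- uses: ./.github/actions/make/
  with:
    command: test--coverage                   # make target that writes sonarcloud-coverage.xml
    requires-aws: true
    save-to-cache: "true"
    cache-coverage: "true"                    # route the save through the coverage branch
    coverage-cache-suffix: "coverage"         # suffix used to key the cache entry
    paths-to-cache: "sonarcloud-coverage.xml" # only this file is cached
```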
diff --git a/.github/workflows/pull-requests.yml b/.github/workflows/pull-requests.yml
index 072acf57e..7937f89f3 100644
--- a/.github/workflows/pull-requests.yml
+++ b/.github/workflows/pull-requests.yml
@@ -9,8 +9,8 @@ permissions:
 env:
   BASE_CACHE_SUFFIX: base
-  BASE_BRANCH_NAME: ${{ github.event.pull_request.base.ref }}
-  BRANCH_NAME: ${{ github.event.pull_request.head.ref }}
+  BASE_BRANCH_NAME: main
+  BRANCH_NAME: feature/PI-855-add_sonar_code_coverage
   CI_ROLE_NAME: ${{ secrets.CI_ROLE_NAME }}
   BRANCH_GITHUB_SHA_SHORT: $(echo ${{ github.event.pull_request.head.sha }} | cut -c 1-7)
   TF_CLI_ARGS: -no-color
@@ -275,6 +275,39 @@ jobs:
       command: test--smoke
       requires-aws: true

+  test--coverage-sonar:
+    needs:
+      [
+        build-head,
+        workflow--codebase-checks,
+        test--unit,
+        test--feature--local,
+        terraform-head-build,
+        test--feature--integration,
+        test--integration,
+        apigee--deploy,
+        apigee--attach-product,
+        test--smoke,
+      ]
+    runs-on: [self-hosted, ci]
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: ${{ env.BRANCH_NAME }}
+      - uses: ./.github/actions/make/
+        with:
+          command: test--coverage
+          requires-aws: true
+          save-to-cache: "true"
+          cache-coverage: "true"
+          coverage-cache-suffix: "coverage"
+          paths-to-cache: "sonarcloud-coverage.xml"
+      - name: SonarQube Scan
+        uses: SonarSource/sonarqube-scan-action@v5
+        env:
+          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
+          SONAR_HOST_URL: https://sonarcloud.io
+
   apigee--detach-product:
     needs:
       [
diff --git a/.gitignore b/.gitignore
index 184f83671..f3f27ec7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -86,3 +86,4 @@ cpm.cdx.json
 test_failure.json
 test_success_*.json
 pyrightconfig.json
+sonarcloud-coverage.xml
diff --git a/.sonarcloud.properties b/.sonarcloud.properties
deleted file mode 100644
index 075a6bc85..000000000
--- a/.sonarcloud.properties
+++ /dev/null
@@ -1 +0,0 @@
-sonar.cpd.exclusions=src/api/**/index.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b94110ad4..aa40baa96 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
 # Changelog

+## 2025-03-18
+- [PI-761] Product Team name cannot be blank
+- [PI-850] Readme Review
+- [PI-855] Code coverage
+
 ## 2025-03-12
 - [PI-841] Remove VPC cpm-sds-etl-hscn-vpc
 - [PI-851] Swagger spec refinement - Part Deux
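Taken together, the workflows above produce and publish coverage in two steps: `make test--coverage` writes `sonarcloud-coverage.xml`, and the Sonar scan reads it via `sonar.python.coverage.reportPaths` (see `sonar-project.properties` later in this diff). A rough local equivalent — a sketch only, assuming an active AWS session and the standalone `sonar-scanner` CLI, which this PR does not install (CI uses `SonarSource/sonarqube-scan-action` instead):

```shell
# Run pytest with coverage; writes sonarcloud-coverage.xml in the repo root
make test--coverage

# sonar-scanner picks up sonar-project.properties from the repo root;
# recent scanner versions accept sonar.token (older ones use sonar.login)
sonar-scanner -Dsonar.host.url=https://sonarcloud.io -Dsonar.token="$SONAR_TOKEN"
```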
diff --git a/README.md b/README.md
index ca89d5137..72868f2fc 100644
--- a/README.md
+++ b/README.md
@@ -4,34 +4,88 @@

 ## Table of Contents

+1. [TLDR](#tldr)
+   1. [Requirements](#requirements)
+   2. [Building a local environment](#building-a-local-environment)
+   3. [Testing](#testing)
+   4. [Destroying a local environment](#destroying-a-local-environment)
 1. [Setup](#setup)
    1. [Prerequisites](#prerequisites)
-   2. [Project build](#project-build)
-   3. [AWS SSO Setup](#aws-sso-setup)
-   4. [Other helpful commands](#other-helpful-commands)
-2. [Tests](#tests)
+   2. [Useful tools](#useful-tools)
+   3. [Project build](#project-build)
+   4. [Proxygen](#proxygen)
+      1. [Temporary Proxies](#temporary-proxies)
+   5. [AWS SSO Setup](#aws-sso-setup)
+   6. [Build a local workspace on AWS (DEV)](#build-a-local-workspace-on-aws-dev)
+   7. [Build a local workspace on AWS (QA)](#build-a-local-workspace-on-aws-qa)
+   8. [Destroy a local workspace on AWS](#destroy-a-local-workspace-on-aws)
+   9. [Automated workspace destroys](#automated-workspace-destroys)
+   10. [Destroy Expired Workspaces](#destroy-expired-workspaces)
+   11. [Destroy Redundant Workspaces](#destroy-redundant-workspaces)
+   12. [Destroy Corrupted Workspaces](#destroy-corrupted-workspaces)
+   13. [Updating Roles](#updating-roles)
+   14. [Other helpful commands](#other-helpful-commands)
+1. [Tests](#tests)
    1. [pytest tests](#pytest-tests)
    2. [End-to-End feature tests](#end-to-end-feature-tests)
-   3. [Generate the Feature Test Postman collection](#generate-the-feature-test-postman-collection)
-3. [Data modelling](#data-modelling)
+   3. [Local](#local)
+   4. [Integration](#integration)
+   5. [Generate the Feature Test Postman collection](#generate-the-feature-test-postman-collection)
+1. [Data modelling](#data-modelling)
    1. [Domain models](#domain-models)
    2. [Database models](#database-models)
    3. [Response models](#response-models)
    4. [Request models](#request-models)
-4. [Workflow](#workflow)
-5. [Swagger](#swagger)
-6. [ETL](#etl)
-7. [Administration](#administration)
+1. [Workflow](#workflow)
+1. [Deployment](#deployment)
+1. [Swagger](#swagger)
+1. [Setting Lambda permissions](#setting-lambda-permissions)
+1. [Terraform](#terraform)
+1. [Administration](#administration)
+1. [SBOM](#sbom)
+1. [Extras](#extras)
+   1. [Archive](#archive)

 ---

+## TLDR
+
+If you want to get up and running as quickly as possible, then read this section. However, it is advisable to read the full README when possible.
+
+### Requirements
+
+- Ensure you have `AWS SSO` set up. For more information please read the [AWS SSO Setup](#aws-sso-setup) section.
+- Ensure you have `asdf` installed and the `docker engine` running. For more information please read the [Setup](#setup) section.
+
+### Building a local environment
+
+- Inside the connecting party manager root directory, run...
+- `make terraform--apply`
+- For more information please read the [Build a local workspace on AWS (DEV)](#build-a-local-workspace-on-aws-dev) section.
+
+### Testing
+
+- Inside the connecting party manager root directory, run any of the following commands...
+- `make test--unit`
+- `make test--integration`
+- `make test--feature--local`
+- `make test--feature--integration`
+- `make test--smoke`
+
+### Destroying a local environment
+
+- Provided you haven't changed the workspace name:
+- Inside the connecting party manager root directory, run...
+- `make terraform--destroy`
+- For more information please read the [Destroy a local workspace on AWS](#destroy-a-local-workspace-on-aws) section.
+
 ## Setup

 ### Prerequisites

 We use `asdf` to fetch the required versions of prerequisite libraries instead of your system's default version. To get it up and running go to . You can check it installed properly by using the command `asdf --version`.

-However, you will also need to install the `docker engine` separately
+You will also need to install the `docker engine` separately.

 Additionally you will need `wget` (doing `which wget` will return blank if not installed). Please Google "how to install wget on my operating system", if you don't already have this installed.

@@ -41,9 +95,9 @@ Otherwise `asdf` should do the work for you.

 ### Useful tools

-`VScode` is useful and we have a workspace file setup to allow easy integration
+`VScode` is a useful IDE and we have a workspace file set up to allow easy integration.

-`Postman` &/or `Newman` Feature tests create a postman.collection which can be used for manual testing.
+`Postman` &/or `Newman`. Feature tests create a postman.collection which can be used for manual testing.
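On the Newman side, a minimal sketch of running the generated collection from the command line (the collection filename here is hypothetical — use whichever `postman.collection` file your feature-test run produces):

```shell
# newman is Postman's CLI runner, installed via npm
npm install -g newman
newman run feature-tests.postman.collection.json   # hypothetical filename
```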
 ### Project build

@@ -61,7 +115,7 @@ We use [Proxygen](https://github.com/NHSDigital/proxygen-cli) to deploy our proxies

 - `make apigee--deploy`

-This when run locally will need you to have done a local `terraform--plan` and `terraform--apply` (as it will read some details from the output files)
+When you run this locally you will first need to have run a local `terraform--plan` and `terraform--apply` (as it will read some details from the output files).

 Caveats

@@ -117,7 +171,7 @@ This is the preferred method of switching between profiles, as it will cause the

 You can build a working copy of the CPM service in your own workspace within the `dev` environment. To do this follow these steps. (You must have SSO setup on your system and have MGMT admin access)

-You must pass a `TERRAFORM_WORKSPACE` variable to each command in the format `YOUR_SHORTCODE_AND_JIRA_NUMBER`. This variable must not contain spaces, but can contain underscores and hyphens. `e.g. jobl3-PI-100`
+By default your workspace will build with an 8-digit hash of your system username, so you do not need to provide a workspace name. If you would like to give your workspace a specific name, you must pass a `TERRAFORM_WORKSPACE` variable to each command. The recommended format is `YOUR_SHORTCODE_AND_JIRA_NUMBER` (e.g. `jobl3-PI-100`), but any name without spaces is accepted; underscores and hyphens are fine.

 ```shell
 make terraform--init TERRAFORM_WORKSPACE="" # Will attempt to login to AWS first using SSO

@@ -198,13 +252,13 @@ Run `make` to get a list of helpful commands.

 ### `pytest` tests

-There are four types of `pytest` in this project:
+There are three types of `pytest` in this project:

 - Unit: these _do not have_ any `@pytest.mark` markers;
 - Integration: these have `@pytest.mark.integration` markers;
 - Smoke: these have `@pytest.mark.smoke` markers;

-In order to run these you can do one of::
+In order to run these you can do one of:

 ```shell
 make test--unit

@@ -287,7 +341,39 @@ Modelling in Connecting Party Manager is split into four partially-decoupled com

 ### Domain models

-TBC
+We have two Domain models: ProductTeam and Product.
+
+```
+class ProductTeam:
+    id: str
+    name: str
+    ods_code: str
+    status: enum(ACTIVE, INACTIVE)
+    created_on: datetime
+    updated_on: datetime
+    deleted_on: datetime
+    keys: list[{
+        key_type: str,
+        key_value: uuid
+    }]
+```
+
+```
+class CpmProduct(AggregateRoot):
+    id: str
+    cpm_product_team_id: str
+    product_team_id: str
+    name: str
+    ods_code: str
+    status: enum(ACTIVE, INACTIVE)
+    created_on: datetime
+    updated_on: datetime
+    deleted_on: datetime
+    keys: list[{
+        key_type: str,
+        key_value: str
+    }]
+```

 ### Database models

@@ -303,7 +389,7 @@ The table is structured as below. (For purposes of clarity extra "entity specifi
 | PT#12345  | P#P.123-XYZ | product            | P.123-XYZ | A product      | 2025-01-30T14:30:18.191643+00:00 | null | null | active | P#P.123-XYZ | P#P.123-XYZ | ORG#AB123 | P#P.123-XYZ | true  |
 | PT#FOOBAR | PT#FOOBAR   | product_team_alias | 12345     | A Product Team | 2025-01-30T14:30:18.191643+00:00 | null | null | active | PT#FOOBAR   | PT#FOOBAR   | NULL      | NULL        | false |

-Product Teams can additionally be indexed by any keys (see [Domain models](#domain-models)) that they have. For every key in an domain object, that is of type product_team_alias_id,
+Product Teams can additionally be indexed by any keys (see [Domain models](#domain-models)) that they have. For a key in a domain object that is of type product_team_id,
 a copy is made in the database with the index being that key, rather than the object's identifier.
 Such copies are referred to as non-root objects, whereas the "original" (indexed by identifier) is referred to
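To make the root/non-root distinction concrete, a hypothetical sketch of the two copies that would be stored for one ProductTeam (column names abbreviated and values illustrative only; this assumes the final column of the table above is the root flag):

```
# root object: indexed by the ProductTeam's own identifier
pk=PT#12345     sk=PT#12345     root=true

# non-root copy of the same object: indexed by its product_team_id key
pk=PT#0a78ee8f  sk=PT#0a78ee8f  root=false
```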
@@ -317,11 +403,11 @@ A `read` and `search` is available on all `Repository` patterns (almost) for fre

 ### Response models

-TBC
+For all response models, please refer to the Swagger/OAS spec.

 ### Request models

-TBC
+For all request models, please refer to the Swagger/OAS spec.

 ## Workflow

@@ -360,6 +446,34 @@ This command will also:

 - Update the version in `pyproject.toml` with the release branch version.
 - Update the VERSION file with the release branch version number.

+## Deployment
+
+### Create a Release branch
+
+When requested by QA:
+
+- Create a release branch using the make command described in the previous section.
+- Create a new changelog file with the correct date in the changelog directory. (Follow the patterns used in previous files.)
+- Merge the required branches into the release branch through the command line, e.g. `git merge origin/feature/PI-123-A_branch_for_qa`.
+- Commit and push the release branch to GitHub and create a PR.
+- Use the workflow on the #platforms-connecting-party-manager Slack channel to notify of the new release branch.
+
+### After QA has approved the changes
+
+QA should approve the release branch PR and notify developers on the #platforms-connecting-party-manager Slack channel of a request to merge the release branch.
+
+- Merge the release branch within the GitHub UI. This will merge the release and close any feature branches that are associated with it.
+- Reply to the QA notification on Slack that it has been merged and to rebase. Make sure this reply also appears on the main feed. `Merged, rebase rebase :alert:` is usually enough.
+- Now we need to deploy to all environments.
+- In the GitHub UI, navigate to the Actions tab, select `Deploy: Workspace - Nonprod`, and select the `Run workflow` dropdown.
+- Select the `Tag` to deploy (this is the release branch that has just been merged).
+- Select the account to deploy to, as well as whether it should be the sandbox or not.
+- You must do this for all the environments.
+- Now navigate to `Deploy: Workspace - Production` on the left.
+- Run the workflow: select the tag to deploy and run.
+- In production you will need to approve the deployment once the Terraform plan has run. All other environments will deploy to completion without any user interaction.
+- Once all environments are deployed successfully, you can move any Jira tickets to "Done".
+
 ## Swagger

 This is all done by `make build`. For more details on how to update the Swagger, please see [the swagger README](infrastructure/swagger/README.md).

@@ -381,52 +495,6 @@ If you find yourself with a locked terraform state, do:

 ```
 make terraform--unlock TERRAFORM_ARGS=
 ```

-## ETL
-
-### Debugging the state after changelog errors
-
-In order to get the latest head state of the ETL, do either (for your developer workspace)
-
-```
-make etl--head-state--developer
-```
-
-or for a persistent workspace (`dev`, `prod`, etc):
-
-```
-make etl--head-state--persistent-workspace WORKSPACE=
-```
-
-For the developer operation, the script will automatically activate via SSO, however for the persistent-workspace
-operation you will need to export credentials by navigating yourself to the SSO login page and exporting the
-credentials for the workspace into your terminal.
-
-### Clearing the state (don't take this lightly, intended for first time bulk upload only)
-
-Before running the bulk trigger, you need to clear the initial ETL state, do:
-
-```
-make etl--clear-state
-```
-
-Before running the changelog trigger you additionally need to specify a changelog number (ideally close to the true latest changelog number, otherwise the logs will be pretty heavy!)
-
-```
-make etl--clear-state SET_CHANGELOG_NUMBER=540210
-```
-
-You can additionally set the workspace name if you want to clear the state for a given (e.g. persistent) workspace name:
-
-```
-make etl--clear-state WORKSPACE=dev
-```
-
-and
-
-```
-make etl--clear-state WORKSPACE=dev SET_CHANGELOG_NUMBER=540210
-```
-
 ## Administration

 ### Generating Ids

@@ -451,7 +519,9 @@ We also have a confluence page

 `https://nhsd-confluence.digital.nhs.uk/display/SPINE/CPM+Swagger+Docs`

-In time we will also have our spec uploaded to bloomreach via proxygen
+The Spec is also available on the NHS API Catalogue.
+
+`https://digital.nhs.uk/developer/api-catalogue/connecting-party-manager/content`

 ## SBOM (Service Bill of Materials)
diff --git a/VERSION b/VERSION
index 371556384..2e48bd7f0 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2025.03.12
+2025.03.18
diff --git a/changelog/2025-03-18.md b/changelog/2025-03-18.md
new file mode 100644
index 000000000..f64d0b35b
--- /dev/null
+++ b/changelog/2025-03-18.md
@@ -0,0 +1,3 @@
+- [PI-761] Product Team name cannot be blank
+- [PI-850] Readme Review
+- [PI-855] Code coverage
diff --git a/pyproject.toml b/pyproject.toml
index 6fb7b91d4..fba0464aa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "connecting-party-manager"
-version = "2025.03.12"
+version = "2025.03.18"
 description = "Repository for the Connecting Party Manager API and related services"
 authors = ["NHS England"]
 license = "LICENSE.md"
@@ -33,6 +33,7 @@ black = "^25.1.0"
 flake8 = "^7.0.0"
 behave = "^1.2.6"
 pytest = "^8.2.0"
+pytest-cov = "^6.0.0"
 pytest-custom-exit-code = "^0.3.0"
 sh = "^2.0.6"
 hypothesis = "^6.87.3"
diff --git a/scripts/test/test.mk b/scripts/test/test.mk
index ccee4a2fa..73e1b379e 100644
--- a/scripts/test/test.mk
+++ b/scripts/test/test.mk
@@ -40,3 +40,6 @@ test--feature--local: _behave ## Run local feature (gherkin) tests

 test--feature--%--auto-retry: ## Autoretry of failed feature (gherkin) tests
 	$(MAKE) test--feature--$* _INTERNAL_FLAGS="--define='auto_retry=true'"
+
+test--coverage: aws--login ## Run unit (pytest) tests with coverage
+	$(MAKE) _pytest _INTERNAL_FLAGS="--cov --cov-report=xml:sonarcloud-coverage.xml $(_INTERNAL_FLAGS)" _CACHE_CLEAR=$(_CACHE_CLEAR) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN)
diff --git a/sonar-project.properties b/sonar-project.properties
new file mode 100644
index 000000000..b502e58ab
--- /dev/null
+++ b/sonar-project.properties
@@ -0,0 +1,9 @@
+sonar.projectKey=NHSDigital_connecting-party-manager
+sonar.organization=nhsdigital
+sonar.projectVersion=1.0
+sonar.sources=src/,infrastructure/
+sonar.exclusions=src/**/test_*.py,test_ui/,changelog/,archived_epr/
+sonar.test.inclusions=**/test_*.py
+
+sonar.python.coverage.reportPaths=sonarcloud-coverage.xml
+sonar.cpd.exclusions=src/api/**/index.py
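For clarity, the `test--coverage` target above is a thin wrapper around `pytest-cov` (added as a dev dependency in `pyproject.toml` earlier in this diff). A rough equivalent of the underlying invocation — a sketch only, assuming an active AWS session and ignoring the internal flags the Makefile adds:

```shell
# The supported entry point
make test--coverage

# Approximately what the _pytest target ends up running
poetry run pytest --cov --cov-report=xml:sonarcloud-coverage.xml
```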
diff --git a/src/api/tests/feature_tests/features/createProductTeam.failure.feature b/src/api/tests/feature_tests/features/createProductTeam.failure.feature
index 032e8833a..f8c5e96de 100644
--- a/src/api/tests/feature_tests/features/createProductTeam.failure.feature
+++ b/src/api/tests/feature_tests/features/createProductTeam.failure.feature
@@ -122,6 +122,21 @@ Feature: Create Product Team - failure scenarios
       | Content-Type   | application/json |
       | Content-Length | 138              |

+  Scenario: Cannot create a ProductTeam with a missing name
+    When I make a "POST" request with "default" headers to "ProductTeam" with body:
+      | path             | value                                |
+      | ods_code         | F5H11                                |
+      | keys.0.key_type  | product_team_id                      |
+      | keys.0.key_value | 0a78ee8f-5bcf-4db1-9341-ef1d67248715 |
+    Then I receive a status code "400" with body
+      | path             | value                                                |
+      | errors.0.code    | MISSING_VALUE                                        |
+      | errors.0.message | CreateProductTeamIncomingParams.name: field required |
+    And the response headers contain:
+      | name           | value            |
+      | Content-Type   | application/json |
+      | Content-Length | 106              |
+
   Scenario: Cannot create a ProductTeam with empty product team id key
     When I make a "POST" request with "default" headers to "ProductTeam" with body:
       | path | value |
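For reference, the new scenario corresponds to a request shaped like the following — a hypothetical sketch, since the real base URL and auth headers are supplied by the feature-test harness:

```shell
# <WORKSPACE_URL> is a placeholder for your workspace's API base URL
curl -X POST "<WORKSPACE_URL>/ProductTeam" \
  -H "Content-Type: application/json" \
  -d '{"ods_code": "F5H11", "keys": [{"key_type": "product_team_id", "key_value": "0a78ee8f-5bcf-4db1-9341-ef1d67248715"}]}'
# Expected: HTTP 400 with errors[0].code == "MISSING_VALUE", because name is required
```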