2 changes: 1 addition & 1 deletion .github/workflows/pull-requests.yml
@@ -222,7 +222,7 @@ jobs:
runs-on: [self-hosted, ci]
strategy:
matrix:
test-type: [integration, s3]
test-type: [integration]
steps:
- uses: actions/checkout@v4
with:
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,9 @@
# Changelog

## 2025-02-24
- [PI-794] Remove EPR S3 tests
- [PI-788] Create Product Search and Delete Flows for test UI

## 2025-02-21
- [PI-754] Search Product
- Dependabot: datamodel-code-generator
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
2025.02.21.a
2025.02.24
2 changes: 2 additions & 0 deletions changelog/2025-02-24.md
@@ -0,0 +1,2 @@
- [PI-794] Remove EPR S3 tests
- [PI-788] Create Product Search and Delete Flows for test UI
51 changes: 51 additions & 0 deletions infrastructure/swagger/05_paths.yaml
@@ -16,6 +16,15 @@ tags:
description: Create, Read and Delete Product IDs
- name: Options
description: These exist for CORS
- name: Core EPR Creation Endpoints
description: Core EPR API creation endpoints
- name: Core EPR Read Endpoints
description: Core EPR API read endpoints
- name: Core EPR Deletion Endpoints
description: Core EPR API deletion endpoints
- name: SDS Search Endpoints
description: Search Results provided for the SDS FHIR API

paths:
/_status:
get:
@@ -298,6 +307,48 @@ paths:
- app-level0: []

/ProductTeam/{product_team_id}/Product/{product_id}:
options:
operationId: productactions
summary: Product actions (OPTIONS)
parameters:
- $ref: "#/components/parameters/ProductTeamId"
- $ref: "#/components/parameters/ProductId"
tags:
- Core Product ID Endpoints
responses:
"400":
$ref: "#/components/responses/BadRequest"
"200":
description: "200 response"
headers:
Access-Control-Allow-Origin:
schema:
type: "string"
Access-Control-Allow-Methods:
schema:
type: "string"
Access-Control-Allow-Headers:
schema:
type: "string"
content:
application/json:
schema:
$ref: "#/components/schemas/Empty"
x-amazon-apigateway-integration:
responses:
default:
statusCode: "200"
responseParameters:
method.response.header.Access-Control-Allow-Methods: "'GET,OPTIONS,POST,DELETE'"
method.response.header.Access-Control-Allow-Headers: "'apikey,authorization,content-type,version'"
method.response.header.Access-Control-Allow-Origin: "'*'"
requestTemplates:
application/json: '{"statusCode": 200}'
passthroughBehavior: "never"
type: "mock"
security:
- ${authoriser_name}: []
- app-level0: []
get:
operationId: readproductendpoint
summary: Read a Product resource (GET)
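Note: the new OPTIONS operation above uses an API Gateway mock integration, so API Gateway answers the CORS preflight itself and returns the configured Access-Control-Allow-* headers without invoking a backend. A minimal sketch of exercising such a preflight; the base URL, origin and API key below are illustrative placeholders, not values from this PR:

import requests

# Hypothetical deployed base URL and API key, not defined anywhere in this change.
BASE_URL = "https://api.example.nhs.uk/cpm"
HEADERS = {
    "apikey": "<api-key>",  # the security section above also applies to OPTIONS
    "Origin": "https://test-ui.example.com",
    "Access-Control-Request-Method": "DELETE",
}

# Preflight request for the Product resource path added above.
response = requests.options(f"{BASE_URL}/ProductTeam/123/Product/abc", headers=HEADERS)

# The mock integration is configured to answer 200 with the allowed methods,
# headers and origin from the x-amazon-apigateway-integration block.
assert response.status_code == 200
assert "DELETE" in response.headers["Access-Control-Allow-Methods"]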
14 changes: 12 additions & 2 deletions infrastructure/swagger/12_components--responses.yaml
@@ -86,8 +86,18 @@ components:
headers:
Access-Control-Allow-Origin:
schema:
type: string
example: "*"
$ref: "#/components/schemas/ErrorResponse"
examples:
ValidationErrorExtraFields:
value:
errors:
- code: "VALIDATION_ERROR"
message: "SearchSDSEndpointQueryParams.foo: extra fields not permitted"
ValidationError:
value:
errors:
- code: "VALIDATION_ERROR"
message: "SearchSDSEndpointQueryParams.__root__: At least 2 query parameters should be provided of type, nhs_id_code, nhs_mhs_svc_ia and nhs_mhs_party_key"
UnprocessableContent:
description: Unprocessable Content
content:
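The messages in the new ValidationError examples follow pydantic's error format for a model named SearchSDSEndpointQueryParams: "__root__" for a model-level check, a field name for extra fields. A rough illustration of how such a model could produce those messages; this is a sketch, not the model from this codebase:

from typing import Optional

from pydantic import BaseModel, Extra, root_validator


class SearchSDSEndpointQueryParams(BaseModel):
    class Config:
        extra = Extra.forbid  # unknown params yield "...foo: extra fields not permitted"

    nhs_id_code: Optional[str] = None
    nhs_mhs_svc_ia: Optional[str] = None
    nhs_mhs_party_key: Optional[str] = None

    @root_validator
    def require_at_least_two(cls, values):
        # Mirrors the "__root__" message shown in the example above.
        if sum(v is not None for v in values.values()) < 2:
            raise ValueError(
                "At least 2 query parameters should be provided of type, "
                "nhs_id_code, nhs_mhs_svc_ia and nhs_mhs_party_key"
            )
        return values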
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "connecting-party-manager"
version = "2025.02.21.a"
version = "2025.02.24"
description = "Repository for the Connecting Party Manager API and related services"
authors = ["NHS England"]
license = "LICENSE.md"
4 changes: 2 additions & 2 deletions scripts/test/test.mk
@@ -24,8 +24,8 @@ test--integration: aws--login ## Run integration (pytest) tests
test--slow: ## Run slow (pytest) tests
$(MAKE) _pytest _INTERNAL_FLAGS="-m 'slow'" _CACHE_CLEAR=$(_CACHE_CLEAR)

test--s3: aws--login ## Run (pytest) tests that require s3 downloads
$(MAKE) _pytest _INTERNAL_FLAGS="-m 's3' $(_INTERNAL_FLAGS)" _CACHE_CLEAR=$(_CACHE_CLEAR) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN)
# test--s3: aws--login ## Run (pytest) tests that require s3 downloads
# $(MAKE) _pytest _INTERNAL_FLAGS="-m 's3' $(_INTERNAL_FLAGS)" _CACHE_CLEAR=$(_CACHE_CLEAR) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN)

test--smoke: aws--login ## Run end-to-end smoke tests (pytest)
AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN) WORKSPACE=$(WORKSPACE) ACCOUNT=$(ACCOUNT) poetry run python -m pytest $(PYTEST_FLAGS) -m 'smoke' --ignore=src/layers --ignore=src/etl --ignore=archived_epr $(_CACHE_CLEAR)
19 changes: 19 additions & 0 deletions src/api/tests/smoke_tests/test_smoke.py
@@ -83,6 +83,25 @@ def _request(base_url: str, headers: dict, path: str, method: str):
"SearchProductQueryParams.__root__: Please provide exactly one valid query parameter: ('product_team_id', 'organisation_code')."
],
],
[
"/ProductTeam/123/Product/abc",
"DELETE",
404,
],
[
"/ProductTeam/123",
"DELETE",
404,
],
[
"/Product",
"GET",
400,
["VALIDATION_ERROR"],
[
"SearchProductQueryParams.__root__: Please provide exactly one valid query parameter: ('product_team_id', 'organisation_code')."
],
],
],
)
def test_smoke_tests(request_details):
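Each new entry above is a request_details tuple of path, HTTP method, expected status code and, for 4xx cases, the expected error codes and messages. A rough sketch of how such a parametrised case might be checked; the unpacking and assertion logic here is an assumption, only the _request signature comes from the hunk header above:

def check_smoke_case(base_url: str, headers: dict, request_details):
    # Trailing error-code / error-message lists are only present for 4xx cases.
    path, method, expected_status, *expected_errors = request_details

    response = _request(base_url=base_url, headers=headers, path=path, method=method)
    assert response.status_code == expected_status

    if expected_errors:
        expected_codes, expected_messages = expected_errors
        body = response.json()
        assert [error["code"] for error in body["errors"]] == expected_codes
        assert [error["message"] for error in body["errors"]] == expected_messages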
44 changes: 22 additions & 22 deletions src/layers/etl_utils/io/test/test_io_dump.py
@@ -1,30 +1,30 @@
from io import BytesIO
# from io import BytesIO

import pytest
from etl_utils.io import pkl_dump_lz4, pkl_dumps_lz4, pkl_load_lz4
from etl_utils.io.test.io_utils import pkl_loads_lz4
from event.json import json_load
# from etl_utils.io import pkl_dump_lz4, pkl_dumps_lz4, pkl_load_lz4
# from etl_utils.io.test.io_utils import pkl_loads_lz4
# from event.json import json_load
# import pytest

from etl.sds.tests.constants import EtlTestDataPath
# from etl.sds.tests.constants import EtlTestDataPath


@pytest.mark.s3(EtlTestDataPath.FULL_JSON)
def test_pkl_lz4(test_data_paths):
(path,) = test_data_paths
with open(path, "rb") as f:
data = json_load(f)
# @pytest.mark.s3(EtlTestDataPath.FULL_JSON) Uncomment this when archived
# def test_pkl_lz4(test_data_paths):
# (path,) = test_data_paths
# with open(path, "rb") as f:
# data = json_load(f)

buffer = BytesIO()
pkl_dump_lz4(fp=buffer, obj=data)
buffer.seek(0)
assert pkl_load_lz4(fp=buffer) == data
# buffer = BytesIO()
# pkl_dump_lz4(fp=buffer, obj=data)
# buffer.seek(0)
# assert pkl_load_lz4(fp=buffer) == data


@pytest.mark.s3(EtlTestDataPath.FULL_JSON)
def test_pkl_lz4_bytes(test_data_paths):
(path,) = test_data_paths
with open(path, "rb") as f:
data = json_load(f)
# @pytest.mark.s3(EtlTestDataPath.FULL_JSON) Uncomment this when archived
# def test_pkl_lz4_bytes(test_data_paths):
# (path,) = test_data_paths
# with open(path, "rb") as f:
# data = json_load(f)

data_as_bytes = pkl_dumps_lz4(obj=data)
assert pkl_loads_lz4(data=data_as_bytes) == data
# data_as_bytes = pkl_dumps_lz4(obj=data)
# assert pkl_loads_lz4(data=data_as_bytes) == data
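The round-trip tests above are commented out because they rely on the S3-hosted EtlTestDataPath.FULL_JSON fixture. For reference, the same helpers round-trip any picklable object in memory; a minimal sketch using a small literal payload instead of the S3 fixture, assuming the etl_utils imports shown above remain importable:

from io import BytesIO

from etl_utils.io import pkl_dump_lz4, pkl_dumps_lz4, pkl_load_lz4
from etl_utils.io.test.io_utils import pkl_loads_lz4


def test_pkl_lz4_roundtrip_in_memory():
    # Small literal payload stands in for the S3-hosted fixture.
    data = {"records": [{"id": 1}, {"id": 2}]}

    # File-object round trip.
    buffer = BytesIO()
    pkl_dump_lz4(fp=buffer, obj=data)
    buffer.seek(0)
    assert pkl_load_lz4(fp=buffer) == data

    # Bytes round trip.
    assert pkl_loads_lz4(data=pkl_dumps_lz4(obj=data)) == data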
100 changes: 50 additions & 50 deletions src/layers/sds/domain/tests/test_sds_bulk_model.py
@@ -17,56 +17,56 @@
BULK_FILTER_SKIPS = [64320]


@pytest.mark.s3(EtlTestDataPath.MINI_LDIF)
def test_bulk_data_is_valid_sds_mini(test_data_paths):
(ldif_path,) = test_data_paths

unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))
processed_records = []
while unprocessed_records:
distinguished_name, record = unprocessed_records[0]
try:
sds_record = parse_sds_record(
distinguished_name=distinguished_name, record=record
)
processed_records.append(type(sds_record))
except Exception as exception:
processed_records.append(exception)
else:
unprocessed_records.popleft()

counts = Counter(processed_records)
assert counts[NhsMhs] == 1655
assert counts[NhsAccreditedSystem] == 252


@memory_intensive
@pytest.mark.s3(EtlTestDataPath.FULL_LDIF)
def test_bulk_data_is_valid_sds_full(test_data_paths):
(ldif_path,) = test_data_paths

unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))

index = 0
processed_records = []
while unprocessed_records:
distinguished_name, record = unprocessed_records[0]
try:
if index not in BULK_SKIPS:
sds_record = parse_sds_record(
distinguished_name=distinguished_name, record=record
)
processed_records.append(type(sds_record))
except Exception as exception:
processed_records.append(exception)
else:
unprocessed_records.popleft()
index += 1

assert Counter(processed_records) == {
NhsMhs: 154506,
NhsAccreditedSystem: 5631,
}
# @pytest.mark.s3(EtlTestDataPath.MINI_LDIF) Uncomment this when archived
# def test_bulk_data_is_valid_sds_mini(test_data_paths):
# (ldif_path,) = test_data_paths

# unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))
# processed_records = []
# while unprocessed_records:
# distinguished_name, record = unprocessed_records[0]
# try:
# sds_record = parse_sds_record(
# distinguished_name=distinguished_name, record=record
# )
# processed_records.append(type(sds_record))
# except Exception as exception:
# processed_records.append(exception)
# else:
# unprocessed_records.popleft()

# counts = Counter(processed_records)
# assert counts[NhsMhs] == 1655
# assert counts[NhsAccreditedSystem] == 252


# @memory_intensive
# @pytest.mark.s3(EtlTestDataPath.FULL_LDIF) Uncomment this when archived
# def test_bulk_data_is_valid_sds_full(test_data_paths):
# (ldif_path,) = test_data_paths

# unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))

# index = 0
# processed_records = []
# while unprocessed_records:
# distinguished_name, record = unprocessed_records[0]
# try:
# if index not in BULK_SKIPS:
# sds_record = parse_sds_record(
# distinguished_name=distinguished_name, record=record
# )
# processed_records.append(type(sds_record))
# except Exception as exception:
# processed_records.append(exception)
# else:
# unprocessed_records.popleft()
# index += 1

# assert Counter(processed_records) == {
# NhsMhs: 154506,
# NhsAccreditedSystem: 5631,
# }


@pytest.mark.integration