
Commit cb9e6a0

Merge branch 'feature/PI-794-disable_epr_s3_integratioon_tests' into release/2025-02-24
2 parents: 51643ba + df91901

File tree

5 files changed (+177, -180 lines)

.github/workflows/pull-requests.yml

Lines changed: 1 addition & 1 deletion
@@ -222,7 +222,7 @@ jobs:
     runs-on: [self-hosted, ci]
     strategy:
       matrix:
-        test-type: [integration, s3]
+        test-type: [integration]
     steps:
       - uses: actions/checkout@v4
         with:

scripts/test/test.mk

Lines changed: 2 additions & 2 deletions
@@ -24,8 +24,8 @@ test--integration: aws--login ## Run integration (pytest) tests
 test--slow: ## Run slow (pytest) tests
 	$(MAKE) _pytest _INTERNAL_FLAGS="-m 'slow'" _CACHE_CLEAR=$(_CACHE_CLEAR)
 
-test--s3: aws--login ## Run (pytest) tests that require s3 downloads
-	$(MAKE) _pytest _INTERNAL_FLAGS="-m 's3' $(_INTERNAL_FLAGS)" _CACHE_CLEAR=$(_CACHE_CLEAR) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN)
+# test--s3: aws--login ## Run (pytest) tests that require s3 downloads
+# 	$(MAKE) _pytest _INTERNAL_FLAGS="-m 's3' $(_INTERNAL_FLAGS)" _CACHE_CLEAR=$(_CACHE_CLEAR) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN)
 
 test--smoke: aws--login ## Run end-to-end smoke tests (pytest)
 	AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN) WORKSPACE=$(WORKSPACE) ACCOUNT=$(ACCOUNT) poetry run python -m pytest $(PYTEST_FLAGS) -m 'smoke' --ignore=src/layers --ignore=src/etl --ignore=archived_epr $(_CACHE_CLEAR)
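
For reference, the disabled test--s3 target selects tests through the custom pytest marker "s3". A minimal sketch of how such a marker is typically registered in a conftest.py so that pytest -m 's3' can select the marked tests (illustrative only; the repository's actual marker registration may differ):

# conftest.py (illustrative sketch; not the repository's actual conftest)
def pytest_configure(config):
    # Register the custom "s3" marker so pytest does not warn about it and
    # so that `pytest -m 's3'` selects tests decorated with @pytest.mark.s3(...).
    config.addinivalue_line(
        "markers",
        "s3(*paths): test requires the listed test-data files to be downloaded from S3",
    )
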
Lines changed: 22 additions & 22 deletions
@@ -1,30 +1,30 @@
-from io import BytesIO
+# from io import BytesIO
 
-import pytest
-from etl_utils.io import pkl_dump_lz4, pkl_dumps_lz4, pkl_load_lz4
-from etl_utils.io.test.io_utils import pkl_loads_lz4
-from event.json import json_load
+# from etl_utils.io import pkl_dump_lz4, pkl_dumps_lz4, pkl_load_lz4
+# from etl_utils.io.test.io_utils import pkl_loads_lz4
+# from event.json import json_load
+# import pytest
 
-from etl.sds.tests.constants import EtlTestDataPath
+# from etl.sds.tests.constants import EtlTestDataPath
 
 
-@pytest.mark.s3(EtlTestDataPath.FULL_JSON)
-def test_pkl_lz4(test_data_paths):
-    (path,) = test_data_paths
-    with open(path, "rb") as f:
-        data = json_load(f)
+# @pytest.mark.s3(EtlTestDataPath.FULL_JSON) Uncomment this when archived
+# def test_pkl_lz4(test_data_paths):
+#     (path,) = test_data_paths
+#     with open(path, "rb") as f:
+#         data = json_load(f)
 
-    buffer = BytesIO()
-    pkl_dump_lz4(fp=buffer, obj=data)
-    buffer.seek(0)
-    assert pkl_load_lz4(fp=buffer) == data
+#     buffer = BytesIO()
+#     pkl_dump_lz4(fp=buffer, obj=data)
+#     buffer.seek(0)
+#     assert pkl_load_lz4(fp=buffer) == data
 
 
-@pytest.mark.s3(EtlTestDataPath.FULL_JSON)
-def test_pkl_lz4_bytes(test_data_paths):
-    (path,) = test_data_paths
-    with open(path, "rb") as f:
-        data = json_load(f)
+# @pytest.mark.s3(EtlTestDataPath.FULL_JSON) Uncomment this when archived
+# def test_pkl_lz4_bytes(test_data_paths):
+#     (path,) = test_data_paths
+#     with open(path, "rb") as f:
+#         data = json_load(f)
 
-    data_as_bytes = pkl_dumps_lz4(obj=data)
-    assert pkl_loads_lz4(data=data_as_bytes) == data
+#     data_as_bytes = pkl_dumps_lz4(obj=data)
+#     assert pkl_loads_lz4(data=data_as_bytes) == data
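
The two tests disabled above assert a pickle round-trip through the LZ4 helpers in etl_utils.io. A minimal sketch of that round-trip, assuming the helpers combine pickle with LZ4 frame compression (the actual etl_utils.io implementation may differ):

# Illustrative round-trip only; dumps_lz4/loads_lz4 here are stand-ins assumed
# to behave like pkl_dumps_lz4/pkl_loads_lz4 by combining pickle and LZ4 frames.
import pickle

import lz4.frame


def dumps_lz4(obj) -> bytes:
    # Pickle the object, then compress the bytes as a single LZ4 frame.
    return lz4.frame.compress(pickle.dumps(obj))


def loads_lz4(data: bytes):
    # Decompress the LZ4 frame, then unpickle.
    return pickle.loads(lz4.frame.decompress(data))


sample = {"unique_identifier": "200000042019", "object_class": "nhsas"}
assert loads_lz4(dumps_lz4(sample)) == sample
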

src/layers/sds/domain/tests/test_sds_bulk_model.py

Lines changed: 50 additions & 50 deletions
@@ -17,56 +17,56 @@
 BULK_FILTER_SKIPS = [64320]
 
 
-@pytest.mark.s3(EtlTestDataPath.MINI_LDIF)
-def test_bulk_data_is_valid_sds_mini(test_data_paths):
-    (ldif_path,) = test_data_paths
-
-    unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))
-    processed_records = []
-    while unprocessed_records:
-        distinguished_name, record = unprocessed_records[0]
-        try:
-            sds_record = parse_sds_record(
-                distinguished_name=distinguished_name, record=record
-            )
-            processed_records.append(type(sds_record))
-        except Exception as exception:
-            processed_records.append(exception)
-        else:
-            unprocessed_records.popleft()
-
-    counts = Counter(processed_records)
-    assert counts[NhsMhs] == 1655
-    assert counts[NhsAccreditedSystem] == 252
-
-
-@memory_intensive
-@pytest.mark.s3(EtlTestDataPath.FULL_LDIF)
-def test_bulk_data_is_valid_sds_full(test_data_paths):
-    (ldif_path,) = test_data_paths
-
-    unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))
-
-    index = 0
-    processed_records = []
-    while unprocessed_records:
-        distinguished_name, record = unprocessed_records[0]
-        try:
-            if index not in BULK_SKIPS:
-                sds_record = parse_sds_record(
-                    distinguished_name=distinguished_name, record=record
-                )
-                processed_records.append(type(sds_record))
-        except Exception as exception:
-            processed_records.append(exception)
-        else:
-            unprocessed_records.popleft()
-            index += 1
-
-    assert Counter(processed_records) == {
-        NhsMhs: 154506,
-        NhsAccreditedSystem: 5631,
-    }
+# @pytest.mark.s3(EtlTestDataPath.MINI_LDIF) Uncomment this when archived
+# def test_bulk_data_is_valid_sds_mini(test_data_paths):
+#     (ldif_path,) = test_data_paths
+
+#     unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))
+#     processed_records = []
+#     while unprocessed_records:
+#         distinguished_name, record = unprocessed_records[0]
+#         try:
+#             sds_record = parse_sds_record(
+#                 distinguished_name=distinguished_name, record=record
+#             )
+#             processed_records.append(type(sds_record))
+#         except Exception as exception:
+#             processed_records.append(exception)
+#         else:
+#             unprocessed_records.popleft()
+
+#     counts = Counter(processed_records)
+#     assert counts[NhsMhs] == 1655
+#     assert counts[NhsAccreditedSystem] == 252
+
+
+# @memory_intensive
+# @pytest.mark.s3(EtlTestDataPath.FULL_LDIF) Uncomment this when archived
+# def test_bulk_data_is_valid_sds_full(test_data_paths):
+#     (ldif_path,) = test_data_paths
+
+#     unprocessed_records = deque(parse_ldif(file_opener=open, path_or_data=ldif_path))
+
+#     index = 0
+#     processed_records = []
+#     while unprocessed_records:
+#         distinguished_name, record = unprocessed_records[0]
+#         try:
+#             if index not in BULK_SKIPS:
+#                 sds_record = parse_sds_record(
+#                     distinguished_name=distinguished_name, record=record
+#                 )
+#                 processed_records.append(type(sds_record))
+#         except Exception as exception:
+#             processed_records.append(exception)
+#         else:
+#             unprocessed_records.popleft()
+#             index += 1
+
+#     assert Counter(processed_records) == {
+#         NhsMhs: 154506,
+#         NhsAccreditedSystem: 5631,
+#     }
 
 
 @pytest.mark.integration
Lines changed: 102 additions & 105 deletions
@@ -1,112 +1,109 @@
-from io import StringIO
+# from io import StringIO
 
-import pytest
-from etl_utils.ldif.ldif import parse_ldif
-from etl_utils.ldif.model import DistinguishedName
-from sds.domain.changelog import ChangelogRecord
-from sds.domain.parse import parse_sds_record
-
-from etl.sds.tests.constants import EtlTestDataPath
+# from etl_utils.ldif.ldif import parse_ldif
+# from etl_utils.ldif.model import DistinguishedName
+# from sds.domain.changelog import ChangelogRecord
+# from sds.domain.parse import parse_sds_record
 
 
 # all files listed here get downloaded to the paths listed in 'test_data_paths'
-@pytest.mark.s3(EtlTestDataPath.CHANGELOG)
-def test_changelog_model_against_changelog_data(test_data_paths):
-    (ldif_path,) = test_data_paths
-
-    ldif_lines = parse_ldif(file_opener=open, path_or_data=ldif_path)
-
-    # Implicit check that one changelog line is expected
-    ((distinguished_name, record),) = ldif_lines
-
-    # Implicit check that it parses
-    changelog_record = ChangelogRecord(
-        _distinguished_name=distinguished_name,
-        **record,
-    )
-
-    # Check that the record has been parsed correctly
-    assert changelog_record.distinguished_name.change_number == "75852519"
-    assert changelog_record.distinguished_name.common_name == "changelog"
-    assert changelog_record.distinguished_name.organisation == "nhs"
-
-    assert changelog_record.object_class == "changeLogEntry"
-    assert (
-        changelog_record.change_number
-        == changelog_record.distinguished_name.change_number
-    )
-    assert changelog_record.change_time == "20240116173441Z"
-    assert changelog_record.change_type == "add"
-    assert changelog_record.target_distinguished_name == DistinguishedName(
-        parts=(("o", "nhs"), ("ou", "services"), ("uniqueidentifier", "200000042019"))
-    )
+# @pytest.mark.s3(EtlTestDataPath.CHANGELOG) Uncomment this when archived
+# def test_changelog_model_against_changelog_data(test_data_paths):
+#     (ldif_path,) = test_data_paths
+
+#     ldif_lines = parse_ldif(file_opener=open, path_or_data=ldif_path)
+
+#     # Implicit check that one changelog line is expected
+#     ((distinguished_name, record),) = ldif_lines
+
+#     # Implicit check that it parses
+#     changelog_record = ChangelogRecord(
+#         _distinguished_name=distinguished_name,
+#         **record,
+#     )
+
+#     # Check that the record has been parsed correctly
+#     assert changelog_record.distinguished_name.change_number == "75852519"
+#     assert changelog_record.distinguished_name.common_name == "changelog"
+#     assert changelog_record.distinguished_name.organisation == "nhs"
+
+#     assert changelog_record.object_class == "changeLogEntry"
+#     assert (
+#         changelog_record.change_number
+#         == changelog_record.distinguished_name.change_number
+#     )
+#     assert changelog_record.change_time == "20240116173441Z"
+#     assert changelog_record.change_type == "add"
+#     assert changelog_record.target_distinguished_name == DistinguishedName(
+#         parts=(("o", "nhs"), ("ou", "services"), ("uniqueidentifier", "200000042019"))
+#     )
 
 
 # all files listed here get downloaded to the paths listed in 'test_data_paths'
-@pytest.mark.s3(EtlTestDataPath.CHANGELOG)
-def test_changelog_changes_are_valid_ldif(test_data_paths):
-    (ldif_path,) = test_data_paths
-
-    ldif_lines = parse_ldif(file_opener=open, path_or_data=ldif_path)
-
-    # Implicit check that one changelog line is expected
-    ((distinguished_name, record),) = ldif_lines
-
-    # Implicit check that it parses
-    changelog_record = ChangelogRecord(
-        _distinguished_name=distinguished_name,
-        **record,
-    )
-
-    # Check that the change itself is valid LDIF
-    nested_ldif_lines = list(
-        parse_ldif(
-            file_opener=StringIO, path_or_data=changelog_record.changes_as_ldif()
-        )
-    )
-    assert len(nested_ldif_lines) == 1
-
-    # Check that the change is a valid SDS record
-    ((nested_distinguished_name, nested_record),) = nested_ldif_lines
-    sds_record = parse_sds_record(
-        distinguished_name=nested_distinguished_name, record=nested_record
-    )
-    assert sds_record.dict() == {
-        "change_type": "add",
-        "description": None,
-        "nhs_approver_urp": "System",
-        "nhs_as_acf": None,
-        "nhs_as_category_bag": None,
-        "nhs_as_client": {"K81045"},
-        "nhs_as_svc_ia": {
-            "urn:nhs:names:services:gpconnect:fhir:operation:gpc.getcarerecord",
-            "urn:nhs:names:services:gpconnect:fhir:operation:gpc.registerpatient-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:cancel:appointment-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:create:appointment-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:appointment-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:location-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:metadata",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:metadata-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:organization-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:patient-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:read:practitioner-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:search:organization-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:search:patient-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:search:patient_appointments-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:search:practitioner-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:search:slot-1",
-            "urn:nhs:names:services:gpconnect:fhir:rest:update:appointment-1",
-        },
-        "nhs_date_approved": "20240116173441",
-        "nhs_date_requested": "20240116173439",
-        "nhs_id_code": "K81045",
-        "nhs_mhs_manufacturer_org": "K81045",
-        "nhs_mhs_party_key": "R3U6M-831547",
-        "nhs_product_key": "6255",
-        "nhs_product_name": "Continuum Health GPC",
-        "nhs_product_version": "Consumer AS",
-        "nhs_requestor_urp": "uniqueidentifier=865945089569,uniqueidentifier=065150856568,uid=798965609042,ou=people, o=nhs",
-        "nhs_temp_uid": None,
-        "object_class": "nhsas",
-        "unique_identifier": "200000042019",
-    }
+# @pytest.mark.s3(EtlTestDataPath.CHANGELOG) Uncomment this when archived
+# def test_changelog_changes_are_valid_ldif(test_data_paths):
+#     (ldif_path,) = test_data_paths
+
+#     ldif_lines = parse_ldif(file_opener=open, path_or_data=ldif_path)
+
+#     # Implicit check that one changelog line is expected
+#     ((distinguished_name, record),) = ldif_lines
+
+#     # Implicit check that it parses
+#     changelog_record = ChangelogRecord(
+#         _distinguished_name=distinguished_name,
+#         **record,
+#     )
+
+#     # Check that the change itself is valid LDIF
+#     nested_ldif_lines = list(
+#         parse_ldif(
+#             file_opener=StringIO, path_or_data=changelog_record.changes_as_ldif()
+#         )
+#     )
+#     assert len(nested_ldif_lines) == 1
+
+#     # Check that the change is a valid SDS record
+#     ((nested_distinguished_name, nested_record),) = nested_ldif_lines
+#     sds_record = parse_sds_record(
+#         distinguished_name=nested_distinguished_name, record=nested_record
+#     )
+#     assert sds_record.dict() == {
+#         "change_type": "add",
+#         "description": None,
+#         "nhs_approver_urp": "System",
+#         "nhs_as_acf": None,
+#         "nhs_as_category_bag": None,
+#         "nhs_as_client": {"K81045"},
+#         "nhs_as_svc_ia": {
+#             "urn:nhs:names:services:gpconnect:fhir:operation:gpc.getcarerecord",
+#             "urn:nhs:names:services:gpconnect:fhir:operation:gpc.registerpatient-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:cancel:appointment-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:create:appointment-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:appointment-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:location-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:metadata",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:metadata-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:organization-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:patient-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:read:practitioner-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:search:organization-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:search:patient-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:search:patient_appointments-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:search:practitioner-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:search:slot-1",
+#             "urn:nhs:names:services:gpconnect:fhir:rest:update:appointment-1",
+#         },
+#         "nhs_date_approved": "20240116173441",
+#         "nhs_date_requested": "20240116173439",
+#         "nhs_id_code": "K81045",
+#         "nhs_mhs_manufacturer_org": "K81045",
+#         "nhs_mhs_party_key": "R3U6M-831547",
+#         "nhs_product_key": "6255",
+#         "nhs_product_name": "Continuum Health GPC",
+#         "nhs_product_version": "Consumer AS",
+#         "nhs_requestor_urp": "uniqueidentifier=865945089569,uniqueidentifier=065150856568,uid=798965609042,ou=people, o=nhs",
+#         "nhs_temp_uid": None,
+#         "object_class": "nhsas",
+#         "unique_identifier": "200000042019",
+#     }
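
The retained comments in this file note that every file named in @pytest.mark.s3(...) is downloaded to the paths yielded by the test_data_paths fixture. A minimal sketch of how such a fixture could be wired up (the bucket name, key layout, and fixture wiring are assumptions, not the repository's actual conftest):

# Hypothetical conftest fixture; bucket name and key layout are illustrative only.
import pathlib

import boto3
import pytest

TEST_DATA_BUCKET = "example-epr-test-data"  # assumed bucket name


@pytest.fixture
def test_data_paths(request, tmp_path):
    # Read the S3 keys from the test's @pytest.mark.s3(...) marker.
    marker = request.node.get_closest_marker("s3")
    if marker is None:
        pytest.skip("test is not marked with @pytest.mark.s3")
    s3 = boto3.client("s3")
    paths = []
    for key in marker.args:
        local_path = tmp_path / pathlib.Path(str(key)).name
        # Download each named object to a temporary local path.
        s3.download_file(TEST_DATA_BUCKET, str(key), str(local_path))
        paths.append(local_path)
    return paths
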
