Skip to content

Commit 306721e

Browse files
committed
Format files.
1 parent 8f110ec commit 306721e

File tree

39 files changed

+237
-667
lines changed

39 files changed

+237
-667
lines changed

apps/bfd-model-idr/claims_generator.py

Lines changed: 56 additions & 157 deletions
Large diffs are not rendered by default.

apps/bfd-model-idr/compile_resources.py

Lines changed: 10 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -39,12 +39,9 @@ def get_referenced_maps(compiled_map_path):
3939

4040
def get_structure_definitions():
4141
try:
42-
source_dir = (
43-
Path(__file__).parent.absolute() / "StructureDefinitions" / "Source"
44-
)
42+
source_dir = Path(__file__).parent.absolute() / "StructureDefinitions" / "Source"
4543
structure_defs = [
46-
f"StructureDefinitions/Source/{file.name}"
47-
for file in source_dir.glob("*.json")
44+
f"StructureDefinitions/Source/{file.name}" for file in source_dir.glob("*.json")
4845
]
4946
return " ".join([f"-ig {def_file}" for def_file in structure_defs])
5047
except Exception as e:
@@ -54,12 +51,9 @@ def get_structure_definitions():
5451

5552
def get_sushi_resources():
5653
try:
57-
sushi_dir = (
58-
Path(__file__).parent.absolute() / "sushi" / "fsh-generated" / "resources"
59-
)
54+
sushi_dir = Path(__file__).parent.absolute() / "sushi" / "fsh-generated" / "resources"
6055
sushi_resources = [
61-
f"sushi/fsh-generated/resources/{file.name}"
62-
for file in sushi_dir.glob("*.json")
56+
f"sushi/fsh-generated/resources/{file.name}" for file in sushi_dir.glob("*.json")
6357
]
6458
return " ".join([f"-ig {resource}" for resource in sushi_resources])
6559
except Exception as e:
@@ -75,10 +69,8 @@ def run_conformance_test(input_file, output_file):
7569
sushi_resources = get_sushi_resources()
7670

7771
print("Running conformance testing")
78-
test_cmd = (
79-
f"java -jar validator_cli.jar {input_file} -output {output_file} -version 4.0.1 \
72+
test_cmd = f"java -jar validator_cli.jar {input_file} -output {output_file} -version 4.0.1 \
8073
{structure_defs} -ig hl7.fhir.us.carin-bb#2.1.0 {sushi_resources}"
81-
)
8274
stdout, stderr = run_command(test_cmd, cwd=script_dir)
8375

8476
print("Conformance test output:")
@@ -100,9 +92,7 @@ def run_conformance_test(input_file, output_file):
10092

10193
def main():
10294
# Parse args instead of just putting everything in separate READMEs.
103-
parser = argparse.ArgumentParser(
104-
description="Compile and execute FHIR structure maps."
105-
)
95+
parser = argparse.ArgumentParser(description="Compile and execute FHIR structure maps.")
10696
parser.add_argument(
10797
"--map", "-m", type=str, help="Path to the structure map file", required=True
10898
)
@@ -148,8 +138,10 @@ def main():
148138
# Compile FML files.
149139
print("Compiling FML ")
150140
compiled_map_path = f"StructureMaps/BFD-{Path(args.map).stem}-StructureMap.json"
151-
compile_cmd = f"java -jar validator_cli.jar -version 4.0.1 -ig {args.map} -compile {args.resource} \
141+
compile_cmd = (
142+
f"java -jar validator_cli.jar -version 4.0.1 -ig {args.map} -compile {args.resource} \
152143
-output {compiled_map_path}"
144+
)
153145
print("Input compilation command was:" + compile_cmd)
154146
stdout, stderr = run_command(compile_cmd, cwd=script_dir)
155147
print("Compilation output:")
@@ -163,11 +155,9 @@ def main():
163155
map_imports = " ".join([f"-ig {map_file}" for map_file in referenced_maps])
164156

165157
print("Executing Transform")
166-
execute_cmd = (
167-
f"java -jar validator_cli.jar {args.input} -output {args.output} -transform \
158+
execute_cmd = f"java -jar validator_cli.jar {args.input} -output {args.output} -transform \
168159
{args.resource} -version 4.0.1 -ig {compiled_map_path} {structure_defs} \
169160
-ig hl7.fhir.us.carin-bb#2.1.0 {map_imports} {sushi_resources}"
170-
)
171161
stdout, stderr = run_command(execute_cmd, cwd=script_dir)
172162

173163
print("Execution output:")

apps/bfd-model-idr/dd_helper_script.py

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -37,9 +37,7 @@
3737
usecols=["Target Table", "Target Column"],
3838
)
3939
for _, row in df.iterrows():
40-
element_concatenated = (
41-
f"{row['Target Table'].strip()}.{row['Target Column'].strip()}"
42-
)
40+
element_concatenated = f"{row['Target Table'].strip()}.{row['Target Column'].strip()}"
4341
# there are newlines in some, we should ask IDR to consider changing the DD structure?
4442
if (
4543
row["Target Table"] != "-"
@@ -48,12 +46,9 @@
4846
):
4947
if (
5048
element_concatenated in applies_to
51-
and translations[cur_profile]
52-
not in applies_to[element_concatenated]["profiles"]
49+
and translations[cur_profile] not in applies_to[element_concatenated]["profiles"]
5350
):
54-
applies_to[element_concatenated]["profiles"].append(
55-
translations[cur_profile]
56-
)
51+
applies_to[element_concatenated]["profiles"].append(translations[cur_profile])
5752
else:
5853
applies_to[element_concatenated] = {
5954
"profiles": [translations[cur_profile]],

apps/bfd-model-idr/gen_dd.py

Lines changed: 16 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -59,23 +59,19 @@
5959
test_resource = json.load(file)
6060
for element in test_resource["differential"]["element"]:
6161
structure_def_names_descriptions[element["id"]] = {}
62-
structure_def_names_descriptions[element["id"]]["name"] = element[
63-
"label"
64-
]
62+
structure_def_names_descriptions[element["id"]]["name"] = element["label"]
6563
if "definition" in element:
66-
structure_def_names_descriptions[element["id"]]["definition"] = (
67-
element["definition"]
68-
)
64+
structure_def_names_descriptions[element["id"]]["definition"] = element[
65+
"definition"
66+
]
6967

7068
for walk_info in os.walk(idr_ref_folder):
7169
files = list(filter(lambda file: ".csv" in file, walk_info[2]))
7270
for file_name in files:
7371
idr_table_descriptors[file_name[0 : len(file_name) - 4]] = {}
7472
df = pd.read_csv(idr_ref_folder + "/" + str(file_name))
7573
for _, row in df.iterrows():
76-
idr_table_descriptors[file_name[0 : len(file_name) - 4]][row["name"]] = row[
77-
"comment"
78-
]
74+
idr_table_descriptors[file_name[0 : len(file_name) - 4]][row["name"]] = row["comment"]
7975

8076
coverage_parts = ["PartA", "PartB", "PartC", "PartD", "DUAL"]
8177
claim_profiles = [
@@ -115,9 +111,7 @@
115111
[
116112
"node",
117113
"eval_fhirpath.js",
118-
json.dumps(
119-
sample_resources_by_profile[entry["appliesTo"][0]]
120-
),
114+
json.dumps(sample_resources_by_profile[entry["appliesTo"][0]]),
121115
entry["fhirPath"],
122116
],
123117
check=True,
@@ -130,23 +124,17 @@
130124
entry["example"] = entry["example"][0]
131125
else:
132126
entry["example"] = ""
133-
if (
134-
"sourceView" in entry
135-
and entry["sourceView"] in idr_table_descriptors
136-
):
137-
entry["Description"] = idr_table_descriptors[
138-
entry["sourceView"]
139-
][entry["sourceColumn"]]
127+
if "sourceView" in entry and entry["sourceView"] in idr_table_descriptors:
128+
entry["Description"] = idr_table_descriptors[entry["sourceView"]][
129+
entry["sourceColumn"]
130+
]
140131

141132
# Populate the element names + missing descriptions
142133
if entry["inputPath"] in structure_def_names_descriptions:
143-
entry["Field Name"] = structure_def_names_descriptions[
144-
entry["inputPath"]
145-
]["name"]
146-
if (
147-
"definition"
148-
in structure_def_names_descriptions[entry["inputPath"]]
149-
):
134+
entry["Field Name"] = structure_def_names_descriptions[entry["inputPath"]][
135+
"name"
136+
]
137+
if "definition" in structure_def_names_descriptions[entry["inputPath"]]:
150138
entry["Description"] = structure_def_names_descriptions[
151139
entry["inputPath"]
152140
]["definition"]
@@ -162,9 +150,7 @@
162150
def replace_str(input_str):
163151
# Yes, the below is intentional.
164152
if input_str == input_str and len(str(input_str)) > 0:
165-
return "https://bluebutton.cms.gov/fhir/CodeSystem/" + str(input_str).replace(
166-
"_", "-"
167-
)
153+
return "https://bluebutton.cms.gov/fhir/CodeSystem/" + str(input_str).replace("_", "-")
168154
return ""
169155

170156

@@ -213,9 +199,7 @@ def replace_str(input_str):
213199

214200
workbook = writer.book
215201
worksheet = writer.sheets["Data Dictionary"]
216-
header_format = workbook.add_format(
217-
{"bold": True, "bg_color": "#DCE6F2", "border": 1}
218-
)
202+
header_format = workbook.add_format({"bold": True, "bg_color": "#DCE6F2", "border": 1})
219203
text_format = workbook.add_format({"border": 1})
220204

221205
worksheet.write(0, 0, "Row", header_format)

apps/bfd-model-idr/generator_util.py

Lines changed: 5 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -119,9 +119,7 @@ def gen_bene_sk(self):
119119
return bene_sk
120120

121121
def generate_bene_xref(self, new_bene_sk, old_bene_sk):
122-
bene_hicn_num = str(random.randint(1000, 100000000)) + random.choice(
123-
string.ascii_letters
124-
)
122+
bene_hicn_num = str(random.randint(1000, 100000000)) + random.choice(string.ascii_letters)
125123

126124
# 10% chance for invalid xref.
127125
kill_cred_cd = 1 if random.randint(1, 10) == 1 else 2
@@ -248,9 +246,7 @@ def handle_mbis(self, patient, num_mbis, custom_first_mbi=None):
248246
patient["BENE_MBI_ID"] = current_mbi
249247

250248
def generate_coverages(self, patient):
251-
parts = random.choices(
252-
[["A"], ["B"], ["A", "B"], []], weights=[0.2, 0.2, 0.5, 0.1]
253-
)[0]
249+
parts = random.choices([["A"], ["B"], ["A", "B"], []], weights=[0.2, 0.2, 0.5, 0.1])[0]
254250
include_tp = random.random() > 0.2
255251
expired = random.random() < 0.2
256252
future = random.random() < 0.2
@@ -314,12 +310,9 @@ def _generate_coverages(self, patient, coverage_parts, include_tp, expired, futu
314310
"BENE_SK": patient["BENE_SK"],
315311
"IDR_LTST_TRANS_FLG": "Y",
316312
"BENE_MDCR_ENTLMT_TYPE_CD": coverage_type,
317-
"BENE_MDCR_ENRLMT_RSN_CD": random.choice(
318-
self.code_systems["BENE_ENRLMT_RSN_CD"]
319-
),
313+
"BENE_MDCR_ENRLMT_RSN_CD": random.choice(self.code_systems["BENE_ENRLMT_RSN_CD"]),
320314
"BENE_MDCR_ENTLMT_STUS_CD": "Y",
321-
"IDR_TRANS_EFCTV_TS": str(medicare_start_date)
322-
+ "T00:00:00.000000+0000",
315+
"IDR_TRANS_EFCTV_TS": str(medicare_start_date) + "T00:00:00.000000+0000",
323316
"IDR_INSRT_TS": str(medicare_start_date) + "T00:00:00.000000+0000",
324317
"IDR_UPDT_TS": str(medicare_start_date) + "T00:00:00.000000+0000",
325318
"IDR_TRANS_OBSLT_TS": "9999-12-31T00:00:00.000000+0000",
@@ -333,8 +326,7 @@ def _generate_coverages(self, patient, coverage_parts, include_tp, expired, futu
333326
"BENE_SK": patient["BENE_SK"],
334327
"IDR_LTST_TRANS_FLG": "Y",
335328
"BENE_TP_TYPE_CD": coverage_type,
336-
"IDR_TRANS_EFCTV_TS": str(medicare_start_date)
337-
+ "T00:00:00.000000+0000",
329+
"IDR_TRANS_EFCTV_TS": str(medicare_start_date) + "T00:00:00.000000+0000",
338330
"IDR_INSRT_TS": str(medicare_start_date) + "T00:00:00.000000+0000",
339331
"IDR_UPDT_TS": str(medicare_start_date) + "T00:00:00.000000+0000",
340332
"IDR_TRANS_OBSLT_TS": "9999-12-31T00:00:00.000000+0000",

apps/bfd-model-idr/patient_generator.py

Lines changed: 6 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,7 @@
1313

1414
# Command line argument parsing
1515
parser = argparse.ArgumentParser(description="Generate synthetic patient data")
16-
parser.add_argument(
17-
"--benes", type=str, help="Path to CSV file containing beneficiary data"
18-
)
16+
parser.add_argument("--benes", type=str, help="Path to CSV file containing beneficiary data")
1917
parser.add_argument(
2018
"--claims",
2119
action="store_true",
@@ -48,9 +46,7 @@
4846
csv_data = None
4947
if args.benes:
5048
try:
51-
csv_data = pd.read_csv(
52-
args.benes, dtype={"BENE_SEX_CD": "Int64", "BENE_RACE_CD": "Int64"}
53-
)
49+
csv_data = pd.read_csv(args.benes, dtype={"BENE_SEX_CD": "Int64", "BENE_RACE_CD": "Int64"})
5450
print(f"Loaded {len(csv_data)} rows from CSV file: {args.benes}")
5551

5652
patients_to_generate = len(csv_data)
@@ -100,9 +96,7 @@
10096
if pd.notna(row.get("BENE_VRFY_DEATH_DAY_SW")):
10197
patient["BENE_VRFY_DEATH_DAY_SW"] = str(row["BENE_VRFY_DEATH_DAY_SW"])
10298
else:
103-
patient["BENE_VRFY_DEATH_DAY_SW"] = (
104-
"Y" if random.randint(0, 1) == 1 else "N"
105-
)
99+
patient["BENE_VRFY_DEATH_DAY_SW"] = "Y" if random.randint(0, 1) == 1 else "N"
106100
else:
107101
if random.randint(0, 10) < 2:
108102
# death!
@@ -187,9 +181,7 @@
187181
patient["BENE_VRFY_DEATH_DAY_SW"] = "~"
188182

189183
patient["BENE_SEX_CD"] = str(random.randint(1, 2))
190-
patient["BENE_RACE_CD"] = random.choice(
191-
["~", "0", "1", "2", "3", "4", "5", "6", "7", "8"]
192-
)
184+
patient["BENE_RACE_CD"] = random.choice(["~", "0", "1", "2", "3", "4", "5", "6", "7", "8"])
193185

194186
pt_bene_sk = generator.gen_bene_sk()
195187
patient["BENE_SK"] = str(pt_bene_sk)
@@ -227,14 +219,10 @@
227219

228220
generator.generate_bene_xref(pt_bene_sk, old_bene_sk)
229221

230-
generator.set_timestamps(
231-
prior_patient, datetime.date(year=2017, month=5, day=20)
232-
)
222+
generator.set_timestamps(prior_patient, datetime.date(year=2017, month=5, day=20))
233223

234224
# Override the obsolete timestamp to be in the past year instead of future
235-
past_year_date = datetime.date.today() - datetime.timedelta(
236-
days=random.randint(30, 365)
237-
)
225+
past_year_date = datetime.date.today() - datetime.timedelta(days=random.randint(30, 365))
238226
prior_patient["IDR_TRANS_OBSLT_TS"] = f"{past_year_date}T00:00:00.000000+0000"
239227

240228
generator.bene_hstry_table.append(prior_patient)

apps/bfd-model/bfd-model-rif/src/main/resources/db/scripts/BFD-1700-remove-synthetic/make_sql.py

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -125,9 +125,7 @@
125125
# Helpers
126126

127127

128-
def make_bene_id_pattern(
129-
bene_ids: list[str], pattern: str, cast_id: bool = False
130-
) -> str:
128+
def make_bene_id_pattern(bene_ids: list[str], pattern: str, cast_id: bool = False) -> str:
131129
"""Make the WHERE clause to handle bene_ids that are in the list or fit the pattern."""
132130
output = " bene_id IN ('" + "', '".join(bene_ids) + "')"
133131
if pattern is not None:
@@ -150,9 +148,7 @@ def make_claims_sql(
150148
if is_count:
151149
output += f"SELECT COUNT(lines.*) AS {claim_lines_table}\n"
152150
output += f"FROM {claim_lines_table} AS lines\n"
153-
output += (
154-
f"LEFT JOIN {claims_table} AS claims ON (claims.clm_id = lines.clm_id)\n"
155-
)
151+
output += f"LEFT JOIN {claims_table} AS claims ON (claims.clm_id = lines.clm_id)\n"
156152
output += f"WHERE\n{bene_id_clause};\n\n\n"
157153
else:
158154
output += f"DELETE FROM {claim_lines_table} AS lines\n"
@@ -212,21 +208,13 @@ def main(args: list):
212208
sys.exit()
213209

214210
bene_id_pattern = make_bene_id_pattern(bene_ids["ids"], bene_ids["pattern"], False)
215-
new_bene_id_pattern = make_bene_id_pattern(
216-
bene_ids["ids"], bene_ids["pattern"], True
217-
)
211+
new_bene_id_pattern = make_bene_id_pattern(bene_ids["ids"], bene_ids["pattern"], True)
218212

219213
for claims_table, claim_lines_table in CLAIMS_TABLES:
220-
print(
221-
make_claims_sql(bene_id_pattern, claims_table, claim_lines_table, is_count)
222-
)
214+
print(make_claims_sql(bene_id_pattern, claims_table, claim_lines_table, is_count))
223215

224216
for claims_table, claim_lines_table in NEW_CLAIMS_TABLES:
225-
print(
226-
make_claims_sql(
227-
new_bene_id_pattern, claims_table, claim_lines_table, is_count
228-
)
229-
)
217+
print(make_claims_sql(new_bene_id_pattern, claims_table, claim_lines_table, is_count))
230218

231219
for base_table in BASE_TABLES:
232220
print(make_base_sql(bene_id_pattern, base_table, is_count))

0 commit comments

Comments (0)