-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathcost_estimation.py
More file actions
2427 lines (2062 loc) · 125 KB
/
cost_estimation.py
File metadata and controls
2427 lines (2062 loc) · 125 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import ast
import itertools
import math
import re
import textwrap
from collections import defaultdict
from itertools import cycle
from pathlib import Path
from typing import Literal, Optional, Union
import matplotlib.container as mpc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import squarify
from tlo import Date
from tlo.analysis.utils import (
extract_results,
get_scenario_info,
load_pickled_dataframes,
unflatten_flattened_multi_index_in_logging,
)
# %%
# Define a helper function to load necessary unit cost data
def load_unit_cost_assumptions(resourcefilepath: Path) -> dict[str, dict]:
    """
    Load and parse all cost-related input files from CSV format.

    Parameters
    ----------
    resourcefilepath : Path
        Base path to the root of the resource files directory.

    Returns
    -------
    cost_inputs : dict of pd.DataFrame
        Dictionary of cost input DataFrames keyed by: 'hr', 'consumables',
        'equipment', 'facility_operations', 'actual_expenditure_data' and
        'health_spending_projections'.
    """
    # Load cost input CSVs
    cost_inputs = {
        'hr': pd.read_csv(resourcefilepath / "costing" / "ResourceFile_Costing_HR.csv"),
        'consumables': pd.read_csv(resourcefilepath / "costing" / "ResourceFile_Costing_Consumables.csv"),
        'equipment': pd.read_csv(resourcefilepath / "costing" / "ResourceFile_Costing_Equipment.csv"),
        'facility_operations': pd.read_csv(
            resourcefilepath / "costing" / "ResourceFile_Costing_Facility_Operations.csv"),
        'actual_expenditure_data': pd.read_csv(resourcefilepath / "costing" / "ResourceFile_Resource_Mapping.csv"),
        'health_spending_projections': pd.read_csv(
            resourcefilepath / "costing" / "ResourceFile_Health_Spending_Projections.csv")
    }

    # Clean unit cost data
    # Convert facility level to string for consistency across values (HR)
    cost_inputs['hr']['Facility_Level'] = cost_inputs['hr']['Facility_Level'].astype(str)

    # Keep only necessary columns and rows
    # Consumables: keep item code and unit price only; drop rows with no item code
    cost_inputs['consumables'] = cost_inputs['consumables'][['Item_Code', 'Price_per_unit']].reset_index(drop=True)
    cost_inputs['consumables'] = cost_inputs['consumables'][cost_inputs['consumables']['Item_Code'].notna()]
    # Equipment: drop rows with no item code
    cost_inputs['equipment'] = cost_inputs['equipment'][cost_inputs['equipment']['Item_code'].notna()]
    # Expenditure mapping: drop blank rows and the "Grand Total" summary row
    cost_inputs['actual_expenditure_data'] = cost_inputs['actual_expenditure_data'][
        (cost_inputs['actual_expenditure_data']['Cost Type'].notna()) &
        (cost_inputs['actual_expenditure_data']['Cost Type'] != "Grand Total")]
    # Facility operations: truncate at the first row with a blank Facility_Level
    # (presumably trailing annotation rows in the CSV — TODO confirm against the file)
    first_nan_index_facility_operations = cost_inputs['facility_operations'][
        cost_inputs['facility_operations']['Facility_Level'].isna()].index.min()
    if pd.notna(first_nan_index_facility_operations):
        cost_inputs['facility_operations'] = cost_inputs['facility_operations'].loc[
            :first_nan_index_facility_operations - 1]
    # Health spending projections: promote row index 1 (the second row) to
    # column names, then keep the data rows from index 2 onward
    cost_inputs['health_spending_projections'].columns = cost_inputs['health_spending_projections'].iloc[1]
    cost_inputs['health_spending_projections'] = cost_inputs['health_spending_projections'].iloc[2:].reset_index(
        drop=True)
    # Truncate at the first row with a blank 'year' (presumably trailing
    # annotation rows — TODO confirm against the file)
    first_nan_index_health_spending = cost_inputs['health_spending_projections'][
        cost_inputs['health_spending_projections']['year'].isna()].index.min()
    if pd.notna(first_nan_index_health_spending):
        cost_inputs['health_spending_projections'] = cost_inputs['health_spending_projections'].loc[
            :first_nan_index_health_spending - 1]
    cost_inputs['health_spending_projections']['year'] = cost_inputs['health_spending_projections']['year'].astype(int)

    return cost_inputs
# Define a helper function to load necessary simulation metadata
def load_simulation_metadata(results_folder: Path) -> tuple[dict, list[int], tuple[Date, Date]]:
    """
    Load simulation scenario metadata and derive key time parameters.

    Parameters
    ----------
    results_folder : Path
        Path to the folder containing TLO model simulation results.

    Returns
    -------
    info : dict
        Scenario metadata including number of draws and runs per draw.
    years : list of int
        Full list of simulation years.
    target_period : tuple of Date
        Simulation date range from first to last year as TLO Date objects.
    """
    # Scenario structure (number of draws, runs per draw, etc.)
    info = get_scenario_info(results_folder)

    # Use a single sample log (draw 0, run 0) to establish the simulated time span
    sample_log = load_pickled_dataframes(results_folder, 0, 0)
    event_dates = sample_log['tlo.methods.healthsystem.summary']['hsi_event_counts']['date']
    start_year = min(event_dates).year
    end_year = max(event_dates).year

    years = list(range(start_year, end_year + 1))
    target_period = (Date(start_year, 1, 1), Date(end_year, 12, 31))
    return info, years, target_period
# Define a function to discount and summarise costs by cost_category
def apply_discounting_to_cost_data(_df: pd.DataFrame,
                                   _discount_rate: Union[float, dict[int, float]] = 0,
                                   _initial_year: Optional[int] = None,
                                   _column_for_discounting: str = 'cost') -> pd.DataFrame:
    """
    Apply discounting to the specified column over time, using a fixed or year-specific rate.

    Parameters:
    ----------
    _df : pd.DataFrame
        DataFrame containing a 'year' column and a cost column to be discounted.
    _discount_rate : float or dict of {year: float}, default 0
        Discount rate to apply. Can be:
        - A single fixed rate (e.g., 0.03 for 3%)
        - A dictionary of year-specific rates {2025: 0.03, 2026: 0.035, ...}
    _initial_year : int, optional
        Reference year for discounting. If None, uses the earliest year in the DataFrame.
    _column_for_discounting : str, default 'cost'
        Name of the column to apply discounting to.

    Returns:
    -------
    pd.DataFrame
        A copy of the input DataFrame with the specified column discounted.
        The caller's DataFrame is left unmodified.
    """
    # Fix: operate on a copy so the caller's DataFrame is not mutated.
    # The docstring promises a copy, but the previous implementation assigned
    # into the passed-in frame via .loc (which also risked pandas
    # SettingWithCopyWarning when the caller passed a slice).
    _df = _df.copy()

    if _initial_year is None:
        # Determine the reference year from the dataframe itself
        _initial_year = min(_df['year'].unique())

    def get_discount_factor(year):
        """Compute the cumulative discount factor for a given year."""
        if isinstance(_discount_rate, dict):
            # Cumulative product of (1 + rate) over the years *after* the
            # initial year — the initial year's factor is 1 by definition.
            discount_factor = 1
            for y in range(_initial_year + 1, year + 1):
                discount_factor *= (1 + _discount_rate.get(y, 0))  # default to 0 if year not in dictionary
            return discount_factor
        # Single fixed rate: standard compound discounting
        return (1 + _discount_rate) ** (year - _initial_year)

    # Compute each distinct year's factor once and map it onto the rows,
    # instead of recomputing the cumulative product for every row.
    factor_by_year = {year: get_discount_factor(year) for year in _df['year'].unique()}
    _df[_column_for_discounting] = _df[_column_for_discounting] / _df['year'].map(factor_by_year)
    return _df
# Clean the names of consumables in input cost dataframe
def clean_consumable_name(name: str) -> str:
    """
    Clean consumable names for analysis and plotting.

    Strips procurement suffixes and packaging metadata, removes stray
    characters, collapses whitespace, harmonises spelling variants,
    applies canonical renames for selected items, and capitalises the
    first letter. Non-string inputs are returned unchanged.
    """
    if not isinstance(name, str):
        return name

    result = name

    # Strip common procurement suffixes (e.g. "_CMST", "_IDA")
    result = re.sub(r'_(CMST|IDA|Each_CMST|each_CMST|each|ID|PFR|nt)(\b|_)', '', result, flags=re.IGNORECASE)

    # Drop trailing numeric package indicators and "N units"-style phrases
    result = re.sub(r'_\d+(\.\d+)?$', '', result)
    result = re.sub(r'\b\d+\s*(tests|pieces|doses|pack|packs|box|boxes)\b', '', result, flags=re.IGNORECASE)

    # Remove awkward characters left over from encoding issues
    result = result.replace('Â', '').replace('½', '1/2')

    # Collapse runs of whitespace
    result = re.sub(r'\s+', ' ', result).strip()

    # Harmonise common spelling variants (case-insensitive, whole word)
    spelling_fixes = (
        ('Amoxycillin', 'Amoxicillin'),
        ('Gentamycin', 'Gentamicin'),
        ('Declofenac', 'Diclofenac'),
        ('Frusemide', 'Furosemide'),
        ('Cotrimoxizole', 'Cotrimoxazole'),
        ("ringer's lactate", "Ringer's lactate"),
    )
    for variant, preferred in spelling_fixes:
        result = re.sub(rf'\b{variant}\b', preferred, result, flags=re.IGNORECASE)

    # Canonical renaming for key nutrition / diagnostics items
    # (case-insensitive exact match against the fully-cleaned name)
    canonical_map = {
        'Therapeutic spread, sachet 92g/CAR-150':
            'Ready-to-use therapeutic food (RUTF)',
        'Therapeutic spread, sachet 92g / CAR-150':
            'Ready-to-use therapeutic food (RUTF)',
        'VL test':
            'Viral load test',
        'Dietary supplements (country-specific)':
            'Multiple micronutrient powder (MNP) supplement'
    }
    lowered = result.lower()
    for source_name, canonical in canonical_map.items():
        if lowered == source_name.lower():
            result = canonical
            break

    # Capitalise only the first character (acronyms elsewhere untouched)
    if result:
        result = result[0].upper() + result[1:]
    return result
# Clean the names of equipment in the cost dataframe, Drop irrelevant ones
def clean_equipment_name(name: str, equipment_drop_list = None) -> str:
    """
    Clean and standardise medical equipment names for analysis.

    Applies light normalisation (encoding fixes, slash/whitespace spacing),
    a minimal set of explicit canonical renames, and first-letter
    capitalisation. Non-string inputs are returned unchanged.

    NOTE(review): `equipment_drop_list` is accepted but never used in this
    function — presumably any dropping happens elsewhere; confirm with the
    callers before removing the parameter.
    """
    if not isinstance(name, str):
        return name

    # Fix known encoding artefacts from the source data
    result = name.replace('â\x80\x99', '’').replace('Â', '')

    # Standardise slash spacing, then collapse runs of whitespace
    result = re.sub(r'\s*/\s*', ' / ', result)
    result = re.sub(r'\s+', ' ', result).strip()

    # Explicit canonical renaming (case-insensitive exact match; keep minimal)
    rename_map = {
        'Image view station, for conferences':
            'Clinical image viewing workstation (PACS / case review)',
        'Cusco’s / bivalved Speculum (small, medium, large)':
            'Cusco’s / bivalved speculum (small, medium, large)',
        'Cuscoâ\x80\x99s/ bivalved Speculum (small, medium, large)':
            'Cusco’s / bivalved speculum (small, medium, large)',
    }
    lowered = result.lower()
    for source_name, canonical in rename_map.items():
        if lowered == source_name.lower():
            result = canonical
            break

    # Capitalise only the first character (acronyms elsewhere untouched)
    if result:
        result = result[0].upper() + result[1:]
    return result
def estimate_input_cost_of_scenarios(results_folder: Path,
resourcefilepath: Path,
_draws: Optional[list[int]] = None,
_runs: Optional[list[int]] = None,
summarize: bool = False,
_metric: Literal['mean', 'median'] = 'mean',
_years: Optional[list[int]] = None,
cost_only_used_staff: bool = True,
_discount_rate: Union[float, dict[int, float]] = 0) -> pd.DataFrame:
"""
Estimate health system input costs for a given simulation.
Parameters:
----------
results_folder : Path
Path to the directory containing simulation output files.
resourcefilepath : Path, optional
Path to the resource files
_draws : list, optional
Specific draws to include in the cost estimation. Defaults to all available draws.
_runs : list, optional
Specific runs to include in the cost estimation. Defaults to all runs.
summarize : bool, default False
Whether to summarize the costs across draws/runs with central metric (specified below) and confidence intervals.
_metric : {'mean', 'median'}, default 'mean'
Summary statistic to use if `summarize=True`.
_years : list of int, optional
Years to include in the cost output. If None, all years are included.
cost_only_used_staff : bool, default True
If True, only costs for level-cadre combinations ever used in simulation are included.
_discount_rate : float or dict of {int: float}, default 0
Discount rate to apply to future costs. Can be a constant or year-specific dictionary.
Returns:
-------
pd.DataFrame
A dataframe containing discounted costs disaggregated by category, sub-category, category-specific subgroup, year, draw, and run.
Note that if a discount rate is used, the dataframe will provide cost as the NPV during the first year of the dataframe
"""
# Useful common functions
def drop_outside_period(_df):
"""Return a dataframe which only includes for which the date is within the limits defined by TARGET_PERIOD"""
return _df.drop(index=_df.index[~_df['date'].between(*TARGET_PERIOD)])
def melt_model_output_draws_and_runs(_df, id_vars):
multi_index = pd.MultiIndex.from_tuples(_df.columns)
_df.columns = multi_index
melted_df = pd.melt(_df, id_vars=id_vars).rename(columns={'variable_0': 'draw', 'variable_1': 'run'})
return melted_df
# Define a relative pathway for relavant folders
path_for_consumable_resourcefiles = resourcefilepath / "healthsystem/consumables"
# %% Gathering basic information
# Load basic simulation parameters
# -------------------------------------
info, years, TARGET_PERIOD = load_simulation_metadata(results_folder)
if _draws is None:
_draws = range(0, info['number_of_draws'])
if _runs is None:
_runs = range(0, info['runs_per_draw'])
# Load cost input files
# ------------------------
# Extract districts and facility levels from the Master Facility List
mfl = pd.read_csv(resourcefilepath / "healthsystem" / "organisation" / "ResourceFile_Master_Facilities_List.csv")
district_dict = pd.read_csv(resourcefilepath / 'demography' / 'ResourceFile_Population_2010.csv')[
['District_Num', 'District']].drop_duplicates()
district_dict = dict(zip(district_dict['District_Num'], district_dict['District']))
facility_id_levels_dict = dict(zip(mfl['Facility_ID'], mfl['Facility_Level']))
fac_levels = set(mfl.Facility_Level)
# If variable discount rate is provided, use the average across the relevant years for the purpose of annuitization of HR and equipment costs
def calculate_annuitization_rate(_discount_rate, _years):
if isinstance(_discount_rate, (int, float)):
# Single discount rate, return as is
return _discount_rate
elif isinstance(_discount_rate, dict):
# Extract rates for the specified years (default to 0 if year is missing)
rates = [_discount_rate.get(year, 0) for year in _years]
return sum(rates) / len(rates) # Average discount rate
else:
raise ValueError("`_discount_rate` must be either a number (single rate) or a dictionary {year: rate}.")
annuitization_rate = calculate_annuitization_rate(_discount_rate, _years)
# Read all cost parameters
# ---------------------------------------
unit_costs = load_unit_cost_assumptions(resourcefilepath)
# Calculate necessary equipment cost components based on HSSP-III assumptions
if _discount_rate == 0:
unit_costs['equipment']['replacement_cost_annual'] = unit_costs['equipment'].apply(
lambda row: row['Unit_Purchase_Cost'] / row['Life_Span'],
axis=1) # straight line depreciation is discount rate is 0
else:
unit_costs['equipment']['replacement_cost_annual'] = unit_costs['equipment'].apply(
lambda row: row['Unit_Purchase_Cost'] / (
1 + (1 - (1 + annuitization_rate) ** (-row['Life_Span'] + 1)) / annuitization_rate),
axis=1) # Annuitised over the life span of the equipment assuming outlay at the beginning of the year
unit_costs['equipment']['service_fee_annual'] = unit_costs['equipment'].apply(
lambda row: row['Unit_Purchase_Cost'] * 0.8 / 8 if row['Unit_Purchase_Cost'] > 1000 else 0,
axis=1) # 80% of the value of the item over 8 years
unit_costs['equipment']['spare_parts_annual'] = unit_costs['equipment'].apply(
lambda row: row['Unit_Purchase_Cost'] * 0.2 / 8 if row['Unit_Purchase_Cost'] > 1000 else 0,
axis=1) # 20% of the value of the item over 8 years
unit_costs['equipment']['major_corrective_maintenance_cost_annual'] = unit_costs['equipment'].apply(
lambda row: row['Unit_Purchase_Cost'] * 0.2 * 0.2 / 8 if row['Unit_Purchase_Cost'] < 250000 else 0,
axis=1) # 20% of the value of 20% of the items over 8 years
# TODO consider discounting the other components
# Quantity needed for each equipment by facility
unit_costs['equipment'] = unit_costs['equipment'][['Item_code', 'Equipment_tlo',
'replacement_cost_annual', 'service_fee_annual',
'spare_parts_annual', 'major_corrective_maintenance_cost_annual',
'Health Post_Prioritised_Quantity',
'Community_Prioritised_Quantity',
'Health Center_Prioritised_Quantity',
'District_Prioritised_Quantity', 'Central_Prioritised_Quantity']]
unit_costs['equipment'] = unit_costs['equipment'].rename(
columns={col: 'Quantity_' + col.replace('_Prioritised_Quantity', '') for col in unit_costs['equipment'].columns
if col.endswith('_Prioritised_Quantity')})
unit_costs['equipment'] = unit_costs['equipment'].rename(
columns={col: col.replace(' ', '_') for col in unit_costs['equipment'].columns})
unit_costs['equipment'] = pd.wide_to_long(unit_costs['equipment'], stubnames=['Quantity_'],
i=['Item_code', 'Equipment_tlo', 'replacement_cost_annual',
'service_fee_annual', 'spare_parts_annual',
'major_corrective_maintenance_cost_annual'],
j='Facility_Level', suffix='(\d+|\w+)').reset_index()
facility_level_mapping = {'Health_Post': '0', 'Health_Center': '1a', 'Community': '1b', 'District': '2',
'Central': '3'}
unit_costs['equipment']['Facility_Level'] = unit_costs['equipment']['Facility_Level'].replace(
facility_level_mapping)
unit_costs['equipment'] = unit_costs['equipment'].rename(columns={'Quantity_': 'Quantity'})
# Function to prepare cost dataframe ready to be merged across cross categories
def retain_relevant_column_subset(_df, _category_specific_group):
columns_to_retain = ['draw', 'run', 'year', 'cost_subcategory', 'Facility_Level', _category_specific_group,
'cost']
if 'cost_category' in _df.columns:
columns_to_retain.append('cost_category')
_df = _df[columns_to_retain]
return _df
def prepare_cost_dataframe(_df, _category_specific_group, _cost_category):
_df = _df.rename(columns={_category_specific_group: 'cost_subgroup'})
_df['cost_category'] = _cost_category
return retain_relevant_column_subset(_df, 'cost_subgroup')
# CALCULATE COSTS
# %%
# 1. HR cost
# ------------------------
print("Now estimating HR costs...")
# Define a function to merge unit cost data with model outputs
def merge_cost_and_model_data(cost_df, model_df, varnames):
"""
Merge model data with cost parameters based on specificity (overall, by cadre, by facility).
Parameters
----------
cost_df : pd.DataFrame
DataFrame with columns ['Parameter_name', 'OfficerType', 'Facility_Level', 'Value'].
model_df : pd.DataFrame
Simulation output to merge cost data into.
varnames : list of str
List of parameter names to extract and merge.
Returns
-------
pd.DataFrame
The model_df with added columns for each cost parameter.
"""
merged_df = model_df.copy()
for varname in varnames:
new_cost_df = cost_df[cost_df['Parameter_name'] == varname][['OfficerType', 'Facility_Level', 'Value']]
new_cost_df = new_cost_df.rename(columns={"Value": varname})
is_identical_for_all_cadres = (new_cost_df['OfficerType'] == 'All').all()
is_identical_for_all_levels = (new_cost_df['Facility_Level'] == 'All').all()
# Some parameters are specific to the facility level/cadre, others are general
if (is_identical_for_all_cadres) and (is_identical_for_all_levels):
merged_df[varname] = new_cost_df[varname].mean()
elif (is_identical_for_all_cadres) and not (is_identical_for_all_levels):
merged_df = pd.merge(merged_df, new_cost_df[['Facility_Level', varname]], on=['Facility_Level'],
how="left")
elif (is_identical_for_all_levels) and not (is_identical_for_all_cadres):
merged_df = pd.merge(merged_df, new_cost_df[['OfficerType', varname]], on=['OfficerType'], how="left")
else:
merged_df = pd.merge(merged_df, new_cost_df, on=['OfficerType', 'Facility_Level'], how="left")
return merged_df
# Get available staff count for each year and draw
def get_staff_count_by_facid_and_officer_type(_df: pd.Series) -> pd.Series:
"""Summarise the parsed logged-key results for one draw (as dataframe) into a pd.Series."""
_df = _df.set_axis(_df['date'].dt.year).drop(columns=['date'])
_df.index.name = 'year'
def change_to_standard_flattened_index_format(col):
parts = col.split("_", 3) # Split by "_" only up to 3 parts
if len(parts) > 2:
return parts[0] + "=" + parts[1] + "|" + parts[2] + "=" + parts[
3] # Rejoin with "I" at the second occurrence
return col # If there's no second underscore, return the string as it is
_df.columns = [change_to_standard_flattened_index_format(col) for col in _df.columns]
return unflatten_flattened_multi_index_in_logging(_df).stack(level=[0, 1]) # expanded flattened axis
# Staff count by Facility ID
available_staff_count_by_facid_and_officertype = extract_results(
Path(results_folder),
module='tlo.methods.healthsystem.summary',
key='number_of_hcw_staff',
custom_generate_series=get_staff_count_by_facid_and_officer_type,
do_scaling=True,
)
# Update above series to get staff count by Facility_Level
available_staff_count_by_facid_and_officertype = available_staff_count_by_facid_and_officertype.reset_index().rename(
columns={'FacilityID': 'Facility_ID', 'Officer': 'OfficerType'})
available_staff_count_by_facid_and_officertype['Facility_ID'] = pd.to_numeric(
available_staff_count_by_facid_and_officertype['Facility_ID'])
available_staff_count_by_facid_and_officertype['Facility_Level'] = available_staff_count_by_facid_and_officertype[
'Facility_ID'].map(facility_id_levels_dict)
idx = pd.IndexSlice
available_staff_count_by_level_and_officer_type = available_staff_count_by_facid_and_officertype.drop(
columns=[idx['Facility_ID']]).groupby([idx['year'], idx['Facility_Level'], idx['OfficerType']]).sum()
available_staff_count_by_level_and_officer_type = melt_model_output_draws_and_runs(
available_staff_count_by_level_and_officer_type.reset_index(),
id_vars=['year', 'Facility_Level', 'OfficerType'])
available_staff_count_by_level_and_officer_type['Facility_Level'] = available_staff_count_by_level_and_officer_type[
'Facility_Level'].astype(str) # make sure facility level is stored as string
available_staff_count_by_level_and_officer_type = available_staff_count_by_level_and_officer_type.drop(
available_staff_count_by_level_and_officer_type[available_staff_count_by_level_and_officer_type[
'Facility_Level'] == '5'].index) # drop headquarters because we're only concerned with staff engaged in service delivery
available_staff_count_by_level_and_officer_type.rename(columns={'value': 'staff_count'}, inplace=True)
# Get list of cadres which were utilised in each run to get the count of staff used in the simulation
# Note that we still cost the full staff count for any cadre-Facility_Level combination that was ever used in a run, and
# not the amount of time which was used
def get_capacity_used_by_officer_type_and_facility_level(_df: pd.Series) -> pd.Series:
"""Summarise the parsed logged-key results for one draw (as dataframe) into a pd.Series."""
_df = _df.set_axis(_df['date'].dt.year).drop(columns=['date'])
_df.index.name = 'year'
return unflatten_flattened_multi_index_in_logging(_df).stack(level=[0, 1]) # expanded flattened axis
annual_capacity_used_by_cadre_and_level = extract_results(
Path(results_folder),
module='tlo.methods.healthsystem.summary',
key='Capacity_By_OfficerType_And_FacilityLevel',
custom_generate_series=get_capacity_used_by_officer_type_and_facility_level,
do_scaling=False,
)
# Prepare capacity used dataframe to be multiplied by staff count
average_capacity_used_by_cadre_and_level = annual_capacity_used_by_cadre_and_level.groupby(
['OfficerType', 'FacilityLevel']).mean().reset_index(drop=False)
# TODO see if cadre-level combinations should be chosen by year
average_capacity_used_by_cadre_and_level.reset_index(drop=True) # Flatten multi=index column
average_capacity_used_by_cadre_and_level = average_capacity_used_by_cadre_and_level.melt(
id_vars=['OfficerType', 'FacilityLevel'],
var_name=['draw', 'run'],
value_name='capacity_used')
list_of_cadre_and_level_combinations_used = \
average_capacity_used_by_cadre_and_level[average_capacity_used_by_cadre_and_level['capacity_used'] != 0][
['OfficerType', 'FacilityLevel', 'draw', 'run']]
print(
f"Out of {average_capacity_used_by_cadre_and_level.groupby(['OfficerType', 'FacilityLevel']).size().count()} cadre and level combinations available, {list_of_cadre_and_level_combinations_used.groupby(['OfficerType', 'FacilityLevel']).size().count()} are used across the simulations")
list_of_cadre_and_level_combinations_used = list_of_cadre_and_level_combinations_used.rename(
columns={'FacilityLevel': 'Facility_Level'})
# Subset scenario staffing level to only include cadre-level combinations used in the simulation
used_staff_count_by_level_and_officer_type = available_staff_count_by_level_and_officer_type.merge(
list_of_cadre_and_level_combinations_used, on=['draw', 'run', 'OfficerType', 'Facility_Level'], how='right',
validate='m:m')
used_staff_count_by_level_and_officer_type.rename(columns={'value': 'staff_count'}, inplace=True)
if (cost_only_used_staff):
print(
"The input for 'cost_only_used_staff' implies that only cadre-level combinations which have been used in the run are costed")
staff_size_chosen_for_costing = used_staff_count_by_level_and_officer_type
else:
print(
"The input for 'cost_only_used_staff' implies that all staff are costed regardless of the cadre-level combinations which have been used in the run are costed")
staff_size_chosen_for_costing = available_staff_count_by_level_and_officer_type
# Calculate various components of HR cost
# 1.1 Salary cost for health workforce cadres used in the simulation (Staff count X Annual salary)
# ---------------------------------------------------------------------------------------------------------------
salary_for_staff = merge_cost_and_model_data(cost_df=unit_costs['hr'], model_df=staff_size_chosen_for_costing,
varnames=['salary_usd'])
salary_for_staff['cost'] = salary_for_staff['salary_usd'] * salary_for_staff['staff_count']
# 1.2 Pre-service training & recruitment cost to fill gap created by attrition
# ---------------------------------------------------------------------------------------------------------------
preservice_training_cost = merge_cost_and_model_data(cost_df=unit_costs['hr'],
model_df=staff_size_chosen_for_costing,
varnames=['annual_attrition_rate',
'licensure_exam_passing_rate', 'graduation_rate',
'absorption_rate_of_students_into_public_workforce',
'proportion_of_workforce_recruited_from_abroad',
'average_annual_preservice_training_cost_for_cadre',
'preservice_training_duration',
'recruitment_cost_per_person_recruited_usd',
'average_length_of_tenure_in_the_public_sector'])
def calculate_npv_past_training_expenses_by_row(row, r=_discount_rate):
# Initialize the NPV for the row
npv = 0
annual_cost = row['average_annual_preservice_training_cost_for_cadre']
full_years = int(row['preservice_training_duration']) # Extract integer part of the year
partial_year = row['preservice_training_duration'] - full_years # Fractional part of the year
# Iterate over each year of the training duration to calculate compounded cost to the present
# Calculate NPV for each full year of training
for t in range(full_years):
npv += annual_cost * (1 + r) ** (t + 1 + 1) # 1 added twice because range(4) is [0,1,2,3]
# Account for the fractional year at the end if it exists
if partial_year > 0:
npv += annual_cost * partial_year * (1 + r) ** (1 + r)
# Add recruitment cost assuming this happens during the partial year or the year after graduation if partial year == 0
npv += row['recruitment_cost_per_person_recruited_usd'] * (1 + r)
return npv
# Calculate NPV for each row using iterrows and store in a new column
npv_values = []
for index, row in preservice_training_cost.iterrows():
npv = calculate_npv_past_training_expenses_by_row(row, r=annuitization_rate)
npv_values.append(npv)
preservice_training_cost['npv_of_training_and_recruitment_cost'] = npv_values
preservice_training_cost['npv_of_training_and_recruitment_cost_per_recruit'] = preservice_training_cost[
'npv_of_training_and_recruitment_cost'] * \
(1 / (preservice_training_cost[
'absorption_rate_of_students_into_public_workforce'] +
preservice_training_cost[
'proportion_of_workforce_recruited_from_abroad'])) * \
(1 / preservice_training_cost[
'graduation_rate']) * (1 /
preservice_training_cost[
'licensure_exam_passing_rate'])
if _discount_rate == 0: # if the discount rate is 0, then the pre-service + recruitment cost simply needs to be divided by the number of years in tenure
preservice_training_cost['annuitisation_rate'] = preservice_training_cost[
'average_length_of_tenure_in_the_public_sector']
else:
preservice_training_cost['annuitisation_rate'] = 1 + (1 - (1 + annuitization_rate) ** (
-preservice_training_cost[
'average_length_of_tenure_in_the_public_sector'] + 1)) / annuitization_rate
preservice_training_cost['annuitised_training_and_recruitment_cost_per_recruit'] = preservice_training_cost[
'npv_of_training_and_recruitment_cost_per_recruit'] / \
preservice_training_cost[
'annuitisation_rate']
# Cost per student trained * 1/Rate of absorption from the local and foreign graduates * 1/Graduation rate * attrition rate
# the inverse of attrition rate is the average expected tenure; and the preservice training cost needs to be divided by the average tenure
preservice_training_cost['cost'] = preservice_training_cost[
'annuitised_training_and_recruitment_cost_per_recruit'] * \
preservice_training_cost['staff_count'] * preservice_training_cost[
'annual_attrition_rate'] # not multiplied with attrition rate again because this is already factored into 'Annual_cost_per_staff_recruited'
preservice_training_cost = preservice_training_cost[
['draw', 'run', 'year', 'OfficerType', 'Facility_Level', 'cost']]
# 1.3 In-service training cost to train all staff
# ---------------------------------------------------------------------------------------------------------------
inservice_training_cost = merge_cost_and_model_data(cost_df=unit_costs['hr'],
model_df=staff_size_chosen_for_costing,
varnames=['annual_inservice_training_cost_usd'])
inservice_training_cost['cost'] = inservice_training_cost['staff_count'] * inservice_training_cost[
'annual_inservice_training_cost_usd']
inservice_training_cost = inservice_training_cost[['draw', 'run', 'year', 'OfficerType', 'Facility_Level', 'cost']]
# 1.4 Regular mentorship and supportive supervision costs
# ---------------------------------------------------------------------------------------------------------------
mentorship_and_supportive_cost = merge_cost_and_model_data(cost_df=unit_costs['hr'],
model_df=staff_size_chosen_for_costing,
varnames=['annual_mentorship_and_supervision_cost'])
mentorship_and_supportive_cost['cost'] = mentorship_and_supportive_cost['staff_count'] * \
mentorship_and_supportive_cost['annual_mentorship_and_supervision_cost']
mentorship_and_supportive_cost = mentorship_and_supportive_cost[
['draw', 'run', 'year', 'OfficerType', 'Facility_Level', 'cost']]
# 1.5 Store all HR costs in one standard format dataframe
# ---------------------------------------------------------------------------------------------------------------
# Function to melt and label the cost category
def label_rows_of_cost_dataframe(_df, label_var, label):
    """Return `_df` with its index promoted to columns and a constant
    column `label_var` (e.g. 'cost_subcategory') set to `label`.

    The input dataframe is not modified.
    """
    return _df.reset_index().assign(**{label_var: label})
# Initialize HR with the salary data
if (cost_only_used_staff):
    # Cost only the cadres actually drawn upon in the simulation
    human_resource_costs = retain_relevant_column_subset(
        label_rows_of_cost_dataframe(salary_for_staff, 'cost_subcategory', 'salary_for_cadres_used'), 'OfficerType')
    # Concatenate additional cost categories
    additional_costs = [
        (preservice_training_cost, 'preservice_training_and_recruitment_cost_for_attrited_workers'),
        (inservice_training_cost, 'inservice_training_cost_for_cadres_used'),
        (mentorship_and_supportive_cost, 'mentorship_and_supportive_cost_for_cadres_used')
    ]
else:
    # Cost the full staff establishment, whether or not it was used
    human_resource_costs = retain_relevant_column_subset(
        label_rows_of_cost_dataframe(salary_for_staff, 'cost_subcategory', 'salary_for_all_staff'), 'OfficerType')
    # Concatenate additional cost categories
    additional_costs = [
        (preservice_training_cost, 'preservice_training_and_recruitment_cost_for_attrited_workers'),
        (inservice_training_cost, 'inservice_training_cost_for_all_staff'),
        (mentorship_and_supportive_cost, 'mentorship_and_supportive_cost_for_all_staff')
    ]
# Iterate through additional costs, melt and concatenate
for df, label in additional_costs:
    labelled_df = retain_relevant_column_subset(label_rows_of_cost_dataframe(df, 'cost_subcategory', label),
                                                'OfficerType')
    human_resource_costs = pd.concat([human_resource_costs, labelled_df])
# Standardise into the common cost-reporting format
human_resource_costs = prepare_cost_dataframe(human_resource_costs, _category_specific_group='OfficerType',
                                              _cost_category='human resources for health')
# Only preserve the draws and runs requested
if _draws is not None:
    human_resource_costs = human_resource_costs[human_resource_costs.draw.isin(_draws)]
if _runs is not None:
    human_resource_costs = human_resource_costs[human_resource_costs.run.isin(_runs)]
# %%
# 2. Consumables cost
# ------------------------
print("Now estimating Consumables costs...")
def get_quantity_of_consumables_dispensed(results_folder):
    """Extract, per (draw, run), the quantity of each consumable dispensed
    over the costing period from the 'Consumables' log key.

    Only items actually dispensed (status 'Used') are returned; requests
    recorded as 'Not_Available' are tallied but dropped before returning.
    """

    def get_counts_of_items_requested(_df):
        # Restrict to the costing period, then tally quantities per date
        # separately for dispensed and not-available requests.
        _df = drop_outside_period(_df)
        dispensed_tally = defaultdict(lambda: defaultdict(int))
        unavailable_tally = defaultdict(lambda: defaultdict(int))
        for _, record in _df.iterrows():
            when = record['date']
            for code, quantity in record['Item_Used'].items():
                dispensed_tally[when][code] += quantity
            for code, quantity in record['Item_NotAvailable'].items():
                unavailable_tally[when][code] += quantity

        def _tally_to_series(tally, label):
            # dates become columns, items rows; stack to (item, date) pairs
            return pd.DataFrame(tally).fillna(0).astype(int).stack().rename(label)

        # Combine the two dataframes into one series with MultiIndex (date, item, availability_status)
        combined = pd.concat(
            [_tally_to_series(dispensed_tally, 'Used'),
             _tally_to_series(unavailable_tally, 'Not_Available')],
            axis=1).fillna(0).astype(int)
        # Convert to a pd.Series, as expected by the custom_generate_series function
        return combined.stack()

    requested = extract_results(
        results_folder,
        module='tlo.methods.healthsystem.summary',
        key='Consumables',
        custom_generate_series=get_counts_of_items_requested,
        do_scaling=True)
    # only keep actual dispensed amount, i.e. when available
    return requested.xs("Used", level=2)
# Scaled quantity of each consumable dispensed per year, by (draw, run)
consumables_dispensed = get_quantity_of_consumables_dispensed(results_folder)
consumables_dispensed = consumables_dispensed.reset_index().rename(
    columns={'level_0': 'Item_Code', 'level_1': 'year'})
consumables_dispensed[idx['year']] = pd.to_datetime(
    consumables_dispensed[idx['year']]).dt.year  # Extract only year from date
consumables_dispensed[idx['Item_Code']] = pd.to_numeric(consumables_dispensed[idx['Item_Code']])
# Make a list of columns in the DataFrame pertaining to quantity dispensed
# (i.e. every (draw, run) value column, excluding the identifier columns)
quantity_columns = consumables_dispensed.columns.to_list()
quantity_columns = [tup for tup in quantity_columns if tup not in [('Item_Code', ''), ('year', '')]]
# 2.1 Cost of consumables dispensed
# ---------------------------------------------------------------------------------------------------------------
# Multiply number of items needed by cost of consumable
# consumables_dispensed.columns = consumables_dispensed.columns.get_level_values(0).str() + "_" + consumables_dispensed.columns.get_level_values(1) # Flatten multi-level columns for pandas merge
# Give the unit-cost table the same two-level column structure so it can be
# merged with the (draw, run) multi-indexed dispensed-quantity table
unit_costs['consumables'].columns = pd.MultiIndex.from_arrays(
    [unit_costs['consumables'].columns, [''] * len(unit_costs['consumables'].columns)])
cost_of_consumables_dispensed = consumables_dispensed.merge(unit_costs['consumables'], on=idx['Item_Code'],
                                                            validate='m:1', how='left')
price_column = 'Price_per_unit'
# cost = quantity dispensed * unit price, applied to every (draw, run) column at once
cost_of_consumables_dispensed[quantity_columns] = cost_of_consumables_dispensed[quantity_columns].multiply(
    cost_of_consumables_dispensed[price_column], axis=0)
# 2.2 Cost of consumables stocked (quantity needed for what is dispensed)
# ---------------------------------------------------------------------------------------------------------------
# Stocked amount should be higher than dispensed because of i. excess capacity, ii. theft, iii. expiry
# While there are estimates in the literature of what % these might be, we agreed that it is better to rely upon
# an empirical estimate based on OpenLMIS data
inflow_to_outflow_ratio = pd.read_csv(
    resourcefilepath / "costing/ResourceFile_Consumables_Inflow_Outflow_Ratio.csv")
inflow_to_outflow_ratio = inflow_to_outflow_ratio.set_index(
    ['item_category', 'item_code', 'district', 'fac_type_tlo'])
average_inflow_to_outflow_ratio_ratio = inflow_to_outflow_ratio[
    'inflow_to_outflow_ratio'].mean()  # Use average where item-specific ratio is not available
# Multiply number of items needed by cost of consumable
inflow_to_outflow_ratio_by_consumable = inflow_to_outflow_ratio.groupby(level='item_code').mean()
# Excess stock = stocked beyond what is dispensed, i.e. (inflow/outflow - 1) of the dispensed amount
excess_stock_ratio = inflow_to_outflow_ratio_by_consumable - 1
excess_stock_ratio = excess_stock_ratio.reset_index().rename(
    columns={'inflow_to_outflow_ratio': 'excess_stock_proportion_of_dispensed'})
cost_of_excess_consumables_stocked = consumables_dispensed.merge(unit_costs['consumables'], left_on='Item_Code',
                                                                 right_on='Item_Code', validate='m:1', how='left')
# Match the two-level column structure before merging the ratios in
excess_stock_ratio.columns = pd.MultiIndex.from_arrays(
    [excess_stock_ratio.columns, [''] * len(excess_stock_ratio.columns)])
cost_of_excess_consumables_stocked = cost_of_excess_consumables_stocked.merge(excess_stock_ratio,
                                                                              left_on='Item_Code',
                                                                              right_on='item_code', validate='m:1',
                                                                              how='left')
# Fall back to the average ratio for items with no item-specific estimate
cost_of_excess_consumables_stocked.loc[
    cost_of_excess_consumables_stocked.excess_stock_proportion_of_dispensed.isna(), 'excess_stock_proportion_of_dispensed'] = average_inflow_to_outflow_ratio_ratio - 1  # TODO disaggregate the average by program
# cost of excess stock = quantity dispensed * unit price * excess-stock proportion
cost_of_excess_consumables_stocked[quantity_columns] = cost_of_excess_consumables_stocked[
    quantity_columns].multiply(cost_of_excess_consumables_stocked[idx[price_column]], axis=0)
cost_of_excess_consumables_stocked[quantity_columns] = cost_of_excess_consumables_stocked[
    quantity_columns].multiply(cost_of_excess_consumables_stocked[idx['excess_stock_proportion_of_dispensed']],
                               axis=0)
# 2.3 Store all consumable costs in one standard format dataframe
# ---------------------------------------------------------------------------------------------------------------
# Function to melt and label the cost category
# Mapping of Item_Code -> consumable name, used to attach readable names to costs
consumables_dict = \
    pd.read_csv(path_for_consumable_resourcefiles / 'ResourceFile_Consumables_Items_and_Packages.csv', low_memory=False,
                encoding="ISO-8859-1")[['Items', 'Item_Code']]
consumables_dict = dict(zip(consumables_dict['Item_Code'], consumables_dict['Items']))
def melt_and_label_consumables_cost(_df, label):
    """Reshape a wide consumable cost table (one (draw, run) column pair per
    scenario) into long format with 'draw', 'run' and 'cost' columns, attach
    the consumable name, and tag every row with cost_subcategory = `label`.
    """
    _df.columns = pd.MultiIndex.from_tuples(_df.columns)

    def _is_wanted(col):
        # Keep the identifier columns plus the (draw, run) value columns,
        # which are the only ones where both column levels are numeric.
        if col[0] in ['Item_Code', 'year']:
            return True
        return isinstance(col[0], int) and isinstance(col[1], int)

    subset = _df[[col for col in _df.columns if _is_wanted(col)]]
    # reshape dataframe and assign 'draw' and 'run' as the correct column headers
    long_df = pd.melt(subset, id_vars=['year', 'Item_Code']).rename(
        columns={'variable_0': 'draw', 'variable_1': 'run', 'value': 'cost'})
    # Replace item_code with consumable_name_tlo
    long_df['consumable'] = long_df['Item_Code'].map(consumables_dict)
    long_df['cost_subcategory'] = label
    # TODO this is temporary until 'tlo.methods.healthsystem.summary' only logs consumable at the aggregate level
    long_df['Facility_Level'] = 'all'
    return long_df
def disaggregate_separately_managed_medical_supplies_from_consumable_costs(_df,
_consumables_dict,
# This is a dictionary mapping codes to names
list_of_unique_medical_products):
reversed_consumables_dict = {value: key for key, value in
_consumables_dict.items()} # reverse dictionary to map names to codes
new_df = _df.copy()
new_df['item_code'] = new_df['consumable'].map(reversed_consumables_dict)
cost_of_consumables = new_df[~new_df['item_code'].isin(list_of_unique_medical_products)]
cost_of_separately_managed_medical_supplies = new_df[new_df['item_code'].isin(list_of_unique_medical_products)]
cost_of_separately_managed_medical_supplies['cost_subcategory'] = cost_of_separately_managed_medical_supplies[
'cost_subcategory'].replace(
{'consumables_dispensed': 'separately_managed_medical_supplies_dispensed',
'consumables_stocked': 'separately_managed_medical_supplies_stocked'}, regex=True)
return cost_of_consumables.drop(columns='item_code'), cost_of_separately_managed_medical_supplies.drop(
columns='item_code')
separately_managed_medical_supplies = [127, 141, 161]  # Oxygen, Blood, IRS
# Melt the wide cost tables to long format, then split ordinary consumables from
# separately-managed medical supplies (reported under a distinct cost subcategory)
cost_of_consumables_dispensed, cost_of_separately_managed_medical_supplies_dispensed = disaggregate_separately_managed_medical_supplies_from_consumable_costs(
    _df=retain_relevant_column_subset(
        melt_and_label_consumables_cost(cost_of_consumables_dispensed, 'cost_of_consumables_dispensed'),
        'consumable'),
    _consumables_dict=consumables_dict,
    list_of_unique_medical_products=separately_managed_medical_supplies)
cost_of_excess_consumables_stocked, cost_of_separately_managed_medical_supplies_excess_stock = disaggregate_separately_managed_medical_supplies_from_consumable_costs(
    _df=retain_relevant_column_subset(
        melt_and_label_consumables_cost(cost_of_excess_consumables_stocked, 'cost_of_excess_consumables_stocked'),
        'consumable'),
    _consumables_dict=consumables_dict,
    list_of_unique_medical_products=separately_managed_medical_supplies)
consumable_costs = pd.concat([cost_of_consumables_dispensed, cost_of_excess_consumables_stocked])
# 2.4 Supply chain costs
# ---------------------------------------------------------------------------------------------------------------
# Assume that the cost of procurement, warehousing and distribution is a fixed proportion of consumable purchase costs
# The fixed proportion is based on Resource Mapping Expenditure data from 2018
resource_mapping_data = unit_costs['actual_expenditure_data']
# Make sure values are numeric
expenditure_column = ['EXPENDITURE (USD) (Jul 2018 - Jun 2019)']
resource_mapping_data[expenditure_column] = resource_mapping_data[expenditure_column].apply(
    lambda x: pd.to_numeric(x, errors='coerce'))
# The numerator includes Supply chain expenditure for EHP consumables
# NOTE(review): `.sum()[0]` indexes a label-indexed Series by position, which is
# deprecated in recent pandas — consider `.iloc[0]`; confirm the pinned pandas version
supply_chain_expenditure = \
    resource_mapping_data[resource_mapping_data['Cost Type'] == 'Supply Chain'][expenditure_column].sum()[0]
# The denominator include all drugs and commodities expenditure, excluding what is recategorised as non-EHP or admin
drug_expenditure_condition = resource_mapping_data['Cost Type'].str.contains('Drugs and Commodities')
excluded_drug_expenditure_condition = (resource_mapping_data[
                                           'Calibration_category'] == 'Program Management & Administration') | (
                                          resource_mapping_data[
                                              'Calibration_category'] == 'Non-EHP consumables')
consumables_purchase_expenditure = \
    resource_mapping_data[drug_expenditure_condition][expenditure_column].sum()[0] - \
    resource_mapping_data[drug_expenditure_condition & excluded_drug_expenditure_condition][
        expenditure_column].sum()[0]
supply_chain_cost_proportion = supply_chain_expenditure / consumables_purchase_expenditure
# Estimate supply chain costs based on the total consumable purchase cost calculated above
# Note that Oxygen, IRS, and Blood costs are already excluded because the unit_cost of these commodities already
# includes the procurement/production, storage and distribution costs
supply_chain_costs = (consumable_costs.groupby(['draw', 'run', 'year'])[
                          'cost'].sum() * supply_chain_cost_proportion).reset_index()
# Assign relevant additional columns to match the format of the rest of consumables costs
supply_chain_costs['Facility_Level'] = 'all'
supply_chain_costs['consumable'] = 'supply chain (all consumables)'
supply_chain_costs['cost_subcategory'] = 'supply_chain'
assert set(supply_chain_costs.columns) == set(consumable_costs.columns)
# Append supply chain costs to the full consumable cost dataframe
consumable_costs = pd.concat([consumable_costs, supply_chain_costs])
other_costs = pd.concat([cost_of_separately_managed_medical_supplies_dispensed,
                         cost_of_separately_managed_medical_supplies_excess_stock])
# Standardise both dataframes into the common cost-reporting format
consumable_costs = prepare_cost_dataframe(consumable_costs, _category_specific_group='consumable',
                                          _cost_category='medical consumables')
other_costs = prepare_cost_dataframe(other_costs, _category_specific_group='consumable',
                                     _cost_category='medical consumables')
# Only preserve the draws and runs requested
if _draws is not None:
    consumable_costs = consumable_costs[consumable_costs.draw.isin(_draws)]
    other_costs = other_costs[other_costs.draw.isin(_draws)]
if _runs is not None:
    consumable_costs = consumable_costs[consumable_costs.run.isin(_runs)]
    other_costs = other_costs[other_costs.run.isin(_runs)]
# %%
# 3. Equipment cost
# --------------------------------------------
print("Now estimating Medical equipment costs...")
# Total cost of equipment required as per SEL (HSSP-III) only at facility IDs where it has been used in the simulation
# Get list of equipment used in the simulation by district and level
def get_equipment_used_by_district_and_facility(_df: pd.Series) -> pd.Series:
    """Summarise the parsed logged-key results for one draw (as dataframe) into a pd.Series."""
    # Keep the first logged 'EquipmentEverUsed' entry per (District, Facility_Level)
    _df = _df.pivot_table(index=['District', 'Facility_Level'],
                          values='EquipmentEverUsed',
                          aggfunc='first')
    # NOTE(review): renaming the (District, Facility_Level) index to 'year' is surprising —
    # presumably required by the downstream extract_results convention; confirm, and note
    # that setting a single name on a MultiIndex is version-sensitive in pandas.
    _df.index.name = 'year'
    return _df['EquipmentEverUsed']
# One column per (draw, run); each cell holds the set of equipment ever used
# at that district/facility level, as logged by the health system module
list_of_equipment_used_by_draw_and_run = extract_results(
    Path(results_folder),
    module='tlo.methods.healthsystem.summary',
    key='EquipmentEverUsed_ByFacilityID',
    custom_generate_series=get_equipment_used_by_district_and_facility,
    do_scaling=False,
)
# The log stores the equipment sets as strings; parse them back into python objects
for col in list_of_equipment_used_by_draw_and_run.columns:
    list_of_equipment_used_by_draw_and_run[col] = list_of_equipment_used_by_draw_and_run[col].apply(
        ast.literal_eval)
# Initialize an empty DataFrame
equipment_cost_across_sim = pd.DataFrame()
# Extract equipment cost for each draw and run
# NOTE(review): unlike the `isin` filters above, this iteration assumes `_draws` and
# `_runs` are not None — confirm callers always pass them for the equipment section
for d in _draws:
    for r in _runs:
        print(f"Processing draw {d} and run {r} of equipment costs")
        # Extract a list of equipment which was used at each facility level within each district
        equipment_used = {district: {level: [] for level in fac_levels} for district in list(
            district_dict.values())}  # create a dictionary with a key for each district and facility level
        list_of_equipment_used_by_current_draw_and_run = list_of_equipment_used_by_draw_and_run[
            (d, r)].reset_index()
        for dist in list(district_dict.values()):
            for level in fac_levels:
                equipment_used_subset = list_of_equipment_used_by_current_draw_and_run[
                    (list_of_equipment_used_by_current_draw_and_run['District'] == dist) & (
                        list_of_equipment_used_by_current_draw_and_run['Facility_Level'] == level)]
                equipment_used_subset.columns = ['District', 'Facility_Level', 'EquipmentEverUsed']
                # Union of every equipment set logged for this district/level
                equipment_used[dist][level] = set().union(*equipment_used_subset['EquipmentEverUsed'])
        # Collapse the nested dict into a (district, level)-indexed dataframe
        equipment_used = pd.concat({
            k: pd.DataFrame.from_dict(v, 'index') for k, v in equipment_used.items()},
            axis=0)
        full_list_of_equipment_used = set(equipment_used.values.flatten())
        full_list_of_equipment_used = set(filter(pd.notnull, full_list_of_equipment_used))
        # Build a (district, level) x item indicator matrix of whether each item was ever used
        equipment_df = pd.DataFrame()
        equipment_df.index = equipment_used.index
        for item in full_list_of_equipment_used:
            equipment_df[str(item)] = 0
            for dist_fac_index in equipment_df.index:
                equipment_df.loc[equipment_df.index == dist_fac_index, str(item)] = equipment_used[
                    equipment_used.index == dist_fac_index].isin([item]).any(axis=1)
        # equipment_df.to_csv('./outputs/equipment_use.csv')
        equipment_df = equipment_df.reset_index().rename(
            columns={'level_0': 'District', 'level_1': 'Facility_Level'})
        equipment_df = pd.melt(equipment_df, id_vars=['District', 'Facility_Level']).rename(
            columns={'variable': 'Item_code', 'value': 'whether_item_was_used'})
        equipment_df['Item_code'] = pd.to_numeric(equipment_df['Item_code'])
        # Merge the count of facilities by district and level
        equipment_df = equipment_df.merge(mfl[['District', 'Facility_Level', 'Facility_Count']],
                                          on=['District', 'Facility_Level'], how='left')
        # Districts/levels absent from the master facility list have zero facilities
        equipment_df.loc[equipment_df.Facility_Count.isna(), 'Facility_Count'] = 0
        # Because levels 1b and 2 are collapsed together, we assume that the same equipment is used by level 1b as that recorded for level 2
def update_itemuse_for_level1b_using_level2_data(_df):
# Create a list of District and Item_code combinations for which use == True
list_of_equipment_used_at_level2 = \
_df[(_df.Facility_Level == '2') & (_df['whether_item_was_used'] is True)][['District', 'Item_code']]
# Now update the 'whether_item_was_used' for 'Facility_Level' == '1b' to match that of level '2'
_df.loc[
(_df['Facility_Level'] == '1b') &
(_df[['District', 'Item_code']].apply(tuple, axis=1).isin(
list_of_equipment_used_at_level2.apply(tuple, axis=1))),
'whether_item_was_used'