Skip to content

Commit b0fc63c

Browse files
authored
Updated the message-template-to-causal-pathway mapping to add "Not Top Performer" and "Top Performer" messages; added a PROCESS_CANDIDATES switch to the bulk-up app (#399)
1 parent 0df7795 commit b0fc63c

File tree

6 files changed

+35
-27
lines changed

6 files changed

+35
-27
lines changed

bulk-up/README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,10 @@ The default behavior is to run 10 requests, using 1 worker, with `http://localh
6060
- default: None
6161
- format: CSV
6262

63+
#### PROCESS_CANDIDATES: If set to True, processes candidates and prints a detailed report
64+
- default: True
65+
- note: If the PFP is not logging candidate detail, you must skip candidate processing by setting this variable to False
66+
6367
### Authentication (Google Cloud only)
6468

6569
If TARGET_AUDIENCE is set, the bulk requester will send Google Cloud credentials to the PFP service. Both TARGET_AUDIENCE and SERVICE_ACCOUNT_KEY_PATH must be set. Both values can be obtained from the deployment team.

bulk-up/src/bulk_up/req.py

Lines changed: 23 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
OUTPUT = os.environ.get("OUTPUT", None)
3232

3333
process_candidates_str = os.environ.get("PROCESS_CANDIDATES", "True")
34-
PROCESS_CANDIDATES = process_candidates_str.lower() in ['true', 't', '1', 'yes']
34+
PROCESS_CANDIDATES = process_candidates_str.lower() in ["true", "t", "1", "yes"]
3535

3636
candidate_df: pd.DataFrame = pd.DataFrame()
3737
response_df: pd.DataFrame = pd.DataFrame()
@@ -114,29 +114,28 @@ def add_candidates(response_data: dict, performance_month: str):
114114
if data:
115115
candidates = pd.DataFrame(data[1:], columns=data[0])
116116
candidates["performance_month"] = performance_month
117-
candidate_df = pd.concat(
118-
[candidate_df, candidates], ignore_index=True
119-
)
117+
candidate_df = pd.concat([candidate_df, candidates], ignore_index=True)
120118

121119

122120
def add_response(response: requests.Response, response_data):
123121
global response_df
124122
timing_total = response_data.get("timing", {}).get("total", float("NaN"))
125123
selected_candidate = response_data.get("selected_candidate", None)
126-
124+
127125
response_dict: dict = {
128126
"staff_number": [response_data.get("staff_number", None)],
129-
"causal_pathway": selected_candidate["acceptable_by"] if selected_candidate else [None],
127+
"causal_pathway": selected_candidate["acceptable_by"]
128+
if selected_candidate
129+
else [None],
130130
"status_code": [response.status_code],
131131
"elapsed": [response.elapsed.total_seconds()],
132132
"timing.total": [timing_total],
133133
"ok": [response.ok],
134-
135134
}
136135
response_df = pd.concat(
137136
[response_df, pd.DataFrame(response_dict)], ignore_index=True
138137
)
139-
print(response_dict, end='\r')
138+
print(response_dict, end="\r")
140139

141140

142141
def analyse_responses():
@@ -152,50 +151,51 @@ def analyse_responses():
152151
.agg(pfp_time=("mean"))
153152
.reset_index()
154153
)
155-
154+
156155
r2 = (
157156
response_df.groupby("causal_pathway")["staff_number"]
158157
.agg(count=("count"))
159158
.reset_index()
160159
)
161160

161+
r2["% "] = round(r2["count"] / r2["count"].sum() * 100, 1)
162+
162163
r = pd.merge(r, r1, on="status_code", how="left")
163164

164165
r["pfp_time"] = round(r["pfp_time"] * 1000, 1)
165166
r["response_time"] = round(r["response_time"] * 1000, 1)
166167

167168
print(f"\n {r} \n")
168-
169+
169170
print(f"\n {r2} \n")
170171

172+
171173
def analyse_candidates():
172174
global candidate_df
173175

174176
if OUTPUT:
175177
candidate_df.to_csv(OUTPUT, index=False)
176178

177-
178179
candidate_df.rename(columns={"acceptable_by": "causal_pathway"}, inplace=True)
179180
candidate_df["score"] = candidate_df["score"].astype(float)
180181
candidate_df.rename(columns={"name": "message"}, inplace=True)
181-
182+
182183
# pd.set_option("display.max_columns", None)
183184
# pd.set_option("display.expand_frame_repr", False)
184185
# pd.set_option("display.width", 1000)
185186
# pd.set_option("display.max_colwidth", None)
186187

187-
188188
# causal pathways
189189
causal_pathway_report = build_table("causal_pathway")
190190
print(causal_pathway_report, "\n")
191191

192192
# messages
193193
message_report = build_table("message")
194194
print(message_report, "\n")
195-
195+
196196
# measures
197197
measure_report = build_table("measure")
198-
print(measure_report, "\n")
198+
print(measure_report, "\n")
199199

200200

201201
def build_table(grouping_column):
@@ -207,8 +207,9 @@ def build_table(grouping_column):
207207
scores = round(
208208
candidate_df.groupby(grouping_column)["score"]
209209
.agg(acceptable_score=("mean"))
210-
.reset_index()
211-
,2)
210+
.reset_index(),
211+
2,
212+
)
212213
report_table = pd.merge(report_table, scores, on=grouping_column, how="left")
213214

214215
report_table["% acceptable"] = round(
@@ -224,8 +225,9 @@ def build_table(grouping_column):
224225
candidate_df[candidate_df["selected"]]
225226
.groupby(grouping_column)["score"]
226227
.agg(selected_score=("mean"))
227-
.reset_index()
228-
,2)
228+
.reset_index(),
229+
2,
230+
)
229231
report_table = pd.merge(
230232
report_table, selected_scores, on=grouping_column, how="left"
231233
)
@@ -242,7 +244,7 @@ def build_table(grouping_column):
242244
"% of acceptable selected",
243245
]
244246
]
245-
247+
246248
return report_table
247249

248250

@@ -267,7 +269,7 @@ def main():
267269
if SAMPLE:
268270
n = min(SAMPLE, len(input_files))
269271
input_files = sorted(random.sample(input_files, n), key=extract_number)
270-
272+
271273
with ThreadPoolExecutor(WORKERS) as executor:
272274
executor.map(post_json_message, input_files)
273275

candidate_pudding/candidate_pudding.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,10 +82,11 @@ def acceptable_by(candidate: Resource):
8282
def add_causal_pathway(candidate: Resource):
8383
# map message templates schema:name to causal pathway schema:name
8484
causal_pathway_map: dict = {
85-
"Congratulations High Performance": "social better",
85+
"Top Performer": "social better",
8686
"Getting Worse": "worsening",
8787
"In Top 25%": "social better",
8888
"Opportunity to Improve Top 10 Peer Benchmark": "social worse",
89+
"Not Top Performer": "social worse",
8990
"Performance Improving": "improving",
9091
"Reached Goal": "goal gain",
9192
"Drop Below Goal": "goal loss",

esteemer/esteemer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ def final_score(m, h, p):
175175

176176
score = m * 1 + h * 2 + p * 1.3
177177

178-
return round(score, 1)
178+
return round(score, 2)
179179

180180

181181
def score_better(candidate: Resource, motivating_informations: List[Resource]) -> float:

pictoralist/pictoralist.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -205,7 +205,8 @@ def finalize_text(self):
205205
## Replace placeholders in the template with actual values:
206206
# Format "[measure name]":
207207
self.message_text = self.message_text.replace(
208-
"[measure name]", f"{self.sel_measure_title}",
208+
"[measure name]",
209+
f"{self.sel_measure_title}",
209210
)
210211
# Format "[recipient performance level]":
211212
self.message_text = self.message_text.replace(

tests/esteemer/test_esteemer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ def candidate_resource(performance_data_frame):
8282

8383
def test_score(candidate_resource):
8484
esteemer.score(candidate_resource, None, {})
85-
assert candidate_resource.value(SLOWMO.Score).value == pytest.approx(0.1)
85+
assert candidate_resource.value(SLOWMO.Score).value == pytest.approx(2.05)
8686

8787

8888
def test_calculate_preference_score(candidate_resource):
@@ -129,9 +129,9 @@ def test_get_trend_info():
129129

130130

131131
def test_no_history_signal_is_score_0(candidate_resource):
132-
assert esteemer.score_history(candidate_resource, {}) == 0.0
132+
assert esteemer.score_history(candidate_resource, {}) == 1.0
133133

134-
assert esteemer.score_history(candidate_resource, None) == 0.0
134+
assert esteemer.score_history(candidate_resource, None) == 1.0
135135

136136

137137
def test_history_with_two_recurrances(candidate_resource, history):

0 commit comments

Comments
 (0)