Commit 4cb3add

349 add recency properties/moderators to history signal (#381)
* add recency properties/moderators to history signal
* use the highest-coachiness group when selecting a candidate
* add scoring normalization to the score methods
* add a coachiness toggle
* normalize preferences

Closes #361, #349 and #318
1 parent 3e26e97 commit 4cb3add

File tree

9 files changed: +304 −151 lines changed


bitstomach/bitstomach.py

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
 import pandas as pd
-from rdflib import RDF, BNode, Graph
+from rdflib import RDF, BNode, Graph, Literal
 
 from bitstomach.signals import SIGNALS
 from utils.namespace import PSDO, SLOWMO
@@ -14,7 +14,7 @@ def extract_signals(perf_df: pd.DataFrame) -> Graph:
     g = Graph()
     r = g.resource(BNode("performance_content"))
     r.set(RDF.type, PSDO.performance_content)
-
+    r.set(SLOWMO.PerformanceMonth, Literal(perf_df.attrs["performance_month"]))
     if perf_df.empty:
         return g
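extract_signals now stamps the performance month onto the graph, reading it from the DataFrame's attrs metadata. A minimal sketch of how a caller would be expected to populate that attribute beforehand (the column names are illustrative, not from this commit; the ISO date format matches the datetime.fromisoformat call in score_history below):

import pandas as pd

# Hypothetical caller set-up: attach the reporting month as DataFrame
# metadata so extract_signals() can store it as SLOWMO.PerformanceMonth.
perf_df = pd.DataFrame({"measure": ["M1"], "passed_rate": [0.82]})
perf_df.attrs["performance_month"] = "2024-03-01"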

esteemer/esteemer.py

Lines changed: 130 additions & 73 deletions
@@ -1,4 +1,5 @@
 import random
+from datetime import datetime
 from typing import List
 
 from rdflib import XSD, BNode, Graph, Literal, URIRef
@@ -12,62 +13,92 @@
 
 MPM = {
     "social worse": {
-        Comparison.signal_type: 0.5,
-        History.signal_type: -0.5,
+        "comparison_size": 0.5,
+        "message_recency": 0.9,
+        "message_received_count": 0.5,
+        "measure_recency": 0.5,
         "coachiness": 1.0,
     },
     "social better": {
-        Comparison.signal_type: 0.5,
-        History.signal_type: -0.1,
+        "comparison_size": 0.5,
+        "message_recency": 0.9,
+        "message_received_count": 0.9,
+        "measure_recency": 0.5,
         "coachiness": 0.0,
     },
-    "improving": {Trend.signal_type: 0.8, History.signal_type: -0.1, "coachiness": 0.5},
-    "worsening": {Trend.signal_type: 0.8, History.signal_type: -0.5, "coachiness": 1.0},
+    "improving": {
+        "trend_size": 0.8,
+        "message_recency": 0.9,
+        "message_received_count": 0.9,
+        "measure_recency": 1.0,
+        "coachiness": 0.5,
+    },
+    "worsening": {
+        "trend_size": 0.8,
+        "message_recency": 0.9,
+        "message_received_count": 0.5,
+        "measure_recency": 1.0,
+        "coachiness": 1.0,
+    },
     "goal gain": {
-        Comparison.signal_type: 0.5,
-        Trend.signal_type: 0.8,
+        "comparison_size": 0.5,
+        "trend_size": 0.8,
         "achievement_recency": 0.5,
-        History.signal_type: -0.1,
+        "message_recency": 0.9,
+        "message_received_count": 0.9,
+        "measure_recency": 0.5,
         "coachiness": 0.5,
     },
     "goal loss": {
-        Comparison.signal_type: 0.5,
-        Trend.signal_type: 0.8,
+        "comparison_size": 0.5,
+        "trend_size": 0.8,
         "loss_recency": 0.5,
-        History.signal_type: -0.5,
+        "message_recency": 0.9,
+        "message_received_count": 0.5,
+        "measure_recency": 0.5,
         "coachiness": 1.0,
     },
     "social gain": {
-        Comparison.signal_type: 0.5,
-        Trend.signal_type: 0.8,
+        "comparison_size": 0.5,
+        "trend_size": 0.8,
         "achievement_recency": 0.5,
-        History.signal_type: -0.1,
+        "message_recency": 0.9,
+        "message_received_count": 0.9,
+        "measure_recency": 0.5,
         "coachiness": 0.5,
     },
     "social loss": {
-        Comparison.signal_type: 0.5,
-        Trend.signal_type: 0.8,
+        "comparison_size": 0.5,
+        "trend_size": 0.8,
         "loss_recency": 0.5,
-        History.signal_type: -0.5,
+        "message_recency": 0.9,
+        "message_received_count": 0.5,
+        "measure_recency": 0.5,
         "coachiness": 1.0,
     },
     "goal worse": {
-        Comparison.signal_type: 0.5,
-        History.signal_type: -0.5,
+        "comparison_size": 0.5,
+        "message_recency": 0.9,
+        "message_received_count": 0.5,
+        "measure_recency": 0.5,
         "coachiness": 1.0,
     },
     "social approach": {
-        Comparison.signal_type: 0.5,
-        Trend.signal_type: 0.8,
+        "comparison_size": 0.5,
+        "trend_size": 0.8,
         "achievement_recency": 0.5,
-        History.signal_type: -0.1,
+        "message_recency": 0.9,
+        "message_received_count": 0.9,
+        "measure_recency": 1.0,
         "coachiness": 1.0,
     },
     "goal approach": {
-        Comparison.signal_type: 0.5,
-        Trend.signal_type: 0.8,
+        "comparison_size": 0.5,
+        "trend_size": 0.8,
         "achievement_recency": 0.5,
-        History.signal_type: -0.1,
+        "message_recency": 0.9,
+        "message_received_count": 0.9,
+        "measure_recency": 1.0,
         "coachiness": 1.0,
     },
 }
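Note the shape change in this matrix: the signed History.signal_type weight (e.g. -0.5) is gone, replaced by three positive history moderators (message_recency, message_received_count, measure_recency) alongside the renamed comparison_size/trend_size keys. A quick sketch of how the matrix is read (pathway name and values taken from the table above):

# Moderator weights are looked up per causal pathway:
weights = MPM["social approach"]
weights["coachiness"]        # 1.0 -> top coachiness tier in select_candidate
weights["message_recency"]   # 0.9 -> recent messages dominate the history score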
@@ -97,7 +128,7 @@ def score(candidate: Resource, history: dict, preferences: dict) -> Resource:
         "social loss": {"score": score_loss, "rules": rule_social_lowest},
         "goal worse": {"score": score_worse, "rules": null_rule},
         "goal approach": {"score": score_approach, "rules": null_rule},
-        "social approach": {"score": score_approach, "rules": rule_social_highest},
+        "social approach": {"score": score_approach, "rules": rule_social_lowest},
     }
 
     causal_pathway = candidate.value(SLOWMO.AcceptableBy)
@@ -125,48 +156,33 @@
     )
 
     # coachiness
-    coachiness_score = MPM[causal_pathway.value]["coachiness"] / 10
+    coachiness_score = MPM[causal_pathway.value]["coachiness"]
     candidate[URIRef("coachiness_score")] = Literal(
         coachiness_score, datatype=XSD.double
     )
 
-    final_calculated_score = final_score(
-        mi_score, history_score, preference_score, coachiness_score
-    )
+    final_calculated_score = final_score(mi_score, history_score, preference_score)
 
     candidate[SLOWMO.Score] = Literal(final_calculated_score, datatype=XSD.double)
 
     return candidate
 
 
-def final_score(m, h, p, c):
+def final_score(m, h, p):
     """
     the function, final_score, takes two inputs, s and p. the range for s is 0 to 1. the range for p is -2 to +2. The function f(s,p) increases with either s or p increasing. The function should have the following constraints: f(1,-2) == f(.5, 0) == f(0,2) and f(0.5, -2) == f(0.25, -1) == f(0, 0).
     """
 
-    s = m + h
-    # Define the scaling factors for s and p
-    scale_s = 4  # default to stated range of p
-    scale_p = 1  # default to stated range of 2
-
-    # Calculate the base value for the constraints, e.g. f(1,-2) == f(0.5, 0) == f(0,2)
-    # base_value = scale_s * 0.5 + scale_p * 0.0  # default to mid-points of stated ranges
-    base_value = scale_s * 0.5 + scale_p * 0  # default to mid-points of stated ranges
+    score = m * 1 + h * 2 + p * 1.3
 
-    # Adjust the function to increase with either s or p increasing
-    score = (scale_s * s + scale_p * p + base_value) / (scale_s + scale_p + base_value)
-
-    # apply coachiness factor
-    score = score + c
-
-    return score
+    return round(score, 1)
 
 
 def score_better(candidate: Resource, motivating_informations: List[Resource]) -> float:
     moderators = comparator_moderators(candidate, motivating_informations, Comparison)
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
-    score = (moderators["gap_size"]) * mpm[Comparison.signal_type]
+    score = moderators["gap_size"]  # * mpm["comparison_size"]
 
     return score
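The rewritten final_score is a plain linear blend with rounding: the history sub-score counts double, preferences 1.3x, and the motivating-information sub-score 1x (coachiness now influences selection, not the score). A worked example with illustrative inputs:

# final_score(m, h, p) == round(m * 1 + h * 2 + p * 1.3, 1)
final_score(0.5, 1.0, 1.0)  # 0.5 + 2.0 + 1.3 -> 3.8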

@@ -249,7 +265,7 @@ def score_worse(candidate: Resource, motivating_informations: List[Resource]) ->
     moderators = comparator_moderators(candidate, motivating_informations, Comparison)
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
-    score = (moderators["gap_size"]) * mpm[Comparison.signal_type]
+    score = moderators["gap_size"]  # * mpm["comparison_size"]
 
     return score
 
@@ -260,7 +276,7 @@ def score_improving(
     moderators = Trend.moderators(motivating_informations)[0]
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
-    score = (moderators["trend_size"]) * mpm[Trend.signal_type]
+    score = moderators["trend_size"]  # * mpm["trend_size"]
 
     return score
 
@@ -272,7 +288,7 @@ def score_worsening(
     moderators = Trend.moderators(motivating_informations)[0]
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
-    score = (moderators["trend_size"]) * mpm[Trend.signal_type]
+    score = moderators["trend_size"]  # * mpm["trend_size"]
 
     return score
 
@@ -284,10 +300,10 @@ def score_approach(
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
     score = (
-        moderators["gap_size"] * mpm[Comparison.signal_type]
-        + moderators["trend_size"] * mpm[Trend.signal_type]
+        moderators["gap_size"] * mpm["comparison_size"]
+        + moderators["trend_size"] * mpm["trend_size"]
         + moderators["streak_length"] * mpm["achievement_recency"]
-    )
+    ) / (mpm["comparison_size"] + mpm["trend_size"] + mpm["achievement_recency"])
 
     return score
 
@@ -297,10 +313,10 @@ def score_gain(candidate: Resource, motivating_informations: List[Resource]) ->
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
     score = (
-        moderators["gap_size"] * mpm[Comparison.signal_type]
-        + moderators["trend_size"] * mpm[Trend.signal_type]
+        moderators["gap_size"] * mpm["comparison_size"]
+        + moderators["trend_size"] * mpm["trend_size"]
         + moderators["streak_length"] * mpm["achievement_recency"]
-    )
+    ) / (mpm["comparison_size"] + mpm["trend_size"] + mpm["achievement_recency"])
 
     return score
 
@@ -310,10 +326,10 @@ def score_loss(candidate: Resource, motivating_informations: List[Resource]) ->
     mpm = MPM[candidate.value(SLOWMO.AcceptableBy).value]
 
     score = (
-        moderators["gap_size"] * mpm[Comparison.signal_type]
-        + moderators["trend_size"] * mpm[Trend.signal_type]
+        moderators["gap_size"] * mpm["comparison_size"]
+        + moderators["trend_size"] * mpm["trend_size"]
         + moderators["streak_length"] * mpm["loss_recency"]
-    )
+    ) / (mpm["comparison_size"] + mpm["trend_size"] + mpm["loss_recency"])
 
     return score
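Dividing the weighted sum by the sum of the pathway's weights turns each of these sub-scores into a weighted average, so results stay in [0, 1] and remain comparable across pathways with different numbers of moderators. A worked example with illustrative moderator values and the "goal gain" weights from the MPM table:

# Illustrative moderators (assumed normalized to [0, 1]); weights from MPM.
gap_size, trend_size, streak_length = 0.4, 0.6, 0.2
weighted_sum = 0.4 * 0.5 + 0.6 * 0.8 + 0.2 * 0.5  # = 0.78
sub_score = weighted_sum / (0.5 + 0.8 + 0.5)       # ~= 0.43, within [0, 1]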

@@ -332,7 +348,7 @@ def comparator_moderators(candidate, motivating_informations, signal: Signal):
     return scoring_detail
 
 
-def score_history(candidate, history) -> float:
+def score_history(candidate: Resource, history) -> float:
     """
     calculates history sub-score.
 
@@ -344,24 +360,35 @@ def score_history(candidate, history) -> float:
         float: history sub-score.
     """
     if not history or not settings.use_history:
-        return 0.0
+        return 1.0
 
     # turn candidate resource into a 'history' element for the current month
-    current_hist = History.to_element(candidate)
+    g: Graph = candidate.graph
+    performance_month = next(g.objects(None, SLOWMO.PerformanceMonth)).value
     # add to history
-    history.update(current_hist)
+    # history[performance_month] = History.to_element(candidate)
 
-    signals = History.detect(history)
+    signals = History.detect(
+        history,
+        {datetime.fromisoformat(performance_month): History.to_element(candidate)},
+    )
 
    if not signals:
-        return 0.0
+        return 1.0
 
     mod = History.moderators(signals)[0]
-    score = mod["recurrence_count"]
 
     causal_pathway = candidate.value(SLOWMO.AcceptableBy)
 
-    return score * MPM[causal_pathway.value][History.signal_type]
+    return (
+        mod["message_recurrence"] * MPM[causal_pathway.value]["message_received_count"]
+        + mod["message_recency"] * MPM[causal_pathway.value]["message_recency"]
+        + mod["measure_recency"] * MPM[causal_pathway.value]["measure_recency"]
+    ) / (
+        MPM[causal_pathway.value]["message_received_count"]
+        + MPM[causal_pathway.value]["message_recency"]
+        + MPM[causal_pathway.value]["measure_recency"]
+    )
 
 
 def score_preferences(candidate_resource: Resource, preferences: dict) -> float:
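score_history follows the same pattern: instead of a negative recurrence penalty, it now returns a normalized blend of three history moderators, and it defaults to 1.0 (neutral) when history is disabled or absent. A standalone sketch of the blend, using illustrative moderator values with the "social worse" weights from the MPM table:

# Illustrative History moderators in [0, 1]; weights from MPM["social worse"].
mod = {"message_recurrence": 0.3, "message_recency": 0.8, "measure_recency": 0.5}
w = {"message_received_count": 0.5, "message_recency": 0.9, "measure_recency": 0.5}

history_score = (
    mod["message_recurrence"] * w["message_received_count"]
    + mod["message_recency"] * w["message_recency"]
    + mod["measure_recency"] * w["measure_recency"]
) / sum(w.values())  # (0.15 + 0.72 + 0.25) / 1.9 ~= 0.59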
@@ -395,7 +422,7 @@ def score_preferences(candidate_resource: Resource, preferences: dict) -> float:
     }
 
     key = map_cp_to_preferences.get(candidate_resource.value(SLOWMO.AcceptableBy).value)
-    return float(preferences.get(key, 0.0))
+    return preferences.get(key, 0.0)
 
 
 def select_candidate(performer_graph: Graph) -> BNode:
@@ -416,15 +443,37 @@ def select_candidate(performer_graph: Graph) -> BNode:
     if not set(performer_graph[: SLOWMO.AcceptableBy :]):
         return None
 
+    # filter acceptable candidates
+    candidates = utils.candidates(performer_graph, filter_acceptable=True)
+
+    # filter scored candidates
+    candidates = [
+        candidate
+        for candidate in candidates
+        if (candidate.value(URIRef("coachiness_score")) is not None)
+    ]
+
+    if settings.use_coachiness:
+        # filter highest coachiness candidates
+        highest_coachiness_candidates = candidates_from_coachiness_category(
+            candidates, category=1.0
+        )
+        if not highest_coachiness_candidates:
+            highest_coachiness_candidates = candidates_from_coachiness_category(
+                candidates, category=0.5
+            )
+        if highest_coachiness_candidates:
+            candidates = highest_coachiness_candidates
+
     max_score = max(
-        [score for _, score in performer_graph.subject_objects(SLOWMO.Score)],
+        [candidate.value(SLOWMO.Score).value for candidate in candidates],
         default=None,
     )
 
     candidates_with_max_score = [
-        (candidate)
-        for candidate, score in performer_graph.subject_objects(SLOWMO.Score)
-        if score == max_score
+        (candidate.identifier)
+        for candidate in candidates
+        if candidate.value(SLOWMO.Score).value == max_score
     ]
 
     # Randomly select one of the candidates with the known maximum score
@@ -433,3 +482,11 @@ def select_candidate(performer_graph: Graph) -> BNode:
     performer_graph.add((selected_candidate, SLOWMO.Selected, Literal(True)))
 
     return selected_candidate
+
+
+def candidates_from_coachiness_category(candidates, category):
+    return [
+        candidate
+        for candidate in candidates
+        if (candidate.value(URIRef("coachiness_score")).value == category)
+    ]
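With use_coachiness enabled, selection prefers the highest coachiness tier that is non-empty: try 1.0, fall back to 0.5, and otherwise keep the whole pool; the max-score filter and random tie-break then apply only within the surviving pool. A self-contained sketch of that tiering over plain dicts (names and values are illustrative):

# Mirrors select_candidate's tiered fallback, using plain dicts for clarity.
candidates = [
    {"id": "c1", "coachiness": 1.0, "score": 3.1},
    {"id": "c2", "coachiness": 0.5, "score": 3.8},
]

def by_tier(cands, tier):
    return [c for c in cands if c["coachiness"] == tier]

pool = by_tier(candidates, 1.0) or by_tier(candidates, 0.5) or candidates
best = max(pool, key=lambda c: c["score"])  # c1: tier outranks raw score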
