-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathexperiment.py
More file actions
162 lines (139 loc) · 5.88 KB
/
experiment.py
File metadata and controls
162 lines (139 loc) · 5.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
from collections import defaultdict
import time
import numpy as np
from scipy import stats
from trust_graph import TrustGraph
import trust_models as tm
class Experiment(object):
    """ Represents one iteration of a contained experiment with parameters.
    An Experiment in some sense orchestrates the connections between other
    individual components, and is responsible for measurement. An Experiment
    basically does three things:
    1. Generate the graph
    2. Compute the trust model scores
    3. Measure the informativeness of those scores.
    self.global_ttms has the following format:
    {
        <global trust model>: {
            'scores': [<raw trust model score (1D)>],
            'pearson': <computed pearson score>,
            'kendalltau': <computed KT score>,
            'spearman': <computed spearman score>
        },
        ...
    }
    self.personalized_ttms has the following format:
    {
        <personalized trust model>: {
            'scores': [<raw trust model scores (2D)>],
            'pearson': {
                'values': [<pearson score for each agent (1D)],
                'mean_simple': <simple average of each agent's score>,
                'mean_weighted': <weighted average of each agent's score>
            },
            'kendalltau': ...,
            'spearman': ...
        },
        ...
    }
    self.info_scores has the following format:
    {
        'pearson': {
            'pagerank': ...,
            'hitting_time_all': ...,
            'hitting_time_top': ...,
            'max_flow': ...,
            'max_flow_weighted_means': ...,
            'shortest_path': ...,
            'shortest_path_weighted_means': ...,
        },
        'kendalltau': ...,
        'spearman': ...
    }
    self.runtimes has the following format:
    {
        'pagerank_weighted': <float>,
        ...
    }
    """
    # Format:
    # 1. True if a global TTM; False if a personalized TTM
    # 2. The name for the TTM; used as the key in the dict
    # 3. The trust model function
    # 4. (list) args to be passed to the ttm function
    # 5. (dict) kwargs to be passed to the ttm function
    TTM_PARAMS = [
        (True, 'pagerank', tm.pagerank, [], {}),
        (True, 'hitting_time_all', tm.hitting_pagerank, ['all'], {}),
        # (True, 'hitting_time_top', tm.hitting_pagerank, ['top'], {}),
        (False, 'max_flow', tm.max_flow, [], {}),
        (False, 'shortest_path', tm.shortest_path, [], {})
    ]
    MODEL_NAMES = [x[1] for x in TTM_PARAMS]
    # Correlation measures used to score informativeness. Each maps a name to
    # a scipy.stats function returning (statistic, pvalue).
    CORRELATIONS = {
        # 'pearson': stats.pearsonr,
        'kendalltau': stats.kendalltau,
        'spearman': stats.spearmanr,
    }

    def __init__(self, num_nodes, agent_type_prior, edge_strategy,
                 edges_per_node, edge_weight_strategy, num_weight_samples):
        """
        Args:
            (These are exactly the same as TrustGraph. Please refer there for
            details)
        """
        self.graph = TrustGraph(
            num_nodes, agent_type_prior, edge_strategy,
            edges_per_node, edge_weight_strategy, num_weight_samples)
        self.global_ttms = defaultdict(dict)
        self.personalized_ttms = defaultdict(dict)
        self.info_scores = defaultdict(dict)
        self.runtimes = dict()

    def compute_informativeness(self):
        """ Compute trust model scores and measure informativeness. """
        self.compute_scores()
        self.measure_informativeness()

    def compute_scores(self):
        """ Actually run the trust model routines. Can take a while.

        Populates self.global_ttms / self.personalized_ttms with raw scores
        and records each model's wall-clock runtime in self.runtimes.
        """
        for is_global, name, tm_function, args, kwargs in self.TTM_PARAMS:
            # First calculate the runtime of the method.
            # NOTE: time.clock() was removed in Python 3.8; perf_counter()
            # is the recommended high-resolution timer for elapsed time.
            start_time = time.perf_counter()
            score = tm_function(self.graph, *args, **kwargs)
            runtime = time.perf_counter() - start_time
            # Then actually store it.
            d = self.global_ttms if is_global else self.personalized_ttms
            d[name]['scores'] = score
            self.runtimes[name] = runtime

    def measure_informativeness(self):
        """ Use correlation functions to measure the informativeness of scores.
        This function should be run after compute_scores() is executed. This
        function uses three measures of correlation:
            1. Pearson product-moment correlation coefficient
            2. Kendall tau rank correlation coefficient
            3. Spearman's rank correlation coefficient

        Global models yield a single correlation per measure; personalized
        models yield one correlation per agent, summarized by both a simple
        mean and a mean weighted by agent type.
        """
        at = self.graph.agent_types
        for modelname, model in self.global_ttms.items():
            for corrname, corr in self.CORRELATIONS.items():
                # scipy correlation functions return (statistic, pvalue);
                # we only keep the statistic.
                info_score, _ = corr(at, model['scores'])
                model[corrname] = info_score
                self.info_scores[corrname][modelname] = info_score
        for modelname, model in self.personalized_ttms.items():
            for corrname, corr in self.CORRELATIONS.items():
                model[corrname] = {}
                model[corrname]['values'] = []
                for row in model['scores']:
                    # Only correlate values that are not None (a personalized
                    # model may have no score for some agent pairs).
                    none_indices = [i for i, x in enumerate(row) if x is None]
                    corrval, _ = corr(
                        [val for i, val in enumerate(at) if i not in none_indices],
                        [val for i, val in enumerate(row) if i not in none_indices])
                    model[corrname]['values'].append(corrval)
                info_score_simple = np.mean(model[corrname]['values'])
                # Weight each agent's correlation by that agent's type value.
                info_score_weighted = np.average(model[corrname]['values'],
                                                 weights=at)
                model[corrname]['mean_simple'] = info_score_simple
                model[corrname]['mean_weighted'] = info_score_weighted
                self.info_scores[corrname][modelname] = info_score_simple
                self.info_scores[corrname][modelname + '_weighted_means'] = \
                        info_score_weighted