Skip to content

Commit 1806289

Browse files
committed
Reorg of ANN evolver code
1 parent 3a09e13 commit 1806289

File tree

1 file changed

+65
-66
lines changed

1 file changed

+65
-66
lines changed

ann_evolve.py

Lines changed: 65 additions & 66 deletions
Original file line number | Diff line number | Diff line change
@@ -12,6 +12,69 @@
1212
from statistics import mean, pstdev
1313

1414
import axelrod
15+
from axelrod.strategies.ann import split_weights
16+
17+
18+
19+
20+
# def split_weights(weights, input_values, hidden_layer_size):
21+
# number_of_input_to_hidden_weights = input_values * hidden_layer_size
22+
# number_of_hidden_bias_weights = hidden_layer_size
23+
# number_of_hidden_to_output_weights = hidden_layer_size
24+
#
25+
# input2hidden = []
26+
# for i in range(0, number_of_input_to_hidden_weights, input_values):
27+
# input2hidden.append(weights[i:i + input_values])
28+
#
29+
# hidden2output = weights[
30+
# number_of_input_to_hidden_weights:number_of_input_to_hidden_weights + number_of_hidden_to_output_weights]
31+
# bias = weights[
32+
# number_of_input_to_hidden_weights + number_of_hidden_to_output_weights:]
33+
#
34+
# return (input2hidden, hidden2output, bias)
35+
36+
37+
def get_random_weights(number):
    """Return a list of ``number`` weights drawn uniformly from [-1, 1]."""
    weights = []
    for _ in range(number):
        weights.append(random.uniform(-1, 1))
    return weights
39+
40+
41+
def score_single(my_strategy_factory, other_strategy_factory, iterations=200,
                 debug=False):
    """Play my strategy against one opponent and return its mean per-turn score.

    Parameters
    ----------
    my_strategy_factory : callable
        Zero-argument callable producing a fresh instance of my player.
    other_strategy_factory : callable
        Zero-argument callable producing a fresh opponent; its
        ``classifier['stochastic']`` flag decides how many repetitions to run.
    iterations : int
        Number of turns per match (also passed as the tournament length).
    debug : bool
        Accepted for interface compatibility; not used here.

    Returns
    -------
    float
        Mean per-turn score for my player across all repetitions.
    """
    # Stochastic opponents need several repetitions for a stable estimate;
    # deterministic ones always produce the same match, so one run suffices.
    if other_strategy_factory.classifier['stochastic']:
        repetitions = 10
    else:
        repetitions = 1
    all_scores = []
    for _ in range(repetitions):
        me = my_strategy_factory()
        other = other_strategy_factory()
        me.set_tournament_attributes(length=iterations)
        other.set_tournament_attributes(length=iterations)

        g = axelrod.Game()
        for _ in range(iterations):
            me.play(other)
        # Average my score over the turns of this match; score(pair)[0] is
        # my side of the (my_move, their_move) payoff.
        iteration_score = sum([g.score(pair)[0] for pair in
                               zip(me.history, other.history)]) / iterations
        all_scores.append(iteration_score)
    # BUG FIX: the function previously fell off the end and returned None,
    # which made score_for's sum() over these results raise TypeError.
    return mean(all_scores)
61+
62+
def score_all_weights(population):
    """Score every weight vector in ``population`` in parallel.

    Uses the module-level ``pool`` to map ``score_weights`` over the
    population, then returns the (score, weights) pairs best-first.
    """
    scored = pool.map(score_weights, population)
    return sorted(scored, reverse=True)
64+
65+
66+
def score_weights(weights):
    """Evaluate one flat weight vector and return a (score, weights) pair.

    The flat vector is unpacked into the ANN's layer weights using the
    module-level ``input_values`` and ``hidden_layer_size``, and the
    resulting network is scored against the module-level ``strategies``.
    """
    input2hidden, hidden2output, bias = split_weights(
        weights, input_values, hidden_layer_size)
    make_player = lambda: ANN(input2hidden, hidden2output, bias)
    return (score_for(make_player, strategies), weights)
69+
70+
71+
def score_for(my_strategy_factory, other_strategies=strategies, iterations=200,
              debug=False):
    """Return my strategy's score averaged over a set of opponents.

    Parameters
    ----------
    my_strategy_factory : callable
        Zero-argument callable producing a fresh instance of my player.
    other_strategies : iterable
        Opponent strategy factories to play against (defaults to the
        module-level ``strategies``).
    iterations : int
        Number of turns per match, forwarded to ``score_single``.
    debug : bool
        Forwarded to ``score_single``.

    Returns
    -------
    float
        Mean of the per-opponent scores.
    """
    # BUG FIX: the original bound `map(...)` (a lazy iterator in Python 3)
    # and then called len() on it, which raises TypeError. Materialize the
    # scores into a list so both sum() and len() work.
    my_scores = [
        score_single(my_strategy_factory, opponent, iterations, debug=debug)
        for opponent in other_strategies
    ]
    my_average_score = sum(my_scores) / len(my_scores)
    return my_average_score
1578

1679

1780
def evolve(starting_weights, mutation_rate, mutation_distance, generations,
@@ -38,7 +101,7 @@ def evolve(starting_weights, mutation_rate, mutation_distance, generations,
38101
for i in range(len(c)):
39102
if random.random() < mutation_rate:
40103
c[i] = c[i] * (
41-
1 + (random.uniform(-1, 1) * mutation_distance))
104+
1 + (random.uniform(-1, 1) * mutation_distance))
42105

43106
population = copies + weights_to_copy
44107

@@ -60,68 +123,4 @@ def evolve(starting_weights, mutation_rate, mutation_distance, generations,
60123
mutation_rate *= 0.99
61124
mutation_distance *= 0.99
62125

63-
return (current_bests)
64-
65-
66-
def get_random_weights(number):
67-
return [random.uniform(-1, 1) for _ in range(number)]
68-
69-
70-
def score_single(my_strategy_factory, other_strategy_factory, iterations=200,
71-
debug=False):
72-
if other_strategy_factory.classifier['stochastic']:
73-
repetitions = 10
74-
else:
75-
repetitions = 1
76-
all_scores = []
77-
for _ in range(repetitions):
78-
me = my_strategy_factory()
79-
other = other_strategy_factory()
80-
me.set_tournament_attributes(length=iterations)
81-
other.set_tournament_attributes(length=iterations)
82-
83-
g = axelrod.Game()
84-
for _ in range(iterations):
85-
me.play(other)
86-
# print(me.history)
87-
iteration_score = sum([g.score(pair)[0] for pair in
88-
zip(me.history, other.history)]) / iterations
89-
all_scores.append(iteration_score)
90-
91-
92-
def split_weights(weights, input_values, hidden_layer_size):
93-
number_of_input_to_hidden_weights = input_values * hidden_layer_size
94-
number_of_hidden_bias_weights = hidden_layer_size
95-
number_of_hidden_to_output_weights = hidden_layer_size
96-
97-
input2hidden = []
98-
for i in range(0, number_of_input_to_hidden_weights, input_values):
99-
input2hidden.append(weights[i:i + input_values])
100-
101-
hidden2output = weights[
102-
number_of_input_to_hidden_weights:number_of_input_to_hidden_weights + number_of_hidden_to_output_weights]
103-
bias = weights[
104-
number_of_input_to_hidden_weights + number_of_hidden_to_output_weights:]
105-
106-
return (input2hidden, hidden2output, bias)
107-
108-
109-
def score_all_weights(population):
110-
return sorted(pool.map(score_weights, population), reverse=True)
111-
112-
113-
def _score_weights(weights):
114-
in2h, h2o, bias = split_weights(weights, input_values, hidden_layer_size)
115-
return (score_for(lambda: ANN(in2h, h2o, bias), strategies), weights)
116-
117-
118-
score_weights = _score_weights
119-
120-
121-
def score_for(my_strategy_factory, other_strategies=strategies, iterations=200,
122-
debug=False):
123-
my_scores = map(
124-
lambda x: score_single(my_strategy_factory, x, iterations, debug=debug),
125-
other_strategies)
126-
my_average_score = sum(my_scores) / len(my_scores)
127-
return (my_average_score)
126+
return current_bests

0 commit comments

Comments
 (0)