
Commit 10bf24a

Merge pull request #794 from Axelrod-Python/nice_meta
Nice meta
2 parents 3f8f9a4 + 90a7a38 commit 10bf24a

8 files changed: +213 -191 lines changed

axelrod/strategies/__init__.py

Lines changed: 27 additions & 23 deletions
@@ -11,31 +11,35 @@
     MetaMajorityMemoryOne, MetaMajorityFiniteMemory, MetaMajorityLongMemory,
     MetaMinority, MetaMixer, MetaWinner, MetaWinnerDeterministic,
     MetaWinnerEnsemble, MetaWinnerMemoryOne, MetaWinnerFiniteMemory,
-    MetaWinnerLongMemory, MetaWinnerStochastic, MWEDeterministic,
-    MWEFiniteMemory, MWELongMemory, MWEMemoryOne, MWEStochastic
+    MetaWinnerLongMemory, MetaWinnerStochastic, NMWEDeterministic,
+    NMWEFiniteMemory, NMWELongMemory, NMWEMemoryOne, NMWEStochastic,
+    NiceMetaWinner, NiceMetaWinnerEnsemble,
     )
 
-all_strategies += [MetaHunter,
-                   MetaHunterAggressive,
-                   MetaMajority,
-                   MetaMajorityMemoryOne,
-                   MetaMajorityFiniteMemory,
-                   MetaMajorityLongMemory,
-                   MetaMinority,
-                   MetaMixer,
-                   MetaWinner,
-                   MetaWinnerDeterministic,
-                   MetaWinnerEnsemble,
-                   MetaWinnerMemoryOne,
-                   MetaWinnerFiniteMemory,
-                   MetaWinnerLongMemory,
-                   MetaWinnerStochastic,
-                   MWEDeterministic,
-                   MWEFiniteMemory,
-                   MWELongMemory,
-                   MWEMemoryOne,
-                   MWEStochastic
-                   ]
+all_strategies += [
+    MetaHunter,
+    MetaHunterAggressive,
+    MetaMajority,
+    MetaMajorityMemoryOne,
+    MetaMajorityFiniteMemory,
+    MetaMajorityLongMemory,
+    MetaMinority,
+    MetaMixer,
+    MetaWinner,
+    MetaWinnerDeterministic,
+    MetaWinnerEnsemble,
+    MetaWinnerMemoryOne,
+    MetaWinnerFiniteMemory,
+    MetaWinnerLongMemory,
+    MetaWinnerStochastic,
+    NMWEDeterministic,
+    NMWEFiniteMemory,
+    NMWELongMemory,
+    NMWEMemoryOne,
+    NMWEStochastic,
+    NiceMetaWinner,
+    NiceMetaWinnerEnsemble
+    ]
 
 
 # Distinguished strategy collections in addition to
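With the registration above, the renamed nice ensemble strategies become reachable from the package namespace (the test file below accesses them as axl.NiceMetaWinner and friends). A minimal sketch, assuming the standard axelrod entry point; note that instantiating a meta player builds its whole team, so this is slow:

    import axelrod as axl

    # The nice meta strategies registered above are exposed at package level.
    player = axl.NiceMetaWinner()
    print(player.name)   # expected: "Nice Meta Winner"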

axelrod/strategies/meta.py

Lines changed: 50 additions & 69 deletions
@@ -1,10 +1,11 @@
 from axelrod import Actions, Player, init_args, obey_axelrod
+from axelrod.strategy_transformers import NiceTransformer
 from ._strategies import all_strategies
 from .hunter import (
     AlternatorHunter, CooperatorHunter, CycleHunter, DefectorHunter,
     EventualCycleHunter, MathConstantHunter, RandomHunter,)
 from numpy.random import choice
-from random import sample
+
 
 # Needs to be computed manually to prevent circular dependency
 ordinary_strategies = [s for s in all_strategies if obey_axelrod(s)]
@@ -56,12 +57,12 @@ def __init__(self, team=None):
         self.classifier['makes_use_of'].update(t.classifier['makes_use_of'])
 
     def strategy(self, opponent):
-        # Make sure the history of all hunters is current.
-        for ih in range(len(self.team)):
-            self.team[ih].history = self.history
-
         # Get the results of all our players.
-        results = [player.strategy(opponent) for player in self.team]
+        results = []
+        for player in self.team:
+            play = player.strategy(opponent)
+            player.history.append(play)
+            results.append(play)
 
         # A subclass should just define a way to choose the result based on
         # team results.
@@ -119,51 +120,38 @@ class MetaWinner(MetaPlayer):
     @init_args
     def __init__(self, team=None):
         super(MetaWinner, self).__init__(team=team)
-
         # For each player, we will keep the history of proposed moves and
         # a running score since the beginning of the game.
-        for t in self.team:
-            t.proposed_history = []
-            t.score = 0
-
+        self.scores = [0] * len(self.team)
         self.classifier['long_run_time'] = True
 
-    def strategy(self, opponent):
+    def _update_scores(self, opponent):
         # Update the running score for each player, before determining the
         # next move.
+        game = self.match_attributes["game"]
         if len(self.history):
-            for player in self.team:
-                game = self.match_attributes["game"]
-                last_round = (player.proposed_history[-1], opponent.history[-1])
+            for i, player in enumerate(self.team):
+                last_round = (player.history[-1], opponent.history[-1])
                 s = game.scores[last_round][0]
-                player.score += s
-        return super(MetaWinner, self).strategy(opponent)
+                self.scores[i] += s
 
     def meta_strategy(self, results, opponent):
-        scores = [pl.score for pl in self.team]
-        bestscore = max(scores)
-        beststrategies = [i for (i, pl) in enumerate(self.team)
-                          if pl.score == bestscore]
+        self._update_scores(opponent)
+        # Choice an action based on the collection of scores
+        bestscore = max(self.scores)
+        beststrategies = [i for (i, score) in enumerate(self.scores)
+                          if score == bestscore]
         bestproposals = [results[i] for i in beststrategies]
         bestresult = C if C in bestproposals else D
-
-        # Update each player's proposed history with his proposed result, but
-        # always after the new result has been settled based on scores
-        # accumulated until now.
-        for r, t in zip(results, self.team):
-            t.proposed_history.append(r)
-
-        if opponent.defections == 0:
-            # Don't poke the bear
-            return C
-
         return bestresult
 
     def reset(self):
         MetaPlayer.reset(self)
-        for t in self.team:
-            t.proposed_history = []
-            t.score = 0
+        self.scores = [0] * len(self.team)
+
+
+NiceMetaWinner = NiceTransformer()(MetaWinner)
+NiceMetaWinner.name = "Nice Meta Winner"
 
 
 class MetaWinnerEnsemble(MetaWinner):
@@ -179,25 +167,18 @@ class MetaWinnerEnsemble(MetaWinner):
     name = "Meta Winner Ensemble"
 
     def meta_strategy(self, results, opponent):
+        self._update_scores(opponent)
         # Sort by score
-        scores = [(pl.score, i) for (i, pl) in enumerate(self.team)]
+        scores = [(score, i) for (i, score) in enumerate(self.scores)]
         # Choose one of the best scorers at random
         scores.sort(reverse=True)
         prop = max(1, int(len(scores) * 0.08))
         index = choice([i for (s, i) in scores[:prop]])
+        return results[index]
 
-        # Update each player's proposed history with his proposed result, but
-        # always after the new result has been settled based on scores
-        # accumulated until now.
-        for r, t in zip(results, self.team):
-            t.proposed_history.append(r)
-
-        if opponent.defections == 0:
-            # Don't poke the bear
-            return C
 
-        # return result
-        return results[index]
+NiceMetaWinnerEnsemble = NiceTransformer()(MetaWinnerEnsemble)
+NiceMetaWinnerEnsemble.name = "Nice Meta Winner Ensemble"
 
 
 class MetaHunter(MetaPlayer):
@@ -354,7 +335,7 @@ def __init__(self):
 
 
 class MetaWinnerDeterministic(MetaWinner):
-    """Meta Winner Ensemble with the team of Deterministic Players."""
+    """Meta Winner with the team of Deterministic Players."""
 
     name = "Meta Winner Deterministic"
 
@@ -367,7 +348,7 @@ def __init__(self):
 
 
 class MetaWinnerStochastic(MetaWinner):
-    """Meta Winner Ensemble with the team of Stochastic Players."""
+    """Meta Winner with the team of Stochastic Players."""
 
     name = "Meta Winner Stochastic"
 
@@ -416,63 +397,63 @@ def meta_strategy(self, results, opponent):
         return choice(results, p=self.distribution)
 
 
-class MWEDeterministic(MetaWinnerEnsemble):
-    """Meta Winner Ensemble with the team of Deterministic Players."""
+class NMWEDeterministic(NiceMetaWinnerEnsemble):
+    """Nice Meta Winner Ensemble with the team of Deterministic Players."""
 
-    name = "MWE Deterministic"
+    name = "NMWE Deterministic"
 
     @init_args
     def __init__(self):
         team = [s for s in ordinary_strategies if
                 not s().classifier['stochastic']]
-        super(MWEDeterministic, self).__init__(team=team)
+        super(NMWEDeterministic, self).__init__(team=team)
         self.classifier["stochastic"] = True
 
 
-class MWEStochastic(MetaWinnerEnsemble):
-    """Meta Winner Ensemble with the team of Stochastic Players."""
+class NMWEStochastic(NiceMetaWinnerEnsemble):
+    """Nice Meta Winner Ensemble with the team of Stochastic Players."""
 
-    name = "MWE Stochastic"
+    name = "NMWE Stochastic"
 
     @init_args
     def __init__(self):
         team = [s for s in ordinary_strategies if
                 s().classifier['stochastic']]
-        super(MWEStochastic, self).__init__(team=team)
+        super(NMWEStochastic, self).__init__(team=team)
 
 
-class MWEFiniteMemory(MetaWinnerEnsemble):
-    """Meta Winner Ensemble with the team of Finite Memory Players."""
+class NMWEFiniteMemory(NiceMetaWinnerEnsemble):
+    """Nice Meta Winner Ensemble with the team of Finite Memory Players."""
 
-    name = "MWE Finite Memory"
+    name = "NMWE Finite Memory"
 
     @init_args
     def __init__(self):
         team = [s for s in ordinary_strategies if s().classifier['memory_depth']
                 < float('inf')]
-        super(MWEFiniteMemory, self).__init__(team=team)
+        super(NMWEFiniteMemory, self).__init__(team=team)
 
 
-class MWELongMemory(MetaWinnerEnsemble):
-    """Meta Winner Ensemble with the team of Long Memory Players."""
+class NMWELongMemory(NiceMetaWinnerEnsemble):
+    """Nice Meta Winner Ensemble with the team of Long Memory Players."""
 
-    name = "MWE Long Memory"
+    name = "NMWE Long Memory"
 
     @init_args
     def __init__(self):
         team = [s for s in ordinary_strategies if s().classifier['memory_depth']
                 == float('inf')]
-        super(MWELongMemory, self).__init__(team=team)
+        super(NMWELongMemory, self).__init__(team=team)
 
 
-class MWEMemoryOne(MetaWinnerEnsemble):
-    """Meta Winner Ensemble with the team of Memory One Players."""
+class NMWEMemoryOne(NiceMetaWinnerEnsemble):
+    """Nice Meta Winner Ensemble with the team of Memory One Players."""
 
-    name = "MWE Memory One"
+    name = "NMWE Memory One"
 
     @init_args
     def __init__(self):
         team = [s for s in ordinary_strategies if s().classifier['memory_depth']
                 <= 1]
-        super(MWEMemoryOne, self).__init__(team=team)
+        super(NMWEMemoryOne, self).__init__(team=team)
         self.classifier["long_run_time"] = False
axelrod/strategy_transformers.py

Lines changed: 16 additions & 6 deletions
@@ -17,11 +17,8 @@
 
 C, D = Actions.C, Actions.D
 
-# Note: After a transformation is applied,
-# the player's history is overwritten with the modified history
-# just like in the noisy tournament case
-# This can lead to unexpected behavior, such as when
-# FlipTransform is applied to Alternator
+# Note: After a transformation is applied, the player's history is overwritten with the modified history just like in
+# the noisy tournament case. This can lead to unexpected behavior, such as when FlipTransform is applied to Alternator.
 
 
 def StrategyTransformerFactory(strategy_wrapper, name_prefix=None):
@@ -74,7 +71,7 @@ def __call__(self, PlayerClass):
         args = self.args
         kwargs = self.kwargs
         try:
-            # if "name_prefix" in kwargs remove as only want dec arguments
+            # If "name_prefix" in kwargs remove as only want decorator arguments
            del kwargs["name_prefix"]
         except KeyError:
             pass
@@ -228,6 +225,19 @@ def forgiver_wrapper(player, opponent, action, p):
     forgiver_wrapper, name_prefix="Forgiving")
 
 
+def nice_wrapper(player, opponent, action):
+    """Makes sure that the player doesn't defect unless the opponent has already
+    defected."""
+    if action == D:
+        if opponent.defections == 0:
+            return C
+    return action
+
+
+NiceTransformer = StrategyTransformerFactory(
+    nice_wrapper, name_prefix="Nice")
+
+
 def initial_sequence(player, opponent, action, initial_seq):
     """Play the moves in `seq` first (must be a list), ignoring the strategy's
     moves until the list is exhausted."""
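nice_wrapper only intervenes when the wrapped strategy proposes D while the opponent has zero defections, so any strategy class can be made "nice" with the same one-line pattern this commit uses for the meta players. A hedged usage sketch, assuming the decorator API above and the existing axelrod.Match interface (NiceDefector is a hypothetical name introduced here for illustration):

    import axelrod as axl
    from axelrod.strategy_transformers import NiceTransformer

    # Wrap an existing strategy class; proposed defections are converted to
    # cooperations until the opponent has defected at least once.
    NiceDefector = NiceTransformer()(axl.Defector)

    players = (NiceDefector(), axl.Alternator())
    match = axl.Match(players, turns=6)
    print(match.play())   # the nice player should not defect before the Alternator does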

axelrod/tests/unit/test_classification.py

Lines changed: 8 additions & 6 deletions
@@ -184,13 +184,15 @@ def test_long_run_strategies(self):
             axl.MetaMajorityLongMemory,
             axl.MetaWinnerLongMemory,
             axl.MetaMixer,
-            axl.MWEFiniteMemory,
+            axl.NMWEFiniteMemory,
             axl.MetaWinnerDeterministic,
-            axl.MWELongMemory,
-            axl.MWEStochastic,
-            axl.MWEDeterministic,
-            axl.MetaWinnerStochastic
-            ]
+            axl.NMWELongMemory,
+            axl.NMWEStochastic,
+            axl.NMWEDeterministic,
+            axl.MetaWinnerStochastic,
+            axl.NiceMetaWinner,
+            axl.NiceMetaWinnerEnsemble
+            ]
 
         self.assertEqual(str_reps(long_run_time_strategies),
                          str_reps(axl.long_run_time_strategies))
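The updated test asserts that the renamed strategies are classified as long-running. A quick, illustrative way to spot-check that flag interactively (mirrors the classifier access used in the strategies above; instantiation builds the full team, so it is slow):

    import axelrod as axl

    # Both new nice meta players should report long_run_time = True,
    # matching the long_run_time_strategies list asserted above.
    print(axl.NiceMetaWinner().classifier['long_run_time'])          # expected: True
    print(axl.NiceMetaWinnerEnsemble().classifier['long_run_time'])  # expected: True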
