
Commit 7d3fc42

Merge pull request #1269 from danilobellini/detective
Add Detective strategy from Nicky Case's game
2 parents e9251b9 + 12d11f3 commit 7d3fc42

4 files changed: 87 additions & 0 deletions

axelrod/strategies/_strategies.py

Lines changed: 2 additions & 0 deletions
@@ -163,6 +163,7 @@
 from .oncebitten import FoolMeForever, FoolMeOnce, ForgetfulFoolMeOnce, OnceBitten
 from .prober import (
     CollectiveStrategy,
+    Detective,
     HardProber,
     NaiveProber,
     Prober,
@@ -285,6 +286,7 @@
     DefectorHunter,
     Desperate,
     DelayedAON1,
+    Detective,
     DoubleCrosser,
     Doubler,
     DoubleResurrection,
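
The two added lines register the new strategy: the import makes Detective available from the package's top level, and the list entry adds it to the library's strategy collection. A minimal sanity check of that registration (a sketch, not part of the commit, assuming the collection is re-exported as axl.all_strategies as elsewhere in the library):

import axelrod as axl

# The import hunk exposes the class at the package top level.
detective = axl.Detective()
print(detective.name)  # "Detective"

# The list hunk registers it with the strategy collection
# (assumption: the collection is re-exported as axl.all_strategies).
print(axl.Detective in axl.all_strategies)  # True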

axelrod/strategies/prober.py

Lines changed: 39 additions & 0 deletions
@@ -48,6 +48,45 @@ def strategy(self, opponent: Player) -> Action:
         return D
 
 
+class Detective(Player):
+    """
+    Starts with C, D, C, C, or with the given sequence of actions.
+    If the opponent defects at least once in the first fixed rounds,
+    play as TFT forever, else defect forever.
+
+    Names:
+
+    - Detective: [NC2019]_
+    """
+
+    name = "Detective"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, initial_actions: List[Action] = None) -> None:
+        super().__init__()
+        if initial_actions is None:
+            self.initial_actions = [C, D, C, C]
+        else:
+            self.initial_actions = initial_actions
+
+    def strategy(self, opponent: Player) -> Action:
+        hist_size = len(self.history)
+        init_size = len(self.initial_actions)
+        if hist_size < init_size:
+            return self.initial_actions[hist_size]
+        if D not in opponent.history[:init_size]:
+            return D
+        return opponent.history[-1]  # TFT
+
+
 class Prober(Player):
     """
     Plays D, C, C initially. Defects forever if opponent cooperated in moves 2
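
To illustrate the behaviour described in the docstring, the sketch below runs Detective against two standard opponents using the library's Match class. It is not part of the commit and assumes the usual axelrod API (axl.Match), plus Detective being exported as registered above.

import axelrod as axl

# Against a pure Cooperator the C, D, C, C probe is never punished,
# so Detective switches to permanent defection once the probe is over.
match = axl.Match((axl.Detective(), axl.Cooperator()), turns=8)
print(match.play())
# [(C, C), (D, C), (C, C), (C, C), (D, C), (D, C), (D, C), (D, C)]

# Against Tit For Tat the probe is answered with a defection in round 3,
# so Detective falls back to Tit For Tat and mutual cooperation resumes.
match = axl.Match((axl.Detective(), axl.TitForTat()), turns=8)
print(match.play())
# [(C, C), (D, C), (C, D), (C, C), (C, C), (C, C), (C, C), (C, C)]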

axelrod/tests/strategies/test_prober.py

Lines changed: 45 additions & 0 deletions
@@ -36,6 +36,51 @@ def test_strategy(self):
         self.versus_test(opponent=axelrod.Defector(), expected_actions=actions)
 
 
+class TestDetective(TestPlayer):
+
+    name = "Detective"
+    player = axelrod.Detective
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        self.versus_test(
+            opponent=axelrod.TitForTat(),
+            expected_actions=[(C, C), (D, C), (C, D)] + [(C, C)] * 15,
+        )
+
+        self.versus_test(
+            opponent=axelrod.Cooperator(),
+            expected_actions=[(C, C), (D, C), (C, C), (C, C)] + [(D, C)] * 15,
+        )
+
+        self.versus_test(
+            opponent=axelrod.Defector(),
+            expected_actions=[(C, D), (D, D), (C, D), (C, D)] + [(D, D)] * 15,
+        )
+
+    def test_other_initial_actions(self):
+        self.versus_test(
+            opponent=axelrod.TitForTat(),
+            expected_actions=[(C, C), (C, C), (D, C)] + [(D, D)] * 15,
+            init_kwargs={"initial_actions": [C, C]},
+        )
+
+        # Extreme case: no memory at all, it's simply a defector
+        self.versus_test(
+            opponent=axelrod.TitForTat(),
+            expected_actions=[(D, C)] + [(D, D)] * 15,
+            init_kwargs={"initial_actions": []},
+        )
+
+
 class TestProber(TestPlayer):
 
     name = "Prober"
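
The second test exercises the initial_actions parameter directly. The same two edge cases can be reproduced outside the test harness; the sketch below is not part of the commit and again assumes the standard axl.Match and axl.Action API.

import axelrod as axl

C = axl.Action.C

# A two-round probe of C, C never provokes Tit For Tat, so the opponent's
# early history contains no defection and Detective defects from round 3 on.
match = axl.Match((axl.Detective(initial_actions=[C, C]), axl.TitForTat()), turns=6)
print(match.play())
# [(C, C), (C, C), (D, C), (D, D), (D, D), (D, D)]

# With an empty probe there is nothing to inspect: opponent.history[:0]
# can never contain D, so Detective reduces to a plain Defector.
match = axl.Match((axl.Detective(initial_actions=[]), axl.TitForTat()), turns=4)
print(match.play())
# [(D, C), (D, D), (D, D), (D, D)]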

docs/reference/bibliography.rst

Lines changed: 1 addition & 0 deletions
@@ -45,6 +45,7 @@ documentation.
    International Conference on Autonomous Agents and Multiagent Systems.
 .. [Mittal2009] Mittal, S., & Deb, K. (2009). Optimal strategies of the iterated prisoner’s dilemma problem for multiple conflicting objectives. IEEE Transactions on Evolutionary Computation, 13(3), 554–565. https://doi.org/10.1109/TEVC.2008.2009459
 .. [Nachbar1992] Nachbar J., Evolution in the finitely repeated prisoner’s dilemma, Journal of Economic Behavior & Organization, 19(3): 307-326, 1992.
+.. [NC2019] https://github.com/ncase/trust (Accessed: 30 October 2019)
 .. [Nowak1989] Nowak, Martin, and Karl Sigmund. "Game-dynamical aspects of the prisoner's dilemma." Applied Mathematics and Computation 30.3 (1989): 191-213.
 .. [Nowak1990] Nowak, M., & Sigmund, K. (1990). The evolution of stochastic strategies in the Prisoner's Dilemma. Acta Applicandae Mathematica. https://link.springer.com/article/10.1007/BF00049570
 .. [Nowak1992] Nowak, M., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0
