Skip to content

Add Detective strategy from Nicky Case game #1269

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Oct 30, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions axelrod/strategies/_strategies.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,7 @@
from .oncebitten import FoolMeForever, FoolMeOnce, ForgetfulFoolMeOnce, OnceBitten
from .prober import (
CollectiveStrategy,
Detective,
HardProber,
NaiveProber,
Prober,
Expand Down Expand Up @@ -285,6 +286,7 @@
DefectorHunter,
Desperate,
DelayedAON1,
Detective,
DoubleCrosser,
Doubler,
DoubleResurrection,
Expand Down
39 changes: 39 additions & 0 deletions axelrod/strategies/prober.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,45 @@ def strategy(self, opponent: Player) -> Action:
return D


class Detective(Player):
    """
    Opens with the fixed probe sequence C, D, C, C (or a caller-supplied
    sequence of actions).  After the probe, the opponent's replies during
    those opening rounds decide the rest of the game: an opponent that
    retaliated with at least one D is met with Tit For Tat from then on,
    while an opponent that never defected is exploited with unconditional
    defection.

    Names:

    - Detective: [NC2019]_
    """

    name = "Detective"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, initial_actions: List[Action] = None) -> None:
        super().__init__()
        # Default probe is the canonical opening from the Nicky Case game.
        if initial_actions is None:
            initial_actions = [C, D, C, C]
        self.initial_actions = initial_actions

    def strategy(self, opponent: Player) -> Action:
        turn = len(self.history)
        probe_length = len(self.initial_actions)
        # Still inside the opening probe: play the scripted action.
        if turn < probe_length:
            return self.initial_actions[turn]
        # Probe over: an opponent that never defected gets exploited...
        if D not in opponent.history[:probe_length]:
            return D
        # ...while a retaliator is answered with Tit For Tat.
        return opponent.history[-1]


class Prober(Player):
"""
Plays D, C, C initially. Defects forever if opponent cooperated in moves 2
Expand Down
45 changes: 45 additions & 0 deletions axelrod/tests/strategies/test_prober.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,51 @@ def test_strategy(self):
self.versus_test(opponent=axelrod.Defector(), expected_actions=actions)


class TestDetective(TestPlayer):

    name = "Detective"
    player = axelrod.Detective
    expected_classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def test_strategy(self):
        # TFT punishes the probe's defection, so Detective settles into
        # Tit For Tat and cooperation resumes.
        vs_tft = [(C, C), (D, C), (C, D)] + [(C, C)] * 15
        self.versus_test(opponent=axelrod.TitForTat(), expected_actions=vs_tft)

        # A pure cooperator never retaliates during the probe and is
        # exploited with defection forever.
        vs_cooperator = [(C, C), (D, C), (C, C), (C, C)] + [(D, C)] * 15
        self.versus_test(
            opponent=axelrod.Cooperator(), expected_actions=vs_cooperator
        )

        # A pure defector shows D in the probe, so Detective plays
        # Tit For Tat, which here means defecting back every round.
        vs_defector = [(C, D), (D, D), (C, D), (C, D)] + [(D, D)] * 15
        self.versus_test(
            opponent=axelrod.Defector(), expected_actions=vs_defector
        )

    def test_other_initial_actions(self):
        # With an all-C probe, TFT never defects in the opening rounds,
        # so Detective defects forever afterwards.
        vs_tft = [(C, C), (C, C), (D, C)] + [(D, D)] * 15
        self.versus_test(
            opponent=axelrod.TitForTat(),
            expected_actions=vs_tft,
            init_kwargs={"initial_actions": [C, C]},
        )

        # Extreme case: no memory at all, it's simply a defector
        vs_tft_no_probe = [(D, C)] + [(D, D)] * 15
        self.versus_test(
            opponent=axelrod.TitForTat(),
            expected_actions=vs_tft_no_probe,
            init_kwargs={"initial_actions": []},
        )


class TestProber(TestPlayer):

name = "Prober"
Expand Down
1 change: 1 addition & 0 deletions docs/reference/bibliography.rst
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ documentation.
International Conference on Autonomous Agents and Multiagent Systems.
.. [Mittal2009] Mittal, S., & Deb, K. (2009). Optimal strategies of the iterated prisoner’s dilemma problem for multiple conflicting objectives. IEEE Transactions on Evolutionary Computation, 13(3), 554–565. https://doi.org/10.1109/TEVC.2008.2009459
.. [Nachbar1992] Nachbar J., Evolution in the finitely repeated prisoner’s dilemma, Journal of Economic Behavior & Organization, 19(3): 307-326, 1992.
.. [NC2019] https://github.com/ncase/trust (Accessed: 30 October 2019)
.. [Nowak1989] Nowak, Martin, and Karl Sigmund. "Game-dynamical aspects of the prisoner's dilemma." Applied Mathematics and Computation 30.3 (1989): 191-213.
.. [Nowak1990] Nowak, M., & Sigmund, K. (1990). The evolution of stochastic strategies in the Prisoner's Dilemma. Acta Applicandae Mathematica. https://link.springer.com/article/10.1007/BF00049570
.. [Nowak1992] Nowak, M.., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0
Expand Down