diff --git a/axelrod/strategies/oncebitten.py b/axelrod/strategies/oncebitten.py index d36c36207..6ef0a933f 100644 --- a/axelrod/strategies/oncebitten.py +++ b/axelrod/strategies/oncebitten.py @@ -1,5 +1,5 @@ import random -from axelrod.actions import Actions +from axelrod.actions import Actions, Action from axelrod.player import Player C, D = Actions.C, Actions.D @@ -22,13 +22,13 @@ class OnceBitten(Player): 'manipulates_state': False } - def __init__(self): + def __init__(self) -> None: super().__init__() self.mem_length = 10 self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: """ Begins by playing C, then plays D for mem_length rounds if the opponent ever plays D twice in a row. @@ -72,7 +72,7 @@ class FoolMeOnce(Player): } @staticmethod - def strategy(opponent): + def strategy(opponent: Player) -> Action: if not opponent.history: return C if opponent.defections > 1: @@ -98,7 +98,7 @@ class ForgetfulFoolMeOnce(Player): 'manipulates_state': False } - def __init__(self, forget_probability=0.05): + def __init__(self, forget_probability: float=0.05) -> None: """ Parameters ---------- @@ -110,7 +110,7 @@ def __init__(self, forget_probability=0.05): self._initial = C self.forget_probability = forget_probability - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: r = random.random() if not opponent.history: return self._initial @@ -145,7 +145,7 @@ class FoolMeForever(Player): } @staticmethod - def strategy(opponent): + def strategy(opponent: Player) -> Action: if opponent.defections > 0: return C return D diff --git a/axelrod/strategies/prober.py b/axelrod/strategies/prober.py index a95c2f603..b6ae1a623 100644 --- a/axelrod/strategies/prober.py +++ b/axelrod/strategies/prober.py @@ -1,4 +1,4 @@ -from axelrod.actions import Actions +from axelrod.actions import Actions, Action from axelrod.player import Player from axelrod.random_ import random_choice @@ -31,7 +31,7 @@ 
class CollectiveStrategy(Player): 'manipulates_state': False } - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: turn = len(self.history) if turn == 0: return C @@ -61,7 +61,7 @@ class Prober(Player): 'manipulates_state': False } - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: turn = len(self.history) if turn == 0: return D @@ -94,7 +94,7 @@ class Prober2(Player): 'manipulates_state': False } - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: turn = len(self.history) if turn == 0: return D @@ -127,7 +127,7 @@ class Prober3(Player): 'manipulates_state': False } - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: turn = len(self.history) if turn == 0: return D @@ -165,7 +165,7 @@ class Prober4(Player): 'manipulates_state': False } - def __init__(self): + def __init__(self) -> None: super().__init__() self.init_sequence = [ C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D @@ -174,7 +174,7 @@ def __init__(self): self.unjust_Ds = 0 self.turned_defector = False - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: if not self.history: return self.init_sequence[0] turn = len(self.history) @@ -219,7 +219,7 @@ class HardProber(Player): 'manipulates_state': False } - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: turn = len(self.history) if turn == 0: return D @@ -258,7 +258,7 @@ class NaiveProber(Player): 'manipulates_state': False } - def __init__(self, p=0.1): + def __init__(self, p: float=0.1) -> None: """ Parameters ---------- @@ -270,7 +270,7 @@ def __init__(self, p=0.1): if (self.p == 0) or (self.p == 1): self.classifier['stochastic'] = False - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: # First move if len(self.history) == 0: return C @@ -281,7 +281,7 @@ def strategy(self, opponent): choice = random_choice(1 - self.p) return choice - 
def __repr__(self): + def __repr__(self) -> str: return "%s: %s" % (self.name, round(self.p, 2)) @@ -314,11 +314,11 @@ class RemorsefulProber(NaiveProber): 'manipulates_state': False } - def __init__(self, p=0.1): + def __init__(self, p: float=0.1) -> None: super().__init__(p) self.probing = False - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: # First move if len(self.history) == 0: return C diff --git a/axelrod/strategies/punisher.py b/axelrod/strategies/punisher.py index 1d01cc6db..37d02fa97 100644 --- a/axelrod/strategies/punisher.py +++ b/axelrod/strategies/punisher.py @@ -1,4 +1,5 @@ +from typing import List -from axelrod.actions import Actions +from axelrod.actions import Actions, Action from axelrod.player import Player C, D = Actions.C, Actions.D @@ -27,7 +27,7 @@ class Punisher(Player): 'manipulates_state': False } - def __init__(self): + def __init__(self) -> None: """ Initialised the player """ @@ -36,7 +36,7 @@ def __init__(self): self.grudged = False self.grudge_memory = 1 - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: """ Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing D if the opponent ever @@ -89,14 +89,14 @@ class InversePunisher(Player): 'manipulates_state': False } - def __init__(self): + def __init__(self) -> None: super().__init__() - self.history = [] + self.history = [] # type: List[Action] self.mem_length = 1 self.grudged = False self.grudge_memory = 1 - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: """ Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing C if the opponent ever plays @@ -147,7 +147,7 @@ class LevelPunisher(Player): 'manipulates_state': False } - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: if len(opponent.history) < 10: return C elif (len(opponent.history) - opponent.cooperations) / 
len(opponent.history) > 0.2: diff --git a/axelrod/strategies/rand.py b/axelrod/strategies/rand.py index 2fc4b0b21..231c5c957 100644 --- a/axelrod/strategies/rand.py +++ b/axelrod/strategies/rand.py @@ -1,4 +1,5 @@ from axelrod.player import Player +from axelrod.actions import Action from axelrod.random_ import random_choice @@ -22,7 +23,7 @@ class Random(Player): 'manipulates_state': False } - def __init__(self, p=0.5): + def __init__(self, p: float=0.5) -> None: """ Parameters ---------- @@ -39,8 +40,8 @@ def __init__(self, p=0.5): if p in [0, 1]: self.classifier['stochastic'] = False - def strategy(self, opponent): + def strategy(self, opponent: Player) -> Action: return random_choice(self.p) - def __repr__(self): + def __repr__(self) -> str: return "%s: %s" % (self.name, round(self.p, 2)) diff --git a/type_tests.sh b/type_tests.sh index 0f6e7e93b..06f26a824 100755 --- a/type_tests.sh +++ b/type_tests.sh @@ -21,16 +21,20 @@ mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/cycler.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/darwin.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/defector.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/forgiver.py +mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/geller.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/gradualkiller.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/grudger.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/grumpy.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/handshake.py +mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/hunter.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/inverse.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/mathematicalconstants.py +mypy --ignore-missing-imports --follow-imports skip 
axelrod/strategies/memorytwo.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/mindcontrol.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/mindreader.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/mutual.py mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/negation.py -mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/hunter.py -mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/geller.py -mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/memorytwo.py +mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/oncebitten.py +mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/prober.py +mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/punisher.py +mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/rand.py