diff --git a/README.rst b/README.rst
index 870a5e2be..9751228eb 100644
--- a/README.rst
+++ b/README.rst
@@ -79,7 +79,7 @@ The simplest way to install is::
 
 To install from source::
 
-    $ git clone https://github.com/Axelrod-Python/Axelrod.git
+    $ git clone https://github.com/benjjo/Axelrod.git
     $ cd Axelrod
     $ python setup.py install
diff --git a/axelrod/strategies/_strategies.py b/axelrod/strategies/_strategies.py
index 92dafc523..cad9da264 100644
--- a/axelrod/strategies/_strategies.py
+++ b/axelrod/strategies/_strategies.py
@@ -87,7 +87,7 @@
 from .cycler import Cycler, EvolvableCycler  # pylint: disable=unused-import
 from .darwin import Darwin
 from .dbs import DBS
-from .defector import Defector, TrickyDefector
+from .defector import Defector, TrickyDefector, ModalDefector
 from .doubler import Doubler
 from .finite_state_machines import (
     TF1,
@@ -260,6 +260,7 @@
     HardTitFor2Tats,
     HardTitForTat,
     Michaelos,
+    ModalTFT,
     NTitsForMTats,
     OmegaTFT,
     OriginalGradual,
@@ -409,6 +410,8 @@
     MEM2,
     MathConstantHunter,
     Michaelos,
+    ModalDefector,
+    ModalTFT,
     NTitsForMTats,
     NaiveProber,
     Negation,
diff --git a/axelrod/strategies/defector.py b/axelrod/strategies/defector.py
index f52637ba0..bc1012126 100644
--- a/axelrod/strategies/defector.py
+++ b/axelrod/strategies/defector.py
@@ -1,5 +1,6 @@
 from axelrod.action import Action
 from axelrod.player import Player
+import statistics
 
 C, D = Action.C, Action.D
 
@@ -61,3 +62,35 @@ def strategy(self, opponent: Player) -> Action:
         ):
             return C
         return D
+
+
+class ModalDefector(Player):
+    """
+    A player starts by Defecting and then analyses the history of the opponent. If the opponent Cooperated in the
+    last round, they are returned with a Defection. If the opponent chose to Defect in the previous round,
+    then this strategy will return with the mode of the previous opponent responses.
+    """
+
+    # These are various properties for the strategy
+    name = "Modal Defector"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: Player) -> Action:
+        """This is the actual strategy"""
+        # First move
+        if not self.history:
+            return D
+        # React to the opponent's historical moves
+        if opponent.history[-1] == C:
+            return D
+        else:
+            # returns with the mode of the opponent's history.
+            return statistics.mode(opponent.history)
+
diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py
index e98a8aadf..3d611eba3 100644
--- a/axelrod/strategies/titfortat.py
+++ b/axelrod/strategies/titfortat.py
@@ -1,5 +1,6 @@
 from axelrod.action import Action, actions_to_str
 from axelrod.player import Player
+import statistics
 from axelrod.strategy_transformers import (
     FinalTransformer,
     TrackHistoryTransformer,
@@ -872,7 +873,7 @@ def strategy(self, opponent: Player) -> Action:
 class RandomTitForTat(Player):
     """
     A player starts by cooperating and then follows by copying its
-    opponent (tit for tat style). From then on the player
+    opponent (tit-for-tat style). From then on the player
     will switch between copying its opponent and randomly
     responding every other iteration.
 
@@ -945,9 +946,41 @@ def strategy(self, opponent):
         if not opponent.history:
             # Make sure we cooperate first turn
             return C
-        # BBE modification 
+        # BBE modification
         if opponent.history[-1] == C:
             # Cooperate with 0.9
             return self._random.random_choice(0.9)
         # Else TFT. Opponent played D, so play D in return.
         return D
+
+
+class ModalTFT(Player):
+    """
+    A player starts by cooperating and then analyses the history of the opponent. If the opponent Cooperated in the
+    last round, they are returned with a Cooperation. If the opponent chose to Defect in the previous round,
+    then this strategy will return with the mode of the previous opponent responses.
+    """
+
+    # These are various properties for the strategy
+    name = "Modal TFT"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: Player) -> Action:
+        """This is the actual strategy"""
+        # First move
+        if not self.history:
+            return C
+        # React to the opponent's historical moves
+        if opponent.history[-1] == C:
+            return C
+        else:
+            # returns with the mode of the opponent's history.
+            return statistics.mode(opponent.history)
+
diff --git a/axelrod/tests/strategies/test_defector.py b/axelrod/tests/strategies/test_defector.py
index d9dffa48d..227bc2990 100644
--- a/axelrod/tests/strategies/test_defector.py
+++ b/axelrod/tests/strategies/test_defector.py
@@ -61,3 +61,21 @@ def test_defects_if_opponent_last_three_are_not_D(self):
         self.versus_test(
             axl.MockPlayer(actions=opponent_actions), expected_actions=actions
         )
+
+
+class TestModalDefector(TestPlayer):
+    name = "Modal Defector"
+    player = axl.ModalDefector
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        opponent = axl.MockPlayer(actions=[C, C, D, D, D, D, C, D])
+        actions = [(D, C), (D, C), (D, D), (C, D), (C, D), (D, D), (D, C), (D, D), (D, C)]
+        self.versus_test(opponent, expected_actions=actions)
diff --git a/axelrod/tests/strategies/test_titfortat.py b/axelrod/tests/strategies/test_titfortat.py
index 09d82b80f..5c9c7f5b1 100644
--- a/axelrod/tests/strategies/test_titfortat.py
+++ b/axelrod/tests/strategies/test_titfortat.py
@@ -1341,3 +1341,23 @@ def test_vs_cooperator2(self):
     def test_vs_defector(self):
         actions = [(C, D), (D, D), (D, D), (D, D), (D, D)]
         self.versus_test(axl.Defector(), expected_actions=actions)
+
+
+class TestModalTFT(TestPlayer):
+    name = "Modal TFT"
+    player = axl.ModalTFT
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        opponent = axl.MockPlayer(actions=[C, C, D, D, D, D, C, D])
+        actions = [(C, C), (C, C), (C, D), (C, D), (C, D), (D, D), (D, C), (C, D), (D, C)]
+        self.versus_test(opponent, expected_actions=actions)
+
+