From 124359a99d5c3facd65c2cd15c3f57845695db95 Mon Sep 17 00:00:00 2001 From: Vince Knight Date: Tue, 6 Jun 2017 11:50:59 +0100 Subject: [PATCH] Add all but 2 remaining strategies. Currently missing: - Soft Joss. Soft Joss is GTFT with a different parameter. We need to find the literature with that particular parameter. - Also cannot find EXTORT-4 --- axelrod/strategies/alternator.py | 1 + axelrod/strategies/cooperator.py | 1 + axelrod/strategies/cycler.py | 2 + axelrod/strategies/defector.py | 8 +- axelrod/strategies/forgiver.py | 8 ++ axelrod/strategies/gambler.py | 18 ++- axelrod/strategies/geller.py | 17 ++- axelrod/strategies/gobymajority.py | 41 ++++++ axelrod/strategies/grudger.py | 36 +++-- axelrod/strategies/grumpy.py | 10 +- axelrod/strategies/hmm.py | 4 +- axelrod/strategies/hunter.py | 49 ++++++- axelrod/strategies/inverse.py | 10 +- axelrod/strategies/lookerup.py | 16 ++- axelrod/strategies/mathematicalconstants.py | 28 +++- axelrod/strategies/memoryone.py | 64 +++++++-- axelrod/strategies/meta.py | 139 +++++++++++++++++--- axelrod/strategies/mindcontrol.py | 15 ++- axelrod/strategies/mindreader.py | 21 ++- axelrod/strategies/negation.py | 2 +- axelrod/strategies/oncebitten.py | 16 +++ axelrod/strategies/punisher.py | 6 +- axelrod/strategies/resurrection.py | 4 +- axelrod/strategies/retaliate.py | 12 +- axelrod/strategies/sequence_player.py | 4 + axelrod/strategies/titfortat.py | 11 +- docs/reference/bibliography.rst | 7 +- 27 files changed, 460 insertions(+), 90 deletions(-) diff --git a/axelrod/strategies/alternator.py b/axelrod/strategies/alternator.py index c93985d57..2b24fd368 100644 --- a/axelrod/strategies/alternator.py +++ b/axelrod/strategies/alternator.py @@ -11,6 +11,7 @@ class Alternator(Player): Names - Alternator: [Axelrod1984]_ + - Periodic player CD: [Mittal2009]_ """ name = 'Alternator' diff --git a/axelrod/strategies/cooperator.py b/axelrod/strategies/cooperator.py index 30002daad..a57daecae 100644 --- 
a/axelrod/strategies/cooperator.py +++ b/axelrod/strategies/cooperator.py @@ -11,6 +11,7 @@ class Cooperator(Player): - Cooperator: [Axelrod1984]_ - ALLC: [Press2012]_ + - Always cooperate: [Mittal2009]_ """ name = 'Cooperator' diff --git a/axelrod/strategies/cycler.py b/axelrod/strategies/cycler.py index 7f0471e5f..72d011d29 100644 --- a/axelrod/strategies/cycler.py +++ b/axelrod/strategies/cycler.py @@ -126,6 +126,7 @@ class CyclerCCD(Cycler): Names: - Cycler CCD: Original name by Marc Harper + - Periodic player CCD: [Mittal2009]_ """ name = 'Cycler CCD' classifier = copy.copy(Cycler.classifier) @@ -142,6 +143,7 @@ class CyclerDDC(Cycler): Names: - Cycler DDC: Original name by Marc Harper + - Periodic player DDC: [Mittal2009]_ """ name = 'Cycler DDC' classifier = copy.copy(Cycler.classifier) diff --git a/axelrod/strategies/defector.py b/axelrod/strategies/defector.py index abf64f2c3..7d41144ef 100644 --- a/axelrod/strategies/defector.py +++ b/axelrod/strategies/defector.py @@ -11,6 +11,7 @@ class Defector(Player): - Defector: [Axelrod1984]_ - ALLD: [Press2012]_ + - Always defect: [Mittal2009]_ """ name = 'Defector' @@ -30,7 +31,12 @@ def strategy(opponent: Player) -> Action: class TrickyDefector(Player): - """A defector that is trying to be tricky.""" + """A defector that is trying to be tricky. 
+ + Names: + + - Tricky Defector: Original name by Karol Langner + """ name = "Tricky Defector" classifier = { diff --git a/axelrod/strategies/forgiver.py b/axelrod/strategies/forgiver.py index 30c8bb4e0..0d834d9fd 100644 --- a/axelrod/strategies/forgiver.py +++ b/axelrod/strategies/forgiver.py @@ -8,6 +8,10 @@ class Forgiver(Player): """ A player starts by cooperating however will defect if at any point the opponent has defected more than 10 percent of the time + + Names: + + - Forgiver: Original name by Thomas Campbell """ name = 'Forgiver' @@ -36,6 +40,10 @@ class ForgivingTitForTat(Player): A player starts by cooperating however will defect if at any point, the opponent has defected more than 10 percent of the time, and their most recent decision was defect. + + Names: + + - Forgiving Tit For Tat: Original name by Thomas Campbell """ name = 'Forgiving Tit For Tat' diff --git a/axelrod/strategies/gambler.py b/axelrod/strategies/gambler.py index e183d25b8..bbc4af114 100644 --- a/axelrod/strategies/gambler.py +++ b/axelrod/strategies/gambler.py @@ -20,6 +20,10 @@ class Gambler(LookerUp): """ A stochastic version of LookerUp which will select randomly an action in some cases. + + Names: + + - Gambler: Original name by Georgios Koutsovoulos """ name = 'Gambler' @@ -47,7 +51,8 @@ class PSOGamblerMem1(Gambler): Axelrod library. Names: - - PSO Gambler Mem1: Original name by Marc Harper + + - PSO Gambler Mem1: Original name by Marc Harper """ name = "PSO Gambler Mem1" @@ -64,7 +69,8 @@ class PSOGambler1_1_1(Gambler): A 1x1x1 PSOGambler trained with pyswarm. Names: - - PSO Gambler 1_1_1: Original name by Marc Harper + + - PSO Gambler 1_1_1: Original name by Marc Harper """ name = "PSO Gambler 1_1_1" @@ -78,10 +84,12 @@ def __init__(self) -> None: class PSOGambler2_2_2(Gambler): """ - A 2x2x2 PSOGambler trained with pyswarm. Original version by @GDKO. + A 2x2x2 PSOGambler trained with a particle swarm algorithm (implemented in + pyswarm). 
Original version by Georgios Koutsovoulos. Names: - - PSO Gambler 2_2_2: Original name by Marc Harper + + - PSO Gambler 2_2_2: Original name by Marc Harper """ name = "PSO Gambler 2_2_2" @@ -98,8 +106,8 @@ class PSOGambler2_2_2_Noise05(Gambler): A 2x2x2 PSOGambler trained with pyswarm with noise=0.05. Names: - - PSO Gambler 2_2_2 Noise 05: Original name by Marc Harper + - PSO Gambler 2_2_2 Noise 05: Original name by Marc Harper """ name = "PSO Gambler 2_2_2 Noise 05" diff --git a/axelrod/strategies/geller.py b/axelrod/strategies/geller.py index f46f0aa31..b12ad8335 100644 --- a/axelrod/strategies/geller.py +++ b/axelrod/strategies/geller.py @@ -17,7 +17,6 @@ class Geller(Player): If unable to do this: will play randomly. - Geller - by Martin Chorley (@martinjc), heavily inspired by Matthew Williams (@voxmjw) This code is inspired by Matthew Williams' talk "Cheating at rock-paper-scissors — meta-programming in Python" @@ -33,6 +32,10 @@ class Geller(Player): This is almost certainly cheating, and more than likely against the spirit of the 'competition' :-) + + Names: + + - Geller: Original name by Martin Chorley (@martinjc) """ name = 'Geller' @@ -64,6 +67,10 @@ def strategy(self, opponent: Player) -> Action: class GellerCooperator(Geller): """Observes what the payer will do (like :code:`Geller`) but if unable to will cooperate. + + Names: + + - Geller Cooperator: Original name by Karol Langner """ name = 'Geller Cooperator' classifier = { @@ -78,13 +85,19 @@ class GellerCooperator(Geller): @staticmethod def foil_strategy_inspection() -> Action: - """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" + """ + Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead + """ return C class GellerDefector(Geller): """Observes what the payer will do (like :code:`Geller`) but if unable to will defect. 
+ + Names: + + - Geller Defector: Original name by Karol Langner """ name = 'Geller Defector' classifier = { diff --git a/axelrod/strategies/gobymajority.py b/axelrod/strategies/gobymajority.py index 0af604381..42797101a 100644 --- a/axelrod/strategies/gobymajority.py +++ b/axelrod/strategies/gobymajority.py @@ -19,6 +19,11 @@ class GoByMajority(Player): An optional memory attribute will limit the number of turns remembered (by default this is 0) + + Names: + + - Go By Majority: [Axelrod1984]_ + - Soft Majority: [Mittal2009]_ """ name = 'Go By Majority' @@ -86,6 +91,10 @@ def strategy(self, opponent: Player) -> Action: class GoByMajority40(GoByMajority): """ GoByMajority player with a memory of 40. + + Names: + + - Go By Majority 40: Original name by Karol Langner """ name = 'Go By Majority 40' classifier = copy.copy(GoByMajority.classifier) @@ -98,6 +107,10 @@ def __init__(self) -> None: class GoByMajority20(GoByMajority): """ GoByMajority player with a memory of 20. + + Names: + + - Go By Majority 20: Original name by Karol Langner """ name = 'Go By Majority 20' classifier = copy.copy(GoByMajority.classifier) @@ -110,6 +123,10 @@ def __init__(self) -> None: class GoByMajority10(GoByMajority): """ GoByMajority player with a memory of 10. + + Names: + + - Go By Majority 10: Original name by Karol Langner """ name = 'Go By Majority 10' classifier = copy.copy(GoByMajority.classifier) @@ -122,6 +139,10 @@ def __init__(self) -> None: class GoByMajority5(GoByMajority): """ GoByMajority player with a memory of 5. 
+ + Names: + + - Go By Majority 5: Original name by Karol Langner """ name = 'Go By Majority 5' classifier = copy.copy(GoByMajority.classifier) @@ -138,6 +159,10 @@ class HardGoByMajority(GoByMajority): An optional memory attribute will limit the number of turns remembered (by default this is 0) + + Names: + + - Hard Majority: [Mittal2009]_ """ name = 'Hard Go By Majority' @@ -148,6 +173,10 @@ def __init__(self, memory_depth: Union[int, float] = float('inf')) -> None: class HardGoByMajority40(HardGoByMajority): """ HardGoByMajority player with a memory of 40. + + Names: + + - Hard Go By Majority 40: Original name by Karol Langner """ name = 'Hard Go By Majority 40' classifier = copy.copy(GoByMajority.classifier) @@ -160,6 +189,10 @@ def __init__(self) -> None: class HardGoByMajority20(HardGoByMajority): """ HardGoByMajority player with a memory of 20. + + Names: + + - Hard Go By Majority 20: Original name by Karol Langner """ name = 'Hard Go By Majority 20' classifier = copy.copy(GoByMajority.classifier) @@ -172,6 +205,10 @@ def __init__(self) -> None: class HardGoByMajority10(HardGoByMajority): """ HardGoByMajority player with a memory of 10. + + Names: + + - Hard Go By Majority 10: Original name by Karol Langner """ name = 'Hard Go By Majority 10' classifier = copy.copy(GoByMajority.classifier) @@ -184,6 +221,10 @@ def __init__(self) -> None: class HardGoByMajority5(HardGoByMajority): """ HardGoByMajority player with a memory of 5. 
+ + Names: + + - Hard Go By Majority 5: Original name by Karol Langner """ name = 'Hard Go By Majority 5' classifier = copy.copy(GoByMajority.classifier) diff --git a/axelrod/strategies/grudger.py b/axelrod/strategies/grudger.py index 27204a77e..e6351b87d 100644 --- a/axelrod/strategies/grudger.py +++ b/axelrod/strategies/grudger.py @@ -39,8 +39,14 @@ def strategy(opponent: Player) -> Action: class ForgetfulGrudger(Player): - """A player starts by cooperating however will defect if at any point the - opponent has defected, but forgets after mem_length matches.""" + """ + A player starts by cooperating however will defect if at any point the + opponent has defected, but forgets after mem_length matches. + + Names: + + - Forgetful Grudger: Original name by Geraint Palmer + """ name = 'Forgetful Grudger' classifier = { @@ -83,8 +89,14 @@ def reset(self): class OppositeGrudger(Player): - """A player starts by defecting however will cooperate if at any point the - opponent has cooperated.""" + """ + A player starts by defecting however will cooperate if at any point the + opponent has cooperated. + + Names: + + - Opposite Grudger: Original name by Geraint Palmer + """ name = 'Opposite Grudger' classifier = { @@ -107,7 +119,13 @@ def strategy(opponent: Player) -> Action: class Aggravater(Player): - """Grudger, except that it defects on the first 3 turns""" + """ + Grudger, except that it defects on the first 3 turns + + Names + + Aggravater: Original name by Thomas Campbell + """ name = 'Aggravater' classifier = { @@ -135,10 +153,7 @@ class SoftGrudger(Player): defecting: punishes by playing: D, D, D, D, C, C. (Will continue to cooperate afterwards). - For reference see: "Engineering Design of Strategies for Winning - Iterated Prisoner's Dilemma Competitions" by Jiawei Li, Philip Hingston, - and Graham Kendall. IEEE TRANSACTIONS ON COMPUTATIONAL INTELLIGENCE AND AI - IN GAMES, VOL. 3, NO. 
4, DECEMBER 2011 + - Soft Grudger (SGRIM): [Li2011]_ """ name = 'Soft Grudger' @@ -218,7 +233,8 @@ class EasyGo(Player): Names: - - Easy Go [Prison1998]_ + - Easy Go: [Prison1998]_ + - Reverse Grudger (RGRIM): [Li2011]_ """ name = 'EasyGo' diff --git a/axelrod/strategies/grumpy.py b/axelrod/strategies/grumpy.py index 33f63c88a..6def93419 100644 --- a/axelrod/strategies/grumpy.py +++ b/axelrod/strategies/grumpy.py @@ -5,9 +5,15 @@ class Grumpy(Player): - """A player that defects after a certain level of grumpiness. + """ + A player that defects after a certain level of grumpiness. Grumpiness increases when the opponent defects and decreases - when the opponent co-operates.""" + when the opponent co-operates. + + Names: + + - Grumpy: Original name by Jason Young + """ name = 'Grumpy' classifier = { diff --git a/axelrod/strategies/hmm.py b/axelrod/strategies/hmm.py index d53d5ef85..e48c6a3cf 100644 --- a/axelrod/strategies/hmm.py +++ b/axelrod/strategies/hmm.py @@ -96,7 +96,7 @@ class HMMPlayer(Player): Names - - HMM Player + - HMM Player: Original name by Marc Harper """ name = "HMM Player" @@ -163,7 +163,7 @@ class EvolvedHMM5(HMMPlayer): Names: - - Evolved HMM 5 + - Evolved HMM 5: Original name by Marc Harper """ name = "Evolved HMM 5" diff --git a/axelrod/strategies/hunter.py b/axelrod/strategies/hunter.py index 50e7a1f38..51eb14d5f 100644 --- a/axelrod/strategies/hunter.py +++ b/axelrod/strategies/hunter.py @@ -8,7 +8,12 @@ class DefectorHunter(Player): - """A player who hunts for defectors.""" + """A player who hunts for defectors. + + Names: + + Defector Hunter: Original name by Marc Harper + """ name = 'Defector Hunter' classifier = { @@ -28,7 +33,12 @@ def strategy(self, opponent: Player) -> Action: class CooperatorHunter(Player): - """A player who hunts for cooperators.""" + """A player who hunts for cooperators. 
+ + Names: + + Cooperator Hunter: Original name by Marc Harper + """ name = 'Cooperator Hunter' classifier = { @@ -55,7 +65,12 @@ def is_alternator(history: List[Action]) -> bool: class AlternatorHunter(Player): - """A player who hunts for alternators.""" + """A player who hunts for alternators. + + Names: + + Alternator Hunter: Original name by Marc Harper + """ name = 'Alternator Hunter' classifier = { @@ -89,7 +104,12 @@ def reset(self): class CycleHunter(Player): """Hunts strategies that play cyclically, like any of the Cyclers, - Alternator, etc.""" + Alternator, etc. + + Names: + + Cycle Hunter: Original name by Marc Harper + """ name = 'Cycle Hunter' classifier = { @@ -122,7 +142,12 @@ def reset(self): class EventualCycleHunter(CycleHunter): - """Hunts strategies that eventually play cyclically.""" + """Hunts strategies that eventually play cyclically. + + Names: + + Eventual Cycle Hunter: Original name by Marc Harper + """ name = 'Eventual Cycle Hunter' @@ -142,7 +167,12 @@ def strategy(self, opponent: Player) -> None: class MathConstantHunter(Player): - """A player who hunts for mathematical constant players.""" + """A player who hunts for mathematical constant players. + + Names: + + Math Constant Hunter: Original name by Marc Harper + """ name = "Math Constant Hunter" classifier = { @@ -180,7 +210,12 @@ def strategy(self, opponent: Player) -> Action: class RandomHunter(Player): - """A player who hunts for random players.""" + """A player who hunts for random players. + + Names: + + Random Hunter: Original name by Marc Harper + """ name = "Random Hunter" classifier = { diff --git a/axelrod/strategies/inverse.py b/axelrod/strategies/inverse.py index 3b91d8395..72e899d4d 100644 --- a/axelrod/strategies/inverse.py +++ b/axelrod/strategies/inverse.py @@ -7,7 +7,12 @@ class Inverse(Player): """A player who defects with a probability that diminishes relative to how - long ago the opponent defected.""" + long ago the opponent defected. 
+ + Names: + + - Inverse: Original Name by Karol Langner + """ name = 'Inverse' classifier = { @@ -29,7 +34,8 @@ def strategy(opponent: Player) -> Action: # calculate how many turns ago the opponent defected index = next((index for index, value in - enumerate(opponent.history[::-1], start=1) if value == D), None) + enumerate(opponent.history[::-1], start=1) + if value == D), None) if index is None: return C diff --git a/axelrod/strategies/lookerup.py b/axelrod/strategies/lookerup.py index aba61d957..d883d98c1 100644 --- a/axelrod/strategies/lookerup.py +++ b/axelrod/strategies/lookerup.py @@ -266,6 +266,10 @@ class LookerUp(Player): LookerUp's LookupTable defaults to Tit-For-Tat. The initial_actions defaults to playing C. + + Names: + + - Lookerup: Original name by Martin Jones """ name = 'LookerUp' @@ -366,7 +370,8 @@ class EvolvedLookerUp1_1_1(LookerUp): A 1 1 1 Lookerup trained with an evolutionary algorithm. Names: - - Evolved Lookerup 1 1 1: Original name by Marc Harper + + - Evolved Lookerup 1 1 1: Original name by Marc Harper """ name = "EvolvedLookerUp1_1_1" @@ -381,7 +386,8 @@ class EvolvedLookerUp2_2_2(LookerUp): A 2 2 2 Lookerup trained with an evolutionary algorithm. Names: - - Evolved Lookerup 2 2 2: Original name by Marc Harper + + - Evolved Lookerup 2 2 2: Original name by Marc Harper """ name = "EvolvedLookerUp2_2_2" @@ -397,7 +403,8 @@ class Winner12(LookerUp): A lookup table based strategy. Names: - - Winner12 [Mathieu2015]_ + + - Winner12: [Mathieu2015]_ """ name = "Winner12" @@ -413,7 +420,8 @@ class Winner21(LookerUp): A lookup table based strategy. 
Names: - - Winner21 [Mathieu2015]_ + + - Winner21: [Mathieu2015]_ """ name = "Winner21" diff --git a/axelrod/strategies/mathematicalconstants.py b/axelrod/strategies/mathematicalconstants.py index 4ba5870fc..2cd76a5b8 100644 --- a/axelrod/strategies/mathematicalconstants.py +++ b/axelrod/strategies/mathematicalconstants.py @@ -8,7 +8,12 @@ class CotoDeRatio(Player): """The player will always aim to bring the ratio of co-operations to - defections closer to the ratio as given in a sub class""" + defections closer to the ratio as given in a sub class + + Names: + + - Co to Do Ratio: Original Name by Timothy Standen + """ classifier = { 'stochastic': False, @@ -37,7 +42,12 @@ def strategy(self, opponent: Player) -> Action: class Golden(CotoDeRatio): """The player will always aim to bring the ratio of co-operations to - defections closer to the golden mean""" + defections closer to the golden mean + + Names: + + - Golden: Original Name by Timothy Standen + """ name = '$\phi$' ratio = (1 + math.sqrt(5)) / 2 @@ -45,7 +55,12 @@ class Golden(CotoDeRatio): class Pi(CotoDeRatio): """The player will always aim to bring the ratio of co-operations to - defections closer to the pi""" + defections closer to the pi + + Names: + + - Pi: Original Name by Timothy Standen + """ name = '$\pi$' ratio = math.pi @@ -53,7 +68,12 @@ class Pi(CotoDeRatio): class e(CotoDeRatio): """The player will always aim to bring the ratio of co-operations to - defections closer to the e""" + defections closer to the e + + Names: + + - e: Original Name by Timothy Standen + """ name = '$e$' ratio = math.e diff --git a/axelrod/strategies/memoryone.py b/axelrod/strategies/memoryone.py index 0a274d8bd..38db189ca 100644 --- a/axelrod/strategies/memoryone.py +++ b/axelrod/strategies/memoryone.py @@ -89,8 +89,8 @@ class WinStayLoseShift(MemoryOnePlayer): Names: - - WSLS: [Stewart2012]_ - Win Stay Lose Shift: [Nowak1993]_ + - WSLS: [Stewart2012]_ - Pavlov: [Kraines1989]_ """ @@ -213,7 +213,15 @@ def 
__init__(self) -> None: class StochasticWSLS(MemoryOnePlayer): - """Stochastic WSLS, similar to Generous TFT""" + """ + Stochastic WSLS, similar to Generous TFT. Note that this is not the same as + Stochastic WSLS described in [Amaral2016]_, that strategy is a modification + of WSLS that learns from the performance of other strategies. + + Names: + + - Stochastic WSLS: Original name by Marc Harper + """ name = 'Stochastic WSLS' @@ -246,6 +254,10 @@ class LRPlayer(MemoryOnePlayer): This parameterization is Equation 14 in http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0077886. See Figure 2 of the article for a more in-depth explanation. + + Names: + + - Linear Relation player: [Hilde2013]_ """ name = 'LinearRelation' @@ -290,7 +302,13 @@ def receive_match_attributes(self, phi: float = 0, s: float = None, l: float = N class ZDExtort2(LRPlayer): - """An Extortionate Zero Determinant Strategy with l=P.""" + """ + An Extortionate Zero Determinant Strategy with l=P. + + Names: + + - Extort-2: [Stewart2012]_ + """ name = 'ZD-Extort-2' @@ -314,7 +332,14 @@ def receive_match_attributes(self): class ZDExtort2v2(LRPlayer): - """An Extortionate Zero Determinant Strategy with l=1.""" + """ + An Extortionate Zero Determinant Strategy with l=1. + + + Names: + + - EXTORT2: [Kuhn2017]_ + """ name = 'ZD-Extort-2 v2' @@ -361,7 +386,13 @@ def receive_match_attributes(self): class ZDGen2(LRPlayer): - """A Generous Zero Determinant Strategy with l=3.""" + """ + A Generous Zero Determinant Strategy with l=3. + + Names: + + - GEN2: [Kuhn2017]_ + """ name = 'ZD-GEN-2' @@ -384,7 +415,13 @@ def receive_match_attributes(self): class ZDGTFT2(LRPlayer): - """A Generous Zero Determinant Strategy with l=R.""" + """ + A Generous Zero Determinant Strategy with l=R. 
+ + Names: + + - ZDGTFT-2: [Stewart2012]_ + """ name = 'ZD-GTFT-2' @@ -408,7 +445,13 @@ def receive_match_attributes(self): class ZDSet2(LRPlayer): - """A Generous Zero Determinant Strategy with l=2.""" + """ + A Generous Zero Determinant Strategy with l=2. + + Names: + + - SET2: [Kuhn2017]_ + """ name = 'ZD-SET-2' @@ -430,9 +473,6 @@ def receive_match_attributes(self): self.phi, self.s, self.l) -### Strategies for recreating tournaments -# See also Joss in axelrod_tournaments.py - class SoftJoss(MemoryOnePlayer): """ Defects with probability 0.9 when the opponent defects, otherwise @@ -468,6 +508,10 @@ class ALLCorALLD(Player): For now starting choice is random of 0.6, but that was an arbitrary choice at implementation time. + + Names: + + - ALLC or ALLD: Original name by Marc Harper """ name = "ALLCorALLD" diff --git a/axelrod/strategies/meta.py b/axelrod/strategies/meta.py index 5c5e31825..46eb1e564 100644 --- a/axelrod/strategies/meta.py +++ b/axelrod/strategies/meta.py @@ -15,7 +15,13 @@ class MetaPlayer(Player): - """A generic player that has its own team of players.""" + """ + A generic player that has its own team of players. + + Names: + + - Meta Player: Original name by Karol Langner + """ name = "Meta Player" classifier = { @@ -86,7 +92,12 @@ def reset(self): class MetaMajority(MetaPlayer): - """A player who goes by the majority vote of all other non-meta players.""" + """A player who goes by the majority vote of all other non-meta players. + + Names: + + - Meta Majority: Original name by Karol Langner + """ name = "Meta Majority" @@ -101,7 +112,12 @@ def meta_strategy(results, opponent): class MetaMinority(MetaPlayer): - """A player who goes by the minority vote of all other non-meta players.""" 
+ + Names: + + - Meta Minority: Original name by Karol Langner + """ name = "Meta Minority" @@ -116,7 +132,12 @@ def meta_strategy(results, opponent): class MetaWinner(MetaPlayer): - """A player who goes by the strategy of the current winner.""" + """A player who goes by the strategy of the current winner. + + Names: + + - Meta Winner: Original name by Karol Langner + """ name = "Meta Winner" @@ -162,7 +183,7 @@ class MetaWinnerEnsemble(MetaWinner): Names: - Meta Winner Ensemble: Original name by Marc Harper + - Meta Winner Ensemble: Original name by Marc Harper """ name = "Meta Winner Ensemble" @@ -182,7 +203,12 @@ def meta_strategy(self, results, opponent): class MetaHunter(MetaPlayer): - """A player who uses a selection of hunters.""" + """A player who uses a selection of hunters. + + Names + + - Meta Hunter: Original name by Marc Harper + """ name = "Meta Hunter" classifier = { @@ -225,7 +251,12 @@ def meta_strategy(results, opponent): class MetaHunterAggressive(MetaPlayer): - """A player who uses a selection of hunters.""" + """A player who uses a selection of hunters. 
+ + Names + + - Meta Hunter Aggressive: Original name by Marc Harper + """ name = "Meta Hunter Aggressive" classifier = { @@ -264,7 +295,12 @@ def meta_strategy(results, opponent): class MetaMajorityMemoryOne(MetaMajority): - """MetaMajority with the team of Memory One players""" + """MetaMajority with the team of Memory One players + + Names + + - Meta Majority Memory One: Original name by Marc Harper + """ name = "Meta Majority Memory One" @@ -275,7 +311,12 @@ def __init__(self): class MetaMajorityFiniteMemory(MetaMajority): - """MetaMajority with the team of Finite Memory Players""" + """MetaMajority with the team of Finite Memory Players + + Names + + - Meta Majority Finite Memory: Original name by Marc Harper + """ name = "Meta Majority Finite Memory" @@ -286,7 +327,12 @@ def __init__(self): class MetaMajorityLongMemory(MetaMajority): - """MetaMajority with the team of Long (infinite) Memory Players""" + """MetaMajority with the team of Long (infinite) Memory Players + + Names + + - Meta Majority Long Memory: Original name by Marc Harper + """ name = "Meta Majority Long Memory" @@ -297,7 +343,12 @@ def __init__(self): class MetaWinnerMemoryOne(MetaWinner): - """MetaWinner with the team of Memory One players""" + """MetaWinner with the team of Memory One players + + Names + + - Meta Winner Memory One: Original name by Marc Harper + """ name = "Meta Winner Memory One" @@ -308,7 +359,12 @@ def __init__(self): class MetaWinnerFiniteMemory(MetaWinner): - """MetaWinner with the team of Finite Memory Players""" + """MetaWinner with the team of Finite Memory Players + + Names + + - Meta Winner Finite Memory: Original name by Marc Harper + """ name = "Meta Winner Finite Memory" @@ -319,8 +375,12 @@ def __init__(self): class MetaWinnerLongMemory(MetaWinner): - """MetaWinner with the team of Long (infinite) Memory Players""" + """MetaWinner with the team of Long (infinite) Memory Players + Names + + - Meta Winner Long Memory: Original name by Marc Harper + """ name = "Meta 
Winner Long Memory" def __init__(self): @@ -330,7 +390,12 @@ def __init__(self): class MetaWinnerDeterministic(MetaWinner): - """Meta Winner with the team of Deterministic Players.""" + """Meta Winner with the team of Deterministic Players. + + Names + + - Meta Winner Deterministic: Original name by Marc Harper + """ name = "Meta Winner Deterministic" @@ -342,7 +407,12 @@ def __init__(self): class MetaWinnerStochastic(MetaWinner): - """Meta Winner with the team of Stochastic Players.""" + """Meta Winner with the team of Stochastic Players. + + Names + + - Meta Winner Stochastic: Original name by Marc Harper + """ name = "Meta Winner Stochastic" @@ -367,6 +437,10 @@ class MetaMixer(MetaPlayer): distribution : list representing a probability distribution, optional This gives the distribution from which to select the players. If none is passed will select uniformly. + + Names + + - Meta Mixer: Original name by Vince Knight """ name = "Meta Mixer" @@ -390,7 +464,12 @@ def meta_strategy(self, results, opponent): class NMWEDeterministic(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Deterministic Players.""" + """Nice Meta Winner Ensemble with the team of Deterministic Players. + + Names + + - Nice Meta Winner Ensemble Deterministic: Original name by Marc Harper + """ name = "NMWE Deterministic" @@ -402,7 +481,12 @@ def __init__(self): class NMWEStochastic(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Stochastic Players.""" + """Nice Meta Winner Ensemble with the team of Stochastic Players. + + Names + + - Nice Meta Winner Ensemble Stochastic: Original name by Marc Harper + """ name = "NMWE Stochastic" @@ -413,7 +497,12 @@ def __init__(self): class NMWEFiniteMemory(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Finite Memory Players.""" + """Nice Meta Winner Ensemble with the team of Finite Memory Players. 
+ + Names + + - Nice Meta Winner Ensemble Finite Memory: Original name by Marc Harper + """ name = "NMWE Finite Memory" @@ -424,7 +513,12 @@ def __init__(self): class NMWELongMemory(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Long Memory Players.""" + """Nice Meta Winner Ensemble with the team of Long Memory Players. + + Names + + - Nice Meta Winner Ensemble Long Memory: Original name by Marc Harper + """ name = "NMWE Long Memory" @@ -435,7 +529,12 @@ def __init__(self): class NMWEMemoryOne(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Memory One Players.""" + """Nice Meta Winner Ensemble with the team of Memory One Players. + + Names + + - Nice Meta Winner Ensemble Memory One: Original name by Marc Harper + """ name = "NMWE Memory One" diff --git a/axelrod/strategies/mindcontrol.py b/axelrod/strategies/mindcontrol.py index 23f7012ad..a1bf7fe7e 100644 --- a/axelrod/strategies/mindcontrol.py +++ b/axelrod/strategies/mindcontrol.py @@ -5,7 +5,12 @@ class MindController(Player): - """A player that changes the opponents strategy to cooperate.""" + """A player that changes the opponents strategy to cooperate. + + Names + + - Mind Controller: Original name by Karol Langner + """ name = 'Mind Controller' classifier = { @@ -34,6 +39,10 @@ class MindWarper(Player): """ A player that changes the opponent's strategy but blocks changes to its own. + + Names + + - Mind Warper: Original name by Karol Langner """ name = 'Mind Warper' @@ -63,6 +72,10 @@ class MindBender(MindWarper): """ A player that changes the opponent's strategy by modifying the internal dictionary. 
+ + Names + + - Mind Bender: Original name by Karol Langner """ name = 'Mind Bender' diff --git a/axelrod/strategies/mindreader.py b/axelrod/strategies/mindreader.py index 9d12acbff..2842a18a2 100644 --- a/axelrod/strategies/mindreader.py +++ b/axelrod/strategies/mindreader.py @@ -13,7 +13,12 @@ class MindReader(Player): """A player that looks ahead at what the opponent will do and decides what - to do.""" + to do. + + Names: + + - Mind reader: Original name by Jason Young + """ name = 'Mind Reader' classifier = { @@ -46,7 +51,12 @@ def strategy(self, opponent: Player) -> Action: class ProtectedMindReader(MindReader): """A player that looks ahead at what the opponent will do and decides what - to do. It is also protected from mind control strategies""" + to do. It is also protected from mind control strategies + + Names: + + - Protected Mind reader: Original name by Jason Young + """ name = 'Protected Mind Reader' classifier = { @@ -70,7 +80,12 @@ def __setattr__(self, name: str, val: str): class MirrorMindReader(ProtectedMindReader): """A player that will mirror whatever strategy it is playing against by - cheating and calling the opponent's strategy function instead of its own.""" + cheating and calling the opponent's strategy function instead of its own. 
+ + Names: + + - Mirror Mind Reader: Original name by Brice Fernandes + """ name = 'Mirror Mind Reader' diff --git a/axelrod/strategies/negation.py b/axelrod/strategies/negation.py index 42e793f62..c623e8d8d 100644 --- a/axelrod/strategies/negation.py +++ b/axelrod/strategies/negation.py @@ -12,7 +12,7 @@ class Negation(Player): Names: - Negation - [http://www.prisoners-dilemma.com/competition.html] + - Negation: [PD2017]_ name = "Negation" diff --git a/axelrod/strategies/oncebitten.py b/axelrod/strategies/oncebitten.py index 6ef0a933f..00c3c4b77 100644 --- a/axelrod/strategies/oncebitten.py +++ b/axelrod/strategies/oncebitten.py @@ -9,6 +9,10 @@ class OnceBitten(Player): """ Cooperates once when the opponent defects, but if they defect twice in a row defaults to forgetful grudger for 10 turns defecting. + + Names: + + - Once Bitten: Original name by Holly Marissa """ name = 'Once Bitten' @@ -58,6 +62,10 @@ def reset(self): class FoolMeOnce(Player): """ Forgives one D then retaliates forever on a second D. + + Names: + + - Fool me once: Original name by Marc Harper """ name = 'Fool Me Once' @@ -85,6 +93,10 @@ class ForgetfulFoolMeOnce(Player): Forgives one D then retaliates forever on a second D. Sometimes randomly forgets the defection count, and so keeps a secondary count separate from the standard count in Player. + + Names: + + - Forgetful Fool Me Once: Original name by Marc Harper """ name = 'Forgetful Fool Me Once' @@ -131,6 +143,10 @@ class FoolMeForever(Player): """ Fool me once, shame on me. Teach a man to fool me and I'll be fooled for the rest of my life. 
+ + Names: + + - Fool Me Forever: Original name by Marc Harper """ name = 'Fool Me Forever' diff --git a/axelrod/strategies/punisher.py b/axelrod/strategies/punisher.py index 8ca006437..86eb6d921 100644 --- a/axelrod/strategies/punisher.py +++ b/axelrod/strategies/punisher.py @@ -15,7 +15,7 @@ class Punisher(Player): Names: - - Punisher: Original by Geraint Palmer + - Punisher: Original name by Geraint Palmer """ name = 'Punisher' @@ -79,7 +79,7 @@ class InversePunisher(Player): Names: - - Inverse Punisher: Original by Geraint Palmer + - Inverse Punisher: Original name by Geraint Palmer """ name = 'Inverse Punisher' @@ -137,7 +137,7 @@ class LevelPunisher(Player): Names: - - Level Punisher: Name from CoopSim https://github.com/jecki/CoopSim + - Level Punisher: [Eckhart2015]_ """ name = 'Level Punisher' diff --git a/axelrod/strategies/resurrection.py b/axelrod/strategies/resurrection.py index f217880d8..71694d3bc 100644 --- a/axelrod/strategies/resurrection.py +++ b/axelrod/strategies/resurrection.py @@ -13,7 +13,7 @@ class Resurrection(Player): Otherwise, the strategy plays like Tit-for-tat. Names: - - Resurrection: Name from CoopSim https://github.com/jecki/CoopSim + - Resurrection: [Eckhart2015]_ """ # These are various properties for the strategy @@ -46,7 +46,7 @@ class DoubleResurrection(Player): If the last five rounds were defections, the player cooperates. 
Names: - - DoubleResurrection: Name from CoopSim https://github.com/jecki/CoopSim + - DoubleResurrection: [Eckhart2015]_ """ name = 'DoubleResurrection' diff --git a/axelrod/strategies/retaliate.py b/axelrod/strategies/retaliate.py index 64836d452..0337c5292 100644 --- a/axelrod/strategies/retaliate.py +++ b/axelrod/strategies/retaliate.py @@ -13,7 +13,7 @@ class Retaliate(Player): Names: - - Retaliate: Original strategy by Owen Campbell + - Retaliate: Original name by Owen Campbell """ name = 'Retaliate' @@ -62,7 +62,7 @@ class Retaliate2(Retaliate): Names: - - Retaliate2: Original strategy by Owen Campbell + - Retaliate 2: Original name by Owen Campbell """ name = 'Retaliate 2' @@ -77,7 +77,7 @@ class Retaliate3(Retaliate): Names: - - Retaliate3: Original strategy by Owen Campbell + - Retaliate 3: Original name by Owen Campbell """ name = 'Retaliate 3' @@ -95,7 +95,7 @@ class LimitedRetaliate(Player): Names: - - LimitedRetaliate: Original strategy by Owen Campbell + - Limited Retaliate: Original name by Owen Campbell """ name = 'Limited Retaliate' @@ -169,7 +169,7 @@ class LimitedRetaliate2(LimitedRetaliate): Names: - - LimitedRetaliate2: Original strategy by Owen Campbell + - Limited Retaliate 2: Original name by Owen Campbell """ name = 'Limited Retaliate 2' @@ -188,7 +188,7 @@ class LimitedRetaliate3(LimitedRetaliate): Names: - - LimitedRetaliate3: Original strategy by Owen Campbell + - Limited Retaliate 3: Original name by Owen Campbell """ name = 'Limited Retaliate 3' diff --git a/axelrod/strategies/sequence_player.py b/axelrod/strategies/sequence_player.py index 05cd886dd..af8dcb723 100644 --- a/axelrod/strategies/sequence_player.py +++ b/axelrod/strategies/sequence_player.py @@ -11,6 +11,10 @@ class SequencePlayer(Player): """Abstract base class for players that use a generated sequence to determine their plays. 
+ + Names: + + - Sequence Player: Original name by Marc Harper """ def __init__(self, generator_function: FunctionType, diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py index 2ba5b6a15..adcf6b901 100644 --- a/axelrod/strategies/titfortat.py +++ b/axelrod/strategies/titfortat.py @@ -166,7 +166,7 @@ class SneakyTitForTat(Player): Names: - - Sneaky Tit For Tat: Reference Required + - Sneaky Tit For Tat: Original name by Karol Langner """ name = "Sneaky Tit For Tat" @@ -245,7 +245,7 @@ class HardTitForTat(Player): Names: - - Hard Tit For Tat: Reference Required + - Hard Tit For Tat: [PD2017]_ """ name = 'Hard Tit For Tat' @@ -277,7 +277,7 @@ class HardTitFor2Tats(Player): Names: - - Hard Tit For Two Tats: Reference Required + - Hard Tit For Two Tats: [Stewart2012]_ """ name = "Hard Tit For 2 Tats" @@ -498,6 +498,9 @@ class SlowTitForTwoTats(Player): A player plays C twice, then if the opponent plays the same move twice, plays that move. + Names: + + - Slow tit for two tats: Original name by Ranjini Das """ name = 'Slow Tit For Two Tats' @@ -650,7 +653,7 @@ class SlowTitForTwoTats2(Player): Names: - - Slow Tit For Tat [Prison1998]_ + - Slow Tit For Tat: [Prison1998]_ """ name = 'Slow Tit For Two Tats 2' diff --git a/docs/reference/bibliography.rst b/docs/reference/bibliography.rst index 2bdcc7603..0c81fa320 100644 --- a/docs/reference/bibliography.rst +++ b/docs/reference/bibliography.rst @@ -7,6 +7,7 @@ This is a collection of various bibliographic items referenced in the documentation. .. [Adami2013] Adami C and Hintze A. (2013) Evolutionary instability of zero-determinant strategies demonstrates that winning is not everything. Nature communications. https://www.nature.com/articles/ncomms3193 +.. [Amaral2016] Amaral, M. A., Wardil, L., Perc, M., & Da Silva, J. K. L. (2016). Stochastic win-stay-lose-shift strategy with dynamic aspirations in evolutionary social dilemmas. 
Physical Review E - Statistical, Nonlinear, and Soft Matter Physics, 94(3), 1–9. https://doi.org/10.1103/PhysRevE.94.032317 .. [Andre2013] Andre L. C., Honovan P., Felipe T. and Frederico G. (2013). Iterated Prisoner’s Dilemma - An extended analysis, http://abricom.org.br/wp-content/uploads/2016/03/bricsccicbic2013_submission_202.pdf .. [Ashlock2006] Ashlock, D., & Kim E. Y, & Leahy, N. (2006). Understanding Representational Sensitivity in the Iterated Prisoner’s Dilemma with Fingerprints. IEEE Transactions On Systems, Man, And Cybernetics, Part C: Applications And Reviews, 36 (4) .. [Ashlock2006b] Ashlock, W. & Ashlock, D. (2006). Changes in Prisoner's Dilemma Strategies Over Evolutionary Time With Different Population Sizes 2006 IEEE International Conference on Evolutionary Computation. http://DOI.org/10.1109/CEC.2006.1688322 @@ -23,8 +24,10 @@ documentation. .. [Bendor1993] Bendor, Jonathan. "Uncertainty and the Evolution of Cooperation." The Journal of Conflict Resolution, 37(4), 709–734. .. [Beaufils1997] Beaufils, B. and Delahaye, J. (1997). Our Meeting With Gradual: A Good Strategy For The Iterated Prisoner’s Dilemma. http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.4041 .. [Berg2015] Berg, P. Van Den, & Weissing, F. J. (2015). The importance of mechanisms for the evolution of cooperation. Proceedings of the Royal Society B-Biological Sciences, 282. -.. [Frean1994] Frean, Marcus R. “The Prisoner's Dilemma without Synchrony.” Proceedings: Biological Sciences, vol. 257, no. 1348, 1994, pp. 75–79. www.jstor.org/stable/50253. +.. [Eckhart2015] Eckhart Arnold (2016) CoopSim v0.9.9 beta 6. https://github.com/jecki/CoopSim/ +.. [Frean1994] Frean, Marcus R. "The Prisoner's Dilemma without Synchrony." Proceedings: Biological Sciences, vol. 257, no. 1348, 1994, pp. 75–79. www.jstor.org/stable/50253. .. [Hilde2013] Hilbe, C., Nowak, M.A. and Traulsen, A. (2013). Adaptive dynamics of extortion and compliance, PLoS ONE, 8(11), p. e77886. 
doi: 10.1371/journal.pone.0077886. +.. [Kuhn2017] Kuhn, Steven, "Prisoner's Dilemma", The Stanford Encyclopedia of Philosophy (Spring 2017 Edition), Edward N. Zalta (ed.), https://plato.stanford.edu/archives/spr2017/entries/prisoner-dilemma/ .. [Kraines1989] Kraines, D. & Kraines, V. Theor Decis (1989) 26: 47. doi:10.1007/BF00134056 .. [LessWrong2011] Zoo of Strategies (2011) LessWrong. Available at: http://lesswrong.com/lw/7f2/prisoners_dilemma_tournament_results/ .. [Li2007] Li, J, How to Design a Strategy to Win an IPD Tournament, in Kendall G., Yao X. and Chong S. (eds.) The iterated prisoner’s dilemma: 20 years on. World Scientific, chapter 4, pp. 29-40, 2007. @@ -34,10 +37,12 @@ documentation. .. [Mathieu2015] Mathieu, P. and Delahaye, J. (2015). New Winning Strategies for the Iterated Prisoner's Dilemma. Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems. +.. [Mittal2009] Mittal, S., & Deb, K. (2009). Optimal strategies of the iterated prisoner’s dilemma problem for multiple conflicting objectives. IEEE Transactions on Evolutionary Computation, 13(3), 554–565. https://doi.org/10.1109/TEVC.2008.2009459 .. [Nachbar1992] Nachbar J., Evolution in the finitely repeated prisoner’s dilemma, Journal of Economic Behavior & Organization, 19(3): 307-326, 1992. .. [Nowak1990] Nowak, M., & Sigmund, K. (1990). The evolution of stochastic strategies in the Prisoner's Dilemma. Acta Applicandae Mathematica. https://link.springer.com/article/10.1007/BF00049570 .. [Nowak1992] Nowak, M.., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0 .. [Nowak1993] Nowak, M., & Sigmund, K. (1993). A strategy of win-stay, lose-shift that outperforms tit-for-tat in the Prisoner’s Dilemma game. Nature, 364(6432), 56–58. http://doi.org/10.1038/364056a0 +.. [PD2017] http://www.prisoners-dilemma.com/competition.html (Accessed: 6 June 2017) .. [Press2012] Press, W. H., & Dyson, F. J. (2012). 
Iterated Prisoner’s Dilemma contains strategies that dominate any evolutionary opponent. Proceedings of the National Academy of Sciences, 109(26), 10409–10413. http://doi.org/10.1073/pnas.1206569109 .. [Prison1998] LIFL (1998) PRISON. Available at: http://www.lifl.fr/IPD/ipd.frame.html (Accessed: 19 September 2016). .. [Robson1989] Robson, Arthur, (1989), EFFICIENCY IN EVOLUTIONARY GAMES: DARWIN, NASH AND SECRET HANDSHAKE, Working Papers, Michigan - Center for Research on Economic & Social Theory, http://EconPapers.repec.org/RePEc:fth:michet:89-22.