diff --git a/axelrod/player.py b/axelrod/player.py index 060a52cc8..56f40680a 100644 --- a/axelrod/player.py +++ b/axelrod/player.py @@ -77,6 +77,7 @@ def __init__(self, myarg1, ...) def wrapper(self, *args, **kwargs): r = func(self, *args, **kwargs) self.init_args = args + self.init_kwargs = kwargs return r return wrapper @@ -99,6 +100,13 @@ class Player(object): 'manipulates_state': None } + def __new__(cls, *args, **kwargs): + """Caches arguments for Player cloning.""" + obj = super().__new__(cls) + obj.init_args = args + obj.init_kwargs = kwargs + return obj + def __init__(self): """Initiates an empty history and 0 score for a player.""" self.history = [] @@ -111,7 +119,6 @@ def __init__(self): self.cooperations = 0 self.defections = 0 self.state_distribution = defaultdict(int) - self.init_args = () self.set_match_attributes() def receive_match_attributes(self): @@ -162,12 +169,12 @@ def clone(self): """Clones the player without history, reapplying configuration parameters as necessary.""" - # You may be tempted to reimplement using the `copy` module + # You may be tempted to re-implement using the `copy` module # Note that this would require a deepcopy in some cases and there may # be significant changes required throughout the library. 
- # Consider overriding in special cases only if necessary + # Override in special cases only if absolutely necessary cls = self.__class__ - new_player = cls(*self.init_args) + new_player = cls(*self.init_args, **self.init_kwargs) new_player.match_attributes = copy.copy(self.match_attributes) return new_player diff --git a/axelrod/strategies/adaptive.py b/axelrod/strategies/adaptive.py index a07676017..6fb1594a0 100644 --- a/axelrod/strategies/adaptive.py +++ b/axelrod/strategies/adaptive.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player C, D = Actions.C, Actions.D @@ -24,7 +24,6 @@ class Adaptive(Player): 'manipulates_state': False } - @init_args def __init__(self, initial_plays=None): super().__init__() if not initial_plays: diff --git a/axelrod/strategies/ann.py b/axelrod/strategies/ann.py index bff279ff9..98f7479ce 100644 --- a/axelrod/strategies/ann.py +++ b/axelrod/strategies/ann.py @@ -1,7 +1,7 @@ # Original Source: https://gist.github.com/mojones/550b32c46a8169bb3cd89d917b73111a#file-ann-strategy-test-L60 # Original Author: Martin Jones, @mojones -from axelrod import Actions, Player, init_args, load_weights +from axelrod import Actions, Player, load_weights C, D = Actions.C, Actions.D nn_weights = load_weights() @@ -62,7 +62,6 @@ class ANN(Player): 'long_run_time': False } - @init_args def __init__(self, weights, num_features, num_hidden): super().__init__() (i2h, h2o, bias) = split_weights(weights, num_features, num_hidden) @@ -189,7 +188,6 @@ class EvolvedANN(ANN): name = "Evolved ANN" - @init_args def __init__(self): num_features, num_hidden, weights = nn_weights["Evolved ANN"] super().__init__(weights, num_features, num_hidden) @@ -207,7 +205,6 @@ class EvolvedANN5(ANN): name = "Evolved ANN 5" - @init_args def __init__(self): num_features, num_hidden, weights = nn_weights["Evolved ANN 5"] super().__init__(weights, num_features, num_hidden) @@ -225,7 +222,6 @@ class EvolvedANNNoise05(ANN): name = 
"Evolved ANN 5 Noise 05" - @init_args def __init__(self): num_features, num_hidden, weights = nn_weights["Evolved ANN 5 Noise 05"] super().__init__(weights, num_features, num_hidden) diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/strategies/axelrod_first.py index a4f709c48..ca08b7830 100644 --- a/axelrod/strategies/axelrod_first.py +++ b/axelrod/strategies/axelrod_first.py @@ -4,7 +4,7 @@ import random -from axelrod import Actions, Player, init_args, flip_action, random_choice +from axelrod import Actions, Player, flip_action, random_choice from.memoryone import MemoryOnePlayer @@ -34,7 +34,6 @@ class Davis(Player): 'manipulates_state': False } - @init_args def __init__(self, rounds_to_cooperate=10): """ Parameters @@ -76,7 +75,6 @@ class RevisedDowning(Player): 'manipulates_state': False } - @init_args def __init__(self, revised=True): super().__init__() self.revised = revised @@ -159,7 +157,6 @@ class Feld(Player): 'manipulates_state': False } - @init_args def __init__(self, start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200): """ @@ -248,7 +245,6 @@ class Joss(MemoryOnePlayer): name = "Joss" - @init_args def __init__(self, p=0.9): """ Parameters @@ -269,7 +265,7 @@ class Nydegger(Player): """ Submitted to Axelrod's first tournament by Rudy Nydegger. - The program begins with tit for tat for the first three moves, except + The program begins with tit for tat for the first three moves, except that if it was the only one to cooperate on the first move and the only one to defect on the second move, it defects on the third move. After the third move, its choice is determined from the 3 preceding outcomes in the following manner. Let A be the sum formed by counting the other's defection as 2 points and one's own as 1 point, and giving weights of 16, 4, and 1 to the preceding three moves in chronological order. The choice can be described as defecting only when A equals 1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, or 61. 
@@ -420,7 +416,6 @@ class Tullock(Player): 'manipulates_state': False } - @init_args def __init__(self, rounds_to_cooperate=11): """ Parameters @@ -458,7 +453,7 @@ class UnnamedStrategy(Player): score than the other player. Unfortunately, the complex process of adjustment frequently left the probability of cooperation in the 30% to 70% range, and therefore the rule appeared random to many other players. - + Names: - Unnamed Strategy: [Axelrod1980]_ diff --git a/axelrod/strategies/defector.py b/axelrod/strategies/defector.py index 20527a48c..f9e5cfa9d 100644 --- a/axelrod/strategies/defector.py +++ b/axelrod/strategies/defector.py @@ -2,6 +2,7 @@ C, D = Actions.C, Actions.D + class Defector(Player): """A player who only ever defects.""" diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/strategies/finite_state_machines.py index b58ea6192..2ff850b6a 100644 --- a/axelrod/strategies/finite_state_machines.py +++ b/axelrod/strategies/finite_state_machines.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player C, D = Actions.C, Actions.D @@ -47,7 +47,6 @@ class FSMPlayer(Player): 'manipulates_state': False } - @init_args def __init__(self, transitions=None, initial_state=None, initial_action=None): if not transitions: @@ -91,7 +90,6 @@ class Fortress3(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, D, 2, D), @@ -121,7 +119,6 @@ class Fortress4(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, C, 1, D), @@ -151,7 +148,6 @@ class Predator(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (0, C, 0, D), @@ -195,7 +191,6 @@ class Pun1(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, C, 2, C), @@ -221,7 +216,6 @@ class Raider(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (0, C, 2, D), @@ -251,7 
+245,6 @@ class Ripoff(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, C, 2, C), @@ -279,7 +272,6 @@ class SolutionB1(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, C, 2, D), @@ -307,7 +299,6 @@ class SolutionB5(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, C, 2, C), @@ -341,7 +332,6 @@ class Thumper(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (1, C, 1, C), @@ -373,7 +363,6 @@ class EvolvedFSM4(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (0, C, 0, C), @@ -410,7 +399,6 @@ class EvolvedFSM16(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (0, C, 0, C), @@ -471,7 +459,6 @@ class EvolvedFSM16Noise05(FSMPlayer): 'manipulates_state': False } - @init_args def __init__(self): transitions = ( (0, C, 8, C), diff --git a/axelrod/strategies/forgiver.py b/axelrod/strategies/forgiver.py index 8435f729b..e12a55f8b 100644 --- a/axelrod/strategies/forgiver.py +++ b/axelrod/strategies/forgiver.py @@ -2,6 +2,7 @@ C, D = Actions.C, Actions.D + class Forgiver(Player): """ A player starts by cooperating however will defect if at any point diff --git a/axelrod/strategies/gambler.py b/axelrod/strategies/gambler.py index 4148997f3..442aa2339 100644 --- a/axelrod/strategies/gambler.py +++ b/axelrod/strategies/gambler.py @@ -5,7 +5,7 @@ https://gist.github.com/GDKO/60c3d0fd423598f3c4e4 """ -from axelrod import Actions, random_choice, load_pso_tables +from axelrod import Actions, load_pso_tables, random_choice from .lookerup import LookerUp, create_lookup_table_from_pattern diff --git a/axelrod/strategies/geller.py b/axelrod/strategies/geller.py index 4c40cc38f..08b4f3838 100644 --- a/axelrod/strategies/geller.py +++ b/axelrod/strategies/geller.py @@ -1,11 +1,10 @@ -# -*- coding: utf-8 -*- - import inspect from axelrod 
import Actions, Player, random_choice C, D = Actions.C, Actions.D + class Geller(Player): """Observes what the player will do in the next round and adjust. diff --git a/axelrod/strategies/gobymajority.py b/axelrod/strategies/gobymajority.py index 90df5bcef..7634ac861 100644 --- a/axelrod/strategies/gobymajority.py +++ b/axelrod/strategies/gobymajority.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player import copy @@ -29,7 +29,6 @@ class GoByMajority(Player): 'memory_depth': float('inf') # memory_depth may be altered by __init__ } - @init_args def __init__(self, memory_depth=float('inf'), soft=True): """ Parameters @@ -86,7 +85,6 @@ class GoByMajority40(GoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 40 - @init_args def __init__(self, memory_depth=40, soft=True): super().__init__(memory_depth=memory_depth, soft=soft) @@ -99,7 +97,6 @@ class GoByMajority20(GoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 20 - @init_args def __init__(self, memory_depth=20, soft=True): super().__init__(memory_depth=memory_depth, soft=soft) @@ -112,7 +109,6 @@ class GoByMajority10(GoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 10 - @init_args def __init__(self, memory_depth=10, soft=True): super().__init__(memory_depth=memory_depth, soft=soft) @@ -125,7 +121,6 @@ class GoByMajority5(GoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 5 - @init_args def __init__(self, memory_depth=5, soft=True): super().__init__(memory_depth=memory_depth, soft=soft) @@ -140,7 +135,6 @@ class HardGoByMajority(GoByMajority): """ name = 'Hard Go By Majority' - @init_args def __init__(self, memory_depth=float('inf'), soft=False): super().__init__(memory_depth=memory_depth, soft=soft) @@ -153,7 +147,6 @@ class HardGoByMajority40(HardGoByMajority): classifier = 
copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 40 - @init_args def __init__(self, memory_depth=40, soft=False): super().__init__(memory_depth=memory_depth, soft=soft) @@ -166,7 +159,6 @@ class HardGoByMajority20(HardGoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 20 - @init_args def __init__(self, memory_depth=20, soft=False): super().__init__(memory_depth=memory_depth, soft=soft) @@ -179,7 +171,6 @@ class HardGoByMajority10(HardGoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 10 - @init_args def __init__(self, memory_depth=10, soft=False): super().__init__(memory_depth=memory_depth, soft=soft) @@ -192,6 +183,5 @@ class HardGoByMajority5(HardGoByMajority): classifier = copy.copy(GoByMajority.classifier) classifier['memory_depth'] = 5 - @init_args def __init__(self, memory_depth=5, soft=False): super().__init__(memory_depth=memory_depth, soft=soft) diff --git a/axelrod/strategies/gradualkiller.py b/axelrod/strategies/gradualkiller.py index 859e82114..f415e9ec9 100644 --- a/axelrod/strategies/gradualkiller.py +++ b/axelrod/strategies/gradualkiller.py @@ -1,8 +1,9 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player from axelrod.strategy_transformers import InitialTransformer C, D = Actions.C, Actions.D + @InitialTransformer((D, D, D, D, D, C, C), name_prefix=None) class GradualKiller(Player): """ @@ -32,4 +33,4 @@ class GradualKiller(Player): def strategy(self, opponent): if opponent.history[5:7] == [D, D]: return D - return C \ No newline at end of file + return C diff --git a/axelrod/strategies/grudger.py b/axelrod/strategies/grudger.py index 91916de77..f262fb29b 100644 --- a/axelrod/strategies/grudger.py +++ b/axelrod/strategies/grudger.py @@ -2,6 +2,7 @@ C, D = Actions.C, Actions.D + class Grudger(Player): """ A player starts by cooperating however will defect if at any point the diff --git a/axelrod/strategies/grumpy.py 
b/axelrod/strategies/grumpy.py index 782d5a22a..1d90820f3 100644 --- a/axelrod/strategies/grumpy.py +++ b/axelrod/strategies/grumpy.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player C, D = Actions.C, Actions.D @@ -19,7 +19,6 @@ class Grumpy(Player): 'manipulates_state': False } - @init_args def __init__(self, starting_state='Nice', grumpy_threshold=10, nice_threshold=-10): """ diff --git a/axelrod/strategies/handshake.py b/axelrod/strategies/handshake.py index b171b8029..eef6f6043 100644 --- a/axelrod/strategies/handshake.py +++ b/axelrod/strategies/handshake.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player C, D = Actions.C, Actions.D @@ -23,7 +23,6 @@ class Handshake(Player): 'manipulates_state': False } - @init_args def __init__(self, initial_plays=None): super().__init__() if not initial_plays: diff --git a/axelrod/strategies/hmm.py b/axelrod/strategies/hmm.py index cd244e3e5..a4887def5 100644 --- a/axelrod/strategies/hmm.py +++ b/axelrod/strategies/hmm.py @@ -1,6 +1,6 @@ from numpy.random import choice -from axelrod import Actions, Player, init_args, random_choice +from axelrod import Actions, Player, random_choice C, D = Actions.C, Actions.D @@ -98,7 +98,6 @@ class HMMPlayer(Player): 'manipulates_state': False } - @init_args def __init__(self, transitions_C=None, transitions_D=None, emission_probabilities=None, initial_state=0, initial_action=C): @@ -163,8 +162,6 @@ class EvolvedHMM5(HMMPlayer): 'manipulates_state': False } - - @init_args def __init__(self): initial_state = 3 initial_action = C @@ -183,4 +180,3 @@ def __init__(self): emissions = [1, 0, 0, 1, 0.111] super().__init__(t_C, t_D, emissions, initial_state, initial_action) - diff --git a/axelrod/strategies/human.py b/axelrod/strategies/human.py index cdddda6a2..0965fa2b9 100644 --- a/axelrod/strategies/human.py +++ b/axelrod/strategies/human.py @@ -1,5 +1,5 @@ from os import linesep -from 
axelrod import Actions, Player, init_args +from axelrod import Actions, Player from prompt_toolkit import prompt from prompt_toolkit.token import Token from prompt_toolkit.styles import style_from_dict @@ -49,7 +49,6 @@ class Human(Player): 'manipulates_state': False } - @init_args def __init__(self, name='Human', c_symbol='C', d_symbol='D'): """ Parameters diff --git a/axelrod/strategies/lookerup.py b/axelrod/strategies/lookerup.py index b42771329..a1b1ad742 100644 --- a/axelrod/strategies/lookerup.py +++ b/axelrod/strategies/lookerup.py @@ -3,7 +3,6 @@ import sys from axelrod import Actions, Player, init_args, load_lookerup_tables -from axelrod.strategy_transformers import InitialTransformer module = sys.modules[__name__] C, D = Actions.C, Actions.D @@ -23,6 +22,7 @@ def create_lookup_table_keys(plays, op_plays, op_start_plays): other_histories)) return lookup_table_keys + def create_lookup_table_from_pattern(plays, op_plays, op_start_plays, pattern): lookup_table_keys = create_lookup_table_keys( plays=plays, op_plays=op_plays, @@ -99,7 +99,6 @@ class LookerUp(Player): 'manipulates_state': False } - @init_args def __init__(self, lookup_table=None, initial_actions=None): """ If no lookup table is provided to the constructor, then use the TFT one. 
diff --git a/axelrod/strategies/memoryone.py b/axelrod/strategies/memoryone.py index 52b1fe009..22b619f92 100644 --- a/axelrod/strategies/memoryone.py +++ b/axelrod/strategies/memoryone.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args, random_choice +from axelrod import Actions, Player, random_choice C, D = Actions.C, Actions.D @@ -21,7 +21,6 @@ class MemoryOnePlayer(Player): 'manipulates_state': False } - @init_args def __init__(self, four_vector=None, initial=C): """ Parameters @@ -92,7 +91,6 @@ class WinStayLoseShift(MemoryOnePlayer): 'manipulates_state': False } - @init_args def __init__(self, initial=C): super().__init__() self.set_four_vector([1, 0, 0, 1]) @@ -118,7 +116,6 @@ class WinShiftLoseStay(MemoryOnePlayer): 'manipulates_state': False } - @init_args def __init__(self, initial=D): super().__init__() self.set_four_vector([0, 1, 1, 0]) @@ -180,7 +177,6 @@ class FirmButFair(MemoryOnePlayer): name = 'Firm But Fair' - @init_args def __init__(self): four_vector = (1, 0, 1, 2/3) super().__init__(four_vector) @@ -192,7 +188,6 @@ class StochasticCooperator(MemoryOnePlayer): name = 'Stochastic Cooperator' - @init_args def __init__(self): four_vector = (0.935, 0.229, 0.266, 0.42) super().__init__(four_vector) @@ -204,7 +199,6 @@ class StochasticWSLS(MemoryOnePlayer): name = 'Stochastic WSLS' - @init_args def __init__(self, ep=0.05): """ Parameters @@ -282,8 +276,7 @@ class ZDExtort2(LRPlayer): name = 'ZD-Extort-2' - @init_args - def __init__(self, phi=1/9, s=0.5): + def __init__(self, phi=1/9, s=0.5): """ Parameters @@ -307,7 +300,6 @@ class ZDExtort2v2(LRPlayer): name = 'ZD-Extort-2 v2' - @init_args def __init__(self, phi=1/8, s=0.5, l=1): """ Parameters @@ -332,7 +324,6 @@ class ZDExtort4(LRPlayer): name = 'ZD-Extort-4' - @init_args def __init__(self, phi=4/17, s=0.25, l=1): """ Parameters @@ -356,7 +347,6 @@ class ZDGen2(LRPlayer): name = 'ZD-GEN-2' - @init_args def __init__(self, phi=1/8, s=0.5, l=3): """ Parameters @@ -380,7 +370,6 @@
class ZDGTFT2(LRPlayer): name = 'ZD-GTFT-2' - @init_args def __init__(self, phi=0.25, s=0.5): """ Parameters @@ -405,7 +394,6 @@ class ZDSet2(LRPlayer): name = 'ZD-SET-2' - @init_args def __init__(self, phi=1/4, s=0., l=2): """ Parameters @@ -435,7 +423,6 @@ class SoftJoss(MemoryOnePlayer): name = "Soft Joss" - @init_args def __init__(self, q=0.9): """ Parameters diff --git a/axelrod/strategies/meta.py b/axelrod/strategies/meta.py index e4e2f3b4e..f4779ae08 100644 --- a/axelrod/strategies/meta.py +++ b/axelrod/strategies/meta.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args, obey_axelrod +from axelrod import Actions, Player, obey_axelrod from axelrod.strategy_transformers import NiceTransformer from ._strategies import all_strategies from .hunter import ( @@ -26,7 +26,6 @@ class MetaPlayer(Player): 'manipulates_state': False } - @init_args def __init__(self, team=None): super().__init__() # The default is to use all strategies available, but we need to import @@ -85,7 +84,6 @@ class MetaMajority(MetaPlayer): name = "Meta Majority" - @init_args def __init__(self, team=None): super().__init__(team=team) @@ -101,7 +99,6 @@ class MetaMinority(MetaPlayer): name = "Meta Minority" - @init_args def __init__(self, team=None): super().__init__(team=team) @@ -117,7 +114,6 @@ class MetaWinner(MetaPlayer): name = "Meta Winner" - @init_args def __init__(self, team=None): super().__init__(team=team) # For each player, we will keep the history of proposed moves and @@ -267,7 +263,6 @@ class MetaMajorityMemoryOne(MetaMajority): name = "Meta Majority Memory One" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] <= 1] super().__init__(team=team) @@ -279,7 +274,6 @@ class MetaMajorityFiniteMemory(MetaMajority): name = "Meta Majority Finite Memory" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] < float('inf')] @@ -291,7 +285,6 @@ class 
MetaMajorityLongMemory(MetaMajority): name = "Meta Majority Long Memory" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] == float('inf')] @@ -303,7 +296,6 @@ class MetaWinnerMemoryOne(MetaWinner): name = "Meta Winner Memory One" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] <= 1] super().__init__(team=team) @@ -315,7 +307,6 @@ class MetaWinnerFiniteMemory(MetaWinner): name = "Meta Winner Finite Memory" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] < float('inf')] @@ -327,7 +318,6 @@ class MetaWinnerLongMemory(MetaWinner): name = "Meta Winner Long Memory" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] == float('inf')] @@ -339,7 +329,6 @@ class MetaWinnerDeterministic(MetaWinner): name = "Meta Winner Deterministic" - @init_args def __init__(self): team = [s for s in ordinary_strategies if not s().classifier['stochastic']] @@ -352,7 +341,6 @@ class MetaWinnerStochastic(MetaWinner): name = "Meta Winner Stochastic" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['stochastic']] @@ -387,7 +375,6 @@ class MetaMixer(MetaPlayer): 'manipulates_state': False } - @init_args def __init__(self, team=None, distribution=None): self.distribution = distribution super().__init__(team=team) @@ -402,7 +389,6 @@ class NMWEDeterministic(NiceMetaWinnerEnsemble): name = "NMWE Deterministic" - @init_args def __init__(self): team = [s for s in ordinary_strategies if not s().classifier['stochastic']] @@ -415,7 +401,6 @@ class NMWEStochastic(NiceMetaWinnerEnsemble): name = "NMWE Stochastic" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['stochastic']] @@ -427,7 +412,6 @@ class NMWEFiniteMemory(NiceMetaWinnerEnsemble): name = "NMWE Finite Memory" - @init_args def __init__(self): 
team = [s for s in ordinary_strategies if s().classifier['memory_depth'] < float('inf')] @@ -439,7 +423,6 @@ class NMWELongMemory(NiceMetaWinnerEnsemble): name = "NMWE Long Memory" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] == float('inf')] @@ -451,7 +434,6 @@ class NMWEMemoryOne(NiceMetaWinnerEnsemble): name = "NMWE Memory One" - @init_args def __init__(self): team = [s for s in ordinary_strategies if s().classifier['memory_depth'] <= 1] diff --git a/axelrod/strategies/mutual.py b/axelrod/strategies/mutual.py index 485ddaefb..53c5a3e72 100644 --- a/axelrod/strategies/mutual.py +++ b/axelrod/strategies/mutual.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player from axelrod.random_ import random_choice C, D = Actions.C, Actions.D diff --git a/axelrod/strategies/negation.py b/axelrod/strategies/negation.py index de289f896..5f3594d8a 100644 --- a/axelrod/strategies/negation.py +++ b/axelrod/strategies/negation.py @@ -1,9 +1,7 @@ -import random -from axelrod import Actions, Player, random_choice, flip_action, init_args -from axelrod.strategy_transformers import TrackHistoryTransformer - +from axelrod import Actions, Player, random_choice, flip_action C, D = Actions.C, Actions.D + class Negation(Player): """ A player starts by cooperating or defecting randomly if it's their first move, @@ -29,7 +27,7 @@ def strategy(self, opponent): # Random first move if not self.history: return random_choice(); - + # Act opposite of opponent otherwise return flip_action(opponent.history[-1]) - + diff --git a/axelrod/strategies/oncebitten.py b/axelrod/strategies/oncebitten.py index 24cd13e5f..4837434ba 100644 --- a/axelrod/strategies/oncebitten.py +++ b/axelrod/strategies/oncebitten.py @@ -1,8 +1,9 @@ import random -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player C, D = Actions.C, Actions.D + class OnceBitten(Player): """ Cooperates once 
when the opponent defects, but if they defect twice in a row defaults to forgetful grudger for 10 turns defecting @@ -95,7 +96,6 @@ class ForgetfulFoolMeOnce(Player): 'manipulates_state': False } - @init_args def __init__(self, forget_probability=0.05): """ Parameters diff --git a/axelrod/strategies/prober.py b/axelrod/strategies/prober.py index 3449832c0..66c77a540 100644 --- a/axelrod/strategies/prober.py +++ b/axelrod/strategies/prober.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args, random_choice +from axelrod import Actions, Player, random_choice import random @@ -256,7 +256,6 @@ class NaiveProber(Player): 'manipulates_state': False } - @init_args def __init__(self, p=0.1): """ Parameters @@ -313,7 +312,6 @@ class RemorsefulProber(NaiveProber): 'manipulates_state': False } - @init_args def __init__(self, p=0.1): super().__init__(p) self.probing = False diff --git a/axelrod/strategies/punisher.py b/axelrod/strategies/punisher.py index f3d9b5890..4aa0f7ded 100644 --- a/axelrod/strategies/punisher.py +++ b/axelrod/strategies/punisher.py @@ -2,6 +2,7 @@ C, D = Actions.C, Actions.D + class Punisher(Player): """ A player starts by cooperating however will defect if at any point the diff --git a/axelrod/strategies/rand.py b/axelrod/strategies/rand.py index a18929f97..530cb1f04 100644 --- a/axelrod/strategies/rand.py +++ b/axelrod/strategies/rand.py @@ -1,4 +1,4 @@ -from axelrod import Player, init_args, random_choice +from axelrod import Player, random_choice class Random(Player): @@ -21,7 +21,6 @@ class Random(Player): 'manipulates_state': False } - @init_args def __init__(self, p=0.5): """ Parameters diff --git a/axelrod/strategies/retaliate.py b/axelrod/strategies/retaliate.py index fcdce46d2..614da0cfb 100644 --- a/axelrod/strategies/retaliate.py +++ b/axelrod/strategies/retaliate.py @@ -1,6 +1,6 @@ from collections import defaultdict -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player C, D = Actions.C, 
Actions.D @@ -26,7 +26,6 @@ class Retaliate(Player): 'manipulates_state': False } - @init_args def __init__(self, retaliation_threshold=0.1): """ Uses the basic init from the Player class, but also set the name to @@ -112,7 +111,6 @@ class LimitedRetaliate(Player): 'manipulates_state': False } - @init_args def __init__(self, retaliation_threshold=0.1, retaliation_limit=20): """ Parameters diff --git a/axelrod/strategies/sequence_player.py b/axelrod/strategies/sequence_player.py index c36715184..3f4281237 100644 --- a/axelrod/strategies/sequence_player.py +++ b/axelrod/strategies/sequence_player.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player from axelrod._strategy_utils import thue_morse_generator @@ -7,7 +7,6 @@ class SequencePlayer(Player): determine their plays. """ - @init_args def __init__(self, generator_function, generator_args=()): super().__init__() # Initialize the sequence generator @@ -59,7 +58,6 @@ class ThueMorse(SequencePlayer): 'manipulates_state': False } - @init_args def __init__(self): super().__init__(thue_morse_generator, (0,)) @@ -83,7 +81,6 @@ class ThueMorseInverse(ThueMorse): 'manipulates_state': False } - @init_args def __init__(self): super(ThueMorse, self).__init__(thue_morse_generator, (0,)) diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py index e33c32c30..196e0fe6a 100644 --- a/axelrod/strategies/titfortat.py +++ b/axelrod/strategies/titfortat.py @@ -1,4 +1,4 @@ -from axelrod import Actions, Player, init_args +from axelrod import Actions, Player from axelrod.strategy_transformers import TrackHistoryTransformer C, D = Actions.C, Actions.D @@ -286,7 +286,6 @@ class OmegaTFT(Player): 'manipulates_state': False } - @init_args def __init__(self, deadlock_threshold=3, randomness_threshold=8): super().__init__() self.deadlock_threshold = deadlock_threshold @@ -527,7 +526,6 @@ class AdaptiveTitForTat(Player): } world = 0.5 - @init_args def __init__(self, 
rate=0.5): super().__init__() self.rate, self.starting_rate = rate, rate diff --git a/axelrod/strategy_transformers.py b/axelrod/strategy_transformers.py index 9bd3d93d3..6b0db9fc4 100644 --- a/axelrod/strategy_transformers.py +++ b/axelrod/strategy_transformers.py @@ -191,7 +191,8 @@ def dual_wrapper(player, opponent, proposed_action): action: an axelrod.Action, C or D """ if not player.history: - player.original_player = player.original_class(*player.init_args) + player.original_player = player.original_class(*player.init_args, + **player.init_kwargs) action = player.original_player.strategy(opponent) player.original_player.history.append(action) diff --git a/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst b/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst index 3bf5b85ff..ec2b9856b 100644 --- a/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst +++ b/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst @@ -143,12 +143,7 @@ Now we have separate names for different instantiations:: This helps distinguish players in tournaments that have multiple instances of the same strategy. If you modify the :code:`__repr__` method of player, be sure to add an appropriate test. - -Similarly, if your strategy's :code:`__init__` method takes any parameters other -than :code:`self`, you can decorate it with :code:`@init_args` to ensure that -when it is cloned that the correct parameter values will be applied. -(This will trip a test if ommitted.) - + There are various examples of helpful functions and properties that make writing strategies easier. Do not hesitate to get in touch with the Axelrod-Python team for guidance.