diff --git a/.gitignore b/.gitignore index af448dbd4..3bfad54a1 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ _build/ *.log dist/ MANIFEST +Axelrod.egg-info diff --git a/.travis.yml b/.travis.yml index ce9d30bcc..c648d74d0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,5 +20,7 @@ script: - cd .. - coverage run --source=axelrod -m unittest discover - coverage report -m + # Run the doctests + - sh doctest after_success: - coveralls diff --git a/docs/auto_generate_strategies_list.py b/docs/auto_generate_strategies_list.py deleted file mode 100644 index f8b4da374..000000000 --- a/docs/auto_generate_strategies_list.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -A script to generate the file needed for the strategy documentation. - -Run: - - python strategies.py > strategies.rst -""" -import os -import sys - -sys.path.insert(0, os.path.abspath("../")) -from axelrod import basic_strategies -from axelrod import ordinary_strategies -from axelrod import cheating_strategies - - -def print_header(string, character): - print string - print character * len(string) - print "" - - -if __name__ == "__main__": - - print ".. currentmodule:: axelrod.strategies" - print_header("Index of strategies", '=') - - print_header("Basic strategies", '-') - for strategy in basic_strategies: - print ".. autoclass:: %s" % strategy.__name__ - - print "" - - print_header("Further (honest) Strategies", '-') - for strategy in ordinary_strategies: - print ".. autoclass:: %s" % strategy.__name__ - - print "" - - print_header("Cheating strategies", '-') - for strategy in cheating_strategies: - print ".. autoclass:: %s" % strategy.__name__ diff --git a/docs/background.rst b/docs/background/background.rst similarity index 100% rename from docs/background.rst rename to docs/background/background.rst diff --git a/docs/description.rst b/docs/description.rst deleted file mode 100644 index b7387825f..000000000 --- a/docs/description.rst +++ /dev/null @@ -1,4 +0,0 @@ -Description -=========== - -Describe the available strategies. diff --git a/docs/index.rst b/docs/index.rst index ca4209416..58698af74 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,25 +3,14 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Welcome to the documentation for an implementation of Axelrod's tournament in Python -==================================================================================== - -This project is both: - -* A python library that reproduces Axelrod's tournament. -* A github experiment allowing anyone to contribute a strategy. - - -Contents: +Welcome to the documentation for the Axelrod Python library +=========================================================== .. toctree:: :maxdepth: 2 - background - usage - overview_of_strategies - index_of_strategies - contributing + tutorials/index.rst + reference/index.rst Indices and tables diff --git a/docs/index_of_strategies.rst b/docs/index_of_strategies.rst deleted file mode 100644 index 68e2d855f..000000000 --- a/docs/index_of_strategies.rst +++ /dev/null @@ -1,90 +0,0 @@ -.. currentmodule:: axelrod.strategies -Index of strategies -=================== - -Basic strategies ----------------- - -.. autoclass:: Alternator -.. autoclass:: Cooperator -.. autoclass:: Defector -.. autoclass:: Random -.. autoclass:: TitForTat - -Further (honest) Strategies ---------------------------- - -.. autoclass:: AlternatorHunter -.. autoclass:: Appeaser -.. autoclass:: AntiTitForTat -.. autoclass:: ArrogantQLearner -.. 
autoclass:: AverageCopier -.. autoclass:: Bully -.. autoclass:: CautiousQLearner -.. autoclass:: CooperatorHunter -.. autoclass:: Davis -.. autoclass:: DefectorHunter -.. autoclass:: e -.. autoclass:: Feld -.. autoclass:: FoolMeOnce -.. autoclass:: ForgetfulFoolMeOnce -.. autoclass:: ForgetfulGrudger -.. autoclass:: Forgiver -.. autoclass:: ForgivingTitForTat -.. autoclass:: GTFT -.. autoclass:: GoByMajority -.. autoclass:: GoByMajority10 -.. autoclass:: GoByMajority20 -.. autoclass:: GoByMajority40 -.. autoclass:: GoByMajority5 -.. autoclass:: Golden -.. autoclass:: Grofman -.. autoclass:: Grudger -.. autoclass:: Grumpy -.. autoclass:: HesitantQLearner -.. autoclass:: Inverse -.. autoclass:: InversePunisher -.. autoclass:: Joss -.. autoclass:: LimitedRetaliate -.. autoclass:: LimitedRetaliate2 -.. autoclass:: LimitedRetaliate3 -.. autoclass:: MathConstantHunter -.. autoclass:: MetaHunter -.. autoclass:: MetaMajority -.. autoclass:: MetaMinority -.. autoclass:: MetaWinner -.. autoclass:: NiceAverageCopier -.. autoclass:: OnceBitten -.. autoclass:: OppositeGrudger -.. autoclass:: Pi -.. autoclass:: Punisher -.. autoclass:: RandomHunter -.. autoclass:: Retaliate -.. autoclass:: Retaliate2 -.. autoclass:: Retaliate3 -.. autoclass:: RiskyQLearner -.. autoclass:: Shubik -.. autoclass:: SneakyTitForTat -.. autoclass:: StochasticWSLS -.. autoclass:: SuspiciousTitForTat -.. autoclass:: TitFor2Tats -.. autoclass:: TrickyCooperator -.. autoclass:: TrickyDefector -.. autoclass:: Tullock -.. autoclass:: TwoTitsForTat -.. autoclass:: WinStayLoseShift -.. autoclass:: ZDExtort2 -.. autoclass:: ZDGTFT2 - -Cheating strategies ------------------- - -.. autoclass:: Darwin -.. autoclass:: Geller -.. autoclass:: GellerCooperator -.. autoclass:: GellerDefector -.. autoclass:: MindBender -.. autoclass:: MindController -.. autoclass:: MindReader -.. autoclass:: MindWarper -.. autoclass:: ProtectedMindReader diff --git a/docs/reference/glossary.rst b/docs/reference/glossary.rst new file mode 100644 index 000000000..9ba02e7c4 --- /dev/null +++ b/docs/reference/glossary.rst @@ -0,0 +1,68 @@ +Glossary +======== + +There are a variety of terms used in the documentation and throughout the +library. Here is an overview: + +An action +--------- + +An **action** is either :code:`C` or :code:`D`. +You can access these actions as follows but should not really have a reason to:: + + >>> import axelrod as axl + >>> axl.Actions.C 'C' + >>> axl.Actions.D 'D' + +A play +------ + +A **play** is a single player choosing an **action**. +In terms of code this is equivalent to:: + + >>> p1, p2 = axl.Cooperator(), axl.Defector() + >>> p1.play(p2) # This constitutes two 'plays' (p1 plays and p2 plays). + +This is equivalent to :code:`p2.play(p1)`. Either function invokes both +:code:`p1.strategy(p2)` and :code:`p2.strategy(p1)`. + +A turn +------ + +A **turn** is a one-shot interaction between two players. It is in effect a +composition of two **plays**. + +Each turn has four possible outcomes: :code:`(C, C)`, :code:`(C, D)`, +:code:`(D, C)`, or :code:`(D, D)`. + +A match +------- + +A **match** is a number of consecutive **turns**. The default number of turns +used in the tournament is 200. Here is a single match between two players over +10 turns:: + + >>> p1, p2 = axl.Cooperator(), axl.Defector() + >>> for turn in range(10): + ... 
p1.play(p2) + >>> p1.history, p2.history + (['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C'], ['D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D']) + +A win +----- + +A **win** is attributed to the player who has the higher total score at the end +of a match. For the example above, :code:`Defector` would win that match. + +A round robin +------------- + +A **round robin** is the set of all potential (order invariant) matches between +a given collection of players. + +A tournament +------------ + +A **tournament** is a repetition of round robins so as to smooth out stochastic effects. diff --git a/docs/reference/index.rst b/docs/reference/index.rst new file mode 100644 index 000000000..a67deacd3 --- /dev/null +++ b/docs/reference/index.rst @@ -0,0 +1,12 @@ +Reference +========= + +This section is the reference guide for the various components of the library. + +Contents: + +.. toctree:: + :maxdepth: 2 + + overview_of_strategies.rst + glossary.rst diff --git a/docs/overview_of_strategies.rst b/docs/reference/overview_of_strategies.rst similarity index 75% rename from docs/overview_of_strategies.rst rename to docs/reference/overview_of_strategies.rst index 9d3a4fb10..533e90c9e 100644 --- a/docs/overview_of_strategies.rst +++ b/docs/reference/overview_of_strategies.rst @@ -64,29 +64,22 @@ Implementation Here is a quick implementation of this in the library:: - import axelrod - p1 = axelrod.TitForTat() # Create a player that plays tit for tat - p2 = axelrod.Cooperator() # Create a player that always cooperates - for round in range(5): - p1.play(p2) - - print p1.history - -which gives:: - + >>> import axelrod + >>> p1 = axelrod.TitForTat() # Create a player that plays tit for tat + >>> p2 = axelrod.Cooperator() # Create a player that always cooperates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history ['C', 'C', 'C', 'C', 'C'] We see that Tit for Tat cooperated every time, let us see how things change when it plays against a player that always defects:: - p1 = axelrod.TitForTat() # Create a player that plays tit for tat - p3 = axelrod.Defector() # Create a player that always defects - for round in range(5): - p1.play(p3) - print p1.history - -which gives:: - + >>> p1 = axelrod.TitForTat() # Create a player that plays tit for tat + >>> p3 = axelrod.Defector() # Create a player that always defects + >>> for round in range(5): + ... p1.play(p3) + >>> p1.history ['C', 'D', 'D', 'D', 'D'] We see that after cooperating once, Tit For Tat defects at every step. @@ -151,8 +144,10 @@ Finally this strategy defects if and only if: Grofman ^^^^^^^ -This is a pretty simple strategy: it cooperates with probability :math:`\frac{2}{7}`. In contemporary terminology, this is a memory-one player -with all four conditional probabilities of cooperation equal to :math:`\frac{2}{7}`. +This is a pretty simple strategy: it cooperates with probability +:math:`\frac{2}{7}`. In contemporary terminology, this is a memory-one player +with all four conditional probabilities of cooperation equal to +:math:`\frac{2}{7}`. 
*This strategy came 4th in Axelrod's original tournament.* @@ -161,34 +156,14 @@ Implementation Here is how Grofman is implemented in the library:: - import axelrod - p1 = axelrod.Grofman() # Create a Grofman player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p1.history - -which gives:: - + >>> import axelrod + >>> p1 = axelrod.Grofman() # Create a Grofman player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'D', 'D', 'D'] -Over a longer number of rounds:: - - from collections import Counter - for round in range(5): - p1.play(p2) - counter = Counter(p1.history) - print(counter) - Counter({'D': 367, 'C': 138}) - print float(counter['C']) / (counter['C'] + counter['D']) - print 2./7 - -We have that Grofman cooperates roughly in :math:`\frac{2}{7}`-ths of the rounds:: - - 0.2732673267326733 # Grofman - 0.2857142857142857 # 2./7 - Shubik ^^^^^^ @@ -203,18 +178,14 @@ Implementation Here is how Shubik is implemented in the library:: - import axelrod - p1 = axelrod.Shubik() # Create a Shubik player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(13): - p1.play(p2) - - print p1.history - print p2.history - -This yields the following history of play:: - + >>> import axelrod + >>> p1 = axelrod.Shubik() # Create a Shubik player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(13): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'D', 'C', 'D', 'D', 'D', 'C', 'C', 'C', 'D', 'D', 'D', 'C'] + >>> p2.history # doctest: +SKIP ['D', 'C', 'D', 'C', 'D', 'C', 'C', 'C', 'D', 'C', 'C', 'C', 'D'] The increasing retaliation periods are visible in the output. Note that @@ -248,19 +219,15 @@ Implementation Here is how this is implemented in the library:: - import axelrod - p1 = axelrod.Grudger() # Create a player that grudger - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p1.history - print p2.history - -which gives (for the random seed used):: - - ['C', 'C', 'D', 'D', 'D'] - ['C', 'D', 'C', 'D', 'D'] + >>> import axelrod + >>> p1 = axelrod.Grudger() # Create a player that grudger + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP + ['C', 'C', 'D', 'D', 'D'] + >>> p2.history # doctest: +SKIP + ['C', 'D', 'C', 'D', 'D'] We see that as soon as :code:`p2` defected :code:`p1` defected for the rest of the play. @@ -278,19 +245,14 @@ Implementation Davis is implemented as follows:: - import axelrod - p1 = axelrod.Davis() # Create a Davis player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(15): - p1.play(p2) - - print p1.history - print p2.history - -This always produces (at least) 10 rounds of attempted cooperation followed by -Grudger:: - + >>> import axelrod + >>> p1 = axelrod.Davis() # Create a Davis player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(15): + ... 
p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'D', 'D', 'D'] + >>> p2.history # doctest: +SKIP ['D', 'C', 'D', 'D', 'C', 'D', 'D', 'C', 'D', 'C', 'D', 'D', 'C', 'C', 'D'] Graaskamp @@ -341,18 +303,14 @@ Implementation Feld is implemented in the library as follows:: - import axelrod - p1 = axelrod.Feld() # Create a Feld player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(10): - p1.play(p2) - - print p1.history - print p2.history - -We can see from the output that Feld defects when its opponent does:: - + >>> import axelrod + >>> p1 = axelrod.Feld() # Create a Feld player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(10): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'D', 'C', 'D', 'D', 'D', 'D', 'C', 'D', 'D'] + >>> p2.history # doctest: +SKIP ['D', 'C', 'D', 'D', 'D', 'D', 'C', 'D', 'D', 'D'] The defection times lengthen each time the opponent defects when Feld @@ -372,18 +330,14 @@ Implementation This is a memory-one strategy with four-vector :math:`(0.9, 0, 1, 0)`. Here is how Joss is implemented in the library:: - import axelrod - p1 = axelrod.Joss() # Create a Joss player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(10): - p1.play(p2) - - print p1.history - print p2.history - -This gives:: - + >>> import axelrod + >>> p1 = axelrod.Joss() # Create a Joss player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(10): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'C', 'D', 'C', 'D', 'C', 'C', 'C', 'C'] + >>> p2.history # doctest: +SKIP ['C', 'C', 'D', 'C', 'D', 'C', 'C', 'C', 'C', 'D'] Which is the same as Tit-For-Tat for these 10 rounds. @@ -401,18 +355,14 @@ Implementation Tullock is implemented in the library as follows:: - import axelrod - p1 = axelrod.Tullock() # Create a Tullock player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(15): - p1.play(p2) - - print p1.history - print p2.history - -This gives:: - + >>> import axelrod + >>> p1 = axelrod.Tullock() # Create a Tullock player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(15): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'D', 'D', 'C', 'D'] + >>> p2.history # doctest: +SKIP ['D', 'C', 'C', 'D', 'D', 'C', 'C', 'D', 'D', 'D', 'C', 'D', 'C', 'D', 'C'] We have 10 rounds of cooperation and some apparently random plays afterward. @@ -445,19 +395,15 @@ Implementation Here is how this is implemented in the library:: - import axelrod - p1 = axelrod.Random() # Create a player that plays randomly - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p1.history - print p2.history - -which gives (for the random seed used):: - - ['D', 'D', 'C', 'C', 'C'] - ['D', 'C', 'D', 'D', 'C'] + >>> import axelrod + >>> p1 = axelrod.Random() # Create a player that plays randomly + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... 
p1.play(p2) + >>> p1.history # doctest: +SKIP + ['D', 'D', 'C', 'C', 'C'] + >>> p2.history # doctest: +SKIP + ['D', 'C', 'D', 'D', 'C'] Axelrod's second tournament --------------------------- @@ -479,21 +425,17 @@ Implementation Here is how Eatherley is implemented in the library:: - import axelrod - p1 = axelrod.Eatherley() # Create a Eatherley player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p1.history - print p2.history - -which gives (for a particular random seed):: + >>> import axelrod + >>> p1 = axelrod.Eatherley() # Create a Eatherley player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'C', 'D', 'C'] + >>> p2.history # doctest: +SKIP ['C', 'D', 'D', 'C', 'C'] - CHAMPION ^^^^^^^^ @@ -511,21 +453,16 @@ Implementation Here is how Champion is implemented in the library:: - import axelrod - p1 = axelrod.Champion() # Create a Champion player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p1.history - print p2.history - -which gives (for a particular random seed):: - + >>> import axelrod + >>> p1 = axelrod.Champion() # Create a Champion player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'C', 'C', 'C'] + >>> p2.history # doctest: +SKIP ['D', 'C', 'D', 'D', 'C'] - TESTER ^^^^^^ @@ -541,22 +478,16 @@ Implementation Here is how this is implemented in the library:: - import axelrod - p1 = axelrod.Tester() # Create a Tester player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p1.history - print p2.history - -which gives (for a particular random seed):: - + >>> import axelrod + >>> p1 = axelrod.Tester() # Create a Tester player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['D', 'C', 'C', 'D', 'D'] + >>> p2.history # doctest: +SKIP ['C', 'D', 'D', 'D', 'C'] - - Stewart and Plotkin's Tournament (2012) --------------------------------------- @@ -650,17 +581,13 @@ Implementation Here is a quick implementation of this in the library:: - import axelrod - p1 = axelrod.GTFT() # Create a player that plays GTFT - p2 = axelrod.Defector() # Create a player that always defects - for round in range(10): - p1.play(p2) - - print p1.history - -this gives (for the random seed used):: - - ['C', 'D', 'D', 'C', 'D', 'D', 'D', 'D', 'D', 'D'] + >>> import axelrod + >>> p1 = axelrod.GTFT() # Create a player that plays GTFT + >>> p2 = axelrod.Defector() # Create a player that always defects + >>> for round in range(10): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP + ['C', 'D', 'D', 'C', 'D', 'D', 'D', 'D', 'D', 'D'] which shows that :code:`GTFT` tried to forgive :code:`Defector`. @@ -677,17 +604,13 @@ Implementation Here is the implementation of this in the library:: - import axelrod - p1 = axelrod.TitFor2Tats() # Create a player that plays TF2T - p2 = axelrod.Defector() # Create a player that always defects - for round in range(3): - p1.play(p2) - - print p1.history - -which gives:: - - ['C', 'C', 'D'] + >>> import axelrod + >>> p1 = axelrod.TitFor2Tats() # Create a player that plays TF2T + >>> p2 = axelrod.Defector() # Create a player that always defects + >>> for round in range(3): + ... 
p1.play(p2) + >>> p1.history + ['C', 'C', 'D'] we see that it takes 2 defections to trigger a defection by :code:`TitFor2Tats`. @@ -710,17 +633,13 @@ Implementation Here is a quick implementation of this in the library:: - import axelrod - p1 = axelrod.WinStayLoseShift() # Create a player that plays WSLS - p2 = axelrod.Alternator() # Create a player that alternates - for round in range(5): - p1.play(p2) - - print p1.history - -this gives:: - - ['C', 'C', 'D', 'D', 'C'] + >>> import axelrod + >>> p1 = axelrod.WinStayLoseShift() # Create a player that plays WSLS + >>> p2 = axelrod.Alternator() # Create a player that alternates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history + ['C', 'C', 'D', 'D', 'C'] which shows that :code:`WSLS` will choose the strategy that was a best response in the previous round. @@ -755,18 +674,14 @@ Implementation Here is how ZDGTFT-2 is implemented in the library:: - import axelrod - p1 = axelrod.ZDGTFT2() # Create a ZDGTFT-2 player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives (for the particular random seed used):: - + >>> import axelrod + >>> p1 = axelrod.ZDGTFT2() # Create a ZDGTFT-2 player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p2.history # doctest: +SKIP ['D', 'D', 'D', 'C', 'C', 'D', 'C', 'D', 'D', 'D'] + >>> p1.history # doctest: +SKIP ['C', 'C', 'D', 'D', 'C', 'C', 'D', 'C', 'D', 'D'] looking closely (and repeating the above) will show that the above @@ -790,18 +705,14 @@ Implementation Here is how EXTORT-2 is implemented in the library:: - import axelrod - p1 = axelrod.ZDExtort2() # Create a EXTORT-2 player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(10): - p1.play(p2) - - print p2.history - print p1.history - -which gives (for the particular seed used):: - + >>> import axelrod + >>> p1 = axelrod.ZDExtort2() # Create a EXTORT-2 player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(10): + ... p1.play(p2) + >>> p2.history # doctest: +SKIP ['D', 'C', 'C', 'C', 'D', 'D', 'D', 'D', 'C', 'D'] + >>> p1.history # doctest: +SKIP ['C', 'C', 'D', 'C', 'C', 'D', 'D', 'D', 'D', 'D'] you can see that :code:`ZDExtort2` never cooperates after both strategies defect. @@ -821,16 +732,12 @@ Implementation Here is how GRIM is implemented in the library:: - import axelrod - p1 = axelrod.Grudger() # Create a GRIM player - p2 = axelrod.Defector() # Create a player that always defects - for round in range(5): - p1.play(p2) - - print p1.history - -this gives:: - + >>> import axelrod + >>> p1 = axelrod.Grudger() # Create a GRIM player + >>> p2 = axelrod.Defector() # Create a player that always defects + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history ['C', 'D', 'D', 'D', 'D'] HARD_JOSS @@ -867,22 +774,18 @@ Implementation HARD_MAJO is implemented in the library:: - import axelrod - p1 = axelrod.GoByMajority() # Create a HARD_TFT player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives (for this seed):: - - + >>> import axelrod + >>> p1 = axelrod.GoByMajority() # Create a HARD_TFT player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... 
p1.play(p2) + >>> p2.history # doctest: +SKIP ['D', 'C', 'C', 'D', 'D'] + >>> p1.history # doctest: +SKIP ['C', 'D', 'C', 'C', 'C'] -we see that following the third round (at which point the opponent has cooperated a lot), :code:`GoByMajority` cooperates. +we see that following the third round (at which point the opponent has +cooperated a lot), :code:`GoByMajority` cooperates. HARD_TFT ^^^^^^^^ @@ -900,20 +803,16 @@ Implementation HARD_TFT is implemented in the library:: - import axelrod - p1 = axelrod.HardTitForTat() # Create a HARD_TFT player - p2 = axelrod.Alternator() # Create a player that alternates - for round in range(5): - p1.play(p2) - - print p1.history - -which gives:: - + >>> import axelrod + >>> p1 = axelrod.HardTitForTat() # Create a HARD_TFT player + >>> p2 = axelrod.Alternator() # Create a player that alternates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history ['C', 'C', 'D', 'D', 'D'] we see that :code:`HardTitForTat` cooperates for the first two moves but then -constantly defetcts as there is always a defection in it's opponent's recent +constantly defects as there is always a defection in its opponent's recent history. HARD_TF2T ^^^^^^^^^ @@ -931,18 +830,15 @@ Implementation HARD_TF2T is implemented in the library:: - import axelrod - p1 = axelrod.HardTitFor2Tats() # Create a HARD_TF2T player - p2 = axelrod.Random() # Create a player that plays randomly - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives (for this particular seed):: + >>> import axelrod + >>> p1 = axelrod.HardTitFor2Tats() # Create a HARD_TF2T player + >>> p2 = axelrod.Random() # Create a player that plays randomly + >>> for round in range(5): + ... p1.play(p2) + >>> p2.history # doctest: +SKIP ['D', 'D', 'C', 'D', 'C'] + >>> p1.history # doctest: +SKIP ['C', 'C', 'D', 'D', 'C'] we see that :code:`HardTitFor2Tats` waited for 2 defects before defecting, but @@ -958,20 +854,16 @@ Calculator attempts to detect a cycle in the opponents history, and defects unconditionally thereafter if a cycle is found. Otherwise Calculator plays like TFT for the remaining rounds. -Calculator is implemented in the library as follows: - - import axelrod - p1 = axelrod.Calculator() # Create a HARD_TF2T player - p2 = axelrod.Cooperator() # Create a player that always cooperates - for round in range(5): - p1.play(p2) - - print p1.history - print p2.history - -This returns (for a particular random seed):: +Calculator is implemented in the library as follows:: + >>> import axelrod + >>> p1 = axelrod.Calculator() # Create a Calculator player + >>> p2 = axelrod.Cooperator() # Create a player that always cooperates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history # doctest: +SKIP ['C', 'C', 'C', 'C', 'C'] + >>> p2.history # doctest: +SKIP ['C', 'C', 'C', 'C', 'C'] Prober ^^^^^^ @@ -989,18 +881,14 @@ Implementation Prober is implemented in the library:: - import axelrod - p1 = axelrod.Prober() # Create a Prober player - p2 = axelrod.Cooperator() # Create a player that always cooperates - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives:: - + >>> import axelrod + >>> p1 = axelrod.Prober() # Create a Prober player + >>> p2 = axelrod.Cooperator() # Create a player that always cooperates + >>> for round in range(5): + ... 
p1.play(p2) + >>> p1.history ['D', 'C', 'C', 'D', 'D'] + >>> p2.history ['C', 'C', 'C', 'C', 'C'] Prober2 ^^^^^^^ @@ -1018,18 +906,14 @@ Implementation Prober2 is implemented in the library:: - import axelrod - p1 = axelrod.Prober2() # Create a Prober2 player - p2 = axelrod.Cooperator() # Create a player that always cooperates - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives:: - + >>> import axelrod + >>> p1 = axelrod.Prober2() # Create a Prober2 player + >>> p2 = axelrod.Cooperator() # Create a player that always cooperates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history ['D', 'C', 'C', 'C', 'C'] + >>> p2.history ['C', 'C', 'C', 'C', 'C'] Prober3 ^^^^^^^ @@ -1047,18 +931,14 @@ Implementation Prober3 is implemented in the library:: - import axelrod - p1 = axelrod.Prober3() # Create a Prober3 player - p2 = axelrod.Cooperator() # Create a player that always cooperates - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives:: - + >>> import axelrod + >>> p1 = axelrod.Prober3() # Create a Prober3 player + >>> p2 = axelrod.Cooperator() # Create a player that always cooperates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history ['D', 'C', 'D', 'D', 'D'] + >>> p2.history ['C', 'C', 'C', 'C', 'C'] HardProber ^^^^^^^^^^ @@ -1076,58 +956,64 @@ Implementation HardProber is implemented in the library:: - import axelrod - p1 = axelrod.HardProber() # Create a Prober3 player - p2 = axelrod.Cooperator() # Create a player that always cooperates - for round in range(5): - p1.play(p2) - - print p2.history - print p1.history - -which gives:: + >>> import axelrod + >>> p1 = axelrod.HardProber() # Create a HardProber player + >>> p2 = axelrod.Cooperator() # Create a player that always cooperates + >>> for round in range(5): + ... p1.play(p2) + >>> p1.history ['D', 'D', 'C', 'C', 'D'] + >>> p2.history ['C', 'C', 'C', 'C', 'C'] Strategies implemented in the module ------------------------------------ -There are several original strategies which have been created as part of this project and have never (to our knowledge) appeared in previous tournaments. +There are several original strategies which have been created as part of this +project and have never (to our knowledge) appeared in previous tournaments. Fool Me Once -^^^^^^^^^^^ +^^^^^^^^^^^^ -This strategy begins by cooperating but will defect if at any point the opponent has defected more than once. +This strategy begins by cooperating but will defect if at any point the opponent +has defected more than once. Forgetful Fool Me Once ^^^^^^^^^^^^^^^^^^^^^^ -Like Fool Me Once, this strategy defects if the opponent ever defects, but sometimes -forgets that the opponent had defected, cooperating again until another defection. +Like Fool Me Once, this strategy defects if the opponent ever defects, but +sometimes forgets that the opponent had defected, cooperating again until +another defection. Fool Me Forever ^^^^^^^^^^^^^^^ -This strategy defects until the opponent defects, and then cooperates there after. -Note that this strategy is different than opposite grudger, which cooperates -indefinitely after an opponent cooperation. +This strategy defects until the opponent defects, and then cooperates +thereafter. Note that this strategy is different than opposite grudger, which +cooperates indefinitely after an opponent cooperation. Backstabber ^^^^^^^^^^^ -Forgives the first 3 defections but on the fourth will defect forever. Defects after the 198th round unconditionally. 
+Forgives the first 3 defections but on the fourth will defect forever. Defects +unconditionally on the last 2 rounds (depending on the tournament length). DoubleCrosser ^^^^^^^^^^^^^ -Forgives the first 3 defections but on the fourth will defect forever. If the opponent did not defect in the first 6 rounds the player will cooperate until the 180th round. Defects after the 198th round unconditionally. +Forgives the first 3 defections but on the fourth will defect forever. If the +opponent did not defect in the first 6 rounds the player will cooperate until +the 180th round. Defects unconditionally on the last 2 rounds (depending on the +tournament length). Aggravater ^^^^^^^^^^ -This strategy begins by defecting 3 times and then will cooperate until the opponent defects. After the opponent defects it will defect unconditionally. Essentially Grudger, but begins by defecting 3 times. +This strategy begins by defecting 3 times and then will cooperate until the +opponent defects. After the opponent defects it will defect unconditionally. +Essentially Grudger, but begins by defecting 3 times. Alternator ^^^^^^^^^^ diff --git a/docs/tutorials/advanced/index.rst b/docs/tutorials/advanced/index.rst new file mode 100644 index 000000000..ddfd52de6 --- /dev/null +++ b/docs/tutorials/advanced/index.rst @@ -0,0 +1,10 @@ +Advanced +======== + +This is a section aiming to showcase some problems solved and/or insights gained +using the Axelrod library. Please be the first to submit such a tutorial! + +Contents: + +.. toctree:: + :maxdepth: 2 diff --git a/docs/tutorials/contributing/guidelines.rst b/docs/tutorials/contributing/guidelines.rst new file mode 100644 index 000000000..30bf7ce79 --- /dev/null +++ b/docs/tutorials/contributing/guidelines.rst @@ -0,0 +1,21 @@ +Guidelines +========== + +All contributions to this repository are welcome via pull request on the `github repository `_. + +The project follows these guidelines: + +1. Use the base Python library unless an external dependency is completely necessary. A few external + libraries (such as numpy) have been included in requirements.txt -- feel free + to use these as needed. +2. Try as best as possible to follow `PEP8 + `_ which includes **using + descriptive variable names**. +3. Testing: the project uses the `unittest + `_ library and has a nice + testing suite that makes some things very easy to write tests for. Please try + to increase the test coverage on pull requests. +4. Merging pull-requests: We require two of the (currently four) core-team + maintainers to merge (and preferably not the submitter). Opening a PR for early + feedback or to check test coverage is OK, just indicate that the PR is not ready + to merge (and update when it is). diff --git a/docs/tutorials/contributing/index.rst b/docs/tutorials/contributing/index.rst new file mode 100644 index 000000000..c7d845a84 --- /dev/null +++ b/docs/tutorials/contributing/index.rst @@ -0,0 +1,19 @@ +.. Axelrod documentation master file, created by + sphinx-quickstart on Sat Mar 7 07:05:57 2015. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Contributing +============ + +This section contains a variety of tutorials that should help you contribute to +the library. + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + guidelines.rst + strategy/index.rst + library/index.rst diff --git a/docs/tutorials/contributing/library/index.rst b/docs/tutorials/contributing/library/index.rst new file mode 100644 index 000000000..797691ea8 --- /dev/null +++ b/docs/tutorials/contributing/library/index.rst @@ -0,0 +1,9 @@ +Contributing to the library +=========================== + +All contributions (docs, tests, etc) are very welcome. If there is a specific +functionality that you would like to add then please open an issue (or indeed +take a look at the ones already there and jump in the conversation!). + +If you want to work on documentation please keep in mind that doctests are +encouraged to help keep the documentation up to date. diff --git a/docs/tutorials/contributing/strategy/adding_the_new_strategy.rst b/docs/tutorials/contributing/strategy/adding_the_new_strategy.rst new file mode 100644 index 000000000..782ed7556 --- /dev/null +++ b/docs/tutorials/contributing/strategy/adding_the_new_strategy.rst @@ -0,0 +1,22 @@ +Adding the new strategy +======================= + +To get the strategy to be recognised by the library we need to add it to the +files that initialise when someone types :code:`import axelrod`. This is done +in the :code:`axelrod/strategies/_strategies.py` file. + +If you have added your strategy to a file that already existed (perhaps you +added a new variant of :code:`titfortat` to the :code:`titfortat.py` file), +add a line similar to:: + + from file_name import * + +Where :code:`file_name.py` is the name of the file you created. So for the +:code:`TitForTat` strategy which is written in the :code:`titfortat.py` file we +have:: + + from titfortat import * + +Once you have done that (**and you need to do this even if you have added a +strategy to an already existing file**), you need to add the class itself to +the :code:`strategies` list. diff --git a/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst b/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst new file mode 100644 index 000000000..c831db87d --- /dev/null +++ b/docs/tutorials/contributing/strategy/classifying_the_new_strategy.rst @@ -0,0 +1,45 @@ +Classifying the new strategy +============================ + +Every class has a classifier dictionary that gives some classification of the +strategy according to certain dimensions. + +Let us take a look at :code:`TitForTat`:: + + >>> import axelrod + >>> classifier = axelrod.TitForTat.classifier + >>> for key in sorted(classifier.keys()): + ... print("{}: {}".format(key, classifier[key])) + inspects_source: False + manipulates_source: False + manipulates_state: False + memory_depth: 1 + stochastic: False + +You can read more about this in the :ref:`classification-of-strategies` section +but here are some tips about filling this part in correctly. + +Note that when an instance of a class is created it gets its own copy of the +default classifier dictionary from the class. This might sometimes be modified by +the initialisation depending on input parameters. A good example of this is the +:code:`Joss` strategy:: + + >>> joss = axelrod.Joss() + >>> boring_joss = axelrod.Joss(1) + >>> joss.classifier['stochastic'], boring_joss.classifier['stochastic'] + (True, False) + +Dimensions that are not classified have value `None` in the dictionary. + +There are currently three important dimensions that help identify if a strategy +obeys Axelrod's original tournament rules. + +1. 
:code:`inspects_source` - does the strategy 'read' any source code that + it would not normally have access to. An example of this is :code:`Geller`. +2. :code:`manipulates_source` - does the strategy 'write' any source code that + it would not normally be able to. An example of this is :code:`Mind Bender`. +3. :code:`manipulates_state` - does the strategy 'change' any attributes that + it would not normally be able to. An example of this is :code:`Mind Reader`. + +These dimensions are currently relevant to the `obey_axelrod` strategy which +checks if a strategy obeys Axelrod's original rules. diff --git a/docs/tutorials/contributing/strategy/index.rst b/docs/tutorials/contributing/strategy/index.rst new file mode 100644 index 000000000..266097f18 --- /dev/null +++ b/docs/tutorials/contributing/strategy/index.rst @@ -0,0 +1,17 @@ +Contributing a strategy +======================= + +This section contains a variety of tutorials that should help you contribute to +the library. + +Contents: + +.. toctree:: + :maxdepth: 2 + + instructions.rst + writing_the_new_strategy.rst + adding_the_new_strategy.rst + classifying_the_new_strategy.rst + writing_test_for_the_new_strategy.rst + running_tests.rst diff --git a/docs/tutorials/contributing/strategy/instructions.rst b/docs/tutorials/contributing/strategy/instructions.rst new file mode 100644 index 000000000..27a11ed17 --- /dev/null +++ b/docs/tutorials/contributing/strategy/instructions.rst @@ -0,0 +1,50 @@ +Instructions +============ + +Here is the file structure for the Axelrod repository:: + + . + ├── axelrod + │ └── __init__.py + │ └── ecosystem.py + │ └── game.py + │ └── player.py + │ └── plot.py + │ └── result_set.py + │ └── round_robin.py + │ └── tournament.py + │ └── /strategies/ + │ └── __init__.py + │ └── _strategies.py + │ └── cooperator.py + │ └── defector.py + │ └── grudger.py + │ └── titfortat.py + │ └── gobymajority.py + │ └── ... + │ └── /tests/ + │ └── functional + │ └── unit + │ └── test_*.py + └── README.md + +To contribute a strategy you need to follow as many of the following steps as possible: + +1. Fork the `github repository `_. +2. Add a :code:`.py` file to the strategies directory. (Take a look + at the others in there: you need to write code for the strategy and one other + simple thing.) +3. Update the :code:`./axelrod/strategies/_strategies.py` file (you need to + write the import statement and add the strategy to the relevant python list). +4. Update :code:`./axelrod/docs/overview_of_strategies.rst` with a description + of what the strategy does and include an example of it working. If relevant + please also add a source for the strategy (if it is not an original one). +5. This one is optional: write some unit tests in the :code:`./axelrod/tests/` + directory. +6. This one is also optional: ping us a message and we'll add you to the + Contributors team. This would add an Axelrod-Python organisation badge to + your profile. +7. Send us a pull request. + +**If you would like a hand with any of the above please do get in touch: we're +always delighted to have new strategies.** diff --git a/docs/tutorials/contributing/strategy/running_tests.rst b/docs/tutorials/contributing/strategy/running_tests.rst new file mode 100644 index 000000000..936cb9e24 --- /dev/null +++ b/docs/tutorials/contributing/strategy/running_tests.rst @@ -0,0 +1,30 @@ +Running tests +============= + +The project has an extensive test suite which is run each time a new +contribution is made to the repository. 
If you want to check that all the tests +pass before you submit a pull request, you can run the tests yourself:: + + $ python -m unittest discover + +If you are developing new tests for the suite, it is useful to run a single test +file so that you don't have to wait for the entire suite each time. For +example, to run only the tests for the Grudger strategy:: + + $ python -m unittest axelrod.tests.unit.test_grudger + +The test suite is divided into two categories: unit tests and integration tests. +Each can be run individually:: + + $ python -m unittest discover -s axelrod.tests.unit + $ python -m unittest discover -s axelrod.tests.integration + +Furthermore, the documentation is also doctested. To run those tests you can run +the script:: + + $ sh doctest + +Note that this project is being taken care of by `travis-ci +`_, so all tests will be run automatically when opening +a pull request. You can see the latest build status `here +`_. diff --git a/docs/tutorials/contributing/strategy/writing_test_for_the_new_strategy.rst b/docs/tutorials/contributing/strategy/writing_test_for_the_new_strategy.rst new file mode 100644 index 000000000..93a14da8a --- /dev/null +++ b/docs/tutorials/contributing/strategy/writing_test_for_the_new_strategy.rst @@ -0,0 +1,131 @@ +Writing tests for the new strategy +================================== + +To write tests you either need to create a file called :code:`test_.py` +where :code:`.py` is the name of the file you have created or similarly +add tests to the test file that is already present in the +:code:`axelrod/tests/unit/` directory. + +As an example, the tests for Tit-For-Tat are as follows:: + + import axelrod + + from test_player import TestPlayer + + C, D = axelrod.Actions.C, axelrod.Actions.D + + class TestTitForTat(TestPlayer): + + name = "Tit For Tat" + player = axelrod.TitForTat + expected_classifier = { + 'memory_depth': 1, + 'stochastic': False, + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + + def test_strategy(self): + """Starts by cooperating.""" + P1 = axelrod.TitForTat() + P2 = axelrod.Player() + self.assertEqual(P1.strategy(P2), C) + + def test_effect_of_strategy(self): + """ + Repeats last action of opponent history + """ + P1 = axelrod.TitForTat() + P2 = axelrod.Player() + P2.history = [C, C, C, C] + self.assertEqual(P1.strategy(P2), C) + P2.history = [C, C, C, C, D] + self.assertEqual(P1.strategy(P2), D) + +The :code:`test_effect_of_strategy` method mainly checks that the +:code:`strategy` method in the :code:`TitForTat` class works as expected: + +1. If the opponent's last action was :code:`C`: then :code:`TitForTat` should + cooperate:: + + P2.history = ['C', 'C', 'C', 'C'] + self.assertEqual(P1.strategy(P2), 'C') + +2. If the opponent's last action was :code:`D`: then :code:`TitForTat` should + defect:: + + P2.history = ['C', 'C', 'C', 'C', 'D'] + self.assertEqual(P1.strategy(P2), 'D') + +We have added some convenience member functions to the :code:`TestPlayer` class. +All three of these functions can take an optional keyword argument +:code:`random_seed` (useful for stochastic strategies). + +1. The member function :code:`first_play_test` tests the first strategy, e.g.:: + + def test_strategy(self): + self.first_play_test('C') + + This is equivalent to:: + + def test_effect_of_strategy(self): + P1 = axelrod.TitForTat() # Or whatever player is in your test class + P2 = axelrod.Player() + P1.history = [] + P2.history = [] + self.assertEqual(P1.strategy(P2), 'C') + +2. 
The member function :code:`markov_test` takes a list of four plays, each + following one round of CC, CD, DC, and DD respectively:: + + def test_effect_of_strategy(self): + self.markov_test(['C', 'D', 'D', 'C']) + + This is equivalent to:: + + def test_effect_of_strategy(self): + P1 = axelrod.TitForTat() # Or whatever player is in your test class + P2 = axelrod.Player() + P1.history = ['C'] + P2.history = ['C'] + self.assertEqual(P1.strategy(P2), 'C') + P1.history = ['C'] + P2.history = ['D'] + self.assertEqual(P1.strategy(P2), 'D') + P1.history = ['D'] + P2.history = ['C'] + self.assertEqual(P1.strategy(P2), 'D') + P1.history = ['D'] + P2.history = ['D'] + self.assertEqual(P1.strategy(P2), 'C') + +3. The member function :code:`responses_test` takes arbitrary histories for each + player and tests a list of expected next responses:: + + def test_effect_of_strategy(self): + self.responses_test([C], [C], [D, C, C, C], random_seed=15) + + In this case each player has their history set to :code:`[C]` and the + expected responses are D, C, C, C. Note that the histories will elongate as + the responses accumulate. + +Finally, there is a :code:`TestHeadsUp` class that streamlines the testing of +two strategies playing each other using a test function :code:`versus_test`. For +example, to test several rounds of play of Tit-For-Two-Tats versus Bully:: + + class TestTF2TvsBully(TestHeadsUp): + """Test Tit for Two Tats vs Bully""" + def test_rounds(self): + outcomes = [[C, D], [C, D], [D, D], [D, C], [C, C], [C, D], [C, D], [D, D]] + self.versus_test(axelrod.TitFor2Tats, axelrod.Bully, outcomes) + +The function :code:`versus_test` also accepts a :code:`random_seed` keyword, and, +like :code:`responses_test`, the history is accumulated. + +The :code:`expected_classifier` dictionary tests that the classification of the +strategy is as expected (the tests for this are inherited in the :code:`init` +method). Please be sure to classify new strategies according to the already +present dimensions, but if you create a new dimension you do not **need** to +re-classify all the other strategies (but feel free to! :)), but please do add it +to the :code:`default_classifier` in the :code:`axelrod/player.py` parent class. diff --git a/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst b/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst new file mode 100644 index 000000000..b525aece4 --- /dev/null +++ b/docs/tutorials/contributing/strategy/writing_the_new_strategy.rst @@ -0,0 +1,136 @@ +Writing the new strategy +======================== + +Identify a new strategy +----------------------- + +The library has a large number of strategies:: + + >>> import axelrod as axl + >>> len(axl.strategies) + 103 + +If you're not sure if you have a strategy that has already been implemented, +please get in touch: `via the gitter room +`_ or `open an issue +`_. + +Several strategies are special cases of other strategies. For example, both +`Cooperator` and `Defector` are special cases of `Random`, `Random(1)` and +`Random(0)` respectively. While we could eliminate `Cooperator` in its current +form, these strategies are intentionally left as is as simple examples for new +users and contributors. Nevertheless, please feel free to update the docstrings +of strategies like `Random` to point out such cases. + +The code +-------- + +There are a couple of things that need to be created in a strategy.py file. 
Let +us take a look at the :code:`TitForTat` class (located in the +:code:`axelrod/strategies/titfortat.py` file):: + + class TitForTat(Player): + """ + A player starts by cooperating and then mimics previous move by + opponent. + + Note that the code for this strategy is written in a fairly verbose + way. This is done so that it can serve as an example strategy for + those who might be new to Python. + """ + + # These are various properties for the strategy + name = 'Tit For Tat' + classifier = { + 'memory_depth': 1, # Four-Vector = (1.,0.,1.,0.) + 'stochastic': False, + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + + def strategy(self, opponent): + """This is the actual strategy""" + # First move + if len(self.history) == 0: + return C + # React to the opponent's last move + if opponent.history[-1] == D: + return D + return C + +The first thing that is needed is a docstring that explains what the strategy +does:: + + """A player starts by cooperating and then mimics previous move by opponent.""" + +After that simply add in the string that will appear as the name of the +strategy:: + + name = 'Tit For Tat' + +Note that this is mainly used in plots by :code:`matplotlib` so you can use +LaTeX if you want to. For example there is a strategy with :math:`\pi` as a +name:: + + name = '$\pi$' + +Following that you can add in the :code:`classifier` dictionary:: + + classifier = { + 'memory_depth': 1, # Four-Vector = (1.,0.,1.,0.) + 'stochastic': False, + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + +This helps classify the strategy as described in +:ref:`classification-of-strategies`. + +After that the only thing required is to write the :code:`strategy` method +which takes an opponent as an argument. In the case of :code:`TitForTat` the +strategy checks if it has any history (:code:`if len(self.history) == 0`). If +it does not (i.e. this is the first play of the match) then it returns :code:`C`. +If not, the strategy simply repeats the opponent's last move (:code:`return +opponent.history[-1]`):: + + def strategy(self, opponent): + """This is the actual strategy""" + # First move + if len(self.history) == 0: + return C + # Repeat the opponent's last move + return opponent.history[-1] + +The variables :code:`C` and :code:`D` represent the cooperate and defect actions respectively. + +If your strategy creates any particular attribute along the way, you need to make +sure that there is a :code:`reset` method that takes account of it. An example +of this is the :code:`ForgetfulGrudger` strategy. + +You can also modify the name of the strategy with the `__repr__` method, which +is invoked when `str` is applied to a player instance. For example, the player +`Random` takes a parameter `p` for how often it cooperates, and the `__repr__` +method adds the value of this parameter to the name:: + + def __repr__(self): + return "%s: %s" % (self.name, round(self.p, 2)) + +Now we have separate names for different instantiations:: + + >>> import axelrod + >>> player1 = axelrod.Random(p=0.5) + >>> player2 = axelrod.Random(p=0.1) + >>> player1 + Random: 0.5 + >>> player2 + Random: 0.1 + +This helps distinguish players in tournaments that have multiple instances of the +same strategy. If you modify the `__repr__` method of a player, be sure to add an +appropriate test. + +There are various examples of helpful functions and properties that make +writing strategies easier. 
Do not hesitate to get in touch with the +Axelrod-Python team for guidance. diff --git a/docs/tutorials/further_topics/classification_of_strategies.rst b/docs/tutorials/further_topics/classification_of_strategies.rst new file mode 100644 index 000000000..42ffe2d94 --- /dev/null +++ b/docs/tutorials/further_topics/classification_of_strategies.rst @@ -0,0 +1,44 @@ +.. _classification-of-strategies: + +Classification of strategies +============================ + +Due to the large number of strategies, every class and instance of the class has +a :code:`classifier` attribute which classifies that strategy according to +various dimensions. + +Here is the :code:`classifier` for the :code:`Cooperator` strategy:: + + >>> import axelrod as axl + >>> expected_dictionary = {'manipulates_state': False, 'stochastic': False, 'manipulates_source': False, 'inspects_source': False, 'memory_depth': 0} # Order of this dictionary might be different on your machine + >>> axl.Cooperator.classifier == expected_dictionary + True + +Note that instances of the class also have this classifier:: + + >>> s = axl.Cooperator() + >>> s.classifier == expected_dictionary + True + +This allows us to, for example, quickly identify all the stochastic +strategies:: + + >>> len([s for s in axl.strategies if s().classifier['stochastic']]) + 31 + +Or indeed find out how many strategies use only 1 turn worth of memory to +make a decision:: + + >>> len([s for s in axl.strategies if s().classifier['memory_depth']==1]) + 13 + +Similarly, strategies that :code:`manipulate_source`, :code:`manipulate_state` +and/or :code:`inspect_source` return :code:`False` for the :code:`obey_axelrod` +function:: + + >>> s = axl.MindBender() + >>> axl.obey_axelrod(s) + False + >>> s = axl.TitForTat() + >>> axl.obey_axelrod(s) + True diff --git a/docs/tutorials/further_topics/index.rst b/docs/tutorials/further_topics/index.rst new file mode 100644 index 000000000..e60957f58 --- /dev/null +++ b/docs/tutorials/further_topics/index.rst @@ -0,0 +1,13 @@ +Further Topics +============== + +This section contains a variety of tutorials that show some more in-depth +capabilities of the Axelrod library. + +Contents: + +.. toctree:: + :maxdepth: 2 + + classification_of_strategies.rst + morality_metrics.rst diff --git a/docs/tutorials/further_topics/morality_metrics.rst b/docs/tutorials/further_topics/morality_metrics.rst new file mode 100644 index 000000000..5e0dfb965 --- /dev/null +++ b/docs/tutorials/further_topics/morality_metrics.rst @@ -0,0 +1,44 @@ +Morality Metrics +================ + +Tyler Singer-Clark's June 2014 paper, "Morality Metrics On Iterated Prisoner's +Dilemma Players," describes several interesting metrics which may be used to +analyse IPD tournaments, all of which are available within the ResultSet class. +(Tyler's paper is available here: http://www.scottaaronson.com/morality.pdf). + +Each metric depends upon the cooperation rate of the players, defined by Tyler +Singer-Clark as: + +.. math:: + + CR(b) = \frac{C(b)}{TT} + +where :math:`C(b)` is the total number of turns where a player chose to cooperate and :math:`TT` +is the total number of turns played. + +A matrix of cooperation rates is available within a tournament's ResultSet:: + + >>> import axelrod as axl + >>> strategies = [axl.Cooperator(), axl.Defector(), + ... 
axl.TitForTat(), axl.Grudger()] + >>> tournament = axl.Tournament(strategies) + >>> results = tournament.play() + >>> results.normalised_cooperation + [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [1.0, 0.005, 1.0, 1.0], [1.0, 0.005, 1.0, 1.0]] + +There is also a 'good partner' matrix showing how often a player cooperated at +least as much as its opponent:: + + >>> results.good_partner_matrix + [[0, 10, 10, 10], [0, 0, 0, 0], [10, 10, 0, 10], [10, 10, 10, 0]] + +Each of the metrics described in Tyler's paper is available as follows (here they are rounded to 2 digits):: + + >>> [round(ele, 2) for ele in results.cooperating_rating] + [1.0, 0.0, 0.75, 0.75] + >>> [round(ele, 2) for ele in results.good_partner_rating] + [1.0, 0.0, 1.0, 1.0] + >>> [round(ele, 2) for ele in results.eigenjesus_rating] + [0.58, 0.0, 0.58, 0.58] + >>> [round(ele, 2) for ele in results.eigenmoses_rating] + [0.37, -0.37, 0.6, 0.6] diff --git a/docs/_static/usage/demo_strategies_boxplot.svg b/docs/tutorials/getting_started/_static/getting_started/demo_deterministic_strategies_boxplot.svg similarity index 59% rename from docs/_static/usage/demo_strategies_boxplot.svg rename to docs/tutorials/getting_started/_static/getting_started/demo_deterministic_strategies_boxplot.svg index 3b2d4a97b..a42d8a023 100644 --- a/docs/_static/usage/demo_strategies_boxplot.svg +++ b/docs/tutorials/getting_started/_static/getting_started/demo_deterministic_strategies_boxplot.svg @@ -2,7 +2,7 @@ - + - + @@ -413,17 +364,117 @@ Q45.0625 54.2969 48.7812 52.5938" id="BitstreamVeraSans-Roman-63"/> - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -537,115 +588,15 @@ Q8.5 10.2969 8.5 21.5781" id="BitstreamVeraSans-Roman-75"/> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + @@ -696,7 +647,7 @@ Q26.2656 6.10938 33.4062 6.10938 Q40.5312 6.10938 44.6094 11.75 Q48.6875 17.3906 48.6875 27.2969" id="BitstreamVeraSans-Roman-70"/> - + @@ -711,150 +662,85 @@ Q48.6875 17.3906 48.6875 27.2969" id="BitstreamVeraSans-Roman-70"/> - + - + - + - + - - + + + + + +M0 0 +L4 0" id="m728421d6d4" style="stroke:#000000;stroke-width:0.5;"/> + + + + + + + - - +M0 0 +L-4 0" id="mcb0005524f" style="stroke:#000000;stroke-width:0.5;"/> + + + + + + + + +M19.1875 8.29688 +L53.6094 8.29688 +L53.6094 0 +L7.32812 0 +L7.32812 8.29688 +Q12.9375 14.1094 22.625 23.8906 +Q32.3281 33.6875 34.8125 36.5312 +Q39.5469 41.8438 41.4219 45.5312 +Q43.3125 49.2188 43.3125 52.7812 +Q43.3125 58.5938 39.2344 62.25 +Q35.1562 65.9219 28.6094 65.9219 +Q23.9688 65.9219 18.8125 64.3125 +Q13.6719 62.7031 7.8125 59.4219 +L7.8125 69.3906 +Q13.7656 71.7812 18.9375 73 +Q24.125 74.2188 28.4219 74.2188 +Q39.75 74.2188 46.4844 68.5469 +Q53.2188 62.8906 53.2188 53.4219 +Q53.2188 48.9219 51.5312 44.8906 +Q49.8594 40.875 45.4062 35.4062 +Q44.1875 33.9844 37.6406 27.2188 +Q31.1094 20.4531 19.1875 8.29688" id="BitstreamVeraSans-Roman-32"/> +M31.7812 66.4062 +Q24.1719 66.4062 20.3281 58.9062 +Q16.5 51.4219 16.5 36.375 +Q16.5 21.3906 20.3281 13.8906 +Q24.1719 6.39062 31.7812 6.39062 +Q39.4531 6.39062 43.2812 13.8906 +Q47.125 21.3906 47.125 36.375 +Q47.125 51.4219 43.2812 58.9062 +Q39.4531 66.4062 31.7812 66.4062 +M31.7812 74.2188 +Q44.0469 74.2188 50.5156 64.5156 +Q56.9844 54.8281 56.9844 36.375 +Q56.9844 17.9688 50.5156 8.26562 +Q44.0469 -1.42188 31.7812 -1.42188 +Q19.5312 -1.42188 13.0625 8.26562 +Q6.59375 17.9688 6.59375 36.375 +Q6.59375 54.8281 13.0625 64.5156 +Q19.5312 74.2188 31.7812 74.2188" id="BitstreamVeraSans-Roman-30"/> - - - - 
diff --git a/docs/_static/usage/demo_strategies_boxplot.svg b/docs/tutorials/getting_started/_static/getting_started/demo_deterministic_strategies_boxplot.svg
similarity index 59%
rename from docs/_static/usage/demo_strategies_boxplot.svg
rename to docs/tutorials/getting_started/_static/getting_started/demo_deterministic_strategies_boxplot.svg
index 3b2d4a97b..a42d8a023 100644
--- a/docs/_static/usage/demo_strategies_boxplot.svg
+++ b/docs/tutorials/getting_started/_static/getting_started/demo_deterministic_strategies_boxplot.svg
[SVG plot markup omitted]
diff --git a/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_boxplot.svg b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_boxplot.svg
new file mode 100644
index 000000000..6e0f144d4
--- /dev/null
+++ b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_boxplot.svg
@@ -0,0 +1,2374 @@
[SVG plot markup omitted]
diff --git a/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_noisy_boxplot.svg b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_noisy_boxplot.svg
new file mode 100644
index 000000000..677a3aa8d
--- /dev/null
+++ b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_noisy_boxplot.svg
@@ -0,0 +1,1968 @@
[SVG plot markup omitted]
diff --git a/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_noisy_winplot.svg b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_noisy_winplot.svg
new file mode 100755
index 000000000..e8292e132
--- /dev/null
+++ b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_noisy_winplot.svg
@@ -0,0 +1,1223 @@
[SVG plot markup omitted]
diff --git a/docs/_static/usage/demo_strategies_payoff.svg b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_payoff.svg
similarity index 100%
rename from docs/_static/usage/demo_strategies_payoff.svg
rename to docs/tutorials/getting_started/_static/visualising_results/demo_strategies_payoff.svg
diff --git a/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_stackplot.svg b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_stackplot.svg
new file mode 100644
index 000000000..21876e775
--- /dev/null
+++ b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_stackplot.svg
@@ -0,0 +1,2293 @@
[SVG plot markup omitted]
diff --git a/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_winplot.svg b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_winplot.svg
new file mode 100644
index 000000000..04ab787a7
--- /dev/null
+++ b/docs/tutorials/getting_started/_static/visualising_results/demo_strategies_winplot.svg
@@ -0,0 +1,1284 @@
[SVG plot markup omitted]
diff --git a/docs/tutorials/getting_started/command_line.rst b/docs/tutorials/getting_started/command_line.rst
new file mode 100644
index 000000000..fe08e3c5e
--- /dev/null
+++ b/docs/tutorials/getting_started/command_line.rst
@@ -0,0 +1,45 @@
+Using the command line tool
+===========================
+
+Once :code:`axelrod` is installed, you have access to a :code:`run_axelrod` script
+that can help run some of the tournaments, including the tournament that
+involves all of the strategies from the library. You can view the results of
+these tournaments in this repository:
+https://github.com/Axelrod-Python/tournament/
+
+To view the help for the :code:`run_axelrod` script, run::
+
+    $ run_axelrod -h
+
+Note that if you have not installed the package, you can still use this script
+directly from the repository::
+
+    $ python run_axelrod -h
+
+There are a variety of options, including:
+
+- Excluding certain strategy sets.
+- Not running the ecological variant.
+- Running the rounds of the tournament in parallel.
+- Including background noise.
+
+Particular parameters can also be changed:
+
+- The output directory for the plot and csv files.
+- The number of turns and repetitions for the tournament.
+
+Here is a command that will run the whole tournament, excluding the strategies
+that do not obey Axelrod's original rules and using all available CPUs (this
+can take quite a while!)::
+
+    $ run_axelrod --xc -p 0
+
+Here is one of the plots that is output when running with the full set of
+strategies:
+
+The results from the tournament itself (ordered by median score):
+
+.. image:: http://axelrod-python.github.io/tournament/assets/strategies_boxplot.svg
+   :width: 50%
+   :align: center
+
+This is just a brief overview of what the tool can do; take a look at the help
+to see all the options.
diff --git a/docs/contributing.rst b/docs/tutorials/getting_started/contributing.old
similarity index 100%
rename from docs/contributing.rst
rename to docs/tutorials/getting_started/contributing.old
diff --git a/docs/tutorials/getting_started/ecological_variant.rst b/docs/tutorials/getting_started/ecological_variant.rst
new file mode 100644
index 000000000..df8f2900f
--- /dev/null
+++ b/docs/tutorials/getting_started/ecological_variant.rst
@@ -0,0 +1,24 @@
+Ecological Variant
+==================
+
+To study the evolutionary stability of each strategy, it is possible to create
+an ecosystem based on the payoff matrix of a tournament::
+
+    >>> import axelrod as axl
+    >>> strategies = [axl.Cooperator(), axl.Defector(),
+    ...               axl.TitForTat(), axl.Grudger(),
+    ...               axl.Random()]
+    >>> tournament = axl.Tournament(strategies)
+    >>> results = tournament.play()
+    >>> eco = axl.Ecosystem(results)
+    >>> eco.reproduce(100) # Evolve the population over 100 time steps
+
+Here is how we obtain a stackplot of the system evolving over time::
+
+    >>> plot = axl.Plot(results)
+    >>> p = plot.stackplot(eco)
+    >>> p.show()
+
+.. image:: _static/visualising_results/demo_strategies_stackplot.svg
+   :width: 50%
+   :align: center
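To make the idea of reproduction driven by the payoff matrix concrete, here is a minimal conceptual sketch of a single replicator-style update step. This is only an illustration of the general mechanism; the function is hypothetical and is not the library's :code:`Ecosystem.reproduce` implementation::

    # Conceptual sketch: strategies whose average payoff against the current
    # population is higher receive a larger share of the next generation.
    def reproduce_once(population, payoff_matrix):
        # population: list of non-negative shares summing to 1
        # payoff_matrix[i][j]: mean payoff of strategy i against strategy j
        fitness = [sum(share * row[j] for j, share in enumerate(population))
                   for row in payoff_matrix]
        total = sum(f * share for f, share in zip(fitness, population))
        return [share * f / total for share, f in zip(population, fitness)]

Iterating an update of this kind from a uniform starting population is, in spirit, what the stackplot above visualises.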
diff --git a/docs/tutorials/getting_started/getting_started.rst b/docs/tutorials/getting_started/getting_started.rst
new file mode 100644
index 000000000..587b746db
--- /dev/null
+++ b/docs/tutorials/getting_started/getting_started.rst
@@ -0,0 +1,54 @@
+Getting started
+===============
+
+Installation
+------------
+
+The simplest way to install the package is to obtain it from the PyPI
+repository::
+
+    $ pip install axelrod
+
+You can also build it from source if you would like to::
+
+    $ git clone https://github.com/Axelrod-Python/Axelrod.git
+    $ cd Axelrod
+    $ python setup.py install
+
+If you do this, you will also need to install the dependencies::
+
+    $ pip install -r requirements.txt
+
+Creating and running a simple tournament
+----------------------------------------
+
+The following lines of code create a simple list of strategies::
+
+    >>> import axelrod as axl
+    >>> strategies = [axl.Cooperator(), axl.Defector(),
+    ...               axl.TitForTat(), axl.Grudger()]
+    >>> strategies
+    [Cooperator, Defector, Tit For Tat, Grudger]
+
+We can now create a tournament, play it, save the results, and view the rank of
+each player::
+
+    >>> tournament = axl.Tournament(strategies)
+    >>> results = tournament.play()
+    >>> results.ranked_names
+    ['Defector', 'Tit For Tat', 'Grudger', 'Cooperator']
+
+We can also plot these results::
+
+    >>> plot = axl.Plot(results)
+    >>> p = plot.boxplot()
+    >>> p.show()
+
+.. image:: _static/getting_started/demo_deterministic_strategies_boxplot.svg
+   :width: 50%
+   :align: center
+
+Note that in this case none of our strategies are stochastic, so the boxplot
+shows that there is no variation. Take a look at the :ref:`visualising-results`
+section to see plots showing a stochastic effect.
diff --git a/docs/tutorials/getting_started/index.rst b/docs/tutorials/getting_started/index.rst
new file mode 100644
index 000000000..99cffd34c
--- /dev/null
+++ b/docs/tutorials/getting_started/index.rst
@@ -0,0 +1,17 @@
+Getting Started
+===============
+
+This section contains a variety of tutorials that should help get you started
+with the Axelrod library.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   getting_started.rst
+   payoff_matrix.rst
+   visualising_results.rst
+   noisy_tournaments.rst
+   ecological_variant.rst
+   command_line.rst
diff --git a/docs/tutorials/getting_started/noisy_tournaments.rst b/docs/tutorials/getting_started/noisy_tournaments.rst
new file mode 100644
index 000000000..0fa59ec39
--- /dev/null
+++ b/docs/tutorials/getting_started/noisy_tournaments.rst
@@ -0,0 +1,46 @@
+Noisy tournaments
+=================
+
+A common variation on iterated prisoner’s dilemma tournaments is to add
+stochasticity in the choice of plays, simply called noise. This noise is
+introduced by flipping plays between ‘C’ and ‘D’ with some probability; the
+flip is applied to each play after it has been chosen by the player.
+
+The presence of this persistent background noise causes some strategies to
+behave substantially differently. For example, TitForTat can fall into
+defection loops with itself when there is noise. While TitForTat would usually
+cooperate well with itself::
+
+    C C C C C ...
+    C C C C C ...
+
+Noise can cause a C to flip to a D (or vice versa), disrupting the cooperative
+chain::
+
+    C C C D C D C D D D ...
+    C C C C D C D D D D ...
+
+To create a noisy tournament, you simply need to add the :code:`noise`
+argument::
+
+    >>> import axelrod as axl
+    >>> strategies = [axl.Cooperator(), axl.Defector(),
+    ...               axl.TitForTat(), axl.Grudger()]
+    >>> noise = 0.1
+    >>> tournament = axl.Tournament(strategies, noise=noise)
+    >>> results = tournament.play()
+    >>> plot = axl.Plot(results)
+    >>> p = plot.boxplot()
+    >>> p.show()
+
+.. image:: _static/visualising_results/demo_strategies_noisy_boxplot.svg
+   :width: 50%
+   :align: center
+
+Here is how the distribution of wins now looks::
+
+    >>> p = plot.winplot()
+    >>> p.show()
+
+.. image:: _static/visualising_results/demo_strategies_noisy_winplot.svg
+   :width: 50%
+   :align: center
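The flipping mechanism described above can be sketched in a few lines. This is only an illustrative sketch of the idea; the function name is hypothetical and this is not the library's internal implementation::

    import random

    def flip_with_noise(action, noise=0.1):
        # With probability `noise`, the chosen action is flipped to its opposite.
        if random.random() < noise:
            return 'D' if action == 'C' else 'C'
        return action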
diff --git a/docs/tutorials/getting_started/payoff_matrix.rst b/docs/tutorials/getting_started/payoff_matrix.rst
new file mode 100644
index 000000000..20ba841c1
--- /dev/null
+++ b/docs/tutorials/getting_started/payoff_matrix.rst
@@ -0,0 +1,31 @@
+Accessing the payoff matrix
+===========================
+
+This tutorial will show you briefly how to access the payoff matrix
+corresponding to the tournament.
+
+As shown in `Getting_started`_, let us create a tournament::
+
+    >>> import axelrod as axl
+    >>> strategies = [axl.Cooperator(), axl.Defector(),
+    ...               axl.TitForTat(), axl.Grudger()]
+    >>> tournament = axl.Tournament(strategies)
+    >>> results = tournament.play()
+
+We can view the payoff matrix of our tournament, where each entry gives the
+average score of the row strategy when played against the column strategy::
+
+    >>> m = results.payoff_matrix
+    >>> for row in m:
+    ...     print([round(ele, 1) for ele in row])  # Rounding output
+    [3.0, 0.0, 3.0, 3.0]
+    [5.0, 1.0, 1.0, 1.0]
+    [3.0, 1.0, 3.0, 3.0]
+    [3.0, 1.0, 3.0, 3.0]
+
+Here we see that the second strategy (:code:`Defector`) obtains an average
+utility per game of :code:`5.0` against the first strategy (:code:`Cooperator`),
+as expected.
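Individual entries can be read directly from this list of lists; for example, indexing the :code:`Defector` row and the :code:`Cooperator` column reproduces the value discussed above (rounded here, to match the printed matrix)::

    >>> round(m[1][0], 1)
    5.0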
diff --git a/docs/usage.rst b/docs/tutorials/getting_started/usage.old
similarity index 100%
rename from docs/usage.rst
rename to docs/tutorials/getting_started/usage.old
diff --git a/docs/tutorials/getting_started/visualising_results.rst b/docs/tutorials/getting_started/visualising_results.rst
new file mode 100644
index 000000000..0a3e4328e
--- /dev/null
+++ b/docs/tutorials/getting_started/visualising_results.rst
@@ -0,0 +1,55 @@
+.. _visualising-results:
+
+Visualising results
+===================
+
+This tutorial will show you briefly how to visualise some basic results.
+
+Visualising the results of the tournament
+------------------------------------------
+
+As shown in `Getting_started`_, let us create a tournament, but this time we
+will include a strategy that acts randomly::
+
+    >>> import axelrod as axl
+    >>> strategies = [axl.Cooperator(), axl.Defector(),
+    ...               axl.TitForTat(), axl.Grudger()]
+    >>> strategies.append(axl.Random())
+    >>> tournament = axl.Tournament(strategies)
+    >>> results = tournament.play()
+
+We can view these results (which helps visualise the stochastic effects)::
+
+    >>> plot = axl.Plot(results)
+    >>> p = plot.boxplot()
+    >>> p.show()
+
+.. image:: _static/visualising_results/demo_strategies_boxplot.svg
+   :width: 50%
+   :align: center
+
+Visualising the distributions of wins
+-------------------------------------
+
+We can view the distributions of wins for each player::
+
+    >>> p = plot.winplot()
+    >>> p.show()
+
+.. image:: _static/visualising_results/demo_strategies_winplot.svg
+   :width: 50%
+   :align: center
+
+Visualising the payoff matrix
+-----------------------------
+
+We can also easily view the payoff matrix described in `Payoff_matrix`_; this
+becomes particularly useful when viewing the outputs of tournaments with a
+large number of strategies::
+
+    >>> p = plot.payoff()
+    >>> p.show()
+
+.. image:: _static/visualising_results/demo_strategies_payoff.svg
+   :width: 50%
+   :align: center
diff --git a/docs/tutorials/index.rst b/docs/tutorials/index.rst
new file mode 100644
index 000000000..7dce1e7b2
--- /dev/null
+++ b/docs/tutorials/index.rst
@@ -0,0 +1,19 @@
+.. Axelrod documentation master file, created by
+   sphinx-quickstart on Sat Mar 7 07:05:57 2015.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Tutorials
+=========
+
+This section contains a variety of tutorials related to the Axelrod library.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   getting_started/index.rst
+   further_topics/index.rst
+   advanced/index.rst
+   contributing/index.rst
diff --git a/doctest b/doctest
new file mode 100755
index 000000000..daea869e5
--- /dev/null
+++ b/doctest
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+python -m doctest docs/tutorials/getting_started/*rst
+python -m doctest docs/tutorials/further_topics/*rst
+python -m doctest docs/reference/*rst
+python -m doctest docs/tutorials/contributing/strategy/*rst
diff --git a/test b/test
new file mode 100755
index 000000000..ed49bd58a
--- /dev/null
+++ b/test
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+python -m unittest discover axelrod/tests/
+./doctest
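Since both helper scripts are added with executable mode (100755), the unit tests and the documentation doctests can be run together, assuming the command is issued from the repository root (the paths inside the scripts are relative)::

    $ ./test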