# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Defector(Player):
    """A player who only ever defects.

    Names:

    - Defector: [Axelrod1984]_
    - ALLD: [Press2012]_
    - Always defect: [Mittal2009]_
    """

    name = "Defector"
    classifier = {
        "memory_depth": 0,  # ignores all history (see docstring: always defects)
        "stochastic": False,  # plays deterministically
        # NOTE(review): the remaining classifier keys and the strategy()
        # method are not visible here — the source appears truncated; confirm
        # against the full file.
from collections import defaultdict
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Retaliate(Player):
    """
    A player starts by cooperating but will retaliate once the opponent
    has won more than 10 percent times the number of defections the player has.

    Names:

    - Retaliate: Original name by Owen Campbell
    """

    name = "Retaliate"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,  # deterministic given both players' histories
        # NOTE(review): the remaining classifier keys and the strategy logic
        # are not visible here — the source appears truncated.
from multiprocessing import Process, Queue, cpu_count
from tempfile import mkstemp
from typing import List, Optional, Tuple
import axelrod.interaction_utils as iu
import tqdm
from axelrod import DEFAULT_TURNS
from axelrod.action import Action, actions_to_str, str_to_actions
from axelrod.player import Player
from .game import Game
from .match import Match
from .match_generator import MatchGenerator
from .result_set import ResultSet
C, D = Action.C, Action.D
class Tournament(object):
    def __init__(
        self,
        players: List[Player],  # participating player instances
        name: str = "axelrod",  # tournament name/label
        game: Game = None,  # presumably a default Game is used when None — verify
        turns: int = None,  # turns per match (module imports DEFAULT_TURNS)
        prob_end: float = None,  # presumably per-turn match-end probability — verify
        repetitions: int = 10,  # number of repetitions of the tournament
        noise: float = 0,  # presumably probability an action is flipped — verify
        edges: List[Tuple] = None,  # presumably restricts which pairs play — verify
        match_attributes: dict = None,  # presumably overrides passed to matches — verify
    ) -> None:
        # NOTE(review): the docstring below opens but its body and the rest of
        # __init__ are missing — the source appears truncated here.
        """
import random
from axelrod.action import Action
from axelrod import random_choice
from axelrod.player import Player
C, D = Action.C, Action.D
class BushMosteller(Player):
    """
    A player that is based on Bush Mosteller reinforced learning algorithm, it
    decides what it will play only depending on its own previous payoffs.

    The probability of playing C or D will be updated using a stimulus which
    represents a win or a loss of value based on its previous play's payoff in
    the specified probability. The more a play will be rewarded through rounds,
    the more the player will be tempted to use it.

    Names:

    - Bush Mosteller: [Luis2008]_
    """
    # NOTE(review): the class body (name, classifier, strategy, etc.) is not
    # visible here — the source appears truncated.
"""
Additional strategies from Axelrod's second tournament.
"""
import random
from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice
C, D = Action.C, Action.D
class Champion(Player):
    """
    Strategy submitted to Axelrod's second tournament by Danny Champion.

    This player cooperates on the first 10 moves and plays Tit for Tat for the
    next 15 more moves. After 25 moves, the program cooperates unless all the
    following are true: the other player defected on the previous move, the
    other player cooperated less than 60% and the random number between 0 and 1
    is greater that the other player's cooperation rate.

    Names:

    - Champion: [Axelrod1980b]_
    """
    # NOTE(review): the class body (name, classifier, strategy) is not visible
    # here — the source appears truncated.
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Alternator(Player):
    """
    A player who alternates between cooperating and defecting.

    Names

    - Alternator: [Axelrod1984]_
    - Periodic player CD: [Mittal2009]_
    """

    name = "Alternator"
    classifier = {
        "memory_depth": 1,  # only the previous round is needed to alternate
        "stochastic": False,  # deterministic alternation
        # NOTE(review): the remaining classifier keys and strategy() are not
        # visible here — the source appears truncated.
from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice
from axelrod.strategy_transformers import FinalTransformer
C, D = Action.C, Action.D
# NOTE(review): the docstring below is cut off mid-sentence and the class body
# is missing — the source appears truncated here.
@FinalTransformer((D,), name_prefix=None)  # End with defection
class Stalker(Player):
    """
    This is a strategy which is only influenced by the score.

    Its behavior is based on three values:
    the very_bad_score (all rounds in defection)
    very_good_score (all rounds in cooperation)
    wish_score (average between bad and very_good score)

    It starts with cooperation.

    - If current_average_score > very_good_score, it defects
    - If current_average_score lies in (wish_score, very_good_score) it
from collections import Counter
from axelrod.action import Action, actions_to_str
C, D = Action.C, Action.D
class History(object):
    """
    History class to track the history of play and metadata including
    the number of cooperations and defections, and if available, the
    opponents plays and the state distribution of the history of play.
    """

    def __init__(self, plays=None, coplays=None):
        # NOTE(review): the docstring below is cut off mid-parameter and the
        # rest of __init__ is missing — the source appears truncated here.
        """
        Parameters
        ----------
        plays:
            An ordered iterable of the actions of the player.
        coplays:
from collections import Counter, namedtuple
import csv
import itertools
from multiprocessing import cpu_count
import numpy as np
import tqdm
from axelrod.action import Action
import dask as da
import dask.dataframe as dd
from . import eigen
C, D = Action.C, Action.D
def update_progress_bar(method):
    """A decorator to update a progress bar if it exists.

    Parameters
    ----------
    method:
        The method to wrap. Its return value is passed through unchanged.

    Returns
    -------
    callable
        The wrapped method. After each call it increments
        ``args[0].progress_bar`` by one, when that attribute exists.
    """

    def wrapper(*args, **kwargs):
        """Run the method and update the progress bar if it exists."""
        output = method(*args, **kwargs)
        try:
            # args[0] is `self` for bound methods; only instances that carry
            # a `progress_bar` attribute get updated.
            args[0].progress_bar.update(1)
        except AttributeError:
            # No progress bar configured — this is best-effort, so skip.
            pass
        return output

    # Bug fix: the decorator previously fell off the end without returning
    # `wrapper`, which would make every decorated method None.
    return wrapper
import random
from typing import List
from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice
Vector = List[float]
C, D = Action.C, Action.D
class CollectiveStrategy(Player):
    """Defined in [Li2009]_. 'It always cooperates in the first move and defects
    in the second move. If the opponent also cooperates in the first move and
    defects in the second move, CS will cooperate until the opponent defects.
    Otherwise, CS will always defect.'

    Names:

    - Collective Strategy: [Li2009]_
    """

    name = "CollectiveStrategy"
    # NOTE(review): the classifier dict and strategy() method are not visible
    # here — the source appears truncated.