How to use the axelrod.action.Action class in Axelrod

To help you get started, we’ve selected a few Axelrod examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github Axelrod-Python / Axelrod / axelrod / strategies / averagecopier.py View on Github external
from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice

C, D = Action.C, Action.D


class AverageCopier(Player):
    """
    The player will cooperate with probability p if the opponent's cooperation
    ratio is p. Starts with random decision.

    Names:

    - Average Copier: Original name by Geraint Palmer
    """

    name = "Average Copier"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
github Axelrod-Python / Axelrod / axelrod / strategies / axelrod_first.py View on Github external
Additional strategies from Axelrod's first tournament.
"""

import random

from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice
from axelrod.strategy_transformers import FinalTransformer
from .memoryone import MemoryOnePlayer

from scipy.stats import chisquare

from typing import List, Dict, Tuple

C, D = Action.C, Action.D


class Davis(Player):
    """
    Submitted to Axelrod's first tournament by Morton Davis.

    A player starts by cooperating for 10 rounds then plays Grudger,
    defecting if at any point the opponent has defected.

    This strategy came 8th in Axelrod's original tournament.

    Names:

    - Davis: [Axelrod1980]_
    """
github Axelrod-Python / Axelrod / axelrod / strategies / tranquiliser.py View on Github external
import axelrod as axl
import numpy
from axelrod.action import Action
from axelrod.player import Player
from axelrod.interaction_utils import compute_final_score
import random

C, D = Action.C, Action.D

dict = {C: 0, D: 1}


class Tranquiliser(Player):
    '''
A player that uses two ratios (which depend on the number of cooperations
and defections of the player and the opponent) to decide the next move to play.
The player can be present in three states (denoted FD): 0, 1 or 2, each causing a different outcome
dependent on the value of FD. The value of FD is dependent on the aforementioned ratios.
'''

    name = 'Tranquiliser'
    classifier = {
        'memory_depth': float('inf'),
        'stochastic': True,
github Axelrod-Python / Axelrod / axelrod / strategy_transformers.py View on Github external
"""

import collections
import copy
import inspect
import random
from importlib import import_module
from typing import Any

from numpy.random import choice

from .action import Action
from .player import Player, defaultdict
from .random_ import random_choice

C, D = Action.C, Action.D

# Note: After a transformation is applied, the player's history is overwritten
# with the modified history just like in the noisy tournament case. This can
# lead to unexpected behavior, such as when FlipTransform is applied to
# Alternator.


def StrategyTransformerFactory(strategy_wrapper, name_prefix=None, reclassifier=None):
    """Modify an existing strategy dynamically by wrapping the strategy
    method with the argument `strategy_wrapper`.

    Parameters
    ----------
    strategy_wrapper: function
        A function of the form `strategy_wrapper(player, opponent, proposed_action, *args, **kwargs)`
        Can also use a class that implements
github Axelrod-Python / Axelrod / axelrod / strategies / selfsteem.py View on Github external
from math import pi, sin

from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice


C, D = Action.C, Action.D


class SelfSteem(Player):
    """
    This strategy is based on the feeling with the same name.
    It is modeled on the sine curve(f = sin( 2* pi * n / 10 )), which varies
    with the current iteration.

    If f > 0.95, 'ego' of the algorithm is inflated; always defects.
    If 0.95 > abs(f) > 0.3, rational behavior; follows TitForTat algorithm.
    If 0.3 > f > -0.3; random behavior.
    If f < -0.95, algorithm is at rock bottom; always cooperates.

    Furthermore, the algorithm implements a retaliation policy, if the opponent
    defects; the sin curve is shifted. But due to lack of further information,
    this implementation does not include a sin phase change.
github Axelrod-Python / Axelrod / axelrod / strategies / oncebitten.py View on Github external
import random

from axelrod.action import Action
from axelrod.player import Player

C, D = Action.C, Action.D


class OnceBitten(Player):
    """
    Cooperates once when the opponent defects, but if they defect twice in a row
    defaults to forgetful grudger for 10 turns defecting.

    Names:

    - Once Bitten: Original name by Holly Marissa
    """

    name = "Once Bitten"
    classifier = {
        "memory_depth": 12,  # Long memory
        "stochastic": False,
github Axelrod-Python / Axelrod / axelrod / strategies / forgiver.py View on Github external
from axelrod.action import Action
from axelrod.player import Player

C, D = Action.C, Action.D


class Forgiver(Player):
    """
    A player starts by cooperating however will defect if at any point
    the opponent has defected more than 10 percent of the time

    Names:

    - Forgiver: Original name by Thomas Campbell
    """

    name = "Forgiver"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,