How to use the axelrod.Actions.D attribute in Axelrod

To help you get started, we’ve selected a few Axelrod examples based on popular ways it is used in public projects.
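
In the releases these snippets come from, `Actions.C` and `Actions.D` are the cooperate and defect constants (newer releases expose an `Action` enum instead). A minimal sketch of the common import-and-alias pattern, with a toy decision rule for illustration:

from axelrod import Actions

C, D = Actions.C, Actions.D

def tit_for_tat_move(opponent_history):
    """Toy rule: defect only if the opponent defected last round."""
    if opponent_history and opponent_history[-1] == D:
        return D
    return C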


Axelrod-Python / Axelrod / axelrod / payoff.py (view on GitHub)
import math
from numpy import median, mean, std
from axelrod import Actions

C, D = Actions.C, Actions.D


def player_count(interactions):
    """
    The number of players derived from a dictionary of interactions

    Parameters
    ----------
    interactions : dictionary
        A dictionary of the form:

        e.g. for a round robin between Cooperator, Defector and Alternator
        with 2 turns per round:
        {
            (0, 0): [(C, C), (C, C)],
            (0, 1): [(C, D), (C, D)],
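
The docstring's example dictionary is cut off above. Completed under the assumption of a three-player round robin (Cooperator = 0, Defector = 1, Alternator = 2) with two turns per pairing, and with a plausible, not necessarily the library's, implementation of player_count, it would look roughly like this:

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

interactions = {
    (0, 0): [(C, C), (C, C)],
    (0, 1): [(C, D), (C, D)],
    (0, 2): [(C, C), (C, D)],
    (1, 1): [(D, D), (D, D)],
    (1, 2): [(D, C), (D, D)],
    (2, 2): [(C, C), (D, D)],
}

def player_count(interactions):
    # Derive the number of players from the largest index appearing in the keys.
    return max(max(pair) for pair in interactions) + 1

print(player_count(interactions))  # 3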
Axelrod-Python / Axelrod / axelrod / strategies / handshake.py (view on GitHub)
from axelrod import Actions, Player, init_args

C, D = Actions.C, Actions.D


class Handshake(Player):
    """Starts with C, D. If the opponent plays the same way, cooperate forever,
    else defect forever.

    Names:

    - Handshake: [Robson1989]_
    """

    name = 'Handshake'
    classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': False,
        'makes_use_of': set(),
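
The classifier dictionary above is truncated, but the docstring describes the rule completely. A self-contained sketch of that rule (illustrative names, not the library's strategy method):

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def handshake_move(my_history, opponent_history):
    """Open with C, D; if the opponent echoed the handshake, cooperate forever."""
    initial = [C, D]
    if len(my_history) < 2:
        return initial[len(my_history)]
    if opponent_history[:2] == initial:
        return C
    return D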
Axelrod-Python / Axelrod / axelrod / strategies / apavlov.py (view on GitHub)
from axelrod import Actions, Player

C, D = Actions.C, Actions.D


class APavlov2006(Player):
    """
    APavlov as defined in http://www.cs.nott.ac.uk/~pszjl/index_files/chapter4.pdf
    (pages 10-11).

    APavlov attempts to classify its opponent as one of five strategies:
    Cooperative, ALLD, STFT, PavlovD, or Random. APavlov then responds in a
    manner intended to achieve mutual cooperation or to defect against
    uncooperative opponents.
    """

    name = "Adaptive Pavlov 2006"
    classifier = {
        'memory_depth': float('inf'),
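
A plausible way to exercise this strategy, assuming a release in which APavlov2006, Defector and Match are all exposed under these names:

import axelrod as axl

# Against an unconditional defector, APavlov should settle on mutual defection.
players = (axl.APavlov2006(), axl.Defector())
match = axl.Match(players, turns=20)
match.play()
print(match.final_score())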
Axelrod-Python / Axelrod / axelrod / strategies / naiveprober.py (view on GitHub)
from axelrod import Actions, Player, init_args, random_choice

C, D = Actions.C, Actions.D


class NaiveProber(Player):
    """
    Like tit-for-tat, but it occasionally defects with a small probability.
    """

    name = 'Naive Prober'
    classifier = {
        'memory_depth': 1,  # Four-Vector = (1.,0.,1.,0.)
        'stochastic': True,
        'makes_use_of': set(),
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }
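
The behaviour itself is simple enough to sketch standalone. The real player stores its defection probability on the instance, so the p argument here is purely illustrative:

import random

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def naive_prober_move(opponent_history, p=0.1):
    # Occasionally defect regardless of history.
    if random.random() < p:
        return D
    # Otherwise play tit-for-tat: copy the opponent's last move, starting with C.
    if opponent_history and opponent_history[-1] == D:
        return D
    return C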
Axelrod-Python / Axelrod / axelrod / strategies / meta.py (view on GitHub)
from axelrod import Actions, Player, init_args, obey_axelrod
from axelrod.strategy_transformers import NiceTransformer
from ._strategies import all_strategies
from .hunter import (
    AlternatorHunter, CooperatorHunter, CycleHunter, DefectorHunter,
    EventualCycleHunter, MathConstantHunter, RandomHunter,)
from numpy.random import choice


# Needs to be computed manually to prevent circular dependency
ordinary_strategies = [s for s in all_strategies if obey_axelrod(s)]
C, D = Actions.C, Actions.D


class MetaPlayer(Player):
    """A generic player that has its own team of players."""

    name = "Meta Player"
    classifier = {
        'memory_depth': float('inf'),  # Long memory
        'stochastic': True,
        'makes_use_of': {'game', 'length'},
        'long_run_time': True,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }
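
The excerpt ends before the voting logic. As an assumption about how such a team-based player could combine its members' proposals (not the library's implementation), a simple majority vote might look like:

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def meta_majority_move(team_moves):
    """Defect only if a strict majority of the team proposes D."""
    defections = sum(move == D for move in team_moves)
    return D if defections > len(team_moves) / 2 else C

print(meta_majority_move([C, D, D]))  # D
print(meta_majority_move([C, C, D]))  # C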
Axelrod-Python / Axelrod / axelrod / random_.py (view on GitHub)
import random
import numpy
from axelrod import Actions

C, D = Actions.C, Actions.D

def random_choice(p=0.5):
    """
    Return 'C' with probability `p`, else return 'D'

    Emulates Python's random.choice(['C', 'D']), which is not consistent
    between Python 2.7 and Python 3.4

    Parameters
    ----------

    p : float
        The probability of picking 'C'

    Returns
    -------
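
The function body is cut off above, but the docstring pins down the behaviour. A hedged completion (the library's own implementation may differ in detail):

import random

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def random_choice(p=0.5):
    """Return C with probability p, otherwise D."""
    if random.random() < p:
        return C
    return D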
Axelrod-Python / Axelrod / axelrod / strategies / calculator.py (view on GitHub)
from axelrod import Actions, Player
from axelrod._strategy_utils import detect_cycle
from .axelrod_first import Joss


C, D = Actions.C, Actions.D


class Calculator(Player):
    """
    Plays like (Hard) Joss for the first 20 rounds. If periodic behavior is
    detected, defect forever. Otherwise play TFT.
    """

    name = "Calculator"
    classifier = {
        'memory_depth': float('inf'),
        'stochastic': True,
        'makes_use_of': set(),
        'long_run_time': False,
        'inspects_source': False,
        'manipulates_source': False,
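
Calculator's rule is three-phased: Joss-like play for 20 rounds, permanent defection if the opponent looks periodic, tit-for-tat otherwise. A simplified standalone sketch of the post-inspection decision, with the opening Joss phase replaced by tit-for-tat and a crude cycle check standing in for detect_cycle:

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def looks_periodic(history, max_period=10):
    """Crude stand-in for axelrod._strategy_utils.detect_cycle."""
    n = len(history)
    for period in range(1, min(max_period, n // 2) + 1):
        if all(history[i] == history[i % period] for i in range(n)):
            return True
    return False

def calculator_move(turn, opponent_history):
    if turn > 20 and looks_periodic(opponent_history):
        return D  # exploit a detected cycle by defecting forever
    # Otherwise fall back to tit-for-tat style play.
    if opponent_history and opponent_history[-1] == D:
        return D
    return C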
Axelrod-Python / Axelrod / axelrod / strategies / gobymajority.py (view on GitHub)
from axelrod import Actions, Player, init_args

import copy

C, D = Actions.C, Actions.D


class GoByMajority(Player):
    """A player examines the history of the opponent: if the opponent has more
    defections than cooperations then the player defects.

    In case of equal
    number of defections and cooperations this player will Cooperate. Passing
    the `soft=False` keyword argument when initialising will create a
    HardGoByMajority which Defects in case of equality.

    An optional memory attribute will limit the number of turns remembered (by
    default this is 0)
    """

    name = 'Go By Majority'
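
The majority rule is easy to express independently of the Player machinery. A sketch of the unlimited-memory case, covering both the soft (cooperate on ties) and hard (defect on ties) variants:

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def go_by_majority_move(opponent_history, soft=True):
    defections = opponent_history.count(D)
    cooperations = opponent_history.count(C)
    if defections > cooperations:
        return D
    if defections == cooperations:
        return C if soft else D
    return C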
Axelrod-Python / Axelrod / axelrod / strategies / memoryone.py (view on GitHub)
from axelrod import Actions, Player, init_args, random_choice

C, D = Actions.C, Actions.D


class MemoryOnePlayer(Player):
    """Uses a four-vector for strategies based on the last round of play,
    (P(C|CC), P(C|CD), P(C|DC), P(C|DD)), defaults to Win-Stay Lose-Shift.
    Intended to be used as an abstract base class or to at least be supplied
    with an initializing four_vector."""

    name = 'Generic Memory One Player'
    classifier = {
        'memory_depth': 1,  # Memory-one Four-Vector
        'stochastic': True,
        'makes_use_of': set(),
        'long_run_time': False,
        'inspects_source': False,
        'manipulates_source': False,
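
A memory-one player keeps only the last round and looks up its cooperation probability in the four-vector. A self-contained sketch of that lookup, using the Win-Stay Lose-Shift vector mentioned in the docstring:

import random

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

# Win-Stay Lose-Shift: (P(C|CC), P(C|CD), P(C|DC), P(C|DD)) = (1, 0, 0, 1).
four_vector = {(C, C): 1.0, (C, D): 0.0, (D, C): 0.0, (D, D): 1.0}

def memory_one_move(my_last, opponent_last):
    p = four_vector[(my_last, opponent_last)]
    return C if random.random() < p else D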
Axelrod-Python / Axelrod / axelrod / strategies / cycler.py (view on GitHub)
    def strategy(self, opponent):
        if self.cycle_counter < self.cycle_length:
            self.cycle_counter += 1
            return Actions.C
        else:
            self.cycle_length += 1
            self.cycle_counter = 0
            return Actions.D
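
Run on its own, this counter logic emits ever-longer runs of C separated by single defections. A standalone re-creation, assuming the counters start at cycle_length = 1 and cycle_counter = 0 (the class itself may initialise them differently):

C, D = 'C', 'D'  # stand-ins for Actions.C, Actions.D

def generate_sequence(turns, cycle_length=1, cycle_counter=0):
    moves = []
    for _ in range(turns):
        if cycle_counter < cycle_length:
            cycle_counter += 1
            moves.append(C)
        else:
            cycle_length += 1
            cycle_counter = 0
            moves.append(D)
    return ''.join(moves)

print(generate_sequence(12))  # CDCCDCCCDCCC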