How to use the axelrod.player.Player class in Axelrod

To help you get started, we’ve selected a few Axelrod examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github Axelrod-Python / Axelrod / axelrod / strategies / punisher.py View on Github external
self.grudge_memory = 0
            self.grudged = False

        if self.grudged:
            self.grudge_memory += 1
            return D

        elif D in opponent.history[-1:]:
            self.mem_length = (opponent.defections * 20) // len(opponent.history)
            self.grudged = True
            return D

        return C


class InversePunisher(Player):
    """
    An inverted version of Punisher. The player starts by cooperating however
    will defect if at any point the opponent has defected, and forgets after
    mem_length matches, with 1 <= mem_length <= 20. This time mem_length is
    proportional to the amount of time the opponent has played C.

    Names:

    - Inverse Punisher: Original name by Geraint Palmer
    """

    name = "Inverse Punisher"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
github Axelrod-Python / Axelrod / axelrod / strategies / forgiver.py View on Github external
"inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def strategy(self, opponent: Player) -> Action:
        """
        Cooperate unless the opponent has defected in more than 10 percent
        of the rounds played so far, in which case defect.
        """
        defection_threshold = len(opponent.history) / 10.0
        return D if opponent.defections > defection_threshold else C


class ForgivingTitForTat(Player):
    """
    A player starts by cooperating however will defect if at any point, the
    opponent has defected more than 10 percent of the time, and their most
    recent decision was defect.

    Names:

    - Forgiving Tit For Tat: Original name by Thomas Campbell
    """

    name = "Forgiving Tit For Tat"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
github Axelrod-Python / Axelrod / axelrod / strategies / grudger.py View on Github external
"long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    @staticmethod
    def strategy(opponent: Player) -> Action:
        """Cooperate until the opponent's first defection, then defect for
        every remaining round (the opponent is never forgiven)."""
        return D if opponent.defections else C


class ForgetfulGrudger(Player):
    """
    A player starts by cooperating however will defect if at any point the
    opponent has defected, but forgets after mem_length matches.

    Names:

    - Forgetful Grudger: Original name by Geraint Palmer
    """

    name = "Forgetful Grudger"
    classifier = {
        "memory_depth": 10,
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
github Axelrod-Python / Axelrod / axelrod / strategies / resurrection.py View on Github external
from axelrod.action import Action
from axelrod.player import Player

C, D = Action.C, Action.D


class Resurrection(Player):
    """
    A player starts by cooperating and defects if the number of rounds
    played by the player is greater than five and the last five rounds
    are defections.

    Otherwise, the strategy plays like Tit-for-tat.

    Names:

    - Resurrection: [Eckhart2015]_
    """

    # These are various properties for the strategy
    name = "Resurrection"
    classifier = {
        "memory_depth": 5,
github Axelrod-Python / Axelrod / axelrod / strategies / grudger.py View on Github external
from axelrod.action import Action
from axelrod.player import Player

C, D = Action.C, Action.D


class Grudger(Player):
    """
    A player starts by cooperating however will defect if at any point the
    opponent has defected.

    This strategy came 7th in Axelrod's original tournament.

    Names:

    - Friedman's strategy: [Axelrod1980]_
    - Grudger: [Li2011]_
    - Grim: [Berg2015]_
    - Grim Trigger: [Banks1990]_
    - Spite: [Beaufils1997]_
    - Vengeful: [Ashlock2009]_
    """
github Axelrod-Python / Axelrod / axelrod / strategies / negation.py View on Github external
from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice

C, D = Action.C, Action.D


class Negation(Player):
    """
    A player starts by cooperating or defecting randomly if it's their first move,
    then simply doing the opposite of the opponents last move thereafter.

    Names:

    - Negation: [PD2017]_
    """

    name = "Negation"
    classifier = {
        "memory_depth": 1,
        "stochastic": True,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
github Axelrod-Python / Axelrod / axelrod / strategies / prober.py View on Github external
}

    def strategy(self, opponent: Player) -> Action:
        """Probe with a fixed C, D opening, then cooperate only against an
        opponent whose first two moves were exactly [C, D] and who has
        defected no more than once overall; otherwise defect."""
        rounds_played = len(self.history)
        # Fixed opening: cooperate on move one, defect on move two.
        if rounds_played == 0:
            return C
        if rounds_played == 1:
            return D
        # More than a single defection: treat the opponent as hostile.
        if opponent.defections > 1:
            return D
        # Forgive an opponent whose opening was exactly [C, D].
        return C if opponent.history[0:2] == [C, D] else D


class Detective(Player):
    """
    Starts with C, D, C, C, or with the given sequence of actions.
    If the opponent defects at least once in the first fixed rounds,
    play as TFT forever, else defect forever.

    Names:

    - Detective: [NC2019]_
    """

    name = "Detective"
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
github Axelrod-Python / Axelrod / axelrod / deterministic_cache.py View on Github external
cache[key2] = result2
...
if some_key in cache:
    do_something(cache[some_key])
else:
    ...
"""

import pickle
from collections import UserDict
from typing import List, Tuple

from .action import Action
from .player import Player

# Key form used by callers: a pair of Player instances identifying a match.
CachePlayerKey = Tuple[Player, Player]
# Internal key form: the two players' ``name`` strings (see _key_transform).
CacheKey = Tuple[str, str]


def _key_transform(key: CachePlayerKey) -> CacheKey:
    """Convert a CachePlayerKey to a CacheKey.

    Parameters
    ----------
    key: tuple
        A pair (2-tuple) of player instances.

    Returns
    -------
    tuple
        The two players' ``name`` attributes as a (str, str) pair.
    """
    # Fix: the original docstring described the key as "a 3-tuple"; it is a
    # pair, as the CachePlayerKey alias and this unpacking show.
    return key[0].name, key[1].name


def _is_valid_key(key: CachePlayerKey) -> bool:
    """Validate a deterministic cache player key.
github Axelrod-Python / Axelrod / axelrod / strategies / defector.py View on Github external
classifier = {
        "memory_depth": 0,
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    @staticmethod
    def strategy(opponent: Player) -> Action:
        """Defect on every move, regardless of the opponent's play."""
        return D


class TrickyDefector(Player):
    """A defector that is trying to be tricky.

    Names:

    - Tricky Defector: Original name by Karol Langner
    """

    name = "Tricky Defector"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
github Axelrod-Python / Axelrod / axelrod / strategies / adaptor.py View on Github external
from typing import Dict, Tuple

from axelrod.action import Action
from axelrod.player import Player
from axelrod.random_ import random_choice

from numpy import heaviside

C, D = Action.C, Action.D


class AbstractAdaptor(Player):
    """
    An adaptive strategy that updates an internal state based on the last
    round of play. Using this state the player Cooperates with a probability
    derived from the state.

    s, float:
        the internal state, initially 0
    perr, float:
        an error threshold for misinterpreted moves
    delta, a dictionary of floats:
        additive update values for s depending on the last round's outcome

    Names:

    - Adaptor: [Hauert2002]_