In [ ]:
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import random
import matplotlib.patches as mpatches
import math
import copy
import os
from enum import Enum
import re

# Make sure the output directory for saved figures exists
os.makedirs('img', exist_ok=True)

Game definition
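
Each group of $N$ players repeatedly plays a public goods game: cooperators contribute $c$, the pot is multiplied by $F$ and shared equally, so with $k$ cooperators in a group a defector earns $\Pi_D = kFc/N$ and a cooperator $\Pi_C = kFc/N - c$. Strategies evolve by a Fermi imitation rule: each round one random player $A$ either mutates to a random other strategy (with probability $\mu$) or imitates a second random player $B$ with probability

$$p = \frac{1 - \mu}{1 + e^{\beta (f_A - f_B)}},$$

where $f_A$ and $f_B$ are the mean payoffs accumulated so far by the two players' strategies. Finally, every player misexecutes their intended behavior with probability $\epsilon$.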


In [ ]:
def withChance(p):
    return random.random() < p

def flatten(l):
    return [item for sublist in l for item in sublist]

class Behavior(Enum):
    C = 1
    D = 0

class Player:
    def __init__(self, initial, strategy):
        self.behavior = [initial]
        self.strategies = [strategy]
        self.payoffs = []
        
    def current_behavior(self):
        return self.behavior[-1]
    
    def current_strategy(self):
        return self.strategies[-1]
    
    def set_current_behavior(self, b):
        self.behavior[-1] = b
        
    def set_current_strategy(self, s):
        self.strategies[-1] = s
        
    def add_behavior(self, b):
        self.behavior.append(b)

    def add_strategy(self, s):
        self.strategies.append(s)
        
    def is_cooperator(self):
        return self.current_behavior() == Behavior.C
    
    def give_payoff(self, c):
        self.payoffs.append(c)
        
class PublicGoodsGame:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs) # adopt every config entry (Z, N, F, c, ...) as an attribute
        self.players = []
        self.rounds = 0
        
        for _ in range(self.Z // self.N): # Z/N groups
            group = []
            for i in range(self.N): # N people per group
                initialBehavior = Behavior.C if withChance(self.initialC) else Behavior.D
                chance = random.random()
                strategy = None
                for strategyOdds in self.strategies:
                    if chance < strategyOdds[0]:
                        strategy = strategyOdds[1]
                        break
                    else:
                        chance -= strategyOdds[0]
                group.append(Player(initialBehavior, strategy))
                
            self.players.append(group)
            
        # Replace the (probability, strategy) config list with the distinct strategies in play
        self.strategies = []
        for player in flatten(self.players):
            if player.current_strategy() not in self.strategies:
                self.strategies.append(player.current_strategy())
        
        # Sanity check (debug): the realized initial cooperator fraction should be close to initialC
        # c_count = sum(1 for player in flatten(self.players) if player.is_cooperator())
        # print(f"Initial C ratio is {c_count / self.Z}, aimed at {self.initialC}")

    def pick_random_player(self):
        return random.choice(random.choice(self.players))
    
    def is_first_round(self):
        return self.rounds == 0
    
    def behavior_fitness(self, target_behavior):
        payoffs = 0.
        amount = 0
        for group in self.players:
            for player in group:
                for behavior, payoff in zip(player.behavior, player.payoffs):
                    if behavior == target_behavior:
                        payoffs += payoff
                        amount += 1
        return payoffs / amount if amount != 0 else 0
    
    def strategy_fitness(self, target_strategy):
        payoffs = 0.
        amount = 0
        for group in self.players:
            for player in group:
                for strategy, payoff in zip(player.strategies, player.payoffs):
                    if strategy == target_strategy:
                        payoffs += payoff
                        amount += 1
        return payoffs / amount if amount != 0 else 0
    
    def possible_strategies(self):
        return self.strategies
    
    def determine_new_behaviors(self):
        # Strategy update: one random player may revise their strategy each round
        random_player = self.pick_random_player()
        new_strategy = None
        
        if withChance(self.mu): # Mutate to a random other strategy with chance mu
            strategies = self.possible_strategies()
            strategies = [s for s in strategies if s != random_player.current_strategy()]
            if len(strategies):
                new_strategy = random.choice(strategies)
        elif not self.is_first_round(): # Imitate someone else according to their fitness
            another_player = self.pick_random_player()
            if another_player.current_strategy() != random_player.current_strategy():
                # Fermi rule: imitate with a probability that grows as the other
                # player's strategy outperforms this player's
                fsa = self.strategy_fitness(random_player.current_strategy())
                fsb = self.strategy_fitness(another_player.current_strategy())
                chance = (1. - self.mu) / (1 + math.exp(self.beta * (fsa - fsb)))
                if withChance(chance):
                    new_strategy = another_player.current_strategy()
                    
        # Perpetuate everyone else's strategy
        for player in flatten(self.players):
            player.add_strategy(player.current_strategy())
            
        # Apply the single player's strategy revision, if any
        if new_strategy is not None:
            random_player.set_current_strategy(new_strategy)
        
        # determine behaviors for this round
        if not self.is_first_round():
            new_behaviors = []
            for group in self.players:
                group_behaviors = [player.current_strategy()(player, self, group) for player in group]
                new_behaviors += group_behaviors
                    
            for behavior, player in zip(new_behaviors, flatten(self.players)):
                player.add_behavior(behavior)
            
        # Every player may deviate from their behavior with chance epsilon
        for player in flatten(self.players):
            if withChance(self.epsilon):
                player.set_current_behavior(Behavior.D if player.is_cooperator() else Behavior.C)
                
    def current_behaviors(self):
        return [player.current_behavior() for player in flatten(self.players)]
        
    def do_contributions(self):
        self.determine_new_behaviors()
        
        for group in self.players:
            k = sum(1 for player in group if player.is_cooperator())  # contributor count
            defector_payoff = k * self.F * self.c / self.N
            contributor_payoff = defector_payoff - self.c # contributors also pay the cost c
            
            for player in group:
                player.give_payoff(contributor_payoff if player.is_cooperator() else defector_payoff)
        
        self.rounds += 1
            
    def contributor_fractions_throughout_time(self):
        behaviors = [player.behavior for player in flatten(self.players)]
        behaviors_per_round = zip(*behaviors)
        contributor_amounts = [sum(1 for behavior in round_behaviors if behavior == Behavior.C) for round_behaviors in behaviors_per_round]
        contributor_fractions = [amount / float(self.Z) for amount in contributor_amounts]
        return contributor_fractions
    
    def strategy_fractions_throughout_time(self, target_strategy):
        strategies = [player.strategies for player in flatten(self.players)]
        strategies_per_round = zip(*strategies)
        target_amounts = [sum(1 for strategy in round_strategies if strategy == target_strategy) for round_strategies in strategies_per_round]
        target_fractions = [amount / float(self.Z) for amount in target_amounts]
        return target_fractions
    
    def mean_payoff_throughout_time(self):
        payoffs = [player.payoffs for player in flatten(self.players)]
        payoffs_per_round = zip(*payoffs)
        mean_payoffs = [sum(round_payoffs) / float(self.Z) for round_payoffs in payoffs_per_round]
        return mean_payoffs
    
    def do_game(self, w):
        rounds = 0
        while withChance(w): # another round is played with probability w
            rounds += 1
            self.do_contributions()
        # print(f"{rounds} rounds played")
        return rounds
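
A minimal smoke test of the game loop (an illustrative sketch, not part of the original notebook; the always-cooperate lambda stands in for a real strategy): it checks that one behavior and one payoff are recorded per player per round.


In [ ]:
_g = PublicGoodsGame(Z=20, N=10, F=8., c=2, beta=1, epsilon=0.05, mu=0.001,
                     initialC=0.5, strategies=[(1., lambda player, game, group: Behavior.C)])
for _ in range(5):
    _g.do_contributions()
assert _g.rounds == 5
assert all(len(p.payoffs) == 5 and len(p.behavior) == 5 for p in flatten(_g.players))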

Helper functions used later on


In [ ]:
def all_same(l):
    # True if every element equals the first (vacuously true for an empty list)
    return all(item == l[0] for item in l)

def allOrNone(player, game, group):
    # Cooperate only if the whole group behaved identically last round
    last_round = [member.current_behavior() for member in group]
    if all_same(last_round):
        return Behavior.C
    else:
        return Behavior.D
    
AoN = allOrNone
    
def unequal_mean(lists, cutoff=0):
    # Element-wise mean over lists of unequal length; stops once no more than
    # <cutoff> lists still contribute to a position
    means = []
    max_len = max(len(l) for l in lists)
    for i in range(max_len):
        total = 0.
        current = 0
        for l in lists:
            if i < len(l):
                current += 1
                total += l[i]
        if current <= cutoff:
            return means
        means.append(total / current)
    return means

def cutoff_longer_ones(lists):
    # Truncate every list to the length of the shortest one
    min_len = min(len(l) for l in lists)
    return [l[:min_len] for l in lists]
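
A quick sanity check of `unequal_mean` (an illustrative aside, not part of the original analysis): positions are averaged over however many lists reach them, and dropped once no more than `cutoff` lists still contribute.


In [ ]:
# The longer list alone supplies the third mean; with cutoff=1,
# positions backed by only one list are dropped
assert unequal_mean([[1., 2., 3.], [1., 2.]]) == [1.0, 2.0, 3.0]
assert unequal_mean([[1., 2., 3.], [1., 2.]], cutoff=1) == [1.0, 2.0]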

Base configuration; adjust values as needed


In [ ]:
config = {
    'Z': 100,
    'N': 10,
    'F': 8.,   # resource multiplier
    'c': 2,    # amount each cooperator contributes
    'beta': 1, # intensity of selection
    'epsilon': 0.05, # chance of misexecuting the intended behavior
    'mu': 0.001,     # strategy mutation rate
    'initialC': 0.5, # initial fraction of cooperators
    'strategies': [
        (1., AoN), # population consisting solely of allOrNone players
    ],
}
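
As a worked example under this configuration (a check against the payoff formula above, not part of the original notebook): in a fully cooperative group, each of the $N = 10$ players earns $kFc/N - c = 10 \cdot 8 \cdot 2 / 10 - 2 = 14$ per round. A lone defector among nine cooperators would earn $9 \cdot 8 \cdot 2 / 10 = 14.4$, more than the $12.4$ each remaining cooperator gets, which is precisely the free-riding incentive at play below.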

Main loop for basic configuration

Collects the cooperation fraction and mean payoff for each round


In [ ]:
random.seed(4)
w = 0.96
simulations = 1000

contributors_per_sim = []
payoffs_per_sim = []
rounds_per_sim = []

for sim in range(simulations):
    game = PublicGoodsGame(**config)
    rounds = game.do_game(w)
    rounds_per_sim.append(rounds)
    contributor_fractions = game.contributor_fractions_throughout_time()
    contributors_per_sim.append(contributor_fractions)
    mean_payoffs = game.mean_payoff_throughout_time()
    payoffs_per_sim.append(mean_payoffs)
    
# Only average a round if more than <cutoff> simulations lasted that long
cutoff = simulations / 4.
mean_contributor_fractions = unequal_mean(contributors_per_sim, cutoff)
mean_payoffs = unequal_mean(payoffs_per_sim, cutoff)
max_rounds = max(rounds_per_sim)

Cooperation fraction plot


In [ ]:
plt.clf()  # Clear previous figure
xs = range(1, len(mean_contributor_fractions)+1)

plt.xlabel('round')
plt.ylabel(r'$\eta$')
title = r'$\eta$ throughout time (AoN, {} sims)'.format(simulations)
clean_title = 'cfraction_aon'
plt.title(title)

plt.plot(xs, mean_contributor_fractions)
plt.savefig('img/' + clean_title + '.png')

Mean payoff plot


In [ ]:
plt.clf()  # Clear previous figure
xs = range(1, len(mean_payoffs)+1)

plt.xlabel('round')
plt.ylabel('Mean payoff')
title = 'Mean payoff throughout time (AoN, {} sims)'.format(simulations)
clean_title = 'payoff_aon'
plt.title(title)

plt.plot(xs, mean_payoffs)
plt.savefig('img/' + clean_title + '.png')

Single variable exploration

The code below iterates over different values of a chosen parameter, runs the simulations for each value, and plots the results.


In [ ]:
unword = re.compile(r'[^\w]')
clean = lambda x: unword.sub('', x)

def explore(**params):
    random.seed(4)

    conf_name = params.get('conf_name')
    values = params.get('values')
    plotted_name = params.get('plotted_name')
    w = params.get('w', .96)
    simulations = params.get('simulations', 1000)
    to_label = params.get('to_label', lambda v: "{} $ = {}$".format(plotted_name, v))
    to_value = params.get('to_value', lambda i: i)
    clean_name = clean(plotted_name)
    strategy_name = params.get('strategy_name', 'AoN')
    override = params.get('conf_override', {})
    
    exp_config = copy.copy(config)
    exp_config.update(override)
        
    target_strategy = exp_config['strategies'][0][1] # default to the first strategy supplied
        
    contrib_fractions_per_exp = []
    strategy_fractions_per_exp = []
    payoffs_per_exp = []

    for value in values:
        exp_config[conf_name] = value

        contributors_per_sim = []
        strategies_per_sim = []
        payoffs_per_sim = []
        rounds_per_sim = []

        for sim in range(simulations):
            game = PublicGoodsGame(**exp_config)
            rounds = game.do_game(w)
            rounds_per_sim.append(rounds)
            contributor_fractions = game.contributor_fractions_throughout_time()
            contributors_per_sim.append(contributor_fractions)
            mean_payoffs = game.mean_payoff_throughout_time()
            payoffs_per_sim.append(mean_payoffs)
            strategy_fractions = game.strategy_fractions_throughout_time(target_strategy)
            strategies_per_sim.append(strategy_fractions)

        # Only average a round if more than <cutoff> simulations lasted that long
        cutoff = simulations / 4.
        mean_contributor_fractions = unequal_mean(contributors_per_sim, cutoff)
        mean_payoffs = unequal_mean(payoffs_per_sim, cutoff)
        mean_strategies = unequal_mean(strategies_per_sim, cutoff)
        
        contrib_fractions_per_exp.append(mean_contributor_fractions)
        payoffs_per_exp.append(mean_payoffs)
        strategy_fractions_per_exp.append(mean_strategies)

    # Truncate all runs to the shortest so the curves share a common x-range
    contrib_fractions_per_exp = cutoff_longer_ones(contrib_fractions_per_exp)
    payoffs_per_exp = cutoff_longer_ones(payoffs_per_exp)
    strategy_fractions_per_exp = cutoff_longer_ones(strategy_fractions_per_exp)
    
    ### PLOT \eta
    plt.clf()  # Clear previous figure
    plt.xlabel('round')
    plt.ylabel(r'$\eta$')
    plt.ylim([0, 1.1])
    title = r'$\eta$ for different {} ({}, {} sims)'.format(plotted_name, strategy_name, simulations)
    clean_title = 'cfraction_{}_{}'.format(clean_name, clean(strategy_name.lower()))
    plt.title(title)

    for i, value in enumerate(values):
        value = to_value(value)
        mean_contributor_fractions = contrib_fractions_per_exp[i]
        xs = range(len(mean_contributor_fractions))

        plt.plot(xs, mean_contributor_fractions, label=to_label(value))

    legend = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
    plt.savefig('img/' + clean_title + '.png', bbox_extra_artists=[legend], bbox_inches='tight')
    
    ### PLOT PAYOFF
    plt.clf()  # Clear previous figure

    plt.xlabel('round')
    plt.ylabel('mean payoff')
    if params.get('payoff_ylim'):
        plt.ylim(params.get('payoff_ylim'))
    title = 'Mean payoff for different {} ({}, {} sims)'.format(plotted_name, strategy_name, simulations)
    clean_title = 'meanpayoff_{}_{}'.format(clean_name, clean(strategy_name.lower()))
    plt.title(title)

    for i, value in enumerate(values):
        value = to_value(value)
        mean_payoffs = payoffs_per_exp[i]
        xs = range(len(mean_payoffs))

        plt.plot(xs, mean_payoffs, label=to_label(value))

    legend = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
    plt.savefig('img/' + clean_title + '.png', bbox_extra_artists=[legend], bbox_inches='tight')
    
    ### PLOT STRATEGY FRACTION
    plt.clf()  # Clear previous figure

    plt.xlabel('round')
    plt.ylabel('{} fraction'.format(target_strategy.__name__))
    title = '{} fraction for different {} ({}, {} sims)'.format(target_strategy.__name__, plotted_name, strategy_name, simulations)
    clean_title = 'strategyfrac_{}_{}'.format(clean_name, clean(strategy_name.lower()))
    plt.title(title)

    for i, value in enumerate(values):
        value = to_value(value)
        strategy_fractions = strategy_fractions_per_exp[i]
        xs = range(len(strategy_fractions))

        plt.plot(xs, strategy_fractions, label=to_label(value))

    legend = plt.legend(bbox_to_anchor=(1, 1), loc='upper left')
    plt.savefig('img/' + clean_title + '.png', bbox_extra_artists=[legend], bbox_inches='tight')

Epsilon, F and N exploration


In [ ]:
epsilon_exploration_params = {
    'conf_name': 'epsilon',
    'values': [0.001, 0.01, 0.1, 0.2],
    'plotted_name': r'$\epsilon$',
}
explore(**epsilon_exploration_params)

In [ ]:
f_exploration_params = {
    'conf_name': 'F',
    'values': [1, 2, 4, 8],
    'plotted_name': '$F$',
}
explore(**f_exploration_params)

In [ ]:
n_exploration_params = {
    'conf_name': 'N',
    'values': [3, 5, 10],
    'plotted_name': '$N$',
}
explore(**n_exploration_params)

Introduction of different strategies

Both AllC (always cooperate) and AllD (always defect) will be introduced


In [ ]:
def allD(*params):
    # Unconditional defector, ignores the group's history
    return Behavior.D

def allC(*params):
    # Unconditional cooperator, ignores the group's history
    return Behavior.C

In [ ]:
alld_exploration_params = {
    'conf_name': 'strategies',
    'values': [[(p, AoN), ((1-p), allD)] for p in np.arange(0.1, 1, 0.1)],
    'plotted_name': 'AoN/AllD fractions',
    'to_value': lambda v: v[0][0],
    'to_label': lambda v: 'AoN fraction = {}'.format(v),
}
explore(**alld_exploration_params)

In [ ]:
allc_exploration_params = {
    'conf_name': 'strategies',
    'values': [[(p, AoN), ((1-p), allC)] for p in np.arange(0.1, 1, 0.1)],
    'plotted_name': 'AoN/AllC fractions',
    'to_value': lambda v: v[0][0],
    'to_label': lambda v: 'AoN fraction = {}'.format(v),
}
explore(**allc_exploration_params)

Introduction of Not Quite All-or-None (NQAoN)


In [ ]:
def notQuiteAllOrNone(gamma):
    # A tolerant AoN: cooperate if at least a fraction (1 - gamma) of the group
    # behaved identically last round, in either direction
    def NQAoN(player, game, group):
        group_size = float(len(group))
        cooperators = sum(1 for p in group if p.is_cooperator())
        defectors = group_size - cooperators
        if cooperators / group_size >= 1 - gamma or defectors / group_size >= 1 - gamma:
            return Behavior.C
        else:
            return Behavior.D
        
    return NQAoN
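
To make the threshold concrete (an illustrative recomputation of the rule above, not part of the original notebook): with $N = 10$ and $\gamma = 0.2$, NQAoN cooperates whenever at least $8$ of the $10$ group members behaved identically last round.


In [ ]:
# Re-derive the NQAoN decision arithmetically for every cooperator count k
gamma, n = 0.2, 10
for k in range(n + 1):
    cooperates = k / n >= 1 - gamma or (n - k) / n >= 1 - gamma
    print(k, 'C' if cooperates else 'D')  # cooperates for k <= 2 or k >= 8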

In [ ]:
epsilon_exploration_params = {
    'conf_override': {
        'strategies': [(1., notQuiteAllOrNone(0.1)),],
    },
    'strategy_name': r'NQAoN ($\gamma = 0.1$)',
    'conf_name': 'epsilon',
    'values': [0.001, 0.01, 0.1, 0.2],
    'plotted_name': r'$\epsilon$',
    'payoff_ylim': [0, 15],
}
explore(**epsilon_exploration_params)

In [ ]:
epsilon_exploration_params = {
    'conf_override': {
        'strategies': [(1., notQuiteAllOrNone(0.2)),],
    },
    'strategy_name': r'NQAoN ($\gamma = 0.2$)',
    'conf_name': 'epsilon',
    'values': [0.001, 0.01, 0.1, 0.2],
    'plotted_name': r'$\epsilon$',
    'payoff_ylim': [0, 15],
}
explore(**epsilon_exploration_params)

In [ ]:
alld_exploration_params = {
    'strategy_name': r'NQAoN ($\gamma = 0.2$)',
    'conf_name': 'strategies',
    'values': [[(p, notQuiteAllOrNone(0.2)), ((1-p), allD)] for p in np.arange(0.1, 1, 0.1)],
    'plotted_name': 'NQAoN/AllD fractions',
    'to_value': lambda v: v[0][0],
    'to_label': lambda v: 'NQAoN fraction = {}'.format(v),
    'simulations': 1,
}
explore(**alld_exploration_params)

In [ ]:
allc_exploration_params = {
    'strategy_name': r'NQAoN ($\gamma = 0.2$)',
    'conf_name': 'strategies',
    'values': [[(p, notQuiteAllOrNone(0.2)), ((1-p), allC)] for p in np.arange(0.1, 1, 0.1)],
    'plotted_name': 'NQAoN/AllC fractions',
    'to_value': lambda v: v[0][0],
    'to_label': lambda v: 'NQAoN fraction = {}'.format(v),
}
explore(**allc_exploration_params)
