In [11]:
% load_ext autoreload
% autoreload 2

import numpy as np
from random import *
from dmww_classes import *
from sampling_helper import *

seed(1) # for debugging

(1) Mutual Exclusivity

[a] Familiar word from corpus

Define model parameters


In [3]:
p = Params(n_samps=1,
           n_particles=1,
           alpha_r=.1,
           alpha_nr=10,
           empty_intent=.0001,
           n_hypermoves=5)

#(n_samps=1, #200 samples
         #  n_particles=1,
          # alpha_r=.1,
          # alpha_nr=1,
         #  empty_intent=.01,
         #  n_hypermoves=5)

Set up world and corpus.


In [4]:
corpusfile = 'corpora/corpus.csv'

w = World(corpus=corpusfile)
w.show()

c = Corpus(world=w, corpus=corpusfile)


n_objs = 23
n_words = 420

In [5]:
# BIRD = 22
# FEP = 23
# fep = 420

w.update(1, labels = ["fep"])
w.show()

c.sents.append([array([22,23]),array([420])])

c.update()


new object indices: 23
new word indices: 420
n_objs = 24
n_words = 421

Do inference with Gibbs sampler.


In [6]:
l = Lexicon(c, p,
            verbose=0,
            hyper_inf=True)

l.learn_lex_gibbs(c, p);




--- score full lex ---
[[ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  1.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 1.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]
 [   5.    0.   20.    4.    1.   57.    1.    0.    6.    0.    2.    1.
    2.    1.   28.    4.    1.    1.    2.    3.    3.    1.    1.   24.
    2.    4.    4.    1.    6.    2.    0.   12.    0.    5.    5.    1.
    1.    1.    0.    2.    5.   16.    1.    0.    1.    0.    1.    2.
    2.    3.    2.    4.    7.    1.    3.   11.    8.    0.   12.    0.
    3.    2.    1.    8.    1.   26.    1.    2.    3.    5.   30.    0.
    2.    4.    2.    2.    2.    1.    1.    2.    0.    1.    1.    2.
   37.    0.    1.    2.    1.    0.    1.    1.    1.    0.    7.    2.
    2.    1.    1.    0.    0.    7.    2.    2.    1.    1.    2.    0.
    7.   19.    1.    2.    2.    2.    3.    0.    2.    1.    2.    1.
    1.    1.    5.    0.    2.   11.    3.    1.    0.    0.    3.    0.
   14.    0.    2.    2.    0.    4.    9.    5.    1.    4.    2.    8.
    1.    5.    2.   12.    2.   19.    1.    0.    3.    2.    2.    5.
    1.    2.    7.    2.    1.    1.    6.    3.    2.   51.    0.    6.
    1.    1.    2.    1.    1.    1.    1.    2.   19.    6.    1.    3.
    7.    1.    2.    1.    3.    2.    1.    1.    6.    1.    1.    1.
    6.    0.    5.    1.    1.    0.    1.    1.    1.    3.    1.   19.
    6.    2.   14.    0.    3.    1.   11.    1.    1.    2.    2.    1.
    1.    2.    1.    1.    3.    0.    1.    3.    4.    1.   23.   15.
    1.    6.    0.    7.    0.    1.    1.    2.    1.    5.   24.    0.
    1.    1.    1.    1.    0.    1.    1.    0.    1.    0.   13.    1.
    0.    0.    1.    1.    1.    0.    1.    1.    3.    3.    1.    6.
    1.    6.    0.    1.    6.    8.    5.    1.    3.    1.    0.    4.
   17.    0.    1.   30.   38.    1.    2.   22.    1.    3.    5.    1.
    0.    1.    2.    9.    1.    0.    1.    3.    0.    1.    0.    7.
    1.    2.   17.    2.    1.    1.    1.    1.    1.   92.    1.    5.
    1.    8.    3.    1.    1.    8.    2.    2.    7.    9.    0.   12.
    2.    5.   15.    1.    7.    0.    8.    6.    1.   21.    4.    0.
    0.    2.    1.    1.    2.    2.    2.    6.    5.    6.    2.    0.
    1.    9.    1.    8.    3.    8.    3.    2.    1.    1.    4.   11.
    4.   31.    1.   10.    1.    1.    1.    1.    2.    6.    1.    2.
    4.    5.   12.    4.    1.    5.   19.    1.    1.    1.    3.    3.
    0.    2.    1.    8.    1.    9.    1.    1.   12.    1.    3.    1.
    5.    6.    6.    1.    4.    8.    0.    1.    1.    1.    4.    1.
    1.    1.  118.    0.    1.    1.    3.    0.    4.    1.    0.    2.
    0.]
counts: 2468
    intent obj: [2 0 2 4 3 4 2 3 3 4 2 3 2 4 1 3 2 4 4 0 3 0 0 4 6 2 4 4 4 0 0 0 0 1 0 0 0
 0 0 0 0 0 0 0 0 1 0 1 1 1 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 1 1 0 1 1 1 1 0 0
 0 0 1 0 0 1 1 1 0 0 0 0 1 0 0 1 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 1 1 1 0 0 0
 1 0 1 1 1 0 0 0 0 0 0 1 0 1 0 0 0 0 1 1 1 1 0 0 1 0 1 0 1 0 0 0 0 0 0 0 0
 0 0 0 0 1 0 0 0 0 0 0 0 2 1 2 2 1 1 0 2 0 0 0 1 2 2 1 2 2 2 1 0 2 1 2 1 2
 1 0 1 1 0 1 2 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 0 1 2 0 0 0 0 0 0 0
 0 0 0 0 1 0 0 0 0 0 0 2 1 1 1 1 1 1 1 2 2 0 1 2 1 0 0 1 0 0 0 0 0 0 1 0 2
 1 0 1 0 0 0 0 1 2 2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 1 3 3 3 2 2 3 2
 2 1 3 2 2 1 0 0 1 0 1 3 3 3 2 5 4 4 4 0 3 3 4 2 1 0 0 0 0 0 0 0 0 0 0 1 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0 2 0 0 0 0 1 1 0 0 1 0 0
 0 0 1 0 0 0 0 0 0 0 1 1 0 0 0 1 0 1 0 0 1 0 1 0 1 1 0 0 1 1 1 1 2 2 1 1 2
 1 1 1 0 0 0 1 1 2 1 1 2 1 1 1 1 1 1 1 0 2 2 0 0 0 0 1 1 1 1 0 1 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 0 0 0 0 0 0 1 0 0 0 1 0 0 1 0 0 1
 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1]
    ref word: [ 3  4 15  1  4  0  1  3  1  2  1  3  0  4  0  0  1  2  4  2  0  0  1  0  2
  1  0  0  0  3  0  1  1  4  3  5  5  5  1  3  0  3  5  4  2  2  5  2  0  2
  0  0  1  0  0  1  0  4  3  0  3  3  0  2  3  3  0  0  1  1  1  0  1  1  0
  4  0  2  0  1  0  2  0  0  0 10  0  6  2  1  1  3  2  2  0  4  0  0  0  0
  2  1  1 10  8  0  1  2  0  2  0  0  2  3  1  0  1  0  3  0  0  0  0  0  0
  2  1  0  3  1  0  2  0  0  0  0  0  0  0  4  0  0  3  0  0  0  1  0  0  0
  0  0  0  0  0  0  5  0  0  3  3  3  0  0  0  2  2  0  0  0  1  6  9  1  1
  0  0  0  0  1  0  0  2  1  1  1  8  4  0  0  6  0  0  0  1  0  4  0  3  1
  0  4  0  1  4  3  0  1  2  0  0  1  1  0  0  0  2  0  1  3  6  0  0  0  0
  0  1  0  6  0  0  2  3  0  0  0  4  3  1  0  1  0  0  1  1  4  6  0  0  2
  0  0  0  0  1  7  0  0  0  4  2  6  5  4  0  1  2  3  2  0  4  2  5  1  1
  3  0  1  0  1  1  0  4  0  2  0  0  2  0  0  0 16  1  2  1  0  0 11  0  4
  3  2  1  2  1  0  0  1  3  3  1  2  2 10  2  0  0  0  0  0  0  3  0  1  1
  0  2  1  2  3  4  1  3  3  2  0  3  3  1  0  2  2  1  0  2  3  2  0  3  3
  1  1  0  0  2  1  0  0  2  0  0  4  3  2  0  5  2  2  0  0  2  2  2  0  0
  2  2  0  0  4  4  0  1  5  0  3  2  0  1  2  2  0  0  0  0  2  1  3  6  0
  0  0  1  2  3  0  0  5  0  5  0  1  2  0  0  0  1  1  1  3  2  0  2  2  0
  0  7  3  1  2  2  0  0  0  0  0  2  2  5  1  0  0  0  6  1  3  0  0  0  3
  0  1  1  1  4  0  8  2  4  2  4  4  0  2  0  0  1  0  4  6  1  0  4  3  4
  3  1  0  4  1  1  0  3  2  2  2  0  0  0  0  0  2  0  0  4  0  1  0  4  4
  0  2  2  2  1  2  0  1  2  2  6  0  4  2  0  2  0  0  2  2  0  1  0  3  0
  1  4  4  1  3  3  3  0  0  1  4  4 12  6  5  0  0  0  2  0  0  0  1  0  5
  3  2  2  1  1  0  1  5  0  1  0  0  0  1  2  1  0  8  1  0  0  1  0  0  0
  0  0  1  5  3  3  0  3  2  0  3  3  0  0  1  2  0  2 11  0  3  2  1  1  3
  3  3  3  1  5  0  0  0  5  3  4  0  0  1  0  0  0 11  1  0  0  1  1  1  0]
    intent obj prob: [-1.4 -1.4 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6
 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.9 -1.9 -1.9 -1.9 -1.9 -1.9 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -9.2 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7
 -0.7 -9.2 -9.2 -9.2 -9.2 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -9.2 -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -9.2 -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.7 -0.7 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -0.  -9.2 -0.  -0.  -0.  -0.  -0.  -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -1.1 -1.1 -1.1 -1.4 -1.4 -1.4 -1.4 -1.4
 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4
 -1.4 -1.4 -1.4 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -0.7 -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -9.2 -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -9.2 -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -9.2 -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -9.2 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -1.1 -1.1 -1.4 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7 -0.7 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7 -0.7 -0.7 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -9.2 -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7]
full score: r -3405.8, nr -10642.7, i -466.6, p -6.2,  total: -14521.2

 *** average sample time: 5.428 sec

Plot.


In [7]:
l.plot_lex(w, certainwords = 0)



In [8]:
correct = l.ref[23, 420] #FEP, fep
incorrect = l.ref[22, 420] #BIRD, fep
known_word = l.ref[22, 8] #BIRD, bigbird
lc = divide(correct, correct + incorrect)
known_word, correct, incorrect, lc


-c:4: RuntimeWarning: invalid value encountered in divide
Out[8]:
(21.0, 0.0, 0.0, nan)

Do inference with particle filter.


In [9]:
l = Lexicon(c, p,
            verbose=0,
            hyper_inf=False)

l.learn_lex_pf(c, p, resample=False)
#l.output_lex_pf(c, p)



...............................................................................

...............................................................................

...............................................................................

...............................................................................

...............................................................................

...............................................................................

...............................................................................

................................................................

In [10]:
#l.plot_lex(w, certainwords = 0)
#w.words_dict

Luce choice in 2AFC novel-familiar task


In [11]:
correct = l.ref[23, 420]
incorrect = l.ref[22, 420]
known_word = l.ref[22, 8]
lc = divide(correct, correct + incorrect)
known_word, correct, incorrect, lc


Out[11]:
(0.0, 0.0, 0.0, nan)

[b] Familiar word learned ("novel-novel" ME)

Define model parameters.


In [12]:
p = Params(n_samps=1,
           n_particles=1,
           alpha_r=.1,
           alpha_nr=10,
           empty_intent=.0001,
           n_hypermoves=5)

Set up world and corpus.


In [13]:
corpusfile = 'corpora/corpus.csv'

w = World(corpus=corpusfile)
w.show()

c = Corpus(world=w,corpus=corpusfile)


n_objs = 23
n_words = 420

In [14]:
# FEP = 23
# fep = 420
# TOMA = 24
# toma = 421

w.update(2, labels = ["fep", "toma"])
w.show()

c.sents.append([array([23]),array([420])])
c.sents.append([array([23,24]),array([421])])

c.update()


new object indices: 23, 24
new word indices: 420, 421
n_objs = 25
n_words = 422

Do inference with Gibbs sampler.


In [15]:
l = Lexicon(c, p,
            verbose=0,
            hyper_inf=True)

l.learn_lex_gibbs(c, p);




--- score full lex ---
[[ 0.  0.  4. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]
 [   6.    5.   13.    4.    0.   48.    0.    0.    8.    3.    2.    1.
    2.    1.   16.    4.    1.    1.    2.    3.    3.    1.    6.   24.
    0.    6.    4.    1.    6.    0.    0.    9.    1.    5.    5.    1.
    1.    1.    0.    1.    5.   16.    1.    1.    1.    2.    1.    2.
    2.    3.    4.    4.    4.    1.    3.   15.    8.    1.   13.    1.
    0.    2.    0.    9.    1.   28.    0.    2.    3.    5.   32.    1.
    2.    5.    2.    5.    2.    1.    1.    3.    0.    1.    0.    0.
   62.    1.    1.    2.    1.    5.    1.    0.    1.    0.    7.    2.
    2.    1.    0.    1.    1.    7.    2.    0.    1.    0.    2.    1.
    2.   12.    1.    2.    3.    1.    3.    0.    2.    1.    2.    1.
    1.    1.    5.    0.    2.   11.    3.    0.    3.    0.    3.    0.
   14.    0.    5.    1.    2.    4.    9.    1.    1.    4.    2.    7.
    1.    4.    2.   14.    2.   13.    1.    1.    2.    8.    2.    0.
    1.    2.    6.    2.    1.    1.    5.    3.    2.   52.    0.    4.
    1.    1.    2.    1.    1.    1.    1.    0.   12.    5.    1.    0.
    5.    1.    2.    1.    3.    2.    0.    1.    3.    2.    1.    0.
    6.    2.    5.    1.    1.    1.    0.    1.    1.    3.    1.   19.
    3.    2.   18.    3.    3.    1.   14.    1.    3.    2.    2.    1.
    3.    1.    0.    0.    5.    0.    1.    3.    5.    0.   26.   21.
    1.    7.    0.    7.    2.    1.    1.    2.    1.    7.   22.    1.
    1.    3.    1.    1.    1.    5.    1.    1.    1.    0.   13.    1.
    0.    0.    1.    1.    5.    0.    1.    2.    3.    3.    1.    2.
    1.    3.    0.    0.    6.    0.    4.    1.    2.    1.    2.    4.
   18.    0.    1.   31.   37.    1.    1.   20.    1.    3.    4.    1.
    1.    1.    2.   11.    1.    0.    1.    3.    1.    1.    0.    5.
    1.    0.   21.    2.    0.    1.    4.    1.    3.   93.    1.    5.
    1.    9.    0.    0.    1.   12.    2.    2.    7.    6.    2.   10.
    2.    3.    8.    1.    7.    0.   11.    4.    1.   21.    4.    3.
    1.    2.    1.    0.    1.    2.    0.    6.    3.    6.    2.    1.
    1.    5.    1.    8.    3.    6.    3.    2.    1.    1.    4.    5.
    3.   25.    3.   13.    1.    1.    1.    1.   14.    7.    1.    2.
    4.    5.   12.    4.    1.    5.   14.    1.    1.    1.    3.    3.
    2.    2.    1.   11.    3.    9.    1.    1.   12.    1.    2.    1.
    5.    1.    8.    1.    4.    7.    0.    1.    2.    1.    4.    1.
    1.    1.  103.    1.    0.    1.    3.    2.    4.    1.    5.    2.
    0.    0.]
counts: 2467
    intent obj: [1 0 3 0 1 1 1 3 3 1 0 0 0 0 3 1 3 3 3 1 3 2 4 0 6 1 0 5 0 1 0 0 0 1 0 1 0
 1 1 1 1 1 0 1 0 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0
 1 0 0 1 1 0 1 0 0 0 0 0 0 1 1 1 1 1 0 0 1 0 0 0 1 1 0 0 1 1 0 1 1 1 0 1 0
 0 0 0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 1 0 1 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 2 1 1 2 0 1 1 1 1 1 2 1 0 0 1 0 1 1 0 0 1
 0 2 1 0 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 1 1 0 0 0 0 0 0 0
 0 0 1 0 0 0 0 0 1 0 0 0 2 0 0 1 1 0 2 2 2 1 1 1 1 2 0 0 0 0 0 0 0 2 2 2 2
 0 1 2 2 1 2 2 1 1 1 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 3 3
 3 0 3 0 3 1 2 2 2 0 1 2 2 0 3 1 2 0 3 0 1 5 4 1 1 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 1 1 0 0 0 0 1 1 0 0
 1 0 1 0 0 1 1 0 1 0 0 0 0 1 0 0 1 1 1 1 1 1 2 2 1 1 1 2 0 0 0 0 0 1 1 2 1
 1 0 2 1 0 0 1 1 1 1 1 1 2 2 0 1 1 1 0 0 2 0 0 0 0 0 1 0 0 0 0 1 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0
 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0]
    ref word: [ 0  2  9  0  1  0  0  1  1  0  2  0  1  4  0  0  2  3  3  0  6  0  2  0  5
  0  0  3  0  2  0  1  1  2  3  6  0  6  1  4  0  5  0  5  3  4  3  2  0  2
  0  5  1  0  0  4  0  4  1  0  3  3  0  2  1  1  0  1  0  0  0  0  1  0  0
  2  0  2  0  1  0  1  0  0  1  7  0  4  0  0  0  2  3  3  0  4  0  0  0  0
  0  1  4  3  5  4  0  8  0  5  3  0  4  4  1  0  3  0  1  0  7  0  1  9  0
  4  3  0  0  0  0  3  1  0  0  1  1  0  4 11  0  0  2  0  0  0  1  1  1  0
  0  0  0  0  0  0  2  0  0  3  0  0  0  0  0  3  3  0  0  0  3  5  9  1  1
  0  3  3  0  1  0  1  0  3  0  0  8  4  0  0  5  3  0  1  0  0  9  0  0  0
  0  4  0  1  1  0  0  0  2  1  2  2  0  0  0  0  4  0  1  3  6  1  0  0  0
  3  2  0 15  0  0  2  3  0  0  0  4  3  2  0  0  0  0  1  1  7  4  4  0  4
  0  0  1  1  1  2  0  0  0  0  1  4  3  4  0  1  0  1  1  0  4  2  0  0  1
  3  0  1  0  1  7  2  1  0  5  1  0  1  0  3  0 13  4  0  3  0  8  8  0  3
  1  1  2  3  1  0  0  4  6  0  0  2  2  0  4  0  0  1  1  0  1  0  0  0  0
  0  2  4  1  2  0  1  2  2  2  0  1  2  1  1  0  0  4  0  2  1  1  0  2  1
  1  2  0  0  1  3  0  0  2  0  1  5  4  3  0  6  3  4  0  0  2  1  3  0  2
  1  1  0  0  3  0  0  0  4  0  3  2  4  0  2  2  0  0  0  0  2  1  1  2  0
  1  0  1  0  1  0  0  3  0  3  0  1  2  0  0  0  3  1  0  0  1  0  2  2  0
  0  5  3  2  3  3  0  0  0  0  0  7  2  3  1  0  0  4  0  2  2  3  1  0  2
  2  1  1  1  4  0  4  2  6  1  5  1  0  1  0  2  2  1  7  3  4  2  0  0  0
  0  2  1  5  3  0  0  0  1  0  2  2  0  0  0  0  1  0  1  2  2  1  0  2  2
  0  2  3  2  1  2  0  0  1  0  3  0  6  4  6  3  0  0  1  1  0  4  0  0  1
  0  3  3  0  4  4  4  0  0  0  1  4 10  1  1  0  1  0  2  1  0  0  1  0  2
  0  0  2  2  1  0 10  5  1  6  0  0  0  2  2  2  0  8  2  0  0  2  0  0  0
  0  0  2  5  1  1  0  1  0  0  1  1  0  0  1  1  1  1  4  0  1  2  1  0  0
  2  4  0  3  6  0  0  0  5  0  1  2  0  1  2  0  0 11  6  0  0  2  2  3  0]
    intent obj prob: [-1.4 -1.4 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6
 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.6 -1.9 -1.9 -1.9 -1.9 -1.9 -1.9 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7
 -0.7 -9.2 -9.2 -9.2 -9.2 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.7 -0.7 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -1.1 -1.1 -1.1 -1.4 -1.4 -1.4 -1.4 -1.4
 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4 -1.4
 -1.4 -1.4 -1.4 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -1.8 -0.7 -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -9.2 -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -1.1 -1.1 -1.4 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7 -0.7 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7 -0.7 -0.7 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7
 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.7 -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.  -0.
 -0.  -0.  -0.  -0.  -0.  -0.  -0.7 -0.7 -0.7 -0.7]
full score: r -3444.1, nr -10376.9, i -394.3, p -20.5,  total: -14235.8

 *** average sample time: 5.267 sec

Plot.

Luce choice in 2AFC novel-novel task


In [16]:
correct = l.ref[24, 421]
incorrect = l.ref[23, 421]

lc = divide(correct, correct + incorrect)
correct,incorrect,lc


Out[16]:
(0.0, 0.0, nan)

(2) Yu and Smith (2007): X-Sit Sim

Make corpus.


In [17]:
# set parameters 
condition = 3
num_words = 18
num_occurrences = 6
total_words = num_words * num_occurrences
num_trials = total_words / condition
items = np.ones(num_words) * num_occurrences

# make empty corpus
yucorpus =  list()
for i in range(num_trials):
    yucorpus.append([np.zeros(condition, dtype=int8), 
                     np.zeros(condition, dtype=int8)])

# generate actual corpus
try:
    for i in range(num_trials): # for each trial
        for j in range(condition): # for each word/object pairing in the trial
            item_probs = np.divide(items, total_words)
            
            yucorpus[i][0][j] = where(np.random.multinomial(1, item_probs) == 1)[0][0]
            
            # avoid repeats
            c = 1
            while sum(yucorpus[i][0][j] == yucorpus[i][0]) > 1:
              yucorpus[i][0][j] = where(np.random.multinomial(1, item_probs) == 1)[0][0]
              c = c + 1;
              if c > 1000:
                    break 
            
            yucorpus[i][1][j] = yucorpus[i][0][j]
         
            # decrement the item counts
            items[yucorpus[i][0][j]] = items[yucorpus[i][0][j]]  - 1;
            total_words = total_words - 1;
except ValueError:
      print 'failed to generate corpus, run again!'

Set up world and corpus.


In [26]:
w = World(n_words=18, n_objs=18)

c = Corpus(w,
           n_per_sent=condition,
           n_sents=num_trials)        
c.sents = yucorpus
c.update()

Define model parameters.


In [27]:
p = Params(n_samps=200,
           n_particles=1,
           alpha_r=.1,
           alpha_nr=1,
           empty_intent=.0001,
           n_hypermoves=5)

Do inference with Gibbs sampler.


In [57]:
l = Lexicon(c, p,
            verbose=0,
            hyper_inf=True)

l.learn_lex_gibbs(c, p)



...............................................................................

...............................................................................

.......................................
--- score full lex ---
[[ 4.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  3.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  6.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  4.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  3.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  5.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  2.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  6.]]
 [ 2.  3.  6.  6.  6.  0.  5.  5.  5.  6.  2.  6.  3.  1.  6.  6.  4.  0.]
counts: 108
    intent obj: [2 0 0 0 1 0 2 2 0 2 2 1 2 1 2 0 1 2 2 1 2 2 1 2 0 2 0 2 2 0 0 1 0 2 0 2]
    ref word: [2 0 0 0 0 0 2 2 0 2 2 1 0 1 2 0 1 2 2 1 2 2 1 2 0 2 0 2 2 0 0 1 2 2 0 2]
    intent obj prob: [-1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1]
full score: r -33.1, nr -215.1, i -39.6, p -5.7,  total: -293.4

 *** average sample time: 0.098 sec

Plot.


In [58]:
l.plot_lex(w, certainwords = 0)


Evaluate posterior using luce choice. At test, learners were given 4AFC task (1 correct referent and 3 foils).


In [60]:
epsilon = .00001
l.ref = l.ref + epsilon  
num_foils = 3
lc = zeros(num_words)
correct = zeros(num_words)
incorrect = zeros(num_words)
for i in range(num_words):
    foils = sample(xrange(num_words),num_foils)
    while i in foils:
        foils = sample(xrange(num_words),num_foils)
        
    correct[i] = l.ref[i,i]
    incorrect[i] = l.ref[foils[0],i] + l.ref[foils[1],0] + l.ref[foils[2],0] 
    lc[i] = divide(correct[i], correct[i] + incorrect[i])
    
choice_score = sum(lc)/num_words
choice_score


Out[60]:
0.54166030571945811

Now, do with particle filter


In [63]:
p = Params(n_samps=1,
           n_particles=10,
           alpha_r=.1,
           alpha_nr=10,
           empty_intent=.0001,
           n_hypermoves=10)

l = Lexicon(c, p,
            verbose=0,
            hyper_inf=True)

l.learn_lex_pf(c, p, resample=False);
l.output_lex_pf(c, p)
l.plot_lex(w, certainwords = 0)
l.show()



...................................
**** BEST PARTICLE ****

--- score full lex ---
[[ 3.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  5.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  4.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  2.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  6.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  3.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  1.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  2.]]
 [ 3.  1.  6.  5.  5.  0.  5.  5.  6.  6.  2.  6.  0.  4.  2.  6.  6.  4.]
counts: 108
    intent obj: [0, 0, 1, 0, 1, 1, 0, 2, 1, 2, 2, 0, 1, 2, 0, 1, 1, 1, 2, 1, 2, 0, 2, 2, 2, 1, 2, 2, 0, 0, 0, 1, 0, 2, 2, 1]
    ref word: [2, 0, 0, 2, 1, 2, 0, 1, 1, 2, 2, 1, 0, 1, 0, 1, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 0, 0, 0, 1, 2, 2, 2, 1]
    intent obj prob: [-1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1
 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1]
full score: r -57.8, nr -215.2, i -39.6, p -6.2,  total: -318.8

**** GRAND MEAN ****
[[ 1.8  0.   0.   0.   0.   0.1  0.1  0.   0.   0.1  0.   0.1  0.   0.   0.
   0.   0.   0. ]
 [ 0.   3.2  0.   0.1  0.   0.   0.   0.   0.   0.1  0.   0.   0.   0.   0.
   0.   0.   0.1]
 [ 0.   0.   1.1  0.   0.   0.1  0.   0.   0.   0.1  0.3  0.   0.1  0.   0.
   0.   0.   0. ]
 [ 0.2  0.1  0.   1.2  0.   0.   0.   0.   0.   0.2  0.   0.   0.   0.   0.
   0.   0.   0. ]
 [ 0.   0.   0.   0.   1.6  0.   0.   0.   0.   0.2  0.1  0.   0.   0.3  0.
   0.   0.2  0. ]
 [ 0.   0.   0.   0.   0.   1.7  0.   0.2  0.2  0.1  0.   0.1  0.   0.   0.
   0.   0.   0. ]
 [ 0.   0.   0.   0.   0.   0.   0.1  0.3  0.3  0.   0.5  0.1  0.   0.   0.
   0.1  0.   0. ]
 [ 0.   0.   0.   0.   0.   0.   0.   1.8  0.   0.   0.1  0.   0.   0.   0.
   0.1  0.   0. ]
 [ 0.   0.   0.   0.   0.1  0.8  0.   0.   0.7  0.   0.   0.   0.1  0.3
   0.1  0.   0.   0. ]
 [ 0.   0.   0.   0.3  0.1  0.1  0.   0.   0.   0.6  0.   0.   0.   0.2
   0.1  0.   0.   0. ]
 [ 0.   0.   0.   0.   0.2  0.   0.1  0.1  0.   0.1  0.5  0.   0.   0.   0.
   0.1  0.   0. ]
 [ 0.   0.   0.   0.   0.   0.1  0.1  0.   0.1  0.   0.   1.4  0.   0.   0.1
   0.   0.2  0.1]
 [ 0.   0.   0.   0.2  0.   0.   0.   0.1  0.1  0.   0.3  0.   1.4  0.   0.
   0.1  0.   0.1]
 [ 0.   0.   0.   0.   0.2  0.   0.   0.   0.2  0.1  0.   0.   0.2  1.1
   0.1  0.   0.1  0. ]
 [ 0.   0.   0.4  0.   0.   0.   0.   0.   0.2  0.   0.   0.1  0.   0.1
   0.9  0.1  0.   0. ]
 [ 0.   0.2  0.   0.   0.   0.1  0.   0.1  0.   0.   0.5  0.   0.   0.   0.
   0.5  0.   0. ]
 [ 0.   0.   0.   0.   0.4  0.   0.   0.   0.   0.   0.   0.3  0.   0.   0.
   0.2  0.9  0. ]
 [ 0.   0.   0.   0.   0.   0.   0.   0.1  0.   0.   0.   0.   0.   0.   0.2
   0.1  0.2  2.2]]
[[ 1.8  0.   0.   0.   0.   0.1  0.1  0.   0.   0.1  0.   0.1  0.   0.   0.
   0.   0.   0. ]
 [ 0.   3.2  0.   0.1  0.   0.   0.   0.   0.   0.1  0.   0.   0.   0.   0.
   0.   0.   0.1]
 [ 0.   0.   1.1  0.   0.   0.1  0.   0.   0.   0.1  0.3  0.   0.1  0.   0.
   0.   0.   0. ]
 [ 0.2  0.1  0.   1.2  0.   0.   0.   0.   0.   0.2  0.   0.   0.   0.   0.
   0.   0.   0. ]
 [ 0.   0.   0.   0.   1.6  0.   0.   0.   0.   0.2  0.1  0.   0.   0.3  0.
   0.   0.2  0. ]
 [ 0.   0.   0.   0.   0.   1.7  0.   0.2  0.2  0.1  0.   0.1  0.   0.   0.
   0.   0.   0. ]
 [ 0.   0.   0.   0.   0.   0.   0.1  0.3  0.3  0.   0.5  0.1  0.   0.   0.
   0.1  0.   0. ]
 [ 0.   0.   0.   0.   0.   0.   0.   1.8  0.   0.   0.1  0.   0.   0.   0.
   0.1  0.   0. ]
 [ 0.   0.   0.   0.   0.1  0.8  0.   0.   0.7  0.   0.   0.   0.1  0.3
   0.1  0.   0.   0. ]
 [ 0.   0.   0.   0.3  0.1  0.1  0.   0.   0.   0.6  0.   0.   0.   0.2
   0.1  0.   0.   0. ]
 [ 0.   0.   0.   0.   0.2  0.   0.1  0.1  0.   0.1  0.5  0.   0.   0.   0.
   0.1  0.   0. ]
 [ 0.   0.   0.   0.   0.   0.1  0.1  0.   0.1  0.   0.   1.4  0.   0.   0.1
   0.   0.2  0.1]
 [ 0.   0.   0.   0.2  0.   0.   0.   0.1  0.1  0.   0.3  0.   1.4  0.   0.
   0.1  0.   0.1]
 [ 0.   0.   0.   0.   0.2  0.   0.   0.   0.2  0.1  0.   0.   0.2  1.1
   0.1  0.   0.1  0. ]
 [ 0.   0.   0.4  0.   0.   0.   0.   0.   0.2  0.   0.   0.1  0.   0.1
   0.9  0.1  0.   0. ]
 [ 0.   0.2  0.   0.   0.   0.1  0.   0.1  0.   0.   0.5  0.   0.   0.   0.
   0.5  0.   0. ]
 [ 0.   0.   0.   0.   0.4  0.   0.   0.   0.   0.   0.   0.3  0.   0.   0.
   0.2  0.9  0. ]
 [ 0.   0.   0.   0.   0.   0.   0.   0.1  0.   0.   0.   0.   0.   0.   0.2
   0.1  0.2  2.2]]
nr: [ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.]

Evaluate posterior using luce choice. At test, learners were given 4AFC task (1 correct referent and 3 foils).


In [64]:
num_foils = 3
lc = zeros(num_words)
correct = zeros(num_words)
incorrect = zeros(num_words)
for i in range(num_words):
    foils = sample(xrange(num_words),num_foils)
    while i in foils:
        foils = sample(xrange(num_words),num_foils)
        
    correct[i] = l.ref[i,i]
    incorrect[i] = l.ref[foils[0],i] + l.ref[foils[1],0] + l.ref[foils[2],0] 
    lc[i] = divide(correct[i], correct[i] + incorrect[i])
    
choice_score = nansum(lc)/num_words

correct, incorrect, lc, choice_score


Out[64]:
(array([ 1.8,  3.2,  1.1,  1.2,  1.6,  1.7,  0.1,  1.8,  0.7,  0.6,  0.5,
        1.4,  1.4,  1.1,  0.9,  0.5,  0.9,  2.2]),
 array([ 0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0.2,  0. ,  0.2,  2.1,
        1.8,  0. ,  0. ,  1.9,  0.1,  0. ,  0. ]),
 array([ 1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
        1.        ,  1.        ,  0.9       ,  1.        ,  0.75      ,
        0.19230769,  0.4375    ,  1.        ,  1.        ,  0.32142857,
        0.83333333,  1.        ,  1.        ]),
 0.85747608872608871)

(3) Dewar and Xu (2007)


In [25]:
w = World(n_words=3,
          n_objs=4)
w.show()
w.update(6, labels = ["fep", "toma", "dax", "zim", "pid", "wug"])


c = Corpus(w)

# 8 training trials
# 4 test trials
#c.sents = [[array([1,2]),array([])],[array([1,2]),array([])], 
           [array([3,4]),array([])],[array([3,4]),array([])],
           [array([5,5]),array([])],[array([5,5]),array([])],
           [array([6,6]),array([])],[array([6,6]),array([])],
           [array([1,2]),array([])],[array([1,2]),array([])],
           [array([1,2]),array([])],[array([1,2]),array([])]]
#c.show()


  File "<ipython-input-25-ff928c06a603>", line 12
    [array([3,4]),array([])],[array([3,4]),array([])],
    ^
IndentationError: unexpected indent

In [ ]:
corpusfile = 'corpora/corpus.csv'

w = World(corpus=corpusfile)
c = Corpus(world=w, corpus=corpusfile)
w.update(6, labels = ["fep", "toma", "dax", "zim", "pid", "wug"])
c.update()

# expected - different
c.sents.append([array([23, 23, 23, 24, 24, 24]),array([420, 421])])

# expected - same
c.sents.append([array([25, 25, 25, 25, 25, 25]),array([422, 422])])

# unexpected - different
c.sents.append([array([26, 26, 26, 27, 27, 27]),array([423, 423])])

# unexpected - same
c.sents.append([array([28, 28, 28, 28, 28, 28]),array([424, 425])])
# TO DO: ME: * what are the appropriate parameters * how report results YU AN SMITH: * How evaluate lexicon? F score? DEWAR AND XU: * how set up sim RAMSCAR

In [39]:
from matplotlib import pyplot as plt
x = arange(3)
y = array([0.734509062,	0.601131376, 0.488957579])
f = pylab.figure()
ax = f.add_axes([0.1, 0.1, .8, .8])
ax.bar(x, y, align='center')
ax.set_xticks(x)
ax.set_xticklabels(['2x2', '3x3', '4x4'])
plt.axhline(.25, color = "black", linestyle='dashed', linewidth=2)
ax.set_title('Gibbs sampler')
f.show()



In [ ]:
x = arange(3)
y = array([0.734509062,	0.601131376, 0.488957579])
f = pylab.figure()
ax = f.add_axes([0.1, 0.1, .8, .8])
ax.bar(x, y, align='center')
ax.set_xticks(x)
ax.set_xticklabels(['2x2', '3x3', '4x4'])
plt.axhline(.25, color = "black", linestyle='dashed', linewidth=2)
ax.set_title('Particle Filter')
f.show()