In [1]:
import numpy as np
from matplotlib import pyplot as plt
from __future__ import division
from spacetime.CA_Simulators.CAs import *
from spacetime.Local_Measures.Local_Complexity import *
%matplotlib inline

In [2]:
def wildcard_tiling(x,t):
    '''
    Returns spacetime field of dimension (x,t) sampled from the 0-wildcard
    tiling language.

    Sites whose row and column indices have the same parity (both even or
    both odd) are drawn independently and uniformly from {0, 1}; all other
    sites are fixed at 0.

    Parameters
    ----------
    x : int
        Spatial extent (number of columns).
    t : int
        Temporal extent (number of rows).

    Returns
    -------
    numpy.ndarray of int, shape (t, x)
    '''
    field = np.zeros((t, x), dtype=int)
    # Boolean mask of sites whose row/column parities agree -- these are
    # the "wildcard" sites that may hold either symbol.  Broadcasting a
    # column of row parities against a row of column parities builds the
    # checkerboard in one shot.
    parity_match = (np.arange(t)[:, None] % 2) == (np.arange(x)[None, :] % 2)
    # Single vectorized draw replaces the original Python-2-only `xrange`
    # double loop (xrange does not exist in Python 3, and the loop paid
    # interpreter overhead on every one of the t*x sites).
    field[parity_match] = np.random.choice([0, 1], size=int(parity_match.sum()))
    return field

In [3]:
def random_field(x,t):
    '''
    Sample a binary random field of shape (t, x).

    Every site is drawn independently and uniformly from {0, 1}
    (the spacetime analogue of repeated fair-coin flips).
    '''
    # One vectorized draw fills the whole (t, x) grid at once.
    return np.random.choice((0, 1), (t, x))

In [6]:
print random_field(10,5)


[[0 0 0 1 1 0 0 0 1 0]
 [0 1 1 1 1 0 0 1 1 1]
 [0 1 1 0 0 0 0 1 0 1]
 [0 1 0 0 1 1 0 1 1 0]
 [1 1 1 0 1 0 0 0 1 0]]

In [4]:
# 600x600 i.i.d. fair-coin field used as the reference random spacetime below.
field = random_field(600, 600)

In [5]:
# Reconstruct local causal states of the random field via lightcone
# equivalence.  NOTE(review): the (2, 2, 1) arguments appear to be past
# depth, future depth, and propagation speed -- confirm against the
# spacetime.Local_Measures.Local_Complexity API.
random_states = epsilon_field(field)
random_states.estimate_states(2,2,1)
random_states.filter_data()

In [6]:
print random_states.number_of_states()


1

In [6]:
print random_states.nonunifilar_transitions()


[]

In [7]:
print len(random_states.all_transitions())


32

In [8]:
for trans in random_states.transitions_from_state(2):
    print trans


(2, 'f:010', 1)
(2, 'r:01', 1)
(2, 'l:11', 2)
(2, 'f:101', 2)
(2, 'r:11', 2)
(2, 'l:01', 1)
(2, 'f:110', 2)
(2, 'r:00', 1)
(2, 'f:000', 1)
(2, 'l:00', 1)
(2, 'f:001', 1)
(2, 'r:10', 2)
(2, 'f:011', 1)
(2, 'f:111', 2)
(2, 'l:10', 2)
(2, 'f:100', 2)

In [9]:
for trans in random_states.transitions_from_state(1):
    print trans


(1, 'f:110', 2)
(1, 'r:01', 1)
(1, 'l:11', 2)
(1, 'f:000', 1)
(1, 'l:00', 1)
(1, 'f:001', 1)
(1, 'r:10', 2)
(1, 'l:01', 1)
(1, 'f:010', 1)
(1, 'r:00', 1)
(1, 'f:100', 2)
(1, 'r:11', 2)
(1, 'l:10', 2)
(1, 'f:101', 2)
(1, 'f:011', 1)
(1, 'f:111', 2)

In [14]:
t = random_states.all_transitions()[20]
print t[1]
print t[1][2:]


f:000
000

In [ ]:


In [ ]:


In [15]:
for trans in random_states.transitions_to_state(1):
    print trans


(2, 'r:01', 1)
(2, 'l:00', 1)
(1, 'r:01', 1)
(1, 'f:000', 1)
(1, 'r:00', 1)
(1, 'l:01', 1)
(2, 'f:001', 1)
(2, 'l:01', 1)
(2, 'f:011', 1)
(2, 'f:010', 1)
(1, 'f:010', 1)
(1, 'l:00', 1)
(2, 'r:00', 1)
(1, 'f:001', 1)
(2, 'f:000', 1)
(1, 'f:011', 1)

In [ ]:

It appears the two states are equivalent, which means this is effectively a single state. This is the spacetime analogue of a fair coin, so this is the desired result. It also increases confidence in the local epsilon-machine constructed from the lightcone equivalence relation.

*** This has been fixed by excluding the present from future light cones. This eliminates the state splitting issue in the reconstruction algorithm ***


In [11]:
# Overlay the inferred causal-state labels on a 50x50 window of the raw field.
state_overlay_diagram(field, random_states.get_causal_field(), t_max = 50, x_max = 50)



In [13]:
for state in random_states.causal_states():
    print state.plc_configs()


['1010', '1110', '1011', '1000', '1101', '1111', '1100', '1001']
['0011', '0111', '0101', '0001', '0110', '0100', '0010', '0000']

In [16]:
for state in random_states.causal_states():
    print state.morph()


[ 0.          0.          0.          0.          0.          0.          0.
  0.          0.12468342  0.12490943  0.12479892  0.12524395  0.12500044
  0.12493243  0.12504694  0.12538446]
[ 0.12486931  0.12485829  0.12466091  0.12506719  0.12476711  0.1250712
  0.12526457  0.12544142  0.          0.          0.          0.          0.
  0.          0.          0.        ]

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [20]:
t_trans = random_states.all_transitions(zipped = False)[1]
print np.unique(t_trans)


['f:000' 'f:001' 'f:010' 'f:011' 'f:100' 'f:101' 'f:110' 'f:111' 'l:00'
 'l:01' 'l:10' 'l:11' 'r:00' 'r:01' 'r:10' 'r:11']

In [22]:
print np.log(8)/np.log(2)


3.0

In [5]:
print random_states.entropy_rate('forward')


0.99999186758

In [6]:
print random_states.entropy_rate('right')


0.999992313223

In [7]:
print random_states.entropy_rate('left')


0.999994987578

We would like the intrinsic randomness of this field to be 1 bit in both time and space. Here we instead obtain three bits in each, with past depth 1. (The code still needs to be changed to use the correct value of the depth.)


In [23]:
# Repeat the reconstruction on a fresh 600x600 random field with a deeper
# light cone (first argument 3 instead of 2) to check how the transition
# alphabet grows with depth.
random_states = epsilon_field(random_field(600,600))
random_states.estimate_states(3,2,1)
random_states.filter_data()

In [24]:
t_trans = random_states.all_transitions(zipped = False)[1]
print np.unique(t_trans)


['f:00000' 'f:00001' 'f:00010' 'f:00011' 'f:00100' 'f:00101' 'f:00110'
 'f:00111' 'f:01000' 'f:01001' 'f:01010' 'f:01011' 'f:01100' 'f:01101'
 'f:01110' 'f:01111' 'f:10000' 'f:10001' 'f:10010' 'f:10011' 'f:10100'
 'f:10101' 'f:10110' 'f:10111' 'f:11000' 'f:11001' 'f:11010' 'f:11011'
 'f:11100' 'f:11101' 'f:11110' 'f:11111' 'l:000' 'l:001' 'l:010' 'l:011'
 'l:100' 'l:101' 'l:110' 'l:111' 'r:000' 'r:001' 'r:010' 'r:011' 'r:100'
 'r:101' 'r:110' 'r:111']

In [26]:
print np.log(32)/np.log(2)
print np.log(8)/np.log(2)


5.0
3.0

It seems we can recover the correct value of 1 bit of uncertainty if we treat each direction separately (not just each dimension) and divide the branching uncertainty by the size of the fringe along that direction. This procedure makes some sense, and it is encouraging that it works out in this simple case.


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [16]:
# 1000x1000 sample from the 0-wildcard tiling language (checkerboard of
# free {0,1} sites on a background of forced 0s).
wildcard_field = wildcard_tiling(1000,1000)

In [17]:
# Reconstruct causal states of the wildcard tiling; the expected answer is
# two states (one per sublattice phase), which the next cell confirms.
wildcard_states = epsilon_field(wildcard_field)
wildcard_states.estimate_states(3,3,1)
wildcard_states.filter_data()

In [18]:
print wildcard_states.number_of_states()


2

In [ ]: