In [2]:
import synchwordfinder as swf
import graphgenerator as gg
import probabilisticgraph as pg
import dmarkov as dm
import sequenceanalyzer as sa
import yaml
graph_path = 'logisticmap'
alpha = 0.95
alpha2 = 0.99
Lmax = 14
lseq = 10000000
test = 'chi-squared'
sw_file = 'synchwords'
with open('results/' + graph_path + '/probabilities/original.yaml','r') as f:
    base_probs = yaml.load(f)[0]
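The cells below write into several graphs/, sequences/, synch_words/ and results/ subfolders. As a minimal sketch (the exact layout is an assumption inferred from the paths used in this notebook), the expected folders can be created up front so the open(..., 'w') calls do not fail:
In [ ]:
import os

# Hypothetical setup step: pre-create the output folders referenced later.
# The list of subdirectories is inferred from the file paths in the cells below.
subdirs = [
    'graphs/' + graph_path,
    'sequences/' + graph_path,
    'synch_words/' + graph_path,
    'results/' + graph_path + '/probabilities',
    'results/' + graph_path + '/cond_entropies',
    'results/' + graph_path + '/autocorrelations',
    'results/' + graph_path + '/kld',
    'results/' + graph_path + '/l1metric',
]
for d in subdirs:
    os.makedirs(d, exist_ok=True)  # no-op if the folder already exists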
In [1]:
# Generate D-Markov machines and their sequences
rng = range(4, 12)
for d in rng:
    g = pg.ProbabilisticGraph(path='graphs/' + graph_path + '/rtp_L' + str(Lmax) + '.yaml')
    h = dm.DMarkov(g, d)
    h.save_graph_file('graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml')
    # s, a = h.generate_sequence(lseq, h.states[0])
    # with open('sequences/' + graph_path + '/len_' + str(lseq) + '_dmarkov_d' + str(d) + '.yaml', 'w') as f:
    #     yaml.dump(s, f)
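For reference, a D-Markov machine of order d takes each length-d word as a state and estimates next-symbol probabilities from counts. The cell below is a self-contained illustration of that idea with plain dictionaries; it is not the dm.DMarkov implementation, only a conceptual sketch.
In [ ]:
from collections import Counter, defaultdict

def dmarkov_from_sequence(seq, d):
    """Illustrative only: estimate an order-d D-Markov model from a symbol sequence.
    States are length-d words; transition probabilities come from empirical counts."""
    counts = defaultdict(Counter)
    for i in range(len(seq) - d):
        state = tuple(seq[i:i + d])      # current length-d word
        counts[state][seq[i + d]] += 1   # next symbol observed after it
    model = {}
    for state, nxt in counts.items():
        total = float(sum(nxt.values()))
        model[state] = {sym: c / total for sym, c in nxt.items()}
    return model

# Toy usage on a short binary string (not one of the logistic-map sequences):
toy = list('0110111011101101')
print(dmarkov_from_sequence(toy, 2))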
In [3]:
# Generate Mk4 and its sequences
lrange = range(4, 12)
for l in lrange:
    g = gg.GraphGenerator('graphs/' + graph_path + '/dmarkov_d' + str(l) + '.yaml',
                          ['e'],
                          'graphs/' + graph_path + '/L' + str(l) + '_alpha' + str(alpha) + '_v2',
                          '')
    h = g.mk4(graph_path, l)
    s, alph = h.generate_sequence(lseq, h.states[0])
    seqpath = 'sequences/' + graph_path + '/len_' + str(lseq) + '_L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml'
    with open(seqpath, 'w') as f:
        yaml.dump(s, f)
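As an optional sanity check (not part of the original pipeline), the single-symbol frequencies of the last generated sequence can be printed before running the heavier analysis below. This assumes s is an iterable of symbols, as suggested by the yaml.dump call above.
In [ ]:
from collections import Counter

# Optional check on the last Mk4 sequence generated above.
freqs = Counter(s)
total = float(sum(freqs.values()))
for sym, c in sorted(freqs.items()):
    print(sym, c / total)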
In [ ]:
# Sequence analysis for Mk4
rng = range(4, 13)
K = []
Phi = []
L = 10
k = 9
a = 20
stats = {
    'probs': True,
    'cond_probs': True,
    'cond_entropy': True,
    'autocorr': False,
    'kld': True,
    'phi': True
}
for l in rng:
    s = sa.SequenceAnalyzer('sequences/' + graph_path + '/len_' + str(lseq) + '_L' + str(l) + '_alpha' + str(alpha)
                            + '_mk4_v2.yaml')
    if stats['probs']:
        p = s.calc_probs(L)
        with open('results/' + graph_path + '/probabilities/L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml',
                  'w') as f:
            yaml.dump(p, f)
    else:
        with open('results/' + graph_path + '/probabilities/L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml',
                  'r') as f:
            p, alph = yaml.load(f)
        s.probabilities = p
        s.alphabet = alph
    if stats['cond_probs']:
        cp = s.calc_cond_probs(L - 1)
        with open('results/' + graph_path + '/probabilities/cond_L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml',
                  'w') as f:
            yaml.dump(cp, f)
    else:
        with open('results/' + graph_path + '/probabilities/cond_L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml',
                  'r') as f:
            s.conditional_probabilities = yaml.load(f)
    if stats['cond_entropy']:
        h = s.calc_cond_entropy(L - 1)
        with open('results/' + graph_path + '/cond_entropies/L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml',
                  'w') as f:
            yaml.dump(h, f)
    if stats['autocorr']:
        ac = s.calc_autocorrelation(a)
        with open('results/' + graph_path + '/autocorrelations/L' + str(l) + '_alpha' + str(alpha) + '_mk4_v2.yaml',
                  'w') as f:
            yaml.dump(ac, f)
    if stats['kld']:
        K.append(s.calc_kldivergence(base_probs, k))
    if stats['phi']:
        # Phi reuses the KL-divergence value here; an L1-metric computation is likely
        # what is intended for the l1metric results saved below.
        Phi.append(s.calc_kldivergence(base_probs, k))
if stats['kld']:
    with open('results/' + graph_path + '/kld/mk4_v2.yaml', 'w') as f:
        yaml.dump(K, f)
if stats['phi']:
    with open('results/' + graph_path + '/l1metric/mk4_v2.yaml', 'w') as f:
        yaml.dump(Phi, f)
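The two scalar summaries collected above are, presumably, the Kullback-Leibler divergence and an L1-style distance between the length-k word distributions of the reconstructed machine and of the original process. The sketch below shows the standard formulas over two word-probability dictionaries; the exact conventions used by SequenceAnalyzer.calc_kldivergence are an assumption.
In [ ]:
import math

def kl_divergence(p, q, eps=1e-12):
    """D(p || q) = sum_w p(w) * log2(p(w) / q(w)), over words w with p(w) > 0."""
    return sum(pw * math.log(pw / max(q.get(w, 0.0), eps), 2)
               for w, pw in p.items() if pw > 0)

def l1_metric(p, q):
    """Sum of absolute differences between two word distributions."""
    words = set(p) | set(q)
    return sum(abs(p.get(w, 0.0) - q.get(w, 0.0)) for w in words)

# Toy example with two distributions over binary words of length 2:
p = {'00': 0.1, '01': 0.4, '10': 0.4, '11': 0.1}
q = {'00': 0.15, '01': 0.35, '10': 0.35, '11': 0.15}
print(kl_divergence(p, q), l1_metric(p, q))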
In [ ]:
# Find synchronization words
w = 6
l2range = range(1, 4)
syncher = swf.SynchWordFinder(graph_path, w, Lmax, alpha, test, l2range)
sw = syncher.find_synch_words()
with open('synch_words/' + graph_path + '/' + sw_file + '.yaml', 'w') as f:
    yaml.dump(sw, f)
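The test parameter set to 'chi-squared' above presumably drives a statistical comparison of next-symbol distributions when deciding whether two candidate words synchronize to the same state. As an illustration only (not the SynchWordFinder internals), a chi-squared statistic for the homogeneity of two next-symbol count vectors can be computed like this:
In [ ]:
def chi_squared_statistic(counts_a, counts_b):
    """Chi-squared statistic for a 2 x K contingency table of next-symbol counts;
    larger values indicate more dissimilar conditional distributions."""
    symbols = set(counts_a) | set(counts_b)
    n_a = float(sum(counts_a.values()))
    n_b = float(sum(counts_b.values()))
    total = n_a + n_b
    stat = 0.0
    for sym in symbols:
        col = counts_a.get(sym, 0) + counts_b.get(sym, 0)
        for obs, n in ((counts_a.get(sym, 0), n_a), (counts_b.get(sym, 0), n_b)):
            exp = n * col / total  # expected count under homogeneity
            if exp > 0:
                stat += (obs - exp) ** 2 / exp
    return stat

# Toy example: next-symbol counts observed after two candidate words
print(chi_squared_statistic({'0': 90, '1': 10}, {'0': 85, '1': 15}))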
In [ ]:
# Create terminations
terms = [
    'dmark',
    'omega_inverted'
]
rng = range(4, 13, 2)
with open('synch_words/' + graph_path + '/' + sw_file + '.yaml', 'r') as f:
    sw = yaml.load(f)
for t in terms:
    for l in rng:
        g = pg.ProbabilisticGraph(path='graphs/' + graph_path + '/rtp_L' + str(Lmax) + '.yaml')
        h = g.expand_last_level(l, t, alpha, test, sw)
        h.save_graph_file('graphs/' + graph_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml')
In [ ]:
# Generate Mk1 and Mk2 and their sequences:
algos = [
    'mk1',
    'mk2_moore'
]
terms = [
    'dmark',
    'omega_inverted'
]
rng = range(4, 13, 2)
l2 = 3
with open('synch_words/' + graph_path + '/' + sw_file + '.yaml', 'r') as f:
    sw = yaml.load(f)
for l in rng:
    for t in terms:
        g = gg.GraphGenerator('graphs/' + graph_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml',
                              sw,
                              'graphs/' + graph_path + '/L' + str(l) + '_alpha' + str(alpha) + '_' + t,
                              '')
        for a in algos:
            if a == 'mk1':
                h = g.mk1(test, alpha, l2)
            else:
                h = g.mk2_moore(test, alpha, l2)
            s, alph = h.generate_sequence(lseq, h.states[0])
            seqpath = 'sequences/' + graph_path + '/len_' + str(lseq) + '_L' + str(l) + '_alpha' + str(alpha) + '_' + t + \
                      '_' + a + '.yaml'
            with open(seqpath, 'w') as f:
                yaml.dump(s, f)
In [ ]:
# Generate CRISSiS and its sequences:
l2range = range(1, 4)
for l2 in l2range:
    g = gg.GraphGenerator('graphs/' + graph_path + '/rtp_L' + str(Lmax) + '.yaml',
                          sw,
                          'graphs/' + graph_path + '/L2' + str(l2) + '_alpha' + str(alpha),
                          '')
    h = g.crissis(test, alpha, l2)
    s, alph = h.generate_sequence(lseq, h.states[0])
    seqpath = 'sequences/' + graph_path + '/len_' + str(lseq) + '_L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
    with open(seqpath, 'w') as f:
        yaml.dump(s, f)
In [ ]:
# Sequence analysis for D-Markov
rng = range(4, 13)
K = []
Phi = []
L = 10
k = 9
a = 20
stats = {
    'probs': True,
    'cond_probs': True,
    'cond_entropy': True,
    'autocorr': False,
    'kld': True,
    'phi': True
}
for d in rng:
    s = sa.SequenceAnalyzer('sequences/' + graph_path + '/len_' + str(lseq) + '_dmarkov_d' + str(d) + '.yaml')
    if stats['probs']:
        p = s.calc_probs(L)
        with open('results/' + graph_path + '/probabilities/dmarkov_d' + str(d) + '.yaml', 'w') as f:
            yaml.dump(p, f)
    else:
        with open('results/' + graph_path + '/probabilities/dmarkov_d' + str(d) + '.yaml', 'r') as f:
            p, alph = yaml.load(f)
        s.probabilities = p
        s.alphabet = alph
    if stats['cond_probs']:
        cp = s.calc_cond_probs(L - 1)
        with open('results/' + graph_path + '/probabilities/cond_dmarkov_d' + str(d) + '.yaml', 'w') as f:
            yaml.dump(cp, f)
    else:
        with open('results/' + graph_path + '/probabilities/cond_dmarkov_d' + str(d) + '.yaml', 'r') as f:
            s.conditional_probabilities = yaml.load(f)
    if stats['cond_entropy']:
        h = s.calc_cond_entropy(L - 1)
        with open('results/' + graph_path + '/cond_entropies/dmarkov_d' + str(d) + '.yaml', 'w') as f:
            yaml.dump(h, f)
    if stats['autocorr']:
        ac = s.calc_autocorrelation(a)
        with open('results/' + graph_path + '/autocorrelations/dmarkov_d' + str(d) + '.yaml', 'w') as f:
            yaml.dump(ac, f)
    if stats['kld']:
        K.append(s.calc_kldivergence(base_probs, k))
    if stats['phi']:
        # Phi reuses the KL-divergence value here; an L1-metric computation is likely
        # what is intended for the l1metric results saved below.
        Phi.append(s.calc_kldivergence(base_probs, k))
if stats['kld']:
    with open('results/' + graph_path + '/kld/dmarkov.yaml', 'w') as f:
        yaml.dump(K, f)
if stats['phi']:
    with open('results/' + graph_path + '/l1metric/dmarkov.yaml', 'w') as f:
        yaml.dump(Phi, f)
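The conditional entropy reported by calc_cond_entropy is presumably h(L) = H(X_L | X_1 ... X_{L-1}), which follows from the block probabilities as H(L) - H(L-1). A self-contained sketch of that formula; whether it matches SequenceAnalyzer's exact conventions is an assumption.
In [ ]:
import math

def block_entropy(probs):
    """Shannon entropy (in bits) of a word-probability dictionary."""
    return -sum(p * math.log(p, 2) for p in probs.values() if p > 0)

def cond_entropy(probs_L, probs_Lm1):
    """h(L) = H(L) - H(L-1): entropy of the next symbol given the previous L-1 symbols."""
    return block_entropy(probs_L) - block_entropy(probs_Lm1)

# Toy example: a fair-coin process gives h(L) = 1 bit at every L.
p2 = {'00': 0.25, '01': 0.25, '10': 0.25, '11': 0.25}
p1 = {'0': 0.5, '1': 0.5}
print(cond_entropy(p2, p1))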
In [ ]:
# Sequence analysis for CRISSiS
rng = range(1, 4)
K = []
Phi = []
L = 10
k = 6
a = 20
stats = {
    'probs': True,
    'cond_probs': True,
    'cond_entropy': True,
    'autocorr': False,
    'kld': True,
    'phi': True
}
for l2 in rng:
    s = sa.SequenceAnalyzer('sequences/' + graph_path + '/len_' + str(lseq) + '_L2_' + str(l2) + '_alpha' + str(alpha)
                            + '_crissis.yaml')
    if stats['probs']:
        p = s.calc_probs(L)
        with open('results/' + graph_path + '/probabilities/L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml',
                  'w') as f:
            yaml.dump(p, f)
    else:
        with open('results/' + graph_path + '/probabilities/L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml',
                  'r') as f:
            p, alph = yaml.load(f)
        s.probabilities = p
        s.alphabet = alph
    if stats['cond_probs']:
        cp = s.calc_cond_probs(L - 1)
        with open('results/' + graph_path + '/probabilities/cond_L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml',
                  'w') as f:
            yaml.dump(cp, f)
    else:
        with open('results/' + graph_path + '/probabilities/cond_L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml',
                  'r') as f:
            s.conditional_probabilities = yaml.load(f)
    if stats['cond_entropy']:
        h = s.calc_cond_entropy(L - 1)
        with open('results/' + graph_path + '/cond_entropies/L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml',
                  'w') as f:
            yaml.dump(h, f)
    if stats['autocorr']:
        ac = s.calc_autocorrelation(a)
        with open('results/' + graph_path + '/autocorrelations/L2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml',
                  'w') as f:
            yaml.dump(ac, f)
    if stats['kld']:
        K.append(s.calc_kldivergence(base_probs, k))
    if stats['phi']:
        # Phi reuses the KL-divergence value here; an L1-metric computation is likely
        # what is intended for the l1metric results saved below.
        Phi.append(s.calc_kldivergence(base_probs, k))
if stats['kld']:
    with open('results/' + graph_path + '/kld/crissis.yaml', 'w') as f:
        yaml.dump(K, f)
if stats['phi']:
    with open('results/' + graph_path + '/l1metric/crissis.yaml', 'w') as f:
        yaml.dump(Phi, f)
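The autocorrelation option is switched off in these runs, but when enabled calc_autocorrelation(a) presumably returns the sequence autocorrelation up to lag a. For reference, a plain-Python version of the usual estimator (the normalization used by the analyzer is an assumption):
In [ ]:
def autocorrelation(seq, max_lag):
    """Sample autocorrelation of a numeric sequence for lags 1..max_lag."""
    x = [float(v) for v in seq]
    n = len(x)
    mean = sum(x) / n
    var = sum((v - mean) ** 2 for v in x) / n
    acs = []
    for lag in range(1, max_lag + 1):
        cov = sum((x[i] - mean) * (x[i + lag] - mean) for i in range(n - lag)) / n
        acs.append(cov / var if var > 0 else 0.0)
    return acs

# Toy example on an alternating binary sequence:
print(autocorrelation([0, 1] * 50, 4))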
In [ ]:
# Sequence analysis for Mk1 and Mk2
rng = range(4, 13, 2)
L = 10
k = 6
lag = 20  # autocorrelation lag (named so the algorithm loop variable 'a' does not shadow it)
algos = [
    'mk1',
    'mk2_moore'
]
terms = [
    'dmark',
    'omega_inverted'
]
stats = {
    'probs': False,
    'cond_probs': True,
    'cond_entropy': True,
    'autocorr': False,
    'kld': True,
    'phi': True
}
for a in algos:
    for t in terms:
        # one divergence curve per (algorithm, termination) pair
        K = []
        Phi = []
        for l in rng:
            s = sa.SequenceAnalyzer('sequences/' + graph_path + '/len_' + str(lseq) + '_L' + str(l) + '_alpha' +
                                    str(alpha) + '_' + t + '_' + a + '.yaml')
            if stats['probs']:
                p = s.calc_probs(L)
                with open('results/' + graph_path + '/probabilities/L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' +
                          a + '.yaml', 'w') as f:
                    yaml.dump(p, f)
            else:
                with open('results/' + graph_path + '/probabilities/L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' +
                          a + '.yaml', 'r') as f:
                    p, alph = yaml.load(f)
                s.probabilities = p
                s.alphabet = alph
            if stats['cond_probs']:
                cp = s.calc_cond_probs(L - 1)
                with open('results/' + graph_path + '/probabilities/cond_L' + str(l) + '_alpha' + str(alpha) + '_' + t +
                          '_' + a + '.yaml', 'w') as f:
                    yaml.dump(cp, f)
            else:
                with open('results/' + graph_path + '/probabilities/cond_L' + str(l) + '_alpha' + str(alpha) + '_' + t +
                          '_' + a + '.yaml', 'r') as f:
                    s.conditional_probabilities = yaml.load(f)
            if stats['cond_entropy']:
                h = s.calc_cond_entropy(L - 1)
                with open('results/' + graph_path + '/cond_entropies/L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' +
                          a + '.yaml', 'w') as f:
                    yaml.dump(h, f)
            if stats['autocorr']:
                ac = s.calc_autocorrelation(lag)
                with open('results/' + graph_path + '/autocorrelations/L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' +
                          a + '.yaml', 'w') as f:
                    yaml.dump(ac, f)
            if stats['kld']:
                K.append(s.calc_kldivergence(base_probs, k))
            if stats['phi']:
                # Phi reuses the KL-divergence value here; an L1-metric computation is likely
                # what is intended for the l1metric results saved below.
                Phi.append(s.calc_kldivergence(base_probs, k))
        if stats['kld']:
            with open('results/' + graph_path + '/kld/' + a + '_' + t + '.yaml', 'w') as f:
                yaml.dump(K, f)
        if stats['phi']:
            with open('results/' + graph_path + '/l1metric/' + a + '_' + t + '.yaml', 'w') as f:
                yaml.dump(Phi, f)
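Once the analysis cells have run, the saved divergence curves can be compared across reconstruction algorithms. A minimal plotting sketch, assuming matplotlib is available and that each kld/*.yaml file holds a plain list of values; the x-ranges reuse the ones from the analysis cells above, and the mk1/mk2 files (kld/mk1_dmark.yaml, etc.) can be added to the dictionary in the same way.
In [ ]:
import matplotlib.pyplot as plt

# Curves saved by the analysis cells above, keyed by method label.
curves = {
    'D-Markov': ('results/' + graph_path + '/kld/dmarkov.yaml', range(4, 13)),
    'Mk4 v2': ('results/' + graph_path + '/kld/mk4_v2.yaml', range(4, 13)),
    'CRISSiS': ('results/' + graph_path + '/kld/crissis.yaml', range(1, 4)),
}
for label, (path, xs) in curves.items():
    with open(path, 'r') as f:
        values = yaml.load(f)
    plt.plot(list(xs), values, marker='o', label=label)
plt.xlabel('model size parameter (d, L or L2)')
plt.ylabel('KL divergence vs. original')
plt.legend()
plt.show()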