In [1]:
%pylab
In [2]:
# Serif fonts and a figure size (inches) matched to the document's text
# width; 5.39 in is the 390 pt text width converted below.
mpl.rcParams.update({'font.family': 'serif',
                     'font.size': 12,
                     'font.sans-serif': [],
                     'legend.fontsize': 12,
                     'figure.figsize': (5.39, 3.33)})
In [7]:
# Text width: 390 TeX points at 72.27 pt/inch -> width in inches.
390 / 72.27
Out[7]:
In [28]:
# Golden-ratio figure height for a full-text-width figure.
390 / 72.27 * 0.618
Out[28]:
In [9]:
import sys
from collections import defaultdict
from imp import reload  # NOTE(review): `imp` is deprecated; importlib.reload is the modern spelling
import numpy.fft as fft
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection used below
#import prettyplotlib
import ssad             # project-local stochastic simulation (SSA) module
import ensemble as we   # project-local weighted-ensemble module
In [10]:
# Pick up edits to the local modules, then refresh the imported names.
reload(ssad)
reload(we)
from ssad import Reaction, Trajectory
In [11]:
# From http://nbviewer.ipython.org/github/ipython/ipython/blob/master/examples/notebooks/Progress%20Bars.ipynb
class ProgressBar:
    """Text progress bar for long-running notebook loops.

    Call ``animate(i)`` once per iteration; the bar is redrawn in place
    using a carriage return on a single output line.
    """

    def __init__(self, iterations):
        # Total number of iterations the bar represents.
        self.iterations = iterations
        self.prog_bar = '[]'
        self.fill_char = '='
        self.width = 50
        self.__update_amount(0)

    def animate(self, iter):
        # sep='' fixes the stray space print() would otherwise insert
        # between the carriage return and the bar text.
        print('\r', self, sep='', end='')
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def update_iteration(self, elapsed_iter):
        # Rebuild the bar for the new percentage, then append the count.
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)

    def __update_amount(self, new_amount):
        # new_amount is already a percentage in [0, 100]; the original's
        # int(round((new_amount / 100.0) * 100.0)) was a redundant no-op.
        percent_done = int(round(new_amount))
        all_full = self.width - 2
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        # Overwrite the middle of the bar with the percentage string.
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.prog_bar = self.prog_bar[0:pct_place] + \
            (pct_string + self.prog_bar[pct_place + len(pct_string):])

    def __str__(self):
        return str(self.prog_bar)
In [7]:
species = ['X']
# NOTE(review): the original inline labels were swapped — Reaction([1], [-1], 1)
# consumes one X (degradation, rate 1) while Reaction([0], [+1], 25) creates
# one X (production, rate 25); cf. the k_plus/k_minus cell later in this notebook.
rxns = [ssad.Reaction([1], [-1], 1),   # Degradation
        ssad.Reaction([0], [+1], 25)]  # Production
In [8]:
trj = Trajectory([50, 50], rxns)
In [9]:
# Run the SSA for 50 time units; the second argument is presumably a cap
# on the number of reaction events — TODO confirm against ssad.run_dynamics.
trj.run_dynamics(50, 1e5)
In [10]:
trj.rxn_counter
Out[10]:
In [11]:
times = array(trj.hist_times)
states = array(trj.hist_states)
In [12]:
# Population of the first species over time.
plot(times, states[:,0])
Out[12]:
Now try it with a weighted ensemble.
In [25]:
# 25 uniform bins covering populations 0-50 in one dimension.
paving = we.UniformPaving([0], [50], 25)
init_trjs = []
# 50 trajectories started at populations 0..49, equal weights summing to 1.
for idx in range(50):
    init_trjs.append(we.WeightedTrajectory([idx], rxns, 1/50))
ens = we.Ensemble(10, paving, (5, 10), init_trjs)
In [40]:
%prun ens.run_time(100)
In [33]:
# Collect the current state of every trajectory in the ensemble.
states_sample = []
for trj in ens:
    states_sample.append(trj.state)
In [46]:
species = ['X', 'A', 'B']
# Reversible reaction pairs built via get_reverse_rxn (argument is the
# reverse rate).
rxn1 = Reaction([2, 1, 0], [+1, -1, 0], 3.0E-7)
rxn2 = Reaction([0, 0, 1], [+1, 0, -1], 1.0E-3)
rxns = [rxn1, rxn1.get_reverse_rxn(1.0E-4), rxn2, rxn2.get_reverse_rxn(3.5)]
In [25]:
# NOTE(review): this Reaction call has an extra leading [] argument compared
# with the two-list form used above — verify which signature ssad expects.
rxns = [Reaction([], [0], [+1], 10)]
trjs = []
ntrjs = 200
# NOTE(review): weight=1/30 with ntrjs=200 makes the weights sum to 200/30,
# not 1 — intentional? Compare the 1/ntrjs used in the next setup below.
for tidx in range(ntrjs):
    trjs.append(Trajectory([0], rxns, weight=1/30))
In [26]:
for trj in trjs:
    trj.run_dynamics(200)
In [27]:
weights = np.empty((ntrjs))
states = np.empty((ntrjs, 1))
In [28]:
# Gather final weights and states of all trajectories.
for tidx, trj in enumerate(trjs):
    weights[tidx] = trj.weight
    states[tidx,:] = trj.state
In [29]:
hist(states)
Out[29]:
In [30]:
# Two production reactions, one per species; uniform weights this time.
rxns = [Reaction([], [0], [+1, 0], 10),
        Reaction([], [1], [0, +1], 5)]
trjs = []
ntrjs = 200
for tidx in range(ntrjs):
    trjs.append(Trajectory([0], rxns, weight=1/ntrjs))
In [31]:
weights2 = np.empty((ntrjs))
states2 = np.empty((ntrjs, 2))
In [32]:
# Run and record in a single pass.
for tidx, trj in enumerate(trjs):
    trj.run_dynamics(200)
    weights2[tidx] = trj.weight
    states2[tidx,:] = trj.state
In [34]:
hist(states2[:,1])
Out[34]:
Out[34]:
Save this section! (contains verification, as well as a thesis-worthy plot)
In [6]:
k_plus = 25    # production rate
k_minus = 1    # per-molecule degradation rate
rxns = [Reaction([0], [+1], k_plus),
        Reaction([1], [-1], k_minus)]
numbins = 50
# One bin per population value 0..49.
paving = we.UniformPaving([0], [numbins], numbins)
bin_xs = np.array(range(numbins))
init_trjs = []
# One trajectory per bin, equal weights summing to 1.
for istate in range(numbins):
    init_trjs.append(we.WeightedTrajectory([istate], rxns, 1.0/numbins))
In [15]:
# Gaussian approximation to the steady state: this density has mean and
# variance both equal to k_plus/k_minus = 25.
x_fine = linspace(0, 50, 1000)
pdist_analytic = sqrt(k_minus / (2.0 * pi * k_plus)) * exp(-1.0 * k_minus / (2.0 * k_plus) * (x_fine - k_plus * 1.0 / k_minus)**2)
In [8]:
# Riemann-sum sanity check that the analytic density integrates to ~1.
sum(pdist_analytic) * 50 / 1000
Out[8]:
In [10]:
tot_time = 400.0
niter = 800   # WE iterations of tau = tot_time/niter = 0.5 s each
ens = we.Ensemble(tot_time / niter, paving, (10, 15), init_trjs)
pbar = ProgressBar(niter)
In [20]:
# Time stamp at the end of each WE iteration; drop t = 0.
times = linspace(0.0, tot_time, niter + 1)
times = times[1:]
In [10]:
# Run WE *with* reweighting, recording the probability distribution after
# each iteration (one column per iteration).
# NOTE(review): loop-body extent reconstructed; the notebook export lost
# the original indentation. The final animate(idx+1) completes the bar.
pdist_rwt = np.zeros((numbins, niter))
for idx in range(niter):
    pbar.animate(idx)
    ens.run_step()
    pdist_rwt[:,idx] = ens.get_pdist()
pbar.animate(idx+1)
In [16]:
np.savez('../results/prod_deg_rwt.npz', bin_xs=bin_xs, times=times, pdist=pdist_rwt)
In [12]:
# Reload a previously saved run (lets one skip the long loop above).
save_rwt = np.load('../results/prod_deg_rwt.npz')
pdist_rwt = save_rwt['pdist']
In [11]:
# Visually determine when the distribution equilibrates
iters_rwt = range(pdist_rwt.shape[1])
x_3d = outer(ones_like(bin_xs), iters_rwt)
y_3d = outer(bin_xs, ones_like(iters_rwt))
fig = figure()
ax = fig.gca(projection='3d')
ax.plot_surface(x_3d, y_3d, pdist_rwt)
ax.set_xlabel('WE Iteration')
ax.set_ylabel('Bin')
Out[11]:
In [13]:
# Discard the first nthrow iterations as burn-in, then average over time.
nthrow = 5
pdist_rwt_avg = np.mean(pdist_rwt[:,nthrow:], axis=1)
pdist_rwt_std = np.std(pdist_rwt[:,nthrow:], axis=1)
# NOTE(review): this SEM treats the niter - nthrow iterations as independent
# samples; successive WE iterations are correlated, so it may underestimate
# the true error.
pdist_rwt_sem = pdist_rwt_std / np.sqrt(niter - nthrow)
In [18]:
# Control run: same ensemble, but stepped *without* resampling/reweighting.
ens_norwt = we.Ensemble(tot_time / niter, paving, (10, 10), init_trjs)
ens_norwt._resample()
pbar = ProgressBar(niter)
In [ ]:
# NOTE(review): loop-body extent reconstructed; the export lost indentation.
# _recompute_bins() is assumed to run every iteration, since run_step is
# called with resample=False — confirm against ensemble.py.
pdist_norwt = np.zeros((numbins, niter))
pbar.animate(0)
for idx in range(niter):
    ens_norwt.run_step(resample=False)
    pbar.animate(idx + 1)
    pdist_norwt[:,idx] = ens_norwt.get_pdist()
    ens_norwt._recompute_bins()
In [22]:
np.savez('../results/prod_deg_norwt.npz', bin_xs=bin_xs, times=times, pdist=pdist_norwt)
In [7]:
save_norwt = np.load('../results/prod_deg_norwt.npz')
pdist_norwt = save_norwt['pdist']
In [23]:
# Visually determine when the distribution equilibrates
iters_rwt = range(pdist_rwt.shape[1])
x_3d = outer(ones_like(bin_xs), iters_rwt)
y_3d = outer(bin_xs, ones_like(iters_rwt))
fig = figure()
ax = fig.gca(projection='3d')
ax.plot_surface(x_3d, y_3d, pdist_norwt)
ax.set_xlabel('WE Iteration')
ax.set_ylabel('Bin')
Out[23]:
In [11]:
# Same burn-in/average treatment for the no-reweighting control.
nthrow = 10
pdist_norwt_avg = np.mean(pdist_norwt[:,nthrow:], axis=1)
pdist_norwt_std = np.std(pdist_norwt[:,nthrow:], axis=1)
pdist_norwt_sem = pdist_norwt_std / sqrt(niter - nthrow)
In [45]:
# Overlay the analytic density with the two simulated histograms.
plot(x_fine, pdist_analytic, color='black')
b_norwt = bar(bin_xs - 0.5, pdist_norwt_avg, yerr=pdist_norwt_sem, color='red', ecolor='red', alpha=0.6)
b_rwt = bar(bin_xs - 0.5, pdist_rwt_avg, yerr=pdist_rwt_sem, color='blue', ecolor='blue', alpha=0.6)
fig = gcf()
ax = fig.gca()
ax.set_xlim(10, 40)
ax.set_ylim(top=0.13, bottom=0.0)
ax.set_xlabel('Number of X')
ax.set_ylabel('Probability Density (per bin)')
# When legend() is given a bare sequence of labels, they are assigned to
# artists in creation order: line, no-reweighting bars (red), reweighting
# bars (blue). The original listed the reweighting label second, which
# mislabeled the two bar series — fixed here.
legend(('Analytic Solution', 'No Reweighting', 'SSA, Reweighting ($\\tau$ = 0.5 s)'), loc='upper right')
fig.tight_layout()
In [46]:
fig.savefig('../results/gaussian-verification-sdm.pgf')
In [66]:
fig.savefig('../results/gaussian-verification.pgf')
Now plot the differences (essentially $\chi^2$)
In [46]:
# Analytic density evaluated at the bin centers, for bin-wise comparison.
analytic_coarse = sqrt(k_minus / (2.0 * pi * k_plus)) * exp(-1.0 * k_minus / (2.0 * k_plus) * ((bin_xs) - k_plus * 1.0 / k_minus)**2)
# Per-bin deviation from the analytic result, in units of each run's SEM.
chis_rwt = (pdist_rwt_avg - analytic_coarse) / pdist_rwt_sem
chis_norwt = (pdist_norwt_avg - analytic_coarse) / pdist_norwt_sem
In [34]:
# Difference between the two methods, normalized by the combined SE.
se_both = sqrt(pdist_rwt_sem**2 + pdist_norwt_sem**2)
chis = (pdist_norwt_avg - pdist_rwt_avg) / se_both
In [49]:
fig = figure()
ax = fig.gca()
ax.bar(bin_xs, chis, color='blue')
ax.set_xlabel('Population of X')
ax.set_ylabel('Normalized difference')
ax.set_xlim(10, 40)
ax.axhline(0.0, color='black')
ax.grid(True)
fig.tight_layout()
In [50]:
fig.savefig('../results/prod_deg_chi.pgf')
In [41]:
# Overlaid per-bin deviations for both runs over the plotted range.
fig = figure()
ax = fig.gca()
ax.bar(bin_xs[10:41], chis_norwt[10:41], color='red', alpha=0.6, label='No Resampling')
ax.bar(bin_xs[10:41], chis_rwt[10:41], color='blue', alpha=0.6, label='With Resampling')
ax.set_xlabel('Number of X')
ax.set_ylabel(r'Difference from Analytic Solution (units of $\sigma$)')
In [67]:
leg = ax.legend(loc='best')
ax.axhline(0, color='black')
ax.grid()
In [69]:
fig.savefig('../results/gaussian-verification-chi.pgf')
In [58]:
# Chi-squared statistic over the 31 plotted bins.
np.sum(chis_rwt[10:41]**2)
Out[58]:
In [9]:
?Reaction
In [20]:
# Determinism check: one 100 s run must match the same run split into
# segments (10 + 33 + 57) when replayed from the same RNG state.
rxns = [ssad.Reaction([0], [+1], 100),
        ssad.Reaction([1], [-1], 4.1),
        ssad.Reaction([1], [-1], 10, delay=20.0)]  # delayed degradation channel
init_state = [5]
trj1 = Trajectory(init_state, rxns)
trj2 = Trajectory(init_state, rxns)
In [21]:
rand_state = random.get_state()
trj1.run_dynamics(100)
random.set_state(rand_state)   # replay the same random numbers for trj2
trj2.run_dynamics(10)
trj2.run_dynamics(33)
trj2.run_dynamics(57)
In [22]:
states1 = array(trj1.hist_states)
states2 = array(trj2.hist_states)
times1 = array(trj1.hist_times)
times2 = array(trj2.hist_times)
In [16]:
step(times1, states1, where='post')
step(times2, states2, color='red', where='post')
Out[16]:
In [23]:
# Max-norm differences; both should print 0.0 if the runs coincide.
print(norm(times1 - times2, ord=Inf))
print(norm(states1 - states2, ord=Inf))
Excellent, the trajectories coincide. Make sure it stays that way if the code is modified!
Now do the same test on weighted trajectories.
In [25]:
init_state = [0]
# Give them different weights, just for fun.
wtrj1 = we.WeightedTrajectory(init_state, rxns, pi/4)
wtrj2 = we.WeightedTrajectory(init_state, rxns, e/3)
In [26]:
rand_state = random.get_state()
wtrj1.run_dynamics(100)
random.set_state(rand_state)
# Segments again sum to 100 (33 + 10 + 57), in a different order this time.
wtrj2.run_dynamics(33)
wtrj2.run_dynamics(10)
wtrj2.run_dynamics(57)
In [27]:
states1 = array(wtrj1.hist_states)
states2 = array(wtrj2.hist_states)
times1 = array(wtrj1.hist_times)
times2 = array(wtrj2.hist_times)
In [28]:
print(norm(times1 - times2, ord=Inf))
print(norm(states1 - states2, ord=Inf))
Good; as expected, the weighting didn't change anything (even if the trajectories had different/weird weights).
In [13]:
rxns = [Reaction([0], [+1], 100),
        Reaction([1], [-1], 3),
        Reaction([1], [-1], 5, delay=20.0)]
In [14]:
trj = Trajectory([20], rxns)
In [7]:
# Sampling at a negative time — presumably falls back to the initial
# state when use_init_state=True; confirm against ssad.sample_state.
trj.sample_state(-20, use_init_state=True)
Out[7]:
In [8]:
# Empirical check of the next-reaction sampler: tally which reaction
# fires and collect waiting times over many independent draws.
nsamples = int(1e5)
rxn_tallies = defaultdict(lambda: 0)
time_stats = defaultdict(lambda: [])
for snum in range(nsamples):
    rxn, time = trj._sample_next_reaction()
    rxn_tallies[rxn] += 1
    time_stats[rxn].append(time)
In [9]:
# Observed firing counts should be proportional to the propensities below.
for rxn in rxns:
    print(rxn_tallies[rxn])
In [10]:
for rxn in rxns:
    print(rxn.calc_propensity([20]))
In [ ]: