In [1]:
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
In [2]:
alpha = 0.7
phi_ext = 2 * pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
    return 2 + alpha - 2 * cos(phi_p)*cos(phi_m) - alpha * cos(phi_ext - 2*phi_p)
In [3]:
phi_m = linspace(0, 2*pi, 100)
phi_p = linspace(0, 2*pi, 100)
X,Y = meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y)
In [4]:
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*pi), Y/(2*pi), Z, cmap=cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
In [5]:
Z?
In [6]:
alphabet = ['A', 'C', 'G', 'T']
monomer = 'ACCGCTAGAAAT'
monomer * 10
Out[6]:
In [7]:
frequency = 4  # monomer length in bases (example value; the original cell assumed this was already set)
monomer = ''.join([choice(alphabet) for index in range(frequency)])
monomer
In [ ]:
bpPerFrequency = 20  # target section length in bases (example value; assumed to be set earlier)
copies = int(ceil(bpPerFrequency / frequency))  # pylab's ceil returns a float, so cast before repeating the string
repeat = monomer * copies
repeat
In [ ]:
'ACGAT' * int(ceil(3.2))  # string repetition needs an int; ceil alone returns a float here
In [ ]:
def debugSequence(maxFrequency, sectionLength, startFrequency=1):
    '''Build a test sequence: for each monomer length ("frequency") from
    startFrequency to maxFrequency, append a random monomer repeated enough
    times to cover at least sectionLength bases.'''
    seq = ''
    for frequency in range(startFrequency, maxFrequency+1):
        monomer = ''.join([choice(alphabet) for index in range(frequency)])
        copies = int(ceil(sectionLength / frequency))  # cast: ceil returns a float
        repeat = monomer * copies
        seq += repeat
    return seq
In [ ]:
debugSequence(5, 20, 3)
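A quick check (a sketch added here, not part of the original run): each monomer length f contributes ceil(sectionLength/f)*f bases, so every section is at least sectionLength long and the total length is predictable even though the bases themselves are random.
In [ ]:
# Sanity-check sketch: compare the length of the generated sequence with the
# length implied by the copies-per-frequency formula used in debugSequence.
seq = debugSequence(5, 20, 3)
expected = sum(int(ceil(20 / f)) * f for f in range(3, 6))
print(len(seq), expected, len(seq) == expected)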
In [ ]:
raw_bias = """TTT F 0.58 22.1( 80995) TCT S 0.17 10.4( 38027) TAT Y 0.59 17.5( 63937) TGT C 0.46 5.2( 19138)
TTC F 0.42 16.0( 58774) TCC S 0.15 9.1( 33430) TAC Y 0.41 12.2( 44631) TGC C 0.54 6.1( 22188)
TTA L 0.14 14.3( 52382) TCA S 0.14 8.9( 32715) TAA * 0.61 2.0( 7356) TGA * 0.30 1.0( 3623)
TTG L 0.13 13.0( 47500) TCG S 0.14 8.5( 31146) TAG * 0.09 0.3( 989) TGG W 1.00 13.9( 50991)
CTT L 0.12 11.9( 43449) CCT P 0.18 7.5( 27340) CAT H 0.57 12.5( 45879) CGT R 0.36 20.0( 73197)
CTC L 0.10 10.2( 37347) CCC P 0.13 5.4( 19666) CAC H 0.43 9.3( 34078) CGC R 0.36 19.7( 72212)
CTA L 0.04 4.2( 15409) CCA P 0.20 8.6( 31534) CAA Q 0.34 14.6( 53394) CGA R 0.07 3.8( 13844)
CTG L 0.47 48.4(177210) CCG P 0.49 20.9( 76644) CAG Q 0.66 28.4(104171) CGG R 0.11 5.9( 21552)
ATT I 0.49 29.8(109072) ACT T 0.19 10.3( 37842) AAT N 0.49 20.6( 75436) AGT S 0.16 9.9( 36097)
ATC I 0.39 23.7( 86796) ACC T 0.40 22.0( 80547) AAC N 0.51 21.4( 78443) AGC S 0.25 15.2( 55551)
ATA I 0.11 6.8( 24984) ACA T 0.17 9.3( 33910) AAA K 0.74 35.3(129137) AGA R 0.07 3.6( 13152)
ATG M 1.00 26.4( 96695) ACG T 0.25 13.7( 50269) AAG K 0.26 12.4( 45459) AGG R 0.04 2.1( 7607)
GTT V 0.28 19.8( 72584) GCT A 0.18 17.1( 62479) GAT D 0.63 32.7(119939) GGT G 0.35 25.5( 93325)
GTC V 0.20 14.3( 52439) GCC A 0.26 24.2( 88721) GAC D 0.37 19.2( 70394) GGC G 0.37 27.1( 99390)
GTA V 0.17 11.6( 42420) GCA A 0.23 21.2( 77547) GAA E 0.68 39.1(143353) GGA G 0.13 9.5( 34799)
GTG V 0.35 24.4( 89265) GCG A 0.33 30.1(110308) GAG E 0.32 18.7( 68609) GGG G 0.15 11.3( 41277)"""
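The (codon, count) pairs in the next cell come from the raw counts in parentheses above. A small parsing sketch (my addition, assuming the fixed field layout of raw_bias) recovers the same pairs directly instead of transcribing them by hand.
In [ ]:
import re
# Each field looks like "TTT F 0.58 22.1( 80995)": capture the codon and the
# raw count inside the parentheses, in reading order.
parsed = re.findall(r'([ACGT]{3})\s+\S+\s+[\d.]+\s+[\d.]+\(\s*(\d+)\)', raw_bias)
parsed = [(codon, int(count)) for codon, count in parsed]
len(parsed), parsed[:4]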
In [ ]:
biases = [('TTT', 80995),
('TCT', 38027),
('TAT', 63937),
('TGT', 19138),
('TTC', 58774),
('TCC', 33430),
('TAC', 44631),
('TGC', 22188),
('TTA', 52382),
('TCA', 32715),
('TAA', 7356),
('TGA', 3623),
('TTG', 47500),
('TCG', 31146),
('TAG', 989),
('TGG', 50991),
('CTT', 43449),
('CCT', 27340),
('CAT', 45879),
('CGT', 73197),
('CTC', 37347),
('CCC', 19666),
('CAC', 34078),
('CGC', 72212),
('CTA', 15409),
('CCA', 31534),
('CAA', 53394),
('CGA', 13844),
('CTG',177210),
('CCG', 76644),
('CAG',104171),
('CGG', 21552),
('ATT',109072),
('ACT', 37842),
('AAT', 75436),
('AGT', 36097),
('ATC', 86796),
('ACC', 80547),
('AAC', 78443),
('AGC', 55551),
('ATA', 24984),
('ACA', 33910),
('AAA',129137),
('AGA', 13152),
('ATG', 96695),
('ACG', 50269),
('AAG', 45459),
('AGG', 7607),
('GTT', 72584),
('GCT', 62479),
('GAT',119939),
('GGT', 93325),
('GTC', 52439),
('GCC', 88721),
('GAC', 70394),
('GGC', 99390),
('GTA', 42420),
('GCA', 77547),
('GAA',143353),
('GGA', 34799),
('GTG', 89265),
('GCG',110308),
('GAG', 68609),
('GGG', 41277),]
In [ ]:
import random

def weighted_choice(items, probs, bincount=10000):
    '''Put items into bins in proportion to probs, then use
    random.choice() to draw one item at a time. A larger bincount
    costs more memory but gives higher accuracy (on average).'''
    bins = []
    for item, prob in zip(items, probs):
        bins += [item] * int(bincount * prob)
    while True:
        yield random.choice(bins)
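Since pylab already pulls in numpy.random, an alternative sketch (not the approach used in this notebook) is to sample with numpy.random.choice and its p= argument, which avoids building the bins list and the rounding loss from int(bincount*prob).
In [ ]:
# Alternative sketch using numpy.random.choice; demo_probs are assumed example
# values and must sum to 1.
import numpy as np
demo_probs = [0.1, 0.2, 0.3, 0.4]
np.random.choice(['A', 'C', 'G', 'T'], size=10, p=demo_probs)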
In [ ]:
codons, weights = ([entry[0] for entry in biases], [entry[1] for entry in biases])
In [ ]:
total = sum(weights)
In [ ]:
probabilities = [w / total for w in weights]
probabilities
In [ ]:
itera = weighted_choice(codons, probabilities)
biased_codons = [next(itera) for x in range(100)]  # draw 100 codons from the weighted generator
''.join(biased_codons)
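To see that the generator roughly reproduces the codon usage table, the sketch below (my addition) takes a larger draw and compares observed frequencies against the target probabilities for the first few codons.
In [ ]:
from collections import Counter
# Larger draw: observed codon frequencies should track the target probabilities.
sample = [next(itera) for x in range(100000)]
counts = Counter(sample)
[(codon, round(counts[codon] / len(sample), 4), round(p, 4))
 for codon, p in zip(codons, probabilities)][:5]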