In many applications the poles of the transfer function can be found by solving a polynomial. If the denominator is a polynomial in $x = e^{-z T_{\text{gcd}}}$ for some delay $T_{\text{gcd}}$, then the poles are obtained from the roots of that polynomial. In the more general case, when the delays $T_1,\dots,T_n$ are not commensurate, a different method is needed, such as the root-finding algorithm implemented in Roots.py. Here we show how, when the delays are commensurate, we can find the roots in a desired frequency range.
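To make the reduction explicit (a sketch using the notation of the cells below): if every delay is an integer multiple of a common value, $T_i = m_i T_{\text{gcd}}$, then each delay factor becomes a power of a single variable, \begin{align} e^{-z T_i} = \left(e^{-z T_{\text{gcd}}}\right)^{m_i} = x^{m_i}, \end{align} so the determinant $\det(E - M_1)$ computed below is an ordinary polynomial in $x$. Each root $x_0$ of that polynomial then corresponds to the family of values \begin{align} z = -\frac{\log x_0}{T_{\text{gcd}}} + \frac{2 \pi i n}{T_{\text{gcd}}}, \qquad n \in \mathbb{Z}, \end{align} which is why the roots found this way repeat periodically along the imaginary axis.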
In [683]:
import numpy as np
import numpy.linalg as la
In [684]:
import Potapov_Code.Time_Delay_Network as networks
In [685]:
import matplotlib.pyplot as plt
%pylab inline
In [686]:
import sympy as sp
from sympy import init_printing
init_printing()
In [687]:
from fractions import gcd
In [688]:
## To identify commensurate delays we need an exact decimal representation of the delays,
## not the binary representation used by standard Python floats.
from decimal import Decimal
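As a quick illustration of the point above (requires Python 2.7+ or 3.x, where Decimal accepts floats directly): converting a float straight to Decimal exposes its binary rounding error, while going through str first keeps the intended decimal value.
print(Decimal(0.1))       ## exact binary value, e.g. 0.1000000000000000055511151231...
print(Decimal(str(0.1)))  ## 0.1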
In [689]:
X = networks.Example3()
In [690]:
X.delays
Out[690]:
In [691]:
def gcd_lst(lst):
    l = len(lst)
    if l == 0:
        return None
    elif l == 1:
        return lst[0]
    elif l == 2:
        return gcd(lst[0],lst[1])
    else:
        return gcd(lst[0],gcd_lst(lst[1:]))
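Equivalently (a small aside, not used below, and gcd_lst_reduce is just an illustrative name), the same fold can be written with reduce:
from functools import reduce  ## reduce is a builtin in Python 2; functools.reduce also works there

def gcd_lst_reduce(lst):
    ## Fold gcd over the list; empty input returns None like gcd_lst.
    return reduce(gcd, lst) if lst else None

gcd_lst_reduce([3,27,300,6])  ## 3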
In [692]:
gcd_lst([1,2,3,4,5,6])
Out[692]:
In [693]:
gcd_lst([2,4,8,6])
Out[693]:
In [694]:
gcd_lst([3,27,300,6])
Out[694]:
In [695]:
def find_commensurate(delays):
    ## Scale all delays by a power of ten so they become integers,
    ## take the gcd of the integers, then scale back down.
    mult = min([d.as_tuple().exponent for d in delays])
    power = 10**-mult
    delays = map(lambda x: x*power,delays)
    int_gcd = gcd_lst(delays)
    return int_gcd/power
In [696]:
delays = [Decimal('1.14'),Decimal('532.23423'),Decimal('0.06'),Decimal('0.1')]
In [697]:
gcd_delays = find_commensurate(delays)
gcd_delays
Out[697]:
In [701]:
map(lambda z: z / gcd_delays, delays)
Out[701]:
In [702]:
## In general, converting floats to Decimal should be avoided because floats are stored in binary form.
## Converting to a string first rounds the number to its decimal representation.
## Good practice would be to specify the delays as Decimals and convert to floats later.
gcd_delays = find_commensurate(map(lambda x: Decimal(str(x)),X.delays))
gcd_delays
Out[702]:
In [703]:
Decimal_delays =map(lambda x: Decimal(str(x)),X.delays)
Decimal_delays
Out[703]:
In [704]:
E = sp.Matrix(np.zeros_like(X.M1))
In [705]:
x = sp.symbols('x')
In [706]:
for i,delay in enumerate(Decimal_delays):
    E[i,i] = x**int(delay / gcd_delays)
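As a quick sanity check (illustrative only), each delay divided by the gcd should come out as an exact integer; these integers are the exponents placed on the diagonal of E above.
[delay / gcd_delays for delay in Decimal_delays]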
In [708]:
E
Out[708]:
In [709]:
M1 = sp.Matrix(X.M1)
In [710]:
M1
Out[710]:
In [711]:
E - M1
Out[711]:
In [712]:
expr = sp.apart((E - M1).det())
In [713]:
a = sp.Poly(expr, x)
In [714]:
a
Out[714]:
In [715]:
poly_coeffs = a.all_coeffs()
poly_coeffs
Out[715]:
In [716]:
roots = np.roots(poly_coeffs)
plt.figure(figsize =(6,6))
fig = plt.scatter(roots.real,roots.imag)
plt.title('Roots of the above polynomial',{'fontsize':24})
plt.xlabel('Real part',{'fontsize':24})
plt.ylabel('Imaginary part',{'fontsize':24})
Out[716]:
After mapping back to the complex frequency plane (next cell), the roots repeat with a period of $2 \pi / T_{\text{gcd}} \approx 628$ along the imaginary axis.
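This period can be computed directly from the gcd found above (the value $\approx 628$ corresponds to $T_{\text{gcd}} \approx 0.01$):
2.*np.pi / float(gcd_delays)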
In [717]:
zs = np.asarray(map(lambda r: np.log(r) / - float(gcd_delays), roots))
In [718]:
plt.figure(figsize =(6,6))
fig = plt.scatter(zs.real,zs.imag)
plt.title('Roots mapped to the complex frequency plane',{'fontsize':24})
plt.xlabel('Real part',{'fontsize':24})
plt.ylabel('Imaginary part',{'fontsize':24})
Out[718]:
In [555]:
def find_instances_in_range(z,freq_range):
    '''
    Return all periodic copies of the root z (spaced along the imaginary axis)
    whose imaginary parts lie inside freq_range.
    '''
    T = 2.*np.pi / float(gcd_delays)  ## period of the roots along the imaginary axis
    if z.imag >= freq_range[0] and z.imag <= freq_range[1]:
        lst_in_range = [z]
        num_below = int((z.imag - freq_range[0])/T )
        num_above = int((freq_range[1] - z.imag)/T )
        lst_in_range += [z + 1j * disp for disp in (np.asarray(range(num_above))+1) * T]
        lst_in_range += [z - 1j * disp for disp in (np.asarray(range(num_below))+1) * T]
        return lst_in_range
    elif z.imag > freq_range[1]:
        min_dist = (int((z.imag - freq_range[1])/T)+1) * T
        max_dist = int((z.imag - freq_range[0]) / T) * T
        if min_dist > max_dist:
            return []
        else:
            return find_instances_in_range(z - 1j*min_dist,freq_range)
    else:
        min_dist = (int((freq_range[0] - z.imag)/T)+1) * T
        max_dist = int((freq_range[1] - z.imag)/T) * T
        if min_dist > max_dist:
            return []
        else:
            return find_instances_in_range(z + 1j*min_dist,freq_range)
In [556]:
z = zs[40]
z.imag
Out[556]:
In [562]:
## test cases...
In [558]:
find_instances_in_range(z,(-3000,-1000))
Out[558]:
In [559]:
find_instances_in_range(z,(-3000,1000))
Out[559]:
In [560]:
find_instances_in_range(z,(3000,5000))
Out[560]:
In [561]:
find_instances_in_range(z,(3000,1000))
Out[561]:
In [567]:
def find_roots_in_range(roots,freq_range):
    return np.concatenate([find_instances_in_range(r,freq_range) for r in roots])
In [571]:
roots_in_range = find_roots_in_range(zs,(-1000,1000))
In [581]:
plt.figure(figsize =(6,12))
fig = plt.scatter(roots_in_range.real,roots_in_range.imag,label='extended')
plt.title('Roots extended over the desired frequency range',{'fontsize':24})
plt.xlabel('Real part',{'fontsize':24})
plt.ylabel('Imaginary part',{'fontsize':24})
plt.scatter(zs.real,zs.imag,label='original',c='yellow')
plt.legend()
Out[581]:
In [285]:
sample_pol_length = [10,50,100,1000,2000,3000]
In [274]:
import time
In [275]:
ts = [time.clock()]
for pol_length in sample_pol_length:
    pol = [0]*(pol_length+1)
    pol[0] = 1
    pol[pol_length] = 5
    np.roots(pol)
    ts.append(time.clock())
In [276]:
delta_ts = [ts[i+1]-ts[i] for i in range(len(ts)-1)]
In [281]:
plt.plot(sort(sample_pol_length),sort(delta_ts))
plt.loglog()
Out[281]:
In [300]:
(np.log(delta_ts[-1])-np.log(delta_ts[0])) / (np.log(sample_pol_length[-1]) - np.log(sample_pol_length[0]))
Out[300]:
The slowest part of the Potapov analysis is generating the Potapov projection vectors once the roots are known. The runtime of this procedure is $O(n^2)$ because all of the previously found projectors must be multiplied through when finding each new projector. This results in slow performance when the number of modes is large (beyond roughly 1000 modes the processing time becomes substantial). Is there a way to reduce this overhead when the system is commensurate?
There are two ways to address this issue. The first is to simply note that in the high-Q approximation the linear modes don't interact much, so we can begin with a diagonal linear Hamiltonian. However, this yields no information about the B and C matrices, which still depend on the Potapov vectors. Although we can still get the spatial mode locations, it would be nice to have an efficient method to compute the Potapov projectors themselves.
When the roots become periodic in the frequency direction of the complex plane, we might anticipate that the corresponding Potapov vectors have the same periodicity (i.e. all poles $\{ p_k + i n T \mid n \in \mathbb{Z}\}$ share the same Potapov vector, where $T$ is the period of the system and $p_k$ is a pole). We can test this assertion using the modified limiting procedure described below.
In our original procedure we evaluated each $v_k$ in the product \begin{align} T(z) = \prod_k \left(I - v_k^\dagger v_k + v_k^\dagger v_k \left( \frac{z + \bar p_k}{z - p_k} \right) \right). \end{align} This was done by multiplying by $z-p_k$ and taking $z \to p_k$. Now suppose all the $v_k$ are the same for some subset of poles (ones related by the period). In that case we can instead multiply by $\prod_n (z - p_k - i n T)$ (together with a factor that normalizes both sides) and then take the limits $z_n \to p_k^{(n)} \equiv p_k + i n T$. (The limits commute when $T(z)$ is meromorphic; we can also take the limits simultaneously for these poles using the same $\delta$-$\epsilon$, since the poles are just translations of one another.) This gives \begin{align} \lim_{z_n \to p_k^{(n)}}\left[ T(z) \prod_n (z - p_k - i n T) \right]= \lim_{z_n \to p_k^{(n)}}\prod_n v_k^\dagger v_k \,(z + \bar p_k - i n T) ( z - p_k - i n T). \end{align} If all the $v_k$ vectors on the right-hand side are the same (and, say, normalized), then the right-hand side becomes \begin{align} v_k^\dagger v_k \lim_{z_n \to p_k^{(n)}}\prod_n (z_n + \bar p_k - i n T) ( z_n - p_k - i n T). \end{align} This suggests we can use the same $v_k$ vector for a particular root and its periodic copies, as long as we arrange the Potapov terms for the root and its periodic copies adjacent to one another in the expansion.
Another note: the Potapov expansion can be fast to evaluate because matrix multiplication only needs to be done between different commutative subspaces (i.e. all the periodic copies belong to one commensurate subset).
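As a minimal sketch of the bookkeeping this would require (group_periodic_poles is a hypothetical helper, not part of Potapov_Code, and the tolerance is an assumption), we can group the poles into periodic families, i.e. sets whose members differ by integer multiples of $iT$, so that one representative Potapov vector can be reused and each family's terms kept adjacent in the expansion:
def group_periodic_poles(poles, T, tol=1e-6):
    ## Group poles into families {p, p + i*T, p + 2i*T, ...}; returns a list
    ## of (representative, members) pairs.
    families = []
    for p in poles:
        for rep, members in families:
            ratio = (p - rep) / (1j * T)
            ## p joins rep's family if (p - rep) is an integer multiple of i*T.
            if abs(ratio.imag) < tol and abs(ratio.real - round(ratio.real)) < tol:
                members.append(p)
                break
        else:
            families.append((p, [p]))
    return families

## e.g. families = group_periodic_poles(roots_in_range, 2.*np.pi / float(gcd_delays))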
In [639]:
## The poles of the transfer function, no multiplicities included
poles = -zs
plt.scatter(poles.real,poles.imag)
Out[639]:
I ended up not using the stuff below. The Potapov vectors are implemented in Potapov_Code.Potapov.py.
In [719]:
T = X.T
In [720]:
from Potapov_Code.functions import limit
In [761]:
def Potapov_prod(z,poles,vecs,N):
    '''
    Original Potapov product.
    '''
    R = np.asmatrix(np.eye(N))
    for pole_i,vec in zip(poles,vecs):
        Pi = vec*vec.H
        R = R*(np.eye(N) - Pi + Pi * ( z + pole_i.conjugate() )/( z - pole_i) )
    return R
In [762]:
def get_Potapov_vecs(T,poles):
    '''
    Original method to get vectors.
    '''
    N = T(0).shape[0]
    found_vecs = []
    for pole in poles:
        L = (la.inv(Potapov_prod(pole,poles,found_vecs,N)) *
             limit(lambda z: (z-pole)*T(z),pole) ) ## Current bottleneck O(n^2).
        [eigvals,eigvecs] = la.eig(L)
        index = np.argmax(map(abs,eigvals))
        big_vec = np.asmatrix(eigvecs[:,index])
        found_vecs.append(big_vec)
    return found_vecs
In [ ]:
def get_Potapov_vecs_commensurate(T,commensurate_poles):
    '''
    Intended modification of the above for commensurate poles
    (unused; currently mirrors get_Potapov_vecs).
    '''
    N = T(0).shape[0]
    found_vecs = []
    for pole in commensurate_poles:
        L = (la.inv(Potapov_prod(pole,commensurate_poles,found_vecs,N)) *
             limit(lambda z: (z-pole)*T(z),pole) ) ## Current bottleneck O(n^2).
        [eigvals,eigvecs] = la.eig(L)
        index = np.argmax(map(abs,eigvals))
        big_vec = np.asmatrix(eigvecs[:,index])
        found_vecs.append(big_vec)
    return found_vecs
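For completeness, here is a rough, untested sketch (not from Potapov_Code; get_Potapov_vecs_periodic is an illustrative name) of how the grouping idea could be combined with the limiting procedure: compute one vector at each family representative and reuse it for every periodic copy, keeping each family's terms adjacent. It assumes the hypothetical group_periodic_poles helper sketched earlier, plus Potapov_prod and limit as defined above.
def get_Potapov_vecs_periodic(T, families):
    '''
    families: list of (representative, members) pairs, e.g. from
    group_periodic_poles. Returns (ordered_poles, vecs) with one shared
    vector per family, ordered so that periodic copies are adjacent.
    '''
    N = T(0).shape[0]
    ordered_poles = []
    found_vecs = []
    for rep, members in families:
        ## One limit per family, taken at the representative pole only.
        L = (la.inv(Potapov_prod(rep, ordered_poles, found_vecs, N)) *
             limit(lambda z: (z-rep)*T(z), rep))
        [eigvals, eigvecs] = la.eig(L)
        index = np.argmax(map(abs, eigvals))
        big_vec = np.asmatrix(eigvecs[:, index])
        ## Reuse the representative's vector for every periodic copy.
        for p in members:
            ordered_poles.append(p)
            found_vecs.append(big_vec)
    return ordered_poles, found_vecs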
In [763]:
n_tot = 21
In [793]:
def periodic_poles_prod(z,pole,gcd_delays,n_tot = 21):
    r'''
    Return the phase (product of unit-modulus factors) of
    :math:`\prod_n (z - pole - 2 \pi i n / T_{\text{gcd}})` over a symmetric range of n.
    '''
    n_range = np.asarray(range(n_tot)) - n_tot/2
    return np.exp(np.sum([1j*np.angle(z-pole - 1j*n*2*np.pi / gcd_delays) for n in n_range]))
In [794]:
## testing...
periodic_poles_prod(10000j-100,poles[0],float(gcd_delays),n_tot=33)
Out[794]: