In [53]:
# Shanshank
# Setup for a neural-network trial-solution solver of a 4-component
# neutrino-oscillation ODE system.  Each of the numeqs equations gets a
# single-hidden-layer network with hvar units; one network is parameterized
# by 3*hvar numbers (output weights, input weights, biases).

from scipy.special import expit
import scipy.optimize
from scipy.optimize import minimize #, differential_evolution
import numpy as np
from math import sin,cos
#xarr=var('xarr')
x=np.linspace(0,2,11)          # training grid: 11 points on [0, 2]
#x=np.array([1.0])
hvar=5                         # hidden units per network
numeqs=4                       # number of coupled ODEs (density-matrix components)
omega=1.0                      # oscillation frequency
theta=1.0                      # mixing angle (Hamiltonian uses cos/sin of 2*theta = 2.0)
bounds=np.zeros([3*4*5,2])     # box bounds [-5, 5] for differential_evolution (currently unused)
for i in range(3*4*5):
    bounds[i,0]=-5
    bounds[i,1]=5
partot=np.array(np.zeros(3*hvar*numeqs))   # flat parameter vector for all four networks
x0=[1.0,0.0,0.0,0.0]           # initial conditions rho(0) for the four components
#par = par.reshape(3,hvar)
print partot
print x
one=np.ones(hvar)              # helper vector used in dNdx for (1 - sigmoid)
def sig(x,par):
    # Hidden-layer activations for every grid point.
    # par is flat (3*hvar,): row 1 of the reshape holds the input weights,
    # row 2 the biases.  Returns a list of len(x) arrays of length hvar.
    weights = par.reshape(3,hvar)
    return [expit(xi*weights[1,:]+weights[2,:]) for xi in x]
def N(x,par):
    # Network output N(x) = sum_i v_i * sigmoid(w_i * x + b_i) at each grid
    # point; row 0 of the reshaped parameters holds the output weights v.
    layers = par.reshape(3,hvar)
    hidden = sig(x,par)
    return np.inner(layers[0,:],hidden)
def y(x,par,xini):
    # Trial solution y(x) = y(0) + x*N(x); the initial condition is satisfied
    # by construction at x = 0.
    correction = x*N(x,par)
    return xini + correction
def dNdx(x,par):
    """dN/dx at every grid point.

    Uses sigmoid'(z) = s*(1-s), so
        dN/dx = sum_i v_i * s_i(x) * (1 - s_i(x)) * w_i.

    Fix vs. original: the full forward pass sig(x, par) was recomputed twice
    inside the double loop (O(len(x)^2 * hvar^2) work); it is now evaluated
    once and the hidden-unit sum is a single matrix product.
    """
    par1=par.reshape(3,hvar)
    s = np.asarray(sig(x,par))                 # shape (len(x), hvar)
    # v_i and w_i folded together, then contracted over the hidden dimension.
    return np.dot(s*(1.0-s), par1[0,:]*par1[1,:])
def dydx(x,par):
    # d/dx of the trial solution: product rule on x*N(x) gives N + x*dN/dx.
    slope = dNdx(x,par)
    return N(x,par)+x*slope
def yp(partot):
    """Total squared-residual cost over the grid for the 4-component system

        r0' =  2 w sin(2t) r3
        r1' = -2 w sin(2t) r3
        r2' =  2 w cos(2t) r3
        r3' = -2 w cos(2t) r2 - w sin(2t) (r0 - r1)

    evaluated on the trial solutions y(x, par_k, x0[k]); partot is the flat
    (numeqs*3*hvar,) parameter vector of all four networks.
    """
    p = partot.reshape((numeqs,3,hvar))
    s2 = sin(2*theta)
    c2 = cos(2*theta)
    res0 = dydx(x,p[0,:,:])-2*omega*s2*y(x,p[3,:,:],x0[3])
    res1 = dydx(x,p[1,:,:])+2*omega*s2*y(x,p[3,:,:],x0[3])
    res2 = dydx(x,p[2,:,:])-2*omega*c2*y(x,p[3,:,:],x0[3])
    res3 = dydx(x,p[3,:,:])+2*omega*c2*y(x,p[2,:,:],x0[2])+omega*s2*y(x,p[0,:,:],x0[0])-omega*s2*y(x,p[1,:,:],x0[1])
    total = 0.0
    for res in (res0, res1, res2, res3):
        total = total + np.sum(0.5*res**2)
    # An optional trace-conservation penalty (rho00 + rho11 = 1) was tried
    # here and left disabled.
    return total
#def ypprime(par): 
#vout=minimize(yp,par,method='COBYLA',options={"maxfev": 10000})
#vout=minimize(yp,partot,method='SLSQP',options={"maxiter": 1000})
#vout=minimize(yp,partot,method='Nelder-Mead',tol=1e-5,options={"ftol":1e-3, "maxfev": 1000000,"maxiter":1000000})
#vout=differential_evolution(yp,bounds,strategy='best1bin',tol=0.1,maxiter=1,polish=True)
#print vout


[ 0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.
  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.
  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.  0.
  0.  0.  0.  0.  0.  0.]
[ 0.   0.2  0.4  0.6  0.8  1.   1.2  1.4  1.6  1.8  2. ]

In [61]:
# Here is the activation function

def act(x):
    """Logistic-sigmoid activation, 1/(1+exp(-x)), via scipy's expit."""
    sigmoid_value = expit(x)
    return sigmoid_value

# Density matrix in the forms that I wrote down on my Neutrino Physics notebook
# x is a real array of 12 arrays.

init = np.array([1.0,0.0,0.0,0.0])   # rho(0): system starts fully in the first flavour state

def rho2(x,ti,initialCondition):
    """Trial density-matrix components at time ti.

    x : 12 rows of parameters; rows (3i, 3i+1, 3i+2) are the output weights,
        input weights and biases of the network for component i.
    Component i of the result is initialCondition[i] + ti * N_i(ti).

    Fixes vs. original: the loop index is now an int (float indices produced
    by np.linspace(0,3,4) are rejected by modern NumPy), and the
    initialCondition argument is actually used instead of the global `init`
    (every visible caller passes `init`, so behaviour is unchanged for them).
    """
    elem = np.zeros(4)
    for i in range(4):
        elem[i] = np.sum(ti*x[3*i]*act(ti*x[3*i+1] + x[3*i+2]))
    return initialCondition + elem
    
# Hamiltonian of the problem, in terms of four real components

hamil = np.array( [  np.cos(2.0),np.sin(2.0) , np.sin(2.0),np.cos(2.0) ] )   # [cos(2θ), sin(2θ), sin(2θ), cos(2θ)] with θ=1; note cos(2.0) ≈ -0.416

# Cost function for each time step

def rhop(x,ti,initialCondition):
    """d(rho)/dt of the trial solution at time ti.

    For component i with trial y_i = init_i + ti*N_i(ti), the product rule
    gives y_i' = N_i + ti*dN_i/dti, using sigmoid' = s*(1-s).
    initialCondition is accepted for signature symmetry with rho2 but does
    not enter the derivative.

    Fixes vs. original: integer loop indices (float indices produced by
    np.linspace(0,3,4) are rejected by modern NumPy), and the activation
    vector is evaluated once per component instead of three times.
    """
    rhoprime = np.zeros(4)
    for i in range(4):
        s = act(ti*x[3*i+1] + x[3*i+2])        # hidden activations at ti
        rhoprime[i] = ( np.sum(x[3*i]*s)
                        + np.sum(ti*x[3*i]*s*(1.0 - s)*x[3*i+1]) )
    return rhoprime


## This is the regularization

regularization = 0.0001   # L2 penalty weight; only referenced by the commented-out return in costi

def costi(x,ti,initialCondition):
    """Squared ODE residual at a single time step ti.

    With hamil = [cos2θ, sin2θ, sin2θ, cos2θ], the four residuals enforce:
        rho0' =  2 sin(2θ) rho3
        rho1' = -2 sin(2θ) rho3
        rho2' =  2 cos(2θ) rho3
        rho3' = -2 cos(2θ) rho2 + sin(2θ) (rho1 - rho0)
    An optional trace-conservation term and an L2 regularization term were
    tried here and left disabled.
    """
    rhoi = rho2(x,ti,initialCondition)
    rhopi = rhop(x,ti,initialCondition)

    residuals = np.array([
        rhopi[0] - 2.0*rhoi[3]*hamil[1],
        rhopi[1] + 2.0*rhoi[3]*hamil[1],
        rhopi[2] - 2.0*rhoi[3]*hamil[0],
        rhopi[3] + 2.0*rhoi[2]*hamil[0] - hamil[1] * (rhoi[1] - rhoi[0] ),
    ])
    return np.sum(residuals**2)
    
def cost2(x,t,initialCondition):
    """Total cost: 0.5 * sum over all time steps in t of the per-step
    residual costi(x, ti, initialCondition).

    Fixes vs. original: the lambda argument shadowed the parameter `t`, and
    map() yields an iterator on Python 3 which np.sum does not reduce
    element-wise; a list comprehension is correct under both versions.
    """
    perStep = [costi(x,ti,initialCondition) for ti in t]
    return 0.5*np.sum(perStep)

In [62]:
# Cross-check: the two independent implementations (y vs rho2) agree on one
# randomly initialised parameter set.
array12 = np.asarray(np.split(np.random.rand(2,60)[0],12))   # 12 rows of 5 random parameters
#array12 = np.asarray(np.split(np.ones(60),12))

ssfun11 = y(np.array([2.0]),array12.reshape((numeqs,3,hvar))[1,:,:],x0[1])   # component 1 at t=2, first implementation
mifun11 = rho2(array12,2,init)[1]                                            # same quantity, second implementation
print mifun11, ssfun11, ssfun11/mifun11 ## Good we have the same function values


2.36779048927 [ 2.36779049] [ 1.]

In [63]:
# rho prime
index=3
mirhop=rhop(array12,1,init)                                   # all four components at t=1
ssrhop=dydx(x,array12.reshape((numeqs,3,hvar))[index,:,:])    # component `index` over the whole grid
print mirhop, ssrhop
## OK we have exactly the same rho prime.
## (mirhop[index] should match ssrhop at the grid point x = 1.0)


[ 1.89300839  1.21239164  2.6472061   1.35053111] [ 1.08430389  1.14225019  1.19834742  1.25202231  1.30284305  1.35053111
  1.39495512  1.43611112  1.47409488  1.50907187  1.54124914]

In [64]:
# Cross-check: the two cost implementations agree on the same parameters.
mine = cost2(array12,x,init)
ss = yp(array12)
print mine, ss, ss/mine
#cost(xresult,np.array([0,4,11]),init)


179.054795807 179.054795807 1.0

In [ ]:


In [65]:
print hamil   # as filled in In[61]: [cos(2), sin(2), sin(2), cos(2)]


[-0.41614684  0.90929743  0.90929743 -0.41614684]

In [66]:
np.cos(2)   # confirms hamil[0] = cos(2.0) is negative


Out[66]:
-0.41614683654714241

In [ ]:
# with ramdom initial guess

#initGuess = np.asarray(np.split(np.random.rand(5,60)[0],12))
initGuess = costvFResultSLSQP.get("x")
#initGuess = np.split(np.zeros(60),12)
endpoint = 2
tlin = np.linspace(0,endpoint,11)

costF = lambda x: cost2(x,tlin,init)

costvFResultSLSQP = minimize(costF,initGuess,method='Nelder-Mead',tol=1e-8,options={"ftol":1e-4, "maxfev": 1000000,"maxiter":100000})

print costvFResultSLSQP

In [ ]:
xresult = costvFResultSLSQP.get("x")
plttlin=np.linspace(0,endpoint,20)
pltdata11 = np.array([])
for i in plttlin:
    pltdata11 = np.append(pltdata11 ,(rho2(xresult,i,init)[0] ) )
    
print pltdata11

ss11 = y(plttlin,xresult.reshape((numeqs,3,hvar))[0,:,:],x0[0])

%matplotlib inline
import matplotlib.pyplot as plt

plt.plot(plttlin,pltdata11,"b4-",label="vac_rho11")
plt.show()

In [ ]:
print ss11   # first-implementation rho_11 on the plotting grid, for comparison with pltdata11

In [ ]: