In [226]:
import numpy as np
import tensorflow as tf
from scipy.spatial import distance_matrix # used by generate_landscape2 below
from scipy import sparse                  # used to scatter X values onto the grid

In [286]:
def generate_landscape2(sizeGrid,nobsMax,K,b0,b1,b2,errStd,cutW):
    """
    Generate a landscape with a random number  of observations, only the 
    maximum number of neighboring Observations is set (nObsMax)
    
    Inputs
        nObs = Number of farms
        K = Number of characteristics X
        b0 = constant
        b1 = coef of own effect
        b2 = coef of WX
        errStd = std of error terms
        cutW = disctance cut off
    """ 
    

    # Draw the number of neighboring farms
    nObs = int(np.random.randint(0, nobsMax))
    
    # Create location of observations
    locOwn = np.array([[int(sizeGrid/2),int(sizeGrid/2)]])
    while True:
        loc = np.random.randint(0,sizeGrid, size=(nObs,2))
        locAll = np.concatenate((locOwn, loc), axis=0)
        # Make sure that no two farms share the same location
        locAll = np.unique(locAll, axis=0)
        if nObs+1 == locAll.shape[0]:
            break

    # Create own characteristics
    X = np.random.randint(0,100, size=(nObs,K))
    Xown = np.random.randint(0,100, size=(1,K))
    # Create spatial weighting matrix W (binary contiguity within cutW)
    W = distance_matrix(locOwn, loc)<=cutW
    row_sum = W.sum(axis=1,keepdims=True) # calc row sum
    if row_sum!=0:
        W = W/row_sum  # row standardize (neighbor average)

    # Create error
    err = np.random.normal(0, errStd, 1)
    # Calculate Y
    Y = b0 + np.matmul(Xown,b1)+ np.matmul(np.matmul(W,X),b2)+err

    assert Y.shape == (1,1)

    maps = np.zeros((sizeGrid,sizeGrid,K))

    # Scatter the characteristics onto the grid, one channel per k
    for k in range(0,K):
        I = np.concatenate((locOwn[:,0],loc[:,0]),axis=0)
        J = np.concatenate((locOwn[:,1],loc[:,1]),axis=0)
        V = np.concatenate((Xown[:,k],X[:,k]),axis=0)
        A = sparse.coo_matrix((V,(I,J)),shape=(sizeGrid,sizeGrid))
        maps[:,:,k] = A.todense()
    return maps,Y,X,Xown,W,loc,locOwn

In [290]:
sizeGrid = 9 # size of the grid
maxNObs = 30 # maximum number of neighboring observations
K = 1 # number of features

# set coefficients
b0 = np.random.normal(5, 2, 1)
b1 = np.random.normal(5, 2, K).reshape(K,1) # coef for own characteristics
b2 = np.random.normal(5, 2, K).reshape(K,1) # coef for neighbor characteristics
errStd = 0 # error added to Y
cutW = distance_matrix([[4,4]], [[4+2,4+2]]) # cut-off distance for W: all cells no more than 2 steps away (sqrt(8))

In [292]:
maps,y,x,xown,w,loc,locOwn = generate_landscape2(sizeGrid,maxNObs,K,b0,b1,b2,errStd,cutW)

#print("y: \n",y)
#print("x: \n",x)
#print("xown: \n",xown)

print("map k=0: \n",maps[:,:,0])
#print("map k=1: \n",maps[:,:,1])
#print("map k=2: \n",maps[:,:,2])

#print("loc: \n",loc)
#print("w: \n",w)
#print("locOwn: \n",locOwn)


map k=0: 
 [[ 68.   0.   0.   9.   0.   0.  24.  52.   0.]
 [  0.   0.   0.   0.   0.   0.   0.  98.   0.]
 [ 65.   0.   0.   2.   0.   0.   0.   0.   0.]
 [ 81.   0.  87.   0.  42.   0.   0.   0.   0.]
 [ 71.   0.   0.   0.  84.   0.   0.  37.   0.]
 [ 70.   0.   0.  93.  59.   0.   0.   0.  44.]
 [  0.   0.   0.   0.   0.   0.   0.  87.   0.]
 [  0.   0.   0.   0.   0.   0.   0.   0.   0.]
 [  0.   0.   0.   0.   0.  91.   0.   0.   0.]]
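
Since errStd is zero, the pieces returned above can be used to verify the data-generating equation directly. A minimal added check (not part of the original run):

In [ ]:
# Sanity check: with errStd = 0, y should equal b0 + Xown b1 + (W X) b2 exactly.
y_check = b0 + np.matmul(xown, b1) + np.matmul(np.matmul(w, x), b2)
print(np.allclose(y, y_check)) # expect True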

In [297]:
mapNeig = np.copy(maps)
mapNeig[int(sizeGrid/2),int(sizeGrid/2)] = 0 # Set mid point (own location) to zero
mapNeigBinary = (maps>0)*1 # note: binarizes the original maps, so the own cell stays 1

print(mapNeig[:,:,0])
print(mapNeigBinary[:,:,0])


[[ 68.   0.   0.   9.   0.   0.  24.  52.   0.]
 [  0.   0.   0.   0.   0.   0.   0.  98.   0.]
 [ 65.   0.   0.   2.   0.   0.   0.   0.   0.]
 [ 81.   0.  87.   0.  42.   0.   0.   0.   0.]
 [ 71.   0.   0.   0.   0.   0.   0.  37.   0.]
 [ 70.   0.   0.  93.  59.   0.   0.   0.  44.]
 [  0.   0.   0.   0.   0.   0.   0.  87.   0.]
 [  0.   0.   0.   0.   0.   0.   0.   0.   0.]
 [  0.   0.   0.   0.   0.  91.   0.   0.   0.]]
[[1 0 0 1 0 0 1 1 0]
 [0 0 0 0 0 0 0 1 0]
 [1 0 0 1 0 0 0 0 0]
 [1 0 1 0 1 0 0 0 0]
 [1 0 0 0 1 0 0 1 0]
 [1 0 0 1 1 0 0 0 1]
 [0 0 0 0 0 0 0 1 0]
 [0 0 0 0 0 0 0 0 0]
 [0 0 0 0 0 1 0 0 0]]
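
As a cross-check, the neighbor channel plus the cut-off distance suffices to rebuild the spatial lag W X at the own location. A sketch assuming K = 1 and that no neighbor has a characteristic of exactly 0 (zero values are indistinguishable from empty cells in this encoding):

In [ ]:
# Added sketch: average the nonzero neighbor cells within cutW of the centre;
# this should reproduce np.matmul(w, x) for the landscape above.
mid = int(sizeGrid/2)
ii, jj = np.indices((sizeGrid, sizeGrid))
dist = np.sqrt((ii - mid)**2 + (jj - mid)**2)
mask = (dist <= cutW[0,0]) & (mapNeig[:,:,0] > 0)
wx_check = mapNeig[:,:,0][mask].mean() if mask.any() else 0.0
print(wx_check, np.matmul(w, x))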

In [338]:
F1 = np.array([[1, 1, 1],
               [1, 1, 1],
               [1, 1, 1]]).reshape(3,3,1)
F11 = np.concatenate((F1,F1),axis=2).reshape(3,3,2,1)
F111 = np.concatenate((F11,F11),axis=3)
F111.shape


Out[338]:
(3, 3, 2, 2)
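
Stacking copies along new trailing axes like this is what np.stack does in one call; an equivalent added sketch:

In [ ]:
# Same (3, 3, 2, 2) array via np.stack instead of reshape/concatenate.
F111_alt = np.stack([np.stack([F1[:,:,0]]*2, axis=2)]*2, axis=3)
print(F111_alt.shape, np.array_equal(F111, F111_alt))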

In [386]:
Fsize = 3

# Build a bank of 9 averaging filters: output channel j-1 is a 3x3 block of
# ones divided by j, i.e. it computes (3x3 block sum)/j.
F = np.ones((Fsize,Fsize)).reshape(Fsize,Fsize,1,1)

for j in range(2,sizeGrid+1):
    Fi = np.ones((Fsize,Fsize))/j
    Fi = Fi.reshape(Fsize,Fsize,1,1)
    F = np.concatenate((F,Fi),axis=3)
    

print('F.shape ',F.shape)

print(F[:,:,0,8])


F.shape  (3, 3, 1, 9)
[[ 0.11111111  0.11111111  0.11111111]
 [ 0.11111111  0.11111111  0.11111111]
 [ 0.11111111  0.11111111  0.11111111]]

In [406]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
m = 1 # number of samples

inMap =tf.convert_to_tensor(mapNeigBinary.reshape(m,sizeGrid,sizeGrid,1), dtype=tf.float32)

kernel1 = tf.convert_to_tensor(F,
             dtype=tf.float32)

conv1 = tf.nn.conv2d(inMap,filter=kernel1,padding='VALID',strides=[1,3,3,1])

# Soft indicator: tanh((x-1)*10000) is ~ -1 below 1, 0 at 1 and ~ +1 above 1,
# so channel j flags blocks holding fewer / exactly / more than j+1 farms.
Z1 = tf.tanh( (conv1-1)*10000)

print(sess.run(inMap)[0,:,:,0])

conv1_val = sess.run(conv1) # evaluate once instead of once per channel
print(conv1_val.shape)
for j in range(0,sizeGrid):
    print('Conv1 ',str(j+1),' \n',conv1_val[0,:,:,j])


[[ 1.  0.  0.  1.  0.  0.  1.  1.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  1.  0.]
 [ 1.  0.  0.  1.  0.  0.  0.  0.  0.]
 [ 1.  0.  1.  0.  1.  0.  0.  0.  0.]
 [ 1.  0.  0.  0.  1.  0.  0.  1.  0.]
 [ 1.  0.  0.  1.  1.  0.  0.  0.  1.]
 [ 0.  0.  0.  0.  0.  0.  0.  1.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  1.  0.  0.  0.]]
(1, 3, 3, 9)
Conv1  1  
 [[ 2.  2.  3.]
 [ 4.  4.  2.]
 [ 0.  1.  1.]]
Conv1  2  
 [[ 1.   1.   1.5]
 [ 2.   2.   1. ]
 [ 0.   0.5  0.5]]
Conv1  3  
 [[ 0.66666669  0.66666669  1.        ]
 [ 1.33333337  1.33333337  0.66666669]
 [ 0.          0.33333334  0.33333334]]
Conv1  4  
 [[ 0.5   0.5   0.75]
 [ 1.    1.    0.5 ]
 [ 0.    0.25  0.25]]
Conv1  5  
 [[ 0.40000001  0.40000001  0.60000002]
 [ 0.80000001  0.80000001  0.40000001]
 [ 0.          0.2         0.2       ]]
Conv1  6  
 [[ 0.33333334  0.33333334  0.5       ]
 [ 0.66666669  0.66666669  0.33333334]
 [ 0.          0.16666667  0.16666667]]
Conv1  7  
 [[ 0.2857143   0.2857143   0.42857146]
 [ 0.5714286   0.5714286   0.2857143 ]
 [ 0.          0.14285715  0.14285715]]
Conv1  8  
 [[ 0.25   0.25   0.375]
 [ 0.5    0.5    0.25 ]
 [ 0.     0.125  0.125]]
Conv1  9  
 [[ 0.22222222  0.22222222  0.33333334]
 [ 0.44444445  0.44444445  0.22222222]
 [ 0.          0.11111111  0.11111111]]
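
Channel j of conv1 is (block count)/(j+1), so where channel j equals exactly 1 (and Z1 is therefore 0), the block contains exactly j+1 farms. A plain-numpy cross-check of the stride-3 block counts (added, not part of the original run):

In [ ]:
# Recompute the 3x3 stride-3 block counts directly.
blocks = mapNeigBinary[:,:,0].reshape(3, 3, 3, 3).swapaxes(1, 2)
print(blocks.sum(axis=(2, 3))) # should equal the 'Conv1 1' output above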

In [281]:
sizeGrid = 9
#arg = tf.placeholder(tf.float32, shape=(sizeGrid, sizeGrid))


nparg = np.linspace(0, sizeGrid*sizeGrid, num=sizeGrid*sizeGrid,dtype ='int').reshape(1,sizeGrid,sizeGrid,1) # linspace(0,81,num=81) steps by 81/80, so the int cast yields 0..79 then 81 (hence sumArg = 3241 below)
arg =tf.convert_to_tensor(nparg, dtype=tf.float32)


kernel0 = tf.convert_to_tensor(
             np.array([10000]).reshape(1,1,1,1),
             dtype=tf.float32)

kernel1 = tf.convert_to_tensor(
             np.array([[1, 1, 1],
                       [1, 1, 1],
                       [1, 1, 1]]).reshape(3,3,1,1),
             dtype=tf.float32)


conv1 = tf.nn.convolution(arg,filter=kernel1,padding='VALID',strides=[3,3]) 


conv0 = tf.nn.convolution(arg,filter=kernel0,padding='VALID',strides=[1,1])
Z1 = tf.sigmoid(conv0-10) # steep gate: ~0 where the cell is 0, ~1 for any cell >= 1

sumInput = tf.reduce_sum(arg)
sumConv = tf.reduce_sum(conv1)
print(sess.run(conv1)[0,:,:,0])
print('Z1 ',np.round(sess.run(Z1)[0,:,:,0]))
print('sumArg',sess.run(sumInput))
print('sumConv',sess.run(sumConv))


[[  90.  117.  144.]
 [ 333.  360.  387.]
 [ 576.  603.  631.]]
Z1  [[ 0.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]
 [ 1.  1.  1.  1.  1.  1.  1.  1.  1.]]
sumArg 3241.0
sumConv 3241.0
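
The factor of 10000 makes the shifted sigmoid act as a near-exact indicator of nonzero cells; a quick added illustration in plain numpy:

In [ ]:
# sigmoid(10000*x - 10) jumps from ~0 to ~1 around x = 0.001.
xs = np.array([0.0, 0.0005, 0.001, 0.002, 1.0])
print(1/(1 + np.exp(-(xs*10000 - 10)))) # ~[0, 0.007, 0.5, 1, 1]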

In [ ]:
import numpy as np
from scipy.signal import convolve2d

In [37]:
sizeGrid = 9
arr = np.linspace(0, sizeGrid*sizeGrid, num=sizeGrid*sizeGrid,dtype ='int').reshape(sizeGrid,sizeGrid) # same linspace quirk as above: the last element is 81
print(arr)


kernel1 = np.array([[1, 0],
                   [0, 0]]) # pick the top-left cell of each window

kernel2 = np.array([[0, 0],
                   [0, 1]]) # pick the bottom-right cell
kernel3 = np.array([[1, 0],
                   [0, 0]]) # same as kernel1
kernel4 = np.array([[0, 0],
                   [0, 1]]) # same as kernel2
kernel5 = np.array([[1, 1],
                   [1, 1]]) # 2x2 block sum
kernel6 = np.array([[1, 0],
                   [0, 1]]) # sum of the two diagonal cells

conv1 = convolve2d(arr, kernel1[::-1, ::-1], 'same') # 'same' keeps the 9x9 shape (matches the output below)
print(conv1)
conv2 = convolve2d(conv1, kernel2[::-1, ::-1], 'valid')
print(conv2)
conv3 = convolve2d(conv2, kernel3[::-1, ::-1], 'valid')
print(conv3)
conv4 = convolve2d(conv3, kernel4[::-1, ::-1], 'valid')
print(conv4)
conv5 = convolve2d(conv4, kernel5[::-1, ::-1], 'valid')
print(conv5)
conv6 = convolve2d(conv5, kernel6[::-1, ::-1], 'valid')
print(conv6)


[[ 0  1  2  3  4  5  6  7  8]
 [ 9 10 11 12 13 14 15 16 17]
 [18 19 20 21 22 23 24 25 26]
 [27 28 29 30 31 32 33 34 35]
 [36 37 38 39 40 41 42 43 44]
 [45 46 47 48 49 50 51 52 53]
 [54 55 56 57 58 59 60 61 62]
 [63 64 65 66 67 68 69 70 71]
 [72 73 74 75 76 77 78 79 81]]
[[ 0  0  0  0  0  0  0  0  0]
 [ 0  0  1  2  3  4  5  6  7]
 [ 0  9 10 11 12 13 14 15 16]
 [ 0 18 19 20 21 22 23 24 25]
 [ 0 27 28 29 30 31 32 33 34]
 [ 0 36 37 38 39 40 41 42 43]
 [ 0 45 46 47 48 49 50 51 52]
 [ 0 54 55 56 57 58 59 60 61]
 [ 0 63 64 65 66 67 68 69 70]]
[[ 0  1  2  3  4  5  6  7]
 [ 9 10 11 12 13 14 15 16]
 [18 19 20 21 22 23 24 25]
 [27 28 29 30 31 32 33 34]
 [36 37 38 39 40 41 42 43]
 [45 46 47 48 49 50 51 52]
 [54 55 56 57 58 59 60 61]
 [63 64 65 66 67 68 69 70]]
[[ 0  1  2  3  4  5  6]
 [ 9 10 11 12 13 14 15]
 [18 19 20 21 22 23 24]
 [27 28 29 30 31 32 33]
 [36 37 38 39 40 41 42]
 [45 46 47 48 49 50 51]
 [54 55 56 57 58 59 60]]
[[10 11 12 13 14 15]
 [19 20 21 22 23 24]
 [28 29 30 31 32 33]
 [37 38 39 40 41 42]
 [46 47 48 49 50 51]
 [55 56 57 58 59 60]]
[[ 60  64  68  72  76]
 [ 96 100 104 108 112]
 [132 136 140 144 148]
 [168 172 176 180 184]
 [204 208 212 216 220]]
[[160 168 176 184]
 [232 240 248 256]
 [304 312 320 328]
 [376 384 392 400]]

In [33]:
print(np.sum(conv4))
print(np.sum(conv5))
print(np.sum(conv6))


1260
3500
4480
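
The double flip kernel[::-1, ::-1] turns scipy's convolution into plain cross-correlation, which is why it appears in every call above. A minimal added check:

In [ ]:
from scipy.signal import correlate2d
k = np.array([[1, 0], [0, 2]])
a = np.arange(16).reshape(4, 4)
# convolve2d with a doubly-flipped kernel equals correlate2d with the kernel itself
print(np.array_equal(convolve2d(a, k[::-1, ::-1], 'valid'),
                     correlate2d(a, k, 'valid'))) # True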

In [79]:
sizeGrid = 9
arr = np.linspace(0, sizeGrid*sizeGrid, num=sizeGrid*sizeGrid,dtype ='int').reshape(sizeGrid,sizeGrid)
print('input \n ', arr)


kernel1 = np.array([[0, 0, 0],
                   [0, 0, 0],
                   [0, 0, 1]]) # pick the bottom-right cell of each window

kernel2 = np.array([[1, 0, 0],
                   [0, 0, 0],
                   [0, 0, 0 ]]) # pick the top-left cell
kernel3a = np.array([[1, 1, 1],
                   [1, 1, 1],
                   [1, 1, 1]]) # 3x3 block sum
kernel3b = np.array([[0, 0, 0],
                   [0, -1, -1],
                   [0, -1, -1]]) # subtract the bottom-right 2x2 block

conv1 = convolve2d(arr, kernel1[::-1, ::-1],'valid' )
print('conv1 \n ',conv1)
conv2 = convolve2d(conv1, kernel2[::-1, ::-1],'valid')
print('conv2 \n ',conv2)
print('sum(conv2) \n',np.sum(conv2))
conv3a = convolve2d(conv2, kernel3a[::-1, ::-1],'valid')
print('conv3a \n ',conv3a)
print('sum(conv3a) \n',np.sum(conv3a))
conv3b = convolve2d(conv2, kernel3b[::-1, ::-1],'valid')
print('conv3b \n ',conv3b)
#print('conv3a+conv3b \n ',np.sum(conv3a+conv3b))


input 
  [[ 0  1  2  3  4  5  6  7  8]
 [ 9 10 11 12 13 14 15 16 17]
 [18 19 20 21 22 23 24 25 26]
 [27 28 29 30 31 32 33 34 35]
 [36 37 38 39 40 41 42 43 44]
 [45 46 47 48 49 50 51 52 53]
 [54 55 56 57 58 59 60 61 62]
 [63 64 65 66 67 68 69 70 71]
 [72 73 74 75 76 77 78 79 81]]
conv1 
  [[20 21 22 23 24 25 26]
 [29 30 31 32 33 34 35]
 [38 39 40 41 42 43 44]
 [47 48 49 50 51 52 53]
 [56 57 58 59 60 61 62]
 [65 66 67 68 69 70 71]
 [74 75 76 77 78 79 81]]
conv2 
  [[20 21 22 23 24]
 [29 30 31 32 33]
 [38 39 40 41 42]
 [47 48 49 50 51]
 [56 57 58 59 60]]
sum(conv2) 
 1000
conv3a 
  [[270 279 288]
 [351 360 369]
 [432 441 450]]
sum(conv3a) 
 3240
conv3b 
  [[-140 -144 -148]
 [-176 -180 -184]
 [-212 -216 -220]]
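
By linearity of convolution, adding conv3a and conv3b equals a single correlation with the summed kernel, which keeps only the top row and left column of each 3x3 window. A small added check:

In [ ]:
# conv3a + conv3b equals one pass with the combined kernel.
kernel3ab = kernel3a + kernel3b # [[1,1,1],[1,0,0],[1,0,0]]
print(np.array_equal(conv3a + conv3b,
                     convolve2d(conv2, kernel3ab[::-1, ::-1], 'valid'))) # True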

In [80]:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib
import numpy as np
from scipy.spatial import distance_matrix
from scipy import sparse

In [206]:
def generate_landscape3(sizeGrid,nobsMax,K,b0,b1,b2,errStd,cutW):
    """
    Generate a landscape with a random number  of observations, only the 
    maximum number of neighboring Observations is set (nObsMax)
    
    Use sum of neighbors instead of average !!!
    
    Inputs
        nObs = Number of farms
        K = Number of characteristics X
        b0 = constant
        b1 = coef of own effect
        b2 = coef of WX
        errStd = std of error terms
        cutW = disctance cut off
    """ 
    

    # Draw the number of neighboring farms
    nObs = int(np.random.randint(0, nobsMax))
    
    # Create location of observations
    locOwn = np.array([[int(sizeGrid/2),int(sizeGrid/2)]])
    while True:
        loc = np.random.randint(0,sizeGrid, size=(nObs,2))
        locAll = np.concatenate((locOwn, loc), axis=0)
        # Make sure that no two farms share the same location
        locAll = np.unique(locAll, axis=0)
        if nObs+1 == locAll.shape[0]:
            break

    # Create own characteristics
    X = np.random.randint(0,100, size=(nObs,K))
    Xown = np.random.randint(0,100, size=(1,K))
    # Create spatial weighting matrix W (binary contiguity within cutW)
    W = distance_matrix(locOwn, loc)<=cutW
    row_sum = W.sum(axis=1,keepdims=True) # calc row sum
    if row_sum!=0:
        W = W/row_sum  # row standardize -- drop this division to use neighbor sums

    # Create error
    err = np.random.normal(0, errStd, 1)
    # Calculate Y
    Y = b0 + np.matmul(Xown,b1)+ np.matmul(np.matmul(W,X),b2)+err

    assert Y.shape == (1,1)

    maps = np.zeros((sizeGrid,sizeGrid,K))

    # Scatter the characteristics onto the grid, one channel per k
    for k in range(0,K):
        I = np.concatenate((locOwn[:,0],loc[:,0]),axis=0)
        J = np.concatenate((locOwn[:,1],loc[:,1]),axis=0)
        V = np.concatenate((Xown[:,k],X[:,k]),axis=0)
        A = sparse.coo_matrix((V,(I,J)),shape=(sizeGrid,sizeGrid))
        maps[:,:,k] = A.todense()
    return maps,Y,X,Xown,W,loc,locOwn

In [82]:
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K # caution: shadows the feature count K defined earlier (it is reset to 1 before use below)
from scipy.spatial import distance_matrix
from scipy import sparse
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split


Using TensorFlow backend.

In [83]:
def mapToX2Canal(N,nObsMax):
    """Draw N landscapes and split each map into two channels (K = 1 assumed):
    channel 0 = neighbor characteristics (own cell zeroed),
    channel 1 = the own characteristic at the centre cell only."""
    mid = int(sizeGrid/2)
    X = np.zeros((N,sizeGrid,sizeGrid,2))
    Y = np.zeros((N,1))

    for i in range(0,N):
        maps,y,x,xown,w,loc,locOwn = generate_landscape3(sizeGrid,nObsMax,K,b0,b1,b2,errStd,cutW)

        Y[i,:] = y
        X[i,:,:,:] = maps # broadcast the single channel into both
        X[i,mid,mid,0] = 0 # zero the own cell in the neighbor channel
        X[i,:,:,1] = np.zeros((sizeGrid,sizeGrid))
        X[i,mid,mid,1] = maps[mid,mid,0] # own characteristic in its own channel

    return X,Y
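
A small added shape check of the two-channel encoding under the 9x9 settings defined earlier (not part of the original run; K must be re-set because the keras import above shadowed it):

In [ ]:
K = 1 # re-set the feature count shadowed by the keras backend import
Xs, Ys = mapToX2Canal(5, maxNObs)
print(Xs.shape, Ys.shape) # (5, 9, 9, 2) (5, 1)
print(Xs[0, 4, 4, 0]) # 0.0: own cell zeroed in the neighbor channel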

In [84]:
def standardSplit(X,Y,test_size=0.1,random_state=42):

    # Standardize features and targets to [0, 1]
    x_min_max = MinMaxScaler()
    y_min_max = MinMaxScaler()
    X_minmax = x_min_max.fit_transform(X.reshape(X.shape[0],-1)).reshape(X.shape)
    Y_minmax = y_min_max.fit_transform(Y)

    # Split sample into training and test set
    x_train, x_test, y_train, y_test = train_test_split(X_minmax, Y_minmax, test_size=test_size, random_state=random_state)

    return x_min_max, y_min_max, x_train, x_test, y_train, y_test

In [ ]:
#model 0
def model0(input_shape,num_classes):
    # Kernel sizes and filter counts below match the executed model
    # (cf. model.summary() further down): three 3x3 convs with 1, 2, 4 filters.
    model = Sequential()
    model.add(Conv2D(1, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape,
                     name='conv1'))
    model.add(Conv2D(2, kernel_size=(3, 3),
                     activation='relu',
                     name='conv2'))
    model.add(Conv2D(4, kernel_size=(3, 3),
                     activation='relu',
                     name='conv3'))

    #model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Dropout(0.25))
    model.add(Flatten())

    model.add(Dense(9, activation='relu'))
    #model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='relu'))

    model.compile(loss='mean_squared_error',
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['mae'])
    return model

In [201]:
input_shape


Out[201]:
(9, 9, 2)

In [141]:
def r2(y_true, y_pred):
    """Calculate and return R2.

    y_true -- the observed values
    y_pred -- the predicted values
    """
    SS_res =  np.sum(np.square(y_true - y_pred)) 
    SS_tot = np.sum(np.square(y_true - np.mean(y_true))) 
    return ( 1 - SS_res/SS_tot )
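
A quick added sanity check of r2 on hand-made values:

In [ ]:
yt = np.array([1.0, 2.0, 3.0, 4.0])
print(r2(yt, yt)) # 1.0: perfect prediction
print(r2(yt, np.full_like(yt, yt.mean()))) # 0.0: mean-only predictor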

In [225]:
sizeGrid = 32 # size of the grid
nObs = 6 # number of observations (unused below; nObsMax drives the draw)
K = 1 # number of features
N = 60000
nObsMax = 300

# set coefficients
b0 = np.random.normal(5, 2, 1)
b1 = np.random.normal(5, 2, K).reshape(K,1) # coef for own characteristics
b2 = np.random.normal(5, 2, K).reshape(K,1) # coef for neighbor characteristics
errStd = 0 # error added to Y
cutW = distance_matrix([[4,4]], [[4+2,4+2]]) # cut-off distance for W: all cells no more than 2 steps away


# Generate data 
X,Y = mapToX2Canal(N,nObsMax)
#X,Y = mapToX2Canal(N,nObsMax)

# Standardize data and split in training and test
x_min_max, y_min_max, x_train, x_test, y_train, y_test = standardSplit(X,Y)


---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-225-246bd07865c2> in <module>()
     14 
     15 # Generate data
---> 16 X,Y = mapToX2Canal(N,nObsMax)
     17 #X,Y = mapToX2Canal(N,nObsMax)
     18 

<ipython-input-83-18956e1ba67c> in mapToX2Canal(N, nObsMax)
      8     for i in range(0,N):
      9         #
---> 10         maps,y,x,xown,w,loc,locOwn = generate_landscape3(sizeGrid,nObsMax,K,b0,b1,b2,errStd,cutW)
     11 
     12         Y[i,:] = y

<ipython-input-206-cf849e1ca7d4> in generate_landscape3(sizeGrid, nobsMax, K, b0, b1, b2, errStd, cutW)
     26         locAll = np.concatenate((locOwn, loc), axis=0)
     27         # Make sure that farm are not one same location
---> 28         locAll = np.unique(locAll, axis=0)
     29         if nObs+1 == locAll.shape[0]:
     30             break

~\AppData\Local\conda\conda\envs\tensorflow\lib\site-packages\numpy\lib\arraysetops.py in unique(ar, return_index, return_inverse, return_counts, axis)
    241 
    242     output = _unique1d(consolidated, return_index,
--> 243                        return_inverse, return_counts)
    244     if not (return_index or return_inverse or return_counts):
    245         return reshape_uniq(output)

~\AppData\Local\conda\conda\envs\tensorflow\lib\site-packages\numpy\lib\arraysetops.py in _unique1d(ar, return_index, return_inverse, return_counts)
    253     Find the unique elements of an array, ignoring shape.
    254     """
--> 255     ar = np.asanyarray(ar).flatten()
    256 
    257     optional_indices = return_index or return_inverse

KeyboardInterrupt: 

In [222]:
input_shape = X[0,:,:,:].shape # (9, 9, 2): the 32x32 run above was interrupted, so X still holds the earlier 9x9 data
num_classes = 1

model = model0(input_shape,num_classes)

In [223]:
batch_size = 128
epochs = 10

hist= model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))


Train on 54000 samples, validate on 6000 samples
Epoch 1/10
54000/54000 [==============================] - 3s - loss: 0.0351 - mean_absolute_error: 0.1408 - val_loss: 0.0144 - val_mean_absolute_error: 0.0983
Epoch 2/10
54000/54000 [==============================] - 3s - loss: 0.0131 - mean_absolute_error: 0.0927 - val_loss: 0.0124 - val_mean_absolute_error: 0.0902
Epoch 3/10
54000/54000 [==============================] - 3s - loss: 0.0117 - mean_absolute_error: 0.0869 - val_loss: 0.0115 - val_mean_absolute_error: 0.0845
Epoch 4/10
54000/54000 [==============================] - 3s - loss: 0.0110 - mean_absolute_error: 0.0838 - val_loss: 0.0109 - val_mean_absolute_error: 0.0832
Epoch 5/10
54000/54000 [==============================] - 3s - loss: 0.0107 - mean_absolute_error: 0.0822 - val_loss: 0.0106 - val_mean_absolute_error: 0.0813
Epoch 6/10
54000/54000 [==============================] - 3s - loss: 0.0104 - mean_absolute_error: 0.0811 - val_loss: 0.0105 - val_mean_absolute_error: 0.0799
Epoch 7/10
54000/54000 [==============================] - 3s - loss: 0.0101 - mean_absolute_error: 0.0799 - val_loss: 0.0100 - val_mean_absolute_error: 0.0787
Epoch 8/10
54000/54000 [==============================] - 3s - loss: 0.0098 - mean_absolute_error: 0.0782 - val_loss: 0.0095 - val_mean_absolute_error: 0.0777
Epoch 9/10
54000/54000 [==============================] - 3s - loss: 0.0093 - mean_absolute_error: 0.0762 - val_loss: 0.0090 - val_mean_absolute_error: 0.0751
Epoch 10/10
54000/54000 [==============================] - 3s - loss: 0.0087 - mean_absolute_error: 0.0732 - val_loss: 0.0085 - val_mean_absolute_error: 0.0703

In [224]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv1 (Conv2D)               (None, 7, 7, 1)           19        
_________________________________________________________________
conv2 (Conv2D)               (None, 5, 5, 2)           20        
_________________________________________________________________
conv3 (Conv2D)               (None, 3, 3, 4)           76        
_________________________________________________________________
flatten_18 (Flatten)         (None, 36)                0         
_________________________________________________________________
dense_27 (Dense)             (None, 9)                 333       
_________________________________________________________________
dense_28 (Dense)             (None, 1)                 10        
=================================================================
Total params: 458
Trainable params: 458
Non-trainable params: 0
_________________________________________________________________

In [191]:
layer = model.get_layer('conv1')
print('conv1: F1 \n',layer.get_weights()[0][:,:,0,0])


conv1: F1 
 [[ -5.93720586e-04  -1.53677259e-03   8.76308531e-02]
 [ -3.01922497e-04   3.87885189e-03   5.15556276e-01]
 [ -4.09872009e-04  -1.16856559e-03   7.25656077e-02]]

In [184]:
layer = model.get_layer('conv1')
print('conv1: F1 \n',layer.get_weights()[0][:,:,0,0])
print('conv1: F2 \n',layer.get_weights()[0][:,:,0,1])
print('conv1: F3 \n',layer.get_weights()[0][:,:,0,2])
print('conv1: F4 \n',layer.get_weights()[0][:,:,0,3])


conv1: F1 
 [[-0.01389307  0.17003067 -0.03251779]
 [ 0.23845771  0.01316227  0.24472411]
 [ 0.08561687  0.01705086 -0.0065648 ]]
conv1: F2 
 [[-0.00094727  0.20711035  0.06163559]
 [ 0.19773988  0.30687481 -0.00072881]
 [ 0.21798897  0.04644343  0.254053  ]]
conv1: F3 
 [[-0.01317494 -0.23720431  0.00952121]
 [-0.1574703   0.01048541 -0.24766214]
 [-0.06078539  0.00424148  0.01330663]]
conv1: F4 
 [[  2.19325218e-02   5.47097661e-02   2.44493131e-02]
 [  2.27168631e-02   2.43583217e-01  -5.61946363e-05]
 [ -2.78414018e-03   1.92698520e-02   4.01462545e-04]]

In [173]:
layer = model.get_layer('conv1')
print('conv1: \n',layer.get_weights()[0][:,:,0,0])
layer = model.get_layer('conv2')
print('conv2: F1 \n',layer.get_weights()[0][:,:,0,0])
print('conv2: F2 \n',layer.get_weights()[0][:,:,0,1])
layer = model.get_layer('conv3')
print('conv3: F1 \n',layer.get_weights()[0][:,:,0,0])
print('conv3: F2 \n',layer.get_weights()[0][:,:,0,1])


conv1: 
 [[ 0.35886604  0.04339897  0.4249891 ]
 [ 0.14725298  0.31968129  0.10292919]
 [ 0.06036174 -0.1287933  -0.1016439 ]]
conv2: F1 
 [[ 0.01222548  0.0267969  -0.15720029]
 [ 0.28399032  0.30905095  0.11879439]
 [ 0.2385062  -0.04193814  0.42602625]]
conv2: F2 
 [[ 0.02180257 -0.22463414  0.1539041 ]
 [-0.23485067 -0.46853673 -0.46655214]
 [-0.35479519  0.01785348  0.36701736]]
conv3: F1 
 [[ 0.20487316 -0.06818334  0.19874625]
 [ 0.16658981 -0.14302346  0.21957561]
 [ 0.03651127  0.02106592  0.02299863]]
conv3: F2 
 [[ 0.16932167  0.29734549 -0.00536137]
 [ 0.04205468 -0.03474634 -0.01920556]
 [ 0.17667079  0.44449779  0.39847866]]

In [214]:
# Predict on the test set
Yhat_test = model.predict(x_test,batch_size=32)

# Map predictions back to the original Y scale
oY_test = y_min_max.inverse_transform(y_test)
oY_hat = y_min_max.inverse_transform(Yhat_test)

# Observed vs. predicted
fig, ax = plt.subplots()
ax.scatter(oY_test, oY_hat, edgecolors=(0, 0, 0))
ax.plot([oY_test.min(), oY_test.max()], [oY_test.min(), oY_test.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()

r2Model = r2(oY_test,oY_hat)
print("R2 Model: ",r2Model)


R2 Model:  0.897534423714
