In [226]:
import numpy as np
import tensorflow as tf
In [286]:
def generate_landscape2(sizeGrid, nobsMax, K, b0, b1, b2, errStd, cutW):
    """Generate one landscape with a random number of neighbor observations.

    The focal ("own") farm is placed at the grid center; a random number of
    neighbor farms (0 .. nobsMax-1, exclusive upper bound) is placed uniformly
    at random on distinct grid cells.

    Inputs
    sizeGrid -- side length of the square grid
    nobsMax  -- exclusive upper bound on the number of neighbor farms
    K        -- number of characteristics X per farm
    b0       -- constant
    b1       -- coef of own effect, shape (K, 1)
    b2       -- coef of WX (neighbor effect), shape (K, 1)
    errStd   -- std of the error term
    cutW     -- distance cut off for the spatial weights

    Returns
    maps, Y, X, Xown, W, loc, locOwn
    """
    # Guard: if more farms than grid cells were requested, the uniqueness
    # loop below could never terminate.
    if nobsMax > sizeGrid * sizeGrid:
        raise ValueError("nobsMax exceeds the number of grid cells")
    # Draw the number of neighbor farms.
    nObs = int(np.random.randint(0, nobsMax, size=(1, 1)))
    # The own farm sits at the grid center.
    locOwn = np.array([[int(sizeGrid / 2), int(sizeGrid / 2)]])
    # Re-draw neighbor locations until all farms occupy distinct cells.
    while True:
        loc = np.random.randint(0, sizeGrid, size=(nObs, 2))
        locAll = np.concatenate((locOwn, loc), axis=0)
        # Make sure that farms are not on the same location.
        locAll = np.unique(locAll, axis=0)
        if nObs + 1 == locAll.shape[0]:
            break
    # Characteristics of the neighbors and of the own farm.
    X = np.random.randint(0, 100, size=(nObs, K))
    Xown = np.random.randint(0, 100, size=(1, K))
    # Spatial weighting matrix W: indicator of being within cutW of the own
    # farm, then row standardized (neighbor average).
    W = distance_matrix(locOwn, loc) <= cutW
    row_sum = W.sum(axis=1, keepdims=True)  # (1, 1) row sum
    # Explicit scalar check avoids relying on single-element-array truthiness
    # and guards the division when there are no neighbors in range.
    if row_sum.item() != 0:
        W = W / row_sum  # row standardize
    # Error term (length-1 array).
    err = np.random.normal(0, errStd, 1)
    # Calculate Y = b0 + Xown*b1 + (W X)*b2 + err; broadcasting yields (1, 1).
    Y = b0 + np.matmul(Xown, b1) + np.matmul(np.matmul(W, X), b2) + err
    assert Y.shape == (1, 1)
    # Rasterize the characteristics onto a (sizeGrid, sizeGrid, K) stack,
    # one channel per characteristic; empty cells stay zero.
    maps = np.zeros((sizeGrid, sizeGrid, K))
    for k in range(0, K):
        I = np.concatenate((locOwn[:, 0], loc[:, 0]), axis=0)
        J = np.concatenate((locOwn[:, 1], loc[:, 1]), axis=0)
        V = np.concatenate((Xown[:, k], X[:, k]), axis=0)
        A = sparse.coo_matrix((V, (I, J)), shape=(sizeGrid, sizeGrid))
        maps[:, :, k] = A.todense()
    return maps, Y, X, Xown, W, loc, locOwn
In [290]:
sizeGrid = 9 # size of the grid (sizeGrid x sizeGrid cells)
maxNObs = 30 # exclusive upper bound on the number of neighbor observations
K = 1 # number of features per farm
# set coefficients (drawn once, then fixed for all generated landscapes)
b0 = np.random.normal(5, 2, 1)
b1 = np.random.normal(5, 2, K).reshape(K,1) # coef for own characteristics
b2 = np.random.normal(5, 2, K).reshape(K,1) # coef for neighbor characteristics
errStd = 0 # std of error added to Y (0 = noise free)
# Cut-off distance for W: the distance from (4,4) to (6,6) is sqrt(8) ~ 2.83,
# so cells up to 2 rows AND 2 columns away count as neighbors.
# NOTE(review): distance_matrix is imported in a later cell
# (scipy.spatial) -- this cell only works once that import has run.
cutW = distance_matrix([[4,4]], [[4+2,4+2]])
In [292]:
# Generate a single landscape and inspect the feature map of channel k=0.
maps,y,x,xown,w,loc,locOwn = generate_landscape2(sizeGrid,maxNObs,K,b0,b1,b2,errStd,cutW)
#print("y: \n",y)
#print("x: \n",x)
#print("xown: \n",xown)
print("map k=0: \n",maps[:,:,0])
#print("map k=1: \n",maps[:,:,1])
#print("map k=2: \n",maps[:,:,2])
#print("loc: \n",loc)
#print("w: \n",w)
#print("locOwn: \n",locOwn)
In [297]:
# Neighbor-only map: copy the landscape and blank the own (center) cell.
mapNeig = np.copy(maps)
mapNeig[int(sizeGrid/2),int(sizeGrid/2)] = 0 # Set mid point to zero
# Binary occupancy map. NOTE(review): built from `maps`, not `mapNeig`,
# so the own farm's cell is still 1 here -- confirm that is intended.
mapNeigBinary = (maps>0)*1
print(mapNeig[:,:,0])
print(mapNeigBinary[:,:,0])
In [317]:
In [338]:
# Build a 3x3 all-ones filter and stack copies along new axes to explore
# the (height, width, in_channels, out_channels) layout conv2d expects.
F1 = np.array([[1, 1, 1],
               [1, 1, 1],
               [1, 1, 1]]).reshape(3,3,1)
F11 = np.concatenate((F1,F1),axis=2).reshape(3,3,2,1)
F111 = np.concatenate((F11,F11),axis=3)
F111.shape
Out[338]:
In [386]:
# Filter bank: one 3x3 filter per output channel. Channel 0 is plain ones;
# channel j-1 (j = 2..sizeGrid) holds ones/j (a scaled box filter).
# (F1 below is not used afterwards; F is rebuilt from np.ones.)
F1 = np.array([[1, 1, 1],
               [1, 1, 1],
               [1, 1, 1]]).reshape(3,3,1,1)
Fsize = 3
F = np.ones((Fsize,Fsize)).reshape(3,3,1,1)
for j in range(2,sizeGrid+1):
    Fi = np.ones((Fsize,Fsize))/j
    Fi = Fi.reshape(3,3,1,1)
    F = np.concatenate((F,Fi),axis=3)
# F has shape (3, 3, 1, sizeGrid): one input channel, sizeGrid output channels.
print('F.shape ',F.shape)
print(F[:,:,0,8])
In [379]:
inMap
Out[379]:
In [406]:
# TF1-style session: convolve the binary occupancy map with the filter
# bank F from the cell above (shape (3, 3, 1, sizeGrid)).
tf.reset_default_graph()
sess = tf.InteractiveSession()
m = 1 # number of samples (batch size)
inMap =tf.convert_to_tensor(mapNeigBinary.reshape(m,sizeGrid,sizeGrid,1), dtype=tf.float32)
kernel1 = tf.convert_to_tensor(F,
                               dtype=tf.float32)
# Non-overlapping 3x3 patches (stride 3, VALID padding).
conv1 = tf.nn.conv2d(inMap,filter=kernel1,padding='VALID',strides=[1,3,3,1])
# Steep tanh approximates a hard threshold around conv value 1.
Z1 = tf.tanh( (conv1-1)*10000)
print(sess.run(inMap)[0,:,:,0])
print(sess.run(conv1).shape)
# One slice per output channel j of the filter bank.
# NOTE(review): sess.run(conv1) inside the loop re-evaluates the graph on
# every iteration; running it once before the loop would be cheaper.
for j in range(0,sizeGrid):
    print('Conv1 ',str(j+1),' \n',sess.run(conv1)[0,:,:,j])
In [ ]:
In [254]:
arg
Out[254]:
In [281]:
# Small TF convolution sanity check on a 9x9 ramp of integers.
# NOTE(review): this cell reuses `sess` created in the conv2d cell above
# (hidden notebook state) -- it fails on a fresh kernel without that cell.
sizeGrid = 9
#arg = tf.placeholder(tf.float32, shape=(sizeGrid, sizeGrid))
nparg = np.linspace(0, sizeGrid*sizeGrid, num=sizeGrid*sizeGrid,dtype ='int').reshape(1,sizeGrid,sizeGrid,1)
arg =tf.convert_to_tensor(nparg, dtype=tf.float32)
# 1x1 kernel that scales every cell by 10000.
kernel0 = tf.convert_to_tensor(
    np.array([10000]).reshape(1,1,1,1),
    dtype=tf.float32)
# 3x3 box kernel: each output is the sum over a 3x3 patch.
kernel1 = tf.convert_to_tensor(
    np.array([[1, 1, 1],
              [1, 1, 1],
              [1, 1, 1]]).reshape(3,3,1,1),
    dtype=tf.float32)
conv1 = tf.nn.convolution(arg,filter=kernel1,padding='VALID',strides=[3,3])
conv0 = tf.nn.convolution(arg,filter=kernel0,padding='VALID',strides=[1,1])
# Sigmoid of the scaled map: effectively a 0/1 step function.
Z1 = tf.sigmoid(conv0-10)
sumInput = tf.reduce_sum(arg)
sumConv = tf.reduce_sum(conv1)
print(sess.run(conv1)[0,:,:,0])
print('Z1 ',np.round(sess.run(Z1)[0,:,:,0]))
print('sumArg',sess.run(sumInput))
print('sumConv',sess.run(sumConv))
In [ ]:
import numpy as np
from scipy.signal import convolve2d
In [37]:
# Chain several 2x2 convolutions over an integer ramp to see how offsets
# and sums compose. Kernels are flipped ([::-1, ::-1]) so convolve2d
# behaves like cross-correlation.
sizeGrid = 9
arr = np.linspace(0, sizeGrid*sizeGrid, num=sizeGrid*sizeGrid,dtype ='int').reshape(sizeGrid,sizeGrid)
print(arr)
# Picks the top-left cell of each 2x2 patch.
kernel1 = np.array([[1, 0],
                    [0, 0]])
# Picks the bottom-right cell of each 2x2 patch.
kernel2 = np.array([[0, 0],
                    [0, 1]])
# NOTE(review): kernel3/kernel4 duplicate kernel1/kernel2 -- presumably
# intentional (apply the same offsets a second time), but worth confirming.
kernel3 = np.array([[1, 0],
                    [0, 0]])
kernel4 = np.array([[0, 0],
                    [0, 1]])
# Sum over the full 2x2 patch.
kernel5 = np.array([[1, 1],
                    [1, 1]])
# Sum of the two diagonal cells.
kernel6 = np.array([[1, 0],
                    [0, 1]])
conv1 = convolve2d(arr, kernel1[::-1, ::-1], 'valid')
print(conv1)
conv2 = convolve2d(conv1, kernel2[::-1, ::-1], 'valid')
print(conv2)
conv3 = convolve2d(conv2, kernel3[::-1, ::-1], 'valid')
print(conv3)
conv4 = convolve2d(conv3, kernel4[::-1, ::-1], 'valid')
print(conv4)
conv5 = convolve2d(conv4, kernel5[::-1, ::-1], 'valid')
print(conv5)
conv6 = convolve2d(conv5, kernel6[::-1, ::-1], 'valid')
print(conv6)
In [33]:
# Totals of the later stages, for comparison.
print(np.sum(conv4))
print(np.sum(conv5))
print(np.sum(conv6))
In [79]:
# Same idea with 3x3 kernels: shift, shift back, then aggregate.
sizeGrid = 9
arr = np.linspace(0, sizeGrid*sizeGrid, num=sizeGrid*sizeGrid,dtype ='int').reshape(sizeGrid,sizeGrid)
print('input \n ', arr)
# Selects the bottom-right cell of each 3x3 patch.
kernel1 = np.array([[0, 0, 0],
                    [0, 0, 0],
                    [0, 0, 1]])
# Selects the top-left cell (shifts back the other way).
kernel2 = np.array([[1, 0, 0],
                    [0, 0, 0],
                    [0, 0, 0 ]])
# Full 3x3 patch sum.
kernel3a = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [1, 1, 1]])
# -1 over the lower-right 2x2 of the patch (partially cancels kernel3a).
kernel3b = np.array([[0, 0, 0],
                     [0, -1, -1],
                     [0, -1, -1]])
conv1 = convolve2d(arr, kernel1[::-1, ::-1],'valid' )
print('conv1 \n ',conv1)
conv2 = convolve2d(conv1, kernel2[::-1, ::-1],'valid')
print('conv2 \n ',conv2)
print('sum(conv2) \n',np.sum(conv2))
conv3a = convolve2d(conv2, kernel3a[::-1, ::-1],'valid')
print('conv3a \n ',conv3a)
print('sum(conv3a) \n',np.sum(conv3a))
conv3b = convolve2d(conv2, kernel3b[::-1, ::-1],'valid')
print('conv3b \n ',conv3b)
#print('conv3a+conv3b \n ',np.sum(conv3a+conv3b))
In [80]:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib
import numpy as np
from scipy.spatial import distance_matrix
from scipy import sparse
In [206]:
def generate_landscape3(sizeGrid, nobsMax, K, b0, b1, b2, errStd, cutW):
    """Draw a single synthetic landscape with a random neighbor count.

    The focal farm is fixed at the grid center; the number of neighboring
    farms is drawn at random with nobsMax as the exclusive upper bound.

    NOTE(review): the original notes said "use sum of neighbors instead of
    average", yet W is row standardized below exactly as in
    generate_landscape2, so neighbor characteristics are averaged -- confirm
    which behavior is intended.

    Parameters
    sizeGrid -- side length of the square grid
    nobsMax  -- exclusive upper bound on the neighbor count
    K        -- number of characteristics per farm
    b0, b1, b2 -- intercept, own-effect and neighbor-effect coefficients
    errStd   -- standard deviation of the error term
    cutW     -- distance cut off used to build W

    Returns maps, Y, X, Xown, W, loc, locOwn.
    """
    # Number of neighboring farms.
    nNeighbors = int(np.random.randint(0, nobsMax, size=(1, 1)))
    # The observed farm sits on the central cell.
    center = int(sizeGrid / 2)
    ownLocation = np.array([[center, center]])
    # Keep redrawing neighbor coordinates until every farm has its own cell.
    while True:
        neighborLoc = np.random.randint(0, sizeGrid, size=(nNeighbors, 2))
        combined = np.unique(
            np.concatenate((ownLocation, neighborLoc), axis=0), axis=0)
        if combined.shape[0] == nNeighbors + 1:
            break
    # Characteristics for the neighbors and for the focal farm.
    neighborX = np.random.randint(0, 100, size=(nNeighbors, K))
    focalX = np.random.randint(0, 100, size=(1, K))
    # Spatial weights: indicator of being within cutW, then row standardized.
    weights = distance_matrix(ownLocation, neighborLoc) <= cutW
    totalW = weights.sum(axis=1, keepdims=True)
    if totalW != 0:
        weights = weights / totalW
    # Error draw (a length-1 array).
    noise = np.random.normal(0, errStd, 1)
    # Outcome of the focal farm: own effect plus weighted neighbor effect.
    outcome = (b0 + np.matmul(focalX, b1)
               + np.matmul(np.matmul(weights, neighborX), b2) + noise)
    assert outcome.shape == (1, 1)
    # Rasterize all characteristics onto a (sizeGrid, sizeGrid, K) stack.
    featureMaps = np.zeros((sizeGrid, sizeGrid, K))
    rows = np.concatenate((ownLocation[:, 0], neighborLoc[:, 0]), axis=0)
    cols = np.concatenate((ownLocation[:, 1], neighborLoc[:, 1]), axis=0)
    for k in range(K):
        values = np.concatenate((focalX[:, k], neighborX[:, k]), axis=0)
        grid = sparse.coo_matrix((values, (rows, cols)),
                                 shape=(sizeGrid, sizeGrid))
        featureMaps[:, :, k] = grid.todense()
    return featureMaps, outcome, neighborX, focalX, weights, neighborLoc, ownLocation
In [82]:
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from scipy.spatial import distance_matrix
from scipy import sparse
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
In [83]:
def mapToX2Canal(N, nObsMax):
    """Build a two-channel training set from N randomly generated landscapes.

    Channel 0 holds the neighbor characteristics (own cell zeroed out);
    channel 1 holds only the own characteristic at the grid center.

    Relies on module-level globals: sizeGrid, K, b0, b1, b2, errStd, cutW
    and generate_landscape3.

    Parameters
    N       -- number of landscapes (samples) to generate
    nObsMax -- exclusive upper bound on neighbors per landscape

    Returns X of shape (N, sizeGrid, sizeGrid, 2) and Y of shape (N, 1).
    """
    X = np.zeros((N, sizeGrid, sizeGrid, 2))
    Y = np.zeros((N, 1))
    # BUG FIX: generate_landscape3 places the own farm at the grid center
    # int(sizeGrid/2); the previous version hardcoded index 4, which is only
    # correct for sizeGrid == 9 (wrong for e.g. sizeGrid == 32).
    mid = int(sizeGrid / 2)
    for i in range(0, N):
        maps, y, x, xown, w, loc, locOwn = generate_landscape3(
            sizeGrid, nObsMax, K, b0, b1, b2, errStd, cutW)
        Y[i, :] = y
        # Channel 0: full map with the own cell removed.
        # (For K == 1, maps broadcasts over both channels here; channel 1 is
        # overwritten below.)
        X[i, :, :, :] = maps
        X[i, mid, mid, 0] = 0
        # Channel 1: only the own characteristic at the center cell.
        X[i, :, :, 1] = np.zeros((sizeGrid, sizeGrid))
        X[i, mid, mid, 1] = maps[mid, mid, 0]
    return X, Y
In [84]:
def standardSplit(X, Y, test_size=0.1, random_state=42):
    """Min-max scale features and targets, then split into train/test sets.

    Parameters
    X -- feature array with samples on axis 0 (any trailing shape)
    Y -- target array
    test_size -- fraction of samples held out for testing
    random_state -- seed passed to train_test_split

    Returns the two fitted scalers followed by x_train, x_test, y_train,
    y_test (features scaled and restored to the original X shape).
    """
    featureScaler = MinMaxScaler()
    targetScaler = MinMaxScaler()
    # Flatten each sample to a vector for scaling, then restore the shape.
    flatFeatures = X.reshape(X.shape[0], -1)
    scaledX = featureScaler.fit_transform(flatFeatures).reshape(X.shape)
    scaledY = targetScaler.fit_transform(Y)
    # Split the scaled data into training and test sets.
    x_train, x_test, y_train, y_test = train_test_split(
        scaledX, scaledY, test_size=test_size, random_state=random_state)
    return featureScaler, targetScaler, x_train, x_test, y_train, y_test
In [ ]:
# model 0
def model0(input_shape, num_classes):
    """Build and compile a small three-layer CNN regressor.

    Parameters
    input_shape -- shape of one input sample, e.g. (sizeGrid, sizeGrid, 2)
    num_classes -- number of output units (1 for a scalar regression target)

    Returns the compiled keras Sequential model (MSE loss, Adadelta
    optimizer, MAE metric).
    """
    model = Sequential()
    # Three stacked convolutions with shrinking kernels and growing depth.
    model.add(Conv2D(2, kernel_size=(8, 8), activation='relu',
                     input_shape=input_shape, name='conv1'))
    model.add(Conv2D(4, kernel_size=(4, 4), activation='relu', name='conv2'))
    model.add(Conv2D(8, kernel_size=(2, 2), activation='relu', name='conv3'))
    # Flatten and regress through a small dense head.
    model.add(Flatten())
    model.add(Dense(9, activation='relu'))
    model.add(Dense(num_classes, activation='relu'))
    model.compile(loss='mean_squared_error',
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['mae'])
    return model
In [201]:
input_shape
Out[201]:
In [141]:
# %%
def r2(y_true, y_pred):
    """Calculate and return the coefficient of determination (R^2).

    y_true -- the observed values (numpy array)
    y_pred -- the predicted values (numpy array)
    """
    # Residual sum of squares vs. total sum of squares around the mean.
    ss_residual = np.sum((y_true - y_pred) ** 2)
    ss_total = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1.0 - ss_residual / ss_total
In [225]:
# Experiment configuration and data generation.
sizeGrid =32 # size of the grid
nObs = 6 # NOTE(review): unused below -- nObsMax controls the neighbor count
K = 1 # number of features
N = 60000 # number of generated samples
nObsMax = 300 # exclusive upper bound on neighbors per landscape
# set coefficients
b0 = np.random.normal(5, 2, 1)
b1 = np.random.normal(5, 2, K).reshape(K,1) # coef for own characteristics
b2 = np.random.normal(5, 2, K).reshape(K,1) # coef for neighbor characteristics
errStd = 0 # error added to Y (0 = noise free)
# Cut-off distance for W: sqrt(8) ~ 2.83, i.e. cells up to 2 rows and
# 2 columns away count as neighbors.
cutW = distance_matrix([[4,4]], [[4+2,4+2]])
# Generate data (N = 60000 landscapes -- this cell is slow)
X,Y = mapToX2Canal(N,nObsMax)
#X,Y = mapToX2Canal(N,nObsMax)
# Standardize data and split in training and test
x_min_max, y_min_max, x_train, x_test, y_train, y_test = standardSplit(X,Y)
In [222]:
# Build the model for the per-sample input shape (sizeGrid, sizeGrid, 2).
input_shape = X[0,:,:,:].shape
num_classes = 1 # scalar regression target
model = model0(input_shape,num_classes)
In [223]:
# Train for a few epochs, validating on the held-out test split.
batch_size = 128
epochs = 10
hist= model.fit(x_train, y_train,
                batch_size=batch_size,
                epochs=epochs,
                verbose=1,
                validation_data=(x_test, y_test))
In [224]:
model.summary()
In [191]:
# Inspect the learned convolution filters.
layer = model.get_layer('conv1')
print('conv1: F1 \n',layer.get_weights()[0][:,:,0,0])
In [184]:
# NOTE(review): model0's conv1 has only 2 filters, so the F3/F4 slices
# ([:,:,0,2] and [:,:,0,3]) below raise IndexError -- this cell appears to
# be left over from an earlier model configuration.
layer = model.get_layer('conv1')
print('conv1: F1 \n',layer.get_weights()[0][:,:,0,0])
print('conv1: F2 \n',layer.get_weights()[0][:,:,0,1])
print('conv1: F3 \n',layer.get_weights()[0][:,:,0,2])
print('conv1: F4 \n',layer.get_weights()[0][:,:,0,3])
In [173]:
# First filters of each convolution layer (conv2/conv3 have 4 and 8 filters).
layer = model.get_layer('conv1')
print('conv1: \n',layer.get_weights()[0][:,:,0,0])
layer = model.get_layer('conv2')
print('conv2: F1 \n',layer.get_weights()[0][:,:,0,0])
print('conv2: F2 \n',layer.get_weights()[0][:,:,0,1])
layer = model.get_layer('conv3')
print('conv3: F1 \n',layer.get_weights()[0][:,:,0,0])
print('conv3: F2 \n',layer.get_weights()[0][:,:,0,1])
In [214]:
# %% Model evaluation: predict on the test set, undo the target scaling,
# and compare predicted vs. observed Y.
Yhat_test = model.predict(x_test,batch_size=32)
oY_test = y_min_max.inverse_transform(y_test)
oY_hat = y_min_max.inverse_transform(Yhat_test)
#oY_test = y_test
#oY_hat = Yhat_test
# %% Scatter of predicted vs. measured, with a 45-degree reference line.
fig, ax = plt.subplots()
ax.scatter(oY_test, oY_hat, edgecolors=(0, 0, 0))
ax.plot([oY_test.min(), oY_test.max()], [oY_test.min(), oY_test.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
r2Model = r2(oY_test,oY_hat)
print("R2 Model: ",r2Model)
In [ ]: