In [1]:
%matplotlib inline

import matplotlib as mpl
import matplotlib.pyplot as plt

mpl.rc('font', size=15)
mpl.rc('figure', figsize=(8, 5))

import numpy as np
import scipy.signal as sig
import keras

from keras.layers import Input, Dense, Activation, Dropout
from keras.models import Model
from keras.models import load_model
from keras import regularizers
from keras.initializers import glorot_normal, glorot_uniform
from keras.optimizers import Adam

from mpl_toolkits.basemap import Basemap
from matplotlib.path import Path


Using TensorFlow backend.

Load and preprocess data


In [2]:
# fix random seed for reproducibility
np.random.seed(7)

# Load data and exclude NaN values
data = np.genfromtxt('H1O1O2_GPR_earthquakes.txt')
#data1 = np.genfromtxt('Hanfordtestdata.txt')
print(len(data))


2105
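The cell's comment mentions excluding NaNs, but no filter is actually applied. A minimal sketch of such a cut, assuming whole rows containing a NaN should be dropped:

data = data[~np.isnan(data).any(axis=1)]  # drop rows with any NaN entry
print(len(data))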

In [3]:
##### maybe if we cut out the P waves we can get better results #####
# going to choose 6000 (m/s) as the cutoff

#eqgpstime = data[:,0]
#peakgpstime = data[:,24]
#arrivaltime = np.subtract(peakgpstime,eqgpstime)
#distance = data[:,12]
#Velocity = np.divide(distance, arrivaltime)
#pwaveomit = 6000
#Velocity1 = Velocity[Velocity<6000]
#data = data[Velocity<6000]
#print (len(data))


1695
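The whole cut above is commented out, yet the printed count (1695, down from 2105) reflects a run where it was active. For reference, a runnable sketch of the same apparent-velocity cut (column indices as in cell 5; the 6000 threshold assumes distance in metres, i.e. roughly the ~6 km/s P-wave regime):

eqgpstime = data[:, 0]        # earthquake origin GPS time
peakgpstime = data[:, 24]     # GPS time of the peak ground motion
arrivaltime = peakgpstime - eqgpstime
distance = data[:, 12]
velocity = distance / arrivaltime
data = data[velocity < 6000]  # keep apparent velocities below 6000 (cuts P-wave arrivals)
print(len(data))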

In [4]:
#eq_lat2 = data[:,10]
#eq_lon2 = data[:,11]
#map1 = Basemap(projection='aeqd', lon_0 = 10, lat_0 = 50, resolution='h')
#lats = eq_lat2  #[:100]
#lons = eq_lon2
#x, y = map1(lons, lats)
#locations = np.c_[x, y]
#polygons = [Path(p.boundary) for p in map1.landpolygons]
#result = np.zeros(len(locations), dtype=bool)
#for polygon in polygons:
#    result += np.array(polygon.contains_points(locations))

###eq_lat1=lats[result]
###eq_lon1=lons[result]
print(len(data))
###print (result)
#data =data[result]
#print (len(data))


1695
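For reference, a cleaned-up, runnable sketch of the commented-out land mask above (it needs Basemap's high-resolution coastline data and is not executed in this run):

eq_lat2, eq_lon2 = data[:, 10], data[:, 11]
map1 = Basemap(projection='aeqd', lon_0=10, lat_0=50, resolution='h')
xm, ym = map1(eq_lon2, eq_lat2)        # project lon/lat to map coordinates
locations = np.c_[xm, ym]
on_land = np.zeros(len(locations), dtype=bool)
for polygon in [Path(p.boundary) for p in map1.landpolygons]:
    on_land |= polygon.contains_points(locations)
data = data[on_land]                   # keep only epicentres on land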

In [5]:
# Extract X and y and divide into train, val, and test set
#X = data[:, [2, 11, 12, 13, 14, 15]] #iris
X = data[:, [1, 10, 11, 12, 13, 14]] #L10102, H10102, V10102
#side note: Nikhil's version has log10(distance); maybe we should try that
#X = data1[:, [0, 1, 2, 3, 4, 5]] #from Hanford test data with log10(distance)

#z = np.log10(data[:, 18]) #iris 
Z = np.log10(data[:, 25]) #L10102, H10102, V10102

#Have to redefine the velocity parameters so they have the same shape after the above cuts
eqgpstime = data[:,0]
peakgpstime = data[:,24]
arrivaltime = np.subtract(peakgpstime,eqgpstime)
distance = data[:,12]
Velocity = np.divide(distance, arrivaltime)

#y = np.subtract(data[:, 24], data[:, 0])
y = Velocity
#y = np.log10(y)
#print(y)


# Data preprocessing
# Exclude bad data (currently disabled)
#y = np.log10(1e-6)
#mask = Z > -6.0
#y = y[mask]
#X = X[mask]

print(y.shape)

# Normalizing
X -= np.mean(X, axis=0) # should this normalization change if we are not taking log10 of the target?
X /= np.std(X, axis=0)

mean_y = np.mean(y, axis=0)
stdv_y = np.std(y, axis=0)
y = (y-mean_y)/stdv_y

# Shuffle and divide into train and val set
mask = np.random.permutation(X.shape[0]) # reproducible: permutation uses the global RNG seeded above
X = X[mask]
y = y[mask]

tfrac = int(0.8*y.size) 
X_train = X[:tfrac]
y_train = y[:tfrac]
X_val = X[tfrac:]
y_val = y[tfrac:]

print('X_train shape: {}'.format(X_train.shape))
print('y_train shape: {}'.format(y_train.shape))
print('X_val shape: {}'.format(X_val.shape))
print('y_val shape: {}'.format(y_val.shape))


(380,)
X_train shape: (304, 6)
y_train shape: (304,)
X_val shape: (76, 6)
y_val shape: (76,)
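To answer the question left in the shuffle comment: np.random.permutation draws from NumPy's global RNG, so the split is reproducible under np.random.seed(7) as long as the cells run in the same order. A quick self-contained check (note it re-seeds the global RNG):

np.random.seed(7)
a = np.random.permutation(10)
np.random.seed(7)
b = np.random.permutation(10)
print(np.array_equal(a, b))  # True: same seed, same permutation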

Create a dense (fully connected) network


In [6]:
def QuakeNet(input_shape, lr=1e-3, reg=0.00, dropout=0.5):
    # original defaults: (input_shape, lr=1e-3, reg=0.00, dropout=0.0)
    X_input = Input(input_shape)

    X = Dense(64, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X_input)
    #X = Dense(64, kernel_regularizer=regularizers.l2(reg),
    #          activation='relu')(X)
    #X = Dense(64, kernel_regularizer=regularizers.l2(reg),
    #          activation='relu')(X)
    X = Dense(32, kernel_regularizer=regularizers.l2(reg),  # 32 units to match the summary below
              activation='relu')(X)
    X = Dense(2, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dropout(rate=dropout)(X)
    X = Dense(1, kernel_regularizer=regularizers.l2(reg))(X)

    model = Model(inputs=X_input, outputs=X, name='QuakeNet')
    model.compile(optimizer=Adam(lr=lr), loss='mse')
    
    return model

In [7]:
input_shape = (X_train.shape[1], )
model = QuakeNet(input_shape=input_shape)
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_1 (Dense)              (None, 64)                448       
_________________________________________________________________
dense_2 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 66        
_________________________________________________________________
dropout_1 (Dropout)          (None, 2)                 0         
_________________________________________________________________
dense_4 (Dense)              (None, 1)                 3         
=================================================================
Total params: 2,597
Trainable params: 2,597
Non-trainable params: 0
_________________________________________________________________
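As a sanity check, each Dense layer's parameter count is n_in*n_out + n_out (weights plus biases), which reproduces the summary:

# dense_1: 6*64  + 64 = 448
# dense_2: 64*32 + 32 = 2080
# dense_3: 32*2  + 2  = 66
# dense_4: 2*1   + 1  = 3     -> total 2,597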

Train


In [8]:
stats = model.fit(X_train, y_train, epochs=150, batch_size=32, validation_data=(X_val, y_val))


Train on 304 samples, validate on 76 samples
Epoch 1/150
304/304 [==============================] - 0s 1ms/step - loss: 1.0263 - val_loss: 0.9025
Epoch 2/150
304/304 [==============================] - 0s 44us/step - loss: 1.0121 - val_loss: 0.9246
Epoch 3/150
304/304 [==============================] - 0s 53us/step - loss: 1.0010 - val_loss: 0.9181
Epoch 4/150
304/304 [==============================] - 0s 47us/step - loss: 0.9989 - val_loss: 0.9203
Epoch 5/150
304/304 [==============================] - 0s 48us/step - loss: 0.9971 - val_loss: 0.9270
Epoch 6/150
304/304 [==============================] - 0s 51us/step - loss: 0.9951 - val_loss: 0.9172
Epoch 7/150
304/304 [==============================] - 0s 54us/step - loss: 1.0040 - val_loss: 0.9204
Epoch 8/150
304/304 [==============================] - 0s 48us/step - loss: 1.0025 - val_loss: 0.9092
Epoch 9/150
304/304 [==============================] - 0s 55us/step - loss: 0.9993 - val_loss: 0.9260
Epoch 10/150
304/304 [==============================] - 0s 53us/step - loss: 0.9916 - val_loss: 0.9244
Epoch 11/150
304/304 [==============================] - 0s 48us/step - loss: 0.9870 - val_loss: 0.9235
Epoch 12/150
304/304 [==============================] - 0s 61us/step - loss: 0.9855 - val_loss: 0.9224
Epoch 13/150
304/304 [==============================] - 0s 46us/step - loss: 0.9855 - val_loss: 0.9343
Epoch 14/150
304/304 [==============================] - 0s 57us/step - loss: 0.9833 - val_loss: 0.9246
Epoch 15/150
304/304 [==============================] - 0s 54us/step - loss: 0.9816 - val_loss: 0.9233
Epoch 16/150
304/304 [==============================] - 0s 52us/step - loss: 0.9971 - val_loss: 0.9388
Epoch 17/150
304/304 [==============================] - 0s 49us/step - loss: 0.9769 - val_loss: 0.9524
Epoch 18/150
304/304 [==============================] - 0s 59us/step - loss: 0.9962 - val_loss: 0.9286
Epoch 19/150
304/304 [==============================] - 0s 50us/step - loss: 0.9784 - val_loss: 0.9294
Epoch 20/150
304/304 [==============================] - 0s 49us/step - loss: 0.9720 - val_loss: 0.9334
Epoch 21/150
304/304 [==============================] - 0s 56us/step - loss: 0.9900 - val_loss: 0.9335
Epoch 22/150
304/304 [==============================] - 0s 52us/step - loss: 0.9785 - val_loss: 0.9237
Epoch 23/150
304/304 [==============================] - 0s 53us/step - loss: 0.9891 - val_loss: 0.9238
Epoch 24/150
304/304 [==============================] - 0s 52us/step - loss: 0.9697 - val_loss: 0.9365
Epoch 25/150
304/304 [==============================] - 0s 49us/step - loss: 0.9968 - val_loss: 0.9262
Epoch 26/150
304/304 [==============================] - 0s 58us/step - loss: 0.9882 - val_loss: 0.9247
Epoch 27/150
304/304 [==============================] - 0s 61us/step - loss: 0.9863 - val_loss: 0.9333
Epoch 28/150
304/304 [==============================] - 0s 47us/step - loss: 0.9777 - val_loss: 0.9313
Epoch 29/150
304/304 [==============================] - 0s 56us/step - loss: 0.9824 - val_loss: 0.9264
Epoch 30/150
304/304 [==============================] - 0s 50us/step - loss: 0.9888 - val_loss: 0.9325
Epoch 31/150
304/304 [==============================] - 0s 50us/step - loss: 0.9668 - val_loss: 0.9291
Epoch 32/150
304/304 [==============================] - 0s 74us/step - loss: 0.9803 - val_loss: 0.9298
Epoch 33/150
304/304 [==============================] - 0s 53us/step - loss: 0.9696 - val_loss: 0.9176
Epoch 34/150
304/304 [==============================] - 0s 48us/step - loss: 0.9726 - val_loss: 0.9158
Epoch 35/150
304/304 [==============================] - 0s 58us/step - loss: 0.9535 - val_loss: 0.9142
Epoch 36/150
304/304 [==============================] - 0s 50us/step - loss: 0.9899 - val_loss: 0.9157
Epoch 37/150
304/304 [==============================] - 0s 54us/step - loss: 0.9710 - val_loss: 0.9169
Epoch 38/150
304/304 [==============================] - 0s 68us/step - loss: 0.9765 - val_loss: 0.9101
Epoch 39/150
304/304 [==============================] - 0s 48us/step - loss: 0.9788 - val_loss: 0.9143
Epoch 40/150
304/304 [==============================] - 0s 68us/step - loss: 0.9574 - val_loss: 0.9285
Epoch 41/150
304/304 [==============================] - 0s 53us/step - loss: 0.9743 - val_loss: 0.9214
Epoch 42/150
304/304 [==============================] - 0s 53us/step - loss: 0.9678 - val_loss: 0.9188
Epoch 43/150
304/304 [==============================] - 0s 61us/step - loss: 0.9692 - val_loss: 0.9212
Epoch 44/150
304/304 [==============================] - 0s 50us/step - loss: 0.9769 - val_loss: 0.9342
Epoch 45/150
304/304 [==============================] - 0s 52us/step - loss: 0.9644 - val_loss: 0.9338
Epoch 46/150
304/304 [==============================] - 0s 58us/step - loss: 0.9873 - val_loss: 0.9125
Epoch 47/150
304/304 [==============================] - 0s 54us/step - loss: 0.9602 - val_loss: 0.9075
Epoch 48/150
304/304 [==============================] - 0s 55us/step - loss: 0.9838 - val_loss: 0.9276
Epoch 49/150
304/304 [==============================] - 0s 58us/step - loss: 0.9779 - val_loss: 0.9210
Epoch 50/150
304/304 [==============================] - 0s 51us/step - loss: 0.9789 - val_loss: 0.9059
Epoch 51/150
304/304 [==============================] - 0s 62us/step - loss: 0.9701 - val_loss: 0.9173
Epoch 52/150
304/304 [==============================] - 0s 55us/step - loss: 0.9496 - val_loss: 0.9239
Epoch 53/150
304/304 [==============================] - 0s 47us/step - loss: 0.9688 - val_loss: 0.9200
Epoch 54/150
304/304 [==============================] - 0s 59us/step - loss: 0.9586 - val_loss: 0.9162
Epoch 55/150
304/304 [==============================] - 0s 49us/step - loss: 0.9692 - val_loss: 0.9120
Epoch 56/150
304/304 [==============================] - 0s 48us/step - loss: 0.9696 - val_loss: 0.9277
Epoch 57/150
304/304 [==============================] - 0s 57us/step - loss: 0.9704 - val_loss: 0.9551
Epoch 58/150
304/304 [==============================] - 0s 55us/step - loss: 0.9805 - val_loss: 0.9607
Epoch 59/150
304/304 [==============================] - 0s 47us/step - loss: 0.9651 - val_loss: 0.9572
Epoch 60/150
304/304 [==============================] - 0s 55us/step - loss: 0.9787 - val_loss: 0.9322
Epoch 61/150
304/304 [==============================] - 0s 54us/step - loss: 0.9572 - val_loss: 0.9375
Epoch 62/150
304/304 [==============================] - 0s 54us/step - loss: 0.9611 - val_loss: 0.9440
Epoch 63/150
304/304 [==============================] - 0s 67us/step - loss: 0.9804 - val_loss: 0.9280
Epoch 64/150
304/304 [==============================] - 0s 59us/step - loss: 0.9529 - val_loss: 0.9175
Epoch 65/150
304/304 [==============================] - 0s 56us/step - loss: 0.9628 - val_loss: 0.9128
Epoch 66/150
304/304 [==============================] - 0s 57us/step - loss: 0.9813 - val_loss: 0.9169
Epoch 67/150
304/304 [==============================] - 0s 51us/step - loss: 0.9659 - val_loss: 0.9236
Epoch 68/150
304/304 [==============================] - 0s 76us/step - loss: 0.9567 - val_loss: 0.9219
Epoch 69/150
304/304 [==============================] - 0s 55us/step - loss: 0.9927 - val_loss: 0.9267
Epoch 70/150
304/304 [==============================] - 0s 52us/step - loss: 0.9851 - val_loss: 0.9166
Epoch 71/150
304/304 [==============================] - 0s 60us/step - loss: 0.9718 - val_loss: 0.9282
Epoch 72/150
304/304 [==============================] - 0s 55us/step - loss: 0.9597 - val_loss: 0.9409
Epoch 73/150
304/304 [==============================] - 0s 59us/step - loss: 0.9556 - val_loss: 0.9244
Epoch 74/150
304/304 [==============================] - 0s 56us/step - loss: 0.9604 - val_loss: 0.9362
Epoch 75/150
304/304 [==============================] - 0s 51us/step - loss: 0.9643 - val_loss: 0.9203
Epoch 76/150
304/304 [==============================] - 0s 59us/step - loss: 0.9817 - val_loss: 0.9305
Epoch 77/150
304/304 [==============================] - 0s 51us/step - loss: 0.9764 - val_loss: 0.9042
Epoch 78/150
304/304 [==============================] - 0s 52us/step - loss: 0.9767 - val_loss: 0.9143
Epoch 79/150
304/304 [==============================] - 0s 57us/step - loss: 0.9809 - val_loss: 0.9190
Epoch 80/150
304/304 [==============================] - 0s 55us/step - loss: 0.9522 - val_loss: 0.9111
Epoch 81/150
304/304 [==============================] - 0s 60us/step - loss: 0.9726 - val_loss: 0.9177
Epoch 82/150
304/304 [==============================] - 0s 55us/step - loss: 0.9626 - val_loss: 0.9158
Epoch 83/150
304/304 [==============================] - 0s 47us/step - loss: 0.9787 - val_loss: 0.9320
Epoch 84/150
304/304 [==============================] - 0s 47us/step - loss: 0.9488 - val_loss: 0.9144
Epoch 85/150
304/304 [==============================] - 0s 51us/step - loss: 0.9870 - val_loss: 0.8817
Epoch 86/150
304/304 [==============================] - 0s 48us/step - loss: 0.9876 - val_loss: 0.9068
Epoch 87/150
304/304 [==============================] - 0s 45us/step - loss: 0.9391 - val_loss: 0.9200
Epoch 88/150
304/304 [==============================] - 0s 52us/step - loss: 0.9791 - val_loss: 0.9210
Epoch 89/150
304/304 [==============================] - 0s 46us/step - loss: 0.9605 - val_loss: 0.9295
Epoch 90/150
304/304 [==============================] - 0s 49us/step - loss: 0.9554 - val_loss: 0.9344
Epoch 91/150
304/304 [==============================] - 0s 50us/step - loss: 0.9707 - val_loss: 0.9459
Epoch 92/150
304/304 [==============================] - 0s 46us/step - loss: 0.9490 - val_loss: 0.9457
Epoch 93/150
304/304 [==============================] - 0s 47us/step - loss: 0.9650 - val_loss: 0.9331
Epoch 94/150
304/304 [==============================] - 0s 46us/step - loss: 0.9561 - val_loss: 0.9305
Epoch 95/150
304/304 [==============================] - 0s 50us/step - loss: 0.9319 - val_loss: 0.9200
Epoch 96/150
304/304 [==============================] - 0s 44us/step - loss: 0.9456 - val_loss: 0.9139
Epoch 97/150
304/304 [==============================] - 0s 48us/step - loss: 0.9680 - val_loss: 0.9251
Epoch 98/150
304/304 [==============================] - 0s 49us/step - loss: 0.9470 - val_loss: 0.9241
Epoch 99/150
304/304 [==============================] - 0s 47us/step - loss: 0.9697 - val_loss: 0.9246
Epoch 100/150
304/304 [==============================] - 0s 46us/step - loss: 0.9838 - val_loss: 0.9533
Epoch 101/150
304/304 [==============================] - 0s 54us/step - loss: 0.9512 - val_loss: 0.9609
Epoch 102/150
304/304 [==============================] - 0s 46us/step - loss: 0.9576 - val_loss: 0.9805
Epoch 103/150
304/304 [==============================] - 0s 48us/step - loss: 0.9561 - val_loss: 0.9504
Epoch 104/150
304/304 [==============================] - 0s 53us/step - loss: 0.9667 - val_loss: 0.9375
Epoch 105/150
304/304 [==============================] - 0s 47us/step - loss: 0.9826 - val_loss: 0.9650
Epoch 106/150
304/304 [==============================] - 0s 43us/step - loss: 0.9442 - val_loss: 0.9207
Epoch 107/150
304/304 [==============================] - 0s 46us/step - loss: 0.9508 - val_loss: 0.9226
Epoch 108/150
304/304 [==============================] - 0s 55us/step - loss: 0.9554 - val_loss: 0.9338
Epoch 109/150
304/304 [==============================] - 0s 52us/step - loss: 0.9552 - val_loss: 0.9610
Epoch 110/150
304/304 [==============================] - 0s 61us/step - loss: 0.9671 - val_loss: 0.9415
Epoch 111/150
304/304 [==============================] - 0s 54us/step - loss: 0.9618 - val_loss: 0.9339
Epoch 112/150
304/304 [==============================] - 0s 41us/step - loss: 0.9676 - val_loss: 0.9270
Epoch 113/150
304/304 [==============================] - 0s 50us/step - loss: 0.9443 - val_loss: 0.9188
Epoch 114/150
304/304 [==============================] - 0s 51us/step - loss: 0.9560 - val_loss: 0.9490
Epoch 115/150
304/304 [==============================] - 0s 45us/step - loss: 0.9547 - val_loss: 0.9748
Epoch 116/150
304/304 [==============================] - 0s 54us/step - loss: 0.9247 - val_loss: 0.9583
Epoch 117/150
304/304 [==============================] - 0s 48us/step - loss: 0.9370 - val_loss: 0.9413
Epoch 118/150
304/304 [==============================] - 0s 53us/step - loss: 0.9339 - val_loss: 0.9514
Epoch 119/150
304/304 [==============================] - 0s 56us/step - loss: 0.9340 - val_loss: 0.9843
Epoch 120/150
304/304 [==============================] - 0s 48us/step - loss: 0.9249 - val_loss: 0.9730
Epoch 121/150
304/304 [==============================] - 0s 45us/step - loss: 0.9539 - val_loss: 0.9619
Epoch 122/150
304/304 [==============================] - 0s 58us/step - loss: 0.9696 - val_loss: 0.9701
Epoch 123/150
304/304 [==============================] - 0s 43us/step - loss: 0.9350 - val_loss: 0.9346
Epoch 124/150
304/304 [==============================] - 0s 49us/step - loss: 0.9135 - val_loss: 0.9499
Epoch 125/150
304/304 [==============================] - 0s 61us/step - loss: 0.9277 - val_loss: 0.9949
Epoch 126/150
304/304 [==============================] - 0s 44us/step - loss: 0.9721 - val_loss: 0.9848
Epoch 127/150
304/304 [==============================] - 0s 47us/step - loss: 0.9492 - val_loss: 1.0465
Epoch 128/150
304/304 [==============================] - 0s 53us/step - loss: 0.9050 - val_loss: 0.9893
Epoch 129/150
304/304 [==============================] - 0s 46us/step - loss: 0.9319 - val_loss: 0.9962
Epoch 130/150
304/304 [==============================] - 0s 44us/step - loss: 0.9477 - val_loss: 0.9865
Epoch 131/150
304/304 [==============================] - 0s 52us/step - loss: 0.9313 - val_loss: 0.9563
Epoch 132/150
304/304 [==============================] - 0s 50us/step - loss: 0.8963 - val_loss: 0.9671
Epoch 133/150
304/304 [==============================] - 0s 48us/step - loss: 0.8923 - val_loss: 0.9456
Epoch 134/150
304/304 [==============================] - 0s 56us/step - loss: 0.9250 - val_loss: 0.9324
Epoch 135/150
304/304 [==============================] - 0s 49us/step - loss: 0.9191 - val_loss: 0.9503
Epoch 136/150
304/304 [==============================] - 0s 45us/step - loss: 0.9498 - val_loss: 0.9724
Epoch 137/150
304/304 [==============================] - 0s 50us/step - loss: 0.9348 - val_loss: 0.9971
Epoch 138/150
304/304 [==============================] - 0s 54us/step - loss: 0.9004 - val_loss: 0.9526
Epoch 139/150
304/304 [==============================] - 0s 50us/step - loss: 0.9259 - val_loss: 0.9453
Epoch 140/150
304/304 [==============================] - 0s 48us/step - loss: 0.9485 - val_loss: 0.9668
Epoch 141/150
304/304 [==============================] - 0s 54us/step - loss: 0.9275 - val_loss: 0.9533
Epoch 142/150
304/304 [==============================] - 0s 45us/step - loss: 0.9361 - val_loss: 0.9716
Epoch 143/150
304/304 [==============================] - 0s 44us/step - loss: 0.9396 - val_loss: 0.9929
Epoch 144/150
304/304 [==============================] - 0s 59us/step - loss: 0.9057 - val_loss: 0.9717
Epoch 145/150
304/304 [==============================] - 0s 44us/step - loss: 0.8985 - val_loss: 0.9575
Epoch 146/150
304/304 [==============================] - 0s 44us/step - loss: 0.9026 - val_loss: 0.9697
Epoch 147/150
304/304 [==============================] - 0s 57us/step - loss: 0.9336 - val_loss: 0.9710
Epoch 148/150
304/304 [==============================] - 0s 57us/step - loss: 0.9385 - val_loss: 1.0032
Epoch 149/150
304/304 [==============================] - 0s 46us/step - loss: 0.9491 - val_loss: 0.9807
Epoch 150/150
304/304 [==============================] - 0s 57us/step - loss: 0.9342 - val_loss: 1.0443
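The validation loss reaches its minimum near epoch 85 (0.8817) and then drifts upward while the training loss keeps falling, a sign of overfitting. A sketch using Keras's EarlyStopping callback could address this (the patience value is an illustrative choice, not tuned; restore_best_weights needs Keras >= 2.2.3):

from keras.callbacks import EarlyStopping

# Stop once val_loss has not improved for 20 epochs and roll back
# to the best weights seen so far.
early_stop = EarlyStopping(monitor='val_loss', patience=20,
                           restore_best_weights=True)
stats = model.fit(X_train, y_train, epochs=150, batch_size=32,
                  validation_data=(X_val, y_val), callbacks=[early_stop])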

Predict


In [9]:
model.save('hlvavat1.hdf5')

model = load_model('hlvavat1.hdf5')
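A quick optional sanity check (a sketch) that the save/load round trip preserves the weights:

model.save('hlvavat1.hdf5')
p_before = model.predict(X_val)
model = load_model('hlvavat1.hdf5')
p_after = model.predict(X_val)
print(np.allclose(p_before, p_after))  # expect True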

In [10]:
y_pred = model.predict(X_val)  # X_val could be replaced by new data here
# Inverse-normalize (note: this mutates y_val, so run this cell only once)
y_val = y_val*stdv_y + mean_y
y_pred = y_pred*stdv_y + mean_y

In [11]:
print(y_val.shape)
y_pred = y_pred.flatten()

print(y_pred.shape)
fig, ax = plt.subplots()

y_val = abs(y_val)
y_pred = abs(y_pred)

v_min = min(np.min(y_val), np.min(y_pred))
v_max = max(np.max(y_val), np.max(y_pred))
x = np.linspace(v_min, v_max, 1000)


ax.plot(y_val, y_pred, '.')
ax.plot(x, x)
ax.set(title='Actual vs. predicted velocities', xlabel='Actual velocity [m/s]',
       ylabel='Predicted velocity [m/s]')

fig.tight_layout()
ax.set(adjustable='box', aspect='equal')
plt.savefig('HLVATtest.png', dpi=300, bbox_inches='tight')
#plt.savefig('VIRATtest.png', dpi =300,bbox_inches='tight')
plt.show()


(76,)
(76,)

In [12]:
x = np.linspace(v_min, v_max, 1000)
fig2, ax = plt.subplots()
n_bins = int(np.sqrt(10000))  # 100 log-spaced bins per axis
x_bins = np.logspace(np.log10(y_val.min()), np.log10(y_val.max()), n_bins) #12279
y_bins = np.logspace(np.log10(y_pred.min()), np.log10(y_pred.max()), n_bins)
H, xedges, yedges = np.histogram2d(y_val, y_pred, bins=[x_bins, y_bins])
#ax2 = fig.add_subplot(212)
h = ax.pcolormesh(xedges, yedges, H.T)
#ax.set_aspect('equal')
#ax.set(adjustable='box-forced', aspect='equal')
#a2.imshow(img, origin='lower', extent=extent, aspect='auto')
#ax.set_xscale('log')
#ax.set_yscale('log')
ax.axis([xedges.min(), xedges.max(), yedges.min(), yedges.max()])
ax.set(xlabel='Actual velocity [m/s]', ylabel='Predicted velocity [m/s]',
       title='Actual vs. predicted velocities')




cbar = plt.colorbar(h, ax=ax)
ax.plot(x, x, c='r',linewidth=.5)
#ax.set_ylim([0, 10e-2])
#ax.set_xlim([0, 10e-2])
#ax.set_aspect('equal')
#cbar =plt.colorbar()
#cbar.ax.set_ylabel('Counts')
cbar.set_label('Counts', rotation=270,labelpad=9)

fig2.tight_layout()
ax.set(adjustable='box', aspect='equal')
plt.savefig('HLVATdensitytest.png', dpi=300, bbox_inches='tight')
#plt.savefig('VIRATdensitytest.png', dpi =300,bbox_inches='tight')
plt.show()



In [13]:
z = np.abs((y_val - y_pred)/y_val)

#print(z)
print(z.shape)
print(np.min(z))
print(np.max(z))
print(np.average(z))
#x_bins = np.logspace(np.log10(antiy_val.min()), np.log10(antiy_val.max()),np.sqrt(12279))
#y_bins = np.logspace(np.log10(antiy_pred.min()), np.log10(antiy_pred.max()),np.sqrt(12279))
plt.hist(z, bins=30, range=[0, 3.5], facecolor='blue', alpha=0.5)
plt.xlabel('|Predicted - Actual| / Actual')
plt.ylabel('Counts')
plt.title('Relative error of predicted velocities')
plt.savefig('HLVAThisttest.png', dpi=300, bbox_inches='tight')
#plt.savefig('VIRAThisttest.png', dpi =300,bbox_inches='tight')
plt.show()


(76,)
0.008114851524446917
0.5365235381002298
0.20304920894887613
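A compact statistic to accompany the histogram (a sketch): the fraction of validation events predicted within a given relative error.

for tol in (0.1, 0.2, 0.5):
    print('within {:.0%}: {:.1%}'.format(tol, np.mean(z < tol)))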

In [14]:
# to check whether the target is uniformly distributed (input data, not predictions)

#weights = (np.ones_like(y_pred)/float(len(y_pred)))*100
#bins = 100
#plt.hist(y, bins=bins, facecolor='blue', alpha=0.5)
#plt.xlabel('EQ ground velocities [m/s]')
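A runnable version of the sketched check, assuming the standardized y defined above (we undo the normalization with mean_y and stdv_y before plotting):

y_raw = y*stdv_y + mean_y              # undo the standardization
plt.hist(y_raw, bins=100, facecolor='blue', alpha=0.5)
plt.xlabel('EQ ground velocities [m/s]')
plt.ylabel('Counts')
plt.title('Distribution of target velocities')
plt.show()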