In [1]:
%matplotlib inline

import matplotlib as mpl
import matplotlib.pyplot as plt

mpl.rc('font', size=15)
mpl.rc('figure', figsize=(8, 5))

import numpy as np
import scipy.signal as sig
import keras

from keras.layers import Input, Dense, Activation, Dropout
from keras.models import Model
from keras.layers.advanced_activations import ELU
from keras import regularizers
from keras.models import load_model
from keras.initializers import glorot_normal, glorot_uniform
from keras.optimizers import Adam

from mpl_toolkits.basemap import Basemap
from matplotlib.path import Path


Using TensorFlow backend.

Load Data and Preprocessing


In [2]:
# fix random seed for reproducibility
np.random.seed(7)

# Load the data (genfromtxt fills unreadable fields with NaN)
data = np.genfromtxt('IRISwest.txt')

In [3]:
# Cutting out the IRIS P waves may give better results;
# choosing a 6000 m/s apparent-velocity cutoff.

eqgpstime = data[:, 1]
peakgpstime = data[:, 17]
arrivaltime = np.subtract(peakgpstime, eqgpstime)
distance = data[:, 13]
Velocity = np.divide(distance, arrivaltime)
pwaveomit = 6000  # m/s
Velocity1 = Velocity[Velocity < pwaveomit]
data = data[Velocity < pwaveomit]
print(len(data))


169232
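
As a quick sanity check on the cutoff, the apparent velocities can be histogrammed before masking. A minimal sketch, assuming the Velocity and pwaveomit variables from the cell above (the 12000 m/s plot range and bin count are illustrative):

# Distribution of apparent velocities, with the 6000 m/s cutoff marked
ok = np.isfinite(Velocity) & (Velocity > 0) & (Velocity < 12000)
plt.hist(Velocity[ok], bins=100)
plt.axvline(pwaveomit, color='r', linestyle='--', label='P-wave cutoff')
plt.xlabel('Apparent velocity [m/s]')
plt.ylabel('Counts')
plt.legend()
plt.show()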

In [4]:
# Cut to western-hemisphere data only (not needed for IRISwest.txt).
# Side note for Sky: reference columns with [:, j] instead of np.array.
# print(len(data1))
# eq_lon1 = data1[:, 11]
# print(eq_lon1)
# data = data1[(eq_lon1 >= -180) & (eq_lon1 <= -30)]
# print(len(data))

# Cut out ocean points

eq_lat2 = data[:, 11]
eq_lon2 = data[:, 12]
map1 = Basemap(projection='aeqd', lon_0=10, lat_0=50, resolution='h')
lats = eq_lat2  # [:100] for a quick test on a subset
lons = eq_lon2
x, y = map1(lons, lats)
locations = np.c_[x, y]

# Point-in-polygon test of every epicenter against every land polygon
polygons = [Path(p.boundary) for p in map1.landpolygons]
result = np.zeros(len(locations), dtype=bool)
for polygon in polygons:
    result |= np.array(polygon.contains_points(locations))

eq_lat1 = lats[result]
eq_lon1 = lons[result]
print(len(data))
print(result)
data = data[result]
print(len(data))


169232
[False False  True ...  True  True False]
29635
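
To check the land mask visually, the retained points can be drawn on the same Basemap instance. A minimal sketch, assuming map1, x, y, and result from the cell above:

# Plot the retained (on-land) epicenters on the azimuthal equidistant map
map1.drawcoastlines(linewidth=0.3)
map1.scatter(x[result], y[result], s=1, color='b')
plt.title('Epicenters retained by the land mask')
plt.show()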

In [5]:
# Extract X and y, then divide into train and val sets
X = data[:, [2, 11, 12, 13, 14, 15]]  # side note: Nikil has log10(distance); maybe we should try that
# y = (data[:, 24] - data[:, 0])
y = np.log10(data[:, 18])  # (Tri's original target)
# y = data[:, 25]

# Data preprocessing: exclude bad data,
# i.e. keep ground velocities above 1e-6 m/s
mask = y > -6.0  # -6.5 (Tri's original)
y = y[mask]
X = X[mask]

# (To train and test on all data, good idea or bad, skip the mask above.)

print(y.shape)

# Normalize features and target to zero mean and unit variance
# (if y were not log10, these statistics would need rethinking)
X -= np.mean(X, axis=0)
X /= np.std(X, axis=0)

mean_y = np.mean(y, axis=0)
stdv_y = np.std(y, axis=0)
y = (y - mean_y)/stdv_y

# Shuffle, then split 80/20 into train and val sets
# (np.random.permutation uses the global RNG, so the seed above applies)
mask = np.random.permutation(X.shape[0])
X = X[mask]
y = y[mask]

tfrac = int(0.8*y.size)
X_train = X[:tfrac]
y_train = y[:tfrac]
X_val = X[tfrac:]
y_val = y[tfrac:]

# To test the model against all of its own training data:
# tfrac = int(1*y.size)
# X_train = X[:tfrac]
# y_train = y[:tfrac]
# X_val = X[:tfrac]
# y_val = y[:tfrac]



print('')
print('X_train shape: {}'.format(X_train.shape))
print('y_train shape: {}'.format(y_train.shape))
print('X_val shape: {}'.format(X_val.shape))
print('y_val shape: {}'.format(y_val.shape))


(28870,)

X_train shape: (23096, 6)
y_train shape: (23096,)
X_val shape: (5774, 6)
y_val shape: (5774,)
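
The shuffle comment asks whether np.random.permutation respects the seed: it draws from NumPy's global RNG, so the np.random.seed(7) call at the top makes the split reproducible as long as the cells run in the same order. A minimal standalone check (note it resets the global seed, so run it separately):

np.random.seed(7)
p1 = np.random.permutation(10)
np.random.seed(7)
p2 = np.random.permutation(10)
print(np.array_equal(p1, p2))  # True: same seed, same permutation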

Create a Dense Network


In [6]:
def QuakeNet(input_shape, lr=1e-3, reg=0.00, dropout=0.2):
    # original defaults: lr=1e-3, reg=0.00, dropout=0.0
    X_input = Input(input_shape)

    # Funnel of fully connected ReLU layers: 512 down to 8 units
    X = Dense(512, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X_input)
    X = Dense(256, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dense(128, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dense(64, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dense(32, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dense(16, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dense(8, kernel_regularizer=regularizers.l2(reg),
              activation='relu')(X)
    X = Dropout(rate=dropout)(X)
    X = Dense(1, kernel_regularizer=regularizers.l2(reg))(X)  # linear output for regression

    model = Model(inputs=X_input, outputs=X, name='QuakeNet')
    model.compile(optimizer=Adam(lr=lr), loss='mse')

    return model
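
Since the hyperparameters are exposed as keyword arguments, variants are cheap to try; for example (values illustrative, not tuned):

# a lightly L2-regularized variant with a smaller learning rate
model_reg = QuakeNet(input_shape=(6,), lr=5e-4, reg=1e-4, dropout=0.2)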

In [7]:
input_shape = (X_train.shape[1], )
model = QuakeNet(input_shape=input_shape)
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 6)                 0         
_________________________________________________________________
dense_1 (Dense)              (None, 512)               3584      
_________________________________________________________________
dense_2 (Dense)              (None, 256)               131328    
_________________________________________________________________
dense_3 (Dense)              (None, 128)               32896     
_________________________________________________________________
dense_4 (Dense)              (None, 64)                8256      
_________________________________________________________________
dense_5 (Dense)              (None, 32)                2080      
_________________________________________________________________
dense_6 (Dense)              (None, 16)                528       
_________________________________________________________________
dense_7 (Dense)              (None, 8)                 136       
_________________________________________________________________
dropout_1 (Dropout)          (None, 8)                 0         
_________________________________________________________________
dense_8 (Dense)              (None, 1)                 9         
=================================================================
Total params: 178,817
Trainable params: 178,817
Non-trainable params: 0
_________________________________________________________________
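
(Each Dense layer's parameter count is (inputs + 1) × units, counting the bias: e.g. dense_1 has (6 + 1) × 512 = 3,584 parameters and dense_2 has (512 + 1) × 256 = 131,328, matching the table.)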

Train


In [8]:
stats = model.fit(X_train, y_train, epochs=200, batch_size=32, validation_data=(X_val, y_val))


Train on 23096 samples, validate on 5774 samples
Epoch 1/200
23096/23096 [==============================] - 2s 105us/step - loss: 0.3768 - val_loss: 0.2735
Epoch 2/200
23096/23096 [==============================] - 2s 84us/step - loss: 0.3048 - val_loss: 0.2640
Epoch 3/200
23096/23096 [==============================] - 2s 84us/step - loss: 0.2887 - val_loss: 0.2442

[epochs 4-197 elided: training loss falls steadily from ~0.26 to ~0.12; the best validation loss is 0.0959 at epoch 183]

Epoch 198/200
23096/23096 [==============================] - 2s 86us/step - loss: 0.1161 - val_loss: 0.1011
Epoch 199/200
23096/23096 [==============================] - 2s 86us/step - loss: 0.1169 - val_loss: 0.1120
Epoch 200/200
23096/23096 [==============================] - 2s 84us/step - loss: 0.1216 - val_loss: 0.1122
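
model.fit returns a History object, so the loss trajectory can be plotted rather than read off the log. A minimal sketch using the stats handle from the training cell:

# Training vs. validation loss over the 200 epochs
plt.plot(stats.history['loss'], label='train')
plt.plot(stats.history['val_loss'], label='val')
plt.xlabel('Epoch')
plt.ylabel('MSE loss (normalized units)')
plt.legend()
plt.show()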

Predict


In [9]:
model.save('iwgv6.hdf5')

# reload to confirm the saved model round-trips
model = load_model('iwgv6.hdf5')
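
Note that model.save here stores the final epoch's weights, even though the best validation loss (0.0959) came at epoch 183. Keras callbacks can checkpoint the best epoch instead; a sketch, assuming a Keras 2.x recent enough to have restore_best_weights (the filename and patience are illustrative):

from keras.callbacks import ModelCheckpoint, EarlyStopping

callbacks = [
    # keep the weights from the epoch with the lowest validation loss
    ModelCheckpoint('iwgv6_best.hdf5', monitor='val_loss', save_best_only=True),
    # stop once val_loss has not improved for 20 epochs
    EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True),
]
# stats = model.fit(X_train, y_train, epochs=200, batch_size=32,
#                   validation_data=(X_val, y_val), callbacks=callbacks)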

In [10]:
y_pred = model.predict(X_val)  # X_val could be replaced with new data here
# Inverse-normalize back to log10(ground velocity).
# Note this overwrites y_val in place, so only run this cell once.
y_val = y_val*stdv_y + mean_y
y_pred = y_pred*stdv_y + mean_y

In [11]:
print(y_val.shape)
y_pred = y_pred.flatten()
print(y_pred.shape)

# Linear-space scatter of prediction vs. truth
# (superseded by the log-log version in the next cell):
# fig, ax = plt.subplots()
# v_min = min(np.min(y_val), np.min(y_pred))
# v_max = max(np.max(y_val), np.max(y_pred))
# x = np.linspace(v_min, v_max, 1000)
# ax.plot(y_val, y_pred, '.')
# ax.plot(x, x)
# ax.set(xlabel='Prediction', ylabel='True')
# fig.tight_layout()


(5774,)
(5774,)

In [12]:
# Take the antilog so the axes are in familiar physical units
antiy_val = 10**y_val
antiy_pred = 10**y_pred

v_min = min(np.min(antiy_val), np.min(antiy_pred))
v_max = max(np.max(antiy_val), np.max(antiy_pred))
x = np.linspace(v_min, v_max, 1000)

fig, ax = plt.subplots()

ax.loglog(antiy_val, antiy_pred, '.')
ax.plot(x, x)
ax.set(xlabel='Actual ground velocity [m/s]', ylabel='Predicted ground velocity [m/s]',
       title='Actual vs. predicted ground velocities')
ax.set(adjustable='box', aspect='equal')  # 'box-forced' is deprecated since matplotlib 2.2
fig.tight_layout()

plt.savefig('iriswestomit1.png', dpi=300, bbox_inches='tight')
# plt.show()
#plt.show()



In [13]:
x = np.linspace(v_min, v_max, 1000)
fig2, ax = plt.subplots()

# 2D histogram on log-spaced bins (~sqrt(N) bins per axis)
n_bins = int(np.sqrt(5000))  # 12279
x_bins = np.logspace(np.log10(antiy_val.min()), np.log10(antiy_val.max()), n_bins)
y_bins = np.logspace(np.log10(antiy_pred.min()), np.log10(antiy_pred.max()), n_bins)
H, xedges, yedges = np.histogram2d(antiy_val, antiy_pred, bins=[x_bins, y_bins])

h = ax.pcolormesh(xedges, yedges, H.T)
ax.set_xscale('log')
ax.set_yscale('log')
ax.axis([yedges.min(), yedges.max(), yedges.min(), yedges.max()])
ax.set(xlabel='Actual ground velocity [m/s]', ylabel='Predicted ground velocity [m/s]',
       title='Actual vs. predicted ground velocities')

cbar = plt.colorbar(h, ax=ax)
ax.plot(x, x, c='r', linewidth=.5)
cbar.set_label('Counts', rotation=270, labelpad=9)

fig2.tight_layout()
ax.set(adjustable='box', aspect='equal')
plt.savefig('iriswestdensityomit1.png', dpi=300, bbox_inches='tight')

plt.show()



In [14]:
# Absolute relative error of each prediction
z = np.array(abs((antiy_val - antiy_pred)/antiy_val))

print(z)
print(z.shape)
print(np.min(z))
print(np.max(z))
print(np.average(z))

plt.hist(z, bins=30, range=[0, 3.5], facecolor='blue', alpha=0.5)
plt.xlabel('|Predicted - Actual| / Actual')
plt.ylabel('Counts')
plt.title('Relative error of predicted ground velocities')
plt.savefig('iriswesthistomit1.png', dpi=300, bbox_inches='tight')
plt.show()


[1.19170004 0.13781537 0.17829287 ... 0.10130263 0.40487907 0.06406664]
(5774,)
5.4257824656896914e-05
76.16102123598841
0.4001718060438368
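
The histogram shows counts per bin; summary fractions are often easier to quote. A minimal sketch on the same z array (thresholds are illustrative):

# Fraction of validation predictions within a given relative error
for thresh in (0.25, 0.5, 1.0):
    print('within {:.0%} of actual: {:.1%}'.format(thresh, np.mean(z < thresh)))
print('median relative error: {:.3f}'.format(np.median(z)))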
