In [ ]:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint

import math

In [ ]:
# choose whether to study pickups or dropoffs
# (selects the weight file, scaler file, and output CSV names in later cells)

target = "pickup"
# target = "dropoff"

In [ ]:
# Re-create the network architecture.
# This must match the specification of the model used for training;
# dropout is set to 0 so the full network is used for prediction.
# (The training-only hyperparameters batch_size / nb_epoch were unused
# in this prediction notebook and have been removed.)

num_hidden_1 = 1024
num_hidden_2 = 1024
num_hidden_3 = 1024
dropout = 0.0

model = Sequential()

# input: 4 features (X_MIN, Y_MIN, dow, tod) -- Keras 1.x output_dim/input_dim API
model.add(Dense(output_dim=num_hidden_1, input_dim=4))
model.add(Activation("tanh"))
model.add(Dropout(dropout))
model.add(Dense(num_hidden_2))
model.add(Activation("tanh"))
model.add(Dropout(dropout))
model.add(Dense(num_hidden_3))
model.add(Activation("tanh"))
model.add(Dropout(dropout))
model.add(Dense(1))  # single regression output: predicted count for the grid cell

# load model weights from the saved training checkpoint
model.load_weights("-model_"+target+".hdf5")
model.compile(loss='mean_squared_logarithmic_error', optimizer='adam')

In [ ]:
import pandas as pd

# grid of candidate map locations; expected to contain X_MIN and Y_MIN columns
# (consumed by the prediction loop below)
# NOTE(review): the leading '-' and the '//' in this path look like notebook-export
# artifacts (possibly '../gis-data/') -- verify against the repository layout
df = pd.read_csv('-gis-data//grid_data.csv')

In [ ]:
import pickle

# load the fitted feature scaler (saved during training) so that prediction
# inputs are scaled exactly as the training inputs were
# NOTE(review): pickle.load executes arbitrary code on load -- only open
# scaler files produced by the trusted training run

with open("-scaler_"+target+".pkl", 'rb') as f:
    scaler = pickle.load(f)

# make predictions for each grid point, for each day of week and hour of day

for dow in range(7):
    for tod in range(24):
        # single parenthesized string: prints identically under Python 2 and 3
        # (the original bare `print` statement is a syntax error on Python 3)
        print("dow: %d tod: %d" % (dow, tod))

        # cyclical sine encoding of day-of-week / time-of-day;
        # must match the encoding used when the model was trained
        df["dow"] = math.sin((2 * math.pi) / 7 * dow)
        df["tod"] = math.sin((2 * math.pi) / 24 * tod)

        # .values replaces DataFrame.as_matrix(), which was deprecated in
        # pandas 0.23 and removed in pandas 1.0
        X = df[['X_MIN', 'Y_MIN', 'dow', 'tod']].values
        X_scaled = scaler.transform(X)

        # Sequential.predict returns shape (n, 1) for a Dense(1) head;
        # ravel() flattens to 1-D so the column assignment is unambiguous
        y = model.predict(X_scaled).ravel()

        # one prediction column per (day-of-week, hour) pair, e.g. "3_17"
        df["_".join([str(dow), str(tod)])] = y

df.to_csv("-predicted_"+target+".csv")