In [1]:
!pip install gdown
!mkdir -p ./data
import gdown
def data_import():
ids = {
"tables_of_fgm.h5":"1omAitbGljJeoZPMJ9odkrF27zaoQm61X"
}
url = 'https://drive.google.com/uc?id='
for title, g_id in ids.items():
        try:
            # gdown.download expects an output *path*, not an open file object
            gdown.download(url + g_id, "/content/data/" + title, quiet=False)
        except IOError as e:
            print(e)
data_import()
In [1]:
import tensorflow as tf
import keras
from keras.layers import Dense, Activation, Input, BatchNormalization, Dropout, concatenate
from keras import layers
def res_branch(bi, conv_name_base, bn_name_base, scale, input_tensor, n_neuron, stage, block, dp1, bn=False):
x_1 = Dense(scale * n_neuron, name=conv_name_base + '2a_'+str(bi))(input_tensor)
if bn:
x_1 = BatchNormalization(axis=-1, name=bn_name_base + '2a_'+str(bi))(x_1)
x_1 = Activation('relu')(x_1)
if dp1>0:
x_1 = Dropout(dp1)(x_1)
return x_1
def res_block(input_tensor, scale, n_neuron, stage, block, bn=False, branches=0):
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# scale = 2
x = Dense(scale * n_neuron, name=conv_name_base + '2a')(input_tensor)
if bn:
x = BatchNormalization(axis=-1, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
dp1=0.
if dp1 >0:
x = Dropout(dp1)(x)
branch_list=[x]
for i in range(branches-1):
branch_list.append(res_branch(i,conv_name_base, bn_name_base, scale,input_tensor,n_neuron,stage,block,dp1,bn))
if branches-1 > 0:
x = Dense(n_neuron, name=conv_name_base + '2b')(concatenate(branch_list,axis=-1))
# x = Dense(n_neuron, name=conv_name_base + '2b')(layers.add(branch_list))
else:
x = Dense(n_neuron, name=conv_name_base + '2b')(x)
if bn:
x = BatchNormalization(axis=-1, name=bn_name_base + '2b')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
if dp1 >0:
x = Dropout(dp1)(x)
return x
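The shortcut connection layers.add([x, input_tensor]) requires the block output and its input to have the same width, so the tensor fed into res_block must already have n_neuron units. A minimal shape check (illustrative sketch, not part of the original pipeline):

from keras.models import Model
inp = Input(shape=(3,))
h = Dense(10, activation='relu')(inp)   # width must equal n_neuron below
h = res_block(h, scale=2, n_neuron=10, stage=1, block='a', branches=3)
print(Model(inp, h).output_shape)       # (None, 10), same width as the shortcut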
The read_h5_data function reads the table from the HDF5 file.
In the FGM case we chose not to scale the input features, since they all fall between 0 and 1. The output features, by contrast, vary greatly: in the reaction region close to stoichiometry the gradients of the output properties are large. A good example is the source term of the progress variable, which rises from 0 to 1e5. So the output features are first transformed to a logarithmic scale, rescaled between 0 and 1, and normalised by their variance (the code below uses the analogous cube-root variant, 'cbrt_std'). This way the transformed values stay large where the gradients are large, so those samples receive more focus during training. The same 'focus design' applies to the loss function selection: mse is selected over mae because the squared error puts more weight on the data samples that show the greatest changes.
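A quick illustration of the dynamic-range compression (hypothetical values spanning the range quoted above):

import numpy as np
pvs = np.array([1e-3, 1.0, 1e2, 1e5])   # hypothetical source-term samples
print(np.log(pvs + 1e-20))              # ~[-6.9, 0.0, 4.6, 11.5]: eight decades compressed
print(np.cbrt(pvs))                     # [0.1, 1.0, 4.6, 46.4]: the cube-root variant used below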
In [10]:
import numpy as np
import pandas as pd
# MaxAbsScaler and PowerTransformer are needed by the 'tan' and 'bc' cases below
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, PowerTransformer
class data_scaler(object):
def __init__(self):
self.norm = None
self.norm_1 = None
self.std = None
self.bc = None
self.case = None
self.scale = 1
self.bias = 1e-20
# self.bias = 1
self.switcher = {
'min_std': 'min_std',
'std2': 'std2',
'std_min':'std_min',
'min': 'min',
'no':'no',
'log': 'log',
'log_min':'log_min',
'log_std':'log_std',
'log2': 'log2',
'sqrt_std': 'sqrt_std',
'cbrt_std': 'cbrt_std',
'nrt_std':'nrt_std',
'bc':'bc',
'tan': 'tan'
}
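        # each key names a transform pipeline, e.g. 'cbrt_std' = cube root
        # followed by standardisation, 'log_min' = negative log then min-max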
def fit_transform(self, input_data, case):
self.case = case
if self.switcher.get(self.case) == 'min_std':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.norm.fit_transform(input_data)
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'std2':
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
if self.switcher.get(self.case) == 'std_min':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
out = self.norm.fit_transform(out)
if self.switcher.get(self.case) == 'min':
self.norm = MinMaxScaler()
out = self.norm.fit_transform(input_data)
if self.switcher.get(self.case) == 'no':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = input_data
if self.switcher.get(self.case) == 'log_min':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
self.norm = MinMaxScaler()
out = self.norm.fit_transform(out)
if self.switcher.get(self.case) == 'log_std':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
self.std = StandardScaler()
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'log2':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.norm.fit_transform(input_data)
out = np.log(np.asarray(out) + self.bias)
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'sqrt_std':
self.norm = MinMaxScaler()
self.std = StandardScaler()
out = self.norm.fit_transform(input_data)
out = np.sqrt(np.asarray(out / self.scale))
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'cbrt_std':
out = np.cbrt(np.asarray(input_data / self.scale))
self.std = StandardScaler()
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'nrt_std':
out = np.power(np.asarray(input_data / self.scale),1/4)
self.std = StandardScaler()
out = self.std.fit_transform(out)
if self.switcher.get(self.case) == 'bc':
self.bc = PowerTransformer(method='box-cox')
out = self.bc.fit_transform(input_data + self.bias)
print('lambda:', self.bc.lambdas_)
if self.switcher.get(self.case) == 'tan':
self.norm = MaxAbsScaler()
self.std = StandardScaler()
out = self.std.fit_transform(input_data)
out = self.norm.fit_transform(out)
out = np.tan(out / (2 * np.pi + self.bias))
return out
def transform(self, input_data):
if self.switcher.get(self.case) == 'min_std':
out = self.norm.transform(input_data)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'std2':
out = self.std.transform(input_data)
if self.switcher.get(self.case) == 'std_min':
out = self.std.transform(input_data)
out = self.norm.transform(out)
if self.switcher.get(self.case) == 'min':
out = self.norm.transform(input_data)
if self.switcher.get(self.case) == 'no':
out = input_data
if self.switcher.get(self.case) == 'log_min':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
out = self.norm.transform(out)
if self.switcher.get(self.case) == 'log_std':
out = - np.log(np.asarray(input_data / self.scale) + self.bias)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'log2':
out = self.norm.transform(input_data)
out = np.log(np.asarray(out) + self.bias)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'sqrt_std':
out = self.norm.transform(input_data)
out = np.sqrt(np.asarray(out / self.scale))
out = self.std.transform(out)
if self.switcher.get(self.case) == 'cbrt_std':
out = np.cbrt(np.asarray(input_data / self.scale))
out = self.std.transform(out)
if self.switcher.get(self.case) == 'nrt_std':
out = np.power(np.asarray(input_data / self.scale),1/4)
out = self.std.transform(out)
if self.switcher.get(self.case) == 'bc':
out = self.bc.transform(input_data + self.bias)
if self.switcher.get(self.case) == 'tan':
out = self.std.transform(input_data)
out = self.norm.transform(out)
out = np.tan(out / (2 * np.pi + self.bias))
return out
def inverse_transform(self, input_data):
if self.switcher.get(self.case) == 'min_std':
out = self.std.inverse_transform(input_data)
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'std2':
out = self.std.inverse_transform(input_data)
if self.switcher.get(self.case) == 'std_min':
out = self.norm.inverse_transform(input_data)
out = self.std.inverse_transform(out)
if self.switcher.get(self.case) == 'min':
out = self.norm.inverse_transform(input_data)
if self.switcher.get(self.case) == 'no':
out = input_data
if self.switcher.get(self.case) == 'log_min':
out = self.norm.inverse_transform(input_data)
out = (np.exp(-out) - self.bias) * self.scale
if self.switcher.get(self.case) == 'log_std':
out = self.std.inverse_transform(input_data)
out = (np.exp(-out) - self.bias) * self.scale
if self.switcher.get(self.case) == 'log2':
out = self.std.inverse_transform(input_data)
out = np.exp(out) - self.bias
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'sqrt_std':
out = self.std.inverse_transform(input_data)
out = np.power(out,2) * self.scale
out = self.norm.inverse_transform(out)
if self.switcher.get(self.case) == 'cbrt_std':
out = self.std.inverse_transform(input_data)
out = np.power(out,3) * self.scale
if self.switcher.get(self.case) == 'nrt_std':
out = self.std.inverse_transform(input_data)
out = np.power(out,4) * self.scale
if self.switcher.get(self.case) == 'bc':
out = self.bc.inverse_transform(input_data)
out = out - self.bias
if self.switcher.get(self.case) == 'tan':
out = (2 * np.pi + self.bias) * np.arctan(input_data)
out = self.norm.inverse_transform(out)
out = self.std.inverse_transform(out)
return out
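# usage sketch (illustrative, not part of the pipeline): fit_transform on the
# training labels; inverse_transform maps predictions back to physical units
_sc = data_scaler()
_y_raw = np.random.rand(5, 2) * 1e4                     # stand-in label columns
_y_scaled = _sc.fit_transform(_y_raw, 'cbrt_std')
assert np.allclose(_sc.inverse_transform(_y_scaled), _y_raw)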
def read_h5_data(fileName, input_features, labels):
df = pd.read_hdf(fileName)
# df = df[df['f']<0.45]
input_df=df[input_features]
in_scaler = data_scaler()
input_np = in_scaler.fit_transform(input_df.values,'no')
label_df=df[labels].clip(0)
# if 'PVs' in labels:
# label_df['PVs']=np.log(label_df['PVs']+1)
out_scaler = data_scaler()
label_np = out_scaler.fit_transform(label_df.values,'cbrt_std')
return input_np, label_np, df, in_scaler, out_scaler
In [11]:
%matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# define the labels
col_labels=['C2H3', 'C2H6', 'CH2', 'H2CN', 'C2H4', 'H2O2', 'C2H', 'CN',
'heatRelease', 'NCO', 'NNH', 'N2', 'AR', 'psi', 'CO', 'CH4', 'HNCO',
'CH2OH', 'HCCO', 'CH2CO', 'CH', 'mu', 'C2H2', 'C2H5', 'H2', 'T', 'PVs',
'O', 'O2', 'N2O', 'C', 'C3H7', 'CH2(S)', 'NH3', 'HO2', 'NO', 'HCO',
'NO2', 'OH', 'HCNO', 'CH3CHO', 'CH3', 'NH', 'alpha', 'CH3O', 'CO2',
'CH3OH', 'CH2CHO', 'CH2O', 'C3H8', 'HNO', 'NH2', 'HCN', 'H', 'N', 'H2O',
'HCCOH', 'HCNN']
# drop columns that are identically zero in the table
col_labels.remove('AR')
col_labels.remove('heatRelease')
# labels = ['CH4','O2','H2O','CO','CO2','T','PVs','psi','mu','alpha']
# labels = ['T','PVs']
labels = ['CO']
# labels = ['T','CH4','O2','CO2','CO','H2O','H2','OH','psi']
# labels = ['CH2OH','HNCO','CH3OH', 'CH2CHO', 'CH2O', 'C3H8', 'HNO', 'NH2', 'HCN']
# labels = np.random.choice(col_labels,20,replace=False).tolist()
# labels.append('PVs')
# labels = col_labels
print(labels)
input_features=['f','pv','zeta']
# read in the data
x_input, y_label, df, in_scaler, out_scaler = read_h5_data('./data/tables_of_fgm.h5', input_features=input_features, labels=labels)
In [12]:
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.models import Model
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint
# split into train and test data
x_train, x_test, y_train, y_test = train_test_split(x_input,y_label, test_size=0.01)
n_neuron = 10
scale=3
branches=3
# %%
print('set up ANN')
# ANN parameters
dim_input = x_train.shape[1]
dim_label = y_train.shape[1]
batch_norm = False
# This returns a tensor
inputs = Input(shape=(dim_input,),name='input_1')
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
# with fewer than 2 res_blocks the predictions show noticeable variance
x = res_block(x, scale, n_neuron, stage=1, block='a', bn=batch_norm,branches=branches)
x = res_block(x, scale, n_neuron, stage=1, block='b', bn=batch_norm,branches=branches)
# x = res_block(x, scale, n_neuron, stage=1, block='c', bn=batch_norm,branches=branches)
x = Dense(100, activation='relu')(x)
x = Dropout(0.1)(x)
predictions = Dense(dim_label, activation='linear', name='output_1')(x)
model = Model(inputs=inputs, outputs=predictions)
model.summary()
In [13]:
import keras.backend as K
from keras.callbacks import LearningRateScheduler
import math
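# cubic_loss (kept for experiments, unused by default): weighting the squared
# error by |error| puts even more focus on large-magnitude samples than mse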
def cubic_loss(y_true, y_pred):
return K.mean(K.square(y_true - y_pred)*K.abs(y_true - y_pred), axis=-1)
def coeff_r2(y_true, y_pred):
from keras import backend as K
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def step_decay(epoch):
initial_lrate = 0.001
drop = 0.5
epochs_drop = 200.0
lrate = initial_lrate * math.pow(drop,math.floor((1+epoch)/epochs_drop))
return lrate
lrate = LearningRateScheduler(step_decay)
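A quick sanity check of the schedule (illustrative): the rate halves every 200 epochs.

for e in (0, 199, 200, 399):
    print(e, step_decay(e))   # 0.001, 0.0005, 0.0005, 0.00025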
In [14]:
from keras import optimizers
batch_size = 1024*32
epochs = 100
vsplit = 0.1
loss_type='mse'
adam_op = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,epsilon=1e-8, decay=0.0, amsgrad=True)
model.compile(loss=loss_type, optimizer=adam_op, metrics=[coeff_r2])
# model.compile(loss=cubic_loss, optimizer=adam_op, metrics=['accuracy'])
# checkpoint (save the best model based on validation loss)
!mkdir -p ./tmp
filepath = "./tmp/weights.best.cntk.hdf5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=20)
# callbacks_list = [checkpoint]
callbacks_list = [lrate]
# fit the model
history = model.fit(
x_train, y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=vsplit,
verbose=2,
# callbacks=callbacks_list,
shuffle=True)
model.save('trained_fgm_nn.h5')
In [15]:
fig = plt.figure()
plt.semilogy(history.history['loss'])
if vsplit:
plt.semilogy(history.history['val_loss'])
plt.title(loss_type)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
In [8]:
#@title import plotly
import plotly.plotly as py
import numpy as np
from plotly.offline import init_notebook_mode, iplot
# from plotly.graph_objs import Contours, Histogram2dContour, Marker, Scatter
import plotly.graph_objs as go
def configure_plotly_browser_state():
import IPython
display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
<script>
requirejs.config({
paths: {
base: '/static/base',
plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
},
});
</script>
'''))
In [17]:
from sklearn.metrics import r2_score
# model.load_weights("./tmp/weights.best.cntk.hdf5")
x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)
predict_val = model.predict(x_test,batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)
test_data=pd.concat([x_test_df,y_test_df],axis=1)
pred_data=pd.concat([x_test_df,predict_df],axis=1)
!rm -f sim_check.h5
test_data.to_hdf('sim_check.h5',key='test')
pred_data.to_hdf('sim_check.h5',key='pred')
df_test=pd.read_hdf('sim_check.h5',key='test')
df_pred=pd.read_hdf('sim_check.h5',key='pred')
zeta_level=list(set(df_test['zeta']))
zeta_level.sort()
res_sum=pd.DataFrame()
r2s=[]
r2s_i=[]
names=[]
maxs_0=[]
maxs_9=[]
for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns):
names.append(name)
r2s.append(r2)
maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max())
maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max())
for i in zeta_level:
        # r2_score expects (y_true, y_pred)
        r2s_i.append(r2_score(df_test[df_test['zeta']==i][name],
                              df_pred[df_pred['zeta']==i][name]))
res_sum['name']=names
# res_sum['max_0']=maxs_0
# res_sum['max_9']=maxs_9
res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)]
# res_sum['r2']=r2s
tmp=np.asarray(r2s_i).reshape(-1,len(zeta_level))  # one column per zeta level
for idx,z in enumerate(zeta_level):
res_sum['r2s_'+str(z)]=tmp[:,idx]
res_sum[3:]
Out[17]:
In [16]:
scaler_std2=res_sum[3:]
scaler_std2
Out[16]:
In [18]:
scaler_cbrt_std=res_sum[3:]
scaler_cbrt_std
Out[18]:
In [40]:
scaler_tb=pd.concat([scaler_std2,scaler_cbrt_std],axis=0)
scaler_tb=scaler_tb.drop(['z_scale','name'],axis=1)
scaler_tb
Out[40]:
In [41]:
# to_latex writes directly to the path; no separate file handle is needed
scaler_tb.to_latex('tb.txt')
In [14]:
#@title Default title text
# species = np.random.choice(labels)
species = 'T' #@param {type:"string"}
z_level = 0 #@param {type:"integer"}
# configure_plotly_browser_state()
# init_notebook_mode(connected=False)
from sklearn.metrics import r2_score
df_t=df_test[df_test['zeta']==zeta_level[z_level]].sample(frac=1)
# df_p=df_pred.loc[df_pred['zeta']==zeta_level[1]].sample(frac=0.1)
df_p=df_pred.loc[df_t.index]
error=df_p[species]-df_t[species]
r2=round(r2_score(df_t[species],df_p[species]),4)  # (y_true, y_pred)
print(species,'r2:',r2,'max:',df_t[species].max())
fig_db = {
'data': [
{'name':'test data from table',
'x': df_t['f'],
'y': df_t['pv'],
'z': df_t[species],
'type':'scatter3d',
'mode': 'markers',
'marker':{
'size':1
}
},
{'name':'prediction from neural networks',
'x': df_p['f'],
'y': df_p['pv'],
'z': df_p[species],
'type':'scatter3d',
'mode': 'markers',
'marker':{
'size':1
},
},
{'name':'prediction error',
'x': df_p['f'],
'y': df_p['pv'],
'z': error,
'type':'scatter3d',
'mode': 'markers',
'marker':{
'size':1
},
}
],
'layout': {
'scene':{
'xaxis': {'title':'mixture fraction'},
'yaxis': {'title':'progress variable'},
'zaxis': {'title': species+'_r2:'+str(r2)}
}
}
}
# iplot(fig_db, filename='multiple-scatter')
iplot(fig_db)
In [56]:
model.save('trained_fgm_nn.h5')
In [99]:
model.save('trained_fgm_nn.h5')
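# k2tf.py (helper script, not shown in this notebook) converts the saved Keras
# HDF5 model into a frozen TensorFlow graph (.pb) for use outside Python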
%run -i k2tf.py --input_model='trained_fgm_nn.h5' --output_model='exported/fgm.pb'
In [114]:
from keras.models import Model
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint
n_neuron = 50
# %%
print('set up student network')
# ANN parameters
dim_input = x_train.shape[1]
dim_label = y_train.shape[1]
batch_norm = False
# This returns a tensor
inputs = Input(shape=(dim_input,),name='input_1')
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu',name='l1')(inputs)
x = Dense(n_neuron, activation='relu',name='l2')(x)
x = Dropout(0.1)(x)
predictions = Dense(dim_label, activation='linear', name='output_1')(x)
student_model = Model(inputs=inputs, outputs=predictions)
student_model.summary()
In [115]:
batch_size = 1024*32
epochs = 60
vsplit = 0.1
loss_type='mse'
adam_op = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,epsilon=1e-8, decay=0.0, amsgrad=True)
student_model.compile(loss=loss_type, optimizer=adam_op, metrics=[coeff_r2])
# model.compile(loss=cubic_loss, optimizer=adam_op, metrics=['accuracy'])
# checkpoint (save the best model based on validation loss)
!mkdir -p ./tmp
filepath = "./tmp/student_weights.best.cntk.hdf5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=20)
# callbacks_list = [checkpoint]
callbacks_list = [lrate]
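# knowledge distillation: the student is trained on the teacher's predictions
# rather than on the original table labels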
x_train_teacher = x_train
y_train_teacher = model.predict(x_train, batch_size=1024*8)
# fit the model
history = student_model.fit(
x_train_teacher, y_train_teacher,
epochs=epochs,
batch_size=batch_size,
validation_split=vsplit,
verbose=2,
# callbacks=callbacks_list,
shuffle=True)
In [13]:
from sklearn.metrics import r2_score
x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features)
predict_val = student_model.predict(x_test,batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)
pred_data=pd.concat([x_test_df,predict_df],axis=1)
!rm -f sim_check.h5
pred_data.to_hdf('sim_check.h5',key='pred')
df_pred=pd.read_hdf('sim_check.h5',key='pred')
zeta_level=list(set(df_test['zeta']))
zeta_level.sort()
res_sum=pd.DataFrame()
r2s=[]
r2s_i=[]
names=[]
maxs_0=[]
maxs_9=[]
for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns):
names.append(name)
r2s.append(r2)
maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max())
maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max())
for i in zeta_level:
        # r2_score expects (y_true, y_pred)
        r2s_i.append(r2_score(df_test[df_test['zeta']==i][name],
                              df_pred[df_pred['zeta']==i][name]))
res_sum['name']=names
# res_sum['max_0']=maxs_0
# res_sum['max_9']=maxs_9
res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)]
# res_sum['r2']=r2s
tmp=np.asarray(r2s_i).reshape(-1,len(zeta_level))  # one column per zeta level
for idx,z in enumerate(zeta_level):
res_sum['r2s_'+str(z)]=tmp[:,idx]
res_sum[3:]
Out[13]:
In [71]:
import h5py
!rm -f student_model_weights.h5
student_model.save('student_model_weights.h5')
f = h5py.File('student_model_weights.h5','r')
dset=f['model_weights']
list(dset)
Out[71]:
In [92]:
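# NOTE: the group names below ('dense_5', 'dense_6', 'output_1_2') were read off
# list(dset) for one particular run; they change whenever the model is rebuilt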
l1_w=dset['dense_5']['dense_5']['kernel:0'][:]
l1_b=dset['dense_5']['dense_5']['bias:0'][:]
l1_c=np.vstack([l1_w,l1_b])
l1_c=pd.Series(list(l1_c)).to_json()
l2_w=dset['dense_6']['dense_6']['kernel:0'][:]
l2_b=dset['dense_6']['dense_6']['bias:0'][:]
l2_c=np.vstack([l2_w,l2_b])
l2_c=pd.Series(list(l2_c)).to_json()
l3_w=dset['output_1']['output_1_2']['kernel:0'][:]
l3_b=dset['output_1']['output_1_2']['bias:0'][:]
l3_c=np.vstack([l3_w,l3_b])
l3_c=pd.Series(list(l3_c)).to_json()
In [94]:
!rm -f data.json
# assemble the three JSON'd layers into a single file
with open('data.json', 'w') as f:
    print("{", file=f)
    print('"l1":', l1_c, file=f)
    print(',"l2":', l2_c, file=f)
    print(',"output":', l3_c, file=f)
    print("}", file=f)
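To verify the export, a minimal numpy read-back of this layout (a sketch assuming the row-indexed JSON structure written above; dropout is inactive at inference):

import json
import numpy as np

with open('data.json') as f:
    blob = json.load(f)

def unpack(layer):
    wb = np.asarray([layer[str(i)] for i in range(len(layer))])  # rows of [W; b]
    return wb[:-1], wb[-1]

def np_forward(x, blob):
    W1, b1 = unpack(blob['l1'])
    W2, b2 = unpack(blob['l2'])
    W3, b3 = unpack(blob['output'])
    h = np.maximum(x @ W1 + b1, 0)   # relu
    h = np.maximum(h @ W2 + b2, 0)   # relu
    return h @ W3 + b3

# should closely match student_model.predict on the same point
print(np_forward(np.asarray([[0.5, 0.1, 0.1]]), blob))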
In [113]:
test_id=888
print(x_test[test_id])
print(student_model.predict(x_test[test_id].reshape(-1,3)))
print(y_test[test_id])
In [90]:
l1_b
Out[90]:
In [89]:
np.vstack([l1_w,l1_b])
Out[89]:
In [110]:
student_model.predict(np.asarray([0.5,0.1,0.1]).reshape(-1,3))
Out[110]:
In [106]:
student_model.save_weights('student_weights.h5')
In [ ]: