In [1]:
from keras.models import load_model
import pandas as pd
In [2]:
import keras.backend as K
from keras.callbacks import LearningRateScheduler
from keras.callbacks import Callback
import math
import numpy as np
def coeff_r2(y_true, y_pred):
    '''R^2 (coefficient of determination) metric for Keras.'''
    from keras import backend as K
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
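# Quick sanity check (illustrative addition): a plain-NumPy mirror of the
# coeff_r2 metric above; `eps` stands in for K.epsilon().
def coeff_r2_np(y_true, y_pred, eps=1e-7):
    ss_res = np.sum(np.square(y_true - y_pred))
    ss_tot = np.sum(np.square(y_true - np.mean(y_true)))
    return 1 - ss_res / (ss_tot + eps)

print(coeff_r2_np(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0])))  # ~1.0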
In [3]:
model = load_model('./FPV_ANN_tabulated_Standard_4Res_500n.H5')
# model = load_model('../tmp/large_next.h5',custom_objects={'coeff_r2':coeff_r2})
# model = load_model('../tmp/calc_100_3_3_cbrt.h5', custom_objects={'coeff_r2':coeff_r2})
model.summary()
In [4]:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler  # MaxAbsScaler is needed by the 'tan' case below
class data_scaler(object):
    def __init__(self):
        self.norm = None
        self.norm_1 = None
        self.std = None
        self.case = None
        self.scale = 1
        self.bias = 1e-20
        # self.bias = 1

        self.switcher = {
            'min_std': 'min_std',
            'std2': 'std2',
            'std_min': 'std_min',
            'min': 'min',
            'no': 'no',
            'log': 'log',
            'log_min': 'log_min',
            'log_std': 'log_std',
            'log2': 'log2',
            'sqrt_std': 'sqrt_std',
            'cbrt_std': 'cbrt_std',
            'nrt_std': 'nrt_std',
            'tan': 'tan'
        }

    def fit_transform(self, input_data, case):
        self.case = case
        if self.switcher.get(self.case) == 'min_std':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = self.norm.fit_transform(input_data)
            out = self.std.fit_transform(out)
        elif self.switcher.get(self.case) == 'std2':
            self.std = StandardScaler()
            out = self.std.fit_transform(input_data)
        elif self.switcher.get(self.case) == 'std_min':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = self.std.fit_transform(input_data)
            out = self.norm.fit_transform(out)
        elif self.switcher.get(self.case) == 'min':
            self.norm = MinMaxScaler()
            out = self.norm.fit_transform(input_data)
        elif self.switcher.get(self.case) == 'no':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = input_data
        elif self.switcher.get(self.case) == 'log_min':
            out = -np.log(np.asarray(input_data / self.scale) + self.bias)
            self.norm = MinMaxScaler()
            out = self.norm.fit_transform(out)
        elif self.switcher.get(self.case) == 'log_std':
            out = -np.log(np.asarray(input_data / self.scale) + self.bias)
            self.std = StandardScaler()
            out = self.std.fit_transform(out)
        elif self.switcher.get(self.case) == 'log2':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = self.norm.fit_transform(input_data)
            out = np.log(np.asarray(out) + self.bias)
            out = self.std.fit_transform(out)
        elif self.switcher.get(self.case) == 'sqrt_std':
            out = np.sqrt(np.asarray(input_data / self.scale))
            self.std = StandardScaler()
            out = self.std.fit_transform(out)
        elif self.switcher.get(self.case) == 'cbrt_std':
            out = np.cbrt(np.asarray(input_data / self.scale))
            self.std = StandardScaler()
            out = self.std.fit_transform(out)
        elif self.switcher.get(self.case) == 'nrt_std':
            out = np.power(np.asarray(input_data / self.scale), 1/4)
            self.std = StandardScaler()
            out = self.std.fit_transform(out)
        elif self.switcher.get(self.case) == 'tan':
            self.norm = MaxAbsScaler()
            self.std = StandardScaler()
            out = self.std.fit_transform(input_data)
            out = self.norm.fit_transform(out)
            out = np.tan(out / (2 * np.pi + self.bias))
        else:
            raise ValueError('no transform implemented for case: %s' % case)
        return out

    def transform(self, input_data):
        if self.switcher.get(self.case) == 'min_std':
            out = self.norm.transform(input_data)
            out = self.std.transform(out)
        elif self.switcher.get(self.case) == 'std2':
            out = self.std.transform(input_data)
        elif self.switcher.get(self.case) == 'std_min':
            out = self.std.transform(input_data)
            out = self.norm.transform(out)
        elif self.switcher.get(self.case) == 'min':
            out = self.norm.transform(input_data)
        elif self.switcher.get(self.case) == 'no':
            out = input_data
        elif self.switcher.get(self.case) == 'log_min':
            out = -np.log(np.asarray(input_data / self.scale) + self.bias)
            out = self.norm.transform(out)
        elif self.switcher.get(self.case) == 'log_std':
            out = -np.log(np.asarray(input_data / self.scale) + self.bias)
            out = self.std.transform(out)
        elif self.switcher.get(self.case) == 'log2':
            out = self.norm.transform(input_data)
            out = np.log(np.asarray(out) + self.bias)
            out = self.std.transform(out)
        elif self.switcher.get(self.case) == 'sqrt_std':
            out = np.sqrt(np.asarray(input_data / self.scale))
            out = self.std.transform(out)
        elif self.switcher.get(self.case) == 'cbrt_std':
            out = np.cbrt(np.asarray(input_data / self.scale))
            out = self.std.transform(out)
        elif self.switcher.get(self.case) == 'nrt_std':
            out = np.power(np.asarray(input_data / self.scale), 1/4)
            out = self.std.transform(out)
        elif self.switcher.get(self.case) == 'tan':
            out = self.std.transform(input_data)
            out = self.norm.transform(out)
            out = np.tan(out / (2 * np.pi + self.bias))
        else:
            raise ValueError('no transform implemented for case: %s' % self.case)
        return out

    def inverse_transform(self, input_data):
        if self.switcher.get(self.case) == 'min_std':
            out = self.std.inverse_transform(input_data)
            out = self.norm.inverse_transform(out)
        elif self.switcher.get(self.case) == 'std2':
            out = self.std.inverse_transform(input_data)
        elif self.switcher.get(self.case) == 'std_min':
            out = self.norm.inverse_transform(input_data)
            out = self.std.inverse_transform(out)
        elif self.switcher.get(self.case) == 'min':
            out = self.norm.inverse_transform(input_data)
        elif self.switcher.get(self.case) == 'no':
            out = input_data
        elif self.switcher.get(self.case) == 'log_min':
            out = self.norm.inverse_transform(input_data)
            out = (np.exp(-out) - self.bias) * self.scale
        elif self.switcher.get(self.case) == 'log_std':
            out = self.std.inverse_transform(input_data)
            out = (np.exp(-out) - self.bias) * self.scale
        elif self.switcher.get(self.case) == 'log2':
            out = self.std.inverse_transform(input_data)
            out = np.exp(out) - self.bias
            out = self.norm.inverse_transform(out)
        elif self.switcher.get(self.case) == 'sqrt_std':
            out = self.std.inverse_transform(input_data)
            out = np.power(out, 2) * self.scale
        elif self.switcher.get(self.case) == 'cbrt_std':
            out = self.std.inverse_transform(input_data)
            out = np.power(out, 3) * self.scale
        elif self.switcher.get(self.case) == 'nrt_std':
            out = self.std.inverse_transform(input_data)
            out = np.power(out, 4) * self.scale
        elif self.switcher.get(self.case) == 'tan':
            out = (2 * np.pi + self.bias) * np.arctan(input_data)
            out = self.norm.inverse_transform(out)
            out = self.std.inverse_transform(out)
        else:
            raise ValueError('no inverse transform implemented for case: %s' % self.case)
        return out
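# Quick round-trip sanity check (illustrative addition, synthetic data):
# inverse_transform should undo fit_transform for a given case, e.g. 'cbrt_std'.
_demo = np.random.rand(100, 3) + 1e-6
_demo_sc = data_scaler()
_demo_out = _demo_sc.fit_transform(_demo, 'cbrt_std')
print(np.allclose(_demo_sc.inverse_transform(_demo_out), _demo))  # expect True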
def read_h5_data(fileName, input_features, labels):
    df = pd.read_hdf(fileName)
    # df = df[df['f']<0.45]

    # for i in range(5):
    #     pv_101 = df[df['pv']==1]
    #     pv_101['pv'] = pv_101['pv'] + 0.002*(i+1)
    #     df = pd.concat([df, pv_101])

    input_df = df[input_features]
    in_scaler = data_scaler()
    input_np = in_scaler.fit_transform(input_df.values, 'std2')

    label_df = df[labels].clip(0)
    # if 'PVs' in labels:
    #     label_df['PVs'] = np.log(label_df['PVs'] + 1)
    out_scaler = data_scaler()
    label_np = out_scaler.fit_transform(label_df.values, 'cbrt_std')

    return input_np, label_np, df, in_scaler, out_scaler
In [5]:
# labels = ['CH4','O2','H2O','CO','CO2','T','PVs','psi','mu','alpha']
# labels = ['T','PVs']
# labels = ['T','CH4','O2','CO2','CO','H2O','H2','OH','psi']
# labels = ['CH2OH','HNCO','CH3OH', 'CH2CHO', 'CH2O', 'C3H8', 'HNO', 'NH2', 'HCN']
# labels = np.random.choice(col_labels,20,replace=False).tolist()
# labels.append('PVs')
# labels = col_labels
# labels= ['CH4', 'CH2O', 'CH3O', 'H', 'O2', 'H2', 'O', 'OH', 'H2O', 'HO2', 'H2O2',
# 'C', 'CH', 'CH2', 'CH2(S)', 'CH3', 'CO', 'CO2', 'HCO', 'CH2OH', 'CH3OH',
# 'C2H', 'C2H2', 'C2H3', 'C2H4', 'C2H5', 'C2H6', 'HCCO', 'CH2CO', 'HCCOH',
# 'N', 'NH', 'NH2', 'NH3', 'NNH', 'NO', 'NO2', 'N2O', 'HNO', 'CN', 'HCN',
# 'H2CN', 'HCNN', 'HCNO', 'HNCO', 'NCO', 'N2', 'AR', 'C3H7', 'C3H8', 'CH2CHO', 'CH3CHO', 'T', 'PVs']
# labels.remove('AR')
# labels.remove('N2')
labels = ['H2', 'H', 'O', 'O2', 'OH', 'H2O', 'HO2', 'CH3', 'CH4', 'CO', 'CO2', 'CH2O', 'N2', 'T', 'PVs']
print(labels)
input_features=['f','zeta','pv']
# read in the data
x_input, y_label, df, in_scaler, out_scaler = read_h5_data('../data/tables_of_fgm_psi.h5',input_features=input_features, labels = labels)
In [6]:
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_input, y_label, test_size=0.01)

x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test), columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test), columns=labels)

predict_val = model.predict(x_test, batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

test_data = pd.concat([x_test_df, y_test_df], axis=1)
pred_data = pd.concat([x_test_df, predict_df], axis=1)

!rm -f sim_check.h5
test_data.to_hdf('sim_check.h5', key='test')
pred_data.to_hdf('sim_check.h5', key='pred')

df_test = pd.read_hdf('sim_check.h5', key='test')
df_pred = pd.read_hdf('sim_check.h5', key='pred')

zeta_level = sorted(set(df_test['zeta']))

res_sum = pd.DataFrame()
r2s = []
r2s_i = []
names = []
maxs_0 = []
maxs_9 = []

# note: r2_score expects (y_true, y_pred), so the table data comes first
for r2, name in zip(r2_score(df_test, df_pred, multioutput='raw_values'), df_test.columns):
    names.append(name)
    r2s.append(r2)
    maxs_0.append(df_test[df_test['zeta'] == zeta_level[0]][name].max())
    maxs_9.append(df_test[df_test['zeta'] == zeta_level[8]][name].max())
    for i in zeta_level:
        r2s_i.append(r2_score(df_test[df_test['zeta'] == i][name],
                              df_pred[df_pred['zeta'] == i][name]))

res_sum['name'] = names
# res_sum['max_0'] = maxs_0
# res_sum['max_9'] = maxs_9
res_sum['z_scale'] = [m_9/(m_0 + 1e-20) for m_9, m_0 in zip(maxs_9, maxs_0)]
# res_sum['r2'] = r2s
tmp = np.asarray(r2s_i).reshape(-1, len(zeta_level))
for idx, z in enumerate(zeta_level):
    res_sum['r2s_' + str(z)] = tmp[:, idx]
res_sum[3:]
Out[6]:
In [7]:
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_input, y_label, test_size=0.01)

x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test), columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test), columns=labels)

# same evaluation as above, but for the student network
# (student_model is built and trained in later cells; this cell was re-run
# after training, so the notebook's execution order is non-linear)
predict_val = student_model.predict(x_test, batch_size=1024*8)
# predict_val = model.predict(x_test, batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

test_data = pd.concat([x_test_df, y_test_df], axis=1)
pred_data = pd.concat([x_test_df, predict_df], axis=1)

!rm -f sim_check.h5
test_data.to_hdf('sim_check.h5', key='test')
pred_data.to_hdf('sim_check.h5', key='pred')

df_test = pd.read_hdf('sim_check.h5', key='test')
df_pred = pd.read_hdf('sim_check.h5', key='pred')

zeta_level = sorted(set(df_test['zeta']))

res_sum = pd.DataFrame()
r2s = []
r2s_i = []
names = []
maxs_0 = []
maxs_9 = []

# note: r2_score expects (y_true, y_pred), so the table data comes first
for r2, name in zip(r2_score(df_test, df_pred, multioutput='raw_values'), df_test.columns):
    names.append(name)
    r2s.append(r2)
    maxs_0.append(df_test[df_test['zeta'] == zeta_level[0]][name].max())
    maxs_9.append(df_test[df_test['zeta'] == zeta_level[8]][name].max())
    for i in zeta_level:
        r2s_i.append(r2_score(df_test[df_test['zeta'] == i][name],
                              df_pred[df_pred['zeta'] == i][name]))

res_sum['name'] = names
# res_sum['max_0'] = maxs_0
# res_sum['max_9'] = maxs_9
res_sum['z_scale'] = [m_9/(m_0 + 1e-20) for m_9, m_0 in zip(maxs_9, maxs_0)]
# res_sum['r2'] = r2s
tmp = np.asarray(r2s_i).reshape(-1, len(zeta_level))
for idx, z in enumerate(zeta_level):
    res_sum['r2s_' + str(z)] = tmp[:, idx]
res_sum[3:]
In [7]:
#@title import plotly
import plotly.plotly as py
import numpy as np
from plotly.offline import init_notebook_mode, iplot
# from plotly.graph_objs import Contours, Histogram2dContour, Marker, Scatter
import plotly.graph_objs as go
def configure_plotly_browser_state():
    import IPython
    display(IPython.core.display.HTML('''
        <script src="/static/components/requirejs/require.js"></script>
        <script>
          requirejs.config({
            paths: {
              base: '/static/base',
              plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
            },
          });
        </script>
        '''))
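# Usage note: in Colab, configure_plotly_browser_state() (together with
# init_notebook_mode(connected=False)) must be re-run in each cell that draws
# an offline plotly figure; in a local Jupyter session both calls can be skipped.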
In [11]:
#@title Default title text
# species = np.random.choice(labels)
species = 'HNO' #@param {type:"string"}
z_level = 0 #@param {type:"integer"}
# note: species must be one of the columns in `labels` for the current model
# configure_plotly_browser_state()
# init_notebook_mode(connected=False)

from sklearn.metrics import r2_score

df_t = df_test[df_test['zeta'] == zeta_level[z_level]].sample(frac=1)
# df_p = df_pred.loc[df_pred['zeta']==zeta_level[1]].sample(frac=0.1)
df_p = df_pred.loc[df_t.index]

# error = (df_p[species] - df_t[species])
error = (df_p[species] - df_t[species]) / (df_p[species] + df_t[species])
r2 = round(r2_score(df_t[species], df_p[species]), 4)  # (y_true, y_pred)
print(species, 'r2:', r2, 'max:', df_t[species].max())

fig_db = {
    'data': [
        {'name': 'test data from table',
         'x': df_t['f'],
         'y': df_t['pv'],
         'z': df_t[species],
         'type': 'scatter3d',
         'mode': 'markers',
         'marker': {'size': 1}
         },
        {'name': 'prediction from neural networks',
         'x': df_p['f'],
         'y': df_p['pv'],
         'z': df_p[species],
         'type': 'scatter3d',
         'mode': 'markers',
         'marker': {'size': 1}
         },
        {'name': 'relative error',
         'x': df_p['f'],
         'y': df_p['pv'],
         'z': error,
         'type': 'scatter3d',
         'mode': 'markers',
         'marker': {'size': 1}
         }
    ],
    'layout': {
        'scene': {
            'xaxis': {'title': 'mixture fraction'},
            'yaxis': {'title': 'progress variable'},
            'zaxis': {'title': species + '_r2:' + str(r2)}
        }
    }
}

# iplot(fig_db, filename='multiple-scatter')
iplot(fig_db)
In [18]:
%matplotlib inline
import matplotlib.pyplot as plt
z = 0.22
sp = 'HNO'
# plot one species along f at fixed zeta; both filters must use the same pv
# level so that x and y have matching lengths (the original mixed pv==1 and pv==0.9)
pv = 0.9
plt.plot(df[(df.pv == pv) & (df.zeta == z)]['f'], df[(df.pv == pv) & (df.zeta == z)][sp], 'rd')
Out[18]:
In [23]:
from keras.models import Model
from keras.layers import Dense, Input, Dropout
n_neuron = 100
# %%
print('set up student network')
# ANN parameters
dim_input = x_train.shape[1]
dim_label = y_train.shape[1]
batch_norm = False
# This returns a tensor
inputs = Input(shape=(dim_input,),name='input_1')
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
x = Dense(n_neuron, activation='relu')(x)
x = Dense(n_neuron, activation='relu')(x)
# x = Dropout(0.1)(x)
predictions = Dense(dim_label, activation='linear', name='output_1')(x)
student_model = Model(inputs=inputs, outputs=predictions)
student_model.summary()
In [8]:
import keras.backend as K
from keras.callbacks import LearningRateScheduler, Callback
import math

def cubic_loss(y_true, y_pred):
    # L3-style loss: squared error weighted by the absolute error
    return K.mean(K.square(y_true - y_pred) * K.abs(y_true - y_pred), axis=-1)

def coeff_r2(y_true, y_pred):
    from keras import backend as K
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())

def step_decay(epoch):
    # halve the learning rate every 1000 epochs, starting from 0.002
    initial_lrate = 0.002
    drop = 0.5
    epochs_drop = 1000.0
    lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return lrate

lrate = LearningRateScheduler(step_decay)
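# Illustrative check of the step schedule: the rate halves every 1000 epochs.
for _e in (0, 500, 1500, 2500):
    print(_e, step_decay(_e))  # 0.002, 0.002, 0.001, 0.0005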
class SGDRScheduler(Callback):
    '''Cosine annealing learning rate scheduler with periodic restarts.

    # Usage
        ```python
        schedule = SGDRScheduler(min_lr=1e-5,
                                 max_lr=1e-2,
                                 steps_per_epoch=np.ceil(epoch_size/batch_size),
                                 lr_decay=0.9,
                                 cycle_length=5,
                                 mult_factor=1.5)
        model.fit(X_train, Y_train, epochs=100, callbacks=[schedule])
        ```

    # Arguments
        min_lr: The lower bound of the learning rate range for the experiment.
        max_lr: The upper bound of the learning rate range for the experiment.
        steps_per_epoch: Number of mini-batches in the dataset. Calculated as `np.ceil(epoch_size/batch_size)`.
        lr_decay: Reduce the max_lr after the completion of each cycle.
                  Ex. To reduce the max_lr by 20% after each cycle, set this value to 0.8.
        cycle_length: Initial number of epochs in a cycle.
        mult_factor: Scale epochs_to_restart after each full cycle completion.

    # References
        Blog post: jeremyjordan.me/nn-learning-rate
        Original paper: http://arxiv.org/abs/1608.03983
    '''

    def __init__(self,
                 min_lr,
                 max_lr,
                 steps_per_epoch,
                 lr_decay=1,
                 cycle_length=10,
                 mult_factor=2):
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.lr_decay = lr_decay
        self.batch_since_restart = 0
        self.next_restart = cycle_length
        self.steps_per_epoch = steps_per_epoch
        self.cycle_length = cycle_length
        self.mult_factor = mult_factor
        self.best_weights = None
        self.history = {}

    def clr(self):
        '''Calculate the learning rate.'''
        fraction_to_restart = self.batch_since_restart / (self.steps_per_epoch * self.cycle_length)
        lr = self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
        return lr

    def on_train_begin(self, logs={}):
        '''Initialize the learning rate to the maximum value at the start of training.'''
        logs = logs or {}
        K.set_value(self.model.optimizer.lr, self.max_lr)

    def on_batch_end(self, batch, logs={}):
        '''Record previous batch statistics and update the learning rate.'''
        logs = logs or {}
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        self.batch_since_restart += 1
        K.set_value(self.model.optimizer.lr, self.clr())

    def on_epoch_end(self, epoch, logs={}):
        '''Check for end of current cycle, apply restarts when necessary.'''
        if epoch + 1 == self.next_restart:
            self.batch_since_restart = 0
            self.cycle_length = np.ceil(self.cycle_length * self.mult_factor)
            self.next_restart += self.cycle_length
            self.max_lr *= self.lr_decay
            self.best_weights = self.model.get_weights()

    def on_train_end(self, logs={}):
        '''Restore the weights saved at the end of the most recent cycle, if any.'''
        if self.best_weights is not None:
            self.model.set_weights(self.best_weights)
In [26]:
student_model = load_model('student.h5',custom_objects={'coeff_r2':coeff_r2})
In [13]:
model.summary()
In [9]:
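# Sample a dense regular grid over the normalized (f, zeta, pv) input space
# (600 x 10 x 600 points) and flatten it into an (N, 3) matrix; the teacher
# network labels these points for student training below.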
gx,gy,gz=np.mgrid[0:1:600j,0:1:10j,0:1:600j]
gx=gx.reshape(-1,1)
gy=gy.reshape(-1,1)
gz=gz.reshape(-1,1)
gm=np.hstack([gx,gy,gz])
gm.shape
Out[9]:
In [36]:
from keras.callbacks import ModelCheckpoint
from keras import optimizers
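# Knowledge-distillation setup: the loaded FGM network acts as the teacher,
# labelling the dense (f, zeta, pv) grid built above; the smaller student
# network is then fit to those predictions.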
batch_size = 1024*16
epochs = 2000
vsplit = 0.1
loss_type='mse'
adam_op = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,epsilon=1e-8, decay=0.0, amsgrad=False)
student_model.compile(loss=loss_type,
                      # optimizer=adam_op,
                      optimizer='adam',
                      metrics=[coeff_r2])
# model.compile(loss=cubic_loss, optimizer=adam_op, metrics=['accuracy'])
# checkpoint (save the best model based on validation loss)
!mkdir -p ./tmp
filepath = "./tmp/student_weights.best.cntk.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             period=20)
epoch_size = x_train.shape[0]

# total epochs = sum of the SGDR cycle lengths base*clc**i, i = 0..4
a = 0
base = 2
clc = 2
for i in range(5):
    a += base * clc**i
print(a)  # 62
epochs, c_len = a, base
schedule = SGDRScheduler(min_lr=1e-5, max_lr=1e-4,
                         steps_per_epoch=np.ceil(epoch_size/batch_size),
                         cycle_length=c_len, lr_decay=0.8, mult_factor=2)
callbacks_list = [checkpoint]
# callbacks_list = [checkpoint, schedule]
x_train_teacher = in_scaler.transform(gm)
y_train_teacher = model.predict(x_train_teacher, batch_size=1024*8)
x_train, x_test, y_train, y_test = train_test_split(x_train_teacher,y_train_teacher, test_size=0.01)
# fit the model
history = student_model.fit(
    x_train, y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_split=vsplit,
    verbose=2,
    callbacks=callbacks_list,
    shuffle=True)
In [39]:
student_model.save('student_100_3.h5')
In [22]:
n_res = 501
pv_level = 0.996
f_1 = np.linspace(0,1,n_res)
z_1 = np.zeros(n_res)
pv_1 = np.ones(n_res)*pv_level
case_1 = np.vstack((f_1,z_1,pv_1))
# case_1 = np.vstack((pv_1,z_1,f_1))
case_1 = case_1.T
case_1.shape
Out[22]:
In [23]:
out=out_scaler.inverse_transform(model.predict(case_1))
out=pd.DataFrame(out,columns=labels)
sp='PVs'
out.head()
Out[23]:
In [24]:
table_val=df[(df.pv==pv_level) & (df.zeta==0)][sp]
table_val.shape
Out[24]:
In [25]:
import matplotlib.pyplot as plt
plt.plot(f_1, table_val)
plt.show()
Out[25]:
In [26]:
plt.plot(f_1, out[sp])
plt.show()
Out[26]:
In [27]:
df.head()
Out[27]:
In [201]:
# combine both conditions in a single boolean mask (chained indexing with two
# masks is unreliable); .copy() avoids SettingWithCopyWarning on the next cell
pv_101 = df[(df['pv'] == 1) & (df['zeta'] == 0)].copy()
In [202]:
pv_101['pv']=pv_101['pv']+0.01
In [204]:
a=pd.concat([pv_101,pv_101])
In [205]:
pv_101.shape
Out[205]:
In [206]:
a.shape
Out[206]:
In [207]:
a
Out[207]: