In [1]:
from keras.models import load_model
import pandas as pd


Using TensorFlow backend.

In [2]:
import keras.backend as K
from keras.callbacks import LearningRateScheduler
from keras.callbacks import Callback
import math
import numpy as np


def coeff_r2(y_true, y_pred):
    # Coefficient of determination (R^2) used as a Keras regression metric.
    from keras import backend as K
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())
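
# Illustrative sanity check (not part of the original notebook): on plain NumPy
# arrays the metric above should agree with sklearn.metrics.r2_score.
from sklearn.metrics import r2_score
_y_true = np.array([[0.0], [0.5], [1.0], [1.5]], dtype='float32')
_y_pred = np.array([[0.1], [0.4], [1.1], [1.4]], dtype='float32')
assert np.isclose(K.eval(coeff_r2(K.variable(_y_true), K.variable(_y_pred))),
                  r2_score(_y_true, _y_pred), atol=1e-5)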

In [3]:
model = load_model('./FPV_ANN_tabulated_Standard_4Res_500n.H5')
# model = load_model('../tmp/large_next.h5',custom_objects={'coeff_r2':coeff_r2})
# model = load_model('../tmp/calc_100_3_3_cbrt.h5', custom_objects={'coeff_r2':coeff_r2})
model.summary()


__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 3)            0                                            
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 500)          2000        input_1[0][0]                    
__________________________________________________________________________________________________
res1a_branch2a (Dense)          (None, 500)          250500      dense_1[0][0]                    
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 500)          0           res1a_branch2a[0][0]             
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 500)          0           activation_1[0][0]               
__________________________________________________________________________________________________
res1a_branch2b (Dense)          (None, 500)          250500      dropout_1[0][0]                  
__________________________________________________________________________________________________
add_1 (Add)                     (None, 500)          0           res1a_branch2b[0][0]             
                                                                 dense_1[0][0]                    
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 500)          0           add_1[0][0]                      
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 500)          0           activation_2[0][0]               
__________________________________________________________________________________________________
res1b_branch2a (Dense)          (None, 500)          250500      dropout_2[0][0]                  
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 500)          0           res1b_branch2a[0][0]             
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 500)          0           activation_3[0][0]               
__________________________________________________________________________________________________
res1b_branch2b (Dense)          (None, 500)          250500      dropout_3[0][0]                  
__________________________________________________________________________________________________
add_2 (Add)                     (None, 500)          0           res1b_branch2b[0][0]             
                                                                 dropout_2[0][0]                  
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 500)          0           add_2[0][0]                      
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 500)          0           activation_4[0][0]               
__________________________________________________________________________________________________
res1c_branch2a (Dense)          (None, 500)          250500      dropout_4[0][0]                  
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 500)          0           res1c_branch2a[0][0]             
__________________________________________________________________________________________________
dropout_5 (Dropout)             (None, 500)          0           activation_5[0][0]               
__________________________________________________________________________________________________
res1c_branch2b (Dense)          (None, 500)          250500      dropout_5[0][0]                  
__________________________________________________________________________________________________
add_3 (Add)                     (None, 500)          0           res1c_branch2b[0][0]             
                                                                 dropout_4[0][0]                  
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 500)          0           add_3[0][0]                      
__________________________________________________________________________________________________
dropout_6 (Dropout)             (None, 500)          0           activation_6[0][0]               
__________________________________________________________________________________________________
res1d_branch2a (Dense)          (None, 500)          250500      dropout_6[0][0]                  
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 500)          0           res1d_branch2a[0][0]             
__________________________________________________________________________________________________
dropout_7 (Dropout)             (None, 500)          0           activation_7[0][0]               
__________________________________________________________________________________________________
res1d_branch2b (Dense)          (None, 500)          250500      dropout_7[0][0]                  
__________________________________________________________________________________________________
add_4 (Add)                     (None, 500)          0           res1d_branch2b[0][0]             
                                                                 dropout_6[0][0]                  
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 500)          0           add_4[0][0]                      
__________________________________________________________________________________________________
dropout_8 (Dropout)             (None, 500)          0           activation_8[0][0]               
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 15)           7515        dropout_8[0][0]                  
==================================================================================================
Total params: 2,013,515
Trainable params: 2,013,515
Non-trainable params: 0
__________________________________________________________________________________________________
/home/eg/anaconda3/envs/my_dev/lib/python3.6/site-packages/keras/engine/saving.py:327: UserWarning: Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.
  warnings.warn('Error in loading the saved optimizer '

In [4]:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler  # MaxAbsScaler is needed by the 'tan' case below

class data_scaler(object):
    def __init__(self):
        self.norm = None
        self.norm_1 = None
        self.std = None
        self.case = None
        self.scale = 1
        self.bias = 1e-20
#         self.bias = 1


        self.switcher = {
            'min_std': 'min_std',
            'std2': 'std2',
            'std_min':'std_min',
            'min': 'min',
            'no':'no',
            'log': 'log',
            'log_min':'log_min',
            'log_std':'log_std',
            'log2': 'log2',
            'sqrt_std': 'sqrt_std',
            'cbrt_std': 'cbrt_std',
            'nrt_std':'nrt_std',
            'tan': 'tan'
        }

    def fit_transform(self, input_data, case):
        self.case = case
        if self.switcher.get(self.case) == 'min_std':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = self.norm.fit_transform(input_data)
            out = self.std.fit_transform(out)

        if self.switcher.get(self.case) == 'std2':
            self.std = StandardScaler()
            out = self.std.fit_transform(input_data)

        if self.switcher.get(self.case) == 'std_min':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = self.std.fit_transform(input_data)
            out = self.norm.fit_transform(out)

        if self.switcher.get(self.case) == 'min':
            self.norm = MinMaxScaler()
            out = self.norm.fit_transform(input_data)

        if self.switcher.get(self.case) == 'no':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = input_data

        if self.switcher.get(self.case) == 'log_min':
            out = - np.log(np.asarray(input_data / self.scale) + self.bias)
            self.norm = MinMaxScaler()
            out = self.norm.fit_transform(out)

        if self.switcher.get(self.case) == 'log_std':
            out = - np.log(np.asarray(input_data / self.scale) + self.bias)
            self.std = StandardScaler()
            out = self.std.fit_transform(out)

        if self.switcher.get(self.case) == 'log2':
            self.norm = MinMaxScaler()
            self.std = StandardScaler()
            out = self.norm.fit_transform(input_data)
            out = np.log(np.asarray(out) + self.bias)
            out = self.std.fit_transform(out)

        if self.switcher.get(self.case) == 'sqrt_std':
            out = np.sqrt(np.asarray(input_data / self.scale))
            self.std = StandardScaler()
            out = self.std.fit_transform(out)

        if self.switcher.get(self.case) == 'cbrt_std':
            out = np.cbrt(np.asarray(input_data / self.scale))
            self.std = StandardScaler()
            out = self.std.fit_transform(out)

        if self.switcher.get(self.case) == 'nrt_std':
            out = np.power(np.asarray(input_data / self.scale),1/4)
            self.std = StandardScaler()
            out = self.std.fit_transform(out)

        if self.switcher.get(self.case) == 'tan':
            self.norm = MaxAbsScaler()
            self.std = StandardScaler()
            out = self.std.fit_transform(input_data)
            out = self.norm.fit_transform(out)
            out = np.tan(out / (2 * np.pi + self.bias))

        return out

    def transform(self, input_data):
        if self.switcher.get(self.case) == 'min_std':
            out = self.norm.transform(input_data)
            out = self.std.transform(out)

        if self.switcher.get(self.case) == 'std2':
            out = self.std.transform(input_data)

        if self.switcher.get(self.case) == 'std_min':
            out = self.std.transform(input_data)
            out = self.norm.transform(out)

        if self.switcher.get(self.case) == 'min':
            out = self.norm.transform(input_data)

        if self.switcher.get(self.case) == 'no':
            out = input_data

        if self.switcher.get(self.case) == 'log_min':
            out = - np.log(np.asarray(input_data / self.scale) + self.bias)
            out = self.norm.transform(out)

        if self.switcher.get(self.case) == 'log_std':
            out = - np.log(np.asarray(input_data / self.scale) + self.bias)
            out = self.std.transform(out)

        if self.switcher.get(self.case) == 'log2':
            out = self.norm.transform(input_data)
            out = np.log(np.asarray(out) + self.bias)
            out = self.std.transform(out)

        if self.switcher.get(self.case) == 'sqrt_std':
            out = np.sqrt(np.asarray(input_data / self.scale))
            out = self.std.transform(out)

        if self.switcher.get(self.case) == 'cbrt_std':
            out = np.cbrt(np.asarray(input_data / self.scale))
            out = self.std.transform(out)

        if self.switcher.get(self.case) == 'nrt_std':
            out = np.power(np.asarray(input_data / self.scale),1/4)
            out = self.std.transform(out)

        if self.switcher.get(self.case) == 'tan':
            out = self.std.transform(input_data)
            out = self.norm.transform(out)
            out = np.tan(out / (2 * np.pi + self.bias))

        return out

    def inverse_transform(self, input_data):

        if self.switcher.get(self.case) == 'min_std':
            out = self.std.inverse_transform(input_data)
            out = self.norm.inverse_transform(out)

        if self.switcher.get(self.case) == 'std2':
            out = self.std.inverse_transform(input_data)

        if self.switcher.get(self.case) == 'std_min':
            out = self.norm.inverse_transform(input_data)
            out = self.std.inverse_transform(out)

        if self.switcher.get(self.case) == 'min':
            out = self.norm.inverse_transform(input_data)

        if self.switcher.get(self.case) == 'no':
            out = input_data

        if self.switcher.get(self.case) == 'log_min':
            out = self.norm.inverse_transform(input_data)
            out = (np.exp(-out) - self.bias) * self.scale

        if self.switcher.get(self.case) == 'log_std':
            out = self.std.inverse_transform(input_data)
            out = (np.exp(-out) - self.bias) * self.scale

        if self.switcher.get(self.case) == 'log2':
            out = self.std.inverse_transform(input_data)
            out = np.exp(out) - self.bias
            out = self.norm.inverse_transform(out)

        if self.switcher.get(self.case) == 'sqrt_std':
            out = self.std.inverse_transform(input_data)
            out = np.power(out,2) * self.scale

        if self.switcher.get(self.case) == 'cbrt_std':
            out = self.std.inverse_transform(input_data)
            out = np.power(out,3) * self.scale

        if self.switcher.get(self.case) == 'nrt_std':
            out = self.std.inverse_transform(input_data)
            out = np.power(out,4) * self.scale

        if self.switcher.get(self.case) == 'tan':
            out = (2 * np.pi + self.bias) * np.arctan(input_data)
            out = self.norm.inverse_transform(out)
            out = self.std.inverse_transform(out)

        return out

      

def read_h5_data(fileName, input_features, labels):
    df = pd.read_hdf(fileName)
#     df = df[df['f']<0.45]
#     for i in range(5):
#         pv_101=df[df['pv']==1]
#         pv_101['pv']=pv_101['pv']+0.002*(i+1)
#         df = pd.concat([df,pv_101])
    
    input_df=df[input_features]
    in_scaler = data_scaler()
    input_np = in_scaler.fit_transform(input_df.values,'std2')

    label_df=df[labels].clip(0)
#     if 'PVs' in labels:
#       label_df['PVs']=np.log(label_df['PVs']+1)
    out_scaler = data_scaler()
    label_np = out_scaler.fit_transform(label_df.values,'cbrt_std')

    return input_np, label_np, df, in_scaler, out_scaler
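
# Illustrative round trip for data_scaler (synthetic values, not the FGM table):
# 'cbrt_std' takes a cube root and standardizes, so inverse_transform has to
# undo the standardization and then cube to recover the originals.
_demo = np.array([[0.0, 1.0], [8.0, 27.0], [64.0, 125.0]])
_demo_scaler = data_scaler()
_demo_scaled = _demo_scaler.fit_transform(_demo, 'cbrt_std')
assert np.allclose(_demo_scaler.inverse_transform(_demo_scaled), _demo)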

In [5]:
# labels = ['CH4','O2','H2O','CO','CO2','T','PVs','psi','mu','alpha']
# labels = ['T','PVs']
# labels = ['T','CH4','O2','CO2','CO','H2O','H2','OH','psi']
# labels = ['CH2OH','HNCO','CH3OH', 'CH2CHO', 'CH2O', 'C3H8', 'HNO', 'NH2', 'HCN']

# labels = np.random.choice(col_labels,20,replace=False).tolist()
# labels.append('PVs')
# labels = col_labels
# labels= ['CH4', 'CH2O', 'CH3O', 'H', 'O2', 'H2', 'O', 'OH', 'H2O', 'HO2', 'H2O2', 
#          'C', 'CH', 'CH2', 'CH2(S)', 'CH3', 'CO', 'CO2', 'HCO', 'CH2OH', 'CH3OH', 
#          'C2H', 'C2H2', 'C2H3', 'C2H4', 'C2H5', 'C2H6', 'HCCO', 'CH2CO', 'HCCOH', 
#          'N', 'NH', 'NH2', 'NH3', 'NNH', 'NO', 'NO2', 'N2O', 'HNO', 'CN', 'HCN', 
#          'H2CN', 'HCNN', 'HCNO', 'HNCO', 'NCO', 'N2', 'AR', 'C3H7', 'C3H8', 'CH2CHO', 'CH3CHO', 'T', 'PVs']
# labels.remove('AR')
# labels.remove('N2')
labels =  ['H2', 'H', 'O', 'O2', 'OH', 'H2O', 'HO2', 'CH3', 'CH4', 'CO', 'CO2', 'CH2O', 'N2', 'T', 'PVs']

print(labels)

input_features=['f','zeta','pv']

# read in the data
x_input, y_label, df, in_scaler, out_scaler = read_h5_data('../data/tables_of_fgm_psi.h5',input_features=input_features, labels = labels)


['H2', 'H', 'O', 'O2', 'OH', 'H2O', 'HO2', 'CH3', 'CH4', 'CO', 'CO2', 'CH2O', 'N2', 'T', 'PVs']

In [6]:
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_input,y_label, test_size=0.01)

x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)


predict_val = model.predict(x_test,batch_size=1024*8)
# predict_val = model.predict(x_test,batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

test_data=pd.concat([x_test_df,y_test_df],axis=1)
pred_data=pd.concat([x_test_df,predict_df],axis=1)

!rm sim_check.h5
test_data.to_hdf('sim_check.h5',key='test')
pred_data.to_hdf('sim_check.h5',key='pred')

df_test=pd.read_hdf('sim_check.h5',key='test')
df_pred=pd.read_hdf('sim_check.h5',key='pred')

zeta_level=list(set(df_test['zeta']))
zeta_level.sort()


res_sum=pd.DataFrame()
r2s=[]
r2s_i=[]

names=[]
maxs_0=[]
maxs_9=[]

for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns):
  names.append(name)
  r2s.append(r2)
  maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max())
  maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max())
  for i in zeta_level:
    r2s_i.append(r2_score(df_pred[df_pred['zeta']==i][name],
                          df_test[df_test['zeta']==i][name]))

res_sum['name']=names
# res_sum['max_0']=maxs_0
# res_sum['max_9']=maxs_9
res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)]
# res_sum['r2']=r2s


tmp=np.asarray(r2s_i).reshape(-1,10)
for idx,z in enumerate(zeta_level):
  res_sum['r2s_'+str(z)]=tmp[:,idx]

res_sum[3:]


Out[6]:
name z_scale r2s_-5.551115123125783e-17 r2s_0.10999999999999999 r2s_0.22000000000000003 r2s_0.33 r2s_0.44 r2s_0.55 r2s_0.66 r2s_0.77 r2s_0.88 r2s_0.99
3 H2 0.171383 0.999994 0.999991 0.999994 0.999991 0.999989 0.999985 0.999988 0.999989 0.999987 0.999963
4 H 0.024425 0.999970 0.999978 0.999970 0.999911 0.999910 0.999864 0.999978 0.999969 0.999961 0.999961
5 O 0.026998 0.999817 0.999978 0.999989 0.999969 0.999956 0.999930 0.999987 0.999988 0.999987 0.999980
6 O2 1.000000 0.999992 0.999993 0.999994 0.999995 0.999995 0.999995 0.999996 0.999997 0.999997 0.999997
7 OH 0.029309 0.999937 0.999984 0.999988 0.999971 0.999966 0.999968 0.999987 0.999989 0.999991 0.999982
8 H2O 0.225100 0.999990 0.999995 0.999995 0.999995 0.999993 0.999992 0.999994 0.999995 0.999995 0.999985
9 HO2 0.081202 0.999992 0.999993 0.999994 0.999992 0.999990 0.999982 0.999992 0.999993 0.999992 0.999981
10 CH3 0.036919 0.999987 0.999991 0.999991 0.999976 0.999970 0.999943 0.999986 0.999988 0.999989 0.999980
11 CH4 1.000000 0.999998 0.999997 0.999997 0.999996 0.999996 0.999994 0.999995 0.999996 0.999997 0.999997
12 CO 0.181627 0.999993 0.999994 0.999996 0.999995 0.999993 0.999992 0.999993 0.999995 0.999992 0.999980
13 CO2 0.141674 0.999915 0.999990 0.999992 0.999993 0.999992 0.999991 0.999994 0.999995 0.999995 0.999987
14 CH2O 0.206096 0.999996 0.999996 0.999995 0.999995 0.999994 0.999992 0.999994 0.999995 0.999995 0.999988
15 N2 1.000000 0.999992 0.999994 0.999995 0.999995 0.999996 0.999994 0.999995 0.999997 0.999998 0.999998
16 T 0.311060 0.999995 0.999997 0.999997 0.999996 0.999996 0.999994 0.999995 0.999995 0.999992 0.999765
17 PVs 0.028175 0.999965 0.999991 0.999992 0.999987 0.999980 0.999962 0.999991 0.999993 0.999993 0.999986

In [7]:
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_input,y_label, test_size=0.01)

x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)


predict_val = student_model.predict(x_test,batch_size=1024*8)
# predict_val = model.predict(x_test,batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

test_data=pd.concat([x_test_df,y_test_df],axis=1)
pred_data=pd.concat([x_test_df,predict_df],axis=1)

!rm sim_check.h5
test_data.to_hdf('sim_check.h5',key='test')
pred_data.to_hdf('sim_check.h5',key='pred')

df_test=pd.read_hdf('sim_check.h5',key='test')
df_pred=pd.read_hdf('sim_check.h5',key='pred')

zeta_level=list(set(df_test['zeta']))
zeta_level.sort()


res_sum=pd.DataFrame()
r2s=[]
r2s_i=[]

names=[]
maxs_0=[]
maxs_9=[]

for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns):
  names.append(name)
  r2s.append(r2)
  maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max())
  maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max())
  for i in zeta_level:
    r2s_i.append(r2_score(df_pred[df_pred['zeta']==i][name],
                          df_test[df_test['zeta']==i][name]))

res_sum['name']=names
# res_sum['max_0']=maxs_0
# res_sum['max_9']=maxs_9
res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)]
# res_sum['r2']=r2s


tmp=np.asarray(r2s_i).reshape(-1,10)
for idx,z in enumerate(zeta_level):
  res_sum['r2s_'+str(z)]=tmp[:,idx]

res_sum[3:]


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-7-168bb80f97eb> in <module>
      8 
      9 
---> 10 predict_val = student_model.predict(x_test,batch_size=1024*8)
     11 # predict_val = model.predict(x_test,batch_size=1024*8)
     12 predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

NameError: name 'student_model' is not defined

In [6]:
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_input,y_label, test_size=0.01)

x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)


predict_val = model.predict(x_test,batch_size=1024*8)
# predict_val = model.predict(x_test,batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

test_data=pd.concat([x_test_df,y_test_df],axis=1)
pred_data=pd.concat([x_test_df,predict_df],axis=1)

!rm sim_check.h5
test_data.to_hdf('sim_check.h5',key='test')
pred_data.to_hdf('sim_check.h5',key='pred')

df_test=pd.read_hdf('sim_check.h5',key='test')
df_pred=pd.read_hdf('sim_check.h5',key='pred')

zeta_level=list(set(df_test['zeta']))
zeta_level.sort()


res_sum=pd.DataFrame()
r2s=[]
r2s_i=[]

names=[]
maxs_0=[]
maxs_9=[]

for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns):
  names.append(name)
  r2s.append(r2)
  maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max())
  maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max())
  for i in zeta_level:
    r2s_i.append(r2_score(df_pred[df_pred['zeta']==i][name],
                          df_test[df_test['zeta']==i][name]))

res_sum['name']=names
# res_sum['max_0']=maxs_0
# res_sum['max_9']=maxs_9
res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)]
# res_sum['r2']=r2s


tmp=np.asarray(r2s_i).reshape(-1,10)
for idx,z in enumerate(zeta_level):
  res_sum['r2s_'+str(z)]=tmp[:,idx]

res_sum[3:]


Out[6]:
name z_scale r2s_0.0 r2s_0.11 r2s_0.22 r2s_0.33 r2s_0.44 r2s_0.55 r2s_0.66 r2s_0.77 r2s_0.88 r2s_0.99
3 CH4 1.000000 0.999995 0.999996 0.999996 0.999996 0.999995 0.999988 0.999992 0.999996 0.999997 0.999999
4 O2 1.000000 0.999990 0.999991 0.999993 0.999993 0.999993 0.999986 0.999992 0.999996 0.999997 0.999999
5 H2O 0.223115 0.999993 0.999995 0.999995 0.999995 0.999994 0.999988 0.999991 0.999988 0.999975 0.999117
6 CO 0.180426 0.999993 0.999995 0.999994 0.999993 0.999992 0.999978 0.999988 0.999985 0.999971 0.998528
7 CO2 0.137653 0.999983 0.999985 0.999986 0.999986 0.999983 0.999958 0.999988 0.999986 0.999974 0.999440
8 T 0.297599 0.999993 0.999995 0.999995 0.999994 0.999994 0.999986 0.999992 0.999990 0.999980 0.999239
9 PVs 0.028787 0.999995 0.999988 0.999978 0.999966 0.999893 0.999268 0.999905 0.999946 0.999916 0.998487
10 psi 1.000000 0.999978 0.999985 0.999987 0.999988 0.999989 0.999988 0.999991 0.999993 0.999994 0.999996
11 mu 0.464841 0.999993 0.999994 0.999995 0.999994 0.999994 0.999988 0.999992 0.999992 0.999984 0.999674
12 alpha 1.000000 0.999978 0.999985 0.999987 0.999988 0.999989 0.999989 0.999991 0.999993 0.999993 0.999997

In [25]:
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x_input,y_label, test_size=0.01)

x_test_df = pd.DataFrame(in_scaler.inverse_transform(x_test),columns=input_features)
y_test_df = pd.DataFrame(out_scaler.inverse_transform(y_test),columns=labels)


predict_val = model.predict(x_test,batch_size=1024*8)
# predict_val = model.predict(x_test,batch_size=1024*8)
predict_df = pd.DataFrame(out_scaler.inverse_transform(predict_val), columns=labels)

test_data=pd.concat([x_test_df,y_test_df],axis=1)
pred_data=pd.concat([x_test_df,predict_df],axis=1)

!rm sim_check.h5
test_data.to_hdf('sim_check.h5',key='test')
pred_data.to_hdf('sim_check.h5',key='pred')

df_test=pd.read_hdf('sim_check.h5',key='test')
df_pred=pd.read_hdf('sim_check.h5',key='pred')

zeta_level=list(set(df_test['zeta']))
zeta_level.sort()


res_sum=pd.DataFrame()
r2s=[]
r2s_i=[]

names=[]
maxs_0=[]
maxs_9=[]

for r2,name in zip(r2_score(df_test,df_pred,multioutput='raw_values'),df_test.columns):
  names.append(name)
  r2s.append(r2)
  maxs_0.append(df_test[df_test['zeta']==zeta_level[0]][name].max())
  maxs_9.append(df_test[df_test['zeta']==zeta_level[8]][name].max())
  for i in zeta_level:
    r2s_i.append(r2_score(df_pred[df_pred['zeta']==i][name],
                          df_test[df_test['zeta']==i][name]))

res_sum['name']=names
# res_sum['max_0']=maxs_0
# res_sum['max_9']=maxs_9
res_sum['z_scale']=[m_9/(m_0+1e-20) for m_9,m_0 in zip(maxs_9,maxs_0)]
# res_sum['r2']=r2s


tmp=np.asarray(r2s_i).reshape(-1,10)
for idx,z in enumerate(zeta_level):
  res_sum['r2s_'+str(z)]=tmp[:,idx]

res_sum[3:]


Out[25]:
name z_scale r2s_0.0 r2s_0.11 r2s_0.22 r2s_0.33 r2s_0.44 r2s_0.55 r2s_0.66 r2s_0.77 r2s_0.88 r2s_0.99
3 CH4 1.000000 0.999994 0.999995 0.999995 0.999995 0.999994 0.999976 0.999987 0.999993 0.999993 0.999997
4 O2 1.000000 0.999986 0.999985 0.999989 0.999990 0.999991 0.999973 0.999987 0.999992 0.999993 0.999995
5 H2O 0.224176 0.999979 0.999988 0.999988 0.999986 0.999985 0.999973 0.999970 0.999954 0.999895 0.995601
6 CO 0.183326 0.999950 0.999978 0.999974 0.999979 0.999975 0.999945 0.999950 0.999923 0.999782 0.992715
7 CO2 0.140610 0.999516 0.999846 0.999863 0.999872 0.999897 0.999755 0.999804 0.999669 0.999442 0.984304
8 T 0.318095 0.999942 0.999972 0.999976 0.999976 0.999975 0.999946 0.999955 0.999938 0.999848 0.993675
9 PVs 0.030814 0.999574 0.998306 0.998408 0.997965 0.997493 0.985927 0.993063 0.988779 0.983162 0.811177
10 psi 1.000000 0.999849 0.999948 0.999947 0.999962 0.999971 0.999967 0.999974 0.999976 0.999976 0.999977
11 mu 0.483033 0.999956 0.999981 0.999982 0.999981 0.999983 0.999965 0.999977 0.999967 0.999919 0.998479
12 alpha 1.000000 0.999852 0.999952 0.999952 0.999963 0.999970 0.999969 0.999975 0.999977 0.999976 0.999982

In [7]:
#@title import plotly
import plotly.plotly as py
import numpy as np
from plotly.offline import init_notebook_mode, iplot
# from plotly.graph_objs import Contours, Histogram2dContour, Marker, Scatter
import plotly.graph_objs as go

def configure_plotly_browser_state():
  import IPython
  display(IPython.core.display.HTML('''
        <script src="/static/components/requirejs/require.js"></script>
        <script>
          requirejs.config({
            paths: {
              base: '/static/base',
              plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',
            },
          });
        </script>
        '''))

In [11]:
#@title Default title text
# species = np.random.choice(labels)
species = 'HNO' #@param {type:"string"}
z_level =  0 #@param {type:"integer"}

# configure_plotly_browser_state()
# init_notebook_mode(connected=False)

from sklearn.metrics import r2_score


df_t=df_test[df_test['zeta']==zeta_level[z_level]].sample(frac=1)
# df_p=df_pred.loc[df_pred['zeta']==zeta_level[1]].sample(frac=0.1)
df_p=df_pred.loc[df_t.index]
# error=(df_p[species]-df_t[species])
error=(df_p[species]-df_t[species])/(df_p[species]+df_t[species])
r2=round(r2_score(df_p[species],df_t[species]),4)

print(species,'r2:',r2,'max:',df_t[species].max())

fig_db = {
    'data': [       
        {'name':'test data from table',
         'x': df_t['f'],
         'y': df_t['pv'],
         'z': df_t[species],
         'type':'scatter3d', 
        'mode': 'markers',
          'marker':{
              'size':1
          }
        },
        {'name':'prediction from neural networks',
         'x': df_p['f'],
         'y': df_p['pv'],
         'z': df_p[species],
         'type':'scatter3d', 
        'mode': 'markers',
          'marker':{
              'size':1
          },
        },
        {'name':'relative error',
         'x': df_p['f'],
         'y': df_p['pv'],
         'z': error,
         'type':'scatter3d', 
         'mode': 'markers',
          'marker':{
              'size':1
          },
         }       
    ],
    'layout': {
        'scene':{
            'xaxis': {'title':'mixture fraction'},
            'yaxis': {'title':'progress variable'},
            'zaxis': {'title': species+'_r2:'+str(r2)}
                 }
    }
}
# iplot(fig_db, filename='multiple-scatter')
iplot(fig_db)


HNO r2: 0.9998 max: 3.916200000000001e-08

In [18]:
%matplotlib inline
import matplotlib.pyplot as plt
z=0.22
sp='HNO'
plt.plot(df[(df.pv==1)&(df.zeta==z)]['f'],df[(df.pv==0.9)&(df.zeta==z)][sp],'rd')


Out[18]:
[<matplotlib.lines.Line2D at 0x7f132017c518>]

In [23]:
from keras.models import Model
from keras.layers import Dense, Input, Dropout



n_neuron = 100
# %%
print('set up student network')
# ANN parameters
dim_input = x_train.shape[1]
dim_label = y_train.shape[1]

batch_norm = False

# This returns a tensor
inputs = Input(shape=(dim_input,),name='input_1')

# a layer instance is callable on a tensor, and returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
x = Dense(n_neuron, activation='relu')(x)
x = Dense(n_neuron, activation='relu')(x)
# x = Dropout(0.1)(x)
predictions = Dense(dim_label, activation='linear', name='output_1')(x)

student_model = Model(inputs=inputs, outputs=predictions)
student_model.summary()


set up student network
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 3)                 0         
_________________________________________________________________
dense_3 (Dense)              (None, 100)               400       
_________________________________________________________________
dense_4 (Dense)              (None, 100)               10100     
_________________________________________________________________
dense_5 (Dense)              (None, 100)               10100     
_________________________________________________________________
output_1 (Dense)             (None, 10)                1010      
=================================================================
Total params: 21,610
Trainable params: 21,610
Non-trainable params: 0
_________________________________________________________________

In [8]:
import keras.backend as K
from keras.callbacks import LearningRateScheduler
import math

def cubic_loss(y_true, y_pred):
    return K.mean(K.square(y_true - y_pred)*K.abs(y_true - y_pred), axis=-1)

def coeff_r2(y_true, y_pred):
    from keras import backend as K
    SS_res =  K.sum(K.square( y_true-y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )

  
def step_decay(epoch):
   initial_lrate = 0.002
   drop = 0.5
   epochs_drop = 1000.0
   lrate = initial_lrate * math.pow(drop,math.floor((1+epoch)/epochs_drop))
   return lrate
  
lrate = LearningRateScheduler(step_decay)
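# Worked example (illustrative): with initial_lrate=0.002, drop=0.5 and
# epochs_drop=1000, the schedule keeps 0.002 for the first 1000 epochs and
# halves it after each further 1000 epochs.
_example_lrates = [step_decay(e) for e in (0, 999, 1999)]  # -> [0.002, 0.001, 0.0005]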

class SGDRScheduler(Callback):
    '''Cosine annealing learning rate scheduler with periodic restarts.
    # Usage
        ```python
            schedule = SGDRScheduler(min_lr=1e-5,
                                     max_lr=1e-2,
                                     steps_per_epoch=np.ceil(epoch_size/batch_size),
                                     lr_decay=0.9,
                                     cycle_length=5,
                                     mult_factor=1.5)
            model.fit(X_train, Y_train, epochs=100, callbacks=[schedule])
        ```
    # Arguments
        min_lr: The lower bound of the learning rate range for the experiment.
        max_lr: The upper bound of the learning rate range for the experiment.
        steps_per_epoch: Number of mini-batches in the dataset. Calculated as `np.ceil(epoch_size/batch_size)`.
        lr_decay: Reduce the max_lr after the completion of each cycle.
                  Ex. To reduce the max_lr by 20% after each cycle, set this value to 0.8.
        cycle_length: Initial number of epochs in a cycle.
        mult_factor: Scale epochs_to_restart after each full cycle completion.
    # References
        Blog post: jeremyjordan.me/nn-learning-rate
        Original paper: http://arxiv.org/abs/1608.03983
    '''
    def __init__(self,
                 min_lr,
                 max_lr,
                 steps_per_epoch,
                 lr_decay=1,
                 cycle_length=10,
                 mult_factor=2):

        self.min_lr = min_lr
        self.max_lr = max_lr
        self.lr_decay = lr_decay

        self.batch_since_restart = 0
        self.next_restart = cycle_length

        self.steps_per_epoch = steps_per_epoch

        self.cycle_length = cycle_length
        self.mult_factor = mult_factor

        self.history = {}
        # Snapshot of the weights taken at the end of the most recent cycle
        # (stays None until the first restart happens).
        self.best_weights = None

    def clr(self):
        '''Calculate the learning rate.'''
        fraction_to_restart = self.batch_since_restart / (self.steps_per_epoch * self.cycle_length)
        lr = self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
        return lr

    def on_train_begin(self, logs={}):
        '''Initialize the learning rate to the minimum value at the start of training.'''
        logs = logs or {}
        K.set_value(self.model.optimizer.lr, self.max_lr)

    def on_batch_end(self, batch, logs={}):
        '''Record previous batch statistics and update the learning rate.'''
        logs = logs or {}
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        self.batch_since_restart += 1
        K.set_value(self.model.optimizer.lr, self.clr())

    def on_epoch_end(self, epoch, logs={}):
        '''Check for end of current cycle, apply restarts when necessary.'''
        if epoch + 1 == self.next_restart:
            self.batch_since_restart = 0
            self.cycle_length = np.ceil(self.cycle_length * self.mult_factor)
            self.next_restart += self.cycle_length
            self.max_lr *= self.lr_decay
            self.best_weights = self.model.get_weights()

    def on_train_end(self, logs={}):
        '''Set weights to the values from the end of the most recent cycle for best performance.'''
        if self.best_weights is not None:
            self.model.set_weights(self.best_weights)
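
# Shape of one cosine-annealing cycle (illustrative, same min_lr/max_lr as the
# schedule built further below): the rate starts at max_lr and decays smoothly
# to min_lr just before the next warm restart.
_min_lr, _max_lr = 1e-5, 1e-4
_cycle_fraction = np.linspace(0.0, 1.0, 5)
_cycle_lrs = _min_lr + 0.5 * (_max_lr - _min_lr) * (1 + np.cos(_cycle_fraction * np.pi))
# starts at _max_lr and ends at _min_lr (up to floating-point rounding)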

In [26]:
student_model = load_model('student.h5',custom_objects={'coeff_r2':coeff_r2})

In [13]:
model.summary()


__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 3)            0                                            
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 900)          3600        input_1[0][0]                    
__________________________________________________________________________________________________
res1a_branch2a (Dense)          (None, 900)          810900      dense_1[0][0]                    
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 900)          0           res1a_branch2a[0][0]             
__________________________________________________________________________________________________
res1a_branch2b (Dense)          (None, 900)          810900      activation_1[0][0]               
__________________________________________________________________________________________________
add_1 (Add)                     (None, 900)          0           res1a_branch2b[0][0]             
                                                                 dense_1[0][0]                    
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 900)          0           add_1[0][0]                      
__________________________________________________________________________________________________
res1b_branch2a (Dense)          (None, 900)          810900      activation_2[0][0]               
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 900)          0           res1b_branch2a[0][0]             
__________________________________________________________________________________________________
res1b_branch2b (Dense)          (None, 900)          810900      activation_3[0][0]               
__________________________________________________________________________________________________
add_2 (Add)                     (None, 900)          0           res1b_branch2b[0][0]             
                                                                 activation_2[0][0]               
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 900)          0           add_2[0][0]                      
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 100)          90100       activation_4[0][0]               
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 10)           1010        dense_2[0][0]                    
==================================================================================================
Total params: 3,338,310
Trainable params: 3,338,310
Non-trainable params: 0
__________________________________________________________________________________________________

In [9]:
# Build a regular 600 x 10 x 600 sampling grid over the normalized (f, zeta, pv)
# input space; it is flattened below into one (n_points, 3) array for the teacher model.
gx,gy,gz=np.mgrid[0:1:600j,0:1:10j,0:1:600j]
gx=gx.reshape(-1,1)
gy=gy.reshape(-1,1)
gz=gz.reshape(-1,1)
gm=np.hstack([gx,gy,gz])
gm.shape


Out[9]:
(3600000, 3)

In [36]:
from keras.callbacks import ModelCheckpoint
from keras import optimizers
batch_size = 1024*16
epochs = 2000
vsplit = 0.1

loss_type='mse'

adam_op = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999,epsilon=1e-8, decay=0.0, amsgrad=False)

student_model.compile(loss=loss_type,
#                       optimizer=adam_op,
                      optimizer='adam',
                      metrics=[coeff_r2])
# model.compile(loss=cubic_loss, optimizer=adam_op, metrics=['accuracy'])

# checkpoint (save the best model based on validation loss)
!mkdir ./tmp
filepath = "./tmp/student_weights.best.cntk.hdf5"

checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             period=20)

epoch_size=x_train.shape[0]
a=0
base=2
clc=2
for i in range(5):
  a+=base*clc**(i)
print(a)
epochs,c_len = a,base
schedule = SGDRScheduler(min_lr=1e-5,max_lr=1e-4,
                         steps_per_epoch=np.ceil(epoch_size/batch_size),
                         cycle_length=c_len,lr_decay=0.8,mult_factor=2)

callbacks_list = [checkpoint]
# callbacks_list = [checkpoint, schedule]

# Distillation step: label the sampling grid with the teacher model's (scaled)
# predictions, then fit the smaller student network on these teacher-generated targets.
x_train_teacher = in_scaler.transform(gm)
y_train_teacher = model.predict(x_train_teacher, batch_size=1024*8)
x_train, x_test, y_train, y_test = train_test_split(x_train_teacher,y_train_teacher, test_size=0.01)
# fit the model
history = student_model.fit(
    x_train, y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_split=vsplit,
    verbose=2,
    callbacks=callbacks_list,
    shuffle=True)


mkdir: cannot create directory ‘./tmp’: File exists
62
Train on 3207600 samples, validate on 356400 samples
Epoch 1/62
 - 4s - loss: 1.5372e-04 - coeff_r2: 0.9998 - val_loss: 7.0398e-05 - val_coeff_r2: 0.9999
Epoch 2/62
 - 2s - loss: 7.1192e-05 - coeff_r2: 0.9999 - val_loss: 6.9974e-05 - val_coeff_r2: 0.9999
Epoch 3/62
 - 2s - loss: 7.3368e-05 - coeff_r2: 0.9999 - val_loss: 7.0930e-05 - val_coeff_r2: 0.9999
Epoch 4/62
 - 2s - loss: 7.6598e-05 - coeff_r2: 0.9999 - val_loss: 7.6345e-05 - val_coeff_r2: 0.9999
Epoch 5/62
 - 2s - loss: 7.8491e-05 - coeff_r2: 0.9999 - val_loss: 8.4268e-05 - val_coeff_r2: 0.9999
Epoch 6/62
 - 2s - loss: 7.6528e-05 - coeff_r2: 0.9999 - val_loss: 8.0171e-05 - val_coeff_r2: 0.9999
Epoch 7/62
 - 2s - loss: 7.8521e-05 - coeff_r2: 0.9999 - val_loss: 8.1167e-05 - val_coeff_r2: 0.9999
Epoch 8/62
 - 2s - loss: 7.9864e-05 - coeff_r2: 0.9999 - val_loss: 8.6915e-05 - val_coeff_r2: 0.9999
Epoch 9/62
 - 2s - loss: 8.4223e-05 - coeff_r2: 0.9999 - val_loss: 9.6007e-05 - val_coeff_r2: 0.9999
Epoch 10/62
 - 2s - loss: 8.1266e-05 - coeff_r2: 0.9999 - val_loss: 8.9177e-05 - val_coeff_r2: 0.9999
Epoch 11/62
 - 2s - loss: 7.9030e-05 - coeff_r2: 0.9999 - val_loss: 7.5455e-05 - val_coeff_r2: 0.9999
Epoch 12/62
 - 2s - loss: 8.1360e-05 - coeff_r2: 0.9999 - val_loss: 8.6248e-05 - val_coeff_r2: 0.9999
Epoch 13/62
 - 2s - loss: 7.9479e-05 - coeff_r2: 0.9999 - val_loss: 7.0483e-05 - val_coeff_r2: 0.9999
Epoch 14/62
 - 2s - loss: 8.0523e-05 - coeff_r2: 0.9999 - val_loss: 7.0475e-05 - val_coeff_r2: 0.9999
Epoch 15/62
 - 2s - loss: 8.3205e-05 - coeff_r2: 0.9999 - val_loss: 7.8536e-05 - val_coeff_r2: 0.9999
Epoch 16/62
 - 3s - loss: 7.8259e-05 - coeff_r2: 0.9999 - val_loss: 7.8073e-05 - val_coeff_r2: 0.9999
Epoch 17/62
 - 3s - loss: 7.9226e-05 - coeff_r2: 0.9999 - val_loss: 9.0970e-05 - val_coeff_r2: 0.9999
Epoch 18/62
 - 2s - loss: 7.8309e-05 - coeff_r2: 0.9999 - val_loss: 7.5601e-05 - val_coeff_r2: 0.9999
Epoch 19/62
 - 2s - loss: 7.7620e-05 - coeff_r2: 0.9999 - val_loss: 7.7259e-05 - val_coeff_r2: 0.9999
Epoch 20/62
 - 2s - loss: 8.2445e-05 - coeff_r2: 0.9999 - val_loss: 7.8942e-05 - val_coeff_r2: 0.9999

Epoch 00020: val_loss improved from inf to 0.00008, saving model to ./tmp/student_weights.best.cntk.hdf5
Epoch 21/62
 - 3s - loss: 7.9240e-05 - coeff_r2: 0.9999 - val_loss: 8.6459e-05 - val_coeff_r2: 0.9999
Epoch 22/62
 - 2s - loss: 7.7118e-05 - coeff_r2: 0.9999 - val_loss: 7.7388e-05 - val_coeff_r2: 0.9999
Epoch 23/62
 - 2s - loss: 7.8313e-05 - coeff_r2: 0.9999 - val_loss: 7.1780e-05 - val_coeff_r2: 0.9999
Epoch 24/62
 - 3s - loss: 8.6743e-05 - coeff_r2: 0.9999 - val_loss: 7.8492e-05 - val_coeff_r2: 0.9999
Epoch 25/62
 - 2s - loss: 7.6938e-05 - coeff_r2: 0.9999 - val_loss: 6.8764e-05 - val_coeff_r2: 0.9999
Epoch 26/62
 - 2s - loss: 7.7853e-05 - coeff_r2: 0.9999 - val_loss: 7.2816e-05 - val_coeff_r2: 0.9999
Epoch 27/62
 - 2s - loss: 7.7118e-05 - coeff_r2: 0.9999 - val_loss: 6.7090e-05 - val_coeff_r2: 0.9999
Epoch 28/62
 - 2s - loss: 7.7613e-05 - coeff_r2: 0.9999 - val_loss: 7.3566e-05 - val_coeff_r2: 0.9999
Epoch 29/62
 - 2s - loss: 8.2237e-05 - coeff_r2: 0.9999 - val_loss: 8.0517e-05 - val_coeff_r2: 0.9999
Epoch 30/62
 - 2s - loss: 7.5754e-05 - coeff_r2: 0.9999 - val_loss: 7.9571e-05 - val_coeff_r2: 0.9999
Epoch 31/62
 - 2s - loss: 7.7426e-05 - coeff_r2: 0.9999 - val_loss: 7.5532e-05 - val_coeff_r2: 0.9999
Epoch 32/62
 - 2s - loss: 7.6448e-05 - coeff_r2: 0.9999 - val_loss: 9.2721e-05 - val_coeff_r2: 0.9999
Epoch 33/62
 - 2s - loss: 7.8374e-05 - coeff_r2: 0.9999 - val_loss: 7.0651e-05 - val_coeff_r2: 0.9999
Epoch 34/62
 - 3s - loss: 8.3776e-05 - coeff_r2: 0.9999 - val_loss: 8.8142e-05 - val_coeff_r2: 0.9999
Epoch 35/62
 - 2s - loss: 7.5800e-05 - coeff_r2: 0.9999 - val_loss: 6.7747e-05 - val_coeff_r2: 0.9999
Epoch 36/62
 - 2s - loss: 7.5429e-05 - coeff_r2: 0.9999 - val_loss: 7.0590e-05 - val_coeff_r2: 0.9999
Epoch 37/62
 - 2s - loss: 7.6417e-05 - coeff_r2: 0.9999 - val_loss: 7.7214e-05 - val_coeff_r2: 0.9999
Epoch 38/62
 - 2s - loss: 7.7997e-05 - coeff_r2: 0.9999 - val_loss: 9.1887e-05 - val_coeff_r2: 0.9999
Epoch 39/62
 - 2s - loss: 7.6255e-05 - coeff_r2: 0.9999 - val_loss: 7.5994e-05 - val_coeff_r2: 0.9999
Epoch 40/62
 - 2s - loss: 7.4470e-05 - coeff_r2: 0.9999 - val_loss: 6.8878e-05 - val_coeff_r2: 0.9999

Epoch 00040: val_loss improved from 0.00008 to 0.00007, saving model to ./tmp/student_weights.best.cntk.hdf5
Epoch 41/62
 - 3s - loss: 7.6974e-05 - coeff_r2: 0.9999 - val_loss: 7.2147e-05 - val_coeff_r2: 0.9999
Epoch 42/62
 - 2s - loss: 7.8337e-05 - coeff_r2: 0.9999 - val_loss: 8.5127e-05 - val_coeff_r2: 0.9999
Epoch 43/62
 - 3s - loss: 7.7425e-05 - coeff_r2: 0.9999 - val_loss: 7.4471e-05 - val_coeff_r2: 0.9999
Epoch 44/62
 - 2s - loss: 7.7451e-05 - coeff_r2: 0.9999 - val_loss: 7.8081e-05 - val_coeff_r2: 0.9999
Epoch 45/62
 - 3s - loss: 7.4969e-05 - coeff_r2: 0.9999 - val_loss: 7.0336e-05 - val_coeff_r2: 0.9999
Epoch 46/62
 - 2s - loss: 8.0301e-05 - coeff_r2: 0.9999 - val_loss: 8.4031e-05 - val_coeff_r2: 0.9999
Epoch 47/62
 - 2s - loss: 7.4756e-05 - coeff_r2: 0.9999 - val_loss: 7.3626e-05 - val_coeff_r2: 0.9999
Epoch 48/62
 - 2s - loss: 7.4078e-05 - coeff_r2: 0.9999 - val_loss: 7.2226e-05 - val_coeff_r2: 0.9999
Epoch 49/62
 - 2s - loss: 7.5466e-05 - coeff_r2: 0.9999 - val_loss: 7.9103e-05 - val_coeff_r2: 0.9999
Epoch 50/62
 - 3s - loss: 7.9991e-05 - coeff_r2: 0.9999 - val_loss: 7.3817e-05 - val_coeff_r2: 0.9999
Epoch 51/62
 - 3s - loss: 7.3744e-05 - coeff_r2: 0.9999 - val_loss: 7.4004e-05 - val_coeff_r2: 0.9999
Epoch 52/62
 - 2s - loss: 7.4640e-05 - coeff_r2: 0.9999 - val_loss: 6.9226e-05 - val_coeff_r2: 0.9999
Epoch 53/62
 - 2s - loss: 7.1235e-05 - coeff_r2: 0.9999 - val_loss: 6.5585e-05 - val_coeff_r2: 0.9999
Epoch 54/62
 - 3s - loss: 7.2866e-05 - coeff_r2: 0.9999 - val_loss: 7.6260e-05 - val_coeff_r2: 0.9999
Epoch 55/62
 - 2s - loss: 7.6232e-05 - coeff_r2: 0.9999 - val_loss: 6.7503e-05 - val_coeff_r2: 0.9999
Epoch 56/62
 - 2s - loss: 7.4577e-05 - coeff_r2: 0.9999 - val_loss: 8.2046e-05 - val_coeff_r2: 0.9999
Epoch 57/62
 - 2s - loss: 7.3923e-05 - coeff_r2: 0.9999 - val_loss: 6.7468e-05 - val_coeff_r2: 0.9999
Epoch 58/62
 - 2s - loss: 7.1492e-05 - coeff_r2: 0.9999 - val_loss: 6.5927e-05 - val_coeff_r2: 0.9999
Epoch 59/62
 - 2s - loss: 7.5408e-05 - coeff_r2: 0.9999 - val_loss: 7.4007e-05 - val_coeff_r2: 0.9999
Epoch 60/62
 - 2s - loss: 7.4182e-05 - coeff_r2: 0.9999 - val_loss: 8.7424e-05 - val_coeff_r2: 0.9999

Epoch 00060: val_loss did not improve from 0.00007
Epoch 61/62
 - 3s - loss: 7.6069e-05 - coeff_r2: 0.9999 - val_loss: 7.1505e-05 - val_coeff_r2: 0.9999
Epoch 62/62
 - 2s - loss: 7.6596e-05 - coeff_r2: 0.9999 - val_loss: 8.7942e-05 - val_coeff_r2: 0.9999
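
Since the ModelCheckpoint above saves the full model whenever val_loss improves (checked every 20 epochs), the best snapshot can be restored after training instead of keeping the last-epoch weights. A minimal sketch, reusing the filepath and coeff_r2 defined earlier:

student_model = load_model(filepath, custom_objects={'coeff_r2': coeff_r2})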

In [39]:
student_model.save('student_100_3.h5')

In [22]:
n_res = 501
pv_level = 0.996
f_1 = np.linspace(0,1,n_res)
z_1 = np.zeros(n_res)
pv_1 = np.ones(n_res)*pv_level
case_1 = np.vstack((f_1,z_1,pv_1))
# case_1 = np.vstack((pv_1,z_1,f_1))

case_1 = case_1.T
case_1.shape


Out[22]:
(501, 3)

In [23]:
out=out_scaler.inverse_transform(model.predict(case_1))
out=pd.DataFrame(out,columns=labels)
sp='PVs'
out.head()


Out[23]:
CH4 O2 H2O CO CO2 T PVs psi mu alpha
0 -4.657913e-07 0.236895 -8.894838e-09 -5.629384e-08 1.191589e-08 299.126465 0.357136 0.011592 0.000018 0.000012
1 1.363579e-06 0.225694 4.472311e-03 7.254475e-05 5.132813e-03 385.113831 0.550195 0.008996 0.000023 0.000009
2 7.026879e-07 0.215498 8.496866e-03 1.583289e-04 1.067833e-02 466.032257 1.369625 0.007415 0.000026 0.000007
3 1.606503e-06 0.205213 1.293162e-02 2.512379e-04 1.602193e-02 551.731995 1.427688 0.006261 0.000030 0.000006
4 2.427723e-06 0.196394 1.718562e-02 2.994192e-04 2.101947e-02 637.010010 1.655576 0.005454 0.000033 0.000005

In [24]:
table_val=df[(df.pv==pv_level) & (df.zeta==0)][sp]
table_val.shape


Out[24]:
(501,)

In [25]:
import matplotlib.pyplot as plt
plt.plot(f_1,table_val)
plt.show


Out[25]:
<function matplotlib.pyplot.show(*args, **kw)>

In [26]:
plt.plot(f_1,out[sp])
plt.show


Out[26]:
<function matplotlib.pyplot.show(*args, **kw)>

In [27]:
df.head()


Out[27]:
zeta f pv C2H3 C2H6 CH2 H2CN C2H4 H2O2 C2H ... CH2O C3H8 HNO NH2 HCN H N H2O HCCOH HCNN
0 0.0 0.000 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
1 0.0 0.002 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
2 0.0 0.004 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
3 0.0 0.006 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
4 0.0 0.008 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0

5 rows × 61 columns


In [201]:
pv_101=df[df['pv']==1][df['zeta']==0]


/home/eg/anaconda3/envs/my_dev/lib/python3.6/site-packages/ipykernel_launcher.py:1: UserWarning:

Boolean Series key will be reindexed to match DataFrame index.


In [202]:
pv_101['pv']=pv_101['pv']+0.01

In [204]:
a=pd.concat([pv_101,pv_101])

In [205]:
pv_101.shape


Out[205]:
(501, 61)

In [206]:
a.shape


Out[206]:
(1002, 61)

In [207]:
a


Out[207]:
zeta f pv C2H3 C2H6 CH2 H2CN C2H4 H2O2 C2H ... CH2O C3H8 HNO NH2 HCN H N H2O HCCOH HCNN
2505000 0.0 0.000 1.01 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 ... 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000 0.000000e+00 0.000000e+00
2505001 0.0 0.002 1.01 8.436570e-25 0.000000e+00 1.573110e-21 1.041870e-24 2.584680e-23 2.159630e-10 5.490900e-21 ... 2.902970e-18 1.588320e-38 5.371620e-16 4.061870e-15 6.098830e-14 1.053440e-12 1.051560e-18 0.004475 4.195130e-12 4.170610e-28
2505002 0.0 0.004 1.01 1.687310e-24 0.000000e+00 3.146210e-21 2.083730e-24 5.169360e-23 4.319260e-10 1.098180e-20 ... 5.805930e-18 3.176630e-38 1.074320e-15 8.123750e-15 1.219770e-13 2.106890e-12 2.103110e-18 0.008950 8.390250e-12 8.341210e-28
2505003 0.0 0.006 1.01 2.530970e-24 0.000000e+00 4.719320e-21 3.125600e-24 7.754050e-23 6.478890e-10 1.647270e-20 ... 8.708900e-18 4.764950e-38 1.611490e-15 1.218560e-14 1.829650e-13 3.160330e-12 3.154670e-18 0.013425 1.258540e-11 1.251180e-27
2505004 0.0 0.008 1.01 3.374630e-24 0.000000e+00 6.292430e-21 4.167460e-24 1.033870e-22 8.638520e-10 2.196360e-20 ... 1.161190e-17 6.353270e-38 2.148650e-15 1.624750e-14 2.439530e-13 4.213770e-12 4.206220e-18 0.017900 1.678050e-11 1.668240e-27
2505005 0.0 0.010 1.01 4.218290e-24 0.000000e+00 7.865530e-21 5.209330e-24 1.292340e-22 1.079820e-09 2.745450e-20 ... 1.451480e-17 7.941580e-38 2.685810e-15 2.030940e-14 3.049410e-13 5.267220e-12 5.257780e-18 0.022375 2.097560e-11 2.085300e-27
2505006 0.0 0.012 1.01 5.061940e-24 0.000000e+00 9.438640e-21 6.251200e-24 1.550810e-22 1.295780e-09 3.294540e-20 ... 1.741780e-17 9.529900e-38 3.222970e-15 2.437120e-14 3.659300e-13 6.320660e-12 6.309330e-18 0.026850 2.517080e-11 2.502360e-27
2505007 0.0 0.014 1.01 5.905600e-24 0.000000e+00 1.101170e-20 7.293060e-24 1.809280e-22 1.511740e-09 3.843630e-20 ... 2.032080e-17 1.111820e-37 3.760130e-15 2.843310e-14 4.269180e-13 7.374100e-12 7.360890e-18 0.031325 2.936590e-11 2.919420e-27
2505008 0.0 0.016 1.01 6.749260e-24 0.000000e+00 1.258490e-20 8.334930e-24 2.067750e-22 1.727700e-09 4.392720e-20 ... 2.322370e-17 1.270650e-37 4.297290e-15 3.249500e-14 4.879060e-13 8.427550e-12 8.412440e-18 0.035800 3.356100e-11 3.336490e-27
[DataFrame display truncated: rows with indices 2505009 through 2505500 omitted; values decay smoothly toward zero at the final row]

1002 rows × 61 columns
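A minimal sketch (not part of the original notebook) of how this table could be fed to the loaded network and scored column by column. Everything below is an assumption: `df` stands for the DataFrame displayed above, the three input column names (`'f'`, `'pv'`, `'zeta'`) are hypothetical placeholders for the model's 3 inputs (the real labels are hidden by the truncated display), and the network's outputs are assumed to align one-to-one with the remaining columns.

import numpy as np

# Hypothetical names for the 3 model inputs; the rest are treated as targets.
input_cols = ['f', 'pv', 'zeta']
output_cols = [c for c in df.columns if c not in input_cols]

# Run the loaded model on the tabulated inputs.
pred = model.predict(df[input_cols].values, batch_size=1024)

# Coefficient of determination (R^2) per output column, computed in NumPy.
for i, col in enumerate(output_cols):
    y_true = df[col].values
    y_pred = pred[:, i]
    ss_res = np.sum(np.square(y_true - y_pred))
    ss_tot = np.sum(np.square(y_true - y_true.mean()))
    print('{}: R2 = {:.4f}'.format(col, 1.0 - ss_res / (ss_tot + 1e-12)))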


In [ ]: