In [1]:
# Autoreload all modules when changes are saved.
%reload_ext autoreload
%autoreload 2
# Show all figures inline.
%matplotlib inline
# Add olfaction-prediction to the Python path.
import os
import sys
curr_path = os.getcwd()
gerkin_path = os.path.split(curr_path)[0]
olfaction_prediction_path = os.path.split(gerkin_path)[0]
sys.path.append(olfaction_prediction_path)
import opc_python
# Import numerical libraries.
import numpy as np
import matplotlib.pyplot as plt
In [2]:
# Import generic utility modules I wrote to load the data from the tab-delimited text files and to score predictions.
from opc_python.utils import loading, scoring
# Import the modules I wrote for actually shaping and fitting the data to the model.
from opc_python.gerkin import dream,fit2
In [3]:
# Load the perceptual descriptors data.
perceptual_headers, perceptual_obs_data = loading.load_perceptual_data('training')
loading.format_leaderboard_perceptual_data()
# Show the perceptual metadata types and perceptual descriptor names.
print(perceptual_headers)
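The slice perceptual_headers[6:] used below assumes that the first six entries are metadata columns and the rest are the 21 descriptor names; a quick sketch separating the two groups:
# Split the headers into metadata columns and perceptual descriptor names.
metadata_columns = perceptual_headers[:6]
descriptor_names = perceptual_headers[6:]
print("Metadata columns: %s" % metadata_columns)
print("%d descriptor names, e.g. %s" % (len(descriptor_names), descriptor_names[:3]))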
In [4]:
# Show the metadata and perceptual descriptor values for one rating of the first compound.
print(perceptual_obs_data[1])
In [5]:
num_descriptors = len(perceptual_headers[6:])
num_subjects = 49
print('There are %d different perceptual descriptors and %d different subjects' % (num_descriptors,num_subjects))
In [6]:
# Load the molecular descriptors data.
molecular_headers, molecular_data = loading.load_molecular_data()
print("First ten molecular descriptor types are %s" % molecular_headers[:10])
print("First ten descriptor values for the first compound are %s" % molecular_data[0][:10])
total_size = len(set([int(row[0]) for row in molecular_data]))
print("We have molecular descriptors for %d unique molecules" % total_size)
In [7]:
training_size = len(set([int(row[0]) for row in perceptual_obs_data]))
print("We have perceptual data for %d unique molecules" % training_size)
remaining_size = total_size - training_size
print ("%d are left out for testing in the competition; half of these (%d) are used for the leaderboard." \
% (remaining_size,remaining_size/2))
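As a sanity check on the counts above, the held-out molecules can be recovered by set difference between the CIDs with molecular descriptors and those with training perceptual data:
# CIDs with molecular descriptors but no training perceptual data are the held-out molecules.
all_cids = set(int(row[0]) for row in molecular_data)
training_cids = set(int(row[0]) for row in perceptual_obs_data)
held_out_cids = all_cids - training_cids
print("Held-out molecules: %d (should equal %d)" % (len(held_out_cids), remaining_size))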
In [8]:
print("There are %d rows in the perceptual data set (at least one for each subject and molecule)" % len(perceptual_obs_data))
print("%d of these are replicates (same subject and molecules)" % sum([x[2] for x in perceptual_obs_data]))
In [9]:
X_training,good1,good2,means,stds,imputer = dream.make_X(molecular_data,"training")
X_training.shape
Out[9]:
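A quick sanity check on the feature matrix (a sketch; the exact preprocessing lives inside dream.make_X, described below as cube-root transformation followed by N(0,1) normalization):
# Confirm that imputation left no missing values and that features are roughly standardized.
print("Any NaNs remaining: %s" % np.isnan(X_training).any())
print("Average feature mean: %.3f" % X_training.mean(axis=0).mean())
print("Average feature standard deviation: %.3f" % X_training.std(axis=0).mean())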
In [10]:
X_leaderboard_other, good1, good2, means, stds, imputer = dream.make_X(
    molecular_data, "leaderboard", target_dilution='high',
    good1=good1, good2=good2, means=means, stds=stds)
X_leaderboard_other.shape
Out[10]:
In [11]:
X_leaderboard_int, good1, good2, means, stds, imputer = dream.make_X(
    molecular_data, "leaderboard", target_dilution=-3,
    good1=good1, good2=good2, means=means, stds=stds)
X_leaderboard_int.shape
Out[11]:
In [12]:
X_testset_other, good1, good2, means, stds, imputer = dream.make_X(
    molecular_data, "testset", target_dilution='high',
    good1=good1, good2=good2, means=means, stds=stds)
X_testset_other.shape
Out[12]:
In [13]:
X_testset_int, good1, good2, means, stds, imputer = dream.make_X(
    molecular_data, "testset", target_dilution=-3,
    good1=good1, good2=good2, means=means, stds=stds)
X_testset_int.shape
Out[13]:
In [14]:
X_all, good1, good2, means, stds, imputer = dream.make_X(
    molecular_data, ['training', 'leaderboard'],
    good1=good1, good2=good2, means=means, stds=stds)
X_all.shape
Out[14]:
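Because good1, good2, means, and stds from the training call are passed into every subsequent make_X call, all of these matrices should share the same feature columns; a quick check, assuming the second axis is the feature axis:
# All feature matrices should have the same number of columns, since the same feature
# selection (good1, good2) and scaling (means, stds) are reused throughout.
for name, X in [('training', X_training), ('leaderboard (high)', X_leaderboard_other),
                ('leaderboard (-3)', X_leaderboard_int), ('testset (high)', X_testset_other),
                ('testset (-3)', X_testset_int), ('all', X_all)]:
    print("%-20s %s" % (name, X.shape))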
In [15]:
Y_training_imp,imputer = dream.make_Y_obs('training',target_dilution=None,imputer='median')
Y_training_mask,imputer = dream.make_Y_obs('training',target_dilution=None,imputer='mask')
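The 'mean_std' entry presumably stacks the 21 across-subject means alongside the 21 across-subject standard deviations (hence the scatter of column 0 against column 21 further below); this sketch just checks that guess against the array width:
# If 'mean_std' is [21 descriptor means | 21 descriptor standard deviations], it should
# have 2 * 21 = 42 columns. (Layout inferred from the key name, not from the dream source.)
print(Y_training_imp['mean_std'].shape)
print("Expected number of columns: %d" % (2 * num_descriptors))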
In [16]:
Y_leaderboard,imputer = dream.make_Y_obs('leaderboard',target_dilution='gold',imputer='mask')
In [29]:
Y_leaderboard_noimpute,_ = dream.make_Y_obs('leaderboard',target_dilution='gold',imputer=None)
In [17]:
Y_all_imp,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='median')
In [18]:
Y_all_mask,imputer = dream.make_Y_obs(['training','leaderboard'],target_dilution=None,imputer='mask')
In [19]:
# Plot the across-subject mean (column 0) against the across-subject standard deviation
# (column 21) for the first perceptual descriptor.
plt.scatter(Y_all_mask['mean_std'][:, 0], Y_all_mask['mean_std'][:, 21])
Out[19]:
In [20]:
# Show the range of values for the molecular and perceptual descriptors.
plt.hist(X_training.ravel())
plt.yscale('log')
plt.ylabel('Count')
plt.xlabel('Cube root transformed, N(0,1) normalized molecular descriptor values')
plt.figure()
plt.hist(Y_training_imp['mean_std'][:, :21].ravel())  # first 21 columns are the subject-averaged means
plt.yscale('log')
plt.ylabel('Count')
plt.xlabel('Perceptual descriptor subject-averaged values')
Out[20]:
In [21]:
write = True # If True, write the prediction files to disk.
n_estimators = 1000 # Number of trees per forest; a high number (e.g. 1000) gives a good fit.
# Best parameters, determined independently.
max_features = {'int': {'mean': None, 'sigma': None},
                'ple': {'mean': 100,  'sigma': None},
                'dec': {'mean': 500,  'sigma': 500}}
min_samples_leaf = {'int': {'mean': 1, 'sigma': 4},
                    'ple': {'mean': 1, 'sigma': 1},
                    'dec': {'mean': 1, 'sigma': 1}}
max_depth = {'int': {'mean': None, 'sigma': 2},
             'ple': {'mean': 10,   'sigma': 10},
             'dec': {'mean': 10,   'sigma': 10}}
et = {'int': {'mean': True,  'sigma': True},
      'ple': {'mean': False, 'sigma': False},
      'dec': {'mean': False, 'sigma': False}}
#et['int'] = {'mean':False,'sigma':False} # Uncomment to get a correct score estimate, or leave commented to get best fit.
use_mask = {'int': {'mean': False, 'sigma': True},
            'ple': {'mean': False, 'sigma': True},
            'dec': {'mean': False, 'sigma': True}}
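These dictionaries are keyed by descriptor group ('int', 'ple', 'dec', which I take to be intensity, pleasantness, and the remaining 19 descriptors) and by target statistic ('mean' vs. 'sigma', the across-subject mean and standard deviation). The sketch below illustrates how such a configuration could be turned into scikit-learn estimators, with the et flag selecting ExtraTreesRegressor over RandomForestRegressor; it is only an illustration of the parameter roles, not the actual construction inside fit2.rfc_final.
# Illustrative only: build one estimator per (descriptor group, statistic) from the
# dictionaries above. fit2.rfc_final may construct its models differently.
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor

def build_estimator(group, stat):
    cls = ExtraTreesRegressor if et[group][stat] else RandomForestRegressor
    return cls(n_estimators=n_estimators,
               max_features=max_features[group][stat],
               min_samples_leaf=min_samples_leaf[group][stat],
               max_depth=max_depth[group][stat],
               n_jobs=-1)

print(build_estimator('int', 'mean'))  # e.g. the intensity-mean model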
In [22]:
# Fit the training data. Ignore the warning that arises if too few trees are used.
rfcs_leaderboard, score, rs = fit2.rfc_final(X_training, Y_training_imp['mean_std'],
                                             Y_training_mask['mean_std'],
                                             max_features, min_samples_leaf, max_depth,
                                             et, use_mask, n_estimators=n_estimators)
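rfc_final returns the fitted models along with a score and per-descriptor correlations. As a rough, unofficial quality check, predicted and observed columns can also be correlated directly; the helper below is a plain NumPy sketch, not the official DREAM scoring implemented in the scoring module, and it assumes two arrays of matching shape (observations may contain NaNs for masked values).
# Unofficial check: per-column Pearson correlation between predicted and observed values.
def column_correlations(y_pred, y_obs):
    rs = []
    for j in range(y_obs.shape[1]):
        observed = ~np.isnan(y_obs[:, j])  # skip masked/missing observations
        rs.append(np.corrcoef(y_pred[observed, j], y_obs[observed, j])[0, 1])
    return np.array(rs)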
In [30]:
loading.make_prediction_files(rfcs_leaderboard, X_leaderboard_int, X_leaderboard_other,
                              'leaderboard', 2, Y_test=Y_leaderboard_noimpute, write=write)
Out[30]:
In [27]:
rfcs_leaderboard
Out[27]:
In [23]:
rfcs, score, rs = fit2.rfc_final(X_all, Y_all_imp['mean_std'], Y_all_mask['mean_std'],
                                 max_features, min_samples_leaf, max_depth,
                                 et, use_mask, n_estimators=n_estimators)
In [24]:
# Generate predictions for the final test set (write=False, so no files are written here).
loading.make_prediction_files(rfcs, X_testset_int, X_testset_other, 'testset', 2, write=False)
Out[24]: