In [4]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import pprocess

In [2]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
#filename = 'list_of_SUCCESS_log_run3DID_STAT_200_275_examples_201001MAY2014.txt'
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()


number of lines in listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt:37

In [5]:
ddi  = ddis[0]
ddis


Out[5]:
['6PGD_int_NAD_binding_2',
 'Activin_recp_int_TGF_beta',
 'ADSL_C_int_Lyase_1',
 'AICARFT_IMPCHas_int_MGS',
 'AIRS_int_AIRS_C',
 'Ald_Xan_dh_C_int_FAD_binding_5',
 'Alpha-amylase_int_CBM_48',
 'AMNp_N_int_PNP_UDP_1',
 'ARPC4_int_WD40',
 'CagX_int_TrbI',
 'Cation_ATPase_C_int_E1-E2_ATPase',
 'Ca_chan_IQ_int_efhand',
 'CBM_20_int_Glyco_hydro_14',
 'Cytochrom_B_C_int_Rieske',
 'Cytochrom_B_N_int_UCR_14kD',
 'Cytochrom_B_N_int_UCR_Fe-S_N',
 'Dioxygenase_C_int_Dioxygenase_N',
 'E1-E2_ATPase_int_Hydrolase',
 'EFG_C_int_GTP_EFTU',
 'efhand_int_IQ',
 'efhand_int_Troponin',
 'Fapy_DNA_glyco_int_H2TH',
 'Fer4_NifH_int_Oxidored_nitro',
 'FGF_int_I-set',
 'FumaraseC_C_int_Lyase_1',
 'Furin-like_int_Recep_L_domain',
 'Glyco_hydro_10_int_Ricin_B_lectin',
 'GP120_int_ig',
 'H2TH_int_zf-FPG_IleRS',
 'Ion_trans_2_int_V-set',
 'JmjC_int_JmjN',
 'Kringle_int_PAN_1',
 'MDH_int_PQQ',
 'PA_int_Peptidase_M28',
 'Peptidase_M28_int_TFR_dimer',
 'Photo_RC_int_PSII',
 'Stathmin_int_Tubulin']

In [13]:
class DDI_family_base(object):
    def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
    #def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
        """ get total number of sequences in a ddi familgy
        Attributes:
            ddi: string ddi name
            Vectors_Fishers_aaIndex_raw_folder: string, folder
            total_number_of_sequences: int
            raw_data: dict raw_data[2]
        LOO_data['FisherM1'][1]

        """
        self.ddi = ddi
        self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
        self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
        self.total_number_of_sequences = self.get_total_number_of_sequences()
        self.raw_data = {}
        self.positve_negative_number = {}
        self.equal_size_data = {}
        for seq_no in range(1, self.total_number_of_sequences+1):
            self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
            try:
                #positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
                #file_obj = FileOperator(positive_file)
                #lines = file_obj.readStripLines()
                #import pdb; pdb.set_trace()
                count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
                count_neg = self.raw_data[seq_no].shape[0] - count_pos
                #self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
                #assert int(float(lines[0])) == count_pos
                self.positve_negative_number[seq_no] = {'numPos': count_pos}
                #negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
                #file_obj = FileOperator(negative_file)
                #lines = file_obj.readStripLines()
                #self.positve_negative_number[seq_no]['numNeg'] =  int(float(lines[0]))
                self.positve_negative_number[seq_no]['numNeg'] =  count_neg
            except Exception,e:
                print ddi, seq_no
                print str(e)
            # get data for equal positive and negative
            n_pos = self.positve_negative_number[seq_no]['numPos']
            n_neg = self.positve_negative_number[seq_no]['numNeg']
            index_neg = range(n_pos, n_pos + n_neg)
            random.shuffle(index_neg)
            index_neg = index_neg[: n_pos]
            positive_examples = self.raw_data[seq_no][ : n_pos, :]
            negative_examples = self.raw_data[seq_no][index_neg, :]
            self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
    def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
        """ get the leave one out traing data, reduced traing
        Parameters:
            seq_no: 
            fisher_mode: default 'FisherM1ONLY'
        Returns:
            (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced),  (test_X, test_y)
        """
        train_X_LOO = np.array([])
        train_y_LOO = np.array([])
        train_X_reduced = np.array([])
        train_y_reduced = np.array([])

        total_number_of_sequences = self.total_number_of_sequences
        equal_size_data_selected_sequence = self.equal_size_data[seq_no]

        #get test data for selected sequence
        test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
        total_sequences = range(1, total_number_of_sequences+1)
        loo_sequences = [i for i in total_sequences if i != seq_no]
        number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
        random.shuffle(loo_sequences)
        reduced_sequences = loo_sequences[:number_of_reduced]

        #for loo data
        for current_no in loo_sequences:
            raw_current_data = self.equal_size_data[current_no]
            current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
            if train_X_LOO.ndim ==1:
                train_X_LOO = current_X
            else:
                train_X_LOO = np.vstack((train_X_LOO, current_X))
            train_y_LOO = np.concatenate((train_y_LOO, current_y))

        #for reduced data
        for current_no in reduced_sequences:
            raw_current_data = self.equal_size_data[current_no]
            current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
            if train_X_reduced.ndim ==1:
                train_X_reduced = current_X
            else:
                train_X_reduced = np.vstack((train_X_reduced, current_X))
            train_y_reduced = np.concatenate((train_y_reduced, current_y))                

        return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
    def get_total_number_of_sequences(self):
        """ get total number of sequences in a ddi familgy
        Parameters:
            ddi: string
            Vectors_Fishers_aaIndex_raw_folder: string
        Returns:
            n: int
        """
        folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/' 
        filename = folder_path +'allPairs.txt'
        all_pairs = np.loadtxt(filename)
        return len(all_pairs)

    def get_raw_data_for_selected_seq(self, seq_no):
        """ get raw data for selected seq no in a family
        Parameters:
            ddi: 
            seq_no: 
        Returns:
            data: raw data in the sequence file
        """
        folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/' 
        filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
        data = np.loadtxt(filename)
        return data
    def select_X_y(self, data, fisher_mode = ''):
        """ select subset from the raw input data set
        Parameters:
            data: data from matlab txt file
            fisher_mode: subset base on this Fisher of AAONLY...
        Returns:
            selected X,  y
        """
        y = data[:,-1] # get lable
        if fisher_mode == 'FisherM1': # fisher m1 plus AA index
            a = data[:, 20:227]
            b = data[:, 247:454]
            X = np.hstack((a,b))
        elif fisher_mode == 'FisherM1ONLY': 
            a = data[:, 20:40]
            b = data[:, 247:267]
            X = np.hstack((a,b))
        elif fisher_mode == 'AAONLY':
            a = data[:, 40:227]
            b = data[:, 267:454]
            X = np.hstack((a,b))
        else:
            raise('there is an error in mode')
        return X, y

In [14]:
import sklearn.preprocessing
class Precessing_Scaler_0_9(sklearn.preprocessing.StandardScaler):
    def __init__(self):
        super(Precessing_Scaler_0_9, self).__init__(self, with_std=0.333)
    def transform(self, X): # transform data to 0.1 to 0.9
        new_X = super(Precessing_Scaler_0_9, self).transform(X)
        print 
        new_X[new_X > 1] = 1
        new_X[new_X < -1] = -1
        new_X = (new_X + 1) * 0.4 + 0.1
        return new_X
    def fit_transform(self):
        print 'Did not implement'
def performance_score(target_label, predicted_label, predicted_score = False, print_report = True): 
    """ get performance matrix for prediction
        Attributes:
            target_label: int 0, 1
            predicted_label: 0, 1 or ranking
            predicted_score: bool if False, predicted_label is from 0, 1. If Ture, predicted_label is ranked, need to get AUC score.
            print_report: if True, print the perfromannce on screen
    """
    import sklearn
    from sklearn.metrics import roc_auc_score
    score = {}
    if predicted_score == False:
        score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
        score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
        score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
    if predicted_score == True:
        auc_score  = roc_auc_score(target_label, predicted_label)
        score['auc_score'] = auc_score
        target_label = [x >= 0.5 for x in target_label]
        score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
        score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
        score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
    if print_report == True:
        for key, value in score.iteritems():
            print key, '{percent:.1%}'.format(percent=value)
    return score

def LOO_out_performance_for_all(ddis):
    for ddi in ddis:
        one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
        one_ddi_family.get_LOO_perfermance('FisherM1', '')
def process_one_ddi(ddi):
    """A function to waste CPU cycles"""
    one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
    one_ddi_family.get_LOO_perfermance('FisherM1', '')
def parallel_process(function, ddis, nproc = 2):
    # maximum number of simultaneous processes desired
    results = pprocess.Map(limit=nproc, reuse=1)
    parallel_function = results.manage(pprocess.MakeReusable(function))
    [parallel_function(ddi) for ddi in ddis]  # Start computing things
    return results[:]

class LOO_out_performance_for_one_ddi(object):
        """ get the performance of ddi families
        Attributes:
            ddi: string ddi name
            Vectors_Fishers_aaIndex_raw_folder: string, folder
            total_number_of_sequences: int
            raw_data: dict raw_data[2]

        """
        def __init__(self, ddi):
            self.ddi_obj = DDI_family_base(ddi)
        def analysis_score(self, target_label, predicted_label): #new
            score = (sklearn.metrics.accuracy_score(target_label, predicted_label),
                     sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1),
                     sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1))
            return score
        def saveAsCsv(self, predicted_score, fname, *arguments): #new
            newfile = False
            if os.path.isfile(fname + '_report.csv'):
                pass
            else:
                newfile = True
            csvfile = open(fname + '_report.csv', 'a+')
            writer = csv.writer(csvfile)
            if newfile == True:
                if predicted_score == False:
                    writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest', 'accuracy', 'precision', 'recall']) #, 'AUC'])
                else:
                    writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest', 'AUC', 'accuracy', 'precision', 'recall'])
            for arg in arguments:        
                writer.writerows(arg)
            csvfile.close()

        def get_LOO_perfermance(self, fisher_mode, settings = None):
            analysis_scr = []
            predicted_score = False
            reduce_ratio = 4
            for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
                print seq_no
                print "SVM"
                (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no, reduce_ratio = reduce_ratio)
                standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
                scaled_train_X = standard_scaler.transform(train_X_reduced)
                scaled_test_X = standard_scaler.transform(test_X)
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(scaled_train_X, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                
                
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(scaled_train_X, train_y_reduced)
                predicted_train_y = Linear_SVC.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
                
                
                print "direct deep learning"
                # direct deep learning 
                min_max_scaler = Precessing_Scaler_0_9()
                X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
                X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
                x_test_minmax = min_max_scaler.transform(test_X)
                pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
                x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax, 
                                                                                                  train_y_reduced
                                                                    , test_size=0.2, random_state=42)
                finetune_lr = 1
                batch_size = 100
                pretraining_epochs = cal_epochs(1, x_train_minmax, batch_size = batch_size)
                #pretrain_lr=0.001
                pretrain_lr = 0.001
                training_epochs = 10
                hidden_layers_sizes= [100, 100]
                corruption_levels = [0,0]
                sda = trainSda(x_train_minmax, y_train_minmax,
                             x_validation_minmax, y_validation_minmax , 
                             x_test_minmax, test_y,
                             hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                             training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, 
                             pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                 )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
                
                test_predicted = sda.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
                
                
                # deep learning using unlabeled data for pretraining
                print 'deep learning with unlabel data'
                pretraining_epochs = cal_epochs(1, pretraining_X_minmax, batch_size = batch_size)
                sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
                             x_validation_minmax, y_validation_minmax , 
                             x_test_minmax, test_y, 
                             pretraining_X_minmax = pretraining_X_minmax,
                             hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                             training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, 
                             pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                 )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_unlabel.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
                
                test_predicted = sda_unlabel.predict(x_test_minmax)
                y_test = test_y
                
                isTest = True; #new
                analysis_scr.append((ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
            report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + \
                            '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)
            self.saveAsCsv(predicted_score, report_name, analysis_scr)

In [15]:
process_one_ddi(ddis[0])


---------------------------------------------------------------------------
IOError                                   Traceback (most recent call last)
<ipython-input-15-463297ca20f0> in <module>()
----> 1 process_one_ddi(ddis[0])

<ipython-input-14-72f550264d43> in process_one_ddi(ddi)
     45 def process_one_ddi(ddi):
     46     """A function to waste CPU cycles"""
---> 47     one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
     48     one_ddi_family.get_LOO_perfermance('FisherM1', '')
     49 def parallel_process(function, ddis, nproc = 2):

<ipython-input-14-72f550264d43> in __init__(self, ddi)
     64         """
     65         def __init__(self, ddi):
---> 66             self.ddi_obj = DDI_family_base(ddi)
     67         def analysis_score(self, target_label, predicted_label): #new
     68             score = (sklearn.metrics.accuracy_score(target_label, predicted_label),

<ipython-input-13-c4c7edead8da> in __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder)
     14         self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
     15         self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
---> 16         self.total_number_of_sequences = self.get_total_number_of_sequences()
     17         self.raw_data = {}
     18         self.positve_negative_number = {}

<ipython-input-13-c4c7edead8da> in get_total_number_of_sequences(self)
    102         folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
    103         filename = folder_path +'allPairs.txt'
--> 104         all_pairs = np.loadtxt(filename)
    105         return len(all_pairs)
    106 

/usr/local/lib/python2.7/dist-packages/numpy/lib/npyio.pyc in loadtxt(fname, dtype, comments, delimiter, converters, skiprows, usecols, unpack, ndmin)
    732                 fh = iter(bz2.BZ2File(fname))
    733             elif sys.version_info[0] == 2:
--> 734                 fh = iter(open(fname, 'U'))
    735             else:
    736                 fh = iter(open(fname))

IOError: [Errno 2] No such file or directory: '/home/du/Vectors_Fishers_aaIndex_raw/6PGD_int_NAD_binding_2/allPairs.txt'

In [11]:
parallel_process(process_one_ddi, ddis, nproc = 7)


---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-11-b21b401df1e6> in <module>()
      1 
----> 2 parallel_process(process_one_ddi, ddis, nproc = 7)

<ipython-input-10-72f550264d43> in parallel_process(function, ddis, nproc)
     52     parallel_function = results.manage(pprocess.MakeReusable(function))
     53     [parallel_function(ddi) for ddi in ddis]  # Start computing things
---> 54     return results[:]
     55 
     56 class LOO_out_performance_for_one_ddi(object):

/usr/local/lib/python2.7/dist-packages/pprocess.pyc in __getitem__(self, i)
    788                 pass
    789         else:
--> 790             raise IndexError, i
    791 
    792     # Helper methods for the above access methods.

IndexError: slice(0, 9223372036854775807, None)

In [58]:
LOO_out_performance_for_all(ddis)


1
SVM
recall 36.8%
precision 41.0%
accuracy 41.9%
recall 47.4%
precision 55.1%
accuracy 54.4%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.000000 %
 epoch 1, minibatch 8/8, test error of best model 32.000000 %
epoch 2, minibatch 8/8, validation error 52.000000 %
epoch 3, minibatch 8/8, validation error 52.000000 %
epoch 4, minibatch 8/8, validation error 52.000000 %
epoch 5, minibatch 8/8, validation error 52.000000 %
epoch 6, minibatch 8/8, validation error 50.500000 %
 epoch 6, minibatch 8/8, test error of best model 36.000000 %
epoch 7, minibatch 8/8, validation error 49.000000 %
 epoch 7, minibatch 8/8, test error of best model 39.000000 %
epoch 8, minibatch 8/8, validation error 47.500000 %
 epoch 8, minibatch 8/8, test error of best model 41.000000 %
epoch 9, minibatch 8/8, validation error 49.500000 %
epoch 10, minibatch 8/8, validation error 49.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 67.1%
precision 51.5%
accuracy 51.5%
recall 67.6%
precision 48.4%
accuracy 47.8%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.000000 %
 epoch 1, minibatch 8/8, test error of best model 32.000000 %
epoch 2, minibatch 8/8, validation error 52.000000 %
epoch 3, minibatch 8/8, validation error 52.000000 %
epoch 4, minibatch 8/8, validation error 52.000000 %
epoch 5, minibatch 8/8, validation error 52.000000 %
epoch 6, minibatch 8/8, validation error 50.500000 %
 epoch 6, minibatch 8/8, test error of best model 36.000000 %
epoch 7, minibatch 8/8, validation error 49.000000 %
 epoch 7, minibatch 8/8, test error of best model 39.000000 %
epoch 8, minibatch 8/8, validation error 47.500000 %
 epoch 8, minibatch 8/8, test error of best model 41.000000 %
epoch 9, minibatch 8/8, validation error 49.500000 %
epoch 10, minibatch 8/8, validation error 49.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 67.1%
precision 51.5%
accuracy 51.5%
recall 67.6%
precision 48.4%
accuracy 47.8%
2
SVM
recall 51.5%
precision 50.7%
accuracy 50.8%
recall 65.3%
precision 59.2%
accuracy 60.2%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 48.000000 %
 epoch 1, minibatch 10/10, test error of best model 66.000000 %
epoch 2, minibatch 10/10, validation error 48.000000 %
epoch 3, minibatch 10/10, validation error 48.000000 %
epoch 4, minibatch 10/10, validation error 48.000000 %
epoch 5, minibatch 10/10, validation error 48.000000 %
epoch 6, minibatch 10/10, validation error 48.000000 %
epoch 7, minibatch 10/10, validation error 48.000000 %
epoch 8, minibatch 10/10, validation error 48.000000 %
epoch 9, minibatch 10/10, validation error 48.000000 %
epoch 10, minibatch 10/10, validation error 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.0%
precision 0.0%
accuracy 50.0%
recall 0.0%
precision 0.0%
accuracy 49.2%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 48.000000 %
 epoch 1, minibatch 10/10, test error of best model 66.000000 %
epoch 2, minibatch 10/10, validation error 48.000000 %
epoch 3, minibatch 10/10, validation error 48.000000 %
epoch 4, minibatch 10/10, validation error 48.000000 %
epoch 5, minibatch 10/10, validation error 48.000000 %
epoch 6, minibatch 10/10, validation error 48.000000 %
epoch 7, minibatch 10/10, validation error 48.000000 %
epoch 8, minibatch 10/10, validation error 48.000000 %
epoch 9, minibatch 10/10, validation error 48.000000 %
epoch 10, minibatch 10/10, validation error 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.0%
precision 0.0%
accuracy 50.0%
recall 0.0%
precision 0.0%
accuracy 49.2%
3
SVM
recall 63.8%
precision 46.8%
accuracy 45.7%
recall 54.8%
precision 53.8%
accuracy 53.9%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 45.500000 %
 epoch 1, minibatch 10/10, test error of best model 42.000000 %
epoch 2, minibatch 10/10, validation error 45.500000 %
epoch 3, minibatch 10/10, validation error 54.500000 %
epoch 4, minibatch 10/10, validation error 54.500000 %
epoch 5, minibatch 10/10, validation error 53.000000 %
epoch 6, minibatch 10/10, validation error 53.000000 %
epoch 7, minibatch 10/10, validation error 52.500000 %
epoch 8, minibatch 10/10, validation error 52.000000 %
epoch 9, minibatch 10/10, validation error 52.500000 %
epoch 10, minibatch 10/10, validation error 51.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 39.8%
precision 58.8%
accuracy 56.3%
recall 36.2%
precision 52.5%
accuracy 51.7%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 45.500000 %
 epoch 1, minibatch 10/10, test error of best model 42.000000 %
epoch 2, minibatch 10/10, validation error 45.500000 %
epoch 3, minibatch 10/10, validation error 54.500000 %
epoch 4, minibatch 10/10, validation error 54.500000 %
epoch 5, minibatch 10/10, validation error 53.000000 %
epoch 6, minibatch 10/10, validation error 53.000000 %
epoch 7, minibatch 10/10, validation error 52.500000 %
epoch 8, minibatch 10/10, validation error 52.000000 %
epoch 9, minibatch 10/10, validation error 52.500000 %
epoch 10, minibatch 10/10, validation error 51.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 39.8%
precision 58.8%
accuracy 56.3%
recall 36.2%
precision 52.5%
accuracy 51.7%
4
SVM
recall 55.9%
precision 54.1%
accuracy 54.2%
recall 55.7%
precision 57.7%
accuracy 57.4%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 55.000000 %
 epoch 1, minibatch 8/8, test error of best model 41.000000 %
epoch 2, minibatch 8/8, validation error 55.000000 %
epoch 3, minibatch 8/8, validation error 55.000000 %
epoch 4, minibatch 8/8, validation error 45.000000 %
 epoch 4, minibatch 8/8, test error of best model 59.000000 %
epoch 5, minibatch 8/8, validation error 45.000000 %
epoch 6, minibatch 8/8, validation error 45.000000 %
epoch 7, minibatch 8/8, validation error 45.500000 %
epoch 8, minibatch 8/8, validation error 45.000000 %
epoch 9, minibatch 8/8, validation error 46.000000 %
epoch 10, minibatch 8/8, validation error 45.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 17.0%
precision 67.0%
accuracy 53.1%
recall 15.3%
precision 69.2%
accuracy 54.2%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 55.000000 %
 epoch 1, minibatch 8/8, test error of best model 41.000000 %
epoch 2, minibatch 8/8, validation error 55.000000 %
epoch 3, minibatch 8/8, validation error 55.000000 %
epoch 4, minibatch 8/8, validation error 45.000000 %
 epoch 4, minibatch 8/8, test error of best model 59.000000 %
epoch 5, minibatch 8/8, validation error 45.000000 %
epoch 6, minibatch 8/8, validation error 45.000000 %
epoch 7, minibatch 8/8, validation error 45.500000 %
epoch 8, minibatch 8/8, validation error 45.000000 %
epoch 9, minibatch 8/8, validation error 46.000000 %
epoch 10, minibatch 8/8, validation error 45.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 17.0%
precision 67.0%
accuracy 53.1%
recall 15.3%
precision 69.2%
accuracy 54.2%
5
SVM
recall 64.4%
precision 58.5%
accuracy 59.3%
recall 58.7%
precision 55.0%
accuracy 55.3%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.000000 %
 epoch 1, minibatch 8/8, test error of best model 41.000000 %
epoch 2, minibatch 8/8, validation error 52.000000 %
epoch 3, minibatch 8/8, validation error 48.000000 %
 epoch 3, minibatch 8/8, test error of best model 59.000000 %
epoch 4, minibatch 8/8, validation error 48.000000 %
epoch 5, minibatch 8/8, validation error 48.500000 %
epoch 6, minibatch 8/8, validation error 44.000000 %
 epoch 6, minibatch 8/8, test error of best model 49.000000 %
epoch 7, minibatch 8/8, validation error 42.500000 %
 epoch 7, minibatch 8/8, test error of best model 42.000000 %
epoch 8, minibatch 8/8, validation error 42.500000 %
epoch 9, minibatch 8/8, validation error 43.000000 %
epoch 10, minibatch 8/8, validation error 42.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 62.1%
precision 53.8%
accuracy 53.8%
recall 64.4%
precision 55.1%
accuracy 55.9%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.000000 %
 epoch 1, minibatch 8/8, test error of best model 41.000000 %
epoch 2, minibatch 8/8, validation error 52.000000 %
epoch 3, minibatch 8/8, validation error 48.000000 %
 epoch 3, minibatch 8/8, test error of best model 59.000000 %
epoch 4, minibatch 8/8, validation error 48.000000 %
epoch 5, minibatch 8/8, validation error 48.500000 %
epoch 6, minibatch 8/8, validation error 44.000000 %
 epoch 6, minibatch 8/8, test error of best model 49.000000 %
epoch 7, minibatch 8/8, validation error 42.500000 %
 epoch 7, minibatch 8/8, test error of best model 42.000000 %
epoch 8, minibatch 8/8, validation error 42.500000 %
epoch 9, minibatch 8/8, validation error 43.000000 %
epoch 10, minibatch 8/8, validation error 42.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 62.1%
precision 53.8%
accuracy 53.8%
recall 64.4%
precision 55.1%
accuracy 55.9%
6
SVM
recall 45.6%
precision 49.1%
accuracy 49.1%
recall 52.9%
precision 56.2%
accuracy 55.9%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 51.000000 %
 epoch 1, minibatch 8/8, test error of best model 57.000000 %
epoch 2, minibatch 8/8, validation error 51.000000 %
epoch 3, minibatch 8/8, validation error 51.000000 %
epoch 4, minibatch 8/8, validation error 51.000000 %
epoch 5, minibatch 8/8, validation error 51.000000 %
epoch 6, minibatch 8/8, validation error 51.000000 %
epoch 7, minibatch 8/8, validation error 51.000000 %
epoch 8, minibatch 8/8, validation error 51.000000 %
epoch 9, minibatch 8/8, validation error 51.000000 %
epoch 10, minibatch 8/8, validation error 51.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.0%
precision 0.0%
accuracy 50.6%
recall 0.0%
precision 0.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 51.000000 %
 epoch 1, minibatch 8/8, test error of best model 57.000000 %
epoch 2, minibatch 8/8, validation error 51.000000 %
epoch 3, minibatch 8/8, validation error 51.000000 %
epoch 4, minibatch 8/8, validation error 51.000000 %
epoch 5, minibatch 8/8, validation error 51.000000 %
epoch 6, minibatch 8/8, validation error 51.000000 %
epoch 7, minibatch 8/8, validation error 51.000000 %
epoch 8, minibatch 8/8, validation error 51.000000 %
epoch 9, minibatch 8/8, validation error 51.000000 %
epoch 10, minibatch 8/8, validation error 51.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.0%
precision 0.0%
accuracy 50.6%
recall 0.0%
precision 0.0%
accuracy 50.0%
7
SVM
recall 66.2%
precision 51.8%
accuracy 52.3%
recall 63.6%
precision 54.9%
accuracy 55.7%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 9/9, validation error 51.500000 %
 epoch 1, minibatch 9/9, test error of best model 35.000000 %
epoch 2, minibatch 9/9, validation error 56.500000 %
epoch 3, minibatch 9/9, validation error 51.500000 %
epoch 4, minibatch 9/9, validation error 51.500000 %
epoch 5, minibatch 9/9, validation error 51.500000 %
epoch 6, minibatch 9/9, validation error 51.500000 %
epoch 7, minibatch 9/9, validation error 51.500000 %
epoch 8, minibatch 9/9, validation error 51.500000 %
epoch 9, minibatch 9/9, validation error 51.500000 %
epoch 10, minibatch 9/9, validation error 51.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 50.1%
accuracy 50.1%
recall 100.0%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 9/9, validation error 51.500000 %
 epoch 1, minibatch 9/9, test error of best model 35.000000 %
epoch 2, minibatch 9/9, validation error 56.500000 %
epoch 3, minibatch 9/9, validation error 51.500000 %
epoch 4, minibatch 9/9, validation error 51.500000 %
epoch 5, minibatch 9/9, validation error 51.500000 %
epoch 6, minibatch 9/9, validation error 51.500000 %
epoch 7, minibatch 9/9, validation error 51.500000 %
epoch 8, minibatch 9/9, validation error 51.500000 %
epoch 9, minibatch 9/9, validation error 51.500000 %
epoch 10, minibatch 9/9, validation error 51.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 50.1%
accuracy 50.1%
recall 100.0%
precision 50.0%
accuracy 50.0%
8
SVM
recall 58.7%
precision 50.7%
accuracy 50.8%
recall 52.8%
precision 55.7%
accuracy 55.4%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 49.000000 %
 epoch 1, minibatch 8/8, test error of best model 63.000000 %
epoch 2, minibatch 8/8, validation error 49.000000 %
epoch 3, minibatch 8/8, validation error 49.000000 %
epoch 4, minibatch 8/8, validation error 51.000000 %
epoch 5, minibatch 8/8, validation error 51.000000 %
epoch 6, minibatch 8/8, validation error 51.000000 %
epoch 7, minibatch 8/8, validation error 51.000000 %
epoch 8, minibatch 8/8, validation error 50.500000 %
epoch 9, minibatch 8/8, validation error 51.000000 %
epoch 10, minibatch 8/8, validation error 48.500000 %
 epoch 10, minibatch 8/8, test error of best model 38.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 94.5%
precision 50.6%
accuracy 50.8%
recall 96.8%
precision 50.4%
accuracy 50.8%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 49.000000 %
 epoch 1, minibatch 8/8, test error of best model 63.000000 %
epoch 2, minibatch 8/8, validation error 49.000000 %
epoch 3, minibatch 8/8, validation error 49.000000 %
epoch 4, minibatch 8/8, validation error 51.000000 %
epoch 5, minibatch 8/8, validation error 51.000000 %
epoch 6, minibatch 8/8, validation error 51.000000 %
epoch 7, minibatch 8/8, validation error 51.000000 %
epoch 8, minibatch 8/8, validation error 50.500000 %
epoch 9, minibatch 8/8, validation error 51.000000 %
epoch 10, minibatch 8/8, validation error 48.500000 %
 epoch 10, minibatch 8/8, test error of best model 38.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 94.5%
precision 50.6%
accuracy 50.8%
recall 96.8%
precision 50.4%
accuracy 50.8%
9
SVM
recall 0.0%
precision 0.0%
accuracy 50.0%
recall 64.3%
precision 57.4%
accuracy 58.3%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.500000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 52.500000 %
epoch 3, minibatch 8/8, validation error 47.500000 %
 epoch 3, minibatch 8/8, test error of best model nan %
epoch 4, minibatch 8/8, validation error 47.500000 %
epoch 5, minibatch 8/8, validation error 47.500000 %
epoch 6, minibatch 8/8, validation error 35.500000 %
 epoch 6, minibatch 8/8, test error of best model nan %
epoch 7, minibatch 8/8, validation error 47.500000 %
epoch 8, minibatch 8/8, validation error 47.000000 %
epoch 9, minibatch 8/8, validation error 49.000000 %
epoch 10, minibatch 8/8, validation error 50.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 92.4%
precision 50.5%
accuracy 50.7%
recall 22.2%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.500000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 52.500000 %
epoch 3, minibatch 8/8, validation error 47.500000 %
 epoch 3, minibatch 8/8, test error of best model nan %
epoch 4, minibatch 8/8, validation error 47.500000 %
epoch 5, minibatch 8/8, validation error 47.500000 %
epoch 6, minibatch 8/8, validation error 35.500000 %
 epoch 6, minibatch 8/8, test error of best model nan %
epoch 7, minibatch 8/8, validation error 47.500000 %
epoch 8, minibatch 8/8, validation error 47.000000 %
epoch 9, minibatch 8/8, validation error 49.000000 %
epoch 10, minibatch 8/8, validation error 50.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 92.4%
precision 50.5%
accuracy 50.7%
recall 22.2%
precision 50.0%
accuracy 50.0%
10
SVM
recall 66.1%
precision 50.7%
accuracy 50.9%
recall 62.2%
precision 55.9%
accuracy 56.5%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 9/9, validation error 53.000000 %
 epoch 1, minibatch 9/9, test error of best model 56.000000 %
epoch 2, minibatch 9/9, validation error 47.000000 %
 epoch 2, minibatch 9/9, test error of best model 44.000000 %
epoch 3, minibatch 9/9, validation error 53.000000 %
epoch 4, minibatch 9/9, validation error 47.000000 %
epoch 5, minibatch 9/9, validation error 52.000000 %
epoch 6, minibatch 9/9, validation error 50.500000 %
epoch 7, minibatch 9/9, validation error 52.500000 %
epoch 8, minibatch 9/9, validation error 52.500000 %
epoch 9, minibatch 9/9, validation error 52.500000 %
epoch 10, minibatch 9/9, validation error 52.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.8%
precision 57.1%
accuracy 50.2%
recall 0.0%
precision 0.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 9/9, validation error 53.000000 %
 epoch 1, minibatch 9/9, test error of best model 56.000000 %
epoch 2, minibatch 9/9, validation error 47.000000 %
 epoch 2, minibatch 9/9, test error of best model 44.000000 %
epoch 3, minibatch 9/9, validation error 53.000000 %
epoch 4, minibatch 9/9, validation error 47.000000 %
epoch 5, minibatch 9/9, validation error 52.000000 %
epoch 6, minibatch 9/9, validation error 50.500000 %
epoch 7, minibatch 9/9, validation error 52.500000 %
epoch 8, minibatch 9/9, validation error 52.500000 %
epoch 9, minibatch 9/9, validation error 52.500000 %
epoch 10, minibatch 9/9, validation error 52.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.8%
precision 57.1%
accuracy 50.2%
recall 0.0%
precision 0.0%
accuracy 50.0%
11
SVM
recall 59.3%
precision 53.0%
accuracy 53.4%
recall 56.2%
precision 55.6%
accuracy 55.6%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 52.000000 %
 epoch 1, minibatch 6/6, test error of best model 41.000000 %
epoch 2, minibatch 6/6, validation error 52.000000 %
epoch 3, minibatch 6/6, validation error 52.000000 %
epoch 4, minibatch 6/6, validation error 52.000000 %
epoch 5, minibatch 6/6, validation error 52.000000 %
epoch 6, minibatch 6/6, validation error 56.000000 %
epoch 7, minibatch 6/6, validation error 55.000000 %
epoch 8, minibatch 6/6, validation error 55.000000 %
epoch 9, minibatch 6/6, validation error 55.000000 %
epoch 10, minibatch 6/6, validation error 56.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 61.6%
precision 54.0%
accuracy 54.7%
recall 55.9%
precision 49.3%
accuracy 49.2%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 52.000000 %
 epoch 1, minibatch 6/6, test error of best model 41.000000 %
epoch 2, minibatch 6/6, validation error 52.000000 %
epoch 3, minibatch 6/6, validation error 52.000000 %
epoch 4, minibatch 6/6, validation error 52.000000 %
epoch 5, minibatch 6/6, validation error 52.000000 %
epoch 6, minibatch 6/6, validation error 56.000000 %
epoch 7, minibatch 6/6, validation error 55.000000 %
epoch 8, minibatch 6/6, validation error 55.000000 %
epoch 9, minibatch 6/6, validation error 55.000000 %
epoch 10, minibatch 6/6, validation error 56.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 61.6%
precision 54.0%
accuracy 54.7%
recall 55.9%
precision 49.3%
accuracy 49.2%
12
SVM
recall 68.2%
precision 55.6%
accuracy 56.8%
recall 63.1%
precision 58.1%
accuracy 58.8%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 7/7, validation error 47.000000 %
 epoch 1, minibatch 7/7, test error of best model 66.000000 %
epoch 2, minibatch 7/7, validation error 53.000000 %
epoch 3, minibatch 7/7, validation error 47.000000 %
epoch 4, minibatch 7/7, validation error 47.000000 %
epoch 5, minibatch 7/7, validation error 47.000000 %
epoch 6, minibatch 7/7, validation error 47.000000 %
epoch 7, minibatch 7/7, validation error 47.000000 %
epoch 8, minibatch 7/7, validation error 48.000000 %
epoch 9, minibatch 7/7, validation error 43.000000 %
 epoch 9, minibatch 7/7, test error of best model 58.000000 %
epoch 10, minibatch 7/7, validation error 41.000000 %
 epoch 10, minibatch 7/7, test error of best model 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 42.9%
precision 57.6%
accuracy 55.3%
recall 42.4%
precision 56.0%
accuracy 54.5%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 7/7, validation error 47.000000 %
 epoch 1, minibatch 7/7, test error of best model 66.000000 %
epoch 2, minibatch 7/7, validation error 53.000000 %
epoch 3, minibatch 7/7, validation error 47.000000 %
epoch 4, minibatch 7/7, validation error 47.000000 %
epoch 5, minibatch 7/7, validation error 47.000000 %
epoch 6, minibatch 7/7, validation error 47.000000 %
epoch 7, minibatch 7/7, validation error 47.000000 %
epoch 8, minibatch 7/7, validation error 48.000000 %
epoch 9, minibatch 7/7, validation error 43.000000 %
 epoch 9, minibatch 7/7, test error of best model 58.000000 %
epoch 10, minibatch 7/7, validation error 41.000000 %
 epoch 10, minibatch 7/7, test error of best model 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 42.9%
precision 57.6%
accuracy 55.3%
recall 42.4%
precision 56.0%
accuracy 54.5%
13
SVM
recall 0.0%
precision 0.0%
accuracy 50.0%
recall 60.6%
precision 58.7%
accuracy 59.0%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 48.000000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 48.000000 %
epoch 3, minibatch 8/8, validation error 48.000000 %
epoch 4, minibatch 8/8, validation error 48.000000 %
epoch 5, minibatch 8/8, validation error 48.000000 %
epoch 6, minibatch 8/8, validation error 48.000000 %
epoch 7, minibatch 8/8, validation error 48.000000 %
epoch 8, minibatch 8/8, validation error 48.000000 %
epoch 9, minibatch 8/8, validation error 48.000000 %
epoch 10, minibatch 8/8, validation error 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 49.3%
accuracy 49.3%
recall 100.0%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 48.000000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 48.000000 %
epoch 3, minibatch 8/8, validation error 48.000000 %
epoch 4, minibatch 8/8, validation error 48.000000 %
epoch 5, minibatch 8/8, validation error 48.000000 %
epoch 6, minibatch 8/8, validation error 48.000000 %
epoch 7, minibatch 8/8, validation error 48.000000 %
epoch 8, minibatch 8/8, validation error 48.000000 %
epoch 9, minibatch 8/8, validation error 48.000000 %
epoch 10, minibatch 8/8, validation error 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 49.3%
accuracy 49.3%
recall 100.0%
precision 50.0%
accuracy 50.0%
14
SVM
recall 54.4%
precision 55.4%
accuracy 55.3%
recall 55.4%
precision 57.2%
accuracy 57.0%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 62.000000 %
 epoch 1, minibatch 6/6, test error of best model 43.000000 %
epoch 2, minibatch 6/6, validation error 62.000000 %
epoch 3, minibatch 6/6, validation error 62.000000 %
epoch 4, minibatch 6/6, validation error 38.000000 %
 epoch 4, minibatch 6/6, test error of best model 57.000000 %
epoch 5, minibatch 6/6, validation error 38.000000 %
epoch 6, minibatch 6/6, validation error 38.000000 %
epoch 7, minibatch 6/6, validation error 38.000000 %
epoch 8, minibatch 6/6, validation error 38.000000 %
epoch 9, minibatch 6/6, validation error 38.000000 %
epoch 10, minibatch 6/6, validation error 38.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.0%
precision 0.0%
accuracy 48.0%
recall 0.0%
precision 0.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 62.000000 %
 epoch 1, minibatch 6/6, test error of best model 43.000000 %
epoch 2, minibatch 6/6, validation error 62.000000 %
epoch 3, minibatch 6/6, validation error 62.000000 %
epoch 4, minibatch 6/6, validation error 38.000000 %
 epoch 4, minibatch 6/6, test error of best model 57.000000 %
epoch 5, minibatch 6/6, validation error 38.000000 %
epoch 6, minibatch 6/6, validation error 38.000000 %
epoch 7, minibatch 6/6, validation error 38.000000 %
epoch 8, minibatch 6/6, validation error 38.000000 %
epoch 9, minibatch 6/6, validation error 38.000000 %
epoch 10, minibatch 6/6, validation error 38.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 0.0%
precision 0.0%
accuracy 48.0%
recall 0.0%
precision 0.0%
accuracy 50.0%
15
SVM
recall 55.4%
precision 50.8%
accuracy 50.9%
recall 54.6%
precision 55.5%
accuracy 55.4%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 53.500000 %
 epoch 1, minibatch 8/8, test error of best model 56.000000 %
epoch 2, minibatch 8/8, validation error 53.500000 %
epoch 3, minibatch 8/8, validation error 53.500000 %
epoch 4, minibatch 8/8, validation error 53.500000 %
epoch 5, minibatch 8/8, validation error 53.500000 %
epoch 6, minibatch 8/8, validation error 53.500000 %
epoch 7, minibatch 8/8, validation error 54.000000 %
epoch 8, minibatch 8/8, validation error 54.000000 %
epoch 9, minibatch 8/8, validation error 54.500000 %
epoch 10, minibatch 8/8, validation error 54.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 1.9%
precision 53.3%
accuracy 50.5%
recall 3.6%
precision 28.6%
accuracy 47.3%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 53.500000 %
 epoch 1, minibatch 8/8, test error of best model 56.000000 %
epoch 2, minibatch 8/8, validation error 53.500000 %
epoch 3, minibatch 8/8, validation error 53.500000 %
epoch 4, minibatch 8/8, validation error 53.500000 %
epoch 5, minibatch 8/8, validation error 53.500000 %
epoch 6, minibatch 8/8, validation error 53.500000 %
epoch 7, minibatch 8/8, validation error 54.000000 %
epoch 8, minibatch 8/8, validation error 54.000000 %
epoch 9, minibatch 8/8, validation error 54.500000 %
epoch 10, minibatch 8/8, validation error 54.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 1.9%
precision 53.3%
accuracy 50.5%
recall 3.6%
precision 28.6%
accuracy 47.3%
16
SVM
recall 53.7%
precision 51.4%
accuracy 51.5%
recall 53.7%
precision 55.0%
accuracy 54.9%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 42.000000 %
 epoch 1, minibatch 6/6, test error of best model 33.000000 %
epoch 2, minibatch 6/6, validation error 42.000000 %
epoch 3, minibatch 6/6, validation error 42.000000 %
epoch 4, minibatch 6/6, validation error 42.000000 %
epoch 5, minibatch 6/6, validation error 42.000000 %
epoch 6, minibatch 6/6, validation error 42.000000 %
epoch 7, minibatch 6/6, validation error 42.000000 %
epoch 8, minibatch 6/6, validation error 42.000000 %
epoch 9, minibatch 6/6, validation error 42.000000 %
epoch 10, minibatch 6/6, validation error 42.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 49.0%
accuracy 49.0%
recall 100.0%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 42.000000 %
 epoch 1, minibatch 6/6, test error of best model 33.000000 %
epoch 2, minibatch 6/6, validation error 42.000000 %
epoch 3, minibatch 6/6, validation error 42.000000 %
epoch 4, minibatch 6/6, validation error 42.000000 %
epoch 5, minibatch 6/6, validation error 42.000000 %
epoch 6, minibatch 6/6, validation error 42.000000 %
epoch 7, minibatch 6/6, validation error 42.000000 %
epoch 8, minibatch 6/6, validation error 42.000000 %
epoch 9, minibatch 6/6, validation error 42.000000 %
epoch 10, minibatch 6/6, validation error 42.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 49.0%
accuracy 49.0%
recall 100.0%
precision 50.0%
accuracy 50.0%
17
SVM
recall 50.0%
precision 66.7%
accuracy 62.5%
recall 55.9%
precision 56.4%
accuracy 56.4%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 7/7, validation error 51.000000 %
 epoch 1, minibatch 7/7, test error of best model nan %
epoch 2, minibatch 7/7, validation error 49.000000 %
 epoch 2, minibatch 7/7, test error of best model nan %
epoch 3, minibatch 7/7, validation error 51.000000 %
epoch 4, minibatch 7/7, validation error 51.000000 %
epoch 5, minibatch 7/7, validation error 51.000000 %
epoch 6, minibatch 7/7, validation error 51.000000 %
epoch 7, minibatch 7/7, validation error 51.000000 %
epoch 8, minibatch 7/7, validation error 51.000000 %
epoch 9, minibatch 7/7, validation error 51.000000 %
epoch 10, minibatch 7/7, validation error 51.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 50.5%
accuracy 50.5%
recall 100.0%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 7/7, validation error 51.000000 %
 epoch 1, minibatch 7/7, test error of best model nan %
epoch 2, minibatch 7/7, validation error 49.000000 %
 epoch 2, minibatch 7/7, test error of best model nan %
epoch 3, minibatch 7/7, validation error 51.000000 %
epoch 4, minibatch 7/7, validation error 51.000000 %
epoch 5, minibatch 7/7, validation error 51.000000 %
epoch 6, minibatch 7/7, validation error 51.000000 %
epoch 7, minibatch 7/7, validation error 51.000000 %
epoch 8, minibatch 7/7, validation error 51.000000 %
epoch 9, minibatch 7/7, validation error 51.000000 %
epoch 10, minibatch 7/7, validation error 51.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 50.5%
accuracy 50.5%
recall 100.0%
precision 50.0%
accuracy 50.0%
18
SVM
recall 68.4%
precision 52.7%
accuracy 53.5%
recall 62.7%
precision 57.4%
accuracy 58.0%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 48.000000 %
 epoch 1, minibatch 10/10, test error of best model 57.000000 %
epoch 2, minibatch 10/10, validation error 48.000000 %
epoch 3, minibatch 10/10, validation error 48.000000 %
epoch 4, minibatch 10/10, validation error 48.000000 %
epoch 5, minibatch 10/10, validation error 48.000000 %
epoch 6, minibatch 10/10, validation error 48.000000 %
epoch 7, minibatch 10/10, validation error 48.000000 %
epoch 8, minibatch 10/10, validation error 48.500000 %
epoch 9, minibatch 10/10, validation error 48.500000 %
epoch 10, minibatch 10/10, validation error 49.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 3.1%
precision 94.1%
accuracy 51.0%
recall 1.8%
precision 100.0%
accuracy 50.9%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 48.000000 %
 epoch 1, minibatch 10/10, test error of best model 57.000000 %
epoch 2, minibatch 10/10, validation error 48.000000 %
epoch 3, minibatch 10/10, validation error 48.000000 %
epoch 4, minibatch 10/10, validation error 48.000000 %
epoch 5, minibatch 10/10, validation error 48.000000 %
epoch 6, minibatch 10/10, validation error 48.000000 %
epoch 7, minibatch 10/10, validation error 48.000000 %
epoch 8, minibatch 10/10, validation error 48.500000 %
epoch 9, minibatch 10/10, validation error 48.500000 %
epoch 10, minibatch 10/10, validation error 49.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 3.1%
precision 94.1%
accuracy 51.0%
recall 1.8%
precision 100.0%
accuracy 50.9%
19
SVM
recall 72.1%
precision 61.1%
accuracy 63.1%
recall 63.1%
precision 56.5%
accuracy 57.3%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 7/7, validation error 54.000000 %
 epoch 1, minibatch 7/7, test error of best model 61.000000 %
epoch 2, minibatch 7/7, validation error 46.000000 %
 epoch 2, minibatch 7/7, test error of best model 39.000000 %
epoch 3, minibatch 7/7, validation error 54.000000 %
epoch 4, minibatch 7/7, validation error 43.000000 %
 epoch 4, minibatch 7/7, test error of best model 44.000000 %
epoch 5, minibatch 7/7, validation error 54.000000 %
epoch 6, minibatch 7/7, validation error 33.000000 %
 epoch 6, minibatch 7/7, test error of best model 45.000000 %
epoch 7, minibatch 7/7, validation error 26.000000 %
 epoch 7, minibatch 7/7, test error of best model 40.000000 %
epoch 8, minibatch 7/7, validation error 29.000000 %
epoch 9, minibatch 7/7, validation error 36.000000 %
epoch 10, minibatch 7/7, validation error 41.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 76.6%
precision 51.0%
accuracy 51.8%
recall 75.4%
precision 50.5%
accuracy 50.8%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 7/7, validation error 54.000000 %
 epoch 1, minibatch 7/7, test error of best model 61.000000 %
epoch 2, minibatch 7/7, validation error 46.000000 %
 epoch 2, minibatch 7/7, test error of best model 39.000000 %
epoch 3, minibatch 7/7, validation error 54.000000 %
epoch 4, minibatch 7/7, validation error 43.000000 %
 epoch 4, minibatch 7/7, test error of best model 44.000000 %
epoch 5, minibatch 7/7, validation error 54.000000 %
epoch 6, minibatch 7/7, validation error 33.000000 %
 epoch 6, minibatch 7/7, test error of best model 45.000000 %
epoch 7, minibatch 7/7, validation error 26.000000 %
 epoch 7, minibatch 7/7, test error of best model 40.000000 %
epoch 8, minibatch 7/7, validation error 29.000000 %
epoch 9, minibatch 7/7, validation error 36.000000 %
epoch 10, minibatch 7/7, validation error 41.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 76.6%
precision 51.0%
accuracy 51.8%
recall 75.4%
precision 50.5%
accuracy 50.8%
20
SVM
recall 73.8%
precision 54.2%
accuracy 55.7%
recall 65.4%
precision 57.4%
accuracy 58.5%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 56.000000 %
 epoch 1, minibatch 6/6, test error of best model 61.000000 %
epoch 2, minibatch 6/6, validation error 56.000000 %
epoch 3, minibatch 6/6, validation error 56.000000 %
epoch 4, minibatch 6/6, validation error 56.000000 %
epoch 5, minibatch 6/6, validation error 56.000000 %
epoch 6, minibatch 6/6, validation error 56.000000 %
epoch 7, minibatch 6/6, validation error 55.000000 %
 epoch 7, minibatch 6/6, test error of best model 61.000000 %
epoch 8, minibatch 6/6, validation error 54.000000 %
 epoch 8, minibatch 6/6, test error of best model 61.000000 %
epoch 9, minibatch 6/6, validation error 52.000000 %
 epoch 9, minibatch 6/6, test error of best model 58.000000 %
epoch 10, minibatch 6/6, validation error 52.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 23.9%
precision 48.7%
accuracy 51.2%
recall 16.4%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 6/6, validation error 56.000000 %
 epoch 1, minibatch 6/6, test error of best model 61.000000 %
epoch 2, minibatch 6/6, validation error 56.000000 %
epoch 3, minibatch 6/6, validation error 56.000000 %
epoch 4, minibatch 6/6, validation error 56.000000 %
epoch 5, minibatch 6/6, validation error 56.000000 %
epoch 6, minibatch 6/6, validation error 56.000000 %
epoch 7, minibatch 6/6, validation error 55.000000 %
 epoch 7, minibatch 6/6, test error of best model 61.000000 %
epoch 8, minibatch 6/6, validation error 54.000000 %
 epoch 8, minibatch 6/6, test error of best model 61.000000 %
epoch 9, minibatch 6/6, validation error 52.000000 %
 epoch 9, minibatch 6/6, test error of best model 58.000000 %
epoch 10, minibatch 6/6, validation error 52.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 23.9%
precision 48.7%
accuracy 51.2%
recall 16.4%
precision 50.0%
accuracy 50.0%
21
SVM
recall 28.6%
precision 66.7%
accuracy 57.1%
recall 61.0%
precision 58.6%
accuracy 59.0%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 47.000000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 47.000000 %
epoch 3, minibatch 8/8, validation error 53.000000 %
epoch 4, minibatch 8/8, validation error 53.000000 %
epoch 5, minibatch 8/8, validation error 53.000000 %
epoch 6, minibatch 8/8, validation error 51.500000 %
epoch 7, minibatch 8/8, validation error 46.500000 %
 epoch 7, minibatch 8/8, test error of best model nan %
epoch 8, minibatch 8/8, validation error 43.000000 %
 epoch 8, minibatch 8/8, test error of best model nan %
epoch 9, minibatch 8/8, validation error 47.000000 %
epoch 10, minibatch 8/8, validation error 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 59.9%
precision 54.4%
accuracy 54.1%
recall 0.0%
precision 0.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 47.000000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 47.000000 %
epoch 3, minibatch 8/8, validation error 53.000000 %
epoch 4, minibatch 8/8, validation error 53.000000 %
epoch 5, minibatch 8/8, validation error 53.000000 %
epoch 6, minibatch 8/8, validation error 51.500000 %
epoch 7, minibatch 8/8, validation error 46.500000 %
 epoch 7, minibatch 8/8, test error of best model nan %
epoch 8, minibatch 8/8, validation error 43.000000 %
 epoch 8, minibatch 8/8, test error of best model nan %
epoch 9, minibatch 8/8, validation error 47.000000 %
epoch 10, minibatch 8/8, validation error 48.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 59.9%
precision 54.4%
accuracy 54.1%
recall 0.0%
precision 0.0%
accuracy 50.0%
22
SVM
recall 0.0%
precision 0.0%
accuracy 41.7%
recall 54.2%
precision 55.1%
accuracy 55.1%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 51.000000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 51.000000 %
epoch 3, minibatch 8/8, validation error 49.000000 %
 epoch 3, minibatch 8/8, test error of best model nan %
epoch 4, minibatch 8/8, validation error 49.000000 %
epoch 5, minibatch 8/8, validation error 49.000000 %
epoch 6, minibatch 8/8, validation error 49.000000 %
epoch 7, minibatch 8/8, validation error 49.000000 %
epoch 8, minibatch 8/8, validation error 49.000000 %
epoch 9, minibatch 8/8, validation error 49.000000 %
epoch 10, minibatch 8/8, validation error 49.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 49.4%
accuracy 49.4%
recall 100.0%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 51.000000 %
 epoch 1, minibatch 8/8, test error of best model nan %
epoch 2, minibatch 8/8, validation error 51.000000 %
epoch 3, minibatch 8/8, validation error 49.000000 %
 epoch 3, minibatch 8/8, test error of best model nan %
epoch 4, minibatch 8/8, validation error 49.000000 %
epoch 5, minibatch 8/8, validation error 49.000000 %
epoch 6, minibatch 8/8, validation error 49.000000 %
epoch 7, minibatch 8/8, validation error 49.000000 %
epoch 8, minibatch 8/8, validation error 49.000000 %
epoch 9, minibatch 8/8, validation error 49.000000 %
epoch 10, minibatch 8/8, validation error 49.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 100.0%
precision 49.4%
accuracy 49.4%
recall 100.0%
precision 50.0%
accuracy 50.0%
23
SVM
recall 53.3%
precision 49.2%
accuracy 49.2%
recall 55.4%
precision 57.0%
accuracy 56.8%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 49.500000 %
 epoch 1, minibatch 8/8, test error of best model 60.000000 %
epoch 2, minibatch 8/8, validation error 49.500000 %
epoch 3, minibatch 8/8, validation error 49.500000 %
epoch 4, minibatch 8/8, validation error 50.500000 %
epoch 5, minibatch 8/8, validation error 50.500000 %
epoch 6, minibatch 8/8, validation error 50.500000 %
epoch 7, minibatch 8/8, validation error 50.500000 %
epoch 8, minibatch 8/8, validation error 50.500000 %
epoch 9, minibatch 8/8, validation error 50.500000 %
epoch 10, minibatch 8/8, validation error 50.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 99.5%
precision 50.0%
accuracy 50.1%
recall 100.0%
precision 50.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 49.500000 %
 epoch 1, minibatch 8/8, test error of best model 60.000000 %
epoch 2, minibatch 8/8, validation error 49.500000 %
epoch 3, minibatch 8/8, validation error 49.500000 %
epoch 4, minibatch 8/8, validation error 50.500000 %
epoch 5, minibatch 8/8, validation error 50.500000 %
epoch 6, minibatch 8/8, validation error 50.500000 %
epoch 7, minibatch 8/8, validation error 50.500000 %
epoch 8, minibatch 8/8, validation error 50.500000 %
epoch 9, minibatch 8/8, validation error 50.500000 %
epoch 10, minibatch 8/8, validation error 50.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 99.5%
precision 50.0%
accuracy 50.1%
recall 100.0%
precision 50.0%
accuracy 50.0%
24
SVM
recall 57.4%
precision 45.5%
accuracy 44.3%
recall 58.6%
precision 52.8%
accuracy 53.1%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.000000 %
 epoch 1, minibatch 8/8, test error of best model 39.000000 %
epoch 2, minibatch 8/8, validation error 52.000000 %
epoch 3, minibatch 8/8, validation error 53.000000 %
epoch 4, minibatch 8/8, validation error 48.000000 %
 epoch 4, minibatch 8/8, test error of best model 61.000000 %
epoch 5, minibatch 8/8, validation error 48.000000 %
epoch 6, minibatch 8/8, validation error 48.000000 %
epoch 7, minibatch 8/8, validation error 48.000000 %
epoch 8, minibatch 8/8, validation error 48.500000 %
epoch 9, minibatch 8/8, validation error 48.500000 %
epoch 10, minibatch 8/8, validation error 45.000000 %
 epoch 10, minibatch 8/8, test error of best model 53.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 21.4%
precision 67.2%
accuracy 55.0%
recall 16.4%
precision 83.3%
accuracy 56.6%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 8/8, validation error 52.000000 %
 epoch 1, minibatch 8/8, test error of best model 39.000000 %
epoch 2, minibatch 8/8, validation error 52.000000 %
epoch 3, minibatch 8/8, validation error 53.000000 %
epoch 4, minibatch 8/8, validation error 48.000000 %
 epoch 4, minibatch 8/8, test error of best model 61.000000 %
epoch 5, minibatch 8/8, validation error 48.000000 %
epoch 6, minibatch 8/8, validation error 48.000000 %
epoch 7, minibatch 8/8, validation error 48.000000 %
epoch 8, minibatch 8/8, validation error 48.500000 %
epoch 9, minibatch 8/8, validation error 48.500000 %
epoch 10, minibatch 8/8, validation error 45.000000 %
 epoch 10, minibatch 8/8, test error of best model 53.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 21.4%
precision 67.2%
accuracy 55.0%
recall 16.4%
precision 83.3%
accuracy 56.6%
25
SVM
recall 0.0%
precision 0.0%
accuracy 50.0%
recall 58.0%
precision 57.1%
accuracy 57.2%
direct deep learning



... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 50.500000 %
 epoch 1, minibatch 10/10, test error of best model nan %
epoch 2, minibatch 10/10, validation error 50.500000 %
epoch 3, minibatch 10/10, validation error 51.500000 %
epoch 4, minibatch 10/10, validation error 51.000000 %
epoch 5, minibatch 10/10, validation error 49.500000 %
 epoch 5, minibatch 10/10, test error of best model nan %
epoch 6, minibatch 10/10, validation error 50.500000 %
epoch 7, minibatch 10/10, validation error 50.500000 %
epoch 8, minibatch 10/10, validation error 51.000000 %
epoch 9, minibatch 10/10, validation error 51.500000 %
epoch 10, minibatch 10/10, validation error 50.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 65.9%
precision 51.0%
accuracy 51.8%
recall 0.0%
precision 0.0%
accuracy 50.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
... pre-training the model
The pretraining code ran for 0.00m
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 10/10, validation error 50.500000 %
 epoch 1, minibatch 10/10, test error of best model nan %
epoch 2, minibatch 10/10, validation error 50.500000 %
epoch 3, minibatch 10/10, validation error 51.500000 %
epoch 4, minibatch 10/10, validation error 51.000000 %
epoch 5, minibatch 10/10, validation error 49.500000 %
 epoch 5, minibatch 10/10, test error of best model nan %
epoch 6, minibatch 10/10, validation error 50.500000 %
epoch 7, minibatch 10/10, validation error 50.500000 %
epoch 8, minibatch 10/10, validation error 51.000000 %
epoch 9, minibatch 10/10, validation error 51.500000 %
epoch 10, minibatch 10/10, validation error 50.500000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
recall 65.9%
precision 51.0%
accuracy 51.8%
recall 0.0%
precision 0.0%
accuracy 50.0%
26
SVM
recall 0.0%
precision 0.0%
accuracy 50.0%
recall 52.7%
precision 56.6%
accuracy 56.1%
direct deep learning



... building the model
... getting the pretraining functions

---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-58-bc27e0896ebe> in <module>()
----> 1 LOO_out_performance_for_all(ddis)

<ipython-input-57-ab5d0a3bd074> in LOO_out_performance_for_all(ddis)
     42     for ddi in ddis:
     43         one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
---> 44         one_ddi_family.get_LOO_perfermance('FisherM1', '')
     45 
     46 class LOO_out_performance_for_one_ddi(object):

<ipython-input-57-ab5d0a3bd074> in get_LOO_perfermance(self, fisher_mode, settings)
    125                              hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
    126                              training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
--> 127                              pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
    128                  )
    129                 print 'hidden_layers_sizes:', hidden_layers_sizes

/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/DL_libs.py in trainSda(X_train_minmax, y_train, X_validation_minmax, y_validation, X_test_minmax, y_test, pretraining_X_minmax, hidden_layers_sizes, corruption_levels, batch_size, training_epochs, pretraining_epochs, pretrain_lr, finetune_lr)
    624     if pretraining_X_minmax == None:
    625         pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
--> 626                                                     batch_size=batch_size)
    627     else:
    628         pretraining_X_minmax  = shuffle(pretraining_X_minmax, random_state=0)

/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/DL_libs.py in pretraining_functions(self, train_set_x, batch_size)
    495                                  updates=updates,
    496                                  givens={self.x: train_set_x[batch_begin:
--> 497                                                              batch_end]})
    498             # append `fn` to the list of functions
    499             pretrain_fns.append(fn)

/usr/local/lib/python2.7/dist-packages/theano/compile/function.pyc in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
    221                 allow_input_downcast=allow_input_downcast,
    222                 on_unused_input=on_unused_input,
--> 223                 profile=profile)
    224     # We need to add the flag check_aliased inputs if we have any mutable or
    225     # borrowed used defined inputs

/usr/local/lib/python2.7/dist-packages/theano/compile/pfunc.pyc in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
    510     return orig_function(inputs, cloned_outputs, mode,
    511             accept_inplace=accept_inplace, name=name, profile=profile,
--> 512             on_unused_input=on_unused_input)
    513 
    514 

/usr/local/lib/python2.7/dist-packages/theano/compile/function_module.pyc in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input)
   1309                    accept_inplace=accept_inplace,
   1310                    profile=profile,
-> 1311                    on_unused_input=on_unused_input).create(
   1312                        defaults)
   1313 

/usr/local/lib/python2.7/dist-packages/theano/compile/function_module.pyc in __init__(self, inputs, outputs, mode, accept_inplace, function_builder, profile, on_unused_input)
   1020             gof.Op.add_stack_trace_on_call = False
   1021             start_optimizer = time.time()
-> 1022             optimizer_profile = optimizer(fgraph)
   1023             end_optimizer = time.time()
   1024             opt_time = end_optimizer - start_optimizer

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in __call__(self, fgraph)
     89         Same as self.optimize(fgraph)
     90         """
---> 91         return self.optimize(fgraph)
     92 
     93     def add_requirements(self, fgraph):

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in optimize(self, fgraph, *args, **kwargs)
     80             orig = theano.tensor.basic.constant.enable
     81             theano.tensor.basic.constant.enable = False
---> 82             ret = self.apply(fgraph, *args, **kwargs)
     83         finally:
     84             theano.tensor.basic.constant.enable = orig

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in apply(self, fgraph)
    181             try:
    182                 t0 = time.time()
--> 183                 sub_prof = optimizer.optimize(fgraph)
    184                 l.append(float(time.time() - t0))
    185                 sub_profs.append(sub_prof)

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in optimize(self, fgraph, *args, **kwargs)
     80             orig = theano.tensor.basic.constant.enable
     81             theano.tensor.basic.constant.enable = False
---> 82             ret = self.apply(fgraph, *args, **kwargs)
     83         finally:
     84             theano.tensor.basic.constant.enable = orig

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in apply(self, fgraph, start_from)
   1598                     for lopt in self.local_optimizers:
   1599                         t_opt = time.time()
-> 1600                         lopt_change = self.process_node(fgraph, node, lopt)
   1601                         time_opts[lopt] += time.time() - t_opt
   1602                         if lopt_change:

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in process_node(self, fgraph, node, lopt)
   1284         lopt = lopt or self.local_opt
   1285         try:
-> 1286             replacements = lopt.transform(node)
   1287         except Exception, e:
   1288             if self.failure_callback is not None:

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in transform(self, node)
   1101                 return pattern.clone()
   1102         u = match(self.in_pattern, node.out, unify.Unification(), True,
-> 1103                   self.pdb)
   1104         if u:
   1105             p = self.out_pattern

KeyboardInterrupt: 

In [ ]:


In [15]:
one_ddi_family.get_LOO_perfermance('AAONLY', '1')


1
SVM
precision on test:  0.493506493506
recall on test:  0.558823529412
direct deep learning
... building the model
... getting the pretraining functions
... pre-training the model
Pre-training layer 0, epoch 0, cost  63.5688703827
Pre-training layer 0, epoch 1, cost  57.926744179
Pre-training layer 0, epoch 2, cost  52.9103500176
Pre-training layer 0, epoch 3, cost  48.4728498244
Pre-training layer 0, epoch 4, cost  44.5676252783
Pre-training layer 0, epoch 5, cost  41.1486432219
Pre-training layer 0, epoch 6, cost  38.1706009545
Pre-training layer 0, epoch 7, cost  35.5890538646
Pre-training layer 0, epoch 8, cost  33.3606653607
Pre-training layer 0, epoch 9, cost  31.4436467359
Pre-training layer 0, epoch 10, cost  29.7983604444
Pre-training layer 0, epoch 11, cost  28.3879736869
Pre-training layer 0, epoch 12, cost  27.1790156235
Pre-training layer 0, epoch 13, cost  26.1417245224
Pre-training layer 0, epoch 14, cost  25.2501424078
Pre-training layer 0, epoch 15, cost  24.4819826456
Pre-training layer 0, epoch 16, cost  23.8183350717
Pre-training layer 0, epoch 17, cost  23.2432815655
Pre-training layer 0, epoch 18, cost  22.7434833952
Pre-training layer 0, epoch 19, cost  22.3077826561
Pre-training layer 0, epoch 20, cost  21.9268418937
Pre-training layer 0, epoch 21, cost  21.5928321067
Pre-training layer 0, epoch 22, cost  21.2991702827
Pre-training layer 0, epoch 23, cost  21.0403025827
Pre-training layer 0, epoch 24, cost  20.8115270748
Pre-training layer 0, epoch 25, cost  20.6088494287
Pre-training layer 0, epoch 26, cost  20.4288654427
Pre-training layer 0, epoch 27, cost  20.26866513
Pre-training layer 0, epoch 28, cost  20.1257540421
Pre-training layer 0, epoch 29, cost  19.9979883827
Pre-training layer 0, epoch 30, cost  19.8835212065
Pre-training layer 0, epoch 31, cost  19.780757587
Pre-training layer 0, epoch 32, cost  19.6883170979
Pre-training layer 0, epoch 33, cost  19.6050022989
Pre-training layer 0, epoch 34, cost  19.5297721866
Pre-training layer 0, epoch 35, cost  19.4617197696
Pre-training layer 0, epoch 36, cost  19.4000530876
Pre-training layer 0, epoch 37, cost  19.3440791143
Pre-training layer 0, epoch 38, cost  19.2931900806
Pre-training layer 0, epoch 39, cost  19.2468518368
Pre-training layer 0, epoch 40, cost  19.2045939315
Pre-training layer 0, epoch 41, cost  19.1660011403
Pre-training layer 0, epoch 42, cost  19.1307062203
Pre-training layer 0, epoch 43, cost  19.0983837018
Pre-training layer 0, epoch 44, cost  19.0687445596
Pre-training layer 0, epoch 45, cost  19.0415316297
Pre-training layer 0, epoch 46, cost  19.0165156612
Pre-training layer 0, epoch 47, cost  18.9934919073
Pre-training layer 0, epoch 48, cost  18.972277177
Pre-training layer 0, epoch 49, cost  18.9527072795
Pre-training layer 0, epoch 50, cost  18.9346348056
Pre-training layer 0, epoch 51, cost  18.9179271959
Pre-training layer 0, epoch 52, cost  18.9024650578
Pre-training layer 0, epoch 53, cost  18.8881406944
Pre-training layer 0, epoch 54, cost  18.8748568166
Pre-training layer 0, epoch 55, cost  18.862525414
Pre-training layer 0, epoch 56, cost  18.8510667622
Pre-training layer 0, epoch 57, cost  18.840408549
Pre-training layer 0, epoch 58, cost  18.8304851035
Pre-training layer 0, epoch 59, cost  18.8212367148
Pre-training layer 0, epoch 60, cost  18.8126090286
Pre-training layer 0, epoch 61, cost  18.8045525121
Pre-training layer 0, epoch 62, cost  18.7970219789
Pre-training layer 0, epoch 63, cost  18.7899761656
Pre-training layer 0, epoch 64, cost  18.7833773547
Pre-training layer 0, epoch 65, cost  18.7771910376
Pre-training layer 0, epoch 66, cost  18.7713856137
Pre-training layer 0, epoch 67, cost  18.7659321202
Pre-training layer 0, epoch 68, cost  18.76080399
Pre-training layer 0, epoch 69, cost  18.7559768348
Pre-training layer 0, epoch 70, cost  18.7514282489
Pre-training layer 0, epoch 71, cost  18.7471376333
Pre-training layer 0, epoch 72, cost  18.7430860366
Pre-training layer 0, epoch 73, cost  18.7392560119
Pre-training layer 0, epoch 74, cost  18.7356314863
Pre-training layer 0, epoch 75, cost  18.7321976441
Pre-training layer 1, epoch 0, cost  141.87507339
Pre-training layer 1, epoch 1, cost  128.869708774
Pre-training layer 1, epoch 2, cost  118.042616412
Pre-training layer 1, epoch 3, cost  109.091278647
Pre-training layer 1, epoch 4, cost  101.716215703
Pre-training layer 1, epoch 5, cost  95.6474870944
Pre-training layer 1, epoch 6, cost  90.6528391421
Pre-training layer 1, epoch 7, cost  86.5377923308
Pre-training layer 1, epoch 8, cost  83.1422403949
Pre-training layer 1, epoch 9, cost  80.3355466594
Pre-training layer 1, epoch 10, cost  78.0114033264
Pre-training layer 1, epoch 11, cost  76.0832149159
Pre-training layer 1, epoch 12, cost  74.480276263
Pre-training layer 1, epoch 13, cost  73.144720502
Pre-training layer 1, epoch 14, cost  72.0291096619
Pre-training layer 1, epoch 15, cost  71.0945299101
Pre-training layer 1, epoch 16, cost  70.3090707112
Pre-training layer 1, epoch 17, cost  69.6465917475
Pre-training layer 1, epoch 18, cost  69.0857074209
Pre-training layer 1, epoch 19, cost  68.6089414598
Pre-training layer 1, epoch 20, cost  68.2020204564
Pre-training layer 1, epoch 21, cost  67.8532848743
Pre-training layer 1, epoch 22, cost  67.5532009755
Pre-training layer 1, epoch 23, cost  67.2939594113
Pre-training layer 1, epoch 24, cost  67.0691475049
Pre-training layer 1, epoch 25, cost  66.873483358
Pre-training layer 1, epoch 26, cost  66.7026011328
Pre-training layer 1, epoch 27, cost  66.552878208
Pre-training layer 1, epoch 28, cost  66.4212962791
Pre-training layer 1, epoch 29, cost  66.3053297916
Pre-training layer 1, epoch 30, cost  66.2028562773
Pre-training layer 1, epoch 31, cost  66.1120841942
Pre-training layer 1, epoch 32, cost  66.0314947373
Pre-training layer 1, epoch 33, cost  65.9597947961
Pre-training layer 1, epoch 34, cost  65.8958788128
Pre-training layer 1, epoch 35, cost  65.8387977593
Pre-training layer 1, epoch 36, cost  65.7877338158
Pre-training layer 1, epoch 37, cost  65.7419796282
Pre-training layer 1, epoch 38, cost  65.7009212507
Pre-training layer 1, epoch 39, cost  65.6640240634
Pre-training layer 1, epoch 40, cost  65.6308210966
Pre-training layer 1, epoch 41, cost  65.6009033109
Pre-training layer 1, epoch 42, cost  65.5739114695
Pre-training layer 1, epoch 43, cost  65.5495293113
Pre-training layer 1, epoch 44, cost  65.5274777936
Pre-training layer 1, epoch 45, cost  65.5075102125
Pre-training layer 1, epoch 46, cost  65.4894080517
Pre-training layer 1, epoch 47, cost  65.4729774351
Pre-training layer 1, epoch 48, cost  65.4580460833
Pre-training layer 1, epoch 49, cost  65.4444606921
Pre-training layer 1, epoch 50, cost  65.4320846666
Pre-training layer 1, epoch 51, cost  65.4207961557
Pre-training layer 1, epoch 52, cost  65.4104863418
Pre-training layer 1, epoch 53, cost  65.4010579482
Pre-training layer 1, epoch 54, cost  65.3924239332
Pre-training layer 1, epoch 55, cost  65.3845063445
Pre-training layer 1, epoch 56, cost  65.3772353118
Pre-training layer 1, epoch 57, cost  65.3705481598
Pre-training layer 1, epoch 58, cost  65.3643886252
Pre-training layer 1, epoch 59, cost  65.3587061646
Pre-training layer 1, epoch 60, cost  65.3534553415
Pre-training layer 1, epoch 61, cost  65.3485952838
Pre-training layer 1, epoch 62, cost  65.3440892009
Pre-training layer 1, epoch 63, cost  65.3399039559
Pre-training layer 1, epoch 64, cost  65.3360096844
Pre-training layer 1, epoch 65, cost  65.3323794554
Pre-training layer 1, epoch 66, cost  65.3289889686
Pre-training layer 1, epoch 67, cost  65.3258162853
Pre-training layer 1, epoch 68, cost  65.3228415875
Pre-training layer 1, epoch 69, cost  65.3200469628
Pre-training layer 1, epoch 70, cost  65.3174162126
Pre-training layer 1, epoch 71, cost  65.3149346801
Pre-training layer 1, epoch 72, cost  65.3125890964
Pre-training layer 1, epoch 73, cost  65.3103674428
Pre-training layer 1, epoch 74, cost  65.3082588275
Pre-training layer 1, epoch 75, cost  65.3062533745
... getting the finetuning functions
... finetunning the model
epoch 1, minibatch 13/13, validation error 50.000000 %
 epoch 1, minibatch 13/13, test error of best model 43.333333 %
epoch 2, minibatch 13/13, validation error 50.000000 %
epoch 3, minibatch 13/13, validation error 50.000000 %
epoch 4, minibatch 13/13, validation error 50.000000 %
epoch 5, minibatch 13/13, validation error 50.000000 %
epoch 6, minibatch 13/13, validation error 50.000000 %
epoch 7, minibatch 13/13, validation error 50.000000 %
epoch 8, minibatch 13/13, validation error 50.000000 %
epoch 9, minibatch 13/13, validation error 50.000000 %
epoch 10, minibatch 13/13, validation error 50.000000 %
hidden_layers_sizes: [100, 100]
corruption_levels: [0, 0]
train accuracy:  49.8%
precision:  49.8%
recall:  100.0%
testing accuracy:  50.0%
precision:  50.0%
recall:  100.0%
deep learning with unlabel data
... building the model
... getting the pretraining functions
The pretraining code ran for 0.10m
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-15-f1a291fe5526> in <module>()
----> 1 one_ddi_family.get_LOO_perfermance('AAONLY', '1')

<ipython-input-6-4c735d4c74b6> in get_LOO_perfermance(self, fisher_mode, classifier)
     95                              hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
     96                              training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
---> 97                              pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
     98                  )
     99                 print 'hidden_layers_sizes:', hidden_layers_sizes

/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/DL_libs.pyc in trainSda(X_train_minmax, y_train, X_validation_minmax, y_validation, X_test_minmax, y_test, pretraining_X_minmax, hidden_layers_sizes, corruption_levels, batch_size, training_epochs, pretraining_epochs, pretrain_lr, finetune_lr)
    630 
    631         pretraining_fns = sda.pretraining_functions(train_set_x=pretraining_X,
--> 632                                                     batch_size=batch_size)
    633 
    634     print '... pre-training the model'

/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/DL_libs.pyc in pretraining_functions(self, train_set_x, batch_size)
    495                                  updates=updates,
    496                                  givens={self.x: train_set_x[batch_begin:
--> 497                                                              batch_end]})
    498             # append `fn` to the list of functions
    499             pretrain_fns.append(fn)

/usr/local/lib/python2.7/dist-packages/theano/compile/function.pyc in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
    221                 allow_input_downcast=allow_input_downcast,
    222                 on_unused_input=on_unused_input,
--> 223                 profile=profile)
    224     # We need to add the flag check_aliased inputs if we have any mutable or
    225     # borrowed used defined inputs

/usr/local/lib/python2.7/dist-packages/theano/compile/pfunc.pyc in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
    510     return orig_function(inputs, cloned_outputs, mode,
    511             accept_inplace=accept_inplace, name=name, profile=profile,
--> 512             on_unused_input=on_unused_input)
    513 
    514 

/usr/local/lib/python2.7/dist-packages/theano/compile/function_module.pyc in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input)
   1309                    accept_inplace=accept_inplace,
   1310                    profile=profile,
-> 1311                    on_unused_input=on_unused_input).create(
   1312                        defaults)
   1313 

/usr/local/lib/python2.7/dist-packages/theano/compile/function_module.pyc in __init__(self, inputs, outputs, mode, accept_inplace, function_builder, profile, on_unused_input)
   1020             gof.Op.add_stack_trace_on_call = False
   1021             start_optimizer = time.time()
-> 1022             optimizer_profile = optimizer(fgraph)
   1023             end_optimizer = time.time()
   1024             opt_time = end_optimizer - start_optimizer

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in __call__(self, fgraph)
     89         Same as self.optimize(fgraph)
     90         """
---> 91         return self.optimize(fgraph)
     92 
     93     def add_requirements(self, fgraph):

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in optimize(self, fgraph, *args, **kwargs)
     80             orig = theano.tensor.basic.constant.enable
     81             theano.tensor.basic.constant.enable = False
---> 82             ret = self.apply(fgraph, *args, **kwargs)
     83         finally:
     84             theano.tensor.basic.constant.enable = orig

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in apply(self, fgraph)
    181             try:
    182                 t0 = time.time()
--> 183                 sub_prof = optimizer.optimize(fgraph)
    184                 l.append(float(time.time() - t0))
    185                 sub_profs.append(sub_prof)

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in optimize(self, fgraph, *args, **kwargs)
     80             orig = theano.tensor.basic.constant.enable
     81             theano.tensor.basic.constant.enable = False
---> 82             ret = self.apply(fgraph, *args, **kwargs)
     83         finally:
     84             theano.tensor.basic.constant.enable = orig

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in apply(self, fgraph, start_from)
   1556                 fgraph.change_tracker.reset()
   1557                 t_opt = time.time()
-> 1558                 gopt.apply(fgraph)
   1559                 time_opts[gopt] += time.time() - t_opt
   1560                 if fgraph.change_tracker.changed:

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in apply(self, fgraph, start_from)
   1556                 fgraph.change_tracker.reset()
   1557                 t_opt = time.time()
-> 1558                 gopt.apply(fgraph)
   1559                 time_opts[gopt] += time.time() - t_opt
   1560                 if fgraph.change_tracker.changed:

/usr/local/lib/python2.7/dist-packages/theano/gof/opt.pyc in apply(self, fgraph, start_from)
   1359         nb_nodes_start = len(fgraph.apply_nodes)
   1360         t0 = time.time()
-> 1361         q = deque(graph.io_toposort(fgraph.inputs, start_from))
   1362         io_t = time.time() - t0
   1363 

/usr/local/lib/python2.7/dist-packages/theano/gof/graph.pyc in io_toposort(inputs, outputs, orderings)
    807         return rval
    808 
--> 809     topo = general_toposort(outputs, deps)
    810     return [o for o in topo if isinstance(o, Apply)]
    811 

/usr/local/lib/python2.7/dist-packages/theano/gof/graph.pyc in general_toposort(r_out, deps, debug_print)
    748     assert isinstance(r_out, (tuple, list, deque))
    749 
--> 750     reachable, clients = stack_search(deque(r_out), _deps, 'dfs', True)
    751     sources = deque([r for r in reachable if not deps_cache.get(r, None)])
    752 

/usr/local/lib/python2.7/dist-packages/theano/gof/graph.pyc in stack_search(start, expand, mode, build_inv)
    524             rval_list.append(l)
    525             rval_set.add(id(l))
--> 526             expand_l = expand(l)
    527             if expand_l:
    528                 if build_inv:

/usr/local/lib/python2.7/dist-packages/theano/gof/graph.pyc in _deps(io)
    734     def _deps(io):
    735         if io not in deps_cache:
--> 736             d = deps(io)
    737             if d:
    738                 if not isinstance(d, (list, OrderedSet)):

/usr/local/lib/python2.7/dist-packages/theano/gof/graph.pyc in deps(obj)
    802             elif isinstance(obj, Apply):
    803                 rval = list(obj.inputs)
--> 804             rval.extend(orderings.get(obj, []))
    805         else:
    806             assert not orderings.get(obj, [])

KeyboardInterrupt: 

In [ ]:
data =a
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
print a.shape, b.shape, X.shape,  X

In [ ]: