In [1]:
%matplotlib inline
import matplotlib.pyplot as plt
from gensim import corpora, models, similarities
from gensim.utils import simple_preprocess, lemmatize
from gensim.models.doc2vec import TaggedDocument,Doc2Vec
from sklearn.multiclass import OneVsRestClassifier,OneVsOneClassifier
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix
from gensim.models.ldamodel import LdaModel
from scipy.stats import skew, kurtosis
from sklearn.preprocessing import LabelEncoder
from nltk.corpus import stopwords
from os import listdir
import pandas as pd
import numpy as np
import codecs
import re
from sklearn.learning_curve import learning_curve


/usr/local/lib/python2.7/dist-packages/sklearn/cross_validation.py:43: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
/usr/local/lib/python2.7/dist-packages/sklearn/learning_curve.py:23: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the functions are moved. This module will be removed in 0.20
  DeprecationWarning)

In [2]:
""" 
os textos de cada classes estão divididos em
subdiretórios dentro de path
"""
class MyCorpus(object):
    
    def __init__(self,path):
        self.path = path
        self.klasses = self.file_ids_klass()
        #self.data = [self.get_text(fname) for fname in self.klasses.keys()]
        self.dictionary = corpora.Dictionary(self.get_text(fname) for fname in self.klasses.keys())
        
    def __iter__(self):
        for fname in self.klasses.keys():
            #yield self.dictionary.doc2bow(self.get_text(fname))
            yield self.get_text(fname)
            
    def text_bow(self,fname):
        return self.dictionary.doc2bow(self.get_text(fname))
    
    def file_ids_klass(self):
        ids = {}
        for klass in listdir(self.path):
            for fname in listdir(self.path+'/'+klass):
                ids[klass+'/'+fname] = klass
        return ids
                
    def get_text(self,fname):
        with open(self.path+'/'+fname) as finput:
            text = ''.join(finput.readlines())
            return self.pre_process(text)
    
    """
        por hora, um preprocessamento simples...
        podemos trocar por um mais elaborado depois
    """
    def pre_process(self,text):
        sentence = re.sub('[.,"]','',text)
        sentence = sentence.lower().decode('ISO-8859-7').split()
        
        # REMOVING STOPWORDS TEXT  ~~~~~~~~~~~~~~
        #stopCashed = set(stopwords.words('english'))
        #sentence = [word for word in sentence.lower().decode('ISO-8859-7').split() if word not in (stopCashed)]
        ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        
        # LEMMATIZE TEXT ~~~~~~~~~~~~~~~~~~~~~~~~
        #sentence = lemmatize(sentence.decode('ISO-8859-7'))
        ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        
        return sentence
        #return simple_preprocess(text)
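
For reference, a minimal sketch of the directory layout MyCorpus expects: one subdirectory per class under path (the NLTK movie_reviews corpus ships with pos/ and neg/; the file names below are illustrative only).

In [ ]:
# Bases/nltk/movie_reviews/
# |-- neg/
# |   |-- cv000_29416.txt
# |   `-- ...
# `-- pos/
#     |-- cv000_29590.txt
#     `-- ...
#
# quick smoke test (assumes the path above exists):
mc_demo = MyCorpus('Bases/nltk/movie_reviews')
print len(mc_demo.klasses), 'documents,', len(set(mc_demo.klasses.values())), 'classes'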

In [32]:
stopCashed = stopwords.words('english')

In [55]:
data = {}
for row in stopCashed:
    lemma = lemmatize(row)
    if len(lemma) > 0:
        data[row] = lemma

In [56]:
data


Out[56]:
{u'again': ['again/RB'],
 u'ain': ['ain/RB'],
 u'am': ['be/VB'],
 u'are': ['be/VB'],
 u'aren': ['aren/NN'],
 u'be': ['be/VB'],
 u'been': ['be/VB'],
 u'being': ['be/VB'],
 u'couldn': ['couldn/NN'],
 u'did': ['do/VB'],
 u'didn': ['didn/VB'],
 u'do': ['do/VB'],
 u'does': ['do/VB'],
 u'doesn': ['doesn/NN'],
 u'doing': ['do/VB'],
 u'don': ['don/VB'],
 u'few': ['few/JJ'],
 u'further': ['further/JJ'],
 u'had': ['have/VB'],
 u'hadn': ['hadn/NN'],
 u'has': ['have/VB'],
 u'hasn': ['hasn/NN'],
 u'have': ['have/VB'],
 u'haven': ['haven/NN'],
 u'having': ['have/VB'],
 u'here': ['here/RB'],
 u'is': ['be/VB'],
 u'isn': ['isn/NN'],
 u'just': ['just/RB'],
 u'll': ['ll/NN'],
 u'mightn': ['mightn/NN'],
 u'more': ['more/RB'],
 u'most': ['most/RB'],
 u'mustn': ['mustn/NN'],
 u'needn': ['needn/RB'],
 u'not': ['not/RB'],
 u'now': ['now/RB'],
 u'once': ['once/RB'],
 u'only': ['only/RB'],
 u'other': ['other/JJ'],
 u'ours': ['our/NN'],
 u'own': ['own/JJ'],
 u're': ['re/NN'],
 u'same': ['same/JJ'],
 u'shouldn': ['shouldn/NN'],
 u'so': ['so/RB'],
 u'such': ['such/JJ'],
 u'then': ['then/RB'],
 u'too': ['too/RB'],
 u've': ['ve/NN'],
 u'very': ['very/RB'],
 u'was': ['be/VB'],
 u'wasn': ['wasn/VB'],
 u'were': ['be/VB'],
 u'weren': ['weren/NN'],
 u'won': ['win/VB'],
 u'wouldn': ['wouldn/VB']}

In [ ]:
#DOC2VEC

class MyCorpus(object):
    
    def __init__(self,path):
        self.path = path
        self.klasses = self.file_ids_klass()
        self.text = {}
        for fname in self.klasses.keys():
            self.text[fname] = self.get_text(fname)
        
    def __iter__(self):
        for sent in self.klasses.keys():
            yield TaggedDocument(words = self.text.get(sent), tags = [sent])            
            
    def file_ids_klass(self):
        ids = {}
        for klass in listdir(self.path):
            for fname in listdir(self.path+'/'+klass):
                ids[klass+'/'+fname] = klass
        return ids
                
    def get_text(self,fname):
        with open(self.path+'/'+fname) as finput:
            text = ''.join(finput.readlines())
            return self.pre_process(text)
    
    def pre_process(self,text):
        sentence = re.sub('[.,"]','',text)
        #sentence = sentence.lower().decode('ISO-8859-7').split()
        
        # REMOVING STOPWORDS TEXT  ~~~~~~~~~~~~~~
        #stopCashed = set(stopwords.words('english'))
        #sentence = [word for word in sentence.lower().decode('ISO-8859-7').split() if word not in (stopCashed)]
        ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        
        # LEMMATIZE TEXT ~~~~~~~~~~~~~~~~~~~~~~~~
        sentence = lemmatize(sentence.decode('ISO-8859-7'))
        ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        
        return sentence
        #return simple_preprocess(text)

In [ ]:


In [3]:
mc = MyCorpus('Bases/nltk/movie_reviews')

In [ ]:
import numpy as np

cont = []
cortados = []
i = 0
for sent in mc.klasses.keys():
    temp = mc.get_text(sent)
    # empty or missing documents are dropped from the corpus
    if temp is None or len(temp) == 0:
        cortados.append(sent)
        mc.text.pop(sent)
        mc.klasses.pop(sent)
        i = i+1
    else:
        cont.append(len(temp))

print('%s empty sentences were removed'%i)
print('Number of documents: %s'%len(cont))
print('Total number of terms: %s'%np.sum(cont))
print('Number of terms in the largest document: %s'%np.max(cont))
print('Number of terms in the smallest document: %s'%np.min(cont))
print('Average number of terms per document: %8.6f'%np.mean(cont))
print('Standard deviation: %8.5f'%np.std(cont))
print('Skewness of the number of terms per document: %6.4f'%skew(cont))
print('Kurtosis of the number of terms per document: %6.4f'%kurtosis(cont))

In [4]:
dictionary = corpora.Dictionary(mc)
corpus = [dictionary.doc2bow(text) for text in mc]
print('Dictionary size: %s'%len(dictionary.keys()))


Dictionary size: 50917

In [5]:
# build the tf-idf model
tfidf = models.TfidfModel(corpus)
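
As a quick check of the transformation API (a sketch, assuming the corpus built above is non-empty): applying the fitted model to a bag-of-words vector returns a list of (term id, tf-idf weight) pairs.

In [ ]:
# tf-idf weights of the first five terms of the first document
print tfidf[corpus[0]][:5]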

In [ ]:
# build the LDA model (num_topics is left at gensim's default of 100)
lda = models.LdaModel(corpus, id2word=dictionary, passes=10)
print lda.print_topics(2)

In [ ]:
# build the LSI model (num_topics is left at gensim's default of 200)
lsi = models.LsiModel(corpus, id2word=dictionary)
print lsi.print_topics(2)

In [ ]:
# build the random projection model
rpm = models.RpModel(corpus, id2word=dictionary)

In [ ]:
# build the Doc2Vec model
model = Doc2Vec(mc, size=300, hs=1, iter=100, min_count=0, workers=5)

In [ ]:
model = Doc2Vec(dm=0, dbow_words=1, alpha=0.025, min_alpha=0.025, hs=1, size=300, min_count=0, workers=4, iter=20)  # start from a fixed learning rate (decayed manually below)
model.build_vocab(mc)
model.train_words=True
model.train_labels=True
 
for epoch in range(7):
    model.train(mc)
    model.alpha -= 0.003  # decrease the learning rate
    model.min_alpha = model.alpha  # fix the learning rate, no decay

In [ ]:
from sklearn.decomposition import PCA

def plotWords():
    #get model, we use w2v only
    w2v=model
 
    words_np = []
    #a list of labels (words)
    words_label = []
    for word in w2v.vocab.keys():
        words_np.append(w2v[word])
        words_label.append(word)
    print('Added %s words. Shape %s'%(len(words_np),np.shape(words_np)))
 
    pca = PCA(n_components=2)
    pca.fit(words_np)
    reduced= pca.transform(words_np)
 
    # plt.plot(pca.explained_variance_ratio_)
    for index,vec in enumerate(reduced):
        # print ('%s %s'%(words_label[index],vec))
        if index <15:
            x,y=vec[0],vec[1]
            plt.scatter(x,y)
            plt.annotate(words_label[index],xy=(x,y))
    plt.show()
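
A one-line usage sketch for the helper above, assuming the Doc2Vec model (trained with dbow_words=1, so word vectors are available) is in scope as model:

In [ ]:
plotWords()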

In [ ]:
del G

In [6]:
# create a directed graph (digraph)
import networkx as nx

G = nx.DiGraph()

# each text is a node of the graph
# the text's class is stored as a node attribute
for k,v in mc.klasses.items():
    G.add_node(k,klass=v)

In [13]:
# add the edges to the graph

# file names...
# auxiliary variable...
names = mc.klasses.keys()


# build the similarity index
# used to find the k nearest neighbours of each node
# num_best is the number of neighbours + 1 (since every node is its own nearest neighbour)
# num_best=11 yields a graph with 10 neighbours per node

# for the tf-idf representation (the same index also works for lda, lsi)
index = similarities.Similarity('temp',tfidf[corpus],num_features=len(mc.dictionary.keys()),num_best=11)
# for rpm, hdp
#index = similarities.MatrixSimilarity(rpm[corpus],num_features=len(dictionary.keys()),num_best=11)

for k in names:
    for nn in index[tfidf[mc.text_bow(k)]]:
        if not k==names[nn[0]]:
            G.add_edge(k,names[nn[0]],weight=nn[1])
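
With num_best=11, querying the index returns at most 11 (document position, cosine similarity) pairs, the first of which is normally the document itself. A small inspection sketch (assumes index, tfidf, names and mc from the cell above):

In [ ]:
sample = names[0]
for pos, sim in index[tfidf[mc.text_bow(sample)]]:
    print names[pos], mc.klasses[names[pos]], '%.4f' % sim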

In [ ]:
#For doc2vec model

names = mc.klasses.keys()


for k in names:
    for nn in model.docvecs.most_similar(k, topn=11):
        G.add_edge(k,nn[0],weight=nn[1])

In [14]:
# compute the degree distribution of the nodes

from collections import Counter

# since out_degree is always 10 (by construction),
# it is enough to look at in_degree
degree = G.in_degree().values()
cdegree = Counter(degree)

In [15]:
# skewness and kurtosis measure how non-uniform the distribution is

print skew(degree), kurtosis(degree)


3.58810292231 26.2468283919

In [16]:
%matplotlib inline
import matplotlib.pyplot as plt

plt.plot(cdegree.keys(),cdegree.values(),'bo-')
#plt.savefig('Pictures/Doc2Vec-DBOW_SKIPGRAM/Com Stemming/moviereview-GoodDegrees-k11')


Out[16]:
[<matplotlib.lines.Line2D at 0x10b46390>]

In [17]:
good_bad_edges = {}

for k in names:
    good_bad_edges[k] = {}
    good_bad_edges[k]['good'] = 0
    good_bad_edges[k]['bad'] = 0
    good_bad_edges[k]['all'] = 0
    for edge in G.in_edges(k):
        if G.node[edge[0]]['klass'] == G.node[edge[1]]['klass']:
            good_bad_edges[k]['good']+=1
        else:
            good_bad_edges[k]['bad']+=1
        good_bad_edges[k]['all']+=1

In [21]:
#baddegree = [d['bad'] for d in good_bad_edges.values()]
baddegree = [d['bad'] for d in good_bad_edges.values() if d['bad'] > d['good']]
#gooddegree = [d['good'] for d in good_bad_edges.values() if d['bad'] < d['good']]
CBad = Counter(baddegree)
#CGood = Counter(gooddegree)

plt.plot(cdegree.keys(),cdegree.values(),'bo-')
plt.plot(CBad.keys(),CBad.values(),'ro-')
#plt.plot(CGood.keys(),CGood.values(),'go-')
#plt.savefig('Pictures/Doc2Vec-DBOW_SKIPGRAM/Com Stemming/moviereview-GoodBadDegrees-k11')


Out[21]:
[<matplotlib.lines.Line2D at 0x10b55c90>]

In [19]:
print skew(baddegree), kurtosis(baddegree)


3.46477882019 24.4100396215

In [ ]:
from scipy.stats import spearmanr,pearsonr
import numpy as np

corr = np.array([[d['bad'], d['all']] for d in good_bad_edges.values()])

print('Spearman Correlation: %8.6f, %3.7s'% spearmanr(corr[:,0],corr[:,1]))
print('Pearson Correlation: %8.6f, %3.7s'%pearsonr(corr[:,0],corr[:,1]))

In [ ]:
import numpy as np
color = ['r' if node[1]['klass'] == 'pos' else 'b' for node in G.nodes(data=True)]

pos = nx.spring_layout(G)


ec = nx.draw_networkx_edges(G, pos, alpha=0.01)
nc = nx.draw_networkx_nodes(G, pos, node_color=color, node_size=2**np.sqrt(np.array(degree)),alpha=0.3)

In [ ]:
############# ----------------------  TRAINING MODEL ------------------##################

In [20]:
## FOR TFIDF MODEL SKLEARN

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# documents coming from MyCorpus are already token lists,
# so the "analyzer" simply passes the tokens through
def split_mc_corpus(corpus):
    words = corpus
    return [word for word in words]

bow_transformer = CountVectorizer(analyzer=split_mc_corpus).fit(mc)

messages_bow = bow_transformer.transform(mc)
tfidf_transformer = TfidfTransformer().fit(messages_bow)

print 'sparse matrix shape:', messages_bow.shape
print 'number of non-zeros:', messages_bow.nnz
print 'sparsity: %.2f%%' % (100.0 * messages_bow.nnz / (messages_bow.shape[0] * messages_bow.shape[1]))

vectors = tfidf_transformer.transform(messages_bow)
klasses = np.array(mc.klasses.values())
tag     = np.array(mc.klasses.keys())


sparse matrix shape: (2000, 50917)
number of non-zeros: 686453
sparsity: 0.67%
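
(For reference, the printed sparsity follows from 100 * 686453 / (2000 * 50917) ≈ 0.67%, i.e. on average about 343 distinct terms per review out of a 50,917-term vocabulary.)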

In [ ]:
## FOR TFIDF MODEL
'''
TF-IDF: collect the model's vectors and store them
'''

vectorsX = []
klasses = []
temp = 0
k = 0

for key in mc.klasses.keys():
    vecs = np.array(tfidf[mc.text_bow(key)], dtype=np.float32).T
    try:
        if len(vecs[1]) > 0:
            vectorsX.append(vecs[1])
            klasses.append(mc.klasses.get(key))
            if temp < len(vecs[1]):
                temp = len(vecs[1])
    except IndexError:
        k = k+1
        continue
vecstemp = np.array(vectorsX)

s = (len(vecstemp),temp)
A = np.zeros(s)
B = np.array(vecstemp)
vectors = addMatrix(A,B)
klasses = np.array(klasses)

print "%s: sentenças não continham pesos TFIDF"%k
print vectors.shape

In [ ]:
## FOR DOC2VEC MODEL
'''
Doc2Vec: collect the model's vectors and store them
'''

vectors = []
klasses = []
tag     = []

for key in mc.klasses.keys():
    vectors.append(model.docvecs[key])
    klasses.append(mc.klasses.get(key))
    tag.append(key)

vectors = np.array(vectors)
klasses = np.array(klasses)
tag     = np.array(tag)

In [ ]:
'''
LSI: collect the model's vectors and store them
'''

vectorsX = []
vecs     = []
klasses  = []
tag      = []
k = 0

for key in mc.klasses.keys():
    vecs = np.array(lsi[mc.text_bow(key)], dtype=np.float32).T
    try:
        
        #if len(vecs[1]) == 200:
        vectorsX.append(vecs[1])
        klasses.append(mc.klasses.get(key))
        tag.append(key)
        #else:
            #k = k+1
    except IndexError:
        k = k+1
        continue
vectors = np.array(vectorsX)
klasses = np.array(klasses)
tag     = np.array(tag)
print "%s: sentenças não continham pesos LSI"%k
vectors.shape

In [ ]:
'''
LDA: collect the model's vectors and store them
'''

## MATRIX FOR LDA AND TFIDF MODELS

def addMatrix(A,B):
    """ Adds two matrices element-wise; rows of B may be shorter than rows of A (missing entries stay zero)."""
    sizeL=len(A)
    sizeC=len(A[0])
    s = (sizeL,sizeC)
    C = np.zeros(s, dtype=np.float32)
    # element-wise sum
    for i in range(sizeL):
        for j in range(len(B[i])):
            C[i][j]=A[i][j]+B[i][j]
    return C 

vectors = []
klasses = []
tag     = []
temp = 0
q = 0

for key in mc.klasses.keys():
    try:
        vecs = np.array(lda[mc.text_bow(key)], dtype=np.float32)
        
        s = (1, int(vecs[:,0].max()+1))
        A = np.zeros(s)

        sizeL=len(A)
        sizeC=len(A[0])
        img = (sizeL,sizeC)
        C = np.zeros(img, dtype=np.float32)
        flag = 0
        for row in vecs[:,0]:
            dim = int(row)+1
            for i in range(sizeL):
                for j in range(dim):
                    if j == dim-1:
                        C[i][j]=1
                    if temp < dim:
                        temp = dim
        vectors.append(np.hstack(C))
        klasses.append(mc.klasses.get(key))
        tag.append(key)
    except IndexError:
        q=q+1
        continue

s = (len(vectors),temp)
A = np.zeros(s)
B = np.array(vectors)
klasses = np.array(klasses)
vectors = addMatrix(A,B)
tag     = np.array(tag)

print "%s: sentenças não continham pesos LDA"%q
print vectors.shape

In [ ]:
# FOR EVERY
# SET TEST VECTOR

vecs_train, vecs_test, label_train, label_test = \
    train_test_split(vectors, klasses, test_size=0.4)

print len(vecs_train), len(vecs_test), len(vecs_train) + len(vecs_test)

In [ ]:
from sklearn import svm

abc_detector = svm.LinearSVC().fit(vectors, klasses)

In [ ]:
## FOR QUERYING A SINGLE DOCUMENT

phase = mc.text.get('SENT_15')  # 'SENT_15' must be an existing document tag (a key of mc.text)
it = lda.id2word.doc2bow(tx for tx in phase)
tvec = np.array(lda[it]).T
tvec = tvec[1,]

print 'Class predicted:', abc_detector.predict(tvec)[0]

In [ ]:
print 'predicted:', abc_detector.predict(vectors)[4]
print 'expected:', klasses[4]

In [ ]:
all_predictions = abc_detector.predict(vectors)
print all_predictions[0:20]

In [ ]:
#CONVERT KLASSES TO BINARY
#DEFINE MCC

def multiclass_matthews_corrcoef(y_true,y_pred):
    # Pearson correlation between the integer-encoded labels;
    # for binary 0/1 labels this coincides with the Matthews correlation coefficient
    cov_mat = np.cov(y_true,y_pred)
    mcc = cov_mat[0][1]/np.sqrt(cov_mat[0][0]*cov_mat[1][1])
    return mcc

pe = LabelEncoder()
#pe.fit(all_predictions)

le = LabelEncoder()
le.fit(klasses)

bin_klasses = le.transform(klasses)
#bin_predictions = pe.transform(all_predictions)
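
For the binary pos/neg labels used here, the covariance formula above is just the Pearson correlation between the encoded labels, which coincides with the binary Matthews correlation coefficient. A sanity check against scikit-learn's implementation (a sketch; bin_predictions mirrors the commented-out line above, but reuses the encoder fitted on the true labels):

In [ ]:
from sklearn.metrics import matthews_corrcoef

# encode the predictions with the same LabelEncoder used for the true labels
bin_predictions = le.transform(all_predictions)
print 'MCC (covariance formula):', multiclass_matthews_corrcoef(bin_klasses, bin_predictions)
print 'MCC (sklearn)           :', matthews_corrcoef(bin_klasses, bin_predictions)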

In [ ]:
modelo = 'Doc2Vec-DM_CBOW'
data = 'moviereview'
process = 'SemStop'

In [ ]:
import sys
temp = sys.stdout
sys.stdout = open('Logs/log.txt', 'a')
print 'Confusion Matrix '+modelo+' Model >['+data+' dataset]< --'+process+'--'
print 'accuracy', accuracy_score(klasses, all_predictions)
print 'confusion matrix\n', confusion_matrix(klasses, all_predictions)
print '(row=expected, col=predicted)'
print 'Classification Report'
print classification_report(klasses, all_predictions)
sys.stdout.close()               
sys.stdout = temp                 # restore print commands to interactive prompt

## Back to Normal
print 'accuracy', accuracy_score(klasses, all_predictions)
print 'confusion matrix\n', confusion_matrix(klasses, all_predictions)
print '(row=expected, col=predicted)'

In [ ]:
plt.matshow(confusion_matrix(klasses, all_predictions), cmap=plt.cm.binary, interpolation='nearest')
plt.title('confusion matrix')
plt.colorbar()
plt.ylabel('expected label')
plt.xlabel('predicted label')

plt.savefig('Pictures/'+modelo+'/CMatrix/ConfusionMatrix-'+data+'-'+process+'')

In [ ]:
print classification_report(klasses, all_predictions)

In [ ]:
def Hubness(sent_vec, dic_hub, percent):
    # return the positions in sent_vec of the documents whose tag appears
    # among the first `percent`% entries of the ordered hub list dic_hub
    fatia = len(dic_hub)*percent/100
    pos = []
    i = 0
    
    for row in sent_vec:
        for nn in range(fatia):
            if row == dic_hub[nn][0]:
                pos.append(i)
        i=i+1
                
    return pos

import scipy.sparse as sps

def delete_rows_csr(mat, indices):
    """
    Remove the rows denoted by ``indices`` from the CSR sparse matrix ``mat``.
    """
    if not isinstance(mat, sps.csr_matrix):
        raise ValueError("works only for CSR format -- use .tocsr() first")
    indices = list(indices)
    mask = np.ones(mat.shape[0], dtype=bool)
    mask[indices] = False
    return mat[mask]
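
A tiny self-contained check of delete_rows_csr (a sketch with a throwaway 4x3 matrix):

In [ ]:
demo = sps.csr_matrix(np.arange(12).reshape(4, 3))
print delete_rows_csr(demo, [0, 2]).toarray()   # keeps rows 1 and 3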

In [ ]:
import random

# three candidate document orderings: by total in-degree, by "bad" in-degree, or a random shuffle
all_hub  = sorted(good_bad_edges.items(), key=lambda t: t[1]['all'], reverse=True)
bad_hub  = sorted(good_bad_edges.items(), key=lambda t: t[1]['bad'], reverse=True)
sort_hub = random.sample(good_bad_edges.items(), len(good_bad_edges))

# pick which ordering the Hubness filter will use
graph = sort_hub

In [ ]:
# Doc2Vec-DM_CBOW
# Doc2Vec-DBOW_SKIPGRAM
modelo  = 'Doc2Vec-DBOW_SKIPGRAM'
data    = 'moviereview_total'
process = 'Com Stemming'
log     = 'Hubness_'+data+''
per     = 25
how     = '< sort Hubs > - %s%%' % per

import sys
temp = sys.stdout
sys.stdout = open('Logs/'+log+'.txt', 'a')
print 'Matrix '+modelo+' Model >['+data+' dataset]< --'+process+'-- '+how+''

In [ ]:
from sklearn.neighbors import KNeighborsClassifier
from sklearn import cross_validation
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.metrics import f1_score
import time

X = vectors
y = bin_klasses
s = tag
var = False
skf = StratifiedKFold(n_splits=2)
skf.get_n_splits(X, y)

classifiers = {'SVM Linear' : LinearSVC(),
               #'RBF SVM' : SVC(gamma=2, C=1),
               '3-NN' : KNeighborsClassifier(n_neighbors=3),
               '5-NN' : KNeighborsClassifier(n_neighbors=5),
               'AdaBoost' : AdaBoostClassifier(),
               'Logistic' :LogisticRegression(),
               'BernoulliNB' :BernoulliNB(),
               'RF' : RandomForestClassifier(max_depth=100, max_features='auto'),
               }

for name, clf in classifiers.items():
    mccs = []
    accs = []
    f1s  = []
    
    for train_index, test_index in skf.split(X, y, s):            
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        s_train, s_test = s[train_index], s[test_index]
        
        # indices (within this fold) of the documents whose tags fall in the top `per`% of the hub list
        pos_train = Hubness(s_train, graph, per)
        pos_test = Hubness(s_test, graph, per)
        # delete from the largest index down so earlier indices remain valid
        pos_train_rev = sorted(pos_train, reverse=True)
        pos_test_rev = sorted(pos_test, reverse=True)

        for idxi in pos_train_rev:
#            X_train = delete_rows_csr(X_train, [idxi])
            X_train = np.delete(X_train, idxi, 0)
            y_train = np.delete(y_train, idxi, 0)
            s_train = np.delete(s_train, idxi, 0)

        for idx in pos_test_rev:
#            X_test = delete_rows_csr(X_test, [idx])
            X_test = np.delete(X_test, idx, 0)
            y_test = np.delete(y_test, idx, 0)
            s_test = np.delete(s_test, idx, 0)
        
        
        clf.fit(X_train,y_train)
        preds = clf.predict(X_test)
        mccs.append(multiclass_matthews_corrcoef(y_test,preds))
        accs.append(accuracy_score(y_test,preds))
        f1s.append(f1_score(y_test,preds,average=None))

    # with 2 folds the per-class F1 arrays can end up with different lengths
    # (a class may be absent from one fold); trim the longer one so np.mean works
    if len(f1s[0]) > len(f1s[1]):
        pop = len(f1s[0])-1
        f1s[0] = np.delete(f1s[0], pop)
    elif len(f1s[0]) < len(f1s[1]):
        pop = len(f1s[1])-1
        f1s[1] = np.delete(f1s[1], pop)
        
   
    print name, "Accuracy: %0.3f"% np.mean(accs)
    print name, "F1: %0.3f"% np.mean(f1s)
    print name, "MCC: %0.3f"% np.mean(mccs)
    

print '=================================================================================================='    
sys.stdout.close()               
sys.stdout = temp                 # restore print commands to interactive prompt

In [ ]: