In [ ]:
import numpy as np
import networkx as nx
import gensim
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist, pdist
import pickle
from collections import defaultdict
import random
import os
%matplotlib inline
In [2]:
# Load the pickled training and test edge sets
training_set = pickle.load(open("training_data.txt", "rb"))
testing_set = pickle.load(open("testing_data.txt", "rb"))
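The cells below assume each key is an edge encoded as a "u-v" node-pair string and each value is a 0/1 link label; a quick, hypothetical inspection cell to confirm that layout:
In [ ]:
# Peek at one (edge, label) pair to verify the assumed "u-v" -> 0/1 format
sample_edge, sample_label = next(iter(training_set.items()))
print(sample_edge, sample_label)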
In [3]:
# Create graph from training set
G = nx.DiGraph()
for edge in training_set.keys():
    nodes = edge.split('-')
    G.add_edge(nodes[0], nodes[1])
In [4]:
# Add nodes that appear only in the test set, so the graph covers every test node
node_list_conn = G.nodes()
for edge in testing_set.keys():
    nodes = edge.split('-')
    for node in nodes:
        if node not in node_list_conn:
            G.add_node(node)
In [5]:
# Write an edge list node2vec can consume; data=False drops the per-edge
# attribute dicts that write_edgelist appends by default
nx.write_edgelist(G, 'graph/train_n2v.txt', data=False)
In [ ]:
# Run the node2vec reference script over the training edge list
CmdStr = "python main.py --p 1 --q 0.5 --iter 200 --input graph/train_n2v.txt \
--output emb/emb_train_n2v.emb --dimensions 64"
os.system(CmdStr)
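os.system swallows failures silently; a sketch of the same invocation via subprocess.run (assuming the same main.py node2vec script and flags), which raises immediately if the process exits non-zero:
In [ ]:
import subprocess

# Same node2vec run with error checking; check=True raises CalledProcessError
# on a non-zero exit status
subprocess.run(
    ["python", "main.py", "--p", "1", "--q", "0.5", "--iter", "200",
     "--input", "graph/train_n2v.txt",
     "--output", "emb/emb_train_n2v.emb",
     "--dimensions", "64"],
    check=True,
)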
In [6]:
# Sort node ids numerically, then convert back to strings for embedding lookups
node_list_conn_int = sorted(int(x) for x in node_list_conn)
node_list_conn = [str(x) for x in node_list_conn_int]
In [7]:
# Sanity check: the node count should be unchanged by the steps above
G.number_of_nodes()
Out[7]:
In [8]:
G.has_edge('88','22504')
Out[8]:
In [10]:
## Read the embeddings file and build per-node feature vectors
model = gensim.models.KeyedVectors.load_word2vec_format('emb/emb_train_n2v.emb')
In [11]:
embeddings = {}
for node in node_list_conn:
    # Dict-style lookup works across gensim versions (word_vec was removed in gensim 4)
    embeddings[node] = model[node]
In [12]:
def combine_embedding(method, n_out, n_in):
    """Combine two endpoint embeddings into a single edge feature."""
    if method == 1:
        # Simple average
        return (n_out + n_in) / 2.0
    elif method == 2:
        # Hadamard (element-wise) product
        return np.multiply(n_in, n_out)
    else:
        raise ValueError("Invalid method. Enter 1 or 2.")
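Besides average and Hadamard, the node2vec paper also evaluates weighted-L1 and weighted-L2 edge operators; a sketch of those two variants (not wired into the experiments below):
In [ ]:
# Two further binary operators from the node2vec paper, kept as standalone
# helpers rather than extra branches of combine_embedding
def weighted_l1(n_out, n_in):
    return np.abs(n_out - n_in)

def weighted_l2(n_out, n_in):
    return (n_out - n_in) ** 2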
In [21]:
# Try Hadamard first
feature = []
label = []
for edge in training_set.keys():
    nodes = edge.split('-')
    feature.append(combine_embedding(2, embeddings[nodes[0]], embeddings[nodes[1]]))
    label.append(training_set[edge])
In [22]:
len(feature)
Out[22]:
In [23]:
G.number_of_edges()
Out[23]:
In [24]:
feature_np = np.asarray(feature)
print(feature_np.shape)
label_np = np.asarray(label)
print(label_np.shape)
In [25]:
# Least-squares fit: minimize ||feature_np @ x - label_np||^2
x, residuals, rank, s = np.linalg.lstsq(feature_np, label_np, rcond=None)
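lstsq regresses directly on the 0/1 labels; since link prediction is really binary classification, logistic regression is a common alternative baseline. A sketch on the same Hadamard features (max_iter is an assumed setting, raised so the solver converges):
In [ ]:
from sklearn.linear_model import LogisticRegression

# Classification baseline on the same edge features
clf = LogisticRegression(max_iter=1000)
clf.fit(feature_np, label_np)
print(clf.score(feature_np, label_np))  # training accuracy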
In [26]:
def evaluate_perf(data, w, labels):
    """Mean absolute error of the raw linear scores against the 0/1 labels."""
    label_pred = np.dot(data, w)
    print(label_pred.shape)
    diff = np.abs(np.subtract(label_pred, labels))
    return np.sum(diff) * 1.0 / len(labels)
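evaluate_perf scores the raw, unthresholded dot products, so it reports mean absolute error rather than accuracy. A sketch of a thresholded variant, assuming 0.5 as the decision boundary:
In [ ]:
def evaluate_accuracy(data, w, labels, threshold=0.5):
    # Hard 0/1 predictions from the linear scores, then mean accuracy
    preds = (np.dot(data, w) >= threshold).astype(int)
    return np.mean(preds == labels)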
In [27]:
x.shape
Out[27]:
In [28]:
error = evaluate_perf(feature_np,x,label_np)
error
Out[28]:
In [29]:
# Extract test set features
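The cell above is a stub; a sketch that mirrors the training-feature loop (it assumes every test node received an embedding, which holds only if each one appears in the edge list node2vec was run on):
In [ ]:
# Build test features exactly as the training features were built
test_feature = []
for edge in testing_set.keys():
    nodes = edge.split('-')
    test_feature.append(combine_embedding(2, embeddings[nodes[0]], embeddings[nodes[1]]))
test_feature_np = np.asarray(test_feature)
print(test_feature_np.shape)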
In [30]:
np.count_nonzero(label_np)
Out[30]:
In [31]:
len(label_np)
Out[31]:
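The two counts above give the class balance; the positive-label fraction in one line:
In [ ]:
# Fraction of positive (existing-link) examples among the training labels
print(np.count_nonzero(label_np) / len(label_np))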