In [ ]:
# 1_network_df
# Notebook setup: imports, plotting/display options, and selection of the
# input .gml file (one sentiment network at a time -- uncomment the one to run).
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import os
from glob import glob
plt.style.use('ggplot')
# widen console output so full DataFrames print without wrapping
pd.set_option('display.width', 5000)
pd.set_option('display.max_columns', 60)
#gml_files = glob('../output/network/Gc_negative.gml')
#gml_files = glob('../output/network/Gc_positive.gml')
gml_files = glob('../output/network/Gc_neutral.gml')
In [ ]:
def calculate_graph_inf(graph, name=None):
    """Label *graph* and print its summary (nodes/edges/type).

    Parameters
    ----------
    graph : networkx graph
    name : str, optional
        Label stored on ``graph.name``.  Defaults to the module-level
        ``filename`` set by the processing loops, preserving the original
        implicit-global behavior; callers should prefer passing it explicitly.
    """
    graph.name = filename if name is None else name
    # NOTE(review): nx.info() is deprecated in networkx >= 2.7 -- TODO confirm
    # the installed version; `print(graph)` is the modern equivalent.
    info = nx.info(graph)
    # print() call form, consistent with the print(...) usage elsewhere in
    # this file (the original used a Python-2 print statement)
    print(info)
    #plt.figure(figsize=(10,10))
    #nx.draw_spring(graph, arrows=True, with_labels=True)
def highest_centrality(cent_dict):
    """Return a ``(node, value)`` tuple for the node with the largest value
    in a centrality dictionary.

    Ties on value are broken by the larger node key, matching the original
    sort-descending-then-take-first behavior.
    """
    # max() over (value, node) pairs replaces the original Python-2-only
    # iteritems() + full sort + reverse: O(n) instead of O(n log n), and
    # identical semantics including the tie-break.
    value, node = max((v, k) for k, v in cent_dict.items())
    return (node, value)
In [ ]:
# 2_node_df: accumulators for the per-node centrality table.
# `data` holds one (name, sentiment) row per graph; `combined_df` collects
# the per-node rows stacked across all processed graphs.
data_columns = ['name', 'sentiment']
data = pd.DataFrame(columns=data_columns)
combined_df = pd.DataFrame()
In [ ]:
# graph = directed, ugraph = undirected
# For each input .gml file: build a per-node table (degree + three
# centralities), tag it with the graph's name and sentiment, and stack it
# onto `combined_df`.
for graph_num, gml_graph in enumerate(gml_files):
    graph = nx.read_gml(gml_graph)
    #ugraph = graph.to_undirected() # to undirected graph
    #U = graph.to_undirected(reciprocal=True)
    #e = U.edges()
    #ugraph.add_edges_from(e)
    (filepath, filename) = os.path.split(gml_graph)
    print('-' * 10)
    print(gml_graph)
    calculate_graph_inf(graph)
    #calculate_graph_inf(ugraph)
    # sentiment label for this batch -- must agree with the glob selected at
    # the top of the file (BUG FIX: the original hard-coded "negative" while
    # the neutral graph was the active input and the neutral CSV the output)
    sent = "neutral"
    graph_values = {'name': filename,
                    'sentiment': sent}
    # NOTE(review): DataFrame.append was removed in pandas 2.x; fine on the
    # pandas this notebook was written against -- TODO confirm version
    data = data.append(graph_values, ignore_index=True)
    # node degree; dict() accepts both the plain dict of networkx 1.x and
    # the DegreeView of networkx 2.x
    degree = dict(nx.degree(graph))
    deg_df = pd.DataFrame.from_dict(degree, orient='index')
    deg_df.columns = ['degree']
    # each centrality computed ONCE (the original computed all three twice,
    # discarding the first -- and expensive -- results)
    deg_cent = nx.degree_centrality(graph)
    dc_df = pd.DataFrame.from_dict(deg_cent, orient='index')
    dc_df.columns = ['deg cent']
    bet_cent = nx.betweenness_centrality(graph)
    bc_df = pd.DataFrame.from_dict(bet_cent, orient='index')
    bc_df.columns = ['bet cent']
    clo_cent = nx.closeness_centrality(graph)
    cc_df = pd.DataFrame.from_dict(clo_cent, orient='index')
    cc_df.columns = ['clo cent']
    # concat node frames into node_df, aligned on the node index
    frames = [deg_df, dc_df, bc_df, cc_df]
    node_df = pd.concat(frames, axis=1)
    node_df.index.name = 'node'
    node_df = node_df.reset_index()
    values = pd.DataFrame(graph_values, columns=('name', 'sentiment'), index=[0])
    # df = merges graph_values with node_df for a single graph; ffill repeats
    # the name/sentiment values down every node row
    df = pd.concat([values, node_df], axis=1)
    df = df.fillna(method='ffill')
    combined_df = combined_df.append(df)
In [ ]:
# display the stacked per-node table, then persist it to CSV
# NOTE(review): the active input at the top of the file is Gc_neutral, so the
# neutral CSV is the matching output here
combined_df
#combined_df.to_csv('../output/df/Gc_nodes_negative.csv')
#combined_df.to_csv('../output/df/Gc_nodes_positive.csv')
combined_df.to_csv('../output/df/Gc_nodes_neutral.csv')
In [ ]:
# 7_graph_calculation
def drawIt(graph, what='graph'):
    """Draw *graph* with a spring layout and show it.

    Figure size grows for graphs with > 20 nodes; node and label sizes
    shrink for graphs with > 40 nodes so labels remain readable.
    """
    nsize = graph.number_of_nodes()
    # print() call form, consistent with the rest of the file
    # (the original used a Python-2 print statement)
    print("Drawing %s of size %s:" % (what, nsize))
    if nsize > 20:
        plt.figure(figsize=(10, 10))
    # flattened from the original nested if/else, which duplicated the
    # default-draw branch; behavior is unchanged
    if nsize > 40:
        nx.draw_spring(graph, with_labels=True, node_size=70, font_size=12)
    else:
        nx.draw_spring(graph, with_labels=True)
    plt.show()
def describeGraph(graph):
    """Print node/edge/component counts for an undirected *graph*, then
    draw the whole graph followed by each connected component."""
    components = sorted(nx.connected_components(graph), key=len, reverse=True)
    cc = [len(c) for c in components]
    # BUG FIX: the original packed (edges, nodes, components) into a
    # "%s nodes, %s edges, ..." template, swapping the two counts in the
    # printed message.  (Also removed: an unused `subgraphs` local.)
    params = (graph.number_of_nodes(), graph.number_of_edges(), len(cc))
    print("Graph has %s nodes, %s edges, %s connected components\n" % params)
    drawIt(graph)
    for sub in components:
        drawIt(graph.subgraph(sub), what='component')
# list of connected components by size
# NOTE(review): `ugraph` is not defined in this cell -- it comes from the
# commented-out `graph.to_undirected()` lines earlier; those must be run
# (uncommented) first or this raises NameError.
connected_components = [len(c) for c in sorted(nx.connected_components(ugraph), key=len, reverse=True)]
# generate connected components as subgraphs
# NOTE(review): connected_component_subgraphs was removed in networkx 2.4 --
# TODO confirm the installed version
subgraphs = list(nx.connected_component_subgraphs(ugraph))
# greatest component (largest connected subgraph)
Gc = max(nx.connected_component_subgraphs(ugraph), key=len)
In [ ]:
# persist the greatest component; uncomment the line matching the sentiment
# of the graph currently loaded (Gc comes from the cell above)
nx.write_gml(Gc, "../output/network/Gc_negative.gml")
#nx.write_gml(Gc, "../output/network/Gc_positive.gml")
#nx.write_gml(Gc, "../output/network/Gc_neutral.gml")
In [ ]:
# Select the greatest-component file to summarize (one sentiment at a time)
# and prepare an empty frame that will hold one metrics row per graph.
Gc_files = glob('../output/network/Gc_negative.gml')
#Gc_files = glob('../output/network/Gc_positive.gml')
#Gc_files = glob('../output/network/Gc_neutral.gml')
network_data_columns = [
    'name',
    'sentiment',
    '# nodes',
    '# edges',
    #'avg deg',
    'density',
    'deg assort coef',
    'avg deg cent',
    'avg bet cent',
    'avg clo cent',
    'high deg cent',
    'high bet cent',
    'high clo cent',
    'avg node conn',
    '# conn comp',
    'gc size',
]
network_data = pd.DataFrame(columns=network_data_columns)
In [ ]:
### draft: one-call version of the summary loop below ###
def calculate(graph):
    """Read the .gml file at path *graph* and return a dict of
    network-level summary metrics (one ``network_data`` row).

    Bug fixes vs. the original draft (which was marked "don't run"):
    - splits the *passed* path instead of the loop global ``gml_graph``
    - computes each centrality once instead of twice
    - wraps ``dict.values()`` in ``list`` so ``np.array`` receives a real
      sequence (required on Python 3, harmless on Python 2)
    - returns the metrics dict; the original rebound the module-level
      ``network_data`` inside the function, which raises UnboundLocalError
    """
    gml_path = graph
    graph = nx.read_gml(gml_path)
    (filepath, filename) = os.path.split(gml_path)
    print('-' * 10)
    print(gml_path)
    # NOTE(review): calculate_graph_inf reads the module-level `filename`,
    # not the local above -- preserved from the original; verify callers.
    calculate_graph_inf(graph)
    # calculate variables
    sent = " "  # placeholder; caller supplies the real sentiment label
    nodes = nx.number_of_nodes(graph)
    edges = nx.number_of_edges(graph)
    density = float("{0:.4f}".format(nx.density(graph)))
    deg_cen = nx.degree_centrality(graph)
    bet_cen = nx.betweenness_centrality(graph)
    clo_cen = nx.closeness_centrality(graph)
    avg_deg_cen = np.array(list(deg_cen.values())).mean()
    avg_bet_cen = np.array(list(bet_cen.values())).mean()
    avg_clo_cen = np.array(list(clo_cen.values())).mean()
    avg_node_con = float("{0:.4f}".format(nx.average_node_connectivity(graph)))
    deg_assort_coeff = float("{0:.4f}".format(nx.degree_assortativity_coefficient(graph)))
    conn_comp = nx.number_connected_components(graph)  # expects an undirected graph
    highest_deg_cen = highest_centrality(deg_cen)
    highest_bet_cen = highest_centrality(bet_cen)
    highest_clo_cen = highest_centrality(clo_cen)
    # size of the greatest connected component
    # NOTE(review): connected_component_subgraphs was removed in networkx 2.4
    Gc = len(max(nx.connected_component_subgraphs(graph), key=len))
    # assemble the summary row
    graph_values = {'name': filename,
                    'sentiment': sent,
                    '# nodes': nodes,
                    '# edges': edges,
                    #'avg deg':avg_deg,
                    'density': density,
                    'deg assort coef': deg_assort_coeff,
                    'avg deg cent': "%.4f" % avg_deg_cen,
                    'avg bet cent': "%.4f" % avg_bet_cen,
                    'avg clo cent': "%.4f" % avg_clo_cen,
                    'high deg cent': highest_deg_cen,
                    'high bet cent': highest_bet_cen,
                    'high clo cent': highest_clo_cen,
                    'avg node conn': avg_node_con,
                    '# conn comp': conn_comp,
                    'gc size': Gc}
    return graph_values
###
In [ ]:
# Gc_files: build one row of whole-network metrics per greatest-component file
for graph_num, gml_graph in enumerate(Gc_files):
    graph = nx.read_gml(gml_graph)
    (filepath, filename) = os.path.split(gml_graph)
    print('-' * 10)
    print(gml_graph)
    calculate_graph_inf(graph)
    # calculate variables
    sent = "negative"  # matches the active Gc_negative glob above
    nodes = nx.number_of_nodes(graph)
    edges = nx.number_of_edges(graph)
    density = float("{0:.4f}".format(nx.density(graph)))
    # each centrality computed ONCE and reused for both the average and the
    # top node (the original computed each of the three twice); list(...)
    # makes np.array work with Python 3 dict views as well as Python 2 lists
    deg_cen = nx.degree_centrality(graph)
    bet_cen = nx.betweenness_centrality(graph)
    clo_cen = nx.closeness_centrality(graph)
    avg_deg_cen = np.array(list(deg_cen.values())).mean()
    avg_bet_cen = np.array(list(bet_cen.values())).mean()
    avg_clo_cen = np.array(list(clo_cen.values())).mean()
    #avg_deg = float("{0:.4f}".format(in_deg + out_deg))
    avg_node_con = float("{0:.4f}".format(nx.average_node_connectivity(graph)))
    deg_assort_coeff = float("{0:.4f}".format(nx.degree_assortativity_coefficient(graph)))
    conn_comp = nx.number_connected_components(graph)  # ugraph
    highest_deg_cen = highest_centrality(deg_cen)
    highest_bet_cen = highest_centrality(bet_cen)
    highest_clo_cen = highest_centrality(clo_cen)
    # size of the greatest connected component
    # NOTE(review): connected_component_subgraphs was removed in networkx 2.4
    Gc = len(max(nx.connected_component_subgraphs(graph), key=len))
    # save variables into one summary row
    graph_values = {'name': filename,
                    'sentiment': sent,
                    '# nodes': nodes,
                    '# edges': edges,
                    #'avg deg':avg_deg,
                    'density': density,
                    'deg assort coef': deg_assort_coeff,
                    'avg deg cent': "%.4f" % avg_deg_cen,
                    'avg bet cent': "%.4f" % avg_bet_cen,
                    'avg clo cent': "%.4f" % avg_clo_cen,
                    'high deg cent': highest_deg_cen,
                    'high bet cent': highest_bet_cen,
                    'high clo cent': highest_clo_cen,
                    'avg node conn': avg_node_con,
                    '# conn comp': conn_comp,
                    'gc size': Gc}
    # NOTE(review): DataFrame.append was removed in pandas 2.x; kept for the
    # pandas this notebook targets -- TODO confirm version
    network_data = network_data.append(graph_values, ignore_index=True)
In [ ]:
# display the summary table, then persist it to CSV
# (negative is the active run, matching the Gc_negative glob above)
network_data
network_data.to_csv('../output/df/Gc_df_negative.csv', encoding = 'utf-8')
#network_data.to_csv('../output/df/Gc_df_positive.csv', encoding = 'utf-8')
#network_data.to_csv('../output/df/Gc_df_neutral.csv', encoding = 'utf-8')
In [ ]:
# returns all minimum k cutsets of an undirected graph
# i.e., the set(s) of nodes of cardinality equal to the node connectivity of G
# thus if removed, would break G into two or more connected components
cutsets = list(nx.all_node_cuts(Gc))
# print() call form, consistent with the rest of the file (the originals
# were Python-2-only print statements)
print("Connected components =", connected_components)
print("Greatest component size =", len(Gc))
print("# of cutsets =", len(cutsets))
# returns a set of nodes or edges of minimum cardinality that disconnects G
min_ncut = nx.minimum_node_cut(Gc)
min_ecut = nx.minimum_edge_cut(Gc)
print("Min node cut =", min_ncut)
print("Min edge cut =", min_ecut)
# min cuts with source and target
print(nx.minimum_node_cut(Gc, s='vaccines', t='autism'))
print(nx.minimum_edge_cut(Gc, s='vaccines', t='autism'))
In [ ]:
# read edge labels in min cut for Gc
# change source and target
a = nx.minimum_edge_cut(Gc, s='vaccines', t='autism')
#a = nx.minimum_edge_cut(Gc)
labels = nx.get_edge_attributes(Gc, 'edge')
# map each labeled edge's (u, v) pair to its label
edgelabels = {}
for e in labels.keys():
    e1 = e[0:2]
    edgelabels[e1] = labels[e]
for e in a:
    # `in` replaces dict.has_key(), which was removed in Python 3;
    # prints converted to call form for the same reason
    if e in edgelabels:
        print(e, edgelabels[e])
    else:
        # the cut edge may be stored in the opposite direction
        # NOTE(review): raises KeyError if neither orientation is labeled
        rev_e = e[::-1]
        print(rev_e, edgelabels[rev_e])
In [ ]:
# Per-node centrality tables for the current `graph`, each a single-column
# DataFrame sorted ascending by score.
def _ranked(scores, label):
    # helper: centrality dict -> one-column DataFrame sorted by that column
    frame = pd.DataFrame.from_dict(scores, orient='index')
    frame.columns = [label]
    return frame.sort_values(by=[label])

# degree centrality
dc = nx.degree_centrality(graph)
dc_df = _ranked(dc, 'degree cent')
#dc_df
# betweenness centrality
bc = nx.betweenness_centrality(graph)
bc_df = _ranked(bc, 'betweenness cent')
#bc_df
# closeness centrality
cc = nx.closeness_centrality(graph)
cc_df = _ranked(cc, 'closeness cent')
#cc_df