The project has the following structure:
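The listing below is inferred from the paths used in this notebook; it is a sketch of the layout, not necessarily exact:

notebooks/          (this notebook)
src/
    graph.py        (the graph module providing the Graph class)
test_data/
    zachary_connected.txt
    graph_1000n_4000m.txt
    graph_100n_1000m.txt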
In [2]:
import sys
sys.path.append('../src/')  # make the graph module in ../src/ importable

import graph
from importlib import reload  # reload is not a builtin in Python 3
reload(graph)  # pick up any edits to graph.py without restarting the kernel
Out[2]:
We check that the library is imported correctly by computing a few parameters of an empty graph.
In [3]:
emptyG = graph.Graph()
diam = emptyG.diameter()
gcc = emptyG.global_clustering_coefficient()
print("The diameter of empty graph: %d" % diam)
print("The global cluster coeff of empty graph is: %f" % gcc)
In [4]:
path = '../test_data/'
txt = ".txt"
filenames = ['zachary_connected', 'graph_1000n_4000m', 'graph_100n_1000m']
graphs = []  # store all three graph objects in a list
for g_name in filenames:
    g = graph.Graph({})
    g.read_from_file(filename=path + g_name + txt)
    graphs.append(g)

results = []
params = ["vertices", "edges", "density", "diameter", "clustering coef"]
# quick sanity check: vertex and edge counts of each graph
for G in graphs:
    print("%d, %d" % (G.number_of_vertices(), G.number_of_edges()))
In [5]:
# compute all five parameters for each graph
for G in graphs:
    results.append([G.number_of_vertices(), G.number_of_edges(),
                    G.density(), G.diameter(),
                    G.global_clustering_coefficient()])
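For reference, here is a minimal sketch of how density and the global clustering coefficient are commonly defined for a simple undirected graph; the graph module's own implementations may differ, and the adjacency-dict representation {vertex: set of neighbours} is an assumption:

from itertools import combinations

def density(adj):
    # density = 2m / (n * (n - 1)) for an undirected simple graph
    n = len(adj)
    m = sum(len(nbrs) for nbrs in adj.values()) // 2
    return 2.0 * m / (n * (n - 1)) if n > 1 else 0.0

def global_clustering_coefficient(adj):
    # C = (closed triples) / (connected triples); each triangle closes
    # three triples, so this equals 3 * triangles / triples
    closed = triples = 0
    for v, nbrs in adj.items():
        for u, w in combinations(nbrs, 2):  # neighbour pairs form a triple centred at v
            triples += 1
            if w in adj[u]:                 # the triple is closed (a triangle)
                closed += 1
    return float(closed) / triples if triples else 0.0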
Now we present the results in a table. Note that the ipy_table package is required.
In [6]:
from ipy_table import *

# build a list of rows: a header row, then one row per dataset
rows = [["dataset"] + params]
for fname, res in zip(filenames, results):
    rows.append([fname] + res)

# create the table with make_table and style the first two columns
make_table(rows)
set_column_style(0, width='100', bold=True, color='hsla(225, 80%, 94%, 1)')
set_column_style(1, width='100')
# render the table
render()
Out[6]:
(rendered table: vertices, edges, density, diameter, and clustering coefficient for each dataset)
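As a side note, ipy_table also ships predefined themes; if the manual column styling above is not needed, calling apply_theme('basic') after make_table gives a reasonable default look.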