In [1]:
%load_ext autoreload
%autoreload 2
from eden.util import configure_logging
import logging
debug=True
configure_logging(logging.getLogger(),verbosity=1+debug)
%matplotlib inline
In [2]:
from eden.io.gspan import gspan_to_eden
from itertools import islice
def get_graphs(dataset_fname='../../toolsdata/bursi.pos.gspan', size=100):
    return islice(gspan_to_eden(dataset_fname), size)
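As a quick sanity check on the loader, one can peek at a single graph; a minimal sketch, assuming gspan_to_eden yields networkx graphs (as the drawing calls below suggest):
In [ ]:
# load exactly one graph and report its size
g0 = get_graphs(size=1).next()
print g0.number_of_nodes(), g0.number_of_edges()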
In [3]:
from graphlearn.utils import draw
import graphlearn.minor.molecule.transform_cycle as mole
import graphlearn.minor.decompose as decompose
from graphlearn.graphlearn import Sampler as GLS
from eden.graph import Vectorizer
vectorizer=Vectorizer()
# get a graph and prepare it
graphs = get_graphs()
g = graphs.next()
g = vectorizer._graph_preprocessing(g)
# note: no decomposer is needed here, since we only build and draw the abstraction
#decomposer = decompose.MinorDecomposer(
#    include_base=False,
#    base_thickness_list=[2])
# the preprocessor builds the abstraction; the wrapper would provide a convenient format for drawing
preproc = mole.GraphTransformerCircles()
#graph_wrapper = decomposer.make_new_decomposer(g)
#graph = graph_wrapper.pre_vectorizer_graph(nested=True)
graph = preproc.abstract(g)
# draw
draw.graphlearn(graph, size=10, abstract_color='red', contract=True, ignore_for_layout='nesting')
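The abstraction can be inspected like any other graph; a minimal sketch, assuming abstract() returns a plain networkx graph (the 'label' attribute is the one drawn above):
In [ ]:
# peek at a few node labels of the abstraction
print graph.number_of_nodes()
for n, d in list(graph.nodes(data=True))[:5]:
    print n, d.get('label')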
In [4]:
from graphlearn.utils import draw
from graphlearn.graphlearn import Sampler as GLS
from eden.graph import Vectorizer
import graphlearn.minor.molecule.transform_cycle as mole
import graphlearn.minor.decompose as decompose
transformer=mole.GraphTransformerCircles()
decomposer = decompose.MinorDecomposer()
print 'DEMONSTRATING WRAPPER'
graphs = get_graphs()
for i in range(1):
    print 'grammar example %d' % i
    g = graphs.next()
    # graph-level attributes of the input graph
    print g.graph
    gm = decomposer.make_new_decomposer(transformer.transform([g])[0])
    draw.graphlearn([gm.pre_vectorizer_graph(nested=True), gm.abstract_graph(), gm.base_graph()],
                    size=15,
                    vertex_label='label',
                    contract=False)

print 'DEMONSTRATING EXTRACTION'
radius_list = [0, 2]
thickness_list = [2, 4]
base_thickness_list = [2]
# example argument tuple for manual CIP extraction (kept for reference; unused below)
argz = (gm, radius_list, thickness_list, Vectorizer(), 2**20 - 1, lambda x, y: True, base_thickness_list)
cips = gm.all_core_interface_pairs(thickness_list=[2], radius_list=[0, 1])
draw.graphlearn(cips[0][0].graph, contract=False)
draw.graphlearn(cips[0][1].graph, contract=False)
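The extraction returns nested lists: one group of core/interface pairs per root configuration, each pair exposing the .graph attribute used for drawing. A minimal sketch of summarizing them, relying only on that attribute:
In [ ]:
# how many CIP groups were extracted, and how big each pair's graph is
print len(cips)
for group in cips:
    print [cip.graph.number_of_nodes() for cip in group]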
In [5]:
%%time
from graphlearn.graphlearn import Sampler as GLS
graphs = get_graphs(size=200)
sampler = GLS(#radius_list=[0,1],
              #thickness_list=[1],
              #min_cip_count=2,
              #min_interface_count=2,
              decomposer=decomposer,
              graphtransformer=mole.GraphTransformerCircles())
sampler.fit(graphs)
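The commented-out arguments above are the grammar-related knobs; a variant sketch that sets them explicitly (same Sampler interface, values taken from the comments, chosen only for illustration):
In [ ]:
# a second sampler with an explicit, more restrictive grammar
sampler2 = GLS(radius_list=[0, 1],
               thickness_list=[1],
               min_cip_count=2,
               min_interface_count=2,
               decomposer=decomposer,
               graphtransformer=mole.GraphTransformerCircles())
sampler2.fit(get_graphs(size=200))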
In [6]:
# show congruent CIPs in the grammar
draw.draw_grammar(sampler.lsgg.productions,n_productions=5,n_graphs_per_production=5,
n_graphs_per_line=5, size=9, contract=False,
colormap='Paired', invert_colormap=False,
vertex_alpha=0.6, edge_alpha=0.5, abstract_interface=True)
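The grammar can also be summarized without drawing; a sketch assuming sampler.lsgg.productions is a mapping from interfaces to their congruent cores, as the drawing call above suggests:
In [ ]:
# number of interfaces and total number of stored CIPs in the grammar
print len(sampler.lsgg.productions)
print sum(len(cores) for cores in sampler.lsgg.productions.values())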
In [7]:
%%time
import graphlearn.utils.draw as draw
import itertools
# parameters
graphs = get_graphs()
id_start=15
id_end=id_start+9
graphs = itertools.islice(graphs,id_start,id_end)
n_steps=100
# sampling
graphs = sampler.transform(graphs)
scores = []
ids = range(id_start, id_end)
for i, path_graphs in enumerate(graphs):
    # path_graphs is a list of 'sampled' graphs for one run
    print 'Graph id: %d' % ids[i]
    # save the score history of this run
    scores.append(sampler.monitors[i].sampling_info['score_history'])
    # draw
    draw.graphlearn(path_graphs,
                    n_graphs_per_line=5, size=10,
                    colormap='Paired', invert_colormap=False, vertex_color='color_level',
                    vertex_alpha=0.5, edge_alpha=0.7, edge_label='label')
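Beyond drawing whole paths, the collected score histories already allow a quick summary; a minimal sketch using only the 'score_history' lists gathered above:
In [ ]:
# final score reached by each sampling run
for i, s in enumerate(scores):
    print 'graph %d final score: %.3f' % (ids[i], s[-1])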
In [8]:
%matplotlib inline
from itertools import islice
import numpy as np
import matplotlib.pyplot as plt
step = 1
num_graphs_per_plot = 3
num_plots = int(np.ceil(len(scores) / float(num_graphs_per_plot)))
for i in range(num_plots):
    plt.figure(figsize=(10, 5))
    for j, score in enumerate(scores[i * num_graphs_per_plot:(i + 1) * num_graphs_per_plot]):
        data = list(islice(score, None, None, step))
        plt.plot(data, label='graph %d' % (j + i * num_graphs_per_plot + id_start))
    plt.legend(loc='lower right')
    plt.grid()
    plt.ylim(-0.1, 1.1)
    plt.show()
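A compact alternative to the per-graph panels is a single averaged trajectory; a sketch that truncates all histories to the shortest run before averaging (an assumption, since runs may differ in length):
In [ ]:
# mean score trajectory across all runs, truncated to the shortest history
min_len = min(len(s) for s in scores)
mean_scores = np.mean([list(s)[:min_len] for s in scores], axis=0)
plt.figure(figsize=(10, 5))
plt.plot(mean_scores, label='mean over %d runs' % len(scores))
plt.legend(loc='lower right')
plt.grid()
plt.ylim(-0.1, 1.1)
plt.show()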