In [1]:
%load_ext autoreload
%autoreload 2

In [2]:
from wb import WeightsAndBiases
from sklearn.preprocessing import LabelBinarizer
from random import sample, choice
from fingerprint_vect import GraphFingerprint
from collections import defaultdict
from autograd import grad
from autograd.scipy.misc import logsumexp

import autograd.numpy as np
import networkx as nx
import math

In [3]:
def make_random_graph(nodes, n_edges, features_dict):
    """
    Makes a randomly connected graph. 
    """
    
    G = nx.Graph()
    for n in nodes:
        G.add_node(n, features=features_dict[n])
    
    for i in range(n_edges):
        # sample() picks two distinct nodes, so there are no self-loops;
        # duplicate picks collapse in nx.Graph, so the graph may end up
        # with fewer than n_edges edges.
        u, v = sample(G.nodes(), 2)
        G.add_edge(u, v)
        
    return G

In [4]:
# features_dict will look like this:
# {0: array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
#  1: array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0]),
#  2: array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0]),
#  3: array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0]),
#  4: array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0]),
#  5: array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0]),
#  6: array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0]),
#  7: array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0]),
#  8: array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0]),
#  9: array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])}

all_nodes = [i for i in range(10)]
lb = LabelBinarizer()
binarized = lb.fit_transform(all_nodes)  # one-hot encode the node labels once
features_dict = {i: binarized[i] for i in all_nodes}

G = make_random_graph(sample(all_nodes, 6), 5, features_dict)
G.edges(data=True)
# G.nodes(data=True)


Out[4]:
[(1, 2, {}), (3, 4, {}), (3, 6, {}), (6, 8, {})]

In [5]:
wb = WeightsAndBiases(n_layers=2, shapes=(10, 20, 10))
# wb

In [6]:
# def score(G):
#     """
#     The regressable score for each graph would be the sum over nodes of
#     (the square root of the node label + the cube roots of its neighbors' labels).
#     """
#     sum_score = 0
#     for n, d in G.nodes(data=True):
#         sum_score += math.sqrt(n)
        
#         for nbr in G.neighbors(n):
#             sum_score += nbr ** (1/3)
#     return sum_score

def score(G):
    """
    The regressable score for each graph is the number of nodes
    in the graph.
    """
    return len(G.nodes())

score(G)


Out[6]:
6

In [7]:
G.nodes(data=True)[0][1]['features'].shape


Out[7]:
(10,)

In [8]:
def softmax(X, axis=0):
    """
    Exponentiates and normalizes X so that, along the given axis, the
    values lie in (0, 1) and sum to 1.
    """
    return np.exp(X - logsumexp(X, axis=axis, keepdims=True))

# test softmax:
X = np.random.random((1,10))
softmax(X, axis=1)


Out[8]:
array([[ 0.10590253,  0.07747197,  0.06798611,  0.08309748,  0.14523769,
         0.10324535,  0.09085766,  0.11327375,  0.14492305,  0.06800441]])
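
As a quick sanity check (a sketch added for illustration, not part of the original run), every row of the softmax output should sum to 1 along the normalized axis:

# hedged check: softmax rows form a probability distribution along axis=1
probs = softmax(np.random.random((3, 10)), axis=1)
assert np.allclose(probs.sum(axis=1), 1.0)
assert np.all((probs > 0) & (probs < 1))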

In [9]:
def relu(X):
    """
    The ReLU - Rectified Linear Unit.
    """
    return X * (X > 0)


# test relu:
X = np.random.normal(0, 1, size=(5, 1))
print(X)
print('')
print(relu(X))


[[ 0.09574597]
 [-0.05016848]
 [ 0.18500304]
 [-1.10497981]
 [ 0.27769682]]

[[ 0.09574597]
 [-0.        ]
 [ 0.18500304]
 [-0.        ]
 [ 0.27769682]]

In [45]:
# Make 20 random graphs.
syngraphs = []
for i in range(20):
    n_nodes = choice([i for i in range(2, 10)])
    n_edges = choice([i for i in range(1, n_nodes**2)])
    
    G = make_random_graph(sample(all_nodes, n_nodes), n_edges, features_dict)
    syngraphs.append(G)
    
len(syngraphs)


Out[45]:
20

In [31]:
# Write a function that computes the feature matrix, and writes the
# indices to the nodes of each graph.
def stacked_node_activations(graphs):
    """
    Stacks each graph's node feature vectors into one matrix and writes
    each node's row index onto the node as an 'idx' attribute.

    Call this after creating the graphs (and again whenever the set of
    graphs changes), since the functions below rely on the 'idx' stamps.

    Inputs:
    =======
    - graphs: (list) a list of graphs whose node feature vectors will
              be stacked.
    """
    features = []
    curr_idx = 0
    for g in graphs:
        for n, d in g.nodes(data=True):
            features.append(d['features'])
            g.node[n]['idx'] = curr_idx
            curr_idx += 1
    return np.vstack(features)

# test stacked_node_activations
layers = dict()
layers[0] = stacked_node_activations(syngraphs)
layers[1] = stacked_node_activations(syngraphs)
# layers[1]
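
A quick consistency check (a sketch, assuming stacked_node_activations was just run on the current syngraphs): the stacked matrix should have one row per node across all graphs, and each node's 'idx' should point at its own feature row.

# hedged check: row count and the per-node 'idx' stamps line up with the stacked matrix
total_nodes = sum(len(g.nodes()) for g in syngraphs)
assert layers[0].shape == (total_nodes, 10)
n0, d0 = syngraphs[0].nodes(data=True)[0]
assert np.array_equal(layers[0][d0['idx']], d0['features'])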

In [32]:
# Write a function that gets the indices of each node's neighbors.
def neighbor_indices(G, n):
    """
    Inputs:
    =======
    - G: the graph to which the node belongs.
    - n: the node inside the graph G.
    
    Returns:
    ========
    - indices: (list) a list of row indices into the stacked feature
               matrix; these are valid only if stacked_node_activations
               has been run on the current set of graphs.
    """
    indices = []
    for n in G.neighbors(n):
        indices.append(G.node[n]['idx'])
    return indices


# test neighbor_indices
nbr_idxs = neighbor_indices(syngraphs[0], syngraphs[0].nodes()[0])
nbr_idxs


Out[32]:
[1, 2, 3, 4, 5, 6]

In [33]:
# Write a function that sums each of the neighbors' activations for a
# given node in a given graph.
def neighbor_activations(G, n, activations_dict, layer):
    """
    Inputs:
    =======
    - G: the graph to which the node belongs.
    - n: the node inside the graph G.
    - activations_dict: a dictionary that stores the node activations 
                        at each layer.
    - layer: the layer at which to compute neighbor activations.
    """
    nbr_indices = neighbor_indices(G, n)
    return np.sum(activations_dict[layer][nbr_indices], axis=0)

neighbor_activations(syngraphs[0], syngraphs[0].nodes()[0], layers, 0)


Out[33]:
array([0, 1, 1, 1, 1, 1, 0, 1, 0, 0])
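
The same vector can be recomputed by hand (a minimal sketch, assuming the 'idx' stamps are current): sum the stacked feature rows of the node's neighbors directly.

# hedged cross-check against a manual sum over neighbor feature rows
g0 = syngraphs[0]
n0 = g0.nodes()[0]
manual = sum(layers[0][g0.node[nbr]['idx']] for nbr in g0.neighbors(n0))
assert np.array_equal(manual, neighbor_activations(g0, n0, layers, 0))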

In [34]:
# Write a function that stacks each of the nodes' neighbors
# activations together into a large feature matrix.

def stacked_neighbor_activations(graphs, activations_dict, layer):
    """
    Inputs:
    =======
    - graphs: (list) a list of NetworkX graphs.
    - activations_dict: (dict) a dictionary where keys are the layer
                        number and values are the node activations.
    - layer: (int) the layer at which to gather neighbor activations.
    
    Returns:
    ========
    - a stacked numpy array of neighbor activations
    """
    nbr_activations = []
    for g in graphs:
        for n in g.nodes():
            nbr_acts = neighbor_activations(g, n, activations_dict, layer)
            nbr_activations.append(nbr_acts)
    return np.vstack(nbr_activations)

# stacked_neighbor_activations(syngraphs, layers, 1)

In [46]:
# Write a function that computes the next layers' activations.

def activation(activations_dict, wb, layer, graphs):
    """
    Inputs:
    =======
    - activations_dict: (dict) a dictionary where keys are the layer
                        number and values are the node activations.
    - wb: (wb.WeightsAndBiases) the WB class storing the weights and
          biases.
    - layer: (int) the layer for which to compute the activations.
    - graphs: (list) the graphs whose node activations are being computed.
    
    Returns:
    ========
    - a stacked numpy array of activations, which should be assigned to
      the next layer of activations_dict.
    """
    
    self_acts = activations_dict[layer]
    self_acts = np.dot(self_acts, wb[layer]['self_weights'])

    nbr_acts = stacked_neighbor_activations(graphs, activations_dict, layer)
    nbr_acts = np.dot(nbr_acts, wb[layer]['nbr_weights'])
    
    biases = wb[layer]['biases']
    
    return relu(self_acts + nbr_acts + biases)

print(activation(layers, wb, 0, syngraphs))


---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-46-f98471cd7c40> in <module>()
     28     return relu(self_acts + nbr_acts + biases)
     29 
---> 30 print(activation(layers, wb, 0, syngraphs))

<ipython-input-46-f98471cd7c40> in activation(activations_dict, wb, layer, graphs)
     21     self_acts = np.dot(self_acts, wb[layer]['self_weights'])
     22 
---> 23     nbr_acts = stacked_neighbor_activations(graphs, activations_dict, layer)
     24     nbr_acts = np.dot(nbr_acts, wb[layer]['nbr_weights'])
     25 

<ipython-input-34-4bea9da03592> in stacked_neighbor_activations(graphs, activations_dict, layer)
     17     for g in graphs:
     18         for n in g.nodes():
---> 19             nbr_acts = neighbor_activations(g, n, activations_dict, layer)
     20             nbr_activations.append(nbr_acts)
     21     return np.vstack(nbr_activations)

<ipython-input-33-677f377e9d0e> in neighbor_activations(G, n, activations_dict, layer)
     11     - layer: the layer at which to compute neighbor activations.
     12     """
---> 13     nbr_indices = neighbor_indices(G, n)
     14     return np.sum(activations_dict[layer][nbr_indices], axis=0)
     15 

<ipython-input-32-78637bf466b0> in neighbor_indices(G, n)
     15     indices = []
     16     for n in G.neighbors(n):
---> 17         indices.append(G.node[n]['idx'])
     18     return indices
     19 

KeyError: 'idx'
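
The KeyError is most likely an execution-order problem: syngraphs was regenerated in In [45] after stacked_node_activations (In [31]) had stamped 'idx' onto the previous graph objects, so the current graphs carry no 'idx' attribute. A minimal fix sketch, assuming that diagnosis, is to re-stamp the indices before computing activations:

# hedged fix: rebuild the stacked activations so every current graph gets an 'idx'
layers = dict()
layers[0] = stacked_node_activations(syngraphs)
acts = activation(layers, wb, 0, syngraphs)
print(acts.shape)  # expected: (total number of nodes across syngraphs, 20)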

In [36]:
act = np.dot(stacked_neighbor_activations(syngraphs, layers, 0), wb[0]['nbr_weights']) + wb[0]['biases']
act.shape


Out[36]:
(118, 20)

In [37]:
# Write a function that gets the indices of all of the nodes in the
# graph.
def graph_indices(g):
    """
    Returns the row indices ('idx' attributes) of each of the nodes in the graph g.
    """
    return [d['idx'] for _, d in g.nodes(data=True)]

In [38]:
# Write a function that makes the fingerprint used for predictions.
def fingerprint(activations_dict, graphs):
    """
    Computes the final layer fingerprint for each graph.
    
    Inputs:
    =======
    - activations_dict: (dict) a dictionary where keys are the layer
                        number and values are the node activations.
    - graphs: a list of graphs for which to compute the fingerprints.
    
    Returns:
    ========
    - a stacked numpy array of fingerprints, of length len(graphs).
    """
    top_layer = max(activations_dict.keys())
    fingerprints = []
    for g in graphs:
        idxs = graph_indices(g)
        fp = np.sum(activations_dict[top_layer][idxs], axis=0)
        fingerprints.append(fp)
    
    return relu(np.vstack(fingerprints))

# test fingerprint function
fingerprint(layers, syngraphs)


Out[38]:
array([[1, 1, 1, 1, 1, 1, 0, 1, 0, 0],
       [1, 0, 1, 1, 1, 1, 1, 1, 1, 1],
       [1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
       [1, 1, 1, 1, 0, 1, 1, 0, 1, 0],
       [0, 0, 0, 1, 0, 0, 0, 1, 1, 1],
       [0, 0, 1, 1, 1, 0, 1, 1, 1, 1],
       [0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
       [0, 1, 1, 1, 0, 1, 1, 1, 0, 1],
       [0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
       [0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
       [1, 1, 0, 0, 0, 1, 0, 1, 0, 1],
       [0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
       [1, 1, 1, 0, 1, 1, 1, 0, 1, 1],
       [1, 1, 1, 0, 1, 1, 0, 1, 1, 1],
       [1, 1, 1, 1, 1, 1, 1, 0, 1, 1],
       [0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
       [1, 1, 1, 0, 0, 1, 0, 1, 1, 0],
       [1, 1, 0, 1, 0, 0, 1, 1, 1, 1],
       [0, 1, 1, 1, 0, 1, 1, 1, 1, 1],
       [1, 1, 1, 0, 1, 1, 1, 1, 1, 0]])

In [39]:
# Write a function that makes the forward pass predictions.
def predict(wb_vect, wb_unflattener, activations_dict, graphs):
    """
    Makes predictions.
    
    Change this function for each new learning problem.
    
    Inputs:
    =======
    - wb_vect: (WeightsAndBiases.vect)
    - wb_unflattener: (WeightsAndBiases.unflattener)
    - activations_dict: (dict) a dictionary where keys are the layer
                        number and values are the node activations.
    - graphs: a list of graphs for which to compute the fingerprints.
    
    Returns:
    ========
    - a numpy array of predictions, of length len(graphs).
    """
    
    wb = wb_unflattener(wb_vect)
    for k in sorted(wb.keys()):
        activations_dict[k + 1] = activation(activations_dict, wb, k, graphs)
        # print(activations_dict[k])
    
    top_layer = max(wb.keys())
    
    # use the activations_dict passed in, not the global `layers`
    fps = fingerprint(activations_dict, graphs)
    
    return np.dot(fps, wb[top_layer]['linweights'])

predict(*wb.flattened(), layers, syngraphs)


Out[39]:
array([[-0.4140944 ],
       [-0.80994935],
       [-0.05438736],
       [-0.08457893],
       [-0.03971847],
       [-0.35550169],
       [-0.15989237],
       [-0.3462132 ],
       [-0.02066551],
       [-0.02535193],
       [-0.06567089],
       [-0.03222333],
       [-0.22609366],
       [-0.44758711],
       [-0.18338839],
       [-0.04373497],
       [-0.18453728],
       [-0.33261526],
       [-0.21751378],
       [-0.33543044]])
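
Before training, these predictions are far from the targets; a quick hedged peek at the gap (a sketch, reusing the notebook's own helpers):

# mean absolute gap between untrained predictions and the true node-count scores
true_scores = np.array([score(g) for g in syngraphs]).reshape((len(syngraphs), 1))
print(np.mean(np.abs(predict(*wb.flattened(), layers, syngraphs) - true_scores)))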

In [40]:
# Write a function that computes the training loss.
def train_loss(wb_vect, wb_unflattener, activations_dict, graphs):
    """
    Computes the training loss as mean squared error.
    
    Inputs:
    =======
    - wb_vect: (WeightsAndBiases.vect)
    - wb_unflattener: (WeightsAndBiases.unflattener)
    - activations_dict: (dict) a dictionary where keys are the layer
                        number and values are the node activations.
    - graphs: a list of graphs for which to compute the fingerprints.

    Returns:
    ========
    - mean squared error.
    """
    
    scores = np.array([score(g) for g in graphs]).reshape((len(graphs), 1))
    # print(scores)
    preds = predict(wb_vect, wb_unflattener, activations_dict, graphs)
    # print(preds)
    return np.sum(np.power(preds - scores, 2)) / len(scores)

train_loss(wb.vect, wb.unflattener, layers, syngraphs)


Out[40]:
0.77174070288183994

In [41]:
gradfunc = grad(train_loss, argnum=0)
gradfunc(wb.vect, wb.unflattener, layers, syngraphs)


Out[41]:
array([  0.00000000e+00,   4.36545916e-02,   3.01206013e-02, ...,
        -2.99765891e-05,   6.78054181e-02,   0.00000000e+00])

In [42]:
def sgd(grad, wb_vect, wb_unflattener, activations_dict, graphs, callback=None, num_iters=200, step_size=0.1, mass=0.9):
    """
    Stochastic gradient descent with momentum.
    """
    from time import time
    velocity = np.zeros(len(wb_vect))
    for i in range(num_iters):
        start = time()
        print(i)
        g = grad(wb_vect, wb_unflattener, activations_dict, graphs)

        velocity = mass * velocity - (1.0 - mass) * g
        wb_vect += step_size * velocity
        # print(wb_vect)
        print(train_loss(wb_vect, wb_unflattener, activations_dict, graphs))
        end = time()
        print('Epoch Time: {0}'.format(end - start))
    return wb_vect, wb_unflattener

wb_vect, wb_unflattener = sgd(gradfunc, wb.vect, wb.unflattener, layers, syngraphs, num_iters=200, step_size=0.001)


0
0.67276760589
Epoch Time: 0.15809106826782227
1
0.519709997502
Epoch Time: 0.17552399635314941
2
0.365243994256
Epoch Time: 0.14338183403015137
3
0.249816520583
Epoch Time: 0.15077614784240723
4
0.189969966201
Epoch Time: 0.1405642032623291
5
0.179175168473
Epoch Time: 0.2888529300689697
6
0.202457764917
Epoch Time: 0.25779199600219727
7
0.240191970356
Epoch Time: 0.16279006004333496
8
0.275522609796
Epoch Time: 0.17266297340393066
9
0.296764999479
Epoch Time: 0.23338699340820312
10
0.298641869262
Epoch Time: 0.14048004150390625
11
0.281555614684
Epoch Time: 0.14654302597045898
12
0.250107867914
Epoch Time: 0.1505289077758789
13
0.211169974916
Epoch Time: 0.19708895683288574
14
0.171896379056
Epoch Time: 0.2993190288543701
15
0.1382448387
Epoch Time: 0.1716620922088623
16
0.113899536158
Epoch Time: 0.1766970157623291
17
0.100432243778
Epoch Time: 0.12554121017456055
18
0.0965960794042
Epoch Time: 0.16973400115966797
19
0.100463748826
Epoch Time: 0.12432217597961426
20
0.108049144955
Epoch Time: 0.0913701057434082
21
0.11540884078
Epoch Time: 0.09949517250061035
22
0.119570988513
Epoch Time: 0.08339095115661621
23
0.119011475604
Epoch Time: 0.10935187339782715
24
0.113759002025
Epoch Time: 0.08563804626464844
25
0.104929668427
Epoch Time: 0.09794497489929199
26
0.0943072461764
Epoch Time: 0.1364271640777588
27
0.0837504001197
Epoch Time: 0.11782717704772949
28
0.0747370492615
Epoch Time: 0.19859099388122559
29
0.0681957034625
Epoch Time: 0.09680795669555664
30
0.0642548247157
Epoch Time: 0.09877705574035645
31
0.0625307745733
Epoch Time: 0.1034848690032959
32
0.0623600439961
Epoch Time: 0.07760405540466309
33
0.0629591866183
Epoch Time: 0.10930895805358887
34
0.0636119086017
Epoch Time: 0.09763002395629883
35
0.0637714366822
Epoch Time: 0.08770084381103516
36
0.0631239511907
Epoch Time: 0.08395099639892578
37
0.0616098561329
Epoch Time: 0.13645005226135254
38
0.059354392362
Epoch Time: 0.1989579200744629
39
0.0566002354098
Epoch Time: 0.12883400917053223
40
0.0536422459885
Epoch Time: 0.16208505630493164
41
0.0507828101667
Epoch Time: 0.11619997024536133
42
0.0482654779948
Epoch Time: 0.09464287757873535
43
0.0462262510754
Epoch Time: 0.08729696273803711
44
0.0447086863827
Epoch Time: 0.11520695686340332
45
0.043665500671
Epoch Time: 0.08374595642089844
46
0.0428872137519
Epoch Time: 0.08515405654907227
47
0.0422829034787
Epoch Time: 0.15094804763793945
48
0.0417895022015
Epoch Time: 0.2170419692993164
49
0.0412820874714
Epoch Time: 0.12810897827148438
50
0.0406590542302
Epoch Time: 0.20282387733459473
51
0.0398781433166
Epoch Time: 0.141035795211792
52
0.0389411456534
Epoch Time: 0.10340404510498047
53
0.0378894612559
Epoch Time: 0.12001299858093262
54
0.0367838212329
Epoch Time: 0.0860891342163086
55
0.0356907861948
Epoch Time: 0.17188310623168945
56
0.0346632350332
Epoch Time: 0.18347811698913574
57
0.0337394791134
Epoch Time: 0.20557188987731934
58
0.0329363442973
Epoch Time: 0.2087240219116211
59
0.0322500644289
Epoch Time: 0.14825010299682617
60
0.0316617086732
Epoch Time: 0.12659907341003418
61
0.0311431121684
Epoch Time: 0.15209507942199707
62
0.0306634252321
Epoch Time: 0.21123409271240234
63
0.0301948375653
Epoch Time: 0.14742088317871094
64
0.0297165677404
Epoch Time: 0.11933302879333496
65
0.0292176307625
Epoch Time: 0.08783602714538574
66
0.0286955813619
Epoch Time: 0.08727002143859863
67
0.0281556101859
Epoch Time: 0.12844514846801758
68
0.0276078313723
Epoch Time: 0.3142120838165283
69
0.0270644669798
Epoch Time: 0.19463491439819336
70
0.0265370905064
Epoch Time: 0.16424918174743652
71
0.0260040235816
Epoch Time: 0.17171096801757812
72
0.025495756376
Epoch Time: 0.12290501594543457
73
0.0250200682179
Epoch Time: 0.13110113143920898
74
0.0245763291716
Epoch Time: 0.09866094589233398
75
0.0241604433213
Epoch Time: 0.1123499870300293
76
0.0237661477999
Epoch Time: 0.1517031192779541
77
0.0233864873758
Epoch Time: 0.29996204376220703
78
0.023015121729
Epoch Time: 0.1734180450439453
79
0.0226472767675
Epoch Time: 0.20694899559020996
80
0.0222802319695
Epoch Time: 0.23494315147399902
81
0.0219132577066
Epoch Time: 0.17871308326721191
82
0.0215474817406
Epoch Time: 0.1515040397644043
83
0.0211852015127
Epoch Time: 0.1388230323791504
84
0.0208297055611
Epoch Time: 0.09818387031555176
85
0.0204868726028
Epoch Time: 0.11666607856750488
86
0.0201548669972
Epoch Time: 0.09231090545654297
87
0.0198352321883
Epoch Time: 0.19968199729919434
88
0.0195276137343
Epoch Time: 0.1350541114807129
89
0.0192313974454
Epoch Time: 0.12230086326599121
90
0.0189483664657
Epoch Time: 0.13256287574768066
91
0.0186759696027
Epoch Time: 0.14299392700195312
92
0.0184108064028
Epoch Time: 0.12851405143737793
93
0.0181515451804
Epoch Time: 0.11595797538757324
94
0.0178972320437
Epoch Time: 0.11960005760192871
95
0.0176473296043
Epoch Time: 0.10412788391113281
96
0.017401674398
Epoch Time: 0.0810401439666748
97
0.0171604142699
Epoch Time: 0.24204802513122559
98
0.0169236301451
Epoch Time: 0.20845389366149902
99
0.0166915759697
Epoch Time: 0.17960882186889648
100
0.0164637186594
Epoch Time: 0.16512084007263184
101
0.0162402521173
Epoch Time: 0.11943697929382324
102
0.0160222077873
Epoch Time: 0.16980290412902832
103
0.0158094729559
Epoch Time: 0.1951150894165039
104
0.015601803286
Epoch Time: 0.15648889541625977
105
0.015398870775
Epoch Time: 0.08746099472045898
106
0.0152003167325
Epoch Time: 0.0904397964477539
107
0.0150068775653
Epoch Time: 0.17812013626098633
108
0.0148180157911
Epoch Time: 0.1676790714263916
109
0.0146333064458
Epoch Time: 0.16890192031860352
110
0.0144521633072
Epoch Time: 0.1773219108581543
111
0.0142745373198
Epoch Time: 0.17998600006103516
112
0.0141003508494
Epoch Time: 0.26571106910705566
113
0.0139295884792
Epoch Time: 0.1656949520111084
114
0.0137622265502
Epoch Time: 0.203322172164917
115
0.0135982572865
Epoch Time: 0.19545817375183105
116
0.0134376544254
Epoch Time: 0.14963293075561523
117
0.0132803642991
Epoch Time: 0.3542819023132324
118
0.0131263070962
Epoch Time: 0.14770293235778809
119
0.0129753841194
Epoch Time: 0.2656288146972656
120
0.012827264676
Epoch Time: 0.18648099899291992
121
0.012681817288
Epoch Time: 0.17218303680419922
122
0.0125391113949
Epoch Time: 0.14994096755981445
123
0.0123990425518
Epoch Time: 0.15392088890075684
124
0.0122615219394
Epoch Time: 0.14308404922485352
125
0.0121264773663
Epoch Time: 0.2529280185699463
126
0.0119938516953
Epoch Time: 0.37139391899108887
127
0.0118635994188
Epoch Time: 0.1851511001586914
128
0.0117356776818
Epoch Time: 0.24257206916809082
129
0.0116100537828
Epoch Time: 0.3074629306793213
130
0.0114865240618
Epoch Time: 0.20826411247253418
131
0.0113651039806
Epoch Time: 0.16225004196166992
132
0.0112460463196
Epoch Time: 0.17823004722595215
133
0.0111293234149
Epoch Time: 0.21312212944030762
134
0.0110146806006
Epoch Time: 0.17337298393249512
135
0.0109020623867
Epoch Time: 0.17001104354858398
136
0.0107914138018
Epoch Time: 0.2066490650177002
137
0.010682679669
Epoch Time: 0.12774896621704102
138
0.0105765165672
Epoch Time: 0.1224219799041748
139
0.010474609562
Epoch Time: 0.10490989685058594
140
0.0103748897019
Epoch Time: 0.12877893447875977
141
0.0102769865794
Epoch Time: 0.11943411827087402
142
0.0101808378368
Epoch Time: 0.1585099697113037
143
0.0100863973595
Epoch Time: 0.16044306755065918
144
0.00999361697477
Epoch Time: 0.14638590812683105
145
0.00990245177087
Epoch Time: 0.16121196746826172
146
0.00981285940282
Epoch Time: 0.2007291316986084
147
0.00972479947817
Epoch Time: 0.16707801818847656
148
0.00963823306817
Epoch Time: 0.12162494659423828
149
0.00955312237853
Epoch Time: 0.1602339744567871
150
0.00946943057904
Epoch Time: 0.15663480758666992
151
0.00938681338686
Epoch Time: 0.1436450481414795
152
0.00930547965715
Epoch Time: 0.10521912574768066
153
0.00922531599515
Epoch Time: 0.12975788116455078
154
0.00914637099674
Epoch Time: 0.12823891639709473
155
0.0090686376313
Epoch Time: 0.13267898559570312
156
0.00899209132262
Epoch Time: 0.21919679641723633
157
0.00891670866121
Epoch Time: 0.12835407257080078
158
0.00884246721179
Epoch Time: 0.2546579837799072
159
0.00876934528852
Epoch Time: 0.14373016357421875
160
0.00869743666442
Epoch Time: 0.15531301498413086
161
0.00862766946125
Epoch Time: 0.17506098747253418
162
0.0085589927266
Epoch Time: 0.16774988174438477
163
0.00849142092924
Epoch Time: 0.15909695625305176
164
0.00842497603245
Epoch Time: 0.1340959072113037
165
0.00835954177823
Epoch Time: 0.17125296592712402
166
0.00829509295534
Epoch Time: 0.23639392852783203
167
0.00823160513496
Epoch Time: 0.1316661834716797
168
0.00816905497542
Epoch Time: 0.14069509506225586
169
0.00810742065991
Epoch Time: 0.14436602592468262
170
0.00804667973131
Epoch Time: 0.1110529899597168
171
0.00798681176122
Epoch Time: 0.11226511001586914
172
0.00792779736529
Epoch Time: 0.09699201583862305
173
0.00786961812812
Epoch Time: 0.1319270133972168
174
0.00781225649708
Epoch Time: 0.1273939609527588
175
0.0077556956612
Epoch Time: 0.26929688453674316
176
0.00769991943115
Epoch Time: 0.15114212036132812
177
0.00764491213349
Epoch Time: 0.15407705307006836
178
0.00759065852826
Epoch Time: 0.12313389778137207
179
0.00753714375365
Epoch Time: 0.12388396263122559
180
0.00748435329713
Epoch Time: 0.13428401947021484
181
0.00743227298849
Epoch Time: 0.12202000617980957
182
0.00738088900813
Epoch Time: 0.1233060359954834
183
0.00733019080919
Epoch Time: 0.10522317886352539
184
0.00728017322975
Epoch Time: 0.11958694458007812
185
0.00723081388635
Epoch Time: 0.1788480281829834
186
0.00718210026439
Epoch Time: 0.15226411819458008
187
0.0071340202557
Epoch Time: 0.1587531566619873
188
0.00708656219322
Epoch Time: 0.13465619087219238
189
0.00703971477843
Epoch Time: 0.11770105361938477
190
0.0069934669826
Epoch Time: 0.12220406532287598
191
0.0069476857064
Epoch Time: 0.1321578025817871
192
0.00690244341401
Epoch Time: 0.12490105628967285
193
0.00685775635021
Epoch Time: 0.10334086418151855
194
0.00681361657944
Epoch Time: 0.12047815322875977
195
0.00677001618953
Epoch Time: 0.1936509609222412
196
0.0067269473045
Epoch Time: 0.1417231559753418
197
0.00668440209738
Epoch Time: 0.13784503936767578
198
0.00664237280267
Epoch Time: 0.13803410530090332
199
0.00660085172757
Epoch Time: 0.14908194541931152

In [23]:
train_loss(wb_vect, wb.unflattener, layers, syngraphs)


Out[23]:
0.043996281002482249

In [24]:
wb.unflattener(wb.vect)[2]['linweights']


Out[24]:
array([[ 0.1676535 ],
       [ 0.03242438],
       [-0.00748493],
       [ 0.66346477],
       [ 0.27239111],
       [-0.09952596],
       [ 0.4714696 ],
       [ 0.07962087],
       [ 0.28633275],
       [-0.04745121]])

In [25]:
scores = [score(g) for g in syngraphs]

In [26]:
preds = predict(wb_vect, wb.unflattener, layers, syngraphs)

In [27]:
[i for i in zip(scores, preds)]


Out[27]:
[(2, array([ 2.06262481])),
 (6, array([ 5.92384939])),
 (9, array([ 9.15106495])),
 (5, array([ 5.39515665])),
 (7, array([ 7.002901])),
 (2, array([ 2.00901536])),
 (9, array([ 8.53653948])),
 (8, array([ 7.91750888])),
 (8, array([ 7.99536548])),
 (4, array([ 4.17193318]))]

In [28]:
new_graphs = [make_random_graph(sample(all_nodes, 4), 5, features_dict) for i in range(100)]
# predict(wb_vect, wb.unflattener, layers, new_graphs)
new_graphs[0].nodes(data=True)

stacked_node_activations(new_graphs)


Out[28]:
array([[1, 0, 0, ..., 0, 0, 0],
       [0, 0, 0, ..., 0, 0, 1],
       [0, 0, 0, ..., 0, 0, 0],
       ..., 
       [0, 0, 0, ..., 0, 1, 0],
       [0, 0, 1, ..., 0, 0, 0],
       [0, 0, 0, ..., 0, 0, 0]])

In [29]:
predict(wb_vect, wb.unflattener, layers, new_graphs)


---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-29-f82372ec046b> in <module>()
----> 1 predict(wb_vect, wb.unflattener, layers, new_graphs)

<ipython-input-19-38ccf9e2efb0> in predict(wb_vect, wb_unflattener, activations_dict, graphs)
     21     wb = wb_unflattener(wb_vect)
     22     for k in sorted(wb.keys()):
---> 23         activations_dict[k + 1] = activation(activations_dict, wb, k, graphs)
     24         # print(activations_dict[k])
     25 

<ipython-input-15-32815cd899ec> in activation(activations_dict, wb, layer, graphs)
     21     self_acts = np.dot(self_acts, wb[layer]['self_weights'])
     22 
---> 23     nbr_acts = stacked_neighbor_activations(graphs, activations_dict, layer)
     24     # print('nbr_dtype: {0}....... wb_dtype: {1}'.format(nbr_acts.dtype, wb[layer]['nbr_weights'].dtype))
     25     # print('nbr_act type: {0}'.format(type(nbr_acts)))

<ipython-input-14-4bea9da03592> in stacked_neighbor_activations(graphs, activations_dict, layer)
     17     for g in graphs:
     18         for n in g.nodes():
---> 19             nbr_acts = neighbor_activations(g, n, activations_dict, layer)
     20             nbr_activations.append(nbr_acts)
     21     return np.vstack(nbr_activations)

<ipython-input-13-677f377e9d0e> in neighbor_activations(G, n, activations_dict, layer)
     12     """
     13     nbr_indices = neighbor_indices(G, n)
---> 14     return np.sum(activations_dict[layer][nbr_indices], axis=0)
     15 
     16 neighbor_activations(syngraphs[0], syngraphs[0].nodes()[0], layers, 0)

IndexError: index 62 is out of bounds for axis 0 with size 60
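
This IndexError has the same root cause as the earlier KeyError: layers still holds activations stacked from a previous set of graphs (60 rows here), while stacked_node_activations(new_graphs) stamped the new graphs with row indices into a different, larger matrix. A minimal fix sketch, assuming that diagnosis, is to build a fresh activations dictionary for the new graphs and predict from it:

# hedged fix: give the new graphs their own activations dictionary
new_layers = dict()
new_layers[0] = stacked_node_activations(new_graphs)
preds_new = predict(wb_vect, wb.unflattener, new_layers, new_graphs)
print(preds_new.shape)  # expected: (len(new_graphs), 1)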

In [ ]: