In [1]:
import os
import csv
import random
import platform
import pandas as pd
import networkx as nx
from graph_partitioning import GraphPartitioning, utils

cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "MODULARITY", "LONELINESS", "NETWORK PERMANENCE", "NORM. MUTUAL INFO"]
pwd = %pwd

run_metrics = True

# OUTSTANDING ISSUES
# ISSUE [x]: sliding window bug
# ISSUE [x]: integrate NMI score
# ISSUE []: disable OSLOM: or get RELATIVE PATH BETWEEN EXE AND DATA FILE
# ISSUE [x]: merge with master
# ISSUE []: GAM Model - make sure it uses assignments and not prediction model
# ISSUE []: debug internals of PaToH for data conversion
# ISSUE []: debug internals of SCOTCH & upgrade SCOTCH
# ISSUE []: debug virtual nodes performance
# ISSUE []: hyperedge expansion experiments in batch_mode
# ISSUE []: virtual nodes?

config = {

    "DATA_FILENAME": os.path.join(pwd, "data", "predition_model_tests", "network", "network_1.txt"),
    "OUTPUT_DIRECTORY": os.path.join(pwd, "output"),

    # Set which algorithm is run for the PREDICTION MODEL.
    # One of: 'FENNEL', 'SCOTCH', or 'PATOH'
    "PREDICTION_MODEL_ALGORITHM": "PATOH",

    # Alternatively, read the prediction model from an input file.
    # Leave empty to generate the prediction model using the algorithm above.
    "PREDICTION_MODEL": "",

    
    "PARTITIONER_ALGORITHM": "PATOH",

    # File containing simulated arrivals, used to simulate nodes arriving
    # at the shelter. Each line corresponds to a node: a value of 1 means
    # the node has arrived; a value of 0 means it has not arrived or does
    # not need a shelter.
    "SIMULATED_ARRIVAL_FILE": os.path.join(pwd,
                                           "data",
                                           "predition_model_tests",
                                           "dataset_1_shift_rotate",
                                           "simulated_arrival_list",
                                           "percentage_of_prediction_correct_90",
                                           "arrival_90_1.txt"
                                          ),
    
    # File containing the prediction of a node arriving. Unlike the simulated
    # arrivals, the values in this file are known before the disaster.
    "PREDICTION_LIST_FILE": os.path.join(pwd,
                                         "data",
                                         "predition_model_tests",
                                         "dataset_1_shift_rotate",
                                         "prediction_list",
                                         "prediction_1.txt"
                                        ),

    # File containing the geographic location of each node, in "x,y" format.
    "POPULATION_LOCATION_FILE": os.path.join(pwd,
                                             "data",
                                             "predition_model_tests",
                                             "coordinates",
                                             "coordinates_1.txt"
                                            ),

    # Number of shelters
    "num_partitions": 6,

    # Number of iterations used when building the prediction model
    "num_iterations": 1,

    # Fraction of arrivals assigned using the prediction model before it is
    # discarded. When set to 0, the prediction model is discarded immediately
    # (useful for one-shot).
    "prediction_model_cut_off": 0.10,

    # Alpha value used in one-shot (when restream_batches set to 1)
    "one_shot_alpha": 0.5,

    # Number of arrivals to batch before recalculating alpha and restreaming.
    # When set to 1, one-shot is used with the alpha value above.
    "restream_batches": 10,

    # When the batch size is reached: if True, each node is assigned
    # individually, first in first out. If False, the entire batch is
    # processed and emptied before working on the next batch.
    "sliding_window": False,

    # Create virtual nodes based on prediction model
    "use_virtual_nodes": False,

    # Virtual nodes: edge weight
    "virtual_edge_weight": 1.0,

    # Loneliness score parameter. Used when scoring a partition by how many
    # lonely nodes exist.
    "loneliness_score_param": 1.2,

    ####
    # GRAPH MODIFICATION FUNCTIONS

    # Also enables the edge calculation function.
    "graph_modification_functions": True,

    # If set, the node weight is set to 100 if the node arrives at the shelter,
    # otherwise the node is removed from the graph.
    "alter_arrived_node_weight_to_100": False,

    # Uses generalized additive models (GAMs) from R to predict nodes that
    # have not arrived. This sets the node weight on unarrived nodes to the
    # prediction given by a GAM.
    # Needs POPULATION_LOCATION_FILE to be set.
    "alter_node_weight_to_gam_prediction": False,

    # The value of 'k' used in the GAM is the number of arrived nodes,
    # capped at this maximum value.
    "gam_k_value": 100,

    # Alter the edge weight for nodes that haven't arrived. This is a way to
    # de-emphasise the prediction model for the unknown nodes.
    "prediction_model_emphasis": 1.0,
    
    
    "SCOTCH_LIB_PATH": os.path.join(pwd, "libs/scotch/macOS/libscotch.dylib")
    if 'Darwin' in platform.system()
    else "/usr/local/lib/libscotch.so",
    
    # Path to the PaToH shared library
    "PATOH_LIB_PATH": os.path.join(pwd, "libs/patoh/lib/macOS/libpatoh.dylib")
    if 'Darwin' in platform.system()
    else os.path.join(pwd, "libs/patoh/lib/linux/libpatoh.so"),
    
    "PATOH_ITERATIONS": 5,
        
    # Expansion modes: 'avg_node_weight', 'total_node_weight',
    # 'smallest_node_weight', 'largest_node_weight'.
    # Append '_squared' or '_sqrt' to any of the above for weight^2 or
    # sqrt(weight), e.g. 'avg_node_weight_squared'.
    "PATOH_HYPEREDGE_EXPANSION_MODE": 'no_expansion',

    # Alters how much information to print. Keep it at 1 for this notebook.
    # 0 - will print nothing, useful for batch operations.
    # 1 - prints basic information on assignments and operations.
    # 2 - prints more information as it batches arrivals.
    "verbose": 1

}

gp = GraphPartitioning(config)

# Optional: shuffle the order of nodes arriving
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(gp.arrival_order)

%pylab inline


Populating the interactive namespace from numpy and matplotlib
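
Before running anything heavy, it is worth confirming that the configured paths resolve. A minimal sketch, using only keys defined in the config above:

# Sanity check: warn about any missing input file or library path.
for key in ("DATA_FILENAME", "SIMULATED_ARRIVAL_FILE", "PREDICTION_LIST_FILE",
            "POPULATION_LOCATION_FILE", "PATOH_LIB_PATH"):
    if not os.path.exists(config[key]):
        print("Missing path for", key, "->", config[key])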

In [2]:
gp.load_network()


Graph loaded...
Name: 
Type: Graph
Number of nodes: 1000
Number of edges: 2938
Average degree:   5.8760
Graph is undirected
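
The reported average degree is 2|E|/|V| = 2 * 2938 / 1000 = 5.876. A quick cross-check (assuming gp.G holds the loaded graph):

# Cross-check the printed statistics against the graph object.
n, m = gp.G.number_of_nodes(), gp.G.number_of_edges()
print(n, m, 2.0 * m / n)   # expect 1000 2938 5.876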

In [3]:
gp.init_partitioner()


PaToH partitioner loaded for generating PREDICTION MODEL.
PaToH partitioner loaded for making shelter assignments.

In [4]:
gp.prediction_model()


Ran PaToH for 5 iterations with min_cuts = 165 and max_cuts = 217 - picked min_cuts assignments.
PREDICTION MODEL
----------------


Assignments:
[ 1  4  5  2  4  0  2  0  0  0  5  0  3  0  1  1  4  0  0  2  0  1  1  0  3  3  5  1  2  5  3  4  4  4  1  5  4  1  3  2  0  3  5  0  2  2  5  0  0  1  3  2  5  3  1  3  3  0  4  3  1  0  2  2  0  3  0  4  2  4  0  5  4  1  5  5  2  5  5  1  1  0  1  3  5  3  0  0  3  2  1  5  0  3  1  2  1  4  4  4  5  1  1  1  1  3  4  0  5  5  2  2  2  0  1  4  1  3  2  1  4  3  5  4  5  1  2  1  3  2  0  3  2  3  0  4  0  2  2  2  0  2  4  3  5  4  1  3  3  5  2  2  4  5  1  4  0  3  3  1  0  2  3  3  3  5  5  1  1  1  0  5  2  2  5  0  3  0  2  1  0  2  1  2  4  1  1  5  2  0  2  3  2  3  1  5  1  3  0  2  0  5  4  2  4  1  0  2  4  4  2  2  5  4  2  5  4  2  5  1  3  5  2  0  1  3  0  4  3  1  1  2  3  1  3  3  2  2  4  4  1  5  0  0  5  4  5  2  5  3  2  0  2  1  1  2  3  1  1  4  1  3  5  1  2  5  1  4  4  0  4  4  2  3  3  3  4  0  2  5  2  4  0  4  2  1  2  5  2  0  4  0  0  3  3  0  5  2  5  1  2  2  0  3  5  1  4  5  1  5  1  3  3  1  1  1  5  2  3  5  2  3  3  2  4  4  4  2  0  4  2  0  3  4  0  3  1  4  2  4  3  3  1  5  5  4  2  4  4  4  2  1  1  0  3  3  1  3  2  1  3  3  0  1  0  5  2  2  3  3  2  5  5  3  4  1  5  5  1  4  0  5  2  2  2  0  3  5  5  5  5  1  0  2  4  1  2  4  3  1  1  4  5  5  2  5  3  1  0  4  1  1  3  1  1  0  2  0  2  4  4  3  0  1  1  3  2  0  3  1  4  5  1  1  0  3  1  4  5  3  5  4  1  1  1  1  3  4  5  4  4  2  4  0  2  3  0  0  2  5  4  0  4  3  3  2  5  1  0  2  1  5  3  3  1  2  4  2  3  2  5  3  5  2  0  3  2  1  3  4  0  0  4  2  0  5  0  5  3  3  3  4  2  1  2  5  4  5  3  0  0  2  4  0  5  0  4  3  3  0  1  4  1  0  0  1  0  5  3  2  3  5  2  1  5  0  5  4  2  3  5  0  5  2  2  4  2  4  1  2  1  0  4  3  2  3  2  0  2  3  4  4  5  0  3  5  1  3  1  0  3  0  5  3  0  2  3  0  3  3  3  0  0  1  0  1  1  0  4  2  1  4  1  4  0  2  4  1  0  3  2  5  3  3  5  2  3  0  0  4  0  3  1  3  5  4  5  1  4  3  3  1  3  1  5  5  0  4  4  4  5  0  0  2  2  4  1  2  0  5  4  4  2  4  4  3  1  0  2  5  3  2  1  3  0  1  1  5  2  5  4  5  2  1  3  3  2  5  3  4  2  5  3  4  3  0  1  2  3  2  3  4  0  2  5  5  4  4  0  2  2  1  0  0  4  3  0  5  0  0  5  5  0  4  4  4  2  4  3  1  5  4  1  2  0  1  0  5  0  4  4  3  5  4  1  0  2  0  5  3  4  2  2  3  4  2  1  0  0  0  2  4  4  4  2  5  3  4  5  1  4  1  4  1  0  0  0  3  0  5  5  5  5  5  4  5  3  3  5  5  1  2  5  5  3  5  0  5  1  5  3  1  4  1  0  5  1  5  4  4  5  4  4  5  4  0  5  3  3  0  5  1  5  2  1  1  5  2  3  1  0  1  4  0  4  5  3  2  1  1  4  3  0  3  3  4  3  4  1  0  3  5  0  3  3  2  5  4  1  4  1  4  2  1  4  5  5  4  0  2  2  3  5  0  0  5  4  0  1  3  4  4  2  1  4  5  4  2  5  1  0  0  4  4  4  0  4  0  5  4  5  1  3  1  2  2  1  3  3  3  3  0  2  2  4  5  4  5  2  1  1  3  0  2  1  3  1  2  5  0  0  5  3  5  0  1  5  2  0  4  1  5  2  3  2  5  4  1  4  4  0  3  2  0  3  2  0  5  1  2  5  4  0  0  0  0  0  5  5  3  2  3  2  3  5  5  5  5  3  2  1  0  3  0  3  0  4  3  0  2  1  4  1  4  5  5  0  1  2  5  0  1  2  2  5  3  4  0  4  4  1  5  2  1  5  1  2  5  4  4]

Fixed: 0

Partitions - nodes (weight):
P0: 168.0 (168.0)
P1: 166.0 (166.0)
P2: 166.0 (166.0)
P3: 167.0 (167.0)
P4: 166.0 (166.0)
P5: 167.0 (167.0)
Out[4]:
[[0.0080000000000000071,
  0.056160653505786251,
  165,
  205,
  0.77481755837753929,
  0.85656177564213065,
  '0.379307',
  1.0]]
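
Each row returned above lines up with the cols list defined in the first cell, so the scores are easy to tabulate. A sketch, using the row from Out[4] verbatim:

# Tabulate the metrics row from Out[4] against the column names in `cols`.
row = [0.0080000000000000071, 0.056160653505786251, 165, 205,
       0.77481755837753929, 0.85656177564213065, '0.379307', 1.0]
print(pd.DataFrame([row], columns=cols).T)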

In [5]:
gp.prediction_model_cut_off = 0.5
m = gp.assign_cut_off()


Assign first 139 arrivals using prediction model, then discard


Assignments:
[-1 -1 -1 -1  4  0 -1 -1 -1 -1 -1 -1  3  0 -1 -1 -1  0  0 -1 -1 -1 -1 -1  3  3 -1 -1 -1  5 -1  4 -1  4 -1 -1 -1 -1  3 -1  0  3  5 -1 -1 -1 -1  0 -1 -1  3 -1 -1  3 -1  3  3 -1  4 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  4 -1 -1  5  2  5 -1 -1 -1 -1 -1  3 -1  3  0 -1 -1 -1 -1 -1 -1  3 -1 -1 -1  4  4 -1 -1 -1 -1 -1 -1 -1  4 -1 -1  5 -1 -1  2 -1 -1  4 -1  3 -1 -1 -1  3  5  4 -1 -1 -1 -1  3 -1 -1  3 -1  3 -1  4 -1 -1 -1 -1 -1 -1 -1  3  5 -1 -1 -1  3 -1 -1 -1 -1 -1 -1 -1  0  3 -1 -1 -1 -1  3 -1  3 -1 -1 -1 -1 -1  0 -1 -1 -1 -1 -1  3 -1 -1 -1  0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  3 -1  3 -1 -1 -1  3 -1 -1 -1  5 -1 -1  4 -1 -1 -1  4 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  3  5 -1  0 -1  3 -1 -1  3 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  4 -1 -1 -1  0  5  4 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  3 -1 -1 -1 -1  3  5 -1 -1 -1 -1 -1  4 -1 -1 -1 -1  3  3 -1  4  0 -1 -1  2 -1 -1 -1 -1 -1 -1 -1 -1  0 -1 -1 -1 -1  3  0 -1 -1 -1 -1 -1 -1 -1  3 -1 -1  4 -1 -1 -1 -1 -1  3 -1 -1 -1  5 -1  3 -1 -1 -1  3 -1  4  4 -1 -1  0  4 -1 -1  3  4 -1 -1 -1 -1 -1 -1  3  3 -1 -1  5  4  2 -1 -1 -1 -1 -1 -1  0  3  3 -1  3 -1 -1 -1 -1 -1 -1  0 -1 -1 -1  3 -1 -1  5 -1  3 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  5 -1  0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1  5  3 -1  0 -1 -1 -1  3 -1 -1  0 -1  0 -1 -1 -1  3  0 -1 -1  3 -1 -1  3 -1 -1  5 -1 -1 -1  3 -1 -1  5  3  5  4 -1 -1 -1 -1 -1 -1  5 -1  4 -1 -1 -1 -1  3  0 -1 -1 -1 -1 -1 -1  3  3 -1 -1 -1  0 -1 -1 -1 -1  3 -1 -1  4 -1  3 -1  5  3 -1 -1 -1 -1 -1 -1 -1 -1 -1  0  4 -1  0 -1 -1 -1  3 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]

Fixed: 139

Partitions - nodes (weight):
P0: 27 (0)
P1: 0 (0)
P2: 4 (0)
P3: 60 (0)
P4: 27 (0)
P5: 21 (0)
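
The per-partition counts above can be reproduced from the printed assignment vector, where -1 marks nodes not yet assigned. A sketch; `assignments` stands in for however the library exposes that vector:

from collections import Counter

# Tally assigned nodes per partition, skipping the -1 placeholders.
def partition_counts(assignments, num_partitions):
    tally = Counter(a for a in assignments if a != -1)
    return [tally.get(p, 0) for p in range(num_partitions)]

# e.g. partition_counts(assignments, gp.num_partitions) -> [27, 0, 4, 60, 27, 21]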

In [17]:
Gsub_ = gp.G.subgraph(gp.nodes_arrived)

# Inspect the weights of arrived nodes (NetworkX 1.x API: nodes_iter).
# 'parts' is initialised for a per-partition tally but is not filled in here.
parts = [0] * gp.num_partitions
for n in Gsub_.nodes_iter(data=True):
    node = n[0]
    if 'weight' in n[1]:
        weight = n[1]['weight']
        print(weight)
    else:
        # Default weight when a node has no explicit 'weight' attribute.
        weight = 1.0


1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
1.0
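
The loop above only prints weights; the per-partition tally that parts was initialised for would also need each node's partition, which this cell does not look up. Summing total weight with the same 1.0 default is straightforward, though:

# Total weight of arrived nodes, defaulting to 1.0 where no weight is set
# (same NetworkX 1.x iterator as above).
total_weight = sum(d.get('weight', 1.0) for _, d in Gsub_.nodes_iter(data=True))
print(total_weight)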

In [6]:
# Collect the next batch of arrivals not already fixed by the
# prediction-model cut-off.
batch_arrived = []
for i, a in enumerate(gp.arrival_order):
    # Skip nodes already assigned (fixed) during the cut-off phase.
    if gp.fixed[a] == 1:
        continue
    if gp.graph_modification_functions:
        # Nodes that never arrive are removed from the graph entirely.
        if gp.simulated_arrival_list[a] == 0:
            gp.G.remove_node(a)
            continue
    # Without graph modification, still skip nodes that never arrive.
    if gp.simulated_arrival_list[a] == 0:
        continue
    batch_arrived.append(a)
    # Stop once a full batch has been collected.
    if gp.restream_batches == len(batch_arrived):
        break

print(batch_arrived)


[500, 508, 509, 510, 512, 513, 515, 516, 517, 530]
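
A couple of quick invariants for the batch just collected, using only the attributes referenced in the loop above:

# Every batch member should be unfixed and should actually arrive.
assert len(batch_arrived) == gp.restream_batches
assert all(gp.fixed[a] != 1 for a in batch_arrived)
assert all(gp.simulated_arrival_list[a] != 0 for a in batch_arrived)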

In [13]:
# Test PaToH algorithm: build a subgraph of all arrived nodes plus the
# current batch (NetworkX 1.x API: nodes_iter / edges_iter).
Gsub = gp.G.subgraph(gp.nodes_arrived + batch_arrived)

print_weights = True
if print_weights:
    node_weights = {n[0]: n[1]['weight'] for n in Gsub.nodes_iter(data=True)}
    edge_weights = {(e[0], e[1]): e[2]['weight'] for e in Gsub.edges_iter(data=True)}

    print('node_weights', node_weights)
    print('edge_weights', edge_weights)

# Sorted node order feeds the ID remapping in the next cell.
sortedNodes = sorted(Gsub.nodes())
print('sortedNodes', sortedNodes)


node_weights {4: 1.0, 5: 1.0, 12: 1.0, 13: 1.0, 17: 1.0, 18: 1.0, 24: 1.0, 25: 1.0, 29: 1.0, 31: 1.0, 33: 1.0, 38: 1.0, 40: 1.0, 41: 1.0, 42: 1.0, 47: 1.0, 50: 1.0, 53: 1.0, 55: 1.0, 56: 1.0, 58: 1.0, 72: 1.0, 75: 1.0, 76: 1.0, 77: 1.0, 83: 1.0, 85: 1.0, 86: 1.0, 93: 1.0, 97: 1.0, 98: 1.0, 106: 1.0, 109: 1.0, 112: 1.0, 115: 1.0, 117: 1.0, 121: 1.0, 122: 1.0, 123: 1.0, 128: 1.0, 131: 1.0, 133: 1.0, 135: 1.0, 143: 1.0, 144: 1.0, 148: 1.0, 156: 1.0, 157: 1.0, 162: 1.0, 164: 1.0, 170: 1.0, 176: 1.0, 180: 1.0, 191: 1.0, 193: 1.0, 197: 1.0, 201: 1.0, 204: 1.0, 208: 1.0, 220: 1.0, 221: 1.0, 223: 1.0, 225: 1.0, 228: 1.0, 239: 1.0, 243: 1.0, 244: 1.0, 245: 1.0, 256: 1.0, 261: 1.0, 262: 1.0, 268: 1.0, 273: 1.0, 274: 1.0, 276: 1.0, 277: 1.0, 280: 1.0, 289: 1.0, 294: 1.0, 295: 1.0, 303: 1.0, 306: 1.0, 312: 1.0, 316: 1.0, 318: 1.0, 322: 1.0, 324: 1.0, 325: 1.0, 328: 1.0, 329: 1.0, 332: 1.0, 333: 1.0, 340: 1.0, 341: 1.0, 344: 1.0, 345: 1.0, 346: 1.0, 353: 1.0, 354: 1.0, 355: 1.0, 357: 1.0, 364: 1.0, 368: 1.0, 371: 1.0, 373: 1.0, 390: 1.0, 392: 1.0, 405: 1.0, 406: 1.0, 408: 1.0, 412: 1.0, 415: 1.0, 417: 1.0, 421: 1.0, 422: 1.0, 425: 1.0, 428: 1.0, 431: 1.0, 435: 1.0, 438: 1.0, 439: 1.0, 440: 1.0, 441: 1.0, 448: 1.0, 450: 1.0, 455: 1.0, 456: 1.0, 463: 1.0, 464: 1.0, 468: 1.0, 473: 1.0, 476: 1.0, 478: 1.0, 480: 1.0, 481: 1.0, 491: 1.0, 492: 1.0, 494: 1.0, 498: 1.0, 500: 100, 508: 1.0, 509: 1.0, 510: 1.0, 512: 1.0, 513: 1.0, 515: 1.0, 516: 1.0, 517: 1.0, 530: 1.0}
edge_weights {(4, 106): 1.0, (5, 170): 1.0, (12, 83): 1.0, (12, 128): 1.0, (12, 157): 1.0, (12, 164): 1.0, (12, 191): 1.0, (13, 513): 1.0, (17, 75): 1.0, (17, 243): 1.0, (17, 316): 1.0, (17, 371): 1.0, (17, 405): 1.0, (18, 277): 1.0, (24, 312): 1.0, (24, 530): 1.0, (25, 117): 1.0, (25, 131): 1.0, (25, 322): 1.0, (31, 123): 1.0, (31, 440): 1.0, (33, 135): 1.0, (33, 345): 1.0, (38, 121): 1.0, (38, 157): 1.0, (38, 256): 1.0, (38, 357): 1.0, (38, 464): 1.0, (38, 473): 1.0, (42, 262): 1.0, (42, 390): 1.0, (42, 438): 1.0, (47, 156): 1.0, (50, 85): 1.0, (50, 332): 1.0, (53, 121): 1.0, (53, 157): 1.0, (53, 164): 1.0, (53, 239): 1.0, (53, 273): 1.0, (53, 274): 1.0, (53, 354): 1.0, (53, 464): 1.0, (55, 143): 1.0, (55, 294): 1.0, (56, 318): 1.0, (56, 322): 1.0, (56, 439): 1.0, (58, 268): 1.0, (58, 318): 1.0, (58, 325): 1.0, (72, 306): 1.0, (72, 450): 1.0, (72, 492): 1.0, (72, 512): 1.0, (75, 316): 1.0, (75, 371): 1.0, (75, 405): 1.0, (77, 440): 1.0, (83, 191): 1.0, (85, 332): 1.0, (86, 415): 1.0, (86, 468): 1.0, (86, 494): 1.0, (93, 455): 1.0, (98, 441): 1.0, (98, 516): 1.0, (109, 144): 1.0, (112, 346): 1.0, (115, 208): 1.0, (115, 276): 1.0, (117, 131): 1.0, (117, 322): 1.0, (121, 128): 1.0, (121, 164): 1.0, (121, 274): 1.0, (121, 406): 1.0, (121, 421): 1.0, (121, 464): 1.0, (122, 221): 1.0, (122, 262): 1.0, (122, 390): 1.0, (123, 245): 1.0, (128, 157): 1.0, (128, 164): 1.0, (128, 191): 1.0, (128, 354): 1.0, (128, 421): 1.0, (133, 435): 1.0, (135, 345): 1.0, (143, 228): 1.0, (143, 294): 1.0, (148, 455): 1.0, (157, 164): 1.0, (157, 421): 1.0, (157, 464): 1.0, (162, 220): 1.0, (162, 455): 1.0, (164, 191): 1.0, (164, 273): 1.0, (164, 354): 1.0, (164, 421): 1.0, (176, 481): 1.0, (180, 491): 1.0, (180, 509): 1.0, (191, 354): 1.0, (193, 220): 1.0, (204, 268): 1.0, (204, 329): 1.0, (204, 450): 1.0, (204, 492): 1.0, (220, 228): 1.0, (220, 261): 1.0, (220, 357): 1.0, (221, 243): 1.0, (221, 262): 1.0, (225, 303): 1.0, (239, 441): 1.0, (243, 262): 1.0, (244, 440): 1.0, (245, 329): 1.0, (245, 492): 1.0, (256, 406): 1.0, (256, 473): 1.0, (256, 508): 1.0, (261, 303): 1.0, (261, 357): 1.0, (261, 498): 1.0, (262, 390): 1.0, (273, 354): 1.0, (274, 340): 1.0, (274, 406): 1.0, (274, 425): 1.0, (274, 464): 1.0, (277, 392): 1.0, (277, 422): 1.0, (294, 478): 1.0, (295, 515): 1.0, (303, 498): 1.0, (306, 450): 1.0, (312, 463): 1.0, (316, 371): 1.0, (318, 322): 1.0, (318, 332): 1.0, (318, 439): 1.0, (322, 332): 1.0, (322, 439): 1.0, (324, 329): 1.0, (325, 333): 1.0, (329, 450): 1.0, (329, 492): 1.0, (340, 368): 1.0, (340, 425): 1.0, (340, 464): 1.0, (340, 481): 1.0, (341, 428): 1.0, (353, 456): 1.0, (357, 498): 1.0, (368, 464): 1.0, (368, 481): 1.0, (371, 405): 1.0, (392, 422): 1.0, (406, 425): 1.0, (406, 473): 1.0, (406, 508): 1.0, (415, 494): 1.0, (425, 464): 1.0, (425, 473): 1.0, (431, 438): 1.0, (431, 448): 1.0, (450, 492): 1.0, (463, 500): 1.0, (464, 481): 1.0, (468, 494): 1.0, (473, 508): 1.0, (491, 509): 1.0}
sortedNodes [4, 5, 12, 13, 17, 18, 24, 25, 29, 31, 33, 38, 40, 41, 42, 47, 50, 53, 55, 56, 58, 72, 75, 76, 77, 83, 85, 86, 93, 97, 98, 106, 109, 112, 115, 117, 121, 122, 123, 128, 131, 133, 135, 143, 144, 148, 156, 157, 162, 164, 170, 176, 180, 191, 193, 197, 201, 204, 208, 220, 221, 223, 225, 228, 239, 243, 244, 245, 256, 261, 262, 268, 273, 274, 276, 277, 280, 289, 294, 295, 303, 306, 312, 316, 318, 322, 324, 325, 328, 329, 332, 333, 340, 341, 344, 345, 346, 353, 354, 355, 357, 364, 368, 371, 373, 390, 392, 405, 406, 408, 412, 415, 417, 421, 422, 425, 428, 431, 435, 438, 439, 440, 441, 448, 450, 455, 456, 463, 464, 468, 473, 476, 478, 480, 481, 491, 492, 494, 498, 500, 508, 509, 510, 512, 513, 515, 516, 517, 530]

In [8]:
# Step 1: map original node labels to contiguous IDs 0..n-1
nodeMapping = {}
for newID, nodeID in enumerate(sortedNodes):
    # old label as key, new label as value
    nodeMapping[nodeID] = newID

print(nodeMapping)


{4: 0, 5: 1, 12: 2, 13: 3, 17: 4, 18: 5, 24: 6, 25: 7, 29: 8, 31: 9, 33: 10, 38: 11, 40: 12, 41: 13, 42: 14, 47: 15, 50: 16, 53: 17, 55: 18, 56: 19, 58: 20, 72: 21, 75: 22, 76: 23, 77: 24, 83: 25, 85: 26, 86: 27, 93: 28, 97: 29, 98: 30, 106: 31, 109: 32, 112: 33, 115: 34, 117: 35, 121: 36, 122: 37, 123: 38, 128: 39, 131: 40, 133: 41, 135: 42, 143: 43, 144: 44, 148: 45, 156: 46, 157: 47, 162: 48, 164: 49, 170: 50, 176: 51, 180: 52, 191: 53, 193: 54, 197: 55, 201: 56, 204: 57, 208: 58, 220: 59, 221: 60, 223: 61, 225: 62, 228: 63, 239: 64, 243: 65, 244: 66, 245: 67, 256: 68, 261: 69, 262: 70, 268: 71, 273: 72, 274: 73, 276: 74, 277: 75, 280: 76, 289: 77, 294: 78, 295: 79, 303: 80, 306: 81, 312: 82, 316: 83, 318: 84, 322: 85, 324: 86, 325: 87, 328: 88, 329: 89, 332: 90, 333: 91, 340: 92, 341: 93, 344: 94, 345: 95, 346: 96, 353: 97, 354: 98, 355: 99, 357: 100, 364: 101, 368: 102, 371: 103, 373: 104, 390: 105, 392: 106, 405: 107, 406: 108, 408: 109, 412: 110, 415: 111, 417: 112, 421: 113, 422: 114, 425: 115, 428: 116, 431: 117, 435: 118, 438: 119, 439: 120, 440: 121, 441: 122, 448: 123, 450: 124, 455: 125, 456: 126, 463: 127, 464: 128, 468: 129, 473: 130, 476: 131, 478: 132, 480: 133, 481: 134, 491: 135, 492: 136, 494: 137, 498: 138, 500: 139, 508: 140, 509: 141, 510: 142, 512: 143, 513: 144, 515: 145, 516: 146, 517: 147, 530: 148}
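
With the mapping in place, a plausible Step 2 (a sketch only; the real data conversion happens inside the PaToH wrapper) is to relabel the edge list into the contiguous ID space:

# Step 2 (sketch): relabel edges into the contiguous 0..n-1 ID space that a
# C partitioning library typically expects.
remappedEdges = [(nodeMapping[u], nodeMapping[v]) for u, v in Gsub.edges()]
print(remappedEdges[:10])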

In [ ]: