In [ ]:
import os
import csv
import platform
import pandas as pd
import networkx as nx
from graph_partitioning import GraphPartitioning, utils

run_metrics = True

cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "Qds", "CONDUCTANCE", "MAXPERM", "NMI", "FSCORE", "FSCORE RELABEL IMPROVEMENT", "LONELINESS"]
#cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "Q", "Qds", "CONDUCTANCE", "LONELINESS", "NETWORK PERMANENCE", "NORM. MUTUAL INFO", "EDGE CUT WEIGHT", "FSCORE", "FSCORE RELABEL IMPROVEMENT"]
#cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "MODULARITY", "LONELINESS", "NETWORK PERMANENCE", "NORM. MUTUAL INFO", "EDGE CUT WEIGHT", "FSCORE", "FSCORE RELABEL IMPROVEMENT"]

pwd = %pwd

config = {

    "DATA_FILENAME": os.path.join(pwd, "data", "predition_model_tests", "network", "network_$$.txt"),
    "OUTPUT_DIRECTORY": os.path.join(pwd, "output"),

    # Set which algorithm is run for the PREDICTION MODEL.
    # Either: 'FENNEL' or 'SCOTCH'
    "PREDICTION_MODEL_ALGORITHM": "FENNEL",

    # Alternatively, read an input file for the prediction model.
    # Set to empty to generate prediction model using algorithm value above.
    "PREDICTION_MODEL": "",

    
    "PARTITIONER_ALGORITHM": "FENNEL",

    # File containing simulated arrivals, used when simulating nodes arriving
    # at the shelter. Nodes are identified by line number; a value of 1 means
    # the node has arrived, and a value of 0 means the node has not arrived
    # (or does not need a shelter).
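    # For example, a file whose first three lines are "1", "0", "1" marks the
    # nodes on lines 1 and 3 as arrived. (A loader sketch for these input
    # files follows this cell.)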
    "SIMULATED_ARRIVAL_FILE": os.path.join(pwd,
                                           "data",
                                           "predition_model_tests",
                                           "dataset_1_shift_rotate",
                                           "simulated_arrival_list",
                                           "percentage_of_prediction_correct_100",
                                           "arrival_100_$$.txt"
                                          ),

    # File containing the prediction of a node arriving. This differs from the
    # simulated arrivals: the values in this file are known before the disaster.
    "PREDICTION_LIST_FILE": os.path.join(pwd,
                                         "data",
                                         "predition_model_tests",
                                         "dataset_1_shift_rotate",
                                         "prediction_list",
                                         "prediction_$$.txt"
                                        ),

    # File containing the geographic location of each node, in "x,y" format.
    "POPULATION_LOCATION_FILE": os.path.join(pwd,
                                             "data",
                                             "predition_model_tests",
                                             "coordinates",
                                             "coordinates_$$.txt"
                                            ),

    # Number of shelters
    "num_partitions": 4,

    # The number of iterations when making prediction model
    "num_iterations": 1,

    # Percentage of the prediction model to use before discarding it.
    # When set to 0, the prediction model is discarded immediately,
    # which is useful for one-shot mode.
    "prediction_model_cut_off": 0.0,

    # Alpha value used in one-shot (when restream_batches set to 1)
    "one_shot_alpha": 0.5,
    
    "use_one_shot_alpha": False,
    
    # Number of arrivals to batch before recalculating alpha and restreaming.
    "restream_batches": 50,

    # When the batch size is reached: if set to True, each node is assigned
    # individually, first in first out. If set to False, the entire batch is
    # processed and emptied before work starts on the next batch.
    "sliding_window": False,

    # Create virtual nodes based on prediction model
    "use_virtual_nodes": False,

    # Virtual nodes: edge weight
    "virtual_edge_weight": 1.0,
    
    # Loneliness score parameter. Used when scoring a partition by how many
    # lonely nodes exist.
    "loneliness_score_param": 1.2,


    ####
    # GRAPH MODIFICATION FUNCTIONS

    # Master switch for the graph modification functions below; also enables
    # the edge calculation function.
    "graph_modification_functions": True,

    # If set, the node weight is set to 100 if the node arrives at the shelter,
    # otherwise the node is removed from the graph.
    "alter_arrived_node_weight_to_100": False,

    # Uses generalized additive models (GAMs) from R to predict nodes that
    # have not arrived. This sets the node weight of unarrived nodes to the
    # prediction given by a GAM.
    # Requires POPULATION_LOCATION_FILE to be set.
    "alter_node_weight_to_gam_prediction": False,
    
    # Enables edge expansion when graph_modification_functions is set to True.
    "edge_expansion_enabled": True,

    # The value of 'k' used in the GAM is the number of nodes arrived so far,
    # capped at this maximum value.
    "gam_k_value": 100,

    # Alter the edge weight for nodes that haven't arrived. This is a way to
    # de-emphasise the prediction model for the unknown nodes.
    "prediction_model_emphasis": 1.0,
    
    # Applies the PREDICTION_LIST_FILE node weights to the nodes in the graph
    # while the prediction model is computed, then removes the weights for
    # the cut-off and batch-arrival modes.
    "apply_prediction_model_weights": True,
    
    "compute_metrics_enabled": True,

    "SCOTCH_LIB_PATH": os.path.join(pwd, "libs/scotch/macOS/libscotch.dylib")
    if 'Darwin' in platform.system()
    else "/usr/local/lib/libscotch.so",
    
    # Path to the PaToH shared library
    "PATOH_LIB_PATH": os.path.join(pwd, "libs/patoh/lib/macOS/libpatoh.dylib")
    if 'Darwin' in platform.system()
    else os.path.join(pwd, "libs/patoh/lib/linux/libpatoh.so"),
    
    "PATOH_ITERATIONS": 5,
        
    # Expansion modes: 'avg_node_weight', 'total_node_weight',
    # 'smallest_node_weight', 'largest_node_weight'.
    # Append '_squared' or '_sqrt' to any of the above for weight^2 or
    # sqrt(weight), e.g. 'avg_node_weight_squared'.
    "PATOH_HYPEREDGE_EXPANSION_MODE": 'no_expansion',
    
    # Edge expansion modes: 'average', 'total', 'minimum', 'maximum',
    # 'product', 'product_squared', 'sqrt_product'.
    # (A sketch of the assumed semantics follows this config dict.)
    "EDGE_EXPANSION_MODE": 'total',
    
    # Whether nodes should be reordered using a centrality metric for better
    # node assignments in batch mode. This is specific to FENNEL; at the
    # moment leverage centrality is used to compute the new node order.
    "FENNEL_NODE_REORDERING_ENABLED": False,

    # The node ordering scheme: PII_LH (political index), LEVERAGE_HL,
    # DEGREE_HL, BOTTLENECK_HL
    "FENNEL_NODE_REORDERING_SCHEME": 'BOTTLENECK_HL',
    
    # Whether the Friend-of-a-Friend scoring system is active during FENNEL
    # partitioning. FOAF uses information about a node's friends to determine
    # the best partition when the node arrives at a shelter and none of its
    # friends have arrived at any shelter yet.
    "FENNEL_FRIEND_OF_A_FRIEND_ENABLED": False,
    
    # Alters how much information to print. Keep it at 1 for this notebook.
    # 0 - will print nothing, useful for batch operations.
    # 1 - prints basic information on assignments and operations.
    # 2 - prints more information as it batches arrivals.
    "verbose": 1
}
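
# A sketch (assumed semantics, not the library's implementation) of how an
# edge weight could be derived from its two endpoint node weights under the
# EDGE_EXPANSION_MODE values listed above.
import math

def expand_edge_weight(w_u, w_v, mode):
    if mode == 'average':
        return (w_u + w_v) / 2.0
    if mode == 'total':
        return w_u + w_v
    if mode == 'minimum':
        return min(w_u, w_v)
    if mode == 'maximum':
        return max(w_u, w_v)
    if mode == 'product':
        return w_u * w_v
    if mode == 'product_squared':
        return (w_u * w_v) ** 2
    if mode == 'sqrt_product':
        return math.sqrt(w_u * w_v)
    raise ValueError('unknown EDGE_EXPANSION_MODE: ' + mode)

#expand_edge_weight(2.0, 3.0, 'total')  # -> 5.0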

#gp = GraphPartitioning(config)

# Optional: shuffle the order of nodes arriving
# Arrival order should not be shuffled if using GAM to alter node weights
#random.shuffle(gp.arrival_order)

%pylab inline


Populating the interactive namespace from numpy and matplotlib
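
For reference, here is a minimal loader sketch (an assumption based on the config comments above, not part of the graph_partitioning pipeline) showing how the three per-network input files could be read. load_inputs and its parsing are hypothetical.

In [ ]:
def load_inputs(conf, network_id):
    # Substitute the network id for the '$$' placeholder, as the next cell does.
    def path(key):
        return conf[key].replace('$$', str(network_id))

    # SIMULATED_ARRIVAL_FILE: one 0/1 flag per line, node given by line number.
    with open(path('SIMULATED_ARRIVAL_FILE')) as f:
        arrivals = [int(line) for line in f]

    # PREDICTION_LIST_FILE: one prediction value per line, known pre-disaster.
    with open(path('PREDICTION_LIST_FILE')) as f:
        predictions = [float(line) for line in f]

    # POPULATION_LOCATION_FILE: one "x,y" coordinate pair per line.
    with open(path('POPULATION_LOCATION_FILE')) as f:
        coordinates = [tuple(map(float, line.split(','))) for line in f]

    return arrivals, predictions, coordinates

#arrivals, predictions, coordinates = load_inputs(config, 1)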

In [ ]:
import numpy as np
from copy import deepcopy

iterations = 1000 # the number of individual networks to run

# BOTTLENECK 1 Restream, no FOAF, Lonely after

# Change these variables:
ordering_enabled_mode = [True]  # [False, True]


for mode in ordering_enabled_mode:
#for mode in range(1, 51):

    metricsDataPrediction = []
    metricsDataAssign = []
    
    config['FENNEL_NODE_REORDERING_ENABLED'] = mode
    config['FENNEL_NODE_REORDERING_SCHEME'] = 'BOTTLENECK_HL'
    config['FENNEL_FRIEND_OF_A_FRIEND_ENABLED'] = False
    
    print('Mode', mode)
    for i in range(0, iterations):
        if (i % 50) == 0:
            print('Mode', mode, 'Iteration', str(i))
        
        conf = deepcopy(config)

        #if mode == 'no_expansion':
        #    config['edge_expansion_enabled'] = False
                    
        #conf["DATA_FILENAME"] =  os.path.join(pwd, "data", "predition_model_tests", "network", "network_" + str(i + 1) + ".txt")
        conf["DATA_FILENAME"] = conf["DATA_FILENAME"].replace('$$', str(i + 1))
        conf["SIMULATED_ARRIVAL_FILE"] = conf["SIMULATED_ARRIVAL_FILE"].replace('$$', str(i + 1))
        conf["PREDICTION_LIST_FILE"] = conf["PREDICTION_LIST_FILE"].replace('$$', str(i + 1))
        conf["POPULATION_LOCATION_FILE"] = conf["POPULATION_LOCATION_FILE"].replace('$$', str(i + 1))
        conf["compute_metrics_enabled"] = False
        conf['PREDICTION_MODEL'] = conf['PREDICTION_MODEL'].replace('$$', str(i + 1))



        #print(i, conf)
        #print('config', config)
        
        with GraphPartitioning(conf) as gp:
            #gp = GraphPartitioning(config)
            gp.verbose = 0
            gp.load_network()
            gp.init_partitioner()

            m = gp.prediction_model()
            m = gp.assign_cut_off()
            m = gp.batch_arrival()

            Gsub = gp.G.subgraph(gp.nodes_arrived)
            gp.compute_metrics_enabled = True
            m = [gp._print_score(Gsub)]
            gp.compute_metrics_enabled = False            


            # Keep the metrics row for the final state (the last entry in m).
            metricsDataPrediction.append(m[-1])


    # Emit one CSV row per metric in the form:
    # NAME,label,mean,std,value_1,...,value_N
    # Column indices follow the metric order returned for each network
    # (see `cols` in the first cell). A parsing sketch for these rows
    # follows this cell's output.
    metric_columns = [
        ('WASTE', 0),
        ('CUT_RATIO', 1),
        ('EC', 2),
        ('TCV', 3),
        ('LONELINESS', 10),
        ('QDS', 4),
        ('CONDUCTANCE', 5),
        ('MAXPERM', 6),
        ('NMI', 7),
        ('FSCORE', 8),
        ('FSCORE_IMPROVE', 9),
    ]
    label = 'centrality_enabled_mode_' + str(config['FENNEL_NODE_REORDERING_ENABLED'])

    for name, col in metric_columns:
        values = [row[col] for row in metricsDataPrediction]
        fields = [name, label, str(np.mean(values)), str(np.std(values))]
        fields += [str(v) for v in values]
        print(','.join(fields))


Mode True
Mode True Iteration 0
Mode True Iteration 50
Mode True Iteration 100
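
The rows printed above are plain CSV (NAME,label,mean,std,value_1,...,value_N). Below is a hypothetical post-processing sketch, assuming the printed rows were captured to a file such as output/metrics.csv; load_metric_rows is not part of the pipeline.

In [ ]:
import pandas as pd

def load_metric_rows(path):
    # Parse rows of the form NAME,label,mean,std,v1,...,vN into a DataFrame.
    rows = []
    with open(path) as f:
        for line in f:
            parts = line.strip().split(',')
            if len(parts) < 4:
                continue
            rows.append({
                'metric': parts[0],
                'label': parts[1],
                'mean': float(parts[2]),
                'std': float(parts[3]),
                'num_values': len(parts) - 4,
            })
    return pd.DataFrame(rows)

#df = load_metric_rows(os.path.join(config['OUTPUT_DIRECTORY'], 'metrics.csv'))
#df.sort_values(['metric', 'label'])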

In [ ]: