In [3]:
import os
import csv
import platform
import pandas as pd
import networkx as nx
from graph_partitioning import GraphPartitioning, utils

run_metrics = True

cols = ["WASTE", "CUT RATIO", "EDGES CUT", "TOTAL COMM VOLUME", "Qds", "CONDUCTANCE", "MAXPERM", "RBSE", "NMI", "FSCORE", "FSCORE RELABEL IMPROVEMENT", "LONELINESS"]

pwd = %pwd

config = {

    "DATA_FILENAME": os.path.join(pwd, "data", "predition_model_tests", "network", "network_$$.txt"),
    "OUTPUT_DIRECTORY": os.path.join(pwd, "output"),

    # Set which algorithm is run for the PREDICTION MODEL.
    # Either: 'FENNEL' or 'SCOTCH'
    "PREDICTION_MODEL_ALGORITHM": "FENNEL",

    # Alternatively, read the prediction model from an input file.
    # Set to empty to generate the prediction model using the algorithm value above.
    "PREDICTION_MODEL": "",

    
    "PARTITIONER_ALGORITHM": "FENNEL",

    # File containing simulated arrivals. This is used to simulate nodes
    # arriving at the shelters. Nodes are represented by line number; a value
    # of 1 means the node has arrived, while a value of 0 means the node has
    # not arrived or does not need a shelter.
    "SIMULATED_ARRIVAL_FILE": os.path.join(pwd,
                                           "data",
                                           "predition_model_tests",
                                           "#scenario#",
                                           "simulated_arrival_list",
                                           "percentage_of_prediction_correct_££",
                                           "arrival_££_$$.txt"
                                          ),

    # File containing the prediction of each node arriving. This differs from
    # the simulated arrivals: the values in this file are known before the disaster.
    "PREDICTION_LIST_FILE": os.path.join(pwd,
                                         "data",
                                         "predition_model_tests",
                                         "#scenario#",
                                         "prediction_list",
                                         "prediction_$$.txt"
                                        ),

    # File containing the geographic location of each node, in "x,y" format.
    "POPULATION_LOCATION_FILE": os.path.join(pwd,
                                             "data",
                                             "predition_model_tests",
                                             "coordinates",
                                             "coordinates_$$.txt"
                                            ),

    # Number of shelters
    "num_partitions": 4,

    # The number of restreaming iterations used when building the prediction model
    "num_iterations": 10,

    # Percentage of the prediction model to use before it is discarded.
    # When set to 0, the prediction model is discarded immediately (useful for one-shot)
    "prediction_model_cut_off": 0.0,

    # Alpha value used in one-shot (when restream_batches set to 1)
    "one_shot_alpha": 0.5,
    
    "use_one_shot_alpha" : False,
    
    # Number of arrivals to batch before recalculating alpha and restreaming.
    "restream_batches": 50,

    # When the batch size is reached: if set to True, each node is assigned
    # individually, first in first out. If set to False, the entire batch is
    # processed and emptied before working on the next batch.
    "sliding_window": False,

    # Create virtual nodes based on prediction model
    "use_virtual_nodes": False,

    # Virtual nodes: edge weight
    "virtual_edge_weight": 1.0,
    
    # Loneliness score parameter. Used when scoring a partition by how many
    # lonely nodes exist.
    "loneliness_score_param": 1.2,

    "compute_metrics_enabled": True,

    ####
    # GRAPH MODIFICATION FUNCTIONS

    # Also enables the edge calculation function.
    "graph_modification_functions": True,

    # If set, the node weight is set to 100 if the node arrives at the shelter,
    # otherwise the node is removed from the graph.
    "alter_arrived_node_weight_to_100": False,

    # Uses generalized additive models (GAMs) from R to predict nodes that
    # have not arrived. This sets the node weight on unarrived nodes to the
    # prediction given by a GAM.
    # Requires POPULATION_LOCATION_FILE to be set.
    "alter_node_weight_to_gam_prediction": False,
    
    # Enables edge expansion when graph_modification_functions is set to True
    "edge_expansion_enabled": True,

    # The value of 'k' used in the GAM is the number of arrived nodes, capped
    # at this maximum value.
    "gam_k_value": 100,

    # Alter the edge weight for nodes that haven't arrived. This is a way to
    # de-emphasise the prediction model for the unknown nodes.
    "prediction_model_emphasis": 1.0,
    
    # Applies the PREDICTION_LIST_FILE node weights to the nodes in the graph
    # while the prediction model is being computed, then removes these weights
    # for the cut-off and batch-arrival modes.
    "apply_prediction_model_weights": True,

    "SCOTCH_LIB_PATH": os.path.join(pwd, "libs/scotch/macOS/libscotch.dylib")
    if 'Darwin' in platform.system()
    else "/usr/local/lib/libscotch.so",
    
    # Path to the PaToH shared library
    "PATOH_LIB_PATH": os.path.join(pwd, "libs/patoh/lib/macOS/libpatoh.dylib")
    if 'Darwin' in platform.system()
    else os.path.join(pwd, "libs/patoh/lib/linux/libpatoh.so"),
    
    "PATOH_ITERATIONS": 5,
        
    # Expansion modes: 'avg_node_weight', 'total_node_weight',
    # 'smallest_node_weight', 'largest_node_weight'
    # Append '_squared' or '_sqrt' to any of the above for weight^2 or
    # sqrt(weight), e.g. 'avg_node_weight_squared'
    "PATOH_HYPEREDGE_EXPANSION_MODE": 'no_expansion',
    
    # Edge Expansion: average, total, minimum, maximum, product, product_squared, sqrt_product
    "EDGE_EXPANSION_MODE" : 'total',
    
    # Whether nodes should be reordered using a centrality metric for optimal
    # node assignments in batch mode. This is specific to FENNEL; at the
    # moment, leverage centrality is used to compute the new node order.
    "FENNEL_NODE_REORDERING_ENABLED": False,
    
    # Whether the friend-of-a-friend (FOAF) scoring system is active during
    # FENNEL partitioning. FOAF uses information about a node's friends to
    # determine the best partition when the node arrives at a shelter and no
    # shelter yet holds any of its friends.
    "FENNEL_FRIEND_OF_A_FRIEND_ENABLED": False,
    
    # Alters how much information to print. Keep it at 1 for this notebook.
    # 0 - will print nothing, useful for batch operations.
    # 1 - prints basic information on assignments and operations.
    # 2 - prints more information as it batches arrivals.
    "verbose": 1
}
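
# The '$$', '££' and '#scenario#' tokens in the paths above are placeholders
# that get filled in per run (see the next cell). A minimal sketch of that
# substitution; the helper name is illustrative, not part of the library API.
def _fill_template(path, network_id, correctness=None, scenario=None):
    path = path.replace('$$', str(network_id))        # network index
    if correctness is not None:
        path = path.replace('££', str(correctness))   # prediction-correctness %
    if scenario is not None:
        path = path.replace('#scenario#', scenario)   # scenario directory
    return path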

gp = GraphPartitioning(config)
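
# For reference: FENNEL greedily assigns an arriving node v to the partition
# P_i maximising |N(v) ∩ P_i| - alpha * gamma * |P_i|^(gamma - 1)
# (Tsourakakis et al., WSDM 2014). A minimal sketch of that score, written
# against networkx and independent of this library's internals; the function
# name is illustrative.
def fennel_gain(G, v, partition_nodes, alpha, gamma=1.5):
    # neighbours of v already placed in this partition, minus the size penalty
    return (len(set(G.neighbors(v)) & set(partition_nodes))
            - alpha * gamma * len(partition_nodes) ** (gamma - 1))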

# Optional: shuffle the order of nodes arriving (requires `import random`).
# Arrival order should not be shuffled if using GAM to alter node weights.
#random.shuffle(gp.arrival_order)

%pylab inline


Populating the interactive namespace from numpy and matplotlib

In [4]:
import numpy as np  # use numpy directly for mean/std (scipy.mean was a deprecated alias)
from copy import deepcopy
import time

iterations = 100

#pm_cutoff = []
#for i in range(0, 21):
#    pm_cutoff.append(i * 0.05)

#virtual_edge_weight = [0.5, 1.0, 1.5, 2.0, 2.5, 3.0]

# Percentage of the prediction that is correct; values follow the dataset
# directory names (percentage_of_prediction_correct_££)
correctednesses = [100, 80, 60, 40, 20, 0]
scenarios = ['dataset_1_shift_rotate', 'dataset_2_shift_move',
             'dataset_3_increase_prediction_perc', 'dataset_4_increase_total_perc',
             'dataset_5_increase_total_perc_equals_total_prediction_prec',
             'dataset_6_randomize']

scenario = scenarios[0]

for correctedness in correctednesses:
    metricsDataPrediction = []
    metricsDataAssign = []
    
    # batches of 50 - use 10 restreaming iterations on FENNEL
    config['num_iterations'] = 10
    config['PREDICTION_MODEL'] = os.path.join(pwd, "data", "predition_model_tests", "network", "pm", "network_pm_$$.txt")


    print('Scenario', scenario)
    elapsed_times = []
    for i in range(0, iterations):
        # print progress every 20 networks
        if (i % 20) == 0:
            print('Scenario', scenario, correctedness, 'Iteration', str(i))
        
        conf = deepcopy(config)
        
        conf["DATA_FILENAME"] = conf["DATA_FILENAME"].replace('$$', str(i + 1))
        
        conf["SIMULATED_ARRIVAL_FILE"] = conf["SIMULATED_ARRIVAL_FILE"].replace('$$', str(i + 1))
        conf["SIMULATED_ARRIVAL_FILE"] = conf["SIMULATED_ARRIVAL_FILE"].replace('££', str(correctedness))
        conf["SIMULATED_ARRIVAL_FILE"] = conf["SIMULATED_ARRIVAL_FILE"].replace('#scenario#', str(scenario))

        conf["PREDICTION_LIST_FILE"] = conf["PREDICTION_LIST_FILE"].replace('$$', str(i + 1))
        conf["PREDICTION_LIST_FILE"] = conf["PREDICTION_LIST_FILE"].replace('#scenario#', scenario)

        conf["POPULATION_LOCATION_FILE"] = conf["POPULATION_LOCATION_FILE"].replace('$$', str(i + 1))
        conf['PREDICTION_MODEL'] = conf['PREDICTION_MODEL'].replace('$$', str(i + 1))
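        # Equivalently, using the _fill_template sketch from the first cell:
        # conf["SIMULATED_ARRIVAL_FILE"] = _fill_template(
        #     config["SIMULATED_ARRIVAL_FILE"], i + 1, correctedness, scenario)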


        with GraphPartitioning(conf) as gp:
            gp.verbose = 0
            
            start_time = time.time()
            
            gp.load_network()
            gp.init_partitioner()

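            # Three-phase run: build the prediction model, apply the arrival
            # cut-off, then stream the simulated arrivals in batches. Each call
            # returns the metric rows computed so far; only the final result is
            # kept below.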
            m = gp.prediction_model()
            m = gp.assign_cut_off()
            m = gp.batch_arrival()

            elapsed_time = time.time() - start_time
            elapsed_times.append(elapsed_time)
            
            metricsDataPrediction.append(m[-1])  # keep only the final metrics row
    
    print('Average Elapsed Time =', np.mean(elapsed_times))

    # Emit one CSV-style row per metric:
    #   NAME,<scenario>_<correctedness>,<mean>,<std>,<one value per network...>
    # Column indices follow the positional order of the per-batch metrics row
    # (see `cols` in the first cell). WASTE (column 0) is collected in the row
    # but was not printed in the original runs.
    print_metrics = [('CUT_RATIO', 1), ('EC', 2), ('TCV', 3), ('LONELINESS', 11),
                     ('QDS', 4), ('CONDUCTANCE', 5), ('MAXPERM', 6), ('RBSE', 7),
                     ('NMI', 8), ('FSCORE', 9), ('FSCORE_IMPROVE', 10)]

    label = scenario + '_' + str(correctedness)
    for name, col in print_metrics:
        data = [row[col] for row in metricsDataPrediction]
        print(','.join([name, label, str(np.mean(data)), str(np.std(data))]
                       + [str(v) for v in data]))
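
    # Optional sketch (not executed in the original runs): collect the same
    # summary statistics into a pandas DataFrame via the pd import from the
    # first cell; 'summary' is an illustrative name.
    # summary = pd.DataFrame(
    #     [(name, label,
    #       np.mean([row[col] for row in metricsDataPrediction]),
    #       np.std([row[col] for row in metricsDataPrediction]))
    #      for name, col in print_metrics],
    #     columns=['metric', 'label', 'mean', 'std'])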


Scenario dataset_1_shift_rotate
Scenario dataset_1_shift_rotate 100 Iteration 0
Average Elapsed Time = 7.00355005264
CUT_RATIO,dataset_1_shift_rotate_100,0.175,0.0,0.175
EC,dataset_1_shift_rotate_100,105.0,0.0,105
TCV,dataset_1_shift_rotate_100,141.0,0.0,141
LONELINESS,dataset_1_shift_rotate_100,0.756318276461,0.0,0.756318276461
QDS,dataset_1_shift_rotate_100,0.472432948788,0.0,0.4724329487876044
CONDUCTANCE,dataset_1_shift_rotate_100,0.0603615377181,0.0,0.06036153771810973
MAXPERM,dataset_1_shift_rotate_100,0.337412637011,0.0,0.33741263701067625
RBSE,dataset_1_shift_rotate_100,0.0604982206406,0.0,0.060498220640569395
NMI,dataset_1_shift_rotate_100,0.0896011832858,0.0,0.0896011832858
FSCORE,dataset_1_shift_rotate_100,0.195444407405,0.0,0.195444407405
FSCORE_IMPROVE,dataset_1_shift_rotate_100,0.120300929527,0.0,0.120300929527
Scenario dataset_1_shift_rotate
Scenario dataset_1_shift_rotate 80 Iteration 0
Average Elapsed Time = 6.46811914444
CUT_RATIO,dataset_1_shift_rotate_80,0.173310225303,0.0,0.173310225303
EC,dataset_1_shift_rotate_80,100.0,0.0,100
TCV,dataset_1_shift_rotate_80,137.0,0.0,137
LONELINESS,dataset_1_shift_rotate_80,0.748523839873,0.0,0.748523839873
QDS,dataset_1_shift_rotate_80,0.463491961611,0.0,0.4634919616112253
CONDUCTANCE,dataset_1_shift_rotate_80,0.0485971244165,0.0,0.04859712441648762
MAXPERM,dataset_1_shift_rotate_80,0.365544695971,0.0,0.36554469597069594
RBSE,dataset_1_shift_rotate_80,0.032967032967,0.0,0.03296703296703297
NMI,dataset_1_shift_rotate_80,0.142003479742,0.0,0.142003479742
FSCORE,dataset_1_shift_rotate_80,0.191570288182,0.0,0.191570288182
FSCORE_IMPROVE,dataset_1_shift_rotate_80,0.252421723691,0.0,0.252421723691
Scenario dataset_1_shift_rotate
Scenario dataset_1_shift_rotate 60 Iteration 0
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-4-7a507b625894> in <module>()
     59             m = gp.prediction_model()
     60             m = gp.assign_cut_off()
---> 61             m = gp.batch_arrival()
     62 
     63             elapsed_time = time.time() - start_time

/Users/voreno/Development/CSAP/graph-partitioning/graph_partitioning/graph_partitioning.py in batch_arrival(self)
    462             # batch processing
    463             if self.restream_batches == len(batch_arrived):
--> 464                 run_metrics += self.process_batch(batch_arrived)
    465                 if not self.sliding_window:
    466                     batch_arrived = []

/Users/voreno/Development/CSAP/graph-partitioning/graph_partitioning/graph_partitioning.py in process_batch(self, batch_arrived, assign_all)
    577                     self.assignments[i] = -1
    578 
--> 579         return [self._print_score(Gsub)]
    580 
    581 

/Users/voreno/Development/CSAP/graph-partitioning/graph_partitioning/graph_partitioning.py in _print_score(self, graph)
    275         #q_qds_conductance = utils.louvainModularityComQuality(graph, self.assignments, self.num_partitions)
    276         # non-overlapping metrics
--> 277         q_qds_conductance = utils.infomapModularityComQuality(graph, self.assignments, self.num_partitions)
    278 
    279         loneliness = utils.loneliness_score_wavg(graph, self.loneliness_score_param, self.assignments, self.num_partitions)

/Users/voreno/Development/CSAP/graph-partitioning/graph_partitioning/utils.py in infomapModularityComQuality(G, assignments, num_partitions)
    297             retval = subprocess.call(
    298                 args, cwd=com_qual_path,
--> 299                 stdout=logwriter, stderr=subprocess.STDOUT)
    300 
    301         n_nodes = len(nodes)

/usr/local/Cellar/python3/3.6.0/Frameworks/Python.framework/Versions/3.6/lib/python3.6/subprocess.py in call(timeout, *popenargs, **kwargs)
    267     with Popen(*popenargs, **kwargs) as p:
    268         try:
--> 269             return p.wait(timeout=timeout)
    270         except:
    271             p.kill()

/usr/local/Cellar/python3/3.6.0/Frameworks/Python.framework/Versions/3.6/lib/python3.6/subprocess.py in wait(self, timeout, endtime)
   1434                         if self.returncode is not None:
   1435                             break  # Another thread waited.
-> 1436                         (pid, sts) = self._try_wait(0)
   1437                         # Check the pid and loop as waitpid has been known to
   1438                         # return 0 even without WNOHANG in odd situations.

/usr/local/Cellar/python3/3.6.0/Frameworks/Python.framework/Versions/3.6/lib/python3.6/subprocess.py in _try_wait(self, wait_flags)
   1381             """All callers to this function MUST hold self._waitpid_lock."""
   1382             try:
-> 1383                 (pid, sts) = os.waitpid(self.pid, wait_flags)
   1384             except ChildProcessError:
   1385                 # This happens if SIGCLD is set to be ignored or waiting

KeyboardInterrupt: 

In [ ]: