For Thesis

DRAFT: NOT YET INTEGRATED WITH THE CODE ON GITHUB

1. Importing


In [1]:
# import ipyparallel

# client=ipyparallel.Client()
# client.ids

In [2]:
# %%px --local
%matplotlib inline
from matplotlib import pyplot as plt
import matplotlib.colors as colors
from matplotlib.pylab import *
import networkx as nx
import pandas as pd
import geopandas as gp
import copy
from rasterstats import zonal_stats
from __future__ import division

import sys
sys.path.append('C:/Users/Lenovo/Desktop/bangladesh_network')

import network_prep as net_p
import network_visualization as net_v
import od_prep as od_p
import weighted_betweenness as betw_w


import ema_workbench

import os

2. Network creation


In [3]:
# %%px --local
# userpath = "D:/Bramka's File/Bangladesh Project/"
# networkfile = 'Data preparation/BGD_road_network/Road_BGD_Class1_2_f_connected_v4.shp'
# centroidfile = 'Data preparation/BGD_Districtdata_Citycentroid/BGD_Districtdata_Citycentroid_level2_v02.shp'
# centroid = userpath+centroidfile
# network = userpath+networkfile
import os
filepath = os.getcwd()
#note: only the second assignment takes effect; the rmms_v11_7 shapefile is the network actually used
# network = filepath+'\\Road_BGD_Class1_2_f_connected_v4.shp'
network = filepath+'\\rmms_v11_7_waterway_noZ2.shp'
centroid = filepath+'\\BGD_Districtdata_Citycentroid_level2_v02.shp'


gdf_points, gdf_node_pos, gdf = net_p.prepare_centroids_network(centroid, network)

gdf['penalty'] = gdf.apply(lambda row: 15 if (row['cross']==1 and row['mode']=='road') else 0, axis=1)
gdf['length'] = gdf['length'] + gdf['penalty']

#simplify the graph
G2_new = net_p.gdf_to_simplified_multidigraph(gdf_node_pos, gdf, undirected=True)

#change to simple Graph object type
G2_new_tograph = net_p.multigraph_to_graph(G2_new)

#keep the main connected component (the one with more than 100 edges)
for g in nx.connected_component_subgraphs(G2_new_tograph):
    if len(list(g.edges())) > 100:
        G3 = g
        
G2_new_tograph = G3.copy()
#change the simplified transport network back to GeoDataFrame
gdf2 = net_p.graph_to_df(G2_new_tograph)

filepath = os.getcwd()
adm_csv = filepath+'\\District_level_data_v7.csv'
#adm_csv = filepath+'\\District_level_data_v5.csv'
adm_shp = filepath+'\\BGD_adm2.shp'
district_gdf2 = net_p.prepare_adm_background(adm_csv, adm_shp, ['Code', 'Population', 'Population_M', 'Total_export',
                                                               'Jute_mill', 'Flour_mill', 'Tot_Garment_Factory', 'Household',
                                                               'Land_throughput', 'SteelBricks_exp_ton', 'Food_exp_ton',
                                                               'Jutextile_exp_ton', 'Garment_exp_ton', 'Textile_loc_ton',
                                                               'Wheat_loc_ton', 'RawJute_loc_ton', 'Foods_loc_ton',
                                                               'Nonfoods_loc_ton'])

#embed district data to gdf_points

#read district data
district_data = pd.read_csv(adm_csv)

#rename to HASC_2
district_data.rename(columns={'Code':'HASC_2'}, inplace=True)

#merge them
gdf_points = pd.merge(gdf_points,district_data,on='HASC_2',how='outer')


C:\Users\Lenovo\Anaconda2\lib\site-packages\pandas\core\frame.py:2834: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  **kwargs)

Non-EMA preparation


In [4]:
# %%px --local
#Alternative deterrence functions

def det_func_basic(distance):
    #distance is an n x n DataFrame of euclidean distances between all centroids
    #inverse squared-distance deterrence, scaled by 100,000
    distance = distance*distance
    distance = 100000/distance
    #zero-distance pairs (the diagonal) become infinite after the division; reset them to 0
    for i in list(distance.columns):
        for j in list(distance.index.values):
            if distance.loc[j, i] > 9999999:
                distance.loc[j, i] = 0
    return distance

def det_func_prob(distance, beta=0.05):
    #distance is an n x n DataFrame of euclidean distances between all centroids
    #negative-exponential deterrence: each row is normalised so that it sums to 100,000
    det_df = pd.DataFrame()
    for i, row in distance.iterrows():
        exp_row = np.exp(row * (-beta))
        exp_row = 100000 * (exp_row / exp_row.sum())
        det_df = det_df.append(exp_row, ignore_index=True)

    #zero out the diagonal (intra-district pairs)
    det_df.values[[np.arange(len(det_df))]*2] = 0
    return det_df

def det_func_sp(distance, G=G2_new_tograph, gdf_points=gdf_points, weight='length'):
    #deterrence based on network shortest-path distance between centroids
    #(the distance argument is kept for interface compatibility but is not used)
    centroid_list = list(gdf_points['Node'])
    c = []
    for i in arange(len(centroid_list)):
        row_list = []
        for j in arange(len(centroid_list)):
            if i != j:
                dist = nx.dijkstra_path_length(G=G, source=centroid_list[i], target=centroid_list[j], weight=weight)
                row_list.append(dist)
            else:
                row_list.append(0)
        c.append(row_list)
    
    
    arr = np.arange(len(centroid_list))
    det_df = pd.DataFrame(c, index=arr, columns=arr)
                 
    return det_df

det_func = {1 : det_func_basic, 2 : det_func_prob, 3 : det_func_sp}
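
Written out, the two analytic deterrence forms above are (with d_ij the euclidean distance between centroids i and j, and 100,000 the scaling constant used in the code):

\[
f^{basic}_{ij} = \frac{100000}{d_{ij}^{2}} \;\; (f^{basic}_{ii} = 0), \qquad
f^{prob}_{ij} = 100000 \, \frac{e^{-\beta d_{ij}}}{\sum_{k} e^{-\beta d_{ik}}} \;\; (f^{prob}_{ii} = 0)
\]

det_func_sp instead uses the network shortest-path distance between centroids, and the det_func dictionary maps the index det_idx (1, 2 or 3) used later by EMA onto these three alternatives.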

In [5]:
# %%px --local
theta = 50 #for m1
beta = 0.5 #for m1, m6
weight='length'
cutoff = 0.05 #for m5_01
m10_buffer = 0.005 #for m10
penalty = 1.2 #for m8_02

In [6]:
# %%px --local
centroid_nodes = od_p.prepare_centroids_list(G2_new_tograph)

#export OD
prod_lists1 = ['SteelBricks_exp_ton', 'Food_exp_ton','Jutextile_exp_ton', 'Garment_exp_ton']
attr_driver = 'Total_export'
OD_export_dict = od_p.all_ods_creation(gdf_points = gdf_points, prod_lists = prod_lists1, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists2 = [ 'Foods_loc_ton', 'Nonfoods_loc_ton']
attr_driver='Population_x'
OD_local_dict1 = od_p.all_ods_creation(gdf_points = gdf_points, prod_lists = prod_lists2, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists3 = ['RawJute_loc_ton']
attr_driver='Jute_mill'
OD_local_dict2 = od_p.all_ods_creation(gdf_points = gdf_points, prod_lists = prod_lists3, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists4 = ['Wheat_loc_ton']
attr_driver='Flour_mill'
OD_local_dict3 = od_p.all_ods_creation(gdf_points = gdf_points, prod_lists = prod_lists4, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists5 = ['Textile_loc_ton']
attr_driver='Tot_Garment_Factory'
OD_local_dict4 = od_p.all_ods_creation(gdf_points = gdf_points, prod_lists = prod_lists5, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])


#Combine all ODs
OD_local1 = OD_local_dict1[OD_local_dict1.keys()[0]]
for i in range(len(OD_local_dict1)-1):
    OD_local1 = OD_local1 +  OD_local_dict1[OD_local_dict1.keys()[i+1]]
    
OD_local2 = OD_local_dict2[OD_local_dict2.keys()[0]]

OD_local3 = OD_local_dict3[OD_local_dict3.keys()[0]]

OD_local4 = OD_local_dict4[OD_local_dict4.keys()[0]]

OD_export = OD_export_dict[OD_export_dict.keys()[0]]
for i in range(len(OD_export_dict)-1):
    OD_export = OD_export +  OD_export_dict[OD_export_dict.keys()[i+1]]

OD_all = OD_local1 + OD_local2 + OD_local3 + OD_local4 + OD_export

#create unweighted OD
centroid_district_listed = list(OD_all.columns)

OD_unweighted = pd.DataFrame(1, index=centroid_district_listed, columns=centroid_district_listed)

for i,row in OD_unweighted.iterrows():
    OD_unweighted.loc[i, i] = 0

In [7]:
# %%px --local
#for m3_02
division_gdf = gp.read_file('BGD_adm1.shp')
division_gdf.crs = {'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84'}
division_gdf['HASC_1'] = division_gdf['HASC_1'].apply(lambda code: code.replace('.', '_'))
division_list = list(set(list(division_gdf['HASC_1'])))
division_gdf['geometry'] = division_gdf.geometry.apply(lambda x: x.buffer(0.01))
def determine_division(geom, division):
    for i, row in division.iterrows():
        if geom.intersects(row['geometry']):
            return row['HASC_1']
gdf2['division'] = gdf2.geometry.apply(lambda geom: determine_division(geom=geom, division=division_gdf))
#create one GeoDataFrame per division (named '<HASC_1>_gdf') holding the edges that intersect that division
div_gdf_list = []
for div in division_list:
    exec("{}_gdf = gdf2.loc[gdf2['geometry'].intersects(division_gdf.loc[division_gdf['HASC_1']==div]['geometry'].iloc[0])]".format(div))
    exec("div_gdf_list.append({}_gdf)".format(div))

In [8]:
# %%px --local
import pickle

def load_obj(name):
    #load a pickled object from '<name>.pkl'
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)
    
ksp = load_obj('ksp_for_m402')
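
The k-shortest-path record loaded above ('ksp_for_m402.pkl') is assumed to have been pickled beforehand with a helper along these lines (a minimal sketch, not part of this notebook):

def save_obj(obj, name):
    #serialise a Python object (e.g. the ksp dictionary) to '<name>.pkl'
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)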

In [9]:
# %%px --local
def _daily_accessibility(centroid, G, theta, weight='length', beta=0.5):
    '''
    Helper function for function interdiction_m1

    return:
    a              : daily accessibility index
    len(sp_length) : number of nodes accessible within daily travel threshold (theta)
    '''

    total_sp_length = 0

    #calculate shortest path length to all other centroids
#     for target in G.nodes(): #why need this?
    sp_length = nx.single_source_dijkstra_path_length(G=G, source=centroid, cutoff=theta, weight=weight)
    count_node = 0
#     for key, val in sp_length.iteritems():
#         try:
#             total_sp_length += 1 / (val**beta)
#         except:
#             pass
#         count_node += 1
    for item in sp_length:
        try:
            total_sp_length += 1 / (item[1]**beta)
        except:
            pass
        count_node += 1

    #calculate the accessibility
    try:
        a = total_sp_length
    except:
        a = 0

    return a, count_node

def _sum_daily_accessibility(a_dict, a_n_dict):
    '''
    Helper function for function interdiction_m1
    '''
    sum_a = sum(a_dict.values())

    sum_a_n = sum(a_n_dict.values())

    return sum_a, sum_a_n

def _dict_daily_accessibility(centroids, G, theta, weight='length', beta=0.5):
    '''
    Helper function for function interdiction_m1

    return:
    a_dict     : dictionary of daily accessibility, keyed by centroids id
    a_n_dict   : dictionary of number of nodes accessible within daily travel threshold, keyed by centroids id
    '''

    a_dict = {}
    a_n_dict = {}
    for centroid in centroids:
        a, a_n = _daily_accessibility(centroid=centroid, G=G, theta=theta, weight=weight, beta=beta)
        a_dict.update({centroid:a})
        a_n_dict.update({centroid:a_n})
        
    return a_dict, a_n_dict
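
In formula form, _daily_accessibility returns for centroid i (matching the code above):

\[
a_i = \sum_{j \,:\, 0 < d_{ij} \le \theta} d_{ij}^{-\beta}
\]

together with n_i, the number of nodes reachable within the daily travel threshold theta; the source node itself is included in the count but contributes nothing to the sum, since its 1/0^beta term is skipped by the try/except.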

In [10]:
# %%px --local
#EMA building block
sp_dict_graph = betw_w.sp_dict_graph_creation(G=G2_new_tograph, sources=centroid_nodes, 
                                              targets=centroid_nodes, weight='length')
a_dict_base, a_n_dict_base = _dict_daily_accessibility(centroids=centroid_nodes, G=G2_new_tograph, theta=theta, 
                                                                 weight='length', beta=beta)
sum_a_base, sum_a_n_base = _sum_daily_accessibility(a_dict_base, a_n_dict_base)
all_daily_sp_list, all_daily_sp_dict = betw_w._all_daily_sp_record(G=G2_new_tograph, sources=centroid_nodes, 
                                                            cutoff=theta, weight='length')

#EMA building block
div_graph_list = []
for div in division_list:
    exec("{}_graph = nx.from_pandas_dataframe(df={}_gdf, source='FNODE_', target='TNODE_',edge_attr='length')".format(div,div))
    exec("div_graph_list.append({}_graph)".format(div))
div_graph_dict = dict(zip(division_list, div_graph_list))
div_init_avrgdist_dict = {}
for key, val in div_graph_dict.iteritems():
    cc = 0
    if nx.number_connected_components(val) > 1:
        for subgraph in nx.connected_component_subgraphs(val):
            if len(subgraph) > cc:
                cc = len(subgraph)
                graph = subgraph
    else:
        graph = val
    average_sp_length = nx.average_shortest_path_length(graph, weight='length')
    div_init_avrgdist_dict.update({key:average_sp_length})
    
#EMA building block
total_cost_base, od_cost_dict = betw_w._total_cost_sp(G=G2_new_tograph, sources=centroid_nodes, targets=centroid_nodes,
                                              weight=weight, od=OD_all, weighted=False)
total_cost_base_w, od_cost_dict_w = betw_w._total_cost_sp(G=G2_new_tograph, sources=centroid_nodes, targets=centroid_nodes,
                                              weight=weight, od=OD_all, weighted=True)
total_cost_sp_inversed, od_cost_inversed_dict =  betw_w._total_cost_sp_inversed(G=G2_new_tograph, sources=centroid_nodes, 
                                                                         targets=centroid_nodes, weight=weight)
efficiency_base = betw_w._network_efficiency_calc(G=G2_new_tograph, total_cost_inversed=total_cost_sp_inversed)

#EMA building block
flow_fromto_df = pd.DataFrame(OD_all.sum(axis=0)+OD_all.sum(axis=1))
flow_fromto_df.columns = ['flow']
a_sum_base, a_master_dict =  betw_w._sum_weighted_accessibility(G=G2_new_tograph, centroids=centroid_nodes,
                                                                flow=flow_fromto_df, weight='length', beta=beta)

#EMA building block
sp_cost_dict = betw_w._shortest_path_cost(G=G2_new_tograph, centroids=centroid_nodes, weight='length')

#EMA building block
flood_file = 'fluvial_defended_1in75_tile_1_-9999to0.tif'
gdf3 = gdf2.copy()
gdf3['geometry'] = gdf3.geometry.apply(lambda geom: geom.buffer(m10_buffer))
zonal_stats_dict = zonal_stats(gdf3, flood_file, stats='mean', all_touched=False)
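
zonal_stats returns one dictionary per buffered edge geometry, in the same row order as gdf3, holding the requested statistic (here the mean fluvial flood depth under each edge). A quick sanity check (illustrative output only):

#each list entry looks like {'mean': <mean raster value under the buffered edge>}
print(len(zonal_stats_dict) == len(gdf3))
print(zonal_stats_dict[0])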

In [12]:
# %%px --local
def no_interdiction(G, centroids, od, od_unw, weight, gdf=gdf2, div_graph_dict=div_graph_dict,
                    div_init_avrgdist_dict=div_init_avrgdist_dict, cutoff=cutoff, penalty=penalty,
                    zonal_stats_dict=zonal_stats_dict):
    
    edgelist = []
    for edge in list(G.edges()):
        edgelist.append(edge)
    
    #for m3_01
    flow_unweighted = betw_w.aon_assignment(G=G, sources=centroids,
                                            targets=centroids, weight=weight, od=od_unw)
    m3_01_dict = betw_w.edge_betweenness_centrality(flow_unweighted, od_unw)
    
    #for m3_02
    gdf['m3_02'] = gdf.apply(lambda row: betw_w.interdiction_m3_02(row=row, div_graph_dict=div_graph_dict,
                                                                   div_init_avrgdist_dict=div_init_avrgdist_dict), axis=1)
    gdf['FromToTuple'] = gdf.apply(lambda row: tuple([row['FNODE_'], row['TNODE_']]), axis=1)
    for i in list(gdf['FromToTuple']):
        if not i in m3_01_dict.keys():
            idx = gdf.loc[gdf['FromToTuple']==i].index[0]
            new_tup = tuple([i[1],i[0]])
            gdf['FromToTuple'][idx] = new_tup
    el = list(gdf['FromToTuple'])
    m3_02 = list(gdf['m3_02'])
    m3_02_dict = dict(zip(el, m3_02))
    
    #for m5_01
    gdf['m5_01'] = gdf.geometry.apply(lambda line: betw_w.m5_01(gdf=gdf, line=line, cutoff=cutoff))
    m5_01 = list(gdf['m5_01'])
    m5_01_dict = dict(zip(el, m5_01))
    
    #for m8_02
    flow_probit_5 = betw_w.probit_assignment(G=G, sources=centroids, targets=centroids, weight=weight, 
                                             od=od, N=4, penalty=penalty)
    m8_02_dict = betw_w.edge_betweenness_centrality(flow_probit_5, od)
    
    #for m10
    gdf['m10'] = [d.values()[0] for d in zonal_stats_dict]
    gdf['m10'] = gdf.m10.apply(lambda val: 0 if val > 0.8 else val)
    m10 = list(gdf['m10'])
    m10_dict = dict(zip(el, m10))    
    
    #if an edge does not have value yet
    #assign 0 to it
    all_dicts = [m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict]
    for edge in edgelist:
        for metric in all_dicts:
            if not edge in metric:
                metric.update({edge:0})
    
    return m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict

def interdiction(G, centroids, od, weight, theta=theta, beta=beta,
                 a_dict_base=a_dict_base, a_n_dict_base=a_n_dict_base, sum_a_base=sum_a_base, sum_a_n_base=sum_a_n_base,
                 all_daily_sp_list=all_daily_sp_list, all_daily_sp_dict=all_daily_sp_dict, od_cost_dict=od_cost_dict,
                 od_cost_inversed_dict=od_cost_inversed_dict, total_cost_base=total_cost_base, 
                 total_cost_sp_inversed=total_cost_sp_inversed, efficiency_base=efficiency_base, ksp=ksp,
                 a_sum_base=a_sum_base, a_master_dict=a_master_dict, flow_df=flow_fromto_df,
                 total_cost_base_w=total_cost_base_w, od_cost_dict_w=od_cost_dict_w, sp_cost_dict=sp_cost_dict):
    
    ff=0
    m1_01_dict = {}
    m1_02_dict = {}
    m2_01_dict = {}
    m2_02_dict = {}
    m4_02_dict = {}
    m6_01_dict = {}
    m7_01_dict = {}
    m7_02_dict = {}
    m7_03_dict = {}
    m9_01_dict = {}
    
    #record the initial total number of distinct paths in the k-shortest-path (ksp) record
    init_n_paths = 0
    for key, val in ksp.iteritems():
        init_n_paths += val[1]    
    
    edgelist = []
    for edge in list(G.edges()):
        edgelist.append(edge)
        
    for edge in edgelist:
        
        ff += 1
#         if ff%100 == 0:
#             print(str(ff) + ' edges have been interdicted')
            
        u = edge[0]
        v = edge[1]
        tup = tuple([u,v])
        
        #make a copy of the original graph
        G1 = G.copy()
        
        #remove that edge
        G1.remove_edge(u,v)
        
        #COPYING DICTIONARY
        #for m1
        a_dict_base2 = a_dict_base.copy()
        a_n_dict_base2 = a_n_dict_base.copy()
        #for m2
        od_cost_dict2 = od_cost_dict.copy()
        od_cost_inversed_dict2 = od_cost_inversed_dict.copy()
        #for m4_02
        new_n_paths = init_n_paths
        #for m6_01
        a_master_dict2 = a_master_dict.copy()
        #for m7_01
        od_cost_dict3 = od_cost_dict_w.copy()
        #for m7_02 and m7_03
        user_exposure_dict = {}
        for centroid in centroids:
            user_exposure_dict.update({centroid:[]})
        sum_exposure = 0
        #for m9_01
        unsatisfied_demand = 0
        
        #M1
        for key, val in all_daily_sp_dict.iteritems():
            if tup in val:
                a_new, a_n_new = _daily_accessibility(centroid=key, G=G1, theta=theta, weight=weight, beta=beta)
                a_dict_base2.update({key:a_new})
                a_n_dict_base2.update({key:a_n_new})
        
        #M1 VALUE UPDATE
        sum_a_new, sum_a_n_new = _sum_daily_accessibility(a_dict_base2, a_n_dict_base2)
        try:
            m1_01 = sum_a_base / sum_a_new
            m1_01_dict.update({edge:m1_01})
            m1_02 = sum_a_n_base / sum_a_n_new
            m1_02_dict.update({edge:m1_02})
        except:
            m1_01 = 9999
            m1_01_dict.update({edge:m1_01})
            m1_02 = 9999
            m1_02_dict.update({edge:m1_02})
            print(sum_a_new, sum_a_n_new, sum_a_base, sum_a_n_base)
                
                
        #M2, M6_01, M7_01, M7_02, M7_03, M9_01 ITERATION
        updated_centroid = [] #for m6_01
        for key, val in sp_dict_graph.iteritems():
            if tup in val:
                #for m2, m7_01, m9_01
                try:
                    sp_dijk_distance = nx.dijkstra_path_length(G1, source=key[0], target=key[1], weight=weight)
                    cost = sp_dijk_distance
                    od_cost_dict2[key] = cost
                    od_cost_inversed_dict2[key] = 1 / cost
                    flow = od[key[0]][key[1]]
                    cost2 = sp_dijk_distance * flow
                    od_cost_dict3[key] = cost2
                except:
                    sp_dijk_distance = 9999
                    flow = od[key[0]][key[1]]
                    unsatisfied_demand += flow
                #for m6_01
                try:
                    a_source_old_dict = a_master_dict2[key[0]]
                    a_source_new_dict = a_source_old_dict.copy()
                    a_source_target = betw_w._weighted_accessibility(G=G1, centroid=key[0], targets=key[1], 
                                                              flow=flow_df, weight=weight, beta=beta)
                    a_source_new_dict.update({key[1]:a_source_target})
                    a_master_dict2.update({key[0]:a_source_new_dict})
                except:
                    sp_dijk_distance = 9999
                #for m7_02 and m7_03
                exposure = betw_w._user_exposure(G_new=G1, centroid=key[0], target=key[1], 
                                          weight=weight, od=od, sp_cost_dict=sp_cost_dict)
                user_exposure_dict[key[0]].append(exposure)
                    
        #M2 VALUE UPDATE
        total_cost_new = sum(od_cost_dict2.values())
        total_cost_inversed_new = sum(od_cost_inversed_dict2.values())
        efficiency_new = betw_w._network_efficiency_calc(G1, total_cost_inversed_new)
        m2_01 = total_cost_new/total_cost_base
        if m2_01 < 0:
            m2_01 = 0
        m2_01_dict.update({edge:m2_01})
        try:
            m2_02 = (efficiency_base - efficiency_new)/efficiency_base
        except:
            m2_02 = 9999
        m2_02_dict.update({edge:m2_02})
        
        #M6_01 VALUE UPDATE
        a_sum_new = sum([sum(x.values()) for x in a_master_dict2.values()])
        try:
            m6_01 = a_sum_base/a_sum_new
        except:
            m6_01 = 9999
        if m6_01 < 0:
            m6_01 = 0
        m6_01_dict.update({edge:m6_01})
        
        #M7_01 AND M9_01 VALUE UPDATE
        total_cost_new = sum(od_cost_dict3.values())
        cost_increase = (total_cost_new - total_cost_base_w)/total_cost_base_w
        unsatisfied_demand = unsatisfied_demand/total_cost_base_w
        if cost_increase < 0:
            cost_increase = 0
        m7_01_dict.update({edge:cost_increase})
        m9_01_dict.update({edge:unsatisfied_demand})
        
        #M7_02 AND M7_03 VALUE UPDATE
        expected_ue_dict = {}
        worst_ue_dict = {}
        for key, val in user_exposure_dict.iteritems():
            if len(val) > 0:
                average_val = average(val)
                worst_val = max(val)
            else:
                average_val = 0
                worst_val = 0
            expected_ue_dict.update({key:average_val})
            worst_ue_dict.update({key:worst_val})
        try:
            m7_02 = sum(expected_ue_dict.values())/len(centroids)
            m7_03 = sum(worst_ue_dict.values())/len(centroids)
        except:
            m7_02 = 9999
            m7_03 = 9999
        m7_02_dict.update({edge:m7_02})
        m7_03_dict.update({edge:m7_03})
        
        #M4_02 ITERATION
        for key, val in ksp.iteritems():
            if tup in val[0]:
                new_n_paths -= 1
                
        #M4_02 VALUE UPDATE
        try:
            m4_02 = 1 - (new_n_paths / init_n_paths)
        except:
            m4_02 = 9999
        m4_02_dict.update({edge:m4_02})        
                
    #if an edge does not have value yet
    #assign 0 to it
    all_dicts = [m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict]
    for edge in edgelist:
        for metric in all_dicts:
            if not edge in metric:
                metric.update({edge:0})
            
    return m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict
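
In summary, the single-edge interdiction metrics computed in this loop compare the degraded network (one edge removed, subscript "new") against the baseline quantities prepared earlier (subscript "base"):

\[
\begin{aligned}
m1\_01 &= \tfrac{\sum_i a_i^{base}}{\sum_i a_i^{new}}, &
m1\_02 &= \tfrac{\sum_i n_i^{base}}{\sum_i n_i^{new}}, &
m2\_01 &= \tfrac{C_{new}}{C_{base}}, &
m2\_02 &= \tfrac{E_{base}-E_{new}}{E_{base}}, \\
m4\_02 &= 1 - \tfrac{P_{new}}{P_{init}}, &
m6\_01 &= \tfrac{A_{base}}{A_{new}}, &
m7\_01 &= \tfrac{C^{w}_{new}-C^{w}_{base}}{C^{w}_{base}}, &
m9\_01 &= \tfrac{D_{unsat}}{C^{w}_{base}},
\end{aligned}
\]

where C is the total unweighted shortest-path cost, E the network efficiency, P the number of distinct paths in the ksp record, A the flow-weighted accessibility sum, C^w the OD-flow-weighted cost, and D_unsat the OD flow that can no longer be routed. m7_02 and m7_03 average, over all centroids, the expected and the worst user exposure respectively.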

In [13]:
# m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict = interdiction(G=G2_new_tograph, centroids=centroid_nodes, od=OD_all, weight='length')

In [14]:
# %%px --local
edgelist = []
edge_dict = {}
for edge in G2_new_tograph.edges():
    key = str(edge[0]) + str(edge[1])
    edgelist.append(edge)
    edge_dict.update({key:[]})

EMA preparation


In [15]:
# %%px --local
theta = 50 #for m1
beta = 0.5 #for m1, m6
weight='length'
cutoff = 0.05 #for m5_01
m10_buffer = 0.005 #for m10
penalty = 1.2 #for m8_02

In [16]:
# %%px --local
def ema_criticality(det_idx=1, od_exp=1, od_loc1=1, od_loc2=1, od_loc3=1, od_loc4=1, theta=50, beta=0.5, weight='length',
                   cutoff=0.05, m10_buffer=0.005, penalty=1.2):
#     print('here1')
    #replicate the graph and gdf to avoid working on the real file
    G3 = G2_new_tograph.copy()
    gdf3 = gdf2.copy()
    
#     print('here2')
    #set the edge attribute
    edge_length_change = {}
    for u,v,data in G3.edges(data=True):
        length = data['length'] * np.random.uniform(0.75, 1.5)
        edge_length_change.update({tuple([u,v]):length})
        
    nx.set_edge_attributes(G3, 'length', edge_length_change)
    
#     print('here3')
    ##### CREATE OD MATRIX FIRST #####
    centroid_nodes = od_p.prepare_centroids_list(G2_new_tograph)

    #export OD
    prod_lists1 = ['SteelBricks_exp_ton', 'Food_exp_ton','Jutextile_exp_ton', 'Garment_exp_ton']
    attr_driver='Total_export'
    OD_export_dict = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists1, 
                                           attr_driver = attr_driver, dist_deterrence = det_func[det_idx])
    for key, val in OD_export_dict.iteritems():
        OD_export_dict[key] = (OD_export_dict[key][0] * od_exp, OD_export_dict[key][1])
    
#     print('here4')
    prod_lists2 = [ 'Foods_loc_ton', 'Nonfoods_loc_ton']
    attr_driver='Population_x'
    OD_local_dict1 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists2, 
                                           attr_driver = attr_driver, dist_deterrence = det_func[det_idx])
    for key, val in OD_local_dict1.iteritems():
        OD_local_dict1[key] = (OD_local_dict1[key][0] * od_loc1, OD_local_dict1[key][1])

    prod_lists3 = ['RawJute_loc_ton']
    attr_driver='Jute_mill'
    OD_local_dict2 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists3, 
                                           attr_driver = attr_driver, dist_deterrence = det_func[det_idx])
    for key, val in OD_local_dict2.iteritems():
        OD_local_dict2[key] = (OD_local_dict2[key][0] * od_loc2, OD_local_dict2[key][1])

    prod_lists4 = ['Wheat_loc_ton']
    attr_driver='Flour_mill'
    OD_local_dict3 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists4, 
                                           attr_driver = attr_driver, dist_deterrence = det_func[det_idx])
    for key, val in OD_local_dict3.iteritems():
        OD_local_dict3[key] = (OD_local_dict3[key][0] * od_loc3, OD_local_dict3[key][1])

    prod_lists5 = ['Textile_loc_ton']
    attr_driver='Tot_Garment_Factory'
    OD_local_dict4 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists5, 
                                           attr_driver = attr_driver, dist_deterrence = det_func[det_idx])
    for key, val in OD_local_dict4.iteritems():
        OD_local_dict4[key] = (OD_local_dict4[key][0] * od_loc4, OD_local_dict4[key][1])

#     print('here5')
    OD_all_dict = od_p._merge_two_dicts(OD_export_dict, OD_local_dict1)
    OD_all_dict = od_p._merge_two_dicts(OD_all_dict, OD_local_dict2)
    OD_all_dict = od_p._merge_two_dicts(OD_all_dict, OD_local_dict3)
    OD_all_dict = od_p._merge_two_dicts(OD_all_dict, OD_local_dict4)

    prod_lists = prod_lists1+prod_lists2+prod_lists3+prod_lists4+prod_lists5

    factors_dict = od_p.factors_dict_creation(prod_lists)

    #combine all ODs into a single weighted OD matrix
    #(od_aggregation is defined in the next cell; run that cell before calling ema_criticality)
    OD_all = od_aggregation(OD_all_dict, **factors_dict)
    
    print(OD_all.sum().sum())
    
#     print('here6')
    ##### THEN PREPARE ALL THE SUPPORTING THINGS #####
    
    #EMA building block
    sp_dict_graph = betw_w.sp_dict_graph_creation(G=G3, sources=centroid_nodes, 
                                                  targets=centroid_nodes, weight='length')
    a_dict_base, a_n_dict_base = _dict_daily_accessibility(centroids=centroid_nodes, G=G3, theta=theta, 
                                                                     weight='length', beta=beta)
    sum_a_base, sum_a_n_base = _sum_daily_accessibility(a_dict_base, a_n_dict_base)
    all_daily_sp_list, all_daily_sp_dict = betw_w._all_daily_sp_record(G=G3, sources=centroid_nodes, 
                                                                cutoff=theta, weight='length')

#     print('here7')
    #EMA building block
    div_init_avrgdist_dict = {}
    for key, val in div_graph_dict.iteritems():
        cc = 0
        if nx.number_connected_components(val) > 1:
            for subgraph in nx.connected_component_subgraphs(val):
                if len(subgraph) > cc:
                    cc = len(subgraph)
                    graph = subgraph
        else:
            graph = val
        average_sp_length = nx.average_shortest_path_length(graph, weight='length')
        div_init_avrgdist_dict.update({key:average_sp_length})

#     print('here8')
    #EMA building block
    total_cost_base, od_cost_dict = betw_w._total_cost_sp(G=G3, sources=centroid_nodes, targets=centroid_nodes,
                                                  weight=weight, od=OD_all, weighted=False)
    total_cost_base_w, od_cost_dict_w = betw_w._total_cost_sp(G=G3, sources=centroid_nodes, targets=centroid_nodes,
                                                  weight=weight, od=OD_all, weighted=True)
    total_cost_sp_inversed, od_cost_inversed_dict =  betw_w._total_cost_sp_inversed(G=G3, sources=centroid_nodes, 
                                                                             targets=centroid_nodes, weight=weight)
    efficiency_base = betw_w._network_efficiency_calc(G=G3, total_cost_inversed=total_cost_sp_inversed)

#     print('here9')
    #EMA building block
    flow_fromto_df = pd.DataFrame(OD_all.sum(axis=0)+OD_all.sum(axis=1))
    flow_fromto_df.columns = ['flow']
    a_sum_base, a_master_dict =  betw_w._sum_weighted_accessibility(G=G3, centroids=centroid_nodes,
                                                                    flow=flow_fromto_df, weight='length', beta=beta)

    #EMA building block
    sp_cost_dict = betw_w._shortest_path_cost(G=G3, centroids=centroid_nodes, weight='length')

#     print('here10')
    #EMA building block
    flood_file = 'fluvial_defended_1in75_tile_1_-9999to0.tif'
    gdf4 = gdf3.copy()
    gdf4['geometry'] = gdf4.geometry.apply(lambda geom: geom.buffer(m10_buffer))
    zonal_stats_dict = zonal_stats(gdf4, flood_file, stats='mean', all_touched=False)
    
    print('here11')
    #### THEN CALCULATE THE METRICS ####
    m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict = interdiction(G=G3, centroids=centroid_nodes, od=OD_all, 
                                                                                                                                          weight='length', theta=theta, beta=beta,
                                                                                                                                          a_dict_base=a_dict_base, a_n_dict_base=a_n_dict_base, 
                                                                                                                                          sum_a_base=sum_a_base, sum_a_n_base=sum_a_n_base,
                                                                                                                                          all_daily_sp_list=all_daily_sp_list, 
                                                                                                                                          all_daily_sp_dict=all_daily_sp_dict, 
                                                                                                                                          od_cost_dict=od_cost_dict, 
                                                                                                                                          od_cost_inversed_dict=od_cost_inversed_dict, 
                                                                                                                                          total_cost_base=total_cost_base, 
                                                                                                                                          total_cost_sp_inversed=total_cost_sp_inversed, 
                                                                                                                                          efficiency_base=efficiency_base, ksp=ksp,
                                                                                                                                          a_sum_base=a_sum_base, a_master_dict=a_master_dict, 
                                                                                                                                          flow_df=flow_fromto_df, total_cost_base_w=total_cost_base_w, 
                                                                                                                                          od_cost_dict_w=od_cost_dict_w, sp_cost_dict=sp_cost_dict)
    
    m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict = no_interdiction(G=G3, centroids=centroid_nodes, 
                                                                               od=OD_all, od_unw=OD_unweighted, weight='length', 
                                                                               gdf=gdf3, div_graph_dict=div_graph_dict, div_init_avrgdist_dict=div_init_avrgdist_dict, 
                                                                               cutoff=cutoff, penalty=penalty, zonal_stats_dict=zonal_stats_dict)
    print('here12')

    
    ##### COMBINE ALL DICTIONARIES ####
    all_dicts = [m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict, m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, 
                 m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict]
    d = {}
    for k in m3_01_dict.iterkeys():
        d[k] = tuple(metric[k] for metric in all_dicts)
        
    edge_dict = {}
    for key,val in d.iteritems():
        keyn = str(key[0]) + str(key[1])
        edge_dict.update({keyn:val})
        
    return edge_dict
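
The returned edge_dict is keyed by the concatenated node ids of each edge (str(edge[0]) + str(edge[1])) and holds, per edge, a 15-tuple of metric values in the order of all_dicts above: m3_01, m3_02, m5_01, m8_02, m10, m1_01, m1_02, m2_01, m2_02, m4_02, m6_01, m7_01, m7_02, m7_03, m9_01. A minimal sketch of unpacking it into a labelled DataFrame (the column names are illustrative, not used elsewhere in the notebook):

metric_names = ['m3_01', 'm3_02', 'm5_01', 'm8_02', 'm10', 'm1_01', 'm1_02', 'm2_01',
                'm2_02', 'm4_02', 'm6_01', 'm7_01', 'm7_02', 'm7_03', 'm9_01']
#one row per interdicted edge, one column per criticality metric
crit_df = pd.DataFrame.from_dict(edge_dict, orient='index')
crit_df.columns = metric_names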

In [17]:
def od_aggregation(OD_all_dict, **factors_dict):
    #create empty dictionary
    OD_final_dict={}

    #iterate over all items in original OD
    for key1,val1 in OD_all_dict.iteritems():

        #matching the production value of the OD dict and the factors_dict
        for key2,val2 in factors_dict.iteritems():
            #if it is a match
            if val1[1] == key2:
                #scale the OD flows of that particular product
                OD_final_dict["od_{0}".format(val1[1])]=val1[0]*val2

    #creation of final OD dataframe
    OD_final_df = OD_final_dict[OD_final_dict.keys()[0]]
    OD_final_df.fillna(value=0, inplace=True)
    for i in range(len(OD_final_dict)-1):
        OD_new = OD_final_dict[OD_final_dict.keys()[i+1]]
        OD_new.fillna(value=0, inplace=True)
        OD_final_df = OD_final_df +  OD_new

    return OD_final_df

In [19]:
# import datetime
# timestart = datetime.datetime.now()
# print(timestart)

# dictall = ema_criticality()

# timeend = datetime.datetime.now()
# print(timeend-timestart)

In [16]:
# %%px --local
edge_dict = ema_criticality()


here1
C:/Users/User/Desktop/BramkaFile/bangladesh_network\od_prep.py:137: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  relative_attr[i[0]] = i[1] * np.random.uniform(0.5, 1.5)
here6
here11
C:\Users\User\Anaconda2\lib\site-packages\ipykernel\__main__.py:23: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
here12

EMA run


In [19]:
# %%px --local
from ema_workbench import (Model, RealParameter, ScalarOutcome, Constant, IntegerParameter)

#instantiate the model
criticality_model = Model('criticality', function = ema_criticality)

#specify uncertainties
criticality_model.uncertainties = [IntegerParameter('det_idx', 1, 3),
                            RealParameter('od_exp', 0.75, 1.5),
                            RealParameter('od_loc1', 0.75, 1.5),
                            RealParameter('od_loc2', 0.75, 1.5),
                            RealParameter('od_loc3', 0.75, 1.5),
                            RealParameter('od_loc4', 0.75, 1.5),
                            RealParameter('theta', 30, 70),
                            RealParameter('beta', 0.25, 0.75),
                            RealParameter('cutoff', 0.025, 0.075),
                            RealParameter('m10_buffer', 0.0025, 0.0075),
                            RealParameter('penalty', 1, 1.5)]

#specify outcomes 
criticality_model.outcomes = [ScalarOutcome(key) for key,val in edge_dict.iteritems()]

In [23]:
len(edge_dict)


Out[23]:
1258

In [20]:
edge_dict = load_obj('edge_dict_ema')

In [21]:
crit_df = pd.DataFrame.from_dict(edge_dict, orient='index')
crit_df2 = crit_df.copy()

In [22]:
crit_df['osmid'] = crit_df.index
crit_df.index = np.arange(0, len(crit_df), 1)

In [23]:
c = 0
crit_df['rep'] = [c for i in np.arange(0,len(crit_df),1)]

In [24]:
# c = 0
# while c < 3:
    
#     new_df = crit_df2.copy()
#     new_df['osmid'] = new_df.index
#     new_df.index = np.arange(0, len(new_df), 1)
#     new_df['rep'] = [c+1 for i in np.arange(0,len(new_df),1)]
#     crit_df = crit_df.append(new_df)
#     c += 1
#     crit_df.to_csv('version_{}.csv'.format(c))

In [ ]:
import datetime
timestart = datetime.datetime.now()
print(timestart)

m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict = no_interdiction(G=G2_new_tograph, centroids=centroid_nodes, 
                                                                           od=OD_all, od_unw=OD_unweighted, weight='length')

timeend = datetime.datetime.now()
print(timeend-timestart)

In [ ]:
#non-EMA run
c = 0
import datetime
while c < 100:
    timestart = datetime.datetime.now()
    det_idx = np.random.randint(1,4)
    od_exp = np.random.uniform(0.75,1.5)
    od_loc1 = np.random.uniform(0.75,1.5)
    od_loc2 = np.random.uniform(0.75,1.5)
    od_loc3 = np.random.uniform(0.75,1.5)
    od_loc4 = np.random.uniform(0.75,1.5)
    theta = np.random.randint(30,71)
    beta = np.random.uniform(0.25, 0.75)
    cutoff = np.random.uniform(0.025, 0.075)
    m10_buffer = np.random.uniform(0.0025, 0.0075)
    penalty = np.random.uniform(1, 1.5)
    
    new_dict = ema_criticality(det_idx=det_idx, od_exp=od_exp, od_loc1=od_loc1, od_loc2=od_loc2, 
                               od_loc3=od_loc3, od_loc4=od_loc4, theta=theta, beta=beta, weight='length',
                               cutoff=cutoff, m10_buffer=m10_buffer, penalty=penalty)
    
    new_df = pd.DataFrame.from_dict(new_dict, orient='index')
    new_df['osmid'] = new_df.index
    new_df.index = np.arange(0, len(new_df), 1)
    repnum = c + 17
    new_df['rep'] = [repnum+1 for i in np.arange(0,len(new_df),1)]
    crit_df = crit_df.append(new_df)
    c += 1
    repnum = c + 17
    new_df.to_csv('result_single_version_{}.csv'.format(repnum))
    crit_df.to_csv('result_version_{}.csv'.format(repnum))
    timeend = datetime.datetime.now()
    print(timeend-timestart)


C:/Users/Lenovo/Desktop/bangladesh_network\od_prep.py:137: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  relative_attr[i[0]] = i[1] * np.random.uniform(0.5, 1.5)
80983494.698
here11
C:\Users\Lenovo\Anaconda2\lib\site-packages\ipykernel\__main__.py:23: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
here12
2:15:29.593000
82441076.9093
here11
here12
2:26:50.926000
83695773.9975
here11
here12
2:22:12.914000
54311036.8575
here11
here12
2:26:37.918000
87600361.6121
here11
here12
2:36:00.668000
60710082.8756
here11
here12
2:24:11.503000
81427135.8291
here11
here12
2:15:54.124000
74892445.3533
here11

In [ ]:
from ema_workbench import Policy, perform_experiments
from ema_workbench import ema_logging, save_results
from ema_workbench.em_framework import IpyparallelEvaluator

import datetime
timestart = datetime.datetime.now()
print(timestart)

ema_logging.log_to_stderr(ema_logging.INFO)

n_scenarios = 100
with IpyparallelEvaluator(criticality_model, client=client) as evaluator:
    results = evaluator.perform_experiments(scenarios=n_scenarios)

fh =  r'./data/{} experiments_22may_v01.tar.gz'.format(n_scenarios)
save_results(results, fh)

timeend = datetime.datetime.now()
print(timeend-timestart)


2017-05-22 16:43:05.682000
[MainProcess/INFO] performing experiments using ipyparallel
[MainProcess/INFO] performing 100 scenarios * 1 policies * 1 model(s) = 100 experiments

In [22]:
from ema_workbench import Policy, perform_experiments
from ema_workbench import ema_logging, save_results

ema_logging.log_to_stderr(ema_logging.INFO)

n_scenarios = 10
results = perform_experiments(criticality_model, n_scenarios)

fh =  r'./data/{} experiments_22may_v01.tar.gz'.format(n_scenarios)
save_results(results, fh)


[MainProcess/INFO] performing 10 scenarios * 1 policies * 1 model(s) = 10 experiments
[MainProcess/INFO] performing experiments sequentially
here1
here6
C:\Users\User\Anaconda2\lib\site-packages\ipykernel\__main__.py:19: RuntimeWarning: divide by zero encountered in double_scalars
here11
C:\Users\User\Anaconda2\lib\site-packages\ipykernel\__main__.py:130: RuntimeWarning: invalid value encountered in double_scalars
C:\Users\User\Anaconda2\lib\site-packages\ipykernel\__main__.py:191: RuntimeWarning: invalid value encountered in double_scalars
C:\Users\User\Anaconda2\lib\site-packages\ipykernel\__main__.py:23: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
Traceback (most recent call last):
  File "C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\em_framework\experiment_runner.py", line 89, in run_experiment
    model.run_model(scenario, policy)
  File "C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\util\ema_logging.py", line 49, in wrapper
    res = func(*args, **kwargs)
  File "C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\em_framework\model.py", line 356, in run_model
    self.output = self.run_experiment(experiment)
  File "C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\util\ema_logging.py", line 49, in wrapper
    res = func(*args, **kwargs)
  File "C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\em_framework\model.py", line 410, in run_experiment
    model_output = self.function(**experiment)
  File "<ipython-input-14-c68384b6e669>", line 146, in ema_criticality
    cutoff=cutoff, penalty=penalty, zonal_stats_dict=zonal_stats_dict)
  File "<ipython-input-20-f9f16f8d82e2>", line 36, in no_interdiction
    m8_02_dict = betw_w.edge_betweenness_centrality(flow_probit_5, od)
  File "C:/Users/User/Desktop/BramkaFile/bangladesh_network\weighted_betweenness.py", line 269, in edge_betweenness_centrality
    flow2[key] = val / totalval
ZeroDivisionError: float division by zero
---------------------------------------------------------------------------
EMAError                                  Traceback (most recent call last)
<ipython-input-22-036d2a7013a5> in <module>()
      5 
      6 n_scenarios = 10
----> 7 results = perform_experiments(criticality_model, n_scenarios)
      8 
      9 fh =  r'./data/{} experiments_22may_v01.tar.gz'.format(n_scenarios)

C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\em_framework\evaluators.pyc in perform_experiments(models, scenarios, policies, evaluator, reporting_interval, uncertainty_union, lever_union, outcome_union, uncertainty_sampling, levers_sampling)
    411         evaluator = SequentialEvaluator(models)
    412 
--> 413     evaluator.evaluate_experiments(scenarios, policies, callback)
    414 
    415     if callback.i != nr_of_exp:

C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\em_framework\evaluators.pyc in evaluate_experiments(self, scenarios, policies, callback)
    203         runner = ExperimentRunner(models)
    204         for experiment in ex_gen:
--> 205             result = runner.run_experiment(experiment)
    206             callback(experiment, result)
    207         runner.cleanup()

C:\Users\User\Anaconda2\lib\site-packages\ema_workbench\em_framework\experiment_runner.pyc in run_experiment(self, experiment)
    102 
    103             raise EMAError(("exception in run_model"
--> 104                    "\nCaused by: {}: {}".format(type(e).__name__, str(e))))
    105 
    106         output = model.output

EMAError: exception in run_model
Caused by: ZeroDivisionError: float division by zero

In [ ]:
from ema_workbench import Policy, perform_experiments
from ema_workbench import ema_logging, save_results

ema_logging.log_to_stderr(ema_logging.INFO)

n_scenarios = 20
results = perform_experiments(criticality_model, n_scenarios)

fh =  r'./data/{} experiments_22may_v02.tar.gz'.format(n_scenarios)
save_results(results, fh)

In [ ]:
from ema_workbench import Policy, perform_experiments
from ema_workbench import ema_logging, save_results

ema_logging.log_to_stderr(ema_logging.INFO)

n_scenarios = 40
results = perform_experiments(criticality_model, n_scenarios)

fh =  r'./data/{} experiments_22may_v03.tar.gz'.format(n_scenarios)
save_results(results, fh)

In [ ]:
from ema_workbench import Policy, perform_experiments
from ema_workbench import ema_logging, save_results

ema_logging.log_to_stderr(ema_logging.INFO)

n_scenarios = 30
results = perform_experiments(criticality_model, n_scenarios)

fh =  r'./data/{} experiments_22may_v04.tar.gz'.format(n_scenarios)
save_results(results, fh)

In [154]:
from ema_workbench import save_results
fh =  r'./data/{} experiments_22may_v01.tar.gz'.format(n_scenarios)
save_results(results, fh)


[MainProcess/INFO] results saved successfully to C:\Users\User\Desktop\BramkaFile\betweenness_ema_thesis\data\3 experiments_22may_v01.tar.gz
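
The archived results can be read back later for analysis with ema_workbench's load_results; a minimal sketch, reusing the file path from the save call above:

from ema_workbench import load_results

#returns the experiment design and a dict of outcome arrays keyed by the edge ids used as outcome names
experiments, outcomes = load_results(fh)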

used for EMA


In [17]:
# %%px --local
#EMA BUILDING BLOCK


centroid_nodes = od_p.prepare_centroids_list(G2_new_tograph)

#export OD
prod_lists1 = ['SteelBricks_exp_ton', 'Food_exp_ton','Jutextile_exp_ton', 'Garment_exp_ton']
attr_driver='Total_export'
OD_export_dict = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists1, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1]) #pass a single deterrence function, not the whole det_func dict

prod_lists2 = [ 'Foods_loc_ton', 'Nonfoods_loc_ton']
attr_driver='Population_x'
OD_local_dict1 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists2, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists3 = ['RawJute_loc_ton']
attr_driver='Jute_mill'
OD_local_dict2 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists3, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists4 = ['Wheat_loc_ton']
attr_driver='Flour_mill'
OD_local_dict3 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists4, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

prod_lists5 = ['Textile_loc_ton']
attr_driver='Tot_Garment_Factory'
OD_local_dict4 = od_p.all_ods_creation_ema(gdf_points = gdf_points, prod_lists = prod_lists5, 
                                       attr_driver = attr_driver, dist_deterrence = det_func[1])

OD_all_dict = od_p._merge_two_dicts(OD_export_dict, OD_local_dict1)
OD_all_dict = od_p._merge_two_dicts(OD_all_dict, OD_local_dict2)
OD_all_dict = od_p._merge_two_dicts(OD_all_dict, OD_local_dict3)
OD_all_dict = od_p._merge_two_dicts(OD_all_dict, OD_local_dict4)

prod_lists = prod_lists1+prod_lists2+prod_lists3+prod_lists4+prod_lists5

factors_dict = od_p.factors_dict_creation(prod_lists)

# sp_dict = betw_w._shortest_path_record(G=G2_new_tograph, sources=centroid_nodes, 
#                                      targets=centroid_nodes, weight='length')
   
#combine all od
OD_all = od_p.od_aggregation(OD_all_dict, **factors_dict)

#create unweighted OD
centroid_district_listed = list(OD_all.columns)

OD_unweighted = pd.DataFrame(1, index=centroid_district_listed, columns=centroid_district_listed)

for i,row in OD_unweighted.iterrows():
    OD_unweighted.loc[i, i] = 0

In [24]:
#EMA building block
sp_dict_graph = betw_w.sp_dict_graph_creation(G=G2_new_tograph, sources=centroid_nodes, 
                                              targets=centroid_nodes, weight='length')
a_dict_base, a_n_dict_base = betw_w._dict_daily_accessibility(centroids=centroid_nodes, G=G2_new_tograph, theta=theta, 
                                                                 weight='length', beta=beta)
sum_a_base, sum_a_n_base = betw_w._sum_daily_accessibility(a_dict_base, a_n_dict_base)
all_daily_sp_list, all_daily_sp_dict = betw_w._all_daily_sp_record(G=G2_new_tograph, sources=centroid_nodes, 
                                                            cutoff=theta, weight='length')

#EMA building block
div_graph_list = []
for div in division_list:
    exec("{}_graph = nx.from_pandas_dataframe(df={}_gdf, source='FNODE_', target='TNODE_',edge_attr='length')".format(div,div))
    exec("div_graph_list.append({}_graph)".format(div))
div_graph_dict = dict(zip(division_list, div_graph_list))
div_init_avrgdist_dict = {}
for key, val in div_graph_dict.iteritems():
    cc = 0
    if nx.number_connected_components(val) > 1:
        for subgraph in nx.connected_component_subgraphs(val):
            if len(subgraph) > cc:
                cc = len(subgraph)
                graph = subgraph
    else:
        graph = val
    average_sp_length = nx.average_shortest_path_length(graph, weight='length')
    div_init_avrgdist_dict.update({key:average_sp_length})
    
#EMA building block
total_cost_base, od_cost_dict = betw_w._total_cost_sp(G=G2_new_tograph, sources=centroid_nodes, targets=centroid_nodes,
                                              weight=weight, od=OD_all, weighted=False)
total_cost_base_w, od_cost_dict_w = betw_w._total_cost_sp(G=G2_new_tograph, sources=centroid_nodes, targets=centroid_nodes,
                                              weight=weight, od=OD_all, weighted=True)
total_cost_sp_inversed, od_cost_inversed_dict =  betw_w._total_cost_sp_inversed(G=G2_new_tograph, sources=centroid_nodes, 
                                                                         targets=centroid_nodes, weight=weight)
efficiency_base = betw_w._network_efficiency_calc(G=G2_new_tograph, total_cost_inversed=total_cost_sp_inversed)

#EMA building block
flow_fromto_df = pd.DataFrame(OD_all.sum(axis=0)+OD_all.sum(axis=1))
flow_fromto_df.columns = ['flow']
a_sum_base, a_master_dict =  betw_w._sum_weighted_accessibility(G=G2_new_tograph, centroids=centroid_nodes,
                                                                flow=flow_fromto_df, weight='length', beta=beta)

#EMA building block
sp_cost_dict = betw_w._shortest_path_cost(G=G2_new_tograph, centroids=centroid_nodes, weight='length')

#EMA building block
flood_file = 'fluvial_defended_1in75_tile_1_-9999to0.tif'
gdf3 = gdf2.copy()
gdf3['geometry'] = gdf3.geometry.apply(lambda geom: geom.buffer(m10_buffer))
zonal_stats_dict = zonal_stats(gdf3, flood_file, stats='mean', all_touched=False)

real deal


In [33]:
import datetime
timestart = datetime.datetime.now()
print(timestart)

m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict = no_interdiction(G=G2_new_tograph, centroids=centroid_nodes, 
                                                                           od=OD_all, od_unw=OD_unweighted, weight='length')

timeend = datetime.datetime.now()
print(timeend-timestart)


2017-05-21 07:48:38.943000
C:\Users\Lenovo\Anaconda2\lib\site-packages\ipykernel\__main__.py:22: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
0:24:19.479000

In [50]:
#for dict merger
all_dicts = [m3_01_dict, m3_02_dict, 
             m5_01_dict, m8_02_dict, m10_dict]
d = {}
for k in m3_01_dict.iterkeys():
    d[k] = tuple(metric[k] for metric in all_dicts)

In [40]:
alll = [m3_01_dict, m3_02_dict, m5_01_dict, m8_02_dict, m10_dict]
for i in alll:
    print(len(i))


1258
1258
1258
1258
1258

In [41]:
for num, i in enumerate(alll):
    for key, val in i.iteritems():
        for num2, j in enumerate(alll):
            if i != j:
                if not key in j.keys():
                    print('key ' + str(key) + ' in ' + str(i) + ' not available in ' + str(j))

In [42]:
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf2,m3_01_dict,'m3_01')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m3_02_dict,'m3_02')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m5_01_dict,'m5_01')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m8_02_dict,'m8_02')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m10_dict,'m10')

In [43]:
gdf_final.to_csv('result_NoInterdiction_1107noz2_v01.csv')

In [32]:
import datetime
timestart = datetime.datetime.now()
print(timestart)

m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict = interdiction(G=G2_new_tograph, centroids=centroid_nodes, od=OD_all, weight='length')

timeend = datetime.datetime.now()
print(timeend-timestart)


2017-05-20 00:40:33.471000
starting the interdiction
5 edges have been interdicted
50 edges have been interdicted
100 edges have been interdicted
150 edges have been interdicted
200 edges have been interdicted
250 edges have been interdicted
300 edges have been interdicted
350 edges have been interdicted
400 edges have been interdicted
450 edges have been interdicted
500 edges have been interdicted
550 edges have been interdicted
600 edges have been interdicted
650 edges have been interdicted
700 edges have been interdicted
750 edges have been interdicted
800 edges have been interdicted
850 edges have been interdicted
900 edges have been interdicted
950 edges have been interdicted
1000 edges have been interdicted
1050 edges have been interdicted
1100 edges have been interdicted
1150 edges have been interdicted
1200 edges have been interdicted
1250 edges have been interdicted
1:21:30.666000

In [33]:
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf2,m1_01_dict,'m1_01')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m1_02_dict,'m1_02')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m2_01_dict,'m2_01')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m2_02_dict,'m2_02')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m4_02_dict,'m4_02')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m6_01_dict,'m6_01')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m7_01_dict,'m7_01')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m7_02_dict,'m7_02')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m7_03_dict,'m7_03')
gdf_final, m1_01_df = betw_w.betweenness_to_df(gdf_final,m9_01_dict,'m9_01')

In [34]:
gdf_final.to_csv('result_interdiction_1107noz2_v03.csv')

In [35]:
alll = [m1_01_dict, m1_02_dict, m2_01_dict, m2_02_dict, m4_02_dict, m6_01_dict, m7_01_dict, m7_02_dict, m7_03_dict, m9_01_dict]
for i in alll:
    print(len(i))


1258
1258
1258
1258
1258
1258
1258
1258
1258
1258

In [36]:
check = [x for x in m1_02_dict.values() if x > 0]
keys= [x for x in m1_02_dict.keys() if m1_02_dict[x] > 0]

In [33]:
len(m7_02_dict)


Out[33]:
1258