In [1]:
# Reload when code changed:
%load_ext autoreload
%autoreload 2
%pwd
%matplotlib inline
import os
import sys
# Make the project root (one directory up) importable for `import core` below.
path = "../"
sys.path.append(path)
#os.path.abspath("../")
print(os.path.abspath(path))
In [2]:
# Stdlib and third-party imports for the analysis.
import pandas as pd
import numpy as np
import json
import pickle
import importlib
import logging

# Project module; reload so edits in core.py are picked up without a kernel restart.
import core
importlib.reload(core)

# Reset the logging module so handlers from an earlier run do not accumulate.
# Best effort only -- a failure here must never stop the notebook.
try:
    logging.shutdown()
    importlib.reload(logging)
except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    pass

from event_handler import EventHandler
print(core.__file__)
pd.__version__
Out[2]:
In [3]:
# Paths to the calculator project, its workspaces and resources.
# NOTE(review): hard-coded absolute local path -- breaks on any other machine;
# consider the commented relative alternative.
root_directory = 'D:/github/w_vattenstatus/ekostat_calculator'#"../" #os.getcwd()
workspace_directory = root_directory + '/workspaces'
resource_directory = root_directory + '/resources'
#alias = 'lena'
user_id = 'test_user' # maybe this should be an off_line user?
workspace_alias = 'lena_indicator'
In [4]:
print(root_directory)
# Keyword arguments for EventHandler. Reuse the directory variables defined
# above instead of rebuilding the same concatenations, so the two cells
# cannot drift apart.
paths = {'user_id': user_id,
         'workspace_directory': workspace_directory,
         'resource_directory': resource_directory,
         'log_directory': 'D:/github' + '/log',
         'test_data_directory': 'D:/github' + '/test_data'}
In [5]:
# Create the event handler that owns workspaces and serves requests.
ekos = EventHandler(**paths)
#request = ekos.test_requests['request_workspace_list']
#response = ekos.request_workspace_list(request)
#ekos.write_test_response('request_workspace_list', response)
# OLD: ekos = EventHandler(root_directory)
In [6]:
##### BEHÖVS BARA FÖRSTA GÅNGEN FÖR ATT SKAPA WORKSPACE #######
#ekos.copy_workspace(source_uuid='default_workspace', target_alias=workspace_alias)
In [7]:
# List available workspaces, resolve the alias to a uuid, then load it.
ekos.print_workspaces()
In [8]:
workspace_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias)
print(workspace_uuid)
In [9]:
workspace_alias = ekos.get_alias_for_unique_id(workspace_uuid = workspace_uuid)
In [10]:
ekos.load_workspace(unique_id = workspace_uuid)
Out[10]:
In [ ]:
In [11]:
##### BEHÖVS BARA VID NYTT WORKSPACE ELLER NYA DATAFILER ######
#ekos.import_default_data(workspace_alias = workspace_alias)
In [12]:
#ekos.get_workspace(unique_id = workspace_uuid, alias = workspace_alias).delete_alldata_export()
In [30]:
# Load the workspace data; the row count below is a sanity check on the load.
ekos.load_data(workspace_uuid = workspace_uuid)
Out[30]:
In [13]:
w = ekos.get_workspace(workspace_uuid = workspace_uuid)
len(w.data_handler.get_all_column_data_df())
### If the "right" DATA is loaded, len should be 10694
Out[13]:
In [54]:
# Print every subset uuid together with its human-readable alias.
print('subsetlist', w.get_subset_list())
for subset_uuid in w.get_subset_list():
print('uuid {} alias {}'.format(subset_uuid, w.uuid_mapping.get_alias(unique_id=subset_uuid)))
In [ ]:
In [12]:
def load_and_fix(subset_alias, indicator):
    """Load the by-period result pickle for `indicator` in the given subset.

    Renames VISS_EU_CD -> EU_CD so the frame can later be merged with other
    results on a common water-body key. Relies on the module-level `ekos`,
    `w` and `workspace_alias` objects.
    """
    subset_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias, subset_alias = subset_alias)
    result_path = w.get_step_object(step = 3, subset = subset_uuid).paths['directory_paths']['results']
    # Context manager closes the file handle (the original
    # `pickle.load(open(...))` leaked it).
    with open(result_path + '/' + indicator + '_by_period.pkl', 'rb') as fid:
        df = pickle.load(fid)
    df.rename(columns = {'VISS_EU_CD': 'EU_CD'}, inplace = True)
    return df
In [13]:
def merge_and_compare(df1, df2, suffixes):
    """Merge two indicator result frames on water-body key columns and
    classify how the good/moderate boundary changed between the two runs.

    The added `change` column is -1 (dropped below 0.6), 1 (rose to >= 0.6),
    0 (same status) or 2 (any other change, the default).
    """
    cols = ['STATUS', 'EU_CD', 'YEAR_count', 'global_EQR', 'WATER_TYPE_AREA']
    merged = pd.merge(df1[cols], df2[cols],
                      on=['EU_CD', 'WATER_TYPE_AREA'], suffixes=suffixes)
    eqr_a = merged['global_EQR' + suffixes[0]]
    eqr_b = merged['global_EQR' + suffixes[1]]
    status_a = merged['STATUS' + suffixes[0]]
    status_b = merged['STATUS' + suffixes[1]]
    merged['change'] = np.select(
        [(eqr_a >= 0.6) & (eqr_b < 0.6),
         (eqr_a < 0.6) & (eqr_b >= 0.6),
         status_a == status_b],
        [-1, 1, 0],
        default=2)
    return merged
In [15]:
# Compare 2006 vs 2017 reference values for each nitrogen indicator.
ntot_winter2006 = load_and_fix('period_2007-2012_refvalues_2006', 'ntot_winter')
ntot_winter2017 = load_and_fix('period_2007-2012_refvalues_2017', 'ntot_winter')
ntot_winter = merge_and_compare(ntot_winter2006, ntot_winter2017, ['2006','2017'])
In [17]:
din_winter2006 = load_and_fix('period_2007-2012_refvalues_2006', 'din_winter')
din_winter2017 = load_and_fix('period_2007-2012_refvalues_2017', 'din_winter')
din_winter = merge_and_compare(din_winter2006, din_winter2017, ['2006','2017'])
In [18]:
ntot_summer2006 = load_and_fix('period_2007-2012_refvalues_2006', 'ntot_summer')
ntot_summer2017 = load_and_fix('period_2007-2012_refvalues_2017', 'ntot_summer')
ntot_summer = merge_and_compare(ntot_summer2006, ntot_summer2017, ['2006','2017'])
In [19]:
# Combine the three indicator comparison frames into one table per water body.
df = pd.merge(din_winter, ntot_winter, on = ['EU_CD','WATER_TYPE_AREA'], suffixes = ['din_winter','ntot_winter'])
# BUG FIX: add_suffix/rename return new frames; the original calls discarded
# the results (no-ops) and the rename target had a typo
# ('WATER_TYPE_AREAntotsummer' -- missing underscore).
ntot_summer_sfx = ntot_summer.add_suffix('ntot_summer').rename(
    columns = {'EU_CDntot_summer': 'EU_CD', 'WATER_TYPE_AREAntot_summer': 'WATER_TYPE_AREA'})
df = pd.merge(df, ntot_summer_sfx, on = ['EU_CD','WATER_TYPE_AREA'], suffixes = ['',''])
In [20]:
df.to_csv('D:/comparisonN20062017.txt', header = True, index = None, sep = '\t')
In [ ]:
In [ ]:
# NOTE(review): the cells below are earlier exploratory versions; they
# reference variables never defined in this notebook (ntot_winter_df_2006ref,
# ntot_winter_df_2017ref, change_ntot/change_din columns) and will not run on
# a fresh kernel. Superseded by merge_and_compare() above.
par_list = ['STATUS','EU_CD','YEAR_count','global_EQR','WATER_TYPE_AREA']
ntot_winter = pd.merge(ntot_winter_df_2006ref[par_list], ntot_winter_df_2017ref[par_list], on = ['EU_CD','WATER_TYPE_AREA'], suffixes = ['2006','2017'])
ntot_winter.columns
In [ ]:
conditions = [(din_winter['global_EQR2006'] >= 0.6) & (din_winter['global_EQR2017'] < 0.6)]
choices = [True]
din_winter['change'] = np.select(conditions, choices, default=False)
In [ ]:
ntot_winter.loc[(ntot_winter['WATER_TYPE_AREA'].str.contains('Botten')) & (ntot_winter['change_ntot'] == True)][['EU_CD','STATUS2006','global_EQR2006','STATUS2017','global_EQR2017','change_ntot']].dropna(subset = ['global_EQR2006'])
In [ ]:
din_winter.loc[(din_winter['WATER_TYPE_AREA'].str.contains('Botten')) & (din_winter['change'] == True)][['EU_CD','STATUS2006','global_EQR2006','STATUS2017','global_EQR2017','change_din']].dropna(subset = ['global_EQR2006'])
In [ ]:
df = pd.merge(din_winter, ntot_winter, on = ['EU_CD','WATER_TYPE_AREA'], suffixes = ['din_winter','ntot_winter'])
df.loc[df['WATER_TYPE_AREA'].str.contains('Botten')]
In [ ]:
din_winter.loc[din_winter['WATER_TYPE_AREA2006'].str.contains('Botten')].plot(kind = 'bar',x = 'EU_CD', y = ['global_EQR2006','global_EQR2017'])
In [14]:
# Recalculate the Nutrients quality element for the 2017-refvalues subset.
subset_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias, subset_alias = 'period_2007-2012_refvalues_2017')
print(subset_uuid)
print(w.get_subset_list())
w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(subset_unique_id = subset_uuid, quality_element = 'Nutrients', class_name = 'QualityElementNutrients')
In [26]:
# old
# Recalculates the Nutrients quality element for `subset_alias` via the
# step-3 object and returns its results frame. Slow path -- cf.
# get_QF_results, which reads the cached pickle instead. Uses the
# module-level `ekos`, `w` and `workspace_alias` objects.
def old_get_QF_results(subset_alias):
subset_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias, subset_alias = subset_alias)
#result_path = w.get_step_object(step = 3, subset = subset_uuid).paths['directory_paths']['results']
#df = pickle.load(open(result_path + '/'+ 'nutrients_all_results.pkl', "rb"))
w.get_step_object(step = 3, subset = subset_uuid).calculate_quality_element(subset_unique_id = subset_uuid, quality_element = 'Nutrients', class_name = 'QualityElementNutrients')
#columns = ['VISS_EU_CD','WATER_BODY_NAME','WATER_TYPE_AREA','STATUS_NUTRIENTS','mean_EQR','MEAN_N_EQR','EQR_N_winter_mean','global_EQR_indicator_ntot_winter','global_EQR_indicator_din_winter','global_EQR_indicator_ntot_summer','MEAN_P_EQR','EQR_P_winter_mean','global_EQR_indicator_ptot_winter','global_EQR_indicator_dip_winter','global_EQR_indicator_ptot_summer']
return w.get_step_object(step = 3, subset = subset_uuid).quality_element['Nutrients'].results#[columns]
In [38]:
def get_QF_results(subset_alias):
    """Return the cached Nutrients quality-element results for `subset_alias`.

    Reads nutrients_all_results.pkl from the subset's step-3 results
    directory; assumes calculate_quality_element was run previously for this
    subset. Relies on the module-level `ekos`, `w` and `workspace_alias`.
    """
    subset_uuid = ekos.get_unique_id_for_alias(workspace_alias = workspace_alias, subset_alias = subset_alias)
    result_path = w.get_step_object(step = 3, subset = subset_uuid).paths['directory_paths']['results']
    # Context manager closes the file handle (the original
    # `pickle.load(open(...))` leaked it).
    with open(result_path + '/' + 'nutrients_all_results.pkl', 'rb') as fid:
        df = pickle.load(fid)
    return df
In [34]:
# Slow path: recalculate results for each reference-value subset.
QF_2006 = old_get_QF_results('period_2007-2012_refvalues_2006')
QF_2013 = old_get_QF_results('period_2007-2012_refvalues_2013')
QF_2017 = old_get_QF_results('period_2007-2012_refvalues_2017')
In [ ]:
In [16]:
# Fast path: read the cached pickles; overwrites the frames loaded above.
QF_2006 = get_QF_results('period_2007-2012_refvalues_2006')
QF_2013 = get_QF_results('period_2007-2012_refvalues_2013')
QF_2017 = get_QF_results('period_2007-2012_refvalues_2017')
In [20]:
def merge_df(df1, df2, suffixes):
    """Merge two quality-element result frames on water-body identity columns.

    Parameters: df1/df2 -- frames sharing EU_CD, WATER_TYPE_AREA and
    WATER_BODY_NAME columns; suffixes -- pair applied to overlapping columns.
    Returns the merged DataFrame.
    """
    #par_list = ['STATUS','EU_CD','YEAR_count','global_EQR','WATER_TYPE_AREA']
    df = pd.merge(df1, df2, on = ['EU_CD','WATER_TYPE_AREA','WATER_BODY_NAME'], suffixes = suffixes)
    # BUG FIX: the original built the merge and then discarded it (no return).
    return df
In [35]:
df = pd.merge(QF_2006, QF_2017, on = ['VISS_EU_CD','WATER_TYPE_AREA','WATER_BODY_NAME'], suffixes = ['_2006','_2017'])
list(df.columns)
Out[35]:
In [22]:
def change(df, col, status_col, suffixes):
    """Add a 'change_<status_col>' column classifying the status change
    between the two runs identified by `suffixes`.

    Codes: -2 EQR crossed the 0.6 (good/moderate) boundary downward,
    2 crossed it upward (also the default for any unclassified change),
    0 status unchanged, -1 one class worse, 1 one class better.
    Mutates `df` in place; returns None. Conditions are evaluated in order,
    so the EQR-boundary tests take precedence over the class-step tests.
    """
    conditions = [(df[col+suffixes[0]] >= 0.6) & (df[col+suffixes[1]] < 0.6),
                  (df[col+suffixes[0]] < 0.6) & (df[col+suffixes[1]] >= 0.6),
                  (df[status_col+suffixes[0]] == df[status_col+suffixes[1]]),
                  (df[status_col+suffixes[0]] == 'HIGH') & (df[status_col+suffixes[1]] == 'GOOD'),
                  (df[status_col+suffixes[0]] == 'MODERATE') & (df[status_col+suffixes[1]] == 'POOR'),
                  (df[status_col+suffixes[0]] == 'POOR') & (df[status_col+suffixes[1]] == 'BAD'),
                  # BUG FIX: was 'GODD' -- the GOOD -> HIGH improvement was
                  # never matched and fell through to the default (2).
                  (df[status_col+suffixes[0]] == 'GOOD') & (df[status_col+suffixes[1]] == 'HIGH'),
                  (df[status_col+suffixes[0]] == 'POOR') & (df[status_col+suffixes[1]] == 'MODERATE'),
                  (df[status_col+suffixes[0]] == 'BAD') & (df[status_col+suffixes[1]] == 'POOR')]
    choices = [-2,2,0,-1,-1,-1,1,1,1]
    df['change_'+status_col] = np.select(conditions, choices, default=2)
In [36]:
#old
# Classify changes for the old-style result columns (the *_indicator_* names
# produced by the recalculation path -- presumably matches
# old_get_QF_results output; verify against the results frame).
suffixes = ['_2006','_2017']
change(df, 'mean_EQR', 'STATUS_NUTRIENTS', suffixes)
change(df, 'MEAN_N_EQR', 'STATUS_N', suffixes)
change(df, 'global_EQR_indicator_ntot_summer', 'STATUS_indicator_ntot_summer', suffixes)
change(df, 'EQR_N_winter_mean', 'STATUS_N_winter', suffixes)
change(df, 'global_EQR_indicator_ntot_winter', 'STATUS_indicator_ntot_winter', suffixes)
change(df, 'global_EQR_indicator_din_winter', 'STATUS_indicator_din_winter', suffixes)
change(df, 'MEAN_P_EQR', 'STATUS_P', suffixes)
change(df, 'global_EQR_indicator_ptot_summer', 'STATUS_indicator_ptot_summer', suffixes)
change(df, 'EQR_P_winter_mean', 'STATUS_P_winter', suffixes)
change(df, 'global_EQR_indicator_ptot_winter', 'STATUS_indicator_ptot_winter', suffixes)
change(df, 'global_EQR_indicator_dip_winter', 'STATUS_indicator_dip_winter', suffixes)
In [42]:
# Build the export column list: id columns plus, for every status column in
# k1, the 2006 value, the 2017 value and its change_* classification.
k1 = ['STATUS_NUTRIENTS',
'STATUS_N','STATUS_indicator_ntot_summer','STATUS_N_winter','STATUS_indicator_ntot_winter','STATUS_indicator_din_winter',
'STATUS_P','STATUS_indicator_ptot_summer','STATUS_P_winter','STATUS_indicator_ptot_winter','STATUS_indicator_dip_winter']
suffixes = ['_2006','_2017']
k2 =[]
for L in k1:
k2 = k2+[L+suffixes[0], L+suffixes[1], 'change_'+L]
col_list = ['VISS_EU_CD','WATER_BODY_NAME','WATER_TYPE_AREA'] + k2
print(col_list)
In [44]:
df[col_list].to_csv('//winfs-proj/proj/havgem/LenaV/Projekt/Bedömningsgrunder/Revidering 2017/Utvärdering förändring av refvärden/Nutrients_2006_2017_gammal.txt', columns = col_list, float_format='%.3f', header = True, index = None, sep = '\t')
# NOTE(review): this cell's statements were fused onto a single line
# (corrupted export) and duplicate the change() calls in cell In[36];
# reformatted one statement per line so the cell is valid Python.
suffixes = ['_2006','_2017']
change(df, 'mean_EQR', 'STATUS_NUTRIENTS', suffixes)
change(df, 'MEAN_N_EQR', 'STATUS_N', suffixes)
change(df, 'global_EQR_indicator_ntot_summer', 'STATUS_indicator_ntot_summer', suffixes)
change(df, 'EQR_N_winter_mean', 'STATUS_N_winter', suffixes)
change(df, 'global_EQR_indicator_ntot_winter', 'STATUS_indicator_ntot_winter', suffixes)
change(df, 'global_EQR_indicator_din_winter', 'STATUS_indicator_din_winter', suffixes)
In [153]:
# Classify changes for every indicator and assemble the export column list.
# The six "ok_" indicators listed below additionally export their
# YEAR_count column.
suffixes = ['_2006','_2017']
change_list = ['qe_nutrients',
'indicator_n','indicator_n_summer','indicator_n_winter','indicator_ntot_winter','indicator_din_winter',
'indicator_p','indicator_p_summer','indicator_p_winter','indicator_ptot_winter','indicator_dip_winter']
col_list = ['VISS_EU_CD','WATER_TYPE_AREA','WATER_BODY_NAME']
for ind in change_list:
change(df, 'global_EQR_' + ind, 'STATUS_' + ind, suffixes)
if 'ok_'+ind in ['ok_indicator_n_summer','ok_indicator_ntot_winter','ok_indicator_din_winter',
'ok_indicator_p_summer','ok_indicator_ptot_winter','ok_indicator_dip_winter']:
print(ind)
col_list = col_list + ['STATUS_' + ind + suffixes[0],
'STATUS_' + ind + suffixes[1], 'YEAR_count_' + ind + suffixes[1],
'change_'+'STATUS_' + ind]
else:
col_list = col_list + ['STATUS_' + ind + suffixes[0],
'STATUS_' + ind + suffixes[1],
'change_'+'STATUS_' + ind]
#col_list = col_list + \
# [L+suffixes[0] for L in ['ok_indicator_n_summer','ok_indicator_ntot_winter','ok_indicator_din_winter',
# 'ok_indicator_p_summer','ok_indicator_ptot_winter','ok_indicator_dip_winter']] + \
# [L+suffixes[1] for L in ['ok_indicator_n_summer','ok_indicator_ntot_winter','ok_indicator_din_winter',
# 'ok_indicator_p_summer','ok_indicator_ptot_winter','ok_indicator_dip_winter']]
In [154]:
df[col_list].to_csv('//winfs-proj/proj/havgem/LenaV/Projekt/Bedömningsgrunder/Revidering 2017/Utvärdering förändring av refvärden/Nutrients_2006_2017.txt', columns = col_list, float_format='%.3f', header = True, index = None, sep = '\t')
In [137]:
[L+'_2013'for L in ['ok_indicator_n_summer','ok_indicator_ntot_winter','ok_indicator_din_winter',
'ok_indicator_p_summer','ok_indicator_ptot_winter','ok_indicator_dip_winter']] + \
[L+'_2017'for L in ['ok_indicator_n_summer','ok_indicator_ntot_winter','ok_indicator_din_winter',
'ok_indicator_p_summer','ok_indicator_ptot_winter','ok_indicator_dip_winter']]
Out[137]:
In [69]:
col_list = ['VISS_EU_CD','WATER_TYPE_AREA','WATER_BODY_NAME',
'STATUS_NUTRIENTS'+suffixes[0],'STATUS_NUTRIENTS_2017','change_STATUS_NUTRIENTS',
'STATUS_N'+suffixes[0],'STATUS_N_2017','change_STATUS_N',
'STATUS_ntot_summer'+suffixes[0],'STATUS_ntot_summer_2017','change_STATUS_ntot_summer',
'STATUS_N_winter'+suffixes[0],'STATUS_N_winter_2017','change_STATUS_N_winter',
'STATUS_ntot_winter'+suffixes[0],'STATUS_ntot_winter_2017','change_STATUS_ntot_winter',
'STATUS_din_winter'+suffixes[0],'STATUS_din_winter_2017','change_STATUS_din_winter',
]
df[col_list].head()
In [53]:
df[col_list].to_csv('D:/QF_Nutrients_period2007-2012_ref2013_2017'+'.txt', float_format='%.3f', header = True, index = None, sep = '\t')
In [ ]:
# NOTE(review): the two cells below are inline precursors of the change()
# function defined above; kept for reference, superseded by change().
conditions = [(df['mean_EQR'+suffixes[0]] >= 0.6) & (df['mean_EQR'+suffixes[1]] < 0.6),
(df['mean_EQR'+suffixes[0]] < 0.6) & (df['mean_EQR'+suffixes[1]] >= 0.6),
(df['STATUS_NUTRIENTS'+suffixes[0]] == df['STATUS_NUTRIENTS'+suffixes[1]])]
choices = [-1,1,0]
df['change_STATUS_NUTRIENTS'] = np.select(conditions, choices, default=2)
In [25]:
#.to_csv('D:/Nutrients'+subset_alias+'.txt', float_format='%.3f', header = True, index = None, sep = '\t')
Out[25]:
In [ ]:
conditions = [(df['global_EQR'+suffixes[0]] >= 0.6) & (df['global_EQR'+suffixes[1]] < 0.6),
(df['global_EQR'+suffixes[0]] < 0.6) & (df['global_EQR'+suffixes[1]] >= 0.6),
(df['STATUS'+suffixes[0]] == df['STATUS'+suffixes[1]])]
choices = [-1,1,0]
df['change'] = np.select(conditions, choices, default=2)
In [ ]:
import folium
import geopandas as gpd
In [ ]:
# Raw strings so the backslashes in the Windows paths can never be taken as
# escape sequences (a path segment starting with e.g. 't' or 'n' would
# silently corrupt the path otherwise).
gdf = gpd.read_file(r'D:\LenaV\maps\SVAR\O_Back\moddad med ny typ, fast har blivit lite felaktig\havsomr_y_2012_2.shp')
gdf_HOlsson = gpd.read_file(r'D:\LenaV\maps\SVAR\H_Olsson\Kustvatten_20180213\Kustvatten_2016_4.shp')
In [ ]:
gdf_merged = gdf.merge(din_winter, on='EU_CD')
In [ ]:
gdf_merged.loc[(gdf_merged['WATER_TYPE_AREA2006'].str.contains('Botten')) & (gdf_merged['change'] == True)][['EU_CD','STATUS2006','global_EQR2006','STATUS2017','global_EQR2017','change']]
In [ ]:
# Plot each water body as a diamond marker: color encodes the 2017 EQR class
# (red/orange/yellow/green/blue in 0.2 steps); bodies whose status changed
# get an extra magenta marker drawn on top with a before/after popup.
m = folium.Map(location=[65, 17], zoom_start=6)
for i, row in gdf_merged.to_crs(epsg = 4326).iterrows():#, stations.PROJ.values):
if row.global_EQR2017 < 0.2:
c = 'red'
elif row.global_EQR2017 < 0.4:
c = 'orange'
elif row.global_EQR2017 < 0.6:
c = 'yellow'
elif row.global_EQR2017 < 0.8:
c = 'green'
elif row.global_EQR2017 <= 1:
c = 'blue'
else:
c = None
lon = row.geometry.centroid.x
lat = row.geometry.centroid.y
# The description column is used for popup messages.
marker = folium.RegularPolygonMarker([lat, lon], number_of_sides=4, radius = 5, fill_color = c, color = c,
popup = "STATUS 2006 {}".format(row.STATUS2006) +'__ '+ "STATUS 2017 {}".format(row.STATUS2017) +' ______________ '+ row.NAMN +' '+ row.EU_CD).add_to(m)
if row.change:
#print(row.change, row.STATUS2006, row.STATUS2017, str(lat), str(lon))
marker = folium.RegularPolygonMarker([lat, lon], number_of_sides=4, radius = 5, fill_color = '#bf35a7', color = '#bf35a7',
popup = "STATUS 2006 {}".format(row.STATUS2006) +'---> '+ "STATUS 2017 {}".format(row.STATUS2017) +' _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ '+ row.NAMN +' '+ row.EU_CD).add_to(m)
m
In [ ]:
gdf_HOlsson.columns
In [ ]:
fig, ax = mpl.pyplot.subplots()
gdf.to_crs(epsg = 4326).plot(ax = ax)
gdf_merged.to_crs(epsg = 4326).plot(ax = ax, column = 'STATUS2006', cmap =cmap, norm = norm)
ax.set_ylim([60, 67])
ax.set_xlim([16, 26])
#ax.set_ylim([4000000, 4900000])
#ax.set_xlim([4700000, 5050000])
#mpl.pyplot.savefig(result_path+'din_winter2006.pdf')
ax.get_xlim()
In [ ]:
# Choropleth of the 2006 EQR per water body, plus the same marker loop as the
# cell above. NOTE(review): the marker loop is duplicated verbatim from the
# earlier cell -- candidate for extraction into a helper function.
m = folium.Map(location=[65, 17], zoom_start=6)
m.choropleth(
geo_data=gdf_HOlsson,
name='choropleth',
data=din_winter,
columns=['EU_CD', 'global_EQR2006'],
key_on='feature.properties.VISS_MS_CD',
fill_color='YlGn',
fill_opacity=0.7,
line_opacity=0.2,
legend_name='EQR 2006'
)
folium.LayerControl().add_to(m)
for i, row in gdf_merged.to_crs(epsg = 4326).iterrows():#, stations.PROJ.values):
if row.global_EQR2017 < 0.2:
c = 'red'
elif row.global_EQR2017 < 0.4:
c = 'orange'
elif row.global_EQR2017 < 0.6:
c = 'yellow'
elif row.global_EQR2017 < 0.8:
c = 'green'
elif row.global_EQR2017 <= 1:
c = 'blue'
else:
c = None
lon = row.geometry.centroid.x
lat = row.geometry.centroid.y
# The description column is used for popup messages.
marker = folium.RegularPolygonMarker([lat, lon], number_of_sides=4, radius = 5, fill_color = c, color = c,
popup = "STATUS 2006 {}".format(row.STATUS2006) +'__ '+ "STATUS 2017 {}".format(row.STATUS2017) +' ______________ '+ row.NAMN +' '+ row.EU_CD).add_to(m)
if row.change:
#print(row.change, row.STATUS2006, row.STATUS2017, str(lat), str(lon))
marker = folium.RegularPolygonMarker([lat, lon], number_of_sides=4, radius = 5, fill_color = '#bf35a7', color = '#bf35a7',
popup = "STATUS 2006 {}".format(row.STATUS2006) +'---> '+ "STATUS 2017 {}".format(row.STATUS2017) +' _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ '+ row.NAMN +' '+ row.EU_CD).add_to(m)
m.save('test.html')
In [ ]:
gdf.set_index('EU_CD')['geometry']
In [ ]:
gdf_merged.columns
In [ ]:
import matplotlib as mpl
cmap = mpl.colors.ListedColormap(['red', 'orange','yellow', 'green', 'blue'])
boundaries = [0.2,0.4,0.6,0.8]
norm = mpl.colors.BoundaryNorm(boundaries, cmap.N, clip=True)
In [ ]:
fig = gdf_merged.plot(ax = fig, column = 'OMRTYP', color = None, edgecolor='black', linewidth = 0.01)
gdf_merged.plot(column = 'global_EQR', cmap = cmap, norm = norm, edgecolor='black', linewidth = 0.01)
mpl.pyplot.savefig(result_path+'din_winter2006.pdf')
In [ ]:
gjson = gdf_HOlsson.to_crs(epsg='4326').to_json()
In [ ]:
def my_color_function(feature):
    """Maps BAD to red, POOR to orange, MODERATE to yellow, GOOD to green and HIGH to blue."""
    #print(feature['properties']['VISS_MS_CD'])
    #print(feature['properties'].keys())
    status_to_color = {'BAD': 'red',
                       'POOR': 'orange',
                       'MODERATE': 'yellow',
                       'GOOD': 'green',
                       'HIGH': 'blue'}
    # Look up the 2006 status for this GeoJSON feature's water-body id in
    # the module-level din_winter frame.
    water_body_id = feature['properties']['VISS_MS_CD']
    matches = din_winter.loc[din_winter['EU_CD'] == water_body_id, 'STATUS2006'].values
    # No row for this water body -> neutral light-blue fill.
    if len(matches) == 0:
        return '#CADCEA'
    # Any unrecognised status value falls back to black.
    return status_to_color.get(matches[0], 'black')
In [ ]:
step = folium.StepColormap(['red','orange','yellow','green','blue'], vmin=0, vmax=1., caption='step')
step
m = folium.Map(location=[60, 17], tiles='cartodbpositron', zoom_start=5)
folium.GeoJson(
gdf_HOlsson,
style_function=lambda feature: {
'fillColor': step(din_winter.dropna(subset = ['global_EQR2006']).loc[din_winter['EU_CD'] == feature['properties']['VISS_MS_CD'], 'global_EQR2006'].values),
#'fillColor': my_color_function(feature),
'color' : 'black',
'weight' : 2,
'dashArray' : '5, 5'
}
).add_to(m)
m.save('test2.html')
In [ ]:
din_winter_df_ix = din_winter_df.set_index('EU_CD')['global_EQR']
din_winter_df_ix.head()
din_winter_df_ix['SE584340-174401']
In [ ]:
def my_longlat(longlat):
    """Convert a packed DDMMmm coordinate (e.g. 654125) to decimal degrees.

    Assumes the first two characters are whole degrees, the next two are
    minutes and the remainder is hundredths of a minute -- TODO confirm
    against the data source.
    """
    text = str(longlat)
    degrees = float(text[:2])
    minutes = float(text[2:4]) + float(text[4:]) / 100
    return degrees + minutes / 60
In [ ]:
my_longlat(654125)
In [ ]:
m
In [ ]:
gjson#['features'][0]['properties']['EU_CD']
In [ ]:
# Set center for the map.
center_lat = 60
center_long = 17
# Create map object.
m = folium.Map(location=[center_lat, center_long], zoom_start=5)
m.choropleth(geo_data = gjson, key_on = 'feature.properties.EU_CD', fill_color = 'YlGnBu')
#m = folium.Map([43,-100], tiles='cartodbpositron', zoom_start=4)
m
In [ ]: