Landscape elevational connectivity analysis

Here we perform the same operations as in notebook 1-computeLEC, but over a series of files.


In [ ]:
import numpy as np
import pandas as pd
import cmocean as cmo

from pyevtk.hl import gridToVTK

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import RegularGridInterpolator

import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter(action = "ignore", category = FutureWarning)

%config InlineBackend.figure_format = 'svg'
%matplotlib inline

Read dataset


In [ ]:
def combineDataset(liststep=None,catchmentID=None,folder=None,filename=None,gridExt='.csv',LECExt='LEC.csv',
                   combinedExt='comb.csv'):
    """Combine the elevation grid and LEC files for each time step, write the merged
    CSV and a VTK file for visualisation, and optionally extract the values belonging
    to a given catchment."""

    for k in range(len(liststep)):
        step = liststep[k]
        file1 = folder+'/'+filename+str(step)+gridExt
        file2 = folder+'/'+filename+str(step)+LECExt
        outfile = folder+'/'+filename+str(step)+combinedExt
        viewfile = folder+'/'+filename+str(step) 

        # First the elevation file
        df = pd.read_csv(file1, sep=',', engine='c',
                        header=0, na_filter=False, dtype=float,
                        low_memory=False)
        X = df['X']
        Y = df['Y']
        Z = df['Z']
        B = df['Basin']
        dx = ( X[1] - X[0] )
        nx = int((X.max() - X.min())/dx+1)
        ny = int((Y.max() - Y.min())/dx+1)

        # Second the LEC file
        df2 = pd.read_csv(file2, sep=',', engine='c',
                        header=0, na_filter=False, dtype=float,
                        low_memory=False)
        LEC = df2['LEC']
        X = np.reshape(X,(ny,nx))
        Y = np.reshape(Y,(ny,nx))
        Z = np.reshape(Z,(ny,nx))
        Basin = np.reshape(B,(ny,nx))
        LEC = np.reshape(LEC,(ny,nx))
        nLEC = LEC/LEC.max()

        # Then write the combined dataset
        df3 = pd.DataFrame({'X':X.flatten(),'Y':Y.flatten(),
                            'Z':Z.flatten(),'LEC':LEC.flatten(), 'nLEC':nLEC.flatten(), 'Basin':Basin.flatten()})    
        df3.to_csv(outfile, columns=['X', 'Y', 'Z', 'LEC', 'nLEC', 'Basin'], sep=',', index=False , header=1)

        # Finally create a vtk file for visualisation
        Xv = np.zeros((X.shape[0],X.shape[1],2))
        Yv = np.zeros((X.shape[0],X.shape[1],2))
        Zv = np.zeros((X.shape[0],X.shape[1],2))
        LECv = np.zeros((X.shape[0],X.shape[1],2))
        normLEC = np.zeros((X.shape[0],X.shape[1],2))
        Basinv = np.zeros((X.shape[0],X.shape[1],2))
        Xv[:,:,0] = X
        Yv[:,:,0] = Y
        Zv[:,:,0] = Z
        LECv[:,:,0] = LEC
        normLEC[:,:,0] = LEC/LEC.max()
        Basinv[:,:,0] = Basin
        Xv[:,:,1] = X
        Yv[:,:,1] = Y
        Zv[:,:,1] = Z
        LECv[:,:,1] = LEC
        normLEC[:,:,1] = LEC/LEC.max()
        Basinv[:,:,1] = Basin
        gridToVTK(viewfile, Xv, Yv, Zv, pointData = {"elevation" : Zv, "LEC" :LECv, "nLEC" :normLEC, "BasinID" :Basinv})
        
        if catchmentID is not None:
            basinID = catchmentID[k]
            basinfile = folder+'/'+filename+str(step)+'basin'+combinedExt
            bIDr,bIDl = np.where(Basin==basinID)
            df4 = pd.DataFrame({'X':X[bIDr,bIDl].flatten(),'Y':Y[bIDr,bIDl].flatten(),
                                'Z':Z[bIDr,bIDl].flatten(),'LEC':LEC[bIDr,bIDl].flatten(), 
                                'nLEC':nLEC[bIDr,bIDl].flatten()})    
            df4.to_csv(basinfile, columns=['X', 'Y', 'Z', 'LEC', 'nLEC'], sep=',', index=False , header=1)
    return

First we define the folder, file names and extensions that will be read and written:


In [ ]:
folder = 'output'
filename = 'grid'
gridExt = '.csv'
LECExt = 'LEC-con.csv'
combinedExt = 'comb.csv'

We then specify the time steps that will be analysed. If specific catchment values need to be extracted, the corresponding catchment ID for each time step also needs to be given, based on notebook 0-regridBadlands.


In [ ]:
liststep = np.linspace(5,200,40).astype(int)
catchmentID = [372,180,260,253,87,7,243,17,341,311,61,238,216,283,191,191,
               313,36,121,235,223,212,341,17,307,227,171,225,205,220,54,47,
               65,118,237,199,119,274,164,175]

liststepOro = np.linspace(5,180,36).astype(int)
catchmentIDOro = [172,75,76,304,357,305,321,123,174,34,98,212,354,351,237,304,
                  174,199,368,63,364,71,10,23,58,217,65,63,327,35,286,53,312,
                  320,61,138]
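
As a quick optional check (not part of the original workflow), we can verify that each analysed time step has a matching catchment ID:


In [ ]:
# Optional sanity check: one catchment ID per analysed time step
assert len(liststep) == len(catchmentID)
assert len(liststepOro) == len(catchmentIDOro)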

Then we run the combineDataset function on the first series:


In [ ]:
combineDataset(liststep,catchmentID,folder,filename,gridExt,LECExt,combinedExt)
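
The orographic series defined above (liststepOro, catchmentIDOro) can be processed in the same way. Note that the output folder name below is only a placeholder assumption; replace it with the folder that actually contains the orographic grid files:


In [ ]:
# 'outputOro' is a hypothetical folder name, not defined elsewhere in this notebook
folderOro = 'outputOro'
combineDataset(liststepOro,catchmentIDOro,folderOro,filename,gridExt,LECExt,combinedExt)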

Plotting elevation

Analysis of the elevation for a given time step


In [ ]:
step = 100
folder = 'outputKe'
filename = 'grid'
combinedExt = 'comb.csv'
loadfile = folder+'/'+filename+str(step)+combinedExt

We first load the dataset:


In [ ]:
df = pd.read_csv(loadfile, sep=',', engine='c',
                header=0, na_filter=False, dtype=float,
                low_memory=False)
X = df['X']
Y = df['Y']
Z = df['Z']
dx = ( X[1] - X[0] )
nx = int((X.max() - X.min())/dx+1)
ny = int((Y.max() - Y.min())/dx+1)
#
Z = np.reshape(Z,(ny,nx))
print('Resolution ', dx)

In [ ]:
def plotElev(elev=None,color=cmo.cm.delta,title='Elevation',figsize=(6,6),extend=None,save=None):
    
    fig = plt.figure(figsize=figsize)
    ax1 = plt.gca()

    ax1.set_title(title, fontsize=10)
    if extend is None:
        im1 = plt.imshow(np.flipud(elev), vmin=elev.min(), vmax=elev.max(), 
                         cmap=color, aspect='auto')
    else:
        im1 = plt.imshow(np.flipud(elev[extend[0]:extend[1],extend[2]:extend[3]]), 
                         vmin=elev.min(), vmax=elev.max(), 
                         cmap=color, aspect='auto')
        
    ax1.tick_params(axis='x', labelsize=8)
    ax1.tick_params(axis='y', labelsize=8)
    ax1.grid(False)

    divider1 = make_axes_locatable(ax1)
    cax1 = divider1.append_axes("right", size="5%", pad=0.05)
    cbar1 = plt.colorbar(im1,cax=cax1)
    cbar1.ax.tick_params(labelsize=9) 

    plt.tight_layout()
    plt.show()
    if save is not None:
        fig.savefig(save,dpi=200, bbox_inches='tight')
        
    return

We now visualise the elevation using the plotElev function:


In [ ]:
plotElev(elev=Z,color=cmo.cm.delta,title='Elevation',figsize=(6,6),extend=None,save=None)
plotElev(elev=Z,color=cmo.cm.delta,title='Elevation',figsize=(6,6),extend=[330,430,150,250],save=None)

Extracting a sub-domain from the region

We first define the row/column index limits ([row start, row end, column start, column end]) of each sub-domain of interest:


In [ ]:
domainA = [60,110,80,130]
domainB = [230,330,200,300]
domainC = [330,430,150,250]
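
Before exporting, these sub-domains can optionally be previewed with the plotElev function and its extend argument (domainC was already shown above); this is a minimal sketch assuming the 2D Z array loaded in the previous section is still in memory:


In [ ]:
# Optional preview of the other two sub-domains using the extend argument of plotElev
plotElev(elev=Z,color=cmo.cm.delta,title='Sub-domain A',figsize=(6,6),extend=domainA,save=None)
plotElev(elev=Z,color=cmo.cm.delta,title='Sub-domain B',figsize=(6,6),extend=domainB,save=None)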

In [ ]:
def writeSubDomain(filename=None,domain=None):
    # Uses the global X, Y, Z, LEC and nLEC arrays loaded below;
    # domain = [row start, row end, column start, column end]
    df = pd.DataFrame({'X':X[domain[0]:domain[1],domain[2]:domain[3]].flatten(),
                       'Y':Y[domain[0]:domain[1],domain[2]:domain[3]].flatten(),
                       'Z':Z[domain[0]:domain[1],domain[2]:domain[3]].flatten(),
                       'LEC':LEC[domain[0]:domain[1],domain[2]:domain[3]].flatten(),
                       'nLEC':nLEC[domain[0]:domain[1],domain[2]:domain[3]].flatten()})

    df.to_csv(filename, columns=['X', 'Y', 'Z', 'LEC', 'nLEC'], sep=',', index=False , header=1)

    return

In [ ]:
step = 100
folder = 'outputKe'
filename = 'grid'
combinedExt = 'comb.csv'
loadfile = folder+'/'+filename+str(step)+combinedExt

We load the dataset:


In [ ]:
df = pd.read_csv(loadfile, sep=',', engine='c',
                header=0, na_filter=False, dtype=float,
                low_memory=False)
X = df['X']
Y = df['Y']
Z = df['Z']
B = df['Basin']
LEC = df['LEC']
nLEC = df['nLEC']

dx = ( X[1] - X[0] )
nx = int((X.max() - X.min())/dx+1)
ny = int((Y.max() - Y.min())/dx+1)

X = np.reshape(X,(ny,nx))
Y = np.reshape(Y,(ny,nx))
Z = np.reshape(Z,(ny,nx))
Basin = np.reshape(B,(ny,nx))
LEC = np.reshape(LEC,(ny,nx))
nLEC = np.reshape(nLEC,(ny,nx))

And write the sub-domain values to files:


In [ ]:
fileA = folder+'/'+filename+str(step)+'domainA.csv'
fileB = folder+'/'+filename+str(step)+'domainB.csv'
fileC = folder+'/'+filename+str(step)+'domainC.csv'
writeSubDomain(filename=fileA,domain=domainA)
writeSubDomain(filename=fileB,domain=domainB)
writeSubDomain(filename=fileC,domain=domainC)
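
As an optional check (not in the original workflow), one of the exported files can be read back to confirm its extent and contents:


In [ ]:
# Optional check: read one sub-domain file back and inspect its coordinates and elevation range
dfA = pd.read_csv(fileA)
print(dfA.shape)
print(dfA[['X','Y','Z']].describe())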

LEC & Elevation distribution

Elevation frequency as a function of site elevation


In [ ]:
step = 100
folder = 'outputKe'
filename = 'grid'
combinedExt = 'comb.csv'
loadfile = folder+'/'+filename+str(step)+combinedExt

data = pd.read_csv(loadfile)

ax = data.Z.plot(kind='hist', color='Blue', alpha=0.1, bins=80, xlim=(data.Z.min(),data.Z.max()))
plt.xlabel('Elevation')
data.Z.plot(kind='density', figsize=(10,6), ax=ax, xlim=(data.Z.min(),data.Z.max()),
               linewidth=4,title='Elevation frequency as function of site elevation',secondary_y=True,y='Density')
ax.set_ylabel('Frequency count')
plt.ylabel('Density')
plt.show()

Landscape elevational connectivity as a function of elevation


In [ ]:
ax = data.plot(kind='scatter', x='Z', y='LEC', c='white', edgecolors='lightgray', figsize=(10,6), 
          xlim=(data['Z'].min(),data['Z'].max()), s=5, title='Landscape elevational connectivity periodic boundary fully connected')
plt.xlabel('Elevation')
ax2=ax.twinx()  
data.Z.plot(kind='density',secondary_y=True, ax=ax2, xlim=(data['Z'].min(),data['Z'].max()),
               linewidth=2)
ax.set_ylabel('LEC')
plt.ylabel('Density')
plt.show()

Combined plots


In [ ]:
step = 5
folder = 'outputKe'
filename = 'grid'
combinedExt = 'comb.csv'
loadfile = folder+'/'+filename+str(step)+combinedExt

data = pd.read_csv(loadfile)

nbins = 40

fig, ax = plt.subplots(figsize=(10, 6))

# Binned mean and standard deviation of the normalised LEC as a function of elevation
w = data.nLEC
n, _ = np.histogram(data.Z, bins=nbins)
sy, _ = np.histogram(data.Z, bins=nbins, weights=w)
sy2, _ = np.histogram(data.Z, bins=nbins, weights=w*w)
mean = sy / n
std = np.sqrt(sy2/n - mean*mean)

plt.ylabel('normalised LEC')
plt.xlabel('Elevation')

plt.plot((_[1:] + _[:-1])/2, mean,color='steelblue',zorder=2,linewidth=2)

num_samples = 25000
idx = np.random.choice(np.arange(len(data.Z)), num_samples)
plt.scatter(data.Z[idx], w[idx], c='w',edgecolors='lightgray', zorder=0,alpha=1.,s=5)
ax.set_xlim(data['Z'].min(),data['Z'].max())
ax.set_ylim(0,1)

(_, caps, _) = plt.errorbar(
    (_[1:] + _[:-1])/2, mean, yerr=std, fmt='-o', c='steelblue',markersize=4, capsize=3,zorder=1,linewidth=1.25)

for cap in caps:
    cap.set_markeredgewidth(1)


ax2=ax.twinx()  
data.Z.plot(kind='density',secondary_y=True, ax=ax2, xlim=(data['Z'].min(),data['Z'].max()),
               color='coral',linewidth=2,zorder=0)
 

plt.ylabel('Density', color='coral')
# plt.show()

figname = folder+'/'+filename+str(step)+'.png'
# fig.savefig(figname, dpi=1000)

In [ ]:
import mpl_scatter_density

step = 45
folder = 'outputKe'
filename = 'grid'
combinedExt = 'comb.csv'
loadfile = folder+'/'+filename+str(step)+combinedExt

data = pd.read_csv(loadfile)

nbins = 40

fig, ax = plt.subplots(figsize=(10, 6))
ax11 = fig.add_subplot(1, 1, 1, projection='scatter_density')

w = data.nLEC
n, _ = np.histogram(data.Z, bins=nbins)
sy, _ = np.histogram(data.Z, bins=nbins, weights=w)
sy2, _ = np.histogram(data.Z, bins=nbins, weights=w*w)
mean = sy / n
std = np.sqrt(sy2/n - mean*mean)

plt.ylabel('normalised LEC')
plt.xlabel('Elevation')
#mediumseagreen
plt.plot((_[1:] + _[:-1])/2, mean,color='k',zorder=2,linewidth=2)

num_samples = 25000
idx = np.random.choice(np.arange(len(data.Z)), num_samples)
#plt.scatter(data.Z[idx], w[idx], c='w',edgecolors='lightgray', zorder=0,alpha=1.,s=5)
ax11.scatter_density(data.Z, w, cmap=cmo.cm.gray_r) #color='grey')
ax.set_xlim(data['Z'].min(),data['Z'].max())
ax.set_ylim(0,1)
ax11.set_ylim(0,1)

(_, caps, _) = plt.errorbar(
    (_[1:] + _[:-1])/2, mean, yerr=std, fmt='-o', c='k',markersize=4, capsize=3,zorder=1,linewidth=1.25)

for cap in caps:
    cap.set_markeredgewidth(1)


ax2=ax.twinx()  
data.Z.plot(kind='density',secondary_y=True, ax=ax2, xlim=(data['Z'].min(),data['Z'].max()),
               color='coral',linewidth=2,zorder=0)
 

plt.ylabel('Density', color='coral')
# plt.show()

figname = folder+'/'+filename+str(step)+'.png'
fig.savefig(figname, dpi=1000)
