In [23]:
%matplotlib inline
import os
import json
import codecs
import pickle
import numpy as np
import toolbox as tb
import matplotlib as mpl
import matplotlib.pyplot as plt
import openquake.hazardlib.geo.geodetic as geo
import hmtk.sources as src
import hmtk.plotting.seismicity.catalogue_plots as cp
from string import Template
from collections import OrderedDict
from hmtk.plotting.mapping import HMTKBaseMap
from hmtk.parsers.catalogue import CsvCatalogueParser
from hmtk.parsers.source_model.nrml04_parser import nrmlSourceModelParser
from hmtk.seismicity.selector import CatalogueSelector
In [24]:
# Marmaris, Turkey
target_lon = 28.25
target_lat = 36.85
# load fault data in JSON format
source_file = "Eastern_Mediterranean_Fault_Traces.geojson"
DATA = json.load(open(source_file, "r"))
DATA = OrderedDict([(dat["properties"]["IDSOURCE"], dat)
for dat in DATA["features"]])
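Each feature in the GeoJSON carries the fault trace under "geometry" and the source parameters under "properties". A minimal sketch of the structure the rest of the notebook relies on, written as a Python literal (the values below are invented for illustration; the key names are the ones actually read later):
{"type": "Feature",
 "properties": {"IDSOURCE": "TRCS394", "SOURCENAME": "...",
                "DIPMIN": 30.0, "DIPMAX": 60.0,       # degrees
                "RAKEMIN": -120.0, "RAKEMAX": -60.0,  # degrees
                "MINDEPTH": 0.0, "MAXDEPTH": 20.0,    # km
                "SRMIN": 0.1, "SRMAX": 1.0},          # mm/yr
 "geometry": {"type": "LineString",
              "coordinates": [[28.10, 36.90], [28.20, 36.80]]}}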
In [25]:
# pretty-printer for a fault's properties (display only; no return value)
def show_fault_properties(flt_id):
fault = DATA[flt_id]
# pretty print
print "ID = %s" % fault["properties"]["IDSOURCE"]
print "Name = %s" % fault["properties"]["SOURCENAME"]
print "Trace (long. lat.):"
for crd in fault["geometry"]["coordinates"]:
print " %.6f %.6f" % (crd[0], crd[1])
print "Dip = %s - %s" % (str(fault["properties"]["DIPMIN"]),
str(fault["properties"]["DIPMAX"]))
print "Rake = %s - %s" % (str(fault["properties"]["RAKEMIN"]),
str(fault["properties"]["RAKEMAX"]))
print "Upper Seismogenic Depth = %s km" % \
(str(fault["properties"]["MINDEPTH"]))
print "Lower Seismogenic Depth = %s km" % \
(str(fault["properties"]["MAXDEPTH"]))
print "Slip Rate = %s - %s mm/yr" % \
(str(fault["properties"]["SRMIN"]),
str(fault["properties"]["SRMAX"]))
In [26]:
show_fault_properties('TRCS394') # a fault of interest because it is nearby
In [27]:
# loop over faults; compute and append statistics of interest
mu = 30e9 # [Pa] assumed crustal rigidity (shear modulus) for moment-rate estimates
print "%d faults total" % len(DATA)
print "Adding some statistics ..."
for fault in DATA:
fault_lonlat = np.array(DATA[fault]["geometry"]["coordinates"])
min_depth = np.array(DATA[fault]["properties"]["MINDEPTH"])
s_min = DATA[fault]["properties"]["SRMIN"]
s_max = DATA[fault]["properties"]["SRMAX"]
d_min = DATA[fault]["properties"]["MINDEPTH"]
d_max = DATA[fault]["properties"]["MAXDEPTH"]
phi_min = DATA[fault]["properties"]["DIPMIN"]
phi_max = DATA[fault]["properties"]["DIPMAX"]
L_max = geo.distance(fault_lonlat[0,0], fault_lonlat[0,1], 0,
fault_lonlat[-1,0], fault_lonlat[-1,1], 0)
R_all = geo.distance(target_lon, target_lat, 0,
fault_lonlat[:,0], fault_lonlat[:,1], min_depth)
R_min = np.min(R_all)
R_max = np.max(R_all)
R_mid = (R_max + R_min)/2
    # dips are given in degrees; convert to radians before taking the sine
    W_max = (d_max - d_min)/np.abs(np.sin(np.radians(phi_min)))
    W_mid = (d_max - d_min)/np.abs(np.sin(np.radians((phi_max + phi_min)/2.)))
s_mid = (s_max + s_min)/2
L_mid = L_max/2
M0_rate_max = mu*L_max*1e3*W_max*1e3*s_max*1e-3
M0_rate_mid = mu*L_mid*1e3*W_mid*1e3*s_mid*1e-3
M0_rate_hybrid = mu*L_max*1e3*W_mid*1e3*s_mid*1e-3
danger = M0_rate_hybrid/R_min
DATA[fault]["statistics"] = {
"R_min": R_min,
"R_max": R_max,
"R_mid": R_mid,
"L_max": L_max,
"W_max": W_max,
"W_mid": W_mid,
"s_mid": s_mid,
"M0_rate_max": M0_rate_max,
"M0_rate_mid": M0_rate_mid,
"M0_rate_hybrid": M0_rate_mid,
"danger": danger,
}
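The three moment-rate columns are all instances of the standard relation M0_rate = mu * A * slip_rate, with fault area A = L*W. Lengths are in km and slip rates in mm/yr, hence the 1e3/1e3/1e-3 factors converting to SI. As a sanity check on those factors (numbers invented for illustration): a 100 km long, 15 km wide fault slipping 5 mm/yr gives
M0_rate = 3e10 Pa * (100e3 m) * (15e3 m) * (5e-3 m/yr) = 2.25e17 N m/yr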
In [28]:
base = os.path.basename(source_file)
fault_file_name = os.path.splitext(base)[0] + '.pkl'
with open(fault_file_name, "wb") as output:
pickle.dump(DATA, output)
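The pickle can be reloaded in a later session without re-deriving the statistics; a minimal sketch:
# restore the fault dictionary, statistics included
with open(fault_file_name, "rb") as f:
    DATA = pickle.load(f)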
In [29]:
# rank the faults by a chosen statistic and keep the top n
def pick_dangerous_faults(data, key, n, quiet=False, R_max=np.inf):
    # for distance, smaller means more dangerous; for everything else, larger
    reverse = (key != 'R_min')
# sort faults by statistic of choice
data_sorted = sorted(data.items(),
key=lambda item: item[1]["statistics"][key],
reverse=reverse)
# enforce a maximum distance, to match a catalogue
data_sorted = [item for item in data_sorted if item[1]["statistics"]["R_min"] < R_max]
top = OrderedDict(data_sorted[:n])
    # print statistics for the remaining "top n" faults
    if not quiet:
        print " fault, R_min, L_max, W_mid, s_mid, M0_rate_hybrid, danger"
        for (flt_id, fault) in top.items():
            R_min = fault["statistics"]["R_min"]
            L_max = fault["statistics"]["L_max"]
            W_mid = fault["statistics"]["W_mid"]
            s_mid = fault["statistics"]["s_mid"]
            M0_rate_hybrid = fault["statistics"]["M0_rate_hybrid"]
            danger = fault["statistics"]["danger"]
            print "%s, %5.1f, %5.1f, %5.1f, %5.1f, %7.1e, %7.1e" % \
                (flt_id, R_min, L_max, W_mid, s_mid, M0_rate_hybrid, danger)
    return top
The ranking statistic combines the full fault length with mid-range width and slip rate:

$$W_{\mathrm{mid}} = \frac{d_{\max} - d_{\min}}{\sin\big((\phi_{\max} + \phi_{\min})/2\big)}, \qquad s_{\mathrm{mid}} = \frac{s_{\max} + s_{\min}}{2}, \qquad \dot M_{0,\mathrm{hybrid}} = \mu\,(L_{\max}\cdot 10^{3})\,(W_{\mathrm{mid}}\cdot 10^{3})\,(s_{\mathrm{mid}}\cdot 10^{-3})$$

with lengths in km and slip rates in mm/yr converted to SI inside the product.
In [30]:
closest_faults = pick_dangerous_faults(
DATA, 'R_min', 5);
In [31]:
R_max = 300 # km
most_dangerous_faults = pick_dangerous_faults(
DATA, 'danger', 5, R_max=R_max);
In [32]:
template_file = "SimpleFaultTemplate.xml"
with open(template_file, 'r') as myfile:
template_string = myfile.read()
template_string
Out[32]:
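The Out[32] display of the template is not reproduced here. Judging from the placeholder names passed to template.substitute() below and the header/footer strings in concat_xml_faults(), SimpleFaultTemplate.xml is assumed to look roughly like this NRML 0.4 simple-fault source (the actual file may differ in layout):
<?xml version='1.0' encoding='utf-8'?>
<nrml xmlns:gml="http://www.opengis.net/gml"
      xmlns="http://openquake.org/xmlns/nrml/0.4">

    <sourceModel name="$SOURCEMODELNAME">
        <simpleFaultSource id="$IDSOURCE" name="$SOURCENAME"
                           tectonicRegion="$REGION">
            <simpleFaultGeometry>
                <gml:LineString>
                    <gml:posList>
$TRACE
                    </gml:posList>
                </gml:LineString>
                <dip>$DIP</dip>
                <upperSeismoDepth>$MINDEPTH</upperSeismoDepth>
                <lowerSeismoDepth>$MAXDEPTH</lowerSeismoDepth>
            </simpleFaultGeometry>
            <magScaleRel>$SCALEREL</magScaleRel>
            <ruptAspectRatio>$ASPECTRATIO</ruptAspectRatio>
            <$MFD/>
            <rake>$RAKE</rake>
        </simpleFaultSource>
    </sourceModel>
</nrml>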
In [33]:
def fault_json_to_xml(fault_json):
fault_file_list = []
region = "Active Shallow Crust"
coord_format = " %10.6f %10.6f\n"
scaling_relation = "WC1994"
aspect_ratio = 2.0
MFD = 'truncGutenbergRichterMFD aValue="4.0" bValue="1.0" minMag="4.0" maxMag="8.5"'
for (key, fault) in fault_json.items():
output_file = key + '.xml'
fault_file_list.append(output_file)
# no need to compute stats & write file if it already exists
# we don't want to overwrite somebody else's edits
if os.path.isfile(output_file): continue
fault_lonlat = np.array(fault["geometry"]["coordinates"])
coord_string = [(coord_format % tuple(coords)) for coords in fault_lonlat]
coord_string = (''.join(coord_string))[:-1]
dip_min = fault["properties"]["DIPMIN"]
dip_max = fault["properties"]["DIPMAX"]
rake_min = fault["properties"]["RAKEMIN"]
rake_max = fault["properties"]["RAKEMAX"]
with open(template_file, 'r') as myfile:
template_string = myfile.read()
template = Template(template_string)
output_string = template.substitute(
IDSOURCE=key,
SOURCEMODELNAME="$SOURCEMODELNAME",
SOURCENAME=fault["properties"]["SOURCENAME"],
REGION=region,
TRACE=coord_string,
DIP="%.1f" % ((dip_min + dip_max)/2),
MINDEPTH="%.1f" % fault["properties"]["MINDEPTH"],
MAXDEPTH="%.1f" % fault["properties"]["MAXDEPTH"],
SCALEREL=scaling_relation,
ASPECTRATIO="%.3g" % aspect_ratio,
MFD=MFD,
RAKE="%.1f" % ((rake_min + rake_max)/2),
)
with open(output_file, 'w') as myfile:
myfile.write(output_string)
return fault_file_list
In [34]:
def concat_xml_faults(fault_xml_list, source_model_name, output_xml):
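    # concatenating single-source NRML files leaves, at every junction, one
    # file's closing tags immediately followed by the next file's XML header;
    # joint_string reproduces exactly that sequence, so stripping every
    # occurrence below merges the files under one header and footer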
joint_string = '\n </sourceModel>\n' \
+ '</nrml>\n<?xml version=\'1.0\' encoding=\'utf-8\'?>\n' \
+ '<nrml xmlns:gml="http://www.opengis.net/gml"\n' \
+ ' xmlns="http://openquake.org/xmlns/nrml/0.4">\n\n' \
+ ' <sourceModel name="$SOURCEMODELNAME">\n'
# read and concatenate all of the files
faults = ''
for file_name in fault_xml_list:
with open(file_name, 'r') as myfile:
fault = myfile.read()
faults = faults + fault
    # remove redundant start/end XML at each junction
faults = faults.replace(joint_string,'')
# give the model a name
template = Template(faults)
output_string = template.substitute(
SOURCEMODELNAME=source_model_name)
# ship it
print "Writing results to ", output_xml
with open(output_xml, 'w') as myfile:
myfile.write(output_string)
In [35]:
# other faults to add
other_sources_list = ['HELL001.xml', 'HELL002.xml', 'CYPR001.xml', 'CYPR002.xml', 'AREA001.xml']
#other_sources_list = ['HELL001.xml', 'CYPR001.xml']
n_faults = 10
In [36]:
# write "dangerous" source model
n_total = n_faults + len(other_sources_list)
if R_max != np.inf:  # compare by value, not identity
dangerous_sources_file = "MarmarisDangerousSources%d_%gkm.xml" \
% (n_total, R_max)
sources_name = "Top %d dangerous sources within %g km of Marmaris" \
% (n_total, R_max)
else:
dangerous_sources_file = "MarmarisDangerousSources%d.xml" \
% n_total
sources_name = "Top %d sources potentially dangerous to Marmaris" \
% n_total
faults = pick_dangerous_faults(DATA, 'danger', n_faults, quiet=False, R_max=R_max)
fault_file_list = fault_json_to_xml(faults)
concat_xml_faults(fault_file_list + other_sources_list, sources_name, dangerous_sources_file)
In [37]:
# write "nearby" source model
nearby_sources_file = "MarmarisNearbySources%d.xml" \
% n_total
sources_name = "Top %d sources nearest to Marmaris.xml" \
% n_total
faults = pick_dangerous_faults(DATA, 'R_min', n_faults, quiet=False)
fault_file_list = fault_json_to_xml(faults)
concat_xml_faults(fault_file_list + other_sources_list, sources_name, nearby_sources_file)
In [38]:
catalogue_filename = '../sources/Marmaris-catalogue1_exclude_homogenised_cleaned_declustered.csv'
parser = CsvCatalogueParser(catalogue_filename) # From .csv to hmtk
# Read and process the catalogue content in a variable called "catalogue"
catalogue = parser.read_file()
print 'Minimum magnitude: ', np.min(catalogue.data['magnitude'])
print 'Maximum magnitude: ', np.max(catalogue.data['magnitude'])
print 'Number of events: ', len(catalogue.data['magnitude'])
print 'Catalogue keys: '
print catalogue.data.keys()
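As an aside (not part of the original workflow), the same vectorised geo.distance call used for the fault traces can count catalogue events near the target:
# epicentral distance [km] from the target to every event in the catalogue
event_dists = geo.distance(target_lon, target_lat, 0.,
                           catalogue.data['longitude'],
                           catalogue.data['latitude'], 0.)
print 'Events within %g km of Marmaris: %d' % (R_max, np.sum(event_dists < R_max))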
In [39]:
bin_width = 5  # km
normalisation = False
bootstrap = None
depth_bins = np.arange(0.,
np.max(catalogue.data['depth']) + bin_width,
bin_width)
depth_hist = catalogue.get_depth_distribution(depth_bins,
normalisation=normalisation,
bootstrap=bootstrap)
plt.bar(depth_bins[:-1],
depth_hist,
width=0.9*bin_width,
edgecolor='k')
plt.xlabel('Depth (km)', fontsize='large')
if normalisation:
plt.ylabel('Probability Mass Function', fontsize='large')
else:
plt.ylabel('Count')
plt.title('Depth Histogram', fontsize='large')
plt.yscale('log')
plt.xlim((0, 150))
plt.ylim((10, 1e5))
Out[39]:
In [40]:
cp.plot_magnitude_time_density(catalogue, 0.2, 2)  # 0.2 magnitude-unit by 2 yr bins
In [41]:
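# completeness table: each row is [year, Mc], i.e. the catalogue is treated
# as complete for M >= Mc from that year onward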
completeness_table_a = np.array([[2004, 3.2],
[1992, 4.8],
[1972, 4.8],
[1900, 6.5]])
cp.plot_observed_recurrence(catalogue, completeness_table_a, 0.1)
In [42]:
# Map configuration
span = 1.5 # deg
map_config = {"min_lon": target_lon - 2*span,
"max_lon": target_lon + 2*span,
"min_lat": target_lat - span,
"max_lat": target_lat + span, "resolution": "l"}
In [43]:
# Creating a basemap
basemap1 = HMTKBaseMap(map_config, 'Catalogue and nearest sources')
# Adding the catalogue to the basemap
basemap1.add_catalogue(catalogue, overlay=True)
# Reading the models
parser = nrmlSourceModelParser(nearby_sources_file)
# Parse the seismic sources and save them in "source_model"
source_model = parser.read_file("Sources Around Marmaris")
# Adding the seismic sources
basemap1.add_source_model(source_model, area_border='r-',
border_width=1.5, alpha=0.5, overlay=True)
# Add target
basemap1.add_size_scaled_points(target_lon, target_lat, 20, shape='*',
colour='k', zorder=6)
In [44]:
# Creating a basemap
basemap1 = HMTKBaseMap(map_config, 'Catalogue and "most dangerous" sources')
# Adding the catalogue to the basemap
basemap1.add_catalogue(catalogue, overlay=True)
# Reading the models
parser = nrmlSourceModelParser(dangerous_sources_file)
# Parse the seismic sources and save them in "source_model"
source_model = parser.read_file("Sources Around Marmaris")
# Adding the seismic sources
basemap1.add_source_model(source_model, area_border='r-',
border_width=1.5, alpha=0.5)