In [ ]:
# You'll probably want to raise the notebook's IOPub data rate limit for this notebook.
# See: http://stackoverflow.com/questions/43288550/iopub-data-rate-exceeded-when-viewing-image-in-jupyter-notebook
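If you hit the "IOPub data rate exceeded" error, the common fix from the thread above is to restart the notebook server with a higher limit, e.g.:
$ jupyter notebook --NotebookApp.iopub_data_rate_limit=1e10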
In [ ]:
# See requirements.txt to set up your dev environment.
import sys
import os
import json
import scipy
import urllib
import datetime
import urllib3
import rasterio
import subprocess
import numpy as np
import pandas as pd
import seaborn as sns
from osgeo import gdal
from planet import api
from planet.api import filters
from traitlets import link
import rasterio.tools.mask as rio_mask
from shapely.geometry import mapping, shape
from IPython.display import display, Image, HTML
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
urllib3.disable_warnings()
from ipyleaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
DrawControl
)
%matplotlib inline
# will pick up api_key via environment variable PL_API_KEY
# but can be specified using `api_key` named argument
api_keys = json.load(open("apikeys.json",'r'))
client = api.ClientV1(api_key=api_keys["PLANET_API_KEY"])
If the map widgets below don't render, install and enable ipyleaflet first:
$ pip install ipyleaflet
$ jupyter nbextension enable --py --sys-prefix ipyleaflet
$ jupyter nbextension enable --py --sys-prefix widgetsnbextension
In [ ]:
# Basemap Mosaic (v1 API)
mosaicsSeries = 'global_quarterly_2017q1_mosaic'
# Planet tile server base URL (Planet Explorer Mosaics Tiles)
mosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png'
# Planet tile server url
mosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + api_keys["PLANET_API_KEY"]
# Map Settings
# Define colors
colors = {'blue': "#009da5"}
# Define initial map center lat/long
center = [45.5231, -122.6765]
# Define initial map zoom level
zoom = 11
# Set Map Tiles URL
planetMapTiles = TileLayer(url= mosaicsTilesURL)
# Create the map
m = Map(
center=center,
zoom=zoom,
    default_tiles=planetMapTiles  # comment this line out to fall back to the default basemap
)
# Define the draw tool type options
polygon = {'shapeOptions': {'color': colors['blue']}}
rectangle = {'shapeOptions': {'color': colors['blue']}}
# Create the draw controls
# @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293
dc = DrawControl(
polygon = polygon,
rectangle = rectangle
)
# Initialize an action counter variable
actionCount = 0
AOIs = {}
# Register the draw controls handler
def handle_draw(self, action, geo_json):
# Increment the action counter
global actionCount
actionCount += 1
    # Strip the properties (including the style) from the GeoJSON
    geo_json['properties'] = {}
    # Round-trip through json.dumps/loads to store a plain copy of the feature
    geojsonStr = json.dumps(geo_json, indent=2)
    AOIs[actionCount] = json.loads(geojsonStr)
# Attach the draw handler to the draw controls `on_draw` event
dc.on_draw(handle_draw)
m.add_control(dc)
m
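If you'd rather not draw an AOI by hand, you can seed `AOIs` directly with a GeoJSON feature; the rest of the notebook only needs `AOIs[1]["geometry"]`. The coordinates below are purely illustrative, a rough box around the Portland map center used above.
In [ ]:
# Optional: skip the draw step by defining an AOI directly.
# These coordinates are a hypothetical rectangle, not a curated AOI.
AOIs[1] = {
    "type": "Feature",
    "properties": {},
    "geometry": {
        "type": "Polygon",
        "coordinates": [[
            [-122.80, 45.45],
            [-122.55, 45.45],
            [-122.55, 45.60],
            [-122.80, 45.60],
            [-122.80, 45.45]
        ]]
    }
}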
In [ ]:
print(AOIs[1])
myAOI = AOIs[1]["geometry"]
# build a query using the AOI, an acquisition-date filter,
# and a cloud_cover filter
old = datetime.datetime(year=2013,month=1,day=1)
query = filters.and_filter(
filters.geom_filter(myAOI),
filters.range_filter('cloud_cover', lt=50),
filters.date_range('acquired', gt=old)
)
# build a request for only PlanetScope imagery
request = filters.build_search_request(
query, item_types=['PSScene3Band']
)
# if you don't have an API key configured, this will raise an exception
result = client.quick_search(request)
scenes = []
planet_map = {}
for item in result.items_iter(limit=500):
planet_map[item['id']]=item
props = item['properties']
props["id"] = item['id']
props["geometry"] = item["geometry"]
props["thumbnail"] = item["_links"]["thumbnail"]
scenes.append(props)
scenes = pd.DataFrame(data=scenes)
display(scenes)
print(len(scenes))
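Quick searches re-run against the live catalog, so the results can drift over time; a minimal sketch for caching what we just got (the file name `scenes_cache.csv` is arbitrary):
In [ ]:
# Snapshot the search results locally for reference.
# Note: dict-valued columns like 'geometry' are stringified in the CSV,
# so this is a record of what we saw, not a lossless round-trip.
scenes.to_csv("scenes_cache.csv", index=False)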
In [ ]:
# now let's calculate AOI overlap and clean up the datetime columns
# make a shapely shape from our aoi
portland = shape(myAOI)
footprints = []
overlaps = []
# go through the geometry from our api call, convert to a shape and calculate overlap area.
# also save the shape for safe keeping
for footprint in scenes["geometry"].tolist():
s = shape(footprint)
footprints.append(s)
overlap = 100.0*(portland.intersection(s).area / portland.area)
overlaps.append(overlap)
# take our lists and add them back to our dataframe
scenes['overlap'] = pd.Series(overlaps, index=scenes.index)
scenes['footprint'] = pd.Series(footprints, index=scenes.index)
# now make sure pandas knows about our date/time columns.
scenes["acquired"] = pd.to_datetime(scenes["acquired"])
scenes["published"] = pd.to_datetime(scenes["published"])
scenes["updated"] = pd.to_datetime(scenes["updated"])
scenes.head()
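With the overlap and timestamp columns in place, it's easy to see how AOI coverage varies over time; a small sketch using the dataframe we just built:
In [ ]:
# Best AOI overlap per acquisition day -- a quick coverage summary.
daily_best = scenes.groupby(scenes["acquired"].dt.date)["overlap"].max()
daily_best.plot(kind="line", figsize=(12, 4), title="Best AOI overlap per day (%)")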
In [ ]:
# Now let's get it down to just good, recent, clear scenes
clear = scenes['cloud_cover'] < 0.1
good = scenes['quality_category'] == "standard"
recent = scenes["acquired"] > datetime.datetime(year=2017, month=1, day=1)
partial_coverage = scenes["overlap"] > 30
good_scenes = scenes[(good & clear & recent & partial_coverage)]
display(good_scenes)
print(len(good_scenes))

# Do the same over a longer time window, with looser cloud cover but fuller AOI coverage
clear = scenes['cloud_cover'] < 0.5
good = scenes['quality_category'] == "standard"
all_time = scenes["acquired"] > datetime.datetime(year=2014, month=1, day=1)
full_coverage = scenes["overlap"] >= 60
all_scenes = scenes[(good & clear & all_time & full_coverage)]
display(all_scenes)
print(len(all_scenes))
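Since matplotlib is already loaded, a quick scatter of cloud cover against acquisition date is a handy sanity check on the filters above (a sketch, using the broader `all_scenes` selection):
In [ ]:
# Visual sanity check: cloud cover over time for the selected scenes.
plt.figure(figsize=(12, 4))
plt.scatter(all_scenes["acquired"], all_scenes["cloud_cover"], s=10)
plt.xlabel("acquired")
plt.ylabel("cloud_cover")
plt.title("Cloud cover of selected scenes over time")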
In [ ]:
# first create a list of colors
colors = ["#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff"]
# grab the footprint geometries for our selected scenes
footprints = good_scenes["geometry"].tolist()
# for each footprint/color combo
for footprint,color in zip(footprints,colors):
# create the leaflet object
feat = {'geometry':footprint,"properties":{
'style':{'color': color,'fillColor': color,'fillOpacity': 0.2,'weight': 1}},
'type':u"Feature"}
# convert to geojson
gjson = GeoJSON(data=feat)
    # add it to our map
m.add_layer(gjson)
# now we will draw our original AOI on top
feat = {'geometry':myAOI,"properties":{
'style':{'color': "#FFFFFF",'fillColor': "#FFFFFF",'fillOpacity': 0.5,'weight': 1}},
'type':u"Feature"}
gjson = GeoJSON(data=feat)
m.add_layer(gjson)
m
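Note that `zip()` above stops at the shorter of the two lists, so at most six footprints get drawn. If you want every footprint on the map, cycling the palette is a simple fix (a sketch):
In [ ]:
from itertools import cycle
# Draw every selected footprint, reusing the six colors as needed.
for footprint, color in zip(good_scenes["geometry"].tolist(), cycle(colors)):
    feat = {'geometry': footprint,
            "properties": {'style': {'color': color, 'fillColor': color,
                                     'fillOpacity': 0.2, 'weight': 1}},
            'type': "Feature"}
    m.add_layer(GeoJSON(data=feat))
m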
In [ ]:
imgs = []
# loop through our thumbnails and display them
for img in good_scenes["thumbnail"].tolist():
    imgs.append(Image(url=img))
    print(img)
display(*imgs)
In [ ]:
def get_products(client, scene_id, asset_type='PSScene3Band'):
    """
    Ask the client to return the available products for a
    given scene and asset type. Returns a list of product
    strings.
    """
    out = client.get_assets_by_id(asset_type, scene_id)
    temp = out.get()
    return list(temp.keys())

def activate_product(client, scene_id, asset_type="PSScene3Band", product="analytic"):
    """
    Activate a product given a scene, an asset type, and a product.
    On success return the return value of the API call and an activation object;
    on failure return (None, None).
    """
    temp = client.get_assets_by_id(asset_type, scene_id)
    products = temp.get()
    if product in products.keys():
        return client.activate(products[product]), products[product]
    else:
        return None, None

def download_and_save(client, product):
    """
    Given a client and a product activation object, download the asset.
    This will save the GeoTIFF in the local directory and return its
    file name.
    """
    out = client.download(product)
    fp = out.get_body()
    fp.write()
    return fp.name

def scenes_are_active(scene_list):
    """
    Check whether every resource in a given list of
    scene activation objects is ready for downloading.
    """
    for scene in scene_list:
        if scene["status"] != "active":
            print("{} is not ready.".format(scene))
            return False
    return True
In [ ]:
to_get = good_scenes["id"].tolist()
activated = []
# for each scene to get
for scene in to_get:
    # get the available products for this scene
    product_types = get_products(client, scene)
    for p in product_types:
        # if there is a visual product, activate it
        if p == "visual":  # or p == "basic_analytic_dn" for raw data
            print("Activating {0} for scene {1}".format(p, scene))
            # activate the product
            _, product = activate_product(client, scene, product=p)
            activated.append(product)
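Activation is asynchronous, so the download cell below may find assets that are still processing. A simple polling loop that re-fetches asset status with the same `get_assets_by_id` call used above (a sketch; the timeout and poll interval are arbitrary):
In [ ]:
import time

def wait_for_activation(client, scene_ids, asset_type="PSScene3Band",
                        product="visual", timeout=1800, poll=30):
    """Poll the API until `product` is active for every scene id, or time out."""
    start = time.time()
    while time.time() - start < timeout:
        statuses = []
        for scene in scene_ids:
            assets = client.get_assets_by_id(asset_type, scene).get()
            statuses.append(assets.get(product, {}).get("status"))
        if all(status == "active" for status in statuses):
            return True
        print("Still activating... statuses seen: {}".format(set(statuses)))
        time.sleep(poll)
    return False

# wait_for_activation(client, to_get)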
In [ ]:
tiff_files = []
asset_type = "_3B_Visual"
# check that our scenes have been activated
if True:  # could instead gate on scenes_are_active(activated)
    for to_download, name in zip(activated, to_get):
        # create the product file name
        name = name + asset_type + ".tif"
        if os.path.isfile(name):
            # the file already exists locally, so skip the download
            print("We have scene {0} already, skipping...".format(name))
            tiff_files.append(name)
        elif to_download["status"] == "active":
            # otherwise download the product
            print("Downloading {0}....".format(name))
            fname = download_and_save(client, to_download)
            tiff_files.append(fname)
            print("Download done.")
        else:
            print("Could not download, still activating")
else:
    print("Scenes aren't ready yet")
print(tiff_files)
In [ ]:
def load_image4(filename):
    """Return a (rows, cols, 4) r, g, b, nir numpy array with the data from the specified GeoTIFF."""
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, nir = src.read()
            return np.dstack([r, g, b, nir])

def load_image3(filename):
    """Return a (rows, cols, 3) numpy array with the three visual bands from the specified GeoTIFF."""
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, mask = src.read()
            return np.dstack([b, g, r])

def get_mask(filename):
    """Return a (rows, cols, 1) mask array from the alpha band of the specified GeoTIFF."""
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, mask = src.read()
            return np.dstack([mask])

def rgbir_to_rgb(img_4band):
    """Convert an RGBIR image to RGB by dropping the infrared band."""
    return img_4band[:, :, :3]
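`load_image4()` isn't used in the rest of this notebook (we download the visual product), but with a 4-band analytic asset it gives you everything needed for NDVI. A sketch, assuming a hypothetical 4-band file name:
In [ ]:
# NDVI = (NIR - Red) / (NIR + Red), computed from a 4-band analytic scene.
# "analytic_scene.tif" is a placeholder -- substitute a real 4-band file.
rgbn = load_image4("analytic_scene.tif")
if rgbn is not None:
    red = rgbn[:, :, 0].astype(np.float32)
    nir = rgbn[:, :, 3].astype(np.float32)
    ndvi = (nir - red) / (nir + red + 1e-9)  # epsilon avoids divide-by-zero
    plt.imshow(ndvi, cmap="RdYlGn")
    plt.colorbar()
    plt.title("NDVI")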
In [ ]:
img_files = []
masks = []
# load the images and masks
for fname in tiff_files[0:2]:
img_files.append(load_image3(fname))
masks.append(get_mask(fname))
i = 0
# use matplotlib to display each image
for img,name in zip(img_files,tiff_files):
plt.figure(i,figsize=(18,36))
plt.imshow(img)
plt.title(name)
i+=1
In [ ]:
import numpy.ma as ma
def plot_hist4(img_4band,title=""):
# Plot a four band histogram
r, g, b, nir = img_4band[:, :, 0], img_4band[:, :, 1], img_4band[:, :, 2], img_4band[:, :, 3]
for slice_, name, color in ((r,'r', 'red'),(g,'g', 'green'),(b,'b', 'blue'), (nir, 'nir', 'magenta')):
plt.hist(slice_.ravel(), bins=100,
range=[0,img_4band.max()],
label=name, color=color, histtype='step')
plt.title(title)
plt.legend()
def plot_hist3(img_3band,mask,title=""):
    # Plot a three band histogram, masking each band with the alpha mask
r, g, b = img_3band[:, :, 0], img_3band[:, :, 1], img_3band[:, :, 2]
r = ma.masked_array(r,mask=mask)
g = ma.masked_array(g,mask=mask)
b = ma.masked_array(b,mask=mask)
for slice_, name, color in ((r,'r', 'red'),(g,'g', 'green'),(b,'b', 'blue')):
plt.hist(slice_.ravel(), bins=25,
range=[0,img_3band.max()],
label=name, color=color, histtype='step')
plt.title(title)
plt.legend()
In [ ]:
i = 0
for img,name,mask in zip(img_files,tiff_files,masks):
plt.figure(i,figsize=(9,18))
plot_hist3(img,mask=mask,title=name)
i+=1
In [ ]:
def plot_bands4(img,title="",i=0):
fig = plt.figure(i)
fig.set_size_inches(24, 3)
r, g, b, nir = img[:, :, 0], img[:, :, 1], img[:, :, 2], img[:, :, 3]
fig.suptitle(title)
for i, (x, c) in enumerate(((r, 'r'), (g, 'g'), (b, 'b'), (nir, 'near-ir'))):
a = fig.add_subplot(1, 4, i+1)
a.set_title(c)
plt.imshow(x)
def plot_bands3(img, title="", i=0):
    fig = plt.figure(i)
    fig.set_size_inches(24, 5)
    r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    fig.suptitle(title)
    for i, (x, c) in enumerate(((r, 'r'), (g, 'g'), (b, 'b'))):
        a = fig.add_subplot(1, 3, i + 1)
        a.set_title(c)
        plt.imshow(x)
In [ ]:
plot_bands3(img_files[0],title=tiff_files[0],i=0)
In [ ]:
aoi_file ="portland.geojson"
# write our input AOI to a geojson file.
with open(aoi_file,"w") as f:
f.write(json.dumps(myAOI))
# create our full input and output names
clip_names = [os.path.abspath(tiff[:-4]+"_clip"+".tif") for tiff in tiff_files]
full_tif_files = [os.path.abspath("./"+tiff) for tiff in tiff_files]
for in_file,out_file in zip(tiff_files,clip_names):
commands = ["gdalwarp", # t
"-t_srs","EPSG:3857",
"-cutline",aoi_file,
"-crop_to_cutline",
"-tap",
"-tr", "3", "3"
"-overwrite"]
subprocess.call(["rm",out_file])
commands.append(in_file)
commands.append(out_file)
print " ".join(commands)
subprocess.call(commands)
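For reference, the `rio_mask` import at the top of the notebook can do the same clip in pure Python; the sketch below reprojects the lat/long AOI into the raster's CRS first. gdalwarp remains the approach used in the rest of this notebook.
In [ ]:
from rasterio.warp import transform_geom

def clip_with_rasterio(in_file, out_file, aoi_geom):
    """Clip a GeoTIFF to an AOI given in EPSG:4326 using rasterio."""
    with rasterio.open(in_file) as src:
        # reproject the AOI geometry into the raster's CRS before masking
        geom = transform_geom("EPSG:4326", src.crs, aoi_geom)
        out_img, out_transform = rio_mask.mask(src, [geom], crop=True)
        kwargs = {
            "driver": "GTiff",
            "height": out_img.shape[1],
            "width": out_img.shape[2],
            "count": src.count,
            "dtype": out_img.dtype,
            "crs": src.crs,
            "transform": out_transform,
        }
    with rasterio.open(out_file, "w", **kwargs) as dst:
        dst.write(out_img)

# clip_with_rasterio(tiff_files[0], "rasterio_clip.tif", myAOI)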
In [ ]:
clip_img_files = [load_image3(fname) for fname in clip_names]
i = 0
for img,name in zip(clip_img_files,clip_names):
plt.figure(i,figsize=(6,12))
plt.imshow(img)
plt.title(name)
i+=1
In [ ]:
subprocess.call(["rm","merged.tif"])
commands = ["gdalwarp", # t
"-t_srs","EPSG:3857",
"-cutline",aoi_file,
"-crop_to_cutline",
"-tap",
"-tr", "3", "3"
"-overwrite"]
output_mosaic = "merged.tif"
for tiff in tiff_files[0:2]:
commands.append(tiff)
commands.append(output_mosaic)
print " ".join(commands)
subprocess.call(commands)
In [ ]:
merged = load_image3("./merged.tif")
plt.figure(i,figsize=(6,12))
plt.imshow(merged)
plt.title("merged")
In [ ]:
# Activate
to_get = all_scenes["id"].tolist()
activated = []
for scene in to_get:
    product_types = get_products(client, scene)
    for p in product_types:
        if p == "visual":  # or p == "basic_analytic_dn" for raw data
            print("Activating {0} for scene {1}".format(p, scene))
            _, product = activate_product(client, scene, product=p)
            activated.append(product)
# Download
tiff_files = []
asset_type = "_3B_Visual"
if True:  # could instead gate on scenes_are_active(activated)
    for to_download, name in zip(activated, to_get):
        name = name + asset_type + ".tif"
        if os.path.isfile(name):
            print("We have scene {0} already, skipping...".format(name))
            tiff_files.append(name)
        elif to_download["status"] == "active":
            print("Downloading {0}....".format(name))
            fname = download_and_save(client, to_download)
            tiff_files.append(fname)
            print("Download done.")
        else:
            print("Could not download, still activating")
else:
    print("Scenes aren't ready yet")
In [ ]:
tiff_files = sorted(tiff_files)
# Create a fresh list of clipped tif file names.
clip_names = []
for tiff in tiff_files:
    clip_names.append(os.path.abspath(tiff[:-4] + "_clip" + ".tif"))
full_tif_files = []
for tiff in tiff_files:
    full_tif_files.append(os.path.abspath("./" + tiff))
# Run GDAL to crop our files down to the AOI.
for in_file, out_file in zip(tiff_files, clip_names):
    commands = ["gdalwarp",  # reproject, clip to the AOI, and resample
                "-t_srs", "EPSG:3857",
                "-cutline", aoi_file,
                "-crop_to_cutline",
                "-tap",
                "-tr", "3", "3",
                "-overwrite"]
    subprocess.call(["rm", out_file])
    commands.append(in_file)
    commands.append(out_file)
    print(" ".join(commands))
    subprocess.call(commands)
temp_names = []
i = 0
# use ImageMagick convert to downsample each clip into a GIF frame
for in_file in clip_names:
    temp_name = "img{0}.gif".format(i)
    command = ["convert", in_file, "-sample", "30x30%", temp_name]
    temp_names.append(temp_name)
    i += 1
    subprocess.call(command)
# stitch the frames together into an animated GIF, keeping the frames in order
magic = "portland.gif"
last_call = ["convert", "-delay", "40", "-loop", "0"] + temp_names + [magic]
subprocess.call(last_call)
print("done!")