In [ ]:
# See requirements.txt to set up your dev environment.
import os
import cv2
import sys
import json
import scipy
import urllib
import datetime 
import urllib3
import rasterio
import subprocess
import numpy as np
import pandas as pd
import seaborn as sns
from osgeo import gdal, ogr, osr
from planet import api
from planet.api import filters
from traitlets import link
import rasterio.mask as rio_mask # rasterio.tools.mask moved to rasterio.mask in rasterio 1.0
from shapely.geometry import mapping, shape
from IPython.display import display, Image, HTML
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#from scipy import ndimage
urllib3.disable_warnings()
from ipyleaflet import (
    Map,
    Marker,
    TileLayer, ImageOverlay,
    Polyline, Polygon, Rectangle, Circle, CircleMarker,
    GeoJSON,
    DrawControl
)

%matplotlib inline
# read the API key from apikeys.json
# (alternatively, ClientV1() with no arguments will pick up
# the key from the PL_API_KEY environment variable)
api_keys = json.load(open("apikeys.json",'r'))
client = api.ClientV1(api_key=api_keys["PLANET_API_KEY"])

Let's pull it all together to do something cool.

  • Let's reuse a lot of our code to make a movie of our travel around Portland.
  • We'll first select a bunch of recent scenes, activate, and download them.
  • After that we'll create a mosaic and a path, and trace the path through the mosaic.
  • We'll use the path to crop subregions, save them as images, and create a video.
  • The first step is to trace our AOI and a path through it.

In [ ]:
# Basemap Mosaic (v1 API)
mosaicsSeries = 'global_quarterly_2017q1_mosaic'
# Planet tile server base URL (Planet Explorer Mosaics Tiles)
mosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png'
# Planet tile server url
mosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + api_keys["PLANET_API_KEY"]
# Map Settings 
# Define colors
colors = {'blue': "#009da5"}
# Define initial map center lat/long
center = [45.5231, -122.6765]
# Define initial map zoom level
zoom = 11
# Set Map Tiles URL
planetMapTiles = TileLayer(url= mosaicsTilesURL)
# Create the map
m = Map(
    center=center, 
    zoom=zoom,
    default_tiles = planetMapTiles # use the Planet basemap as the default tile layer
)
# Define the draw tool type options
polygon = {'shapeOptions': {'color': colors['blue']}}
rectangle = {'shapeOptions': {'color': colors['blue']}} 

# Create the draw controls
# @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293
dc = DrawControl(
    polygon = polygon,
    rectangle = rectangle
)
# Initialize an action counter variable
actionCount = 0
AOIs = {}

# Register the draw controls handler
def handle_draw(self, action, geo_json):
    # Increment the action counter
    global actionCount
    actionCount += 1
    # Remove the `style` property the draw tool adds to the GeoJSON
    geo_json['properties'] = {}
    # Round-trip through json to store a clean copy detached from the draw control
    AOIs[actionCount] = json.loads(json.dumps(geo_json))
    
# Attach the draw handler to the draw controls `on_draw` event
dc.on_draw(handle_draw)
m.add_control(dc)
m

Query the API

  • Now we'll save the geometry for our AOI and the path. The first shape we drew is the area, the second is the path.
  • We'll also filter and clean up our data just like before.

In [ ]:
areaAOI = AOIs[1]["geometry"]
pathAOI = AOIs[2]["geometry"]

aoi_file ="portland.geojson" 
with open(aoi_file,"w") as f:
    f.write(json.dumps(areaAOI))
# build a query using the AOI and
# a cloud_cover filter that excludes overly cloudy scenes

old = datetime.datetime(year=2017,month=1,day=1)

query = filters.and_filter(
    filters.geom_filter(areaAOI),
    # note cloud_cover is a fraction between 0 and 1
    filters.range_filter('cloud_cover', lt=0.5),
    filters.date_range('acquired', gt=old)
)

# build a request for only PlanetScope imagery
request = filters.build_search_request(
    query, item_types=['PSScene3Band']
)

# if you don't have an API key configured, this will raise an exception
result = client.quick_search(request)
scenes = []
planet_map = {}
for item in result.items_iter(limit=500):
    planet_map[item['id']]=item
    props = item['properties']
    props["id"] = item['id']
    props["geometry"] = item["geometry"]
    props["thumbnail"] = item["_links"]["thumbnail"]
    scenes.append(props)
scenes = pd.DataFrame(data=scenes)
display(scenes)
print(len(scenes))

Just like before, we'll clean up our data and distill it down to just the scenes we want.


In [ ]:
# now let's clean up the datetime stuff
# make a shapely shape from our aoi
portland = shape(areaAOI)
footprints = []
overlaps = []
# go through the geometry from our api call, convert to a shape and calculate overlap area.
# also save the shape for safe keeping
for footprint in scenes["geometry"].tolist():
    s = shape(footprint)
    footprints.append(s)
    overlap = 100.0*(portland.intersection(s).area / portland.area)
    overlaps.append(overlap)
# take our lists and add them back to our dataframe
scenes['overlap'] = pd.Series(overlaps, index=scenes.index)
scenes['footprint'] = pd.Series(footprints, index=scenes.index)
# now make sure pandas knows about our date/time columns.
scenes["acquired"] = pd.to_datetime(scenes["acquired"])
scenes["published"] = pd.to_datetime(scenes["published"])
scenes["updated"] = pd.to_datetime(scenes["updated"])
scenes.head()

# Now let's get it down to just good, recent, clear scenes
clear = scenes['cloud_cover'] < 0.4
good = scenes['quality_category'] == "standard"
recent = scenes["acquired"] > datetime.datetime(year=2017,month=1,day=1)
partial_coverage = scenes["overlap"] > 10
good_scenes = scenes[(good & clear & recent & partial_coverage)]
print(good_scenes)

To make sure we are good, we'll visually inspect the scenes in our slippy map.


In [ ]:
# first create a list of colors
colors = ["#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff","#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff"]
# grab our scenes from the geometry/footprint geojson
# Change this number as needed
footprints = good_scenes[0:10]["geometry"].tolist()
# for each footprint/color combo
for footprint,color in zip(footprints,colors):
    # create the leaflet object
    feat = {'geometry':footprint,"properties":{
            'style':{'color': color,'fillColor': color,'fillOpacity': 0.2,'weight': 1}},
            'type':u"Feature"}
    # convert to geojson
    gjson = GeoJSON(data=feat)
    # add it to our map
    m.add_layer(gjson)
# now we will draw our original AOI on top 
feat = {'geometry':areaAOI,"properties":{
            'style':{'color': "#FFFFFF",'fillColor': "#FFFFFF",'fillOpacity': 0.5,'weight': 1}},
            'type':u"Feature"}
gjson = GeoJSON(data=feat)
m.add_layer(gjson)   
m

This is from the previous notebook. We are just activating and downloading scenes.


In [ ]:
def get_products(client, scene_id, asset_type='PSScene3Band'):    
    """
    Ask the client to return the available products for a 
    given scene and asset type. Returns a list of product 
    strings
    """
    out = client.get_assets_by_id(asset_type,scene_id)
    temp = out.get()
    return temp.keys()

def activate_product(client, scene_id, asset_type="PSScene3Band",product="analytic"):
    """
    Activate a product given a scene, an asset type, and a product.
    
    On success return the return value of the API call and an activation object
    """
    temp = client.get_assets_by_id(asset_type,scene_id)  
    products = temp.get()
    if product in products:
        return client.activate(products[product]),products[product]
    else:
        # return a pair so callers can always unpack the result
        return None, None

def download_and_save(client,product):
    """
    Given a client and a product activation object download the asset. 
    This will save the tiff file in the local directory and return its 
    file name. 
    """
    out = client.download(product)
    fp = out.get_body()
    fp.write()
    return fp.name

def scenes_are_active(scene_list):
    """
    Check if all of the resources in a given list of
    scene activation objects are ready for downloading.
    """
    for scene in scene_list:
        if scene["status"] != "active":
            print("{} is not ready.".format(scene))
            return False
    return True
def load_image4(filename):
    """Return a 4D (r, g, b, nir) numpy array with the data in the specified TIFF filename."""
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            b, g, r, nir = src.read()
            return np.dstack([r, g, b, nir])
        
def load_image3(filename):
    """Return a 3-band (r, g, b) numpy array with the data in the specified TIFF filename."""
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            # visual products come as r,g,b plus an alpha mask band
            r, g, b, mask = src.read()
            return np.dstack([r, g, b])
        
def get_mask(filename):
    """Return a single-band mask numpy array with the data in the specified TIFF filename."""
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            r, g, b, mask = src.read()
            return np.dstack([mask])

def rgbir_to_rgb(img_4band):
    """Convert an RGBIR image to RGB"""
    return img_4band[:,:,:3]

Perform the actual activation. This can take a while, so go get coffee (or use the polling sketch after the next cell).


In [ ]:
to_get = good_scenes["id"][0:10].tolist()
to_get = sorted(to_get)
activated = []
# for each scene to get
for scene in to_get:
    # get the product 
    product_types = get_products(client,scene)
    for p in product_types:
        # if there is a visual product
        if p == "visual": # could also use "basic_analytic_dn"
            print("Activating {0} for scene {1}".format(p,scene))
            # activate the product
            _,product = activate_product(client,scene,product=p)
            activated.append(product)
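
Alternatively, instead of a coffee break, here is a minimal polling sketch that re-fetches each asset until everything reports active. `wait_for_activation` is our own helper, not part of the Planet client, and the 30 second interval is an arbitrary choice.

In [ ]:
import time

def wait_for_activation(client, scene_ids, product="visual", asset_type="PSScene3Band", interval=30):
    """Poll the API until the requested product is active for every scene."""
    while True:
        assets = []
        for scene in scene_ids:
            # re-fetch the assets so the status field is current
            products = client.get_assets_by_id(asset_type, scene).get()
            if product in products:
                assets.append(products[product])
        # done when every scene has the product and every product is active
        if len(assets) == len(scene_ids) and scenes_are_active(assets):
            return assets
        print("Not ready yet, sleeping {0}s...".format(interval))
        time.sleep(interval)

# activated = wait_for_activation(client, to_get)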

Download the scenes


In [ ]:
tiff_files = []
asset_type = "_3B_Visual"
# check if our scenes have been activated
if scenes_are_active(activated):
    for to_download,name in zip(activated,to_get):
        # create the product name
        name = name + asset_type + ".tif"
        # if the product exists locally
        if os.path.isfile(name):
            # we already have it locally
            print("We have scene {0} already, skipping...".format(name))
            tiff_files.append(name)
        elif to_download["status"] == "active":
            # otherwise download the product
            print "Downloading {0}....".format(name)
            fname = download_and_save(client,to_download)
            tiff_files.append(fname)
            print "Download done."
        else:
            print "Could not download, still activating"
else:
    print "Scenes aren't ready yet"

print(tiff_files)

Now, just like before, we will mosaic those scenes.

  • It is easier to shell out with subprocess and use the command line util.
  • Just iterate through the files and merge them into a single file, portland_mosaic.tif.

In [ ]:
subprocess.call(["rm","portland_mosaic.tif"])
commands = ["gdalwarp", # t
           "-t_srs","EPSG:3857",
           "-cutline",aoi_file,
           "-crop_to_cutline",
           "-tap",
            "-tr", "3", "3"
           "-overwrite"]
output_mosaic = "portland_mosaic.tif"
for tiff in tiff_files:
    commands.append(tiff)
commands.append(output_mosaic)
print " ".join(commands)
subprocess.call(commands)
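
gdalwarp can fail silently when shelled out like this, so a quick check that the mosaic was actually written is cheap insurance (our own guard, not part of the original flow):

In [ ]:
# confirm gdalwarp actually produced the mosaic before we try to load it
if not os.path.isfile(output_mosaic):
    raise IOError("gdalwarp did not produce {0}".format(output_mosaic))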

Let's take a look at what we got


In [ ]:
merged = load_image3(output_mosaic)
plt.figure(0,figsize=(18,18))
plt.imshow(merged)
plt.title("merged")

Now we are going to write a quick crop function.

  • This function takes in a scene, a center position, and the width and height of a window.
  • We'll use numpy slice notation to make the crop.
  • Let's pick a spot and see what we get.

In [ ]:
def crop_to_area(scene,x_c,y_c,w,h):
    # use integer division so the slice indices stay ints
    tlx = x_c-(w//2)
    tly = y_c-(h//2)
    brx = x_c+(w//2)
    bry = y_c+(h//2)
    return scene[tly:bry,tlx:brx,:]

In [ ]:
plt.figure(0,figsize=(3,4))
plt.imshow(crop_to_area(merged,3000,3000,640,480))
plt.title("merged")

Now to figure out how our lat/long values map to pixels.

  • The next thing we need is a way to map from a lat and long in our slippy map to the pixel position in our image.
  • We'll use what we know about the lat/long of the corners of our image to do that.
  • We'll ask GDAL to tell us the extents of our scene and the geotransform.
  • We'll then apply the geotransform from GDAL to the coordinates that are the extents of our scene.
  • Now we have the corners of our scene in lat/long.

In [ ]:
# Liberally borrowed from this example
# https://gis.stackexchange.com/questions/57834/how-to-get-raster-corner-coordinates-using-python-gdal-bindings
def GetExtent(gt,cols,rows):
    """
    Get the list of corners in our output image in the format 
    [[x,y],[x,y],[x,y]]
    """
    ext=[]
    # for the corners of the image
    xarr=[0,cols]
    yarr=[0,rows]

    for px in xarr:
        for py in yarr:
            # apply the geo coordinate transform
            # using the affine transform we got from GDAL
            x=gt[0]+(px*gt[1])+(py*gt[2])
            y=gt[3]+(px*gt[4])+(py*gt[5])
            ext.append([x,y])
        yarr.reverse()
    return ext

def ReprojectCoords(coords,src_srs,tgt_srs):
    trans_coords=[]
    # create a transform object from the source and target ref system
    transform = osr.CoordinateTransformation( src_srs, tgt_srs)
    for x,y in coords:
        # transform the points
        x,y,z = transform.TransformPoint(x,y)
        # add it to the list. 
        trans_coords.append([x,y])
    return trans_coords

Here we'll call the functions we wrote.

  • First we open the scene and get the width and height.
  • Then, using the geotransform, we'll reproject those points to lat and long.

In [ ]:
# TL;DR: pixels => UTM coordinates => lat/long
raster=output_mosaic
# Load the GDAL File
ds=gdal.Open(raster)
# get the geotransform
gt=ds.GetGeoTransform()
# get the width and height of our image
cols = ds.RasterXSize
rows = ds.RasterYSize
# Generate the coordinates of our image in utm
ext=GetExtent(gt,cols,rows)
# get the spatial reference object
src_srs=osr.SpatialReference()
# get the data that will allow us to move from UTM to Lat Lon. 
src_srs.ImportFromWkt(ds.GetProjection())
tgt_srs = src_srs.CloneGeogCS()
extents = ReprojectCoords(ext,src_srs,tgt_srs)
print(extents)
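
One gotcha worth flagging (our addition): GDAL 3 changed the default axis order for geographic coordinate systems, so TransformPoint may hand back (lat, lon) instead of (lon, lat). If the extents above look transposed, forcing the traditional order fixes it:

In [ ]:
# GDAL 3 honors the EPSG axis order (lat, lon); force the traditional
# (lon, lat) order so ReprojectCoords keeps behaving as written.
if hasattr(osr, "OAMS_TRADITIONAL_GIS_ORDER"):
    src_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    tgt_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    extents = ReprojectCoords(ext, src_srs, tgt_srs)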

Now we'll do a bit of a hack.

  • The bit above is precise but complex, so we are going to make everything easier to think about.
  • We are going to linearize our scene, which isn't perfect, but it is good enough for our application.
  • This function takes in a given lat and long, the size of the image, and the extents as lat/lon coordinates.
  • For a given pixel we map its x and y values to a value between a given lat and long and return the results.
  • Now we can ask: for a given lat/long pair, what is the corresponding pixel?

In [ ]:
def poor_mans_lat_lon_2_pix(lon,lat,w,h,extents):
    # split up our lat and longs 
    lats = [e[1] for e in extents]
    lons = [e[0] for e in extents]
    # calculate the max and min of our scene extents
    lat_max = np.max(lats)
    lat_min = np.min(lats)    
    lon_max = np.max(lons)
    lon_min = np.min(lons) 
    # calculate the difference between our start point
    # and our minimum
    lat_diff = lat-lat_min
    lon_diff = lon-lon_min
    # create the linearization
    lat_r = float(h)/(lat_max-lat_min)
    lon_r = float(w)/(lon_max-lon_min) 
    # generate the result as a (y, x) pixel pair
    return int(lat_r*lat_diff),int(lon_r*lon_diff)
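
Before the visual check below, a quick programmatic sanity check (our own addition): the scene's min corner should land at pixel (0, 0) and the max corner at (h, w).

In [ ]:
# sanity check the linearization against the known corners of the scene
h, w, c = merged.shape
lats = [e[1] for e in extents]
lons = [e[0] for e in extents]
print(poor_mans_lat_lon_2_pix(min(lons), min(lats), w, h, extents)) # expect (0, 0)
print(poor_mans_lat_lon_2_pix(max(lons), max(lats), w, h, extents)) # expect (h, w)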

Let's check our work

  • First we'll create a draw point function that just puts a red dot at a given pixel.
  • We'll get our scene, and map all of the lat/long points in our path to pixel values.
  • Finally we'll load our image, plot the points, and show our results.

In [ ]:
def draw_point(x,y,img,t=40):
    h,w,d = img.shape
    y = h-y
    img[(y-t):(y+t),(x-t):(x+t),:] = [255,0,0]
h,w,c = merged.shape
# GeoJSON coordinates come as (lon, lat) pairs
waypoints = [poor_mans_lat_lon_2_pix(point[0],point[1],w,h,extents) for point in pathAOI["coordinates"]]
print(waypoints)
merged = load_image3(output_mosaic)
[draw_point(pt[1],pt[0],merged) for pt in waypoints]
plt.figure(0,figsize=(18,18))
plt.imshow(merged)
plt.title("merged")

Now things get interesting....

  • Our path is just a few waypoints, but to make a movie we need just about every point between those waypoints.
  • To get all of the points between our waypoints we'll have to write a little interpolation script.
  • Interpolation is just a fancy word for generating nicely spaced points between our waypoints; we'll call the spacing between each point our "velocity."
  • If we were really slick we could define a heading vector and build a spline so the camera faces the direction of travel (see the sketch after this list). Our approach is fine as the top of the frame is always north, which makes reckoning easy.
  • Once we have our interpolation function, all we need to do is crop our large mosaic at each point in our interpolated point list and save the crops as sequentially numbered files.
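
As an aside, here is a minimal sketch of that spline idea using scipy.interpolate (our own illustration, not used in what follows; the default cubic spline needs at least four waypoints):

In [ ]:
from scipy import interpolate

def spline_waypoints(waypoints, n_samples=500):
    # fit a smooth B-spline through the waypoints and sample it densely,
    # instead of the piecewise-linear interpolation we use below
    pts = np.array(waypoints, dtype='float64')
    tck, u = interpolate.splprep([pts[:,0], pts[:,1]], s=0)
    xs, ys = interpolate.splev(np.linspace(0, 1, n_samples), tck)
    return list(zip(np.array(xs,dtype='int64'), np.array(ys,dtype='int64')))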

In [ ]:
def interpolate_waypoints(waypoints,velocity=10.0):
    retVal = []
    last_pt = waypoints[0]
    # for each point in our waypoints except the first
    for next_pt in waypoints[1:]:
        # calculate distance between the points
        distance = np.sqrt((last_pt[0]-next_pt[0])**2+(last_pt[1]-next_pt[1])**2)
        # use our velocity to calculate the number of steps.
        steps = int(np.ceil(distance/velocity))
        # linearly space points between the two points on our line
        xs = np.array(np.linspace(last_pt[0],next_pt[0],steps),dtype='int64')
        ys = np.array(np.linspace(last_pt[1],next_pt[1],steps),dtype='int64')
        # zip the points together
        retVal += zip(xs,ys)
        # move to the next point
        last_pt = next_pt
    return retVal

def build_scenes(src,waypoints,window=[640,480],path="./movie/"):
    # make sure the output directory exists
    if not os.path.exists(path):
        os.makedirs(path)
    count = 0 
    # swap the channel order for OpenCV (cv2.imwrite expects BGR)
    src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
    # for our source image get the width and height
    h,w,d = src.shape
    for pt in waypoints:
        # for each point crop the area out.
        # the y value of our scene is upside down. 
        temp = crop_to_area(src,pt[1],h-pt[0],window[0],window[1])
        # If we happen to hit the border of the scene, just skip
        if temp.shape[0]*temp.shape[1]== 0:
            # if we have an issue, just keep plugging along
            continue
        # Resample the image a bit, this just makes things look nice. 
        temp = cv2.resize(temp, (int(window[0]*0.75), int(window[1]*.75))) 
        # create a file name
        fname = os.path.abspath(path+"img{num:06d}.png".format(num=count))
        # Save it
        cv2.imwrite(fname,temp)
        count += 1

Before we generate our video frames, let's check our work

  • We'll load our image.
  • Build the interpolated waypoints list.
  • Draw the points on the image using our draw_point method.
  • Plot the results

In [ ]:
# load the image
merged = load_image3(output_mosaic)
# interpolate the waypoints
interp = interpolate_waypoints(waypoints)
# draw them on our scene
[draw_point(pt[1],pt[0],merged) for pt in interp]
# display the scene
plt.figure(0,figsize=(18,18))
plt.imshow(merged)
plt.title("merged")

Now let's re-load the image and run the scene maker.


In [ ]:
os.system("rm ./movie/*.png")
merged = load_image3(output_mosaic)
build_scenes(merged,interp)

Finally, let's make a movie.

  • Our friend avconv, which is like ffmpeg, is a handy command line util for transcoding video.
  • avconv can also convert a series of images into a video and vice versa.
  • We'll set up our command and use subprocess to make the call.

In [ ]:
# avconv -framerate 30 -f image2 -i ./movie/img%06d.png -b 65536k out.mpg
os.system("rm ./movie/*.png")
framerate = 30
output = "out.mpg"
command = ["avconv","-framerate", str(framerate), "-f", "image2", "-i", "./movie/img%06d.png", "-b", "65536k", output]
os.system(" ".join(command))
