In [1]:
%matplotlib
import wradlib
#import cv2
import numpy as np
import os
import matplotlib.pyplot as plt
#from matplotlib import animation
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
from scipy.ndimage import zoom
import datetime
import warnings
warnings.simplefilter('once', DeprecationWarning)


Using matplotlib backend: Qt5Agg
C:\Anaconda3\envs\wradlib0_11\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [28]:
from scipy import ndimage as ndi
from skimage import feature

Read sample data

Data is from the German Weather Service: the so called RY product represents rainfall intensity composite for the whole of Germany in 5 minute intervals.

Spatial resolution: 1 x 1 km; spatial extent: 900 x 900 km.

Information required from user

  • specify the directory datadir where you store the RY data (unpack the ry archives there).
  • select a specific interval by commenting/uncommenting the dtimes lines.
  • decide whether you need to reduce the resolution (downsize the image by a downsizeby) in order to avoid memory problems (this becomes relevant once you solve the 2D-adveciton equation...)

In [3]:
# Set data directory
datadir = "data/ry"

# Original grid dimensions
nx = 900
ny = 900

# pixel size (in meters)
dx = 1000.
dy = 1000.

# Downsize by factor "downsizeby"
#    downsizeby = 1 will leave the dimensions unchanged,
#    but for a 900x900 km grid, downsizing might be 
#    required in order to avoid MemoryError
downsizeby = 1

# interval between observations (in seconds)
interval = 300

# Set time window
##dtimes = wradlib.util.from_to("2008-06-02 17:00:00", "2008-06-02 19:00:00", interval)
##dtimes = wradlib.util.from_to("2015-04-26 17:00:00", "2015-04-26 19:00:00", interval)
##dtimes = wradlib.util.from_to("2015-03-29 17:00:00", "2015-03-29 19:00:00", interval)
dtimes = wradlib.util.from_to("2016-05-29 16:00:00", "2016-05-29 19:00:00", interval)

In [4]:
# Compute grid dimensions and grid coordinates after resampling
dx2, dy2 = dx*downsizeby, dy*downsizeby
nx2, ny2 = int(nx/downsizeby), int(ny/downsizeby)

X2, Y2 = np.meshgrid( np.arange(0,nx2*dx2, dx2), np.arange(0,ny2*dy2, dy2) )

# Define container
frames = np.zeros( (len(dtimes), nx2, ny2 ) )

# Read the data, convert back to dBZ, and downsize
#   (maybe also try with keeping mm/h instead of converting to dBZ?)
for i, dtime in enumerate(dtimes):
    fname = dtime.strftime( os.path.join(datadir, "raa01-ry_10000-%y%m%d%H%M-dwd---bin") )
    frames[i] = zoom( wradlib.io.read_RADOLAN_composite(fname, missing=0)[0], 1./downsizeby, order=1)
    frames[i] = wradlib.trafo.decibel( wradlib.zr.r2z(frames[i]) )
    frames[i][frames[i]<0] = 0


E:\src\git\heistermann\wradlib\wradlib\trafo.py:127: RuntimeWarning: divide by zero encountered in log10
  return 10. * np.log10(x)

In [23]:
#im = frames.sum(axis=0)
im = frames[10]

In [7]:
from skimage import measure
from skimage import filters

In [36]:
blobs = im > 5.

all_labels = measure.label(blobs)
blobs_labels = measure.label(blobs, background=0)

plt.figure(figsize=(9, 3.5))
plt.subplot(131)
plt.imshow(blobs, cmap='gray')
plt.axis('off')
plt.subplot(132)
plt.imshow(all_labels, cmap='spectral')
plt.axis('off')
plt.subplot(133)
plt.imshow(blobs_labels, cmap='spectral')
plt.axis('off')

plt.tight_layout()


C:\Anaconda3\envs\wradlib0_11\lib\site-packages\matplotlib\cbook\deprecation.py:106: MatplotlibDeprecationWarning: The spectral and spectral_r colormap was deprecated in version 2.0. Use nipy_spectral and nipy_spectral_r instead.
  warnings.warn(message, mplDeprecation, stacklevel=1)

In [38]:
nb_labels


Out[38]:
886

In [19]:
from scipy import ndimage

In [40]:
mask = frames[10] > 10

label_im, nb_labels = ndimage.label(mask)

# Find the largest connected component
sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
mask_size = sizes < 1000
remove_pixel = mask_size[label_im]
label_im[remove_pixel] = 0
labels = np.unique(label_im)
label_im = np.searchsorted(labels, label_im)

# Now that we have only one connected component, extract it's bounding box
slice_x, slice_y = ndimage.find_objects(label_im==4)[0]
roi = im[slice_x, slice_y]

plt.figure(figsize=(4, 2))
plt.axes([0, 0, 1, 1])
plt.imshow(roi)
plt.axis('off')


C:\Anaconda3\envs\wradlib0_11\lib\site-packages\scipy\ndimage\measurements.py:272: DeprecationWarning: In future, it will be an error for 'np.bool_' scalars to be interpreted as an index
  return _nd_image.find_objects(input, max_label)
Out[40]:
(-0.5, 34.5, 63.5, -0.5)

In [44]:
remove_pixel.shape


Out[44]:
(900, 900)

In [21]:
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=20)

In [22]:
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
                                    sharex=True, sharey=True)

ax1.imshow(im, cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)

ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title('Canny filter, $\sigma=1$', fontsize=20)

ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title('Canny filter, $\sigma=3$', fontsize=20)

fig.tight_layout()


<input>:11: DeprecationWarning: invalid escape sequence \s
<input>:11: DeprecationWarning: invalid escape sequence \s
<input>:11: DeprecationWarning: invalid escape sequence \s
<input>:11: DeprecationWarning: invalid escape sequence \s
<input>:11: DeprecationWarning: invalid escape sequence \s
<input>:11: DeprecationWarning: invalid escape sequence \s
<input>:11: DeprecationWarning: invalid escape sequence \s
<ipython-input-22-d4c7fe1e591b>:11: DeprecationWarning: invalid escape sequence \s
  ax2.set_title('Canny filter, $\sigma=1$', fontsize=20)

In [24]:
from skimage.morphology import convex_hull_image
from skimage import data, img_as_float
from skimage.util import invert

# The original image is inverted as the object must be white.
#image = invert(frames[0])
image = frames[0]

chull = convex_hull_image(image)

fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()

ax[0].set_title('Original picture')
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_axis_off()

ax[1].set_title('Transformed picture')
ax[1].imshow(chull, cmap=plt.cm.gray, interpolation='nearest')
ax[1].set_axis_off()

plt.tight_layout()

In [26]:
import numpy as np
import matplotlib.pyplot as plt

from skimage.data import camera
from skimage.filters import roberts, sobel, scharr, prewitt


image = frames[0]
edge_roberts = roberts(image)
edge_sobel = sobel(image)

fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True,
                       figsize=(8, 4))

ax[0].imshow(edge_roberts, cmap=plt.cm.gray)
ax[0].set_title('Roberts Edge Detection')

ax[1].imshow(edge_sobel, cmap=plt.cm.gray)
ax[1].set_title('Sobel Edge Detection')

for a in ax:
    a.axis('off')

plt.tight_layout()

In [34]:
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage import data
from skimage.filters import gaussian
from skimage.segmentation import active_contour

# Test scipy version, since active contour is only possible
# with recent scipy version
import scipy
split_version = scipy.__version__.split('.')
if not(split_version[-1].isdigit()): # Remove dev string if present
        split_version.pop()
scipy_version = list(map(int, split_version))
new_scipy = scipy_version[0] > 0 or \
            (scipy_version[0] == 0 and scipy_version[1] >= 14)

img = frames[0]#data.astronaut()
img = rgb2gray(img)

x = [100, 800, 800, 100, 100]
y = [100, 100, 800, 800, 100]
init = np.array([x, y]).T

if not new_scipy:
    print('You are using an old version of scipy. '
          'Active contours is implemented for scipy versions '
          '0.14.0 and above.')

if new_scipy:
    snake = active_contour(gaussian(img, 1),
                           init, alpha=0.015, beta=10, gamma=0.001)

    fig = plt.figure(figsize=(7, 7))
    ax = fig.add_subplot(111)
    plt.gray()
    ax.imshow(img)
    ax.plot(init[:, 0], init[:, 1], '--r', lw=3)
    ax.plot(snake[:, 0], snake[:, 1], '-b', lw=3)
    ax.set_xticks([]), ax.set_yticks([])
    ax.axis([0, img.shape[1], img.shape[0], 0])

In [41]:
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte


# Load picture and detect edges
im = frames[0]
image = im#img_as_ubyte(im/im.max())
edges = canny(image, sigma=20)#, low_threshold=10, high_threshold=50)


# Detect two radii
hough_radii = np.arange(5, 100, 5)
hough_res = hough_circle(edges, hough_radii)

# Select the most prominent 5 circles
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii,
                                           total_num_peaks=100)

# Draw them
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 4))
image = color.gray2rgb(image)
for center_y, center_x, radius in zip(cy, cx, radii):
    circy, circx = circle_perimeter(center_y, center_x, radius)
    image[circy, circx] = (220, 20, 20)

ax.imshow(image, cmap=plt.cm.gray)


Out[41]:
<matplotlib.image.AxesImage at 0xd236828>

In [42]:
import matplotlib.pyplot as plt

from skimage import data, color, img_as_ubyte
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter

# Load picture, convert to grayscale and detect edges
#image_rgb = data.coffee()[0:220, 160:420]
#image_gray = color.rgb2gray(image_rgb)
image_gray = frames[0]
edges = canny(image_gray, sigma=2.0,
              low_threshold=0.55, high_threshold=0.8)

# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250,
                       min_size=100, max_size=120)
result.sort(order='accumulator')

# Estimated parameters for the ellipse
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]

# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(img_as_ubyte(edges))
edges[cy, cx] = (250, 0, 0)

fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), sharex=True,
                                sharey=True,
                                subplot_kw={'adjustable':'box-forced'})

ax1.set_title('Original picture')
ax1.imshow(image_rgb)

ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)


---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-42-d80450b2c2f6> in <module>()
     18 # The threshold eliminates low accumulators
     19 result = hough_ellipse(edges, accuracy=20, threshold=250,
---> 20                        min_size=100, max_size=120)
     21 result.sort(order='accumulator')
     22 

C:\Anaconda3\envs\wradlib0_11\lib\site-packages\skimage\transform\hough_transform.py in hough_ellipse(img, threshold, accuracy, min_size, max_size)
    161     """
    162     return _hough_ellipse(img, threshold=threshold, accuracy=accuracy,
--> 163                           min_size=min_size, max_size=max_size)
    164 
    165 

skimage/transform/_hough_transform.pyx in skimage.transform._hough_transform._hough_ellipse (skimage\transform\_hough_transform.c:4430)()

C:\Anaconda3\envs\wradlib0_11\lib\site-packages\numpy\core\fromnumeric.py in amax(a, axis, out, keepdims)
   2305 
   2306     """
-> 2307     kwargs = {}
   2308     if keepdims is not np._NoValue:
   2309         kwargs['keepdims'] = keepdims

KeyboardInterrupt: 

In [43]:
from skimage.feature import daisy
from skimage import data
import matplotlib.pyplot as plt


img = frames[0]
descs, descs_img = daisy(img, step=180, radius=58, rings=2, histograms=6,
                         orientations=8, visualize=True)

fig, ax = plt.subplots()
ax.axis('off')
ax.imshow(descs_img)
descs_num = descs.shape[0] * descs.shape[1]
ax.set_title('%i DAISY descriptors extracted:' % descs_num)


Out[43]:
Text(0.5,1,'25 DAISY descriptors extracted:')

Use OpenCV's Optical Flow to detect and track features

This example uses the Lucas-Kanade Optical Flow implementation in OpenCV (see here). We take the first frame, detect some Shi-Tomasi corner points in it, then we iteratively track those points over the subsequent images.

The parameter dictionaries are certainly something to experiment with.


In [ ]:
# FEATURE DETECTION: Parameters for ShiTomasi corner detection
feature_params = dict( maxCorners = 200,
                       qualityLevel = 0.2,
                       minDistance = 7,
                       blockSize = 21 )

# FEATURE TRACKING: Parameters for Lucas Kanade (lk) Optical Flow technique
lk_params = dict( winSize  = (20,20),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0))

# Over which time steps (of the data we've read in) do you want to track
trackstart = 0
trackend = 36

In [ ]:
# Our approach requires 8 bit integers - so we need to normalize our radar data accordingly
#   (there might be a more elegant solution...)
minval = 0
maxval = 59 # dBZ in this case
iframes = frames.copy()
iframes[iframes<minval] = minval
iframes[iframes>maxval] = maxval
iframes = ((iframes / maxval)*255).astype(np.uint8)

In [ ]:
# Find good features to track...
old = cv2.goodFeaturesToTrack(iframes[trackstart], mask = None, **feature_params)
print("Found %d good features to track." % len(old) )

# Set containers to collect results (time steps in rows, detected corners in columns)
#   Tracking status
sts = np.zeros((trackend,len(old)), dtype=np.bool)
#   corner x coords
x = np.zeros((trackend,len(old))) * np.nan
#   corner y coords
y = np.zeros((trackend,len(old))) * np.nan
#   tracking error
errs = np.zeros((trackend,len(old))) * np.nan
#   Assign persistent corner IDs
ids = np.arange(len(old))

In [ ]:
# Track good features
for i in range(trackstart, trackend):
    # track current corners in next image
    new, st, err = cv2.calcOpticalFlowPyrLK(prevImg=iframes[i], nextImg=iframes[i+1], prevPts=old, nextPts=None, **lk_params)
    success = st.ravel()==1
    ids = ids[success]
    sts[i, ids] = True
    x[i, ids] = old[success,0,0]
    y[i, ids] = old[success,0,1]
    errs[i, ids] = err.ravel()[success]
    # new corners will be old in the next loop
    old = new[success]

# Incremental euclidic distance from starting point
trackdist = np.diff( np.sqrt( (x-x[0].reshape((1,-1)))**2 + (y-y[0].reshape((1,-1)))**2 ), axis=0 )
trackdist = np.vstack( (np.zeros((1,trackdist.shape[1])), trackdist))

# Plot feature persistence
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(211)
cb = plt.imshow(errs, interpolation="none", cmap="summer", vmax = 15)
plt.xlabel("Feature ID")
plt.ylabel("Tracking time step")
plt.colorbar(cb, shrink=0.5)
plt.title("Tracking error")

# Plot consistence of movement
ax = fig.add_subplot(212)
cb = plt.imshow(trackdist, interpolation="none", cmap="bwr", vmin=-5, vmax=5)
plt.xlabel("Feature ID")
plt.ylabel("Tracking time step")
plt.colorbar(cb, shrink=0.75)
plt.title("Incremental euclidian distance from starting point")

plt.tight_layout()

In [ ]:
# Find good tracks (but what is a "good" track...?)
#   Certainly a lot of subjective criteria to play with...
goodtrack = np.zeros(x.shape[1], dtype=np.bool)
for i in range(len(goodtrack)):
    # persistence of the track
    if len(np.where(sts[:,i])[0]) < 2:
        continue
    # consistency of movement
    if len(np.where(trackdist[:,i]<0)[0]) > 0:
        continue
    # tracking error
    if len(np.where(errs[:,i]>15)[0]) > 5:
        continue
    goodtrack[i] = True
print("Found %d good tracks and %d bad tracks." % \
      (len(np.where(goodtrack)[0]), len(goodtrack)-len(np.where(goodtrack)[0])) )

In [ ]:
# Visualize tracks: green=good track, red=bad track
goodcolor = "limegreen"
badcolor = "red"
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, aspect="equal")
# average reflectivity over entire tracking period as background image
ax.imshow(np.mean(frames[trackstart:trackend], axis=0), origin="lower", cmap="gray", interpolation="none")
plt.xlabel("Easting (# pixels)")
plt.ylabel("Northing (# pixels)")
plt.title("[Zoom in to inspect track properties (not in inline mode!)]")
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
bad_line = plt.Line2D([], [], color=badcolor, label='Bad track')
good_line = plt.Line2D([], [], color=goodcolor, label='Good track')
plt.legend(handles=[bad_line, good_line], loc="upper left")
for i, isgood in enumerate(goodtrack):
    ix = sts[:,i]
    color = badcolor
    if isgood:
        color = goodcolor
    ax.plot(x[ix,i], y[ix,i],marker="None", color=color, markersize=14, linestyle="-")
    ax.arrow(x[ix,i][-2], y[ix,i][-2],
             np.diff(x[ix,i][-2:])[0], np.diff(y[ix,i][-2:])[0], 
             head_width=2, head_length=2, fc=color, ec=color)

In [ ]:
# Animate features

# Prepare canvas
fig = plt.figure(figsize=(8,8))
ax1 = plt.subplot(111,aspect="equal")
im1 = ax1.imshow(iframes[trackstart], origin="lower", cmap="gray", interpolation="none")
plt.xlabel("Easting (# pixels)")
plt.ylabel("Northing (# pixels)")
plt.title("[Zoom in to inspect track properties (not in inline mode!)]")
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
ax1.plot(x[0,goodtrack], y[0,goodtrack], linestyle="None", marker="o", mfc="None", mec="limegreen")
ax1.plot(x[0,~goodtrack], y[0,~goodtrack], linestyle="None", marker="o", mfc="None", mec="red")
ax1.grid(color="white")
tstamp1 = ax1.text(25, 850, dtimes[trackstart].isoformat(), color="white", fontsize=14)

def animate(j):
    im1.set_array(iframes[trackstart+j])
    for line in plt.gca().get_lines():
        if not line.get_linestyle()=="None":
            line.remove()
    for i, isgood in enumerate(goodtrack):
        ix = np.where(sts[:j,i])[0]
        color = "red"
        if isgood:
            color = "limegreen"
        ax1.plot(x[ix,i], y[ix,i], marker="None", color=color, markersize=14, linestyle="-")
    tstamp1.set_text(dtimes[trackstart+j].isoformat())
    return im1

# ATTENTION: THIS IS SLOW - Rendering each frame of the animation might take more time than the interval between the frames
#    This can cause the temporal sequence to be confused in the matplotlib interactive mode.
#    The animation thus looks better if saved as movie, or you have to increase the interval argument
#    Animation not shown in notebook if you use %pylab inline
ani = animation.FuncAnimation(fig, animate, frames=np.arange(trackstart, trackend-1), interval=400, blit=False)
#ani.save("features.avi", dpi=500, bitrate=2000)

Update tracked corners for each time step of the considered tracking period

Until now, we only tracked those corners which we detected in the initial time step. We now want to add new tracks with each addtional time step, and follow these as well.


In [ ]:
init_crns = [cv2.goodFeaturesToTrack(iframes[i], mask = None, **feature_params) for i in range(trackstart, trackend)]
print("List of # corners in each time step:\n", [len(crn) for crn in init_crns ])

In [ ]:
# this function wraps up everything which we already did above for a single set of corners
def tracker(old, frameset, lk_params):
    # Set containers to collect results (time steps in rows, corners in columns)
    #   Tracking status
    sts = np.zeros((trackend,len(old)), dtype=np.bool)
    #   corner x coords
    x = np.zeros((trackend,len(old))) * np.nan
    #   corner y coords
    y = np.zeros((trackend,len(old))) * np.nan
    #   tracking error
    errs = np.zeros((trackend,len(old))) * np.nan
    #   Assign persistent corner IDs
    ids = np.arange(len(old))
    # Track good features
    for i in range(len(frameset)-1):
        # track current corners in next image
        new, st, err = cv2.calcOpticalFlowPyrLK(prevImg=frameset[i], nextImg=frameset[i+1],
                                                prevPts=old, nextPts=None, **lk_params)
        success = st.ravel()==1
        ids = ids[success]
        sts[i, ids] = True
        x[i, ids] = new[success,0,0]
        y[i, ids] = new[success,0,1]
        errs[i, ids] = err.ravel()[success]
        # new corners will be old in the next loop
        old = new[success]

    # Incremental euclidic distance from starting point
    trackdist = np.diff( np.sqrt( (x-x[0].reshape((1,-1)))**2 + (y-y[0].reshape((1,-1)))**2 ), axis=0 )
    trackdist = np.vstack( (np.zeros((1,trackdist.shape[1])), trackdist))

    # Find good tracks (but what is a "good" track...?)
    goodtrack = np.zeros(x.shape[1], dtype=np.bool)
    for i in range(len(goodtrack)):
        # persistence of the track
        if len(np.where(sts[:,i])[0]) < 2:
            continue
        # consistency of movement
        if len(np.where(trackdist[:,i]<0)[0]) > 0:
            continue
        # tracking error
        if len(np.where(errs[:,i]>15)[0]) > 5:
            continue
        goodtrack[i] = True
    
    return sts, x, y, errs, goodtrack

In [ ]:
sts_ls, x_ls, y_ls, errs_ls, goodtrack_ls = [], [], [], [], []
for i, crns in enumerate(init_crns):
    sts, x, y, errs, goodtrack = tracker(crns, iframes[i:], lk_params)
    sts_ls.append(sts)
    x_ls.append(x)
    y_ls.append(y)
    errs_ls.append(errs)
    goodtrack_ls.append(goodtrack)

In [ ]:
# Visualize tracks:
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(111, aspect="equal")
# average reflectivity as background image
ax.imshow(np.mean(frames[trackstart:trackend], axis=0), origin="lower", cmap="gray", interpolation="none")
plt.xlabel("Easting (# pixels)")
plt.ylabel("Northing (# pixels)")
plt.title("[Zoom in to inspect track properties (not in inline mode!)]")
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
colors = [ plt.cm.spring(i) for i in np.linspace(0,254, len(goodtrack_ls)).astype("i4") ]
for j, goodtrack in enumerate(goodtrack_ls[:-2]):
    sts, x, y = sts_ls[j], x_ls[j], y_ls[j]
    for i, isgood in enumerate(goodtrack):
        ix = sts[:,i]
        # HERE WE DO NOT PLOT THE BAD TRACKS
        color = "none"
        if isgood:
            color = colors[j]
        ax.plot(x[ix,i], y[ix,i],marker="None", color=color, linestyle="-", alpha=0.4)
        ax.arrow(x[ix,i][-2], y[ix,i][-2],
                 np.diff(x[ix,i][-2:])[0], np.diff(y[ix,i][-2:])[0], 
                 head_width=2, head_length=2, fc=color, ec=color, alpha=0.4)

In [ ]:
# ATTENTION: THIS ANIMATION TAKES A LONG WHILE (SEVERAL MINUTES) AND MIGHT STILL BE BUGGY

# Prepare canvas
fig = plt.figure(figsize=(8,8))
ax1 = plt.subplot(111,aspect="equal")
im1 = ax1.imshow(iframes[trackstart], origin="lower", cmap="gray", interpolation="none")
plt.xlabel("Easting (# pixels)")
plt.ylabel("Northing (# pixels)")
plt.title("[Zoom in to inspect track properties (not in inline mode!)]")
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
#ax1.plot(x[0,goodtrack], y[0,goodtrack], linestyle="None", marker="o", mfc="None", mec=colors[0])
ax1.grid(color="white")
tstamp1 = ax1.text(25, 850, dtimes[trackstart].isoformat(), color="white", fontsize=14)

def animate(j):
    im1.set_array(iframes[trackstart+j])
    for line in plt.gca().get_lines():
        line.remove()
        #if not line.get_linestyle()=="None":
        #    line.remove()
    for k, goodtrack in enumerate(goodtrack_ls[:j]):
        sts, x, y = sts_ls[k], x_ls[k], y_ls[k]
        for i, isgood in enumerate(goodtrack):
            ix = np.where(sts[:j,i])[0]
            # HERE WE DO NOT PLOT THE BAD TRACKS
            color = "none"
            if isgood:
                color = colors[k]
            #ax1.plot(x[0,goodtrack], y[0,goodtrack], linestyle="None", marker="o", mfc="None", mec=color, alpha=0.4)
            ax1.plot(x[ix,i], y[ix,i],marker="None", color=color, linestyle="-", alpha=0.4)
# ATTENTION: THIS IS SLOW - Rendering each frame of the animation might take more time than the interval between the frames
#    This can cause the temporal sequence to be confused.
#    The animation thus looks better if saved as movie, or you have to increase the interval argument
ani = animation.FuncAnimation(fig, animate, frames=np.arange(trackstart, trackend-1), interval=200, blit=False)
#ani.save("features2.avi", dpi=500, bitrate=2000)

Experiment with SIFT/SURF feature detection and description

See SIFT and SURF for feature detection. Right now, this does not seem to add value as compared to the Optical Flow approach above. Features seem to be much less persistent.


In [ ]:
# SURF
surf = cv2.xfeatures2d.SURF_create(3000)

kplist = []
deslist= []

for i in range(trackstart, trackend):
    kp, des = surf.detectAndCompute(iframes[i],None)
    kplist.append(kp)
    deslist.append(des)
    print("Found %d keypoints in step %d." % (len(kp), i))

In [ ]:
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, aspect="equal")
# average reflectivity as background image
ax.imshow(frames[0], origin="lower", cmap="gray", interpolation="none")
plt.xlabel("Easting (# pixels)")
plt.ylabel("Northing (# pixels)")
plt.title("[Zoom in to inspect feature properties (not in inline mode)]")
tstamp1 = ax1.text(25, 850, dtimes[0].isoformat(), color="white", fontsize=14)
plt.grid(color="white")
plt.xlim(0,nx/downsizeby)
plt.ylim(0,nx/downsizeby)
patches = []
for kp_ in kplist[0]:
    if kp_.size > 5:
        circle = mpatches.Circle(kp_.pt, kp_.size, fill=False, edgecolor="red")
    #ax.add_patch(circle)
    patches.append(circle)
collection = PatchCollection(patches, facecolor="none", edgecolor="red")
ax.add_collection(collection)

In [ ]:
# Make list of patch collections for all timesteps
def collect(kp):
    patches = []
    for kp_ in kp:
        if (kp_.size > 10) and (kp_.size < 50):
            circle = mpatches.Circle(kp_.pt, kp_.size, fill=False, edgecolor="red")
            patches.append(circle)
    return(PatchCollection(patches, facecolor="none", edgecolor="red"))

In [ ]:
# Animate features
_plot_style = dict(markersize=12, markeredgewidth=2,
                       markerfacecolor='none', markeredgecolor='r',
                       marker='o', linestyle='none')
_pcm_style = dict(cmap=plt.cm.spectral, vmin=0., vmax=30.)

# Prepare canvas
fig = plt.figure(figsize=(10,10))
ax1 = plt.subplot(111,aspect="equal")
im1 = ax1.imshow(iframes[0], origin="lower", cmap="gray", interpolation="none")
ax1.add_collection(collect(kplist[0]))
ax1.grid(color="white")
tstamp1 = ax1.text(25, 850, dtimes[0].isoformat(), color="white", fontsize=14)


def animate(i):
    im1.set_array(iframes[trackstart+i])
    ax1.collections = []
    ax1.add_collection(collect(kplist[trackstart+i]))
    tstamp1.set_text(dtimes[trackstart+i].isoformat())
    return im1

ani = animation.FuncAnimation(fig, animate, frames=np.arange(trackstart, trackend-1), interval=200, blit=False)
#ani.save("features_surf.avi", dpi=400, bitrate=2000)

Match features (brute force)

According Bowler et al. (2004), maximum advection velocity of rainfall objects is about 130 km/h which is roughly 10 km (pixels) in 5 minutes.


In [ ]:
maxveloc = 10.
# Detect initial feature set 
detector = cv2.xfeatures2d.SURF_create(3000)
kp1, des1 = detector.detectAndCompute(iframes[trackstart],None)

# create BFMatcher object
bf = cv2.BFMatcher()

kp1_ls = []
kp2_ls = []

for i in range(trackstart+1, trackend):
    kp2, des2 = detector.detectAndCompute(iframes[i],None)
    matches = bf.knnMatch(des1, des2, k=1)
    # Select matches to keep
    kp1_, des1_, kp2_, des2_  = [], [], [], []
    for match in matches:
        match=match[0]
        xy = np.vstack( (kp1[match.queryIdx].pt, kp2[match.trainIdx].pt) )
        eucdist = np.sqrt( (xy[0,0] - xy[1,0])**2 + (xy[0,1] - xy[1,1])**2 )
        if eucdist < maxveloc:
            kp1_.append( kp1[match.queryIdx] )
            des1_.append( np.array( des1[match.queryIdx] ) )
            kp2_.append( kp2[match.trainIdx] )
            des2_.append( np.array( des2[match.trainIdx] ) )
    kp1_ls.append(kp1_)
    kp2_ls.append(kp2_)
    # Update initial feature set
    kp1, des1 = kp2_, np.array( des2_ )

In [ ]: