In [2]:
%load_ext autoreload
%autoreload 2
# System modules
import os
import warnings
import pickle
import glob
import sys
import logging
import datetime as dt
import time
# External modules
import numpy as np
import scipy.misc
import scipy.ndimage.filters
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import dateutil.parser
import typhon.plots
import scipy.ndimage
from skimage.segmentation import slic
from skimage.filters import threshold_otsu
from skimage import exposure
# Internal modules
import pyclamster
from pyclamster.clustering.preprocess import LCN
from pyclamster.functions import rbDetection
from pyclamster.positioning import Projection
plt.style.use('ggplot')
# NOTE: the original code called ``warnings.catch_warnings()`` bare, which is
# a no-op unless used as a context manager -- it was dropped.  The filter
# below is what actually silences warnings for the whole kernel session.
warnings.filterwarnings('ignore')
#logging.basicConfig(level=logging.INFO)
In [3]:
FILE_DIR = %pwd
BASE_DIR = os.path.dirname(FILE_DIR)
#image_directory = os.path.join(BASE_DIR, "examples", "images", "lex")
image_dir = '/home/tfinn/Data/specific/'
trained_models = os.path.join(BASE_DIR, "data")
plot_dir = os.path.join(BASE_DIR, 'plots')
debug = True
In [7]:
# Load the trained k-means predictor and the two calibrated camera sessions.
# ``with`` blocks ensure the file handles are closed again; the original
# ``pickle.load(open(...))`` pattern leaked them.
# NOTE(review): pickle.load executes arbitrary code -- only load trusted files.
with open(os.path.join(trained_models, "kmeans.pk"), "rb") as pk_file:
    predictor = pickle.load(pk_file)
cams = []
for session_name in ('FE3_session_new_600.pk', 'FE4_session_new_600.pk'):
    with open(os.path.join(trained_models, 'sessions', session_name), mode='rb') as pk_file:
        cams.append(pickle.load(pk_file))
cams[0].set_images(os.path.join(image_dir, 'cam3'))
cams[1].set_images(os.path.join(image_dir, 'cam4'))
matching = pyclamster.matching.Matching(greyscale=False)
# Horizontal distance between the two cameras (stereo baseline).
dist = np.sqrt((cams[0].position.x-cams[1].position.x)**2
               + (cams[0].position.y-cams[1].position.y)**2)
height = {}  # maps first-image timestamp -> list of derived cloud heights
In [8]:
def _save_detection_plots(img, rb_image, global_thres, threshold_array,
                          label, cloud_labels_object, k):
    """Write per-image segmentation diagnostics for camera ``k`` to ``plot_dir``."""
    time_str = img.time.strftime('%Y%m%d%H%M')
    fig = plt.figure()
    gs = gridspec.GridSpec(2, 3)
    ax = plt.subplot(gs[0, 0])
    ax.imshow(img.data)
    ax = plt.subplot(gs[0, 1])
    ax.imshow(rb_image)
    ax = plt.subplot(gs[1, :2])
    ax.hist(rb_image.reshape((-1)), bins=256)
    ax.axvline(x=global_thres)
    ax = plt.subplot(gs[1, 2])
    ax.imshow(threshold_array)
    ax = plt.subplot(gs[0, 2])
    ax.imshow(cloud_labels_object.labels)
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(os.path.join(plot_dir, "detection_{0:s}_{1:d}.pdf".format(time_str, k)))
    plt.close(fig)  # free the figure -- avoids memory build-up over many images
    print_label = label.labels
    print_label = np.ma.masked_where(print_label == 0, print_label)
    print_clouds = np.ma.masked_where(print_label == 0, cloud_labels_object.labels)
    img.save(os.path.join(plot_dir, "rectified_{0:s}_{1:d}.png".format(time_str, k)))
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.3 -- verify the
    # pinned SciPy version.  The "lables" typo in the filenames is kept for
    # output compatibility with existing downstream tooling.
    scipy.misc.imsave(
        os.path.join(plot_dir, "lables_kmean_{0:s}_{1:d}.png".format(time_str, k)),
        print_label)
    scipy.misc.imsave(
        os.path.join(plot_dir, "lables_used_{0:s}_{1:d}.png".format(time_str, k)),
        print_clouds)


def _save_matching_plots(result, spatial_cloud, img, t):
    """Plot the matched cloud pair (``t``-th match) and its height field."""
    time_str = img.time.strftime('%Y%m%d%H%M')
    fig = plt.figure()
    ax = plt.subplot(1, 3, 1)
    ax.axis('off')
    ax.imshow(result[1].clouds[0].image.data)
    ax = plt.subplot(1, 3, 2)
    ax.axis('off')
    ax.imshow(result[0].prob_map, interpolation='nearest')
    ax = plt.subplot(1, 3, 3)
    ax.axis('off')
    ax.imshow(result[1].clouds[1].image.data)
    plt.tight_layout()
    plt.savefig(os.path.join(plot_dir, 'matching_{0:s}_{1:d}.png'.format(time_str, t)))
    plt.close(fig)
    fig = plt.figure()
    gs = gridspec.GridSpec(1, 3)
    ax = plt.subplot(gs[0])
    ax.axis('off')
    ax.imshow(spatial_cloud.clouds[0].image.data)
    ax = plt.subplot(gs[1])
    ax.axis('off')
    z = spatial_cloud.positions.z
    X, Y = np.meshgrid(range(z.shape[1]), range(z.shape[0]))
    CS = ax.contour(X, Y, z)
    plt.clabel(CS, inline=1, fontsize=10)
    # '{:5.1f}': the original '{:5.1}' formatted with one *significant digit*
    # (e.g. '1e+03') instead of one decimal place.
    plt.text(0, 0, '{:5.1f} m height'.format(spatial_cloud.height), color='black')
    ax = plt.subplot(gs[2])
    ax.axis('off')
    ax.imshow(spatial_cloud.clouds[1].image.data)
    plt.tight_layout()
    plt.savefig(os.path.join(plot_dir, 'height{0:s}_{1:d}.png'.format(time_str, t)))
    plt.close(fig)


def generate_doppel(images):
    """Detect clouds in one image pair, match them across both cameras and
    derive cloud heights via stereo matching.

    Parameters
    ----------
    images : sequence of two pyclamster images (camera 3 and camera 4),
        assumed to be taken at (nearly) the same time.

    Side effects: appends the derived heights to the module-level ``height``
    dict (keyed by the first image's timestamp) and, when ``debug`` is True,
    writes diagnostic figures to ``plot_dir``.
    """
    start_time = time.time()
    clouds = []
    height[images[0].time] = []
    for k, img in enumerate(images):
        # Red-minus-blue channel difference: clear sky is strongly blue
        # (small/negative R-B) while clouds are whitish, so thresholding
        # R-B separates clouds from sky.
        rb_image = img.data[:, :, 0] - img.data[:, :, 2]
        # Otsu threshold computed on the brighter pixels only (R-B > 10).
        # TODO confirm the magic numbers 10 (pre-threshold) and 960
        # (minimum component size, px) against the camera calibration.
        global_thres = threshold_otsu(rb_image[rb_image > 10])
        threshold_array = np.zeros_like(rb_image)
        threshold_array[:] = global_thres
        label = pyclamster.Labels(
            (np.logical_or(rb_image > threshold_array, rb_image < 10)).astype(int))
        masks = label.getMaskStore()
        masks.denoise([1], 960)  # drop tiny connected components (noise)
        cloud_labels_object, _ = masks.labelMask([1, ])
        cloud_store = cloud_labels_object.getMaskStore()
        cloud_labels = np.unique(cloud_labels_object.labels)[1:]  # skip background 0
        clouds.append([cloud_store.getCloud(img, [lbl, ])
                       for lbl in cloud_labels])
        if debug:
            _save_detection_plots(img, rb_image, global_thres, threshold_array,
                                  label, cloud_labels_object, k)
        print('finished image {0:s} of camera {1:d}'.format(
            img.time.strftime('%Y%m%d%H%M'), k))
    t = 0
    # Only attempt stereo matching when BOTH cameras detected clouds.
    if clouds[0] and clouds[1]:
        matching_result, _ = matching.matching(clouds[0], clouds[1],
                                               min_match_prob=0.9)
        for result in matching_result:
            spatial_cloud = result[1]
            successful = spatial_cloud.oper_mode()
            if debug and successful:
                # ``img`` is the last image of the loop above (camera 4),
                # matching the original filename convention.
                _save_matching_plots(result, spatial_cloud, img, t)
            height[images[0].time].append(spatial_cloud.height)
            t += 1
    try:
        print('finished image {0:s} calculations with {1:d} heights (min: {2:.1f}, max: {3:.1f}), duration: {4:.1f} s'.format(images[0].time.strftime('%Y%m%d%H%M'), t, np.nanmin(height[images[0].time]), np.nanmax(height[images[0].time]), time.time()-start_time))
    except (ValueError, TypeError):
        # nanmin/nanmax raise on an empty height list (or on None entries);
        # fall back to the summary line without min/max.
        print('finished image {0:s} calculations with {1:d} heights, duration: {2:.1f} s'.format(images[0].time.strftime('%Y%m%d%H%M'), t, time.time()-start_time))
In [9]:
# Walk both cameras' rectified image streams in lock-step: process every pair
# whose timestamps differ by less than one minute, otherwise advance the
# stream that lags behind until the two are aligned again.
images_available = True
gens = [cams[0].iterate_over_rectified_images(),
        cams[1].iterate_over_rectified_images()]
images = [next(gens[0]), next(gens[1])]
while images_available:
    for k, img in enumerate(images):
        # camera index 0 -> FE3_..., index 1 -> FE4_...
        img.loadTimefromfilename('FE{0:d}_Image_%Y%m%d_%H%M%S_UTCp1.jpg'.format(k+3))
    # Use abs(total_seconds()): the original ``(t0 - t1).seconds < 60`` broke
    # for negative timedeltas (timedelta(seconds=-30).seconds == 86370), so
    # pairs where camera 4 slightly led camera 3 were silently skipped.
    if abs((images[0].time - images[1].time).total_seconds()) < 60:
        generate_doppel(images)
        try:
            images = [next(gens[0]), next(gens[1])]
        except StopIteration:  # one of the streams is exhausted
            images_available = False
    elif images[0].time < images[1].time:
        try:
            images[0] = next(gens[0])
        except StopIteration:
            images_available = False
    else:
        try:
            images[1] = next(gens[1])
        except StopIteration:
            images_available = False
print('finished image processing')
In [1]:
# Collect the per-timestamp height lists into one DataFrame (one column per
# detected cloud) and persist it as JSON for later analysis.
max_len = np.max([len(height[i]) for i in height.keys()])
df_height = pd.DataFrame(columns=range(max_len), index=height.keys())
for timestamp in df_height.index:
    for col, cloud_height in enumerate(height[timestamp]):
        # Keep only physically plausible cloud heights (70 m .. 15 km).
        if cloud_height is not None and 70 < cloud_height < 15000:
            # .loc replaces .ix, which was removed in modern pandas; the
            # integer column labels come from range(), so label-based
            # indexing is equivalent here.
            df_height.loc[timestamp, col] = cloud_height
# sort_index() replaces DataFrame.sort(), removed in pandas >= 0.20.
df_height = df_height.sort_index()
df_height.to_json(os.path.join(trained_models, 'heights_201609010900_4h_300_new.json'))
In [ ]: