In [16]:
# From PiFM_read.py in Data > JessicaKong > Code on the server. Need to check
# whether it matches the version in util in hyperAFM.
import os
import numpy as np
from skimage import feature
from skimage import transform
import csv
class PiFMImage():
    """
    A class representing a PiFM image. Give the path to the PiFM data
    and receive an object that stores the scan parameters and each
    recorded channel as an image.

    Input:
        path: Path to the ANFATEC parameter file. This is the text file
            that is generated with each scan.

    Output:
        A PiFMImage with the scan parameters (parms), the channel
        captions (channel_names), and the channel images (channel_data).
    """
    def __init__(self, path):
        self.channel_names = []
        full_path = os.path.realpath(path)
        directory = os.path.dirname(full_path)

        # Get the scan parameters and channel details.
        self.parms, channels = read_anfatec_params(full_path)

        x_pixel = int(self.parms['xPixel'])
        y_pixel = int(self.parms['yPixel'])

        # Make one big array for all the data channels.
        channel_data = np.zeros((x_pixel, y_pixel, len(channels)))

        for i, channel in enumerate(channels):
            self.channel_names.append(channel['Caption'])
            data = np.fromfile(os.path.join(directory, channel['FileName']), dtype='i4')
            # scaling = float(channel['Scale'])
            # Reshape using the pixel counts from the parameter file rather
            # than a hard-coded 256 x 256.
            channel_data[:, :, i] = np.reshape(data, (x_pixel, y_pixel))
            # for i, line in enumerate(np.split(data, y_pixel)):
            #     for j, pixel in enumerate(np.split(line, x_pixel)):
            #         channel_data[j, i, :] = scaling * pixel

        # Here's how we access the different channel data.
        self.channel_data = channel_data
def read_anfatec_params(path):
    """
    Reads in an ANFATEC parameter file. This file is produced by the Molecular
    Vista PiFM system and describes all parameters needed to interpret the data
    files produced when the data is saved.

    Input:
        path: a path to the ANFATEC parameter file.

    Output:
        scan_params: A dictionary of non-channel-specific scan parameters.
        file_descriptions: A list of dictionaries, with each item in the list
            corresponding to a channel that was recorded by the PiFM.
    """
    file_descriptions = []
    scan_params = {}
    parameters = {}
    inside_description = False

    with open(path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter='\t')
        for i, row in enumerate(reader):
            if not row:
                continue
            # Skip the header line; a later line starting with ';' marks the
            # end of the parameter block, so stop reading there.
            if row[0] == ';ANFATEC Parameterfile':
                continue
            if row[0][0] == ';':
                break
            # This string indicates that we have reached a channel description.
            if row[0].endswith('Begin'):
                inside_description = True
                continue
            # Handle the unicode characters and form the key-value pairs.
            if 'FileName' not in row[0]:
                new_row = row[0].replace(' ', '')
                split_row = new_row.split(':')
                split_row[-1] = split_row[-1].decode('unicode-escape')
            else:
                split_row = row[0].split(':')
                split_row[0] = 'FileName'
                split_row[-1] = split_row[-1].decode('unicode-escape')
                split_row[-1] = split_row[-1][1:]
            # Channel parameters go to a separate structure from the
            # scan-wide parameters.
            if inside_description:
                parameters[split_row[0]] = split_row[-1]
            else:
                scan_params[split_row[0]] = split_row[-1]
            # 'End' closes a channel description; store it and reset.
            if row[0].endswith('End'):
                del parameters[row[0]]
                file_descriptions.append(parameters)
                parameters = {}
                inside_description = False
    return scan_params, file_descriptions
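As a quick sanity check, the parser can be exercised on its own. The sketch below reuses the Film15_0003 parameter file loaded later in this notebook and simply prints the scan size and the recorded channels; the keys used ('xPixel', 'yPixel', 'Caption', 'FileName') are the ones the class above relies on.
In [ ]:
# Quick sanity check of the parser (path taken from the cells below).
params, channels = read_anfatec_params('./../Data/PolymerBlends/Film15_0003.txt')
print('scan size: {} x {} pixels'.format(params['xPixel'], params['yPixel']))
for ch in channels:
    print(ch['Caption'], ch['FileName'])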
In [23]:
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import SSDMetric, CCMetric, EMMetric
import dipy.align.imwarp as imwarp
from dipy.viz import regtools
from scipy import signal
import matplotlib.pyplot as plt
%matplotlib inline
In [21]:
# Load three scans of the same polymer-blend film; channel 0 is the topography.
f03 = PiFMImage('./../Data/PolymerBlends/Film15_0003.txt')
f04 = PiFMImage('./../Data/PolymerBlends/Film15_0004.txt')
f09 = PiFMImage('./../Data/PolymerBlends/Film15_0009.txt')
f03topo = f03.channel_data[:,:,0]
f04topo = f04.channel_data[:,:,0]
f09topo = f09.channel_data[:,:,0]
# Remove a per-row linear background (line-by-line flattening).
f03topoflat1 = signal.detrend(f03topo, axis=1, type="linear")
f04topoflat1 = signal.detrend(f04topo, axis=1, type="linear")
f09topoflat1 = signal.detrend(f09topo, axis=1, type="linear")
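signal.detrend with type="linear" and axis=1 subtracts a least-squares line from each row independently, i.e. a simple line-by-line flattening. A minimal check, assuming the cell above has run, is that refitting a line to a detrended row gives a slope of essentially zero:
In [ ]:
# Row-wise flattening check: the linear trend of any row should now be ~0.
x = np.arange(f03topoflat1.shape[1])
slope_before = np.polyfit(x, f03topo[0], 1)[0]
slope_after = np.polyfit(x, f03topoflat1[0], 1)[0]
print("row 0 slope before/after detrend: {:.3e} / {:.3e}".format(slope_before, slope_after))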
In [24]:
plt.imshow(f03topoflat1)
Out[24]:
In [28]:
# Register the 0009 topography (moving) onto the 0003 topography (static) with
# dipy's symmetric diffeomorphic registration, using a sum-of-squared-differences
# metric and a multi-resolution iteration schedule.
moving = f09topoflat1
static = f03topoflat1
dim = static.ndim
metric = SSDMetric(dim)
level_iters = [200, 100, 50, 25]
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters, inv_iter=50)
mapping = sdr.optimize(static, moving)
warped_moving = mapping.transform(moving, 'linear')
regtools.overlay_images(static, warped_moving, 'Static', 'Overlay', 'Warped moving',
                        'direct_warp_result.png')
Out[28]:
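The object returned by sdr.optimize is a dipy DiffeomorphicMap, so the estimated deformation can be inspected directly and the inverse warp comes for free. A minimal sketch, assuming the In [28] cell above has run:
In [ ]:
# Visualize the estimated deformation as a warped grid (written to a PNG) and
# check the inverse mapping by warping the static image toward the moving one.
regtools.plot_2d_diffeomorphic_map(mapping, 10, 'diffeomorphic_map.png')
warped_static = mapping.transform_inverse(static, 'linear')
regtools.overlay_images(moving, warped_static, 'Moving', 'Overlay', 'Warped static',
                        'inverse_warp_result.png')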
In [46]:
plt.imshow(f09topoflat1)
Out[46]:
In [45]:
plt.imshow(warped_moving)
Out[45]:
In [59]:
# Flatten one of the PiFM channels (channel index 15) from both scans the same
# way as the topography.
pifm09 = f09.channel_data[:,:,15]
pifm03 = f03.channel_data[:,:,15]
pifm09flat1 = signal.detrend(pifm09, axis=1, type="linear")
pifm03flat1 = signal.detrend(pifm03, axis=1, type="linear")
plt.imshow(pifm09flat1)
Out[59]:
In [61]:
plt.imshow(pifm03flat1)
Out[61]:
In [65]:
# Difference between the warped 0009 PiFM channel (pifm09shifted, computed in
# the In [55] cell below, which was executed earlier) and the flattened 0003 channel.
diff = pifm09shifted - pifm03flat1
plt.imshow(diff)
Out[65]:
In [55]:
# Reuse the topography-derived mapping to warp the raw 0009 PiFM channel.
pifm09shifted = mapping.transform(pifm09, 'linear')
In [56]:
plt.imshow(pifm09shifted)
Out[56]:
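The same topography-derived mapping can be reused for every channel of the 0009 scan, not just channel 15. A sketch along the lines of the In [55] cell above:
In [ ]:
# Warp every recorded channel of the 0009 scan into the frame of the 0003 scan.
f09_warped = np.zeros_like(f09.channel_data)
for k in range(f09.channel_data.shape[2]):
    f09_warped[:, :, k] = mapping.transform(f09.channel_data[:, :, k], 'linear')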
In [53]:
# Residual between the warped moving image and the static image.
errorimage = warped_moving - static
plt.matshow(errorimage, cmap='viridis')
In [66]:
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift
In [67]:
# Translation-only registration via phase correlation: the 0003 topography is
# the reference image, the 0009 topography is the offset image.
offset_image = f09topoflat1
image = f03topoflat1
In [70]:
shift, error, diffphase = register_translation(image, offset_image)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Show the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print("Detected pixel offset (y, x): {}".format(shift))
In [100]:
offset_image = f09topoflat1
image = f03topoflat1
shift, error, diffphase = register_translation(image, offset_image)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Show the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print("Detected pixel offset (y, x): {}".format(shift))
In [82]:
# Simulate a known offset: drop the last 16 rows and the first column, then
# zero-pad back to 256 x 256 so the remaining content shifts one column to the left.
offset_imagecrop = offset_image[:-16, 1:]
offset_imagepadded = np.zeros((256, 256))
offset_imagepadded[:offset_imagecrop.shape[0], :offset_imagecrop.shape[1]] = offset_imagecrop
In [84]:
plt.imshow(offset_imagepadded)
Out[84]:
In [101]:
image = offset_imagepadded
offset_image = f03topoflat1
In [89]:
shift, error, diffphase = register_translation(image, offset_image)
print("Detected pixel offset (y, x): {}".format(shift))
In [90]:
# Same idea with a vertical offset: drop the first 16 rows and zero-pad, shifting
# the remaining content up by 16 rows.
offset_imagecrop1 = offset_image[16:, :]
offset_imagepadded1 = np.zeros((256, 256))
offset_imagepadded1[:offset_imagecrop1.shape[0], :offset_imagecrop1.shape[1]] = offset_imagecrop1
In [91]:
plt.imshow(offset_imagepadded1)
Out[91]:
In [97]:
offset_image = offset_imagepadded
image = offset_imagepadded1
shift, error, diffphase = register_translation(image, offset_image)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Show the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print("Detected pixel offset (y, x): {}".format(shift))
In [96]:
offset_image = offset_imagepadded
image = offset_imagepadded1
# upsample_factor=100 refines the cross-correlation peak to 1/100 of a pixel.
shift, error, diffphase = register_translation(image, offset_image, 100)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Show the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print("Detected pixel offset (y, x): {}".format(shift))
In [ ]: