This notebook illustrates a straightforward feature-detection workflow for registering XRIM images.
The main pyXRIM classes used are FeatureExtraction and GeometricTransforms.
Currently, FeatureExtraction is a wrapper class around the scikit-image ORB detector.
Sparse feature detectors will be implemented in the future and should be better suited to sparse XRIM images.
In [2]:
%matplotlib notebook
import numpy as np
import os
from matplotlib import pyplot as plt
import h5py as h5
from PyXRIM.PlotLib import imageGallery,imageTile
from PyXRIM.misc import TranslationTransform, RigidTransform
from PyXRIM.FeatureExtraction import FeatureExtractor
from PyXRIM.GeometricTransforms import geoTransformer
from PyXRIM.Corrections import Corrector
In [3]:
# Open the experiment HDF5 file read-only; the handle `f` stays open because
# the following cells read datasets from it (it is closed further below
# before the read/write re-open).
wdir = os.getcwd()
# NOTE(review): `fname` is an absolute path, so os.path.join ignores `wdir`
# and fpath == fname -- the join is effectively a no-op here. Confirm whether
# a relative filename was intended.
fname = '/home/nl7/work/xray_data/BFO_TSO_20160330.hdf5'
fpath = os.path.join(wdir, fname)
f = h5.File(fpath, 'r')
# Bare expression: in a notebook this displays the file's top-level groups.
f.items()
# Get Correction Files
# Detector correction frames stored under the scan's Meta group
# (READ = readout/bias frame, DARK = dark frame -- presumably; verify).
metag = f['BFO_TSO_2/Meta']
READ = metag['READ']
DARK = metag['DARK']
In [18]:
# Access the group holding the raw image stacks for this sample.
raw = f['/BFO_TSO_2/Raw']
# Bare expression: displays the scans available under Raw.
raw.items()
# NOTE(review): the variable is named S001 but it loads scan 'S002' --
# confirm which scan is actually intended.
S001 = raw['S002']
In [23]:
S001.shape
Out[23]:
In [3]:
#L-scan along [101]_pc near DSO 103_pc
# Select scan S008 and load its previously registered stack through the
# HDF5 object reference stored in the dataset's 'registered' attribute.
rawg = f['BFO_TSO_2/Raw']
dset = rawg['S008']
# Bare expression: shows the command used to acquire this scan.
dset.attrs['scan_command']
data = f[dset.attrs['registered']]
# Alternative datasets/derived quantities, kept commented out for reference:
# L = dset.attrs['L']
# lscan_norm = f[dset.attrs['normalized']]
# lscan_flat = f[dset.attrs['flatfield']]
# Int_norm = np.mean(lscan_norm[:,800:1200,800:1200],axis=(1,-1))
# Int_flat = np.mean(lscan_flat[:,800:1200,800:1200],axis=(1,-1))
In [12]:
np.save('S008_fields.npy',dset.attrs['DAC5']*1e2/4.785)
In [7]:
np.save('S008_roi2.npy',data[:,800:1200,1000:1400])
In [5]:
# Quick visual check: show one frame of the registered stack.
fig, ax = plt.subplots()
ax.imshow(data[100], cmap='bone')
Out[5]:
In [8]:
# Configure an ORB feature extractor (scikit-image backend) targeting up to
# 5000 keypoints per frame, then detect features inside a 400-px window
# around (750, 650) using one worker per frame (capped at 20).
FE = FeatureExtractor('ORB', lib='skimage')
FE.detector = FE.detector(n_keypoints=5000)
FE.loadData(data)
n_workers = min(20, data.shape[0])
keypts, desc = FE.getFeatures(processors=n_workers, mask=True,
                              origin=[750, 650], window_size=400)
In [9]:
# Instantiate the geometric-transformation helper and hand it the image
# stack plus the detected keypoints and descriptors from the cell above.
gt = geoTransformer()
gt.loadData(data)
gt.loadFeatures([keypts, desc])
In [10]:
# Match the detected features across frames (scikit-image matcher),
# discarding correspondences farther than 50 pixels apart.
matches, filt_matches = gt.matchFeatures(processors=20, lib='skimage',
                                         maximum_distance=50)
# Bare expression: shows how many filtered matches survived per frame pair.
[m.shape for m in filt_matches]
Out[10]:
In [11]:
# Robustly fit one translation per frame pair from the matches
# (RANSAC-style: min_samples points per trial, 2-px residual threshold,
# up to 1e5 trials), with one worker per frame capped at 20.
n_workers = min(20, data.shape[0])
transforms, trueMatches = gt.findTransformation(TranslationTransform, matches,
                                                n_workers,
                                                min_samples=4,
                                                residual_threshold=2,
                                                max_trials=int(1e5))
# Bare expressions: inlier count per frame, then the fitted translations.
[np.where(m == True)[0].size for m in trueMatches]
[t.translation for t in transforms]
Out[11]:
In [12]:
# Reset transformations with not enough true matches (i.e. inliers):
# a fit supported by fewer than 4 inliers is unreliable, so replace it
# with the identity translation instead of distorting that frame.
newTransforms = []
for i, (tmatch, trans) in enumerate(zip(trueMatches, transforms), start=1):
    # count_nonzero on the boolean inlier mask == number of True entries
    if np.count_nonzero(tmatch) < 4:
        trans = TranslationTransform(translation=(0, 0))
        print('resetting transformation # %d!' % (i))
    newTransforms.append(trans)
In [13]:
# Transform (register) the image stack.
# BUG FIX: the previous cell builds `newTransforms`, in which unreliable
# fits are reset to the identity, but `transforms` was being passed here --
# silently discarding that filtering. Apply the filtered list instead.
transImages, chainTransforms = gt.applyTransformation(newTransforms,
                                                      transformation='translation')
In [19]:
f.close()
In [20]:
# Write data into h5-file: persist the registered stack under the Process
# group and link it back to the raw scan via an HDF5 object reference.
with h5.File(fpath, 'r+') as f:
    rawg = f['BFO_TSO_2/Raw']
    dset = rawg['S008']
    procg = f['BFO_TSO_2/Process']
    dsetname = 'S008_registered_ORB'
    # Robustness fix: create_dataset raises if the name already exists,
    # which broke re-running this cell. Drop any stale copy first.
    if dsetname in procg:
        del procg[dsetname]
    dsetnew = procg.create_dataset(dsetname, data=transImages, dtype=np.float64,
                                   compression='lzf')
    # Store a reference to the registered data on the raw dataset so later
    # sessions can follow dset.attrs['registered'] (as done above).
    dset.attrs['registered'] = dsetnew.ref
In [17]:
np.save('S008_normalized_registered.npy',transImages)