In [ ]:
%matplotlib inline
%load_ext autoreload
%autoreload 2
Measurements of a diamond crystal for A.A. Shiryaev (the large one).
Tube: Mo, operated at 50 kV, 40 mA.
Experiment geometry:
Bragg angle: 9.9165° (7.610° in fact)
Detector angle: 19.833°
Rotation angles (for tomography): 0-185° with a 1° step
5 frames per rotation position, 60 s exposure each.
1 frame per rotation position, 60 s exposure (acquired after the data frames).
In total: 6 frames per rotation position, of which 5 are data and 1 is dark current.
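For reference, the acquisition parameters listed above can be gathered in one place. This is only an illustrative sketch; the constant names are ours and the cells below keep their own hardcoded values.
In [ ]:
# Acquisition parameters taken from the experiment description above (illustrative only).
TUBE_ANODE = 'Mo'                          # X-ray tube anode material
VOLTAGE_KV, CURRENT_MA = 50, 40            # tube operating mode
DETECTOR_ANGLE_DEG = 19.833                # detector (2-theta) arm angle
BRAGG_ANGLE_DEG = DETECTOR_ANGLE_DEG / 2   # nominal Bragg angle, 9.9165 deg
ANGLE_RANGE_DEG = (0, 185)                 # rotation range for tomography
ANGLE_STEP_DEG = 1.0                       # rotation step
FRAMES_PER_ANGLE = 5                       # data frames per rotation position
DARK_FRAMES_PER_ANGLE = 1                  # dark-current frame per rotation position
EXPOSURE_S = 60                            # exposure per frame, seconds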
In [ ]:
import os
import glob
import numpy as np
import pylab as plt
import h5py
import astra
import cv2
from astropy.io import fits
from pprint import pprint
import warnings
import ipywidgets
from ipywidgets import interact, interactive, fixed, interact_manual
In [ ]:
def log_progress(sequence, every=None, size=None):
    """Wrap an iterable with an ipywidgets progress bar."""
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display

    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)  # every 0.5%
    else:
        assert every is not None, 'sequence is iterator, set every'

    if is_iterator:
        progress = IntProgress(min=0, max=1, value=1)
        progress.bar_style = 'info'
    else:
        progress = IntProgress(min=0, max=size, value=0)
    label = HTML()
    box = VBox(children=[label, progress])
    display(box)

    index = 0
    try:
        for index, record in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = '{index} / ?'.format(index=index)
                else:
                    progress.value = index
                    label.value = u'{index} / {size}'.format(
                        index=index,
                        size=size
                    )
            yield record
    except:
        progress.bar_style = 'danger'
        raise
    else:
        progress.bar_style = 'success'
        progress.value = index
        label.value = str(index or '?')
In [ ]:
plt.gray()
In [ ]:
import re
def natural_key(string_):
    """Sort key for natural ordering, see http://www.codinghorror.com/blog/archives/001018.html"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
In [ ]:
def read_fit(fit_path):
    """Read a single FITS frame and return it as a uint16 array."""
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        with fits.open(fit_path) as fit:
            tmp_data = fit[0].data.astype('uint16')
    return tmp_data
In [ ]:
# load the FITS files and sort them in natural order
data_dir = os.path.expanduser('/diskmnt/a/makov/topo_tomo/big_diamond/2018_04_20/data/')
all_files = glob.glob(os.path.join(data_dir,'*.FITS'))
all_files = sorted(all_files, key=natural_key)
print(len(all_files))
all_files[0:10]
# print len(data_files)
# empty_files = glob.glob(os.path.join(data_dir,'FREE*.fit'))
# empty_files = sorted(empty_files, key=natural_key)
# print len(empty_files)
In [ ]:
data_range = np.index_exp[50:600, 390:1150]
zero_file = read_fit(all_files[0])[data_range]
plt.figure(figsize=(10,10))
plt.imshow(cv2.medianBlur(zero_file,3))
plt.colorbar(orientation='horizontal')
In [ ]:
zero_file = read_fit(all_files[0])[data_range]
all_data = np.zeros(shape=(len(all_files), zero_file.shape[0], zero_file.shape[1]), dtype='float32')
for i in log_progress(np.arange(len(all_files))):
    all_data[i] = read_fit(all_files[i])[data_range]
In [ ]:
# dark-current frames are identified by their low mean intensity (below 50 counts)
free_numbers = np.argwhere(all_data.mean(axis=-1).mean(axis=-1) < 50)
free_numbers = [x[0] for x in free_numbers]
print(len(free_numbers))
In [ ]:
clear_frames = np.zeros(shape=(len(free_numbers), zero_file.shape[0], zero_file.shape[1]), dtype='float32')
In [ ]:
current_frame = 0
for ifn, fn in enumerate(np.asarray(free_numbers)):
    # print(current_frame, ifn, fn)
    # average the data frames of this group and subtract the dark frame that follows them
    zero_frame = all_data[fn]
    tmp_frame = all_data[current_frame:fn].mean(axis=0) - zero_frame
    # tmp_frame = np.percentile(all_data[current_frame:fn], 50, axis=0) - zero_frame
    tmp_frame[tmp_frame < 0] = 0
    clear_frames[ifn] = tmp_frame
    current_frame = fn + 1
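The loop above averages each group of data frames and subtracts the dark frame recorded right after them, clipping negative values. Below is a vectorized sketch of the same idea, assuming a strict 5-data + 1-dark pattern per rotation position (the loop itself does not rely on this assumption, and the reconstruction keeps using clear_frames from the loop).
In [ ]:
# Illustrative alternative only; not used by the cells below.
n_groups = len(all_data) // 6
groups = all_data[:n_groups * 6].reshape(n_groups, 6, *all_data.shape[1:])
clear_alt = groups[:, :-1].mean(axis=1) - groups[:, -1]  # mean of 5 data frames minus the dark frame
clear_alt[clear_alt < 0] = 0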
In [ ]:
# diagnostic plots: per-group statistics (mean, std, 60th percentile, min) for every 10th group
current_frame = 0
for ifn, fn in enumerate(np.asarray(free_numbers)):
    if ifn % 10 == 0:
        zero_frame = all_data[fn]
        t_data = all_data[current_frame:fn] - zero_frame
        t_s = np.sort(t_data, axis=0)
        tmp_frame_mean_e = np.percentile(t_data, 60, axis=0)  # t_s[3:].mean(axis=0)
        tmp_frame_mean = t_data.mean(axis=0)
        tmp_frame_mean[tmp_frame_mean < 1] = 1
        tmp_frame_std = t_data.std(axis=0)
        tmp_frame_std[tmp_frame_std < 1] = 1
        tmp_frame_min = t_data.min(axis=0)
        tmp_frame_min[tmp_frame_min < 1] = 1
        tmp_frame_max = t_data.max(axis=0)
        tmp_frame_max[tmp_frame_max < 1] = 1
        vmax = np.max(tmp_frame_mean)

        plt.figure(figsize=(14, 12))
        plt.subplot(221)
        plt.title(str(ifn) + ' mean')
        plt.imshow(tmp_frame_mean, vmin=0, vmax=vmax)
        plt.colorbar(orientation='horizontal')
        plt.subplot(222)
        plt.title(str(ifn) + ' std')
        plt.imshow(tmp_frame_std, vmin=0, vmax=vmax)
        plt.colorbar(orientation='horizontal')
        plt.subplot(223)
        plt.title(str(ifn) + ' 60th percentile')
        plt.imshow(tmp_frame_mean_e, vmin=0, vmax=vmax)
        plt.colorbar(orientation='horizontal')
        plt.subplot(224)
        plt.title(str(ifn) + ' min')
        plt.imshow(tmp_frame_min, vmin=0, vmax=vmax)
        plt.colorbar(orientation='horizontal')
        plt.show()

        # plt.figure(figsize=(7, 5))
        # for i in range(1, 10):
        #     # plt.subplot(220 + i)
        #     x, y = np.histogram(t_data[:, 100:102, 100 + 10 * i:100 + 10 * i + 2], bins=10)
        #     plt.plot(y[1:], x, label=i)
        # plt.legend()
        # plt.grid()
        # plt.show()
    current_frame = fn + 1
In [ ]:
plt.figure(figsize=(10,10))
plt.plot(all_data[:].mean(axis=-1).mean(axis=-1))
plt.grid()
plt.show()
In [ ]:
plt.figure(figsize=(10,10))
plt.plot(clear_frames[:].mean(axis=-1).mean(axis=-1))
plt.grid()
plt.show()
In [ ]:
def show_array(data3d, axis_numb=0):
    vmax = data3d.max()
    def show_slice(i):
        plt.figure(figsize=(10, 10))
        plt.imshow(local_data.take(i, axis=axis_numb), vmin=0, vmax=vmax)
        plt.colorbar(orientation='horizontal')
        plt.show()
    local_data = data3d
    ipywidgets.interact(show_slice, i=(0, data3d.shape[axis_numb] - 1))
In [ ]:
show_array(clear_frames)
In [ ]:
# from bokeh.plotting import figure, show
# from bokeh.io import output_notebook
# output_notebook()
# def show_array_b(data3d, axis_numb=0):
# vmax = data3d.max()
# p = figure(plot_width=data3d.shape[2], plot_height=data3d.shape[1],
# x_range=(0, data3d.shape[2]), y_range=(0, data3d.shape[1]))
# def show_slice(i):
# image = local_data.take(i,axis=axis_numb)
# p.image(image=[image], x=0, y=0, dw=image.shape[1], dh=image.shape[0], palette='Greys9')
# show(p)
# local_data = data3d
# ipywidgets.interact(show_slice, i=(0,data3d.shape[axis_numb]-1))
# show_array_b(clear_frames)
In [ ]:
# compare the 0 deg projection with the mirrored 180 deg projection to check the rotation-axis alignment
shift = 0
plt.figure(figsize=(10,10))
plt.plot(np.roll(clear_frames[0],shift,1).sum(axis=0))
plt.plot(np.roll(np.fliplr(clear_frames[180]),-shift,1).sum(axis=0))
plt.grid()
In [ ]:
plt.figure(figsize=(10,10))
plt.imshow(np.roll(clear_frames[0],shift,1)-np.roll(np.fliplr(clear_frames[180]),-shift,1))
In [ ]:
angles = np.arange(len(clear_frames))*1.0  # angle step = 1 degree
In [ ]:
import astra
def astra_tomo2d_parallel(sinogram, angles):
    angles = angles.astype('float64')
    detector_size = sinogram.shape[1]
    rec_size = detector_size

    vol_geom = astra.create_vol_geom(rec_size, rec_size)
    proj_geom = astra.create_proj_geom('parallel', 1.0, detector_size, angles)
    sinogram_id = astra.data2d.create('-sino', proj_geom, data=sinogram)

    # Create a data object for the reconstruction
    rec_id = astra.data2d.create('-vol', vol_geom)
    # proj_id = astra.create_projector('strip', proj_geom, vol_geom)  # for CPU reconstruction only

    # Set up the parameters for a reconstruction algorithm using the GPU
    cfg = astra.astra_dict('CGLS_CUDA')
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = sinogram_id
    # cfg['ProjectorId'] = proj_id  # for CPU reconstruction only
    cfg['option'] = {}
    cfg['option']['MinConstraint'] = 0
    # cfg['option']['MaxConstraint'] = 5
    # Other available algorithms: SIRT_CUDA, SART_CUDA, EM_CUDA, FBP_CUDA (see the FBP sample)

    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)
    # Run 10 iterations of the algorithm
    astra.algorithm.run(alg_id, 10)

    # Get the result
    rec = astra.data2d.get(rec_id)

    # Clean up. Note that GPU memory is tied up in the algorithm object,
    # and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data2d.delete(rec_id)
    astra.data2d.delete(sinogram_id)
    astra.clear()
    return rec
def astra_tomo3d_parallel(sinogram, angles):
    angles = angles.astype('float64')
    detector_size = sinogram.shape[-1]
    slices_number = sinogram.shape[0]
    rec_size = detector_size

    vol_geom = astra.create_vol_geom(rec_size, rec_size, slices_number)
    proj_geom = astra.create_proj_geom('parallel3d', 1.0, 1.0, slices_number, detector_size, angles)
    sinogram_id = astra.data3d.create('-sino', proj_geom, data=sinogram)

    # Create a data object for the reconstruction
    rec_id = astra.data3d.create('-vol', vol_geom)
    # proj_id = astra.create_projector('strip', proj_geom, vol_geom)  # for CPU reconstruction only

    # Set up the parameters for a reconstruction algorithm using the GPU
    cfg = astra.astra_dict('CGLS3D_CUDA')
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = sinogram_id
    # cfg['ProjectorId'] = proj_id  # for CPU reconstruction only
    cfg['option'] = {}
    cfg['option']['MinConstraint'] = 0
    # cfg['option']['MaxConstraint'] = 5
    # Other available 3D GPU algorithms: SIRT3D_CUDA, BP3D_CUDA (see the ASTRA samples)

    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)
    # Run 10 iterations of the algorithm
    astra.algorithm.run(alg_id, 10)

    # Get the result
    rec = astra.data3d.get(rec_id)

    # Clean up. Note that GPU memory is tied up in the algorithm object,
    # and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data3d.delete(rec_id)
    astra.data3d.delete(sinogram_id)
    astra.clear()
    return rec
def astra_topotomo3d(sinogram, angles):
    astra_angles = angles.astype('float64')
    detector_size = sinogram.shape[-1]
    slices_number = sinogram.shape[0]
    rec_size = detector_size

    vol_geom = astra.create_vol_geom(rec_size, rec_size, slices_number)

    # Per-angle geometry vectors: the same circular scan as above, but the ray
    # direction is tilted out of the rotation plane by the Bragg angle.
    vectors = np.zeros((len(astra_angles), 12))
    alpha = -19.833 / 2 * np.pi / 180  # Bragg angle: half of the detector (2-theta) angle
    for i in range(len(astra_angles)):
        # ray direction
        vectors[i, 0] = np.sin(astra_angles[i]) * np.cos(alpha)
        vectors[i, 1] = -np.cos(astra_angles[i]) * np.cos(alpha)
        vectors[i, 2] = np.sin(alpha)

        # center of detector
        vectors[i, 3:6] = 0

        # vector from detector pixel (0,0) to (0,1)
        vectors[i, 6] = np.cos(astra_angles[i])
        vectors[i, 7] = np.sin(astra_angles[i])
        vectors[i, 8] = 0

        # vector from detector pixel (0,0) to (1,0)
        vectors[i, 9] = 0
        vectors[i, 10] = 0
        vectors[i, 11] = 1

    # Parameters: #rows, #columns, vectors
    proj_geom = astra.create_proj_geom('parallel3d_vec', slices_number, detector_size, vectors)
    # proj_geom = astra.create_proj_geom('parallel3d', 1.0, 1.0, slices_number, detector_size, angles)
    sinogram_id = astra.data3d.create('-sino', proj_geom, data=sinogram)

    # Create a data object for the reconstruction
    rec_id = astra.data3d.create('-vol', vol_geom)
    # proj_id = astra.create_projector('strip', proj_geom, vol_geom)  # for CPU reconstruction only

    # Set up the parameters for a reconstruction algorithm using the GPU
    cfg = astra.astra_dict('CGLS3D_CUDA')
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = sinogram_id
    # cfg['ProjectorId'] = proj_id  # for CPU reconstruction only
    # cfg['option'] = {}
    # cfg['option']['MinConstraint'] = 0
    # cfg['option']['MaxConstraint'] = 5
    # Other available 3D GPU algorithms: SIRT3D_CUDA, BP3D_CUDA (see the ASTRA samples)

    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)
    # Run 100 iterations of the algorithm
    astra.algorithm.run(alg_id, 100)

    # Get the result
    rec = astra.data3d.get(rec_id)

    # Clean up. Note that GPU memory is tied up in the algorithm object,
    # and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data3d.delete(rec_id)
    astra.data3d.delete(sinogram_id)
    astra.clear()
    return rec
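The key difference between astra_topotomo3d and the plain parallel-beam geometry is that the ray direction is tilted out of the rotation plane by the Bragg angle alpha = 19.833/2 deg. A quick sanity check of that construction (our addition, purely illustrative):
In [ ]:
# Verify the ray directions built in astra_topotomo3d: unit length, tilted by alpha out of the xy plane.
alpha = -19.833 / 2 * np.pi / 180
theta = np.deg2rad(30.0)  # arbitrary rotation angle
ray = np.array([np.sin(theta) * np.cos(alpha),
                -np.cos(theta) * np.cos(alpha),
                np.sin(alpha)])
print(np.linalg.norm(ray))            # ~1.0
print(np.degrees(np.arcsin(ray[2])))  # ~ -9.9165 deg, i.e. -19.833/2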
In [ ]:
# rec_slice = astra_tomo2d_parallel(np.log(data[:,150,:]+1), angles*np.pi/180)
# rec_slice = astra_tomo2d_parallel(data[:,150,:], angles*np.pi/180)
In [ ]:
# plt.imshow(rec_slice)
# plt.colorbar()
In [ ]:
# normalize each projection by its mean intensity so all frames carry equal weight
data = clear_frames/np.expand_dims(np.expand_dims(clear_frames.mean(axis=-1).mean(axis=-1),-1),-1)
# data = data[::2]
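The same normalization can be written more compactly with keepdims; an equivalent sketch, not used below:
In [ ]:
# Equivalent to the cell above: divide every frame by its own mean intensity.
data_alt = clear_frames / clear_frames.mean(axis=(-2, -1), keepdims=True)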
In [ ]:
show_array(data,1)
In [ ]:
s1 = np.require(data[:,:,:].swapaxes(0,1),dtype=np.float32, requirements=['C'])
In [ ]:
rec_3d = astra_topotomo3d(np.log(1.+s1), angles*np.pi/180)
# rec_3d = astra_topotomo3d(s1, angles*np.pi/180)
In [ ]:
show_array(rec_3d,1)
In [ ]:
# sl = rec_3d[rec_3d.shape[0]/3]
# mask = np.zeros_like(sl)
# X,Y = meshgrid(np.arange(mask.shape[0]),np.arange(mask.shape[1]))
# X = X-mask.shape[0]/2
# Y = Y-mask.shape[1]/2
# mask = (X*X+Y*Y)**0.5<(mask.shape[0]/2)-3
# rec_3d_masked = rec_3d*mask
In [ ]:
# show_array(rec_3d_masked)
In [ ]:
# data_file_parts = os.path.join(data_dir,'result.h5')
# out_file = '{}_reconstruction.h5'.format(''.join(data_file_parts[:-1]))
out_file = os.path.join(data_dir,'result.h5')
print(out_file)
with h5py.File(out_file, 'w') as h5f:
    h5f.create_dataset('Result', data=rec_3d, dtype=np.float32)
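An optional readback check (our addition) to confirm the volume was written as expected:
In [ ]:
# Re-open the file and verify the stored dataset (illustrative only).
with h5py.File(out_file, 'r') as h5f:
    print(h5f['Result'].shape, h5f['Result'].dtype)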
In [ ]:
def save_amira(result_file):
    """
    Save the reconstructed slices as an Amira raw file.

    Inputs:
        result_file - path to an HDF5 file whose /Result node contains
                      the reconstructed volume as a 3D array
    Outputs:
        Files amira.raw and tomo.hx written next to the input file;
        existing files are overwritten.
        Data type: float32, little endian.
    """
    data_path = os.path.dirname(result_file)
    with open(os.path.join(data_path, 'amira.raw'), 'wb') as amira_file:
        with h5py.File(result_file, 'r') as h5f:
            x = h5f['Result']
            for i in range(x.shape[0]):
                np.array(x[i, :, :]).tofile(amira_file)
            file_shape = x.shape
    with open(os.path.join(data_path, 'tomo.hx'), 'w') as af:
        af.write('# Amira Script\n')
        af.write('remove -all\n')
        af.write(r'[ load -raw ${SCRIPTDIR}/amira.raw little xfastest float 1 ' +
                 str(file_shape[1]) + ' ' + str(file_shape[2]) + ' ' + str(file_shape[0]) +
                 ' 0 ' + str(file_shape[1] - 1) + ' 0 ' + str(file_shape[2] - 1) + ' 0 ' + str(file_shape[0] - 1) +
                 ' ] setLabel tomo.raw\n')
In [ ]:
save_amira(out_file)
In [ ]: