This notebook compares the performance of TensorFlow against the C library for NCS. Note that it works a little differently than the usual approach: here we solve the entire image in a single step rather than breaking it up into lots of sub-images. This works fine, at least for the simulated image, since it isn't too large, and both TensorFlow and the C library are fairly memory efficient.
Timing was done with the CPU version of TensorFlow; the GPU version might well be faster.
In order for this to work you need both the reference NCS/python3-6 Python module and the NCS/clib Python module on your Python path.
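If these modules are not already importable, something like the following sketch can be used to add them and to check whether TensorFlow can see a GPU. The paths shown are placeholders for wherever the NCS repository is checked out, not actual locations.
In [ ]:
import sys
import tensorflow as tf

# Placeholder paths; point these at your checkout of the NCS repository.
#sys.path.append("/path/to/NCS/python3-6")
#sys.path.append("/path/to/NCS/clib")

# TensorFlow 2.1+; older versions have tf.test.is_gpu_available() instead.
print(tf.config.list_physical_devices('GPU'))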
In [ ]:
import matplotlib
import matplotlib.pyplot as pyplot
import numpy
import os
import time
# tensorflow
import tensorflow as tf
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
# python3-6 NCS. This provides the OTF and the simulated images.
import pyNCS
import pyNCS.denoisetools as ncs
# python3 and C NCS.
import pyCNCS.ncs_c as ncsC
# Generate the same random noise each time.
numpy.random.seed(1)
In [ ]:
py_ncs_path = os.path.dirname(os.path.abspath(pyNCS.__file__))
print(py_ncs_path)
In [ ]:
# create normalized ideal image
fpath1 = os.path.join(py_ncs_path, "../randwlcposition.mat")
imgsz = 128
zoom = 8
Pixelsize = 0.1
NA = 1.4
Lambda = 0.7
t = time.time()
res = ncs.genidealimage(imgsz,Pixelsize,zoom,NA,Lambda,fpath1)
elapsed = time.time()-t
print('Elapsed time for generating ideal image:', elapsed)
imso = res[0]
pyplot.imshow(imso,cmap="gray")
# select variance map from calibrated map data
fpath = os.path.join(py_ncs_path, "../gaincalibration_561_gain.mat")
noisemap = ncs.gennoisemap(imgsz,fpath)
varsub = noisemap[0]*10 # increase the readout noise (variance) 10x to demonstrate the effect of the NCS algorithm
gainsub = noisemap[1]
# generate simulated data
I = 100
bg = 10
offset = 100
N = 1
dataimg = ncs.gendatastack(imso,varsub,gainsub,I,bg,offset,N)
imsd = dataimg[1]
print(imsd.shape)
In [ ]:
alpha = 0.1
In [ ]:
# Get the OTF mask that NCSDemo_simulation.py used.
rcfilter = ncs.genfilter(128,Pixelsize,NA,Lambda,'OTFweighted',1,0.7)
print(rcfilter.shape)
pyplot.imshow(rcfilter, cmap = "gray")
pyplot.show()
In [ ]:
# Calculate gamma, the pixel readout variance converted to photo-electron
# units (variance/gain^2), then run the Python/C NCS.
gamma = varsub/(gainsub*gainsub)
In [ ]:
# This takes ~100ms on my laptop.
out_c = ncsC.pyReduceNoise(imsd[0], gamma, rcfilter, alpha)
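For reference, here is a minimal timing sketch (not part of the original analysis) for the C call; the ~100ms figure above is machine dependent.
In [ ]:
# Average the Python/C NCS call over a few repeats (hypothetical timing check).
n_reps = 10
t = time.time()
for _ in range(n_reps):
    out_c = ncsC.pyReduceNoise(imsd[0], gamma, rcfilter, alpha)
print('Average time per pyReduceNoise() call:', (time.time() - t)/n_reps)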
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(imsd[0],aspect='equal',cmap="gray")
ax2.imshow(out_c,aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
# Re-order the OTF mask so that DC is in the corner, matching the layout of tf.signal.fft2d output.
py_otf_mask = numpy.fft.fftshift(rcfilter.astype(numpy.float32))

FITMIN = tf.constant(1.0e-6)
tf_alpha = tf.constant(numpy.float32(alpha))
tf_data = tf.Variable(imsd[0].astype(numpy.float32), shape = (128, 128), trainable=False)
tf_gamma = tf.constant(gamma.astype(numpy.float32))

# Squared OTF mask scaled by 1/(128*128), folding the FFT normalization into the weighting.
tf_rc = tf.constant(py_otf_mask*py_otf_mask/(128.0*128.0))

# tf_u is the image estimate that the optimizers below will adjust.
tf_u = tf.Variable(imsd[0].astype(numpy.float32), shape = (128, 128), trainable=True)
In [ ]:
# TensorFlow cost function.
@tf.function
def cost():
    ## LL - (negative) log-likelihood term.
    t1 = tf.math.add(tf_data, tf_gamma)
    t2 = tf.math.add(tf_u, tf_gamma)
    t2 = tf.math.maximum(t2, FITMIN)
    t2 = tf.math.log(t2)
    t2 = tf.math.multiply(t1, t2)
    t2 = tf.math.subtract(tf_u, t2)
    c1 = tf.math.reduce_sum(t2)

    ## NC - noise contribution term (OTF-weighted power spectrum of u).
    t1 = tf.dtypes.complex(tf_u, tf.zeros_like(tf_u))
    t2 = tf.signal.fft2d(t1)
    t2 = tf.math.multiply(t2, tf.math.conj(t2))
    t2 = tf.math.abs(t2)
    t2 = tf.math.multiply(t2, tf_rc)
    c2 = tf.math.reduce_sum(t2)
    c2 = tf.math.multiply(tf_alpha, c2)

    return tf.math.add(c1, c2)
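As a sanity check on the graph above, here is a plain NumPy sketch of the same cost. The cost_numpy() helper is ours (not part of pyNCS, pyCNCS or TensorFlow); it follows the FITMIN clamp and the 1/(128*128) OTF-mask scaling used above, so it should agree with cost() to within floating point error (the TensorFlow version runs in float32).
In [ ]:
# Plain NumPy version of the TensorFlow cost, for cross-checking (hypothetical helper).
def cost_numpy(u, data, gamma, otf_mask, alpha):
    # LL term.
    ll = numpy.sum(u - (data + gamma)*numpy.log(numpy.maximum(u + gamma, 1.0e-6)))
    # NC term, the OTF-weighted power spectrum of u.
    u_fft = numpy.fft.fft2(u)
    rc = otf_mask*otf_mask/otf_mask.size
    nc = alpha*numpy.sum(numpy.abs(u_fft*numpy.conj(u_fft))*rc)
    return ll + nc

print(cost().numpy())
print(cost_numpy(tf_u.numpy(), imsd[0], gamma, py_otf_mask, alpha))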
In [ ]:
# Gradient Descent Optimizer.
#
# This takes ~700ms on my laptop, so about 7x slower.
tf_data.assign(numpy.copy(imsd[0]))
tf_u.assign(tf_data.numpy())
for i in range(100):
    if((i%10)==0):
        print(cost().numpy())
    opt = gradient_descent.GradientDescentOptimizer(2.0).minimize(cost)
out_tf = tf_u.numpy()
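The ~700ms figure refers to the full 100 iterations. As a hypothetical check (not part of the original notebook), the same loop can be wrapped with time.time():
In [ ]:
# Time the full gradient descent run (machine dependent).
tf_u.assign(tf_data.numpy())
t = time.time()
for i in range(100):
    opt = gradient_descent.GradientDescentOptimizer(2.0).minimize(cost)
print('Elapsed time for 100 gradient descent iterations:', time.time() - t)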
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,4))
ax1.imshow(out_c,aspect='equal',cmap="gray")
ax2.imshow(out_tf,aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out_c - out_tf))))
In [ ]:
# AdamOptimizer.
#
# This takes ~1.5s on my laptop, so about 15x slower.
tf_data.assign(numpy.copy(imsd[0]))
tf_u.assign(tf_data.numpy())
for i in range(100):
    if((i%10)==0):
        print(cost().numpy())
    opt = adam.AdamOptimizer(0.8).minimize(cost)
out_tf_2 = tf_u.numpy()
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,4))
ax1.imshow(out_c,aspect='equal',cmap="gray")
ax2.imshow(out_tf_2,aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out_c - out_tf_2))))
In [ ]:
# Adagrad.
#
# This takes ~950ms on my laptop, so about 9.5x slower.
tf_data.assign(numpy.copy(imsd[0]))
tf_u.assign(tf_data.numpy())
for i in range(100):
    if((i%10)==0):
        print(cost().numpy())
    opt = adagrad.AdagradOptimizer(0.8).minimize(cost)
out_tf_3 = tf_u.numpy()
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,4))
ax1.imshow(out_c,aspect='equal',cmap="gray")
ax2.imshow(out_tf_3,aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out_c - out_tf_3))))
In [ ]: