In [ ]:
import matplotlib
import matplotlib.pyplot as pyplot
import numpy
import os
import time
# Pure Python NCS (the reference "python3-6" implementation).
import pyNCS
import pyNCS.denoisetools as ncs
# Python 3 and C NCS.
import pyCNCS.ncs_c as ncsC
# Generate the same random noise each time.
numpy.random.seed(1)
In [ ]:
py_ncs_path = os.path.dirname(os.path.abspath(pyNCS.__file__))
print(py_ncs_path)
In [ ]:
# Create the normalized ideal image.
fpath1 = os.path.join(py_ncs_path, "../randwlcposition.mat")
imgsz = 128       # image size (pixels)
zoom = 8          # up-sampling factor for the ideal image
Pixelsize = 0.1   # camera pixel size (microns)
NA = 1.4          # objective numerical aperture
Lambda = 0.7      # emission wavelength (microns)
t = time.time()
res = ncs.genidealimage(imgsz,Pixelsize,zoom,NA,Lambda,fpath1)
elapsed = time.time()-t
print('Elapsed time for generating ideal image:', elapsed)
imso = res[0]
pyplot.imshow(imso,cmap="gray")
pyplot.show()
# Select the variance and gain maps from the camera calibration data.
fpath = os.path.join(py_ncs_path, "../gaincalibration_561_gain.mat")
noisemap = ncs.gennoisemap(imgsz,fpath)
varsub = noisemap[0]*10 # scale the readout noise variance by 10x to make the effect of the NCS algorithm easier to see
gainsub = noisemap[1]
# Generate simulated sCMOS data.
I = 100      # signal intensity (photons)
bg = 10      # background (photons)
offset = 100 # camera offset (ADU)
N = 1        # number of frames
dataimg = ncs.gendatastack(imso,varsub,gainsub,I,bg,offset,N)
imsd = dataimg[1] # the simulated camera frames
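In [ ]:
# Optional sanity check (not part of the original demo): display the
# first simulated noisy frame before running any noise reduction.
pyplot.imshow(imsd[0],cmap="gray")
pyplot.show()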
In [ ]:
# Generate the noise corrected image.
Rs = 8          # sub-region size (pixels)
iterationN = 15 # number of iterations
alpha = 0.1     # weight of the noise contribution term
out_name = os.path.join(py_ncs_path, "../../out.npy")
# This is useful for debugging as it takes a long time for this approach
# to reduce the noise of an image. Once you've done this once you can just
# load the saved reference result.
if not os.path.exists(out_name):
# This takes ~24 seconds on my laptop.
out = ncs.reducenoise(Rs,imsd[0:1],varsub,gainsub,imgsz,Pixelsize,NA,Lambda,alpha,iterationN)
numpy.save(out_name, out)
else:
out = numpy.load(out_name)
print(out.shape)
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(imsd[0],aspect='equal',cmap="gray")
ax2.imshow(out[0],aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
# Get the OTF mask that NCSDemo_simulation.py used.
rcfilter = ncs.genfilter(Rs+2,Pixelsize,NA,Lambda,'OTFweighted',1,0.7)
print(rcfilter.shape)
pyplot.imshow(rcfilter, cmap = "gray")
pyplot.show()
In [ ]:
# Calculate gamma (the readout variance in photo-electron units, var/gain^2) and run Python/C NCS.
gamma = varsub/(gainsub*gainsub)
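In [ ]:
# A quick check of the gamma calculation (my reading: dividing the
# variance map, in ADU^2, by gain^2, in (ADU/e-)^2, gives the per-pixel
# readout variance in photo-electrons^2). It should match the frame shape.
print(gamma.shape, imsd[0].shape)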
In [ ]:
# This takes ~100ms on my laptop, so ~200x faster even though it is single-threaded.
out2 = ncsC.pyReduceNoise(imsd[0], gamma, rcfilter, alpha)
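In [ ]:
# To reproduce the timing claim on your own machine, a rough wall-clock
# measurement (not a rigorous benchmark):
t = time.time()
ncsC.pyReduceNoise(imsd[0], gamma, rcfilter, alpha)
print("pyReduceNoise: {0:.3f} seconds".format(time.time()-t))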
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(out[0],aspect='equal',cmap="gray")
ax2.imshow(out2,aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
pyplot.figure(figsize = (6,6))
pyplot.imshow(out[0] - out2, cmap = "gray")
pyplot.show()
In [ ]:
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out[0] - out2))))
In [ ]:
# The C library expects the OTF to be shifted to match the FFT frequency convention.
otf_mask = numpy.fft.fftshift(rcfilter)
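In [ ]:
# Visualize the shifted mask (not in the original demo). After fftshift
# the DC component of the mask moves from the center to the corners,
# matching numpy's FFT frequency layout. Note that for an odd-sized mask
# numpy.fft.ifftshift would be the correct inverse instead.
pyplot.imshow(otf_mask, cmap="gray")
pyplot.show()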
In [ ]:
# This takes ~50ms on my laptop, so ~2x faster than the mixed C/Python approach.
out3 = ncsC.cReduceNoise(imsd[0], gamma, otf_mask, alpha)
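In [ ]:
# The same rough timing measurement for the pure C version:
t = time.time()
ncsC.cReduceNoise(imsd[0], gamma, otf_mask, alpha)
print("cReduceNoise: {0:.3f} seconds".format(time.time()-t))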
In [ ]:
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(out[0],aspect='equal',cmap="gray")
ax2.imshow(out3,aspect ='equal',cmap="gray")
pyplot.show()
In [ ]:
pyplot.figure(figsize = (6,6))
pyplot.imshow(out2 - out3, cmap = "gray")
pyplot.show()
In [ ]:
print("C vs C/Python difference is {0:.5f}%".format(numpy.max(numpy.abs(out2 - out3))/numpy.max(out2)))