In [1]:
import numpy as np
from nifti import NiftiImage

Open file


In [2]:
# from http://stackoverflow.com/questions/3579568/choosing-a-file-in-python-with-simple-dialog
from Tkinter import Tk
from tkFileDialog import askopenfilename

Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
print(filename)


/home/sophie/Desktop/100051ss1_1000regcmini.nii

In [3]:
nim=NiftiImage(filename)

In [4]:
D=nim.data
D.shape


Out[4]:
(11573, 9, 11, 23)

In [5]:
#np.save('863try500regc.npy',D)

In [6]:
#Dts=tsc.loadSeries('/home/sophie/Desktop/862try500regcU.npy', inputFormat='npy')

In [7]:
from thunder.rdds.fileio.imagesloader import ImagesLoader
imgs = ImagesLoader(sc).fromArrays(list(D))
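
A quick check of what was loaded (a minimal sketch: it pulls a single record through the underlying RDD, assuming each record is a (key, 3-D volume) pair):

In [ ]:
# Each record should hold one 9 x 11 x 23 volume from the NIfTI stack
k, v = imgs.rdd.first()
v.shape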

Debleach


In [9]:
S=imgs.toTimeSeries()


---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-9-58fea488b091> in <module>()
----> 1 S=imgs.toTimeSeries()

/usr/local/lib/python2.7/dist-packages/thunder/rdds/images.pyc in toTimeSeries(self, size)
    125         Images.toBlocks
    126         """
--> 127         return self.toBlocks(size).toSeries().toTimeSeries()
    128 
    129     def toSeries(self, size="150M"):

/usr/local/lib/python2.7/dist-packages/thunder/rdds/images.pyc in toBlocks(self, size, units, padding)
     98         # fastest changing dimension (e.g. x) is first, so must sort reversed keys to get desired ordering
     99         # sort must come after group, b/c group will mess with ordering.
--> 100         groupedvals = vals.groupBy(lambda (k, _): k.spatialKey).sortBy(lambda (k, _): tuple(k[::-1]))
    101         # groupedvals is now rdd of (z, y, x spatial key, [(partitioning key, numpy array)...]
    102         blockedvals = groupedvals.map(blockingStrategy.combiningFunction)

/home/sophie/Desktop/spark-1.1.0-bin-hadoop1/python/pyspark/rdd.pyc in sortBy(self, keyfunc, ascending, numPartitions)
    631         [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    632         """
--> 633         return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
    634 
    635     def glom(self):

/home/sophie/Desktop/spark-1.1.0-bin-hadoop1/python/pyspark/rdd.pyc in sortByKey(self, ascending, numPartitions, keyfunc)
    604         maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
    605         fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
--> 606         samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
    607         samples = sorted(samples, reverse=(not ascending), key=keyfunc)
    608 

/home/sophie/Desktop/spark-1.1.0-bin-hadoop1/python/pyspark/rdd.pyc in collect(self)
    721         """
    722         with _JavaStackTrace(self.context) as st:
--> 723             bytesInJava = self._jrdd.collect().iterator()
    724         return list(self._collect_iterator_through_file(bytesInJava))
    725 

/home/sophie/Desktop/spark-1.1.0-bin-hadoop1/python/lib/py4j-0.8.2.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
    536         answer = self.gateway_client.send_command(command)
    537         return_value = get_return_value(answer, self.gateway_client,
--> 538                 self.target_id, self.name)
    539 
    540         for temp_arg in temp_args:

/home/sophie/Desktop/spark-1.1.0-bin-hadoop1/python/lib/py4j-0.8.2.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    298                 raise Py4JJavaError(
    299                     'An error occurred while calling {0}{1}{2}.\n'.
--> 300                     format(target_id, '.', name), value)
    301             else:
    302                 raise Py4JError(

Py4JJavaError: An error occurred while calling o117.collect.
: java.lang.IllegalStateException: unread block data
	at java.io.ObjectInputStream$BlockDataInputStream.setBlockDataMode(ObjectInputStream.java:2419)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1381)
	at java.io.ObjectInputStream.readArray(ObjectInputStream.java:1705)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1343)
	at java.io.ObjectInputStream.readObject(ObjectInputStream.java:369)
	at org.apache.spark.MapOutputTracker$.deserializeMapStatuses(MapOutputTracker.scala:365)
	at org.apache.spark.scheduler.DAGScheduler.newOrUsedStage(DAGScheduler.scala:269)
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$getShuffleMapStage(DAGScheduler.scala:221)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$visit$1$1.apply(DAGScheduler.scala:301)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$visit$1$1.apply(DAGScheduler.scala:298)
	at scala.collection.immutable.List.foreach(List.scala:318)
	at org.apache.spark.scheduler.DAGScheduler.visit$1(DAGScheduler.scala:298)
	at org.apache.spark.scheduler.DAGScheduler.getParentStages(DAGScheduler.scala:310)
	at org.apache.spark.scheduler.DAGScheduler.newStage(DAGScheduler.scala:246)
	at org.apache.spark.scheduler.DAGScheduler.handleJobSubmitted(DAGScheduler.scala:726)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1360)
	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
	at akka.actor.ActorCell.invoke(ActorCell.scala:456)
	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
	at akka.dispatch.Mailbox.run(Mailbox.scala:219)
	at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
	at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
	at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
	at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
	at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)

In [ ]:
Smean=S.seriesMean().collect()

In [ ]:
Smean.shape

In [ ]:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('notebook')
from thunder import Colorize
image = Colorize.image

In [ ]:
from pylab import *
from scipy.optimize import curve_fit

In [ ]:
def func(x, a, b, c, d, e):
    return a*(1+b*np.exp(-c*x)+d*np.exp(-e*x))

In [ ]:
def func2(x, a):
    return a

In [ ]:
def Debleach(x):
    # Fit the double-exponential bleaching model to the trace and subtract it
    X = range(len(x))
    # try:
    popt, pcov = curve_fit(func, X, x, [0.02, 0.0556, 0.02677, 0.0529, 0.002028], maxfev=15000)
    [a, b, c, d, e] = popt
    y = [x[i] - a*(1 + b*np.exp(-c*i) + d*np.exp(-e*i)) for i in X]
    # Disabled fallback: if the full fit fails, fit a constant baseline with func2
    # except Exception:
    #     popt, pcov = curve_fit(func2, X, x, [0.02], maxfev=15000)
    #     [a] = popt
    #     y = [x[i] - a for i in X]
    return y

Fit options kept as a note: start point [1, 0.0556, 0.01377, 0.1629, 0.001028], lower bounds [0.001, 0.01, 0.002, 0.01, 0], upper bounds [10, 0.6, 0.1, 0.6, 0.002].
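
A quick sanity check of the bleaching model (a minimal sketch on synthetic data; the parameter values are illustrative only and assume the cells defining func and the imports above have been run):

In [ ]:
# Generate a trace from the model itself, add noise, and re-fit it
xs = np.arange(500)
true_params = [0.02, 0.0556, 0.02677, 0.0529, 0.002028]
ys = func(xs, *true_params) + np.random.normal(0, 1e-4, len(xs))
popt, pcov = curve_fit(func, xs, ys, p0=true_params, maxfev=15000)
popt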


In [ ]:
xd=Debleach(Smean)

In [65]:
plt.plot(xd)


Out[65]:
[<matplotlib.lines.Line2D at 0xc0ff190>]

In [19]:
Dd=S.rdd.mapValues(Debleach)


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-19-a4a6f75a38f9> in <module>()
----> 1 Dd=S.rdd.mapValues(Debleach)

NameError: name 'Debleach' is not defined
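
The NameError just means the cell defining Debleach above has not been run in this session; after re-running it, the same per-voxel map should go through (a sketch, assuming S was built successfully):

In [ ]:
# Apply the debleaching fit to every voxel's time series, keeping the spatial keys
Dd = S.rdd.mapValues(Debleach)
Dd.cache()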

In [ ]:
S.rdd=Dd

In [67]:
Dd.count()


Out[67]:
9108

In [21]:
Dd[1:3].shape


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-21-82ad484e2c3f> in <module>()
----> 1 Dd[1:3].shape

TypeError: 'PipelinedRDD' object has no attribute '__getitem__'
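
A Spark RDD cannot be sliced like a NumPy array; a few records can be pulled back to the driver with take() instead (a small sketch, assuming Dd holds (key, debleached-trace) pairs from the map above):

In [ ]:
# Inspect a handful of records rather than indexing the RDD
sample = Dd.take(3)
[len(v) for (k, v) in sample]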

In [ ]:
nim2.data=S.

Try 3D registration


In [8]:
from thunder import Registration
reg=Registration('crosscorr')
reg.prepare(imgs,startIdx=100,stopIdx=150)

In [ ]:
model=reg.fit(imgs)
imreg=model.transform(imgs)
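
A possible spot check of the registration (a sketch, assuming imreg is an Images object like imgs, so its underlying RDD holds (key, volume) records, and that the image helper bound from Colorize above is available):

In [ ]:
# Compare the middle z-plane of one volume before and after registration
raw_vol = imgs.rdd.first()[1]
reg_vol = imreg.rdd.first()[1]
image(raw_vol[raw_vol.shape[0] // 2])
image(reg_vol[reg_vol.shape[0] // 2])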