Additional Exploratory Analysis - Albert

Metadata analysis: We already created a metadata script; however, I never actually analyzed the metadata itself until now. Below is the metadata for two of the images, "Fear200" and "Fear199".

{u'channels': {u'Grayscale': {u'channel_type': u'oldchannel', u'datatype': u'uint16', u'description': u'oldchannel', u'exceptions': 1, u'propagate': 2, u'readonly': 1, u'resolution': 0, u'windowrange': [0, 0]}}, u'dataset': {u'cube_dimension': {u'0': [128, 128, 16], u'1': [128, 128, 16], u'2': [128, 128, 16], u'3': [128, 128, 16], u'4': [128, 128, 16], u'5': [64, 64, 64]}, u'description': u'Fear200', u'imagesize': {u'0': [17817, 23068, 1295], u'1': [8909, 11534, 1295], u'2': [4455, 5767, 1295], u'3': [2228, 2884, 1295], u'4': [1114, 1442, 1295], u'5': [557, 721, 1295]}, u'name': u'Fear200', u'neariso_imagesize': {u'0': [17817, 23068, 1295], u'1': [8909, 11534, 1295], u'2': [4455, 5767, 1295], u'3': [2228, 2884, 1295], u'4': [1114, 1442, 647], u'5': [557, 721, 431]}, u'neariso_offset': {u'0': [0.0, 0.0, 0.0], u'1': [0.0, 0.0, 0.0], u'2': [0.0, 0.0, 0.0], u'3': [0.0, 0.0, 0.0], u'4': [0.0, 0.0, 0.0], u'5': [0.0, 0.0, 0.0]}, u'neariso_scaledown': {u'0': 1, u'1': 1, u'2': 1, u'3': 1, u'4': 2, u'5': 3}, u'neariso_voxelres': {u'0': [1.0, 1.0, 10.0], u'1': [2.0, 2.0, 10.0], u'2': [4.0, 4.0, 10.0], u'3': [8.0, 8.0, 10.0], u'4': [16.0, 16.0, 20.0], u'5': [32.0, 32.0, 30.0]}, u'offset': {u'0': [0, 0, 0], u'1': [0, 0, 0], u'2': [0, 0, 0], u'3': [0, 0, 0], u'4': [0, 0, 0], u'5': [0, 0, 0]}, u'resolutions': [0, 1, 2, 3, 4, 5], u'scaling': u'zslices', u'scalinglevels': 5, u'timerange': [0, 0], u'voxelres': {u'0': [1.0, 1.0, 10.0], u'1': [2.0, 2.0, 10.0], u'2': [4.0, 4.0, 10.0], u'3': [8.0, 8.0, 10.0], u'4': [16.0, 16.0, 10.0], u'5': [32.0, 32.0, 10.0]}}, u'metadata': {}, u'project': {u'description': u'Fear200', u'name': u'Fear200', u'version': u'0.0'}}

{u'channels': {u'Grayscale': {u'channel_type': u'oldchannel', u'datatype': u'uint16', u'description': u'oldchannel', u'exceptions': 1, u'propagate': 2, u'readonly': 0, u'resolution': 0, u'windowrange': [99, 196]}}, u'dataset': {u'cube_dimension': {u'0': [128, 128, 16], u'1': [128, 128, 16], u'2': [128, 128, 16], u'3': [128, 128, 16], u'4': [128, 128, 16], u'5': [64, 64, 64]}, u'description': u'Fear199', u'imagesize': {u'0': [17275, 22939, 1358], u'1': [8638, 11470, 1358], u'2': [4319, 5735, 1358], u'3': [2160, 2868, 1358], u'4': [1080, 1434, 1358], u'5': [540, 717, 1358]}, u'name': u'Fear199', u'neariso_imagesize': {u'0': [17275, 22939, 1358], u'1': [8638, 11470, 1358], u'2': [4319, 5735, 1358], u'3': [2160, 2868, 1358], u'4': [1080, 1434, 679], u'5': [540, 717, 452]}, u'neariso_offset': {u'0': [0.0, 0.0, 0.0], u'1': [0.0, 0.0, 0.0], u'2': [0.0, 0.0, 0.0], u'3': [0.0, 0.0, 0.0], u'4': [0.0, 0.0, 0.0], u'5': [0.0, 0.0, 0.0]}, u'neariso_scaledown': {u'0': 1, u'1': 1, u'2': 1, u'3': 1, u'4': 2, u'5': 3}, u'neariso_voxelres': {u'0': [1.0, 1.0, 10.0], u'1': [2.0, 2.0, 10.0], u'2': [4.0, 4.0, 10.0], u'3': [8.0, 8.0, 10.0], u'4': [16.0, 16.0, 20.0], u'5': [32.0, 32.0, 30.0]}, u'offset': {u'0': [0, 0, 0], u'1': [0, 0, 0], u'2': [0, 0, 0], u'3': [0, 0, 0], u'4': [0, 0, 0], u'5': [0, 0, 0]}, u'resolutions': [0, 1, 2, 3, 4, 5], u'scaling': u'zslices', u'scalinglevels': 5, u'timerange': [0, 0], u'voxelres': {u'0': [1.0, 1.0, 10.0], u'1': [2.0, 2.0, 10.0], u'2': [4.0, 4.0, 10.0], u'3': [8.0, 8.0, 10.0], u'4': [16.0, 16.0, 10.0], u'5': [32.0, 32.0, 10.0]}}, u'metadata': {}, u'project': {u'description': u'Fear199', u'name': u'Fear199', u'version': u'0.0'}}

When comparing the two dumps above to the metadata of one of the controls (below), it becomes clear that simple overlay analysis is not going to be particularly effective - we will return to this in a later section when we use MicroGL for visualization. The most important thing to note (obvious in retrospect) is that the brains are different sizes. The easiest form of analysis for a dataset this small would be overlay analysis - overlaying each brain onto some kind of atlas - but because the brains are different sizes, aligning them for an overlay will be very difficult. Additionally, because the data volumes all have different shapes, standard tools like histogram normalization fail out of the box with errors such as "improper shape".

{u'channels': {u'Grayscale': {u'channel_type': u'oldchannel', u'datatype': u'uint16', u'description': u'oldchannel', u'exceptions': 1, u'propagate': 2, u'readonly': 1, u'resolution': 0, u'windowrange': [0, 0]}}, u'dataset': {u'cube_dimension': {u'0': [128, 128, 16], u'1': [128, 128, 16], u'2': [128, 128, 16], u'3': [128, 128, 16], u'4': [128, 128, 16], u'5': [64, 64, 64]}, u'description': u'Control258', u'imagesize': {u'0': [17817, 24223, 1375], u'1': [8909, 12112, 1375], u'2': [4455, 6056, 1375], u'3': [2228, 3028, 1375], u'4': [1114, 1514, 1375], u'5': [557, 757, 1375]}, u'name': u'Control258', u'neariso_imagesize': {u'0': [17817, 24223, 1375], u'1': [8909, 12112, 1375], u'2': [4455, 6056, 1375], u'3': [2228, 3028, 1375], u'4': [1114, 1514, 687], u'5': [557, 757, 458]}, u'neariso_offset': {u'0': [0.0, 0.0, 6.0], u'1': [0.0, 0.0, 6.0], u'2': [0.0, 0.0, 6.0], u'3': [0.0, 0.0, 6.0], u'4': [0.0, 0.0, 3.0], u'5': [0.0, 0.0, 2.0]}, u'neariso_scaledown': {u'0': 1, u'1': 1, u'2': 1, u'3': 1, u'4': 2, u'5': 3}, u'neariso_voxelres': {u'0': [1.0, 1.0, 10.0], u'1': [2.0, 2.0, 10.0], u'2': [4.0, 4.0, 10.0], u'3': [8.0, 8.0, 10.0], u'4': [16.0, 16.0, 20.0], u'5': [32.0, 32.0, 30.0]}, u'offset': {u'0': [0, 0, 6], u'1': [0, 0, 6], u'2': [0, 0, 6], u'3': [0, 0, 6], u'4': [0, 0, 6], u'5': [0, 0, 6]}, u'resolutions': [0, 1, 2, 3, 4, 5], u'scaling': u'zslices', u'scalinglevels': 5, u'timerange': [0, 0], u'voxelres': {u'0': [1.0, 1.0, 10.0], u'1': [2.0, 2.0, 10.0], u'2': [4.0, 4.0, 10.0], u'3': [8.0, 8.0, 10.0], u'4': [16.0, 16.0, 10.0], u'5': [32.0, 32.0, 10.0]}}, u'metadata': {}, u'project': {u'description': u'Control258', u'name': u'Control258', u'version': u'0.0'}}
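
Pulling just the level-0 image sizes out of these three dumps makes the size mismatch explicit. A quick sketch (the numbers are copied directly from the metadata above; only the relevant fields are kept):

In [ ]:
# Level-0 image sizes (x, y, z in voxels), copied from the metadata dumps above
imagesize_0 = {
    'Fear200':    [17817, 23068, 1295],
    'Fear199':    [17275, 22939, 1358],
    'Control258': [17817, 24223, 1375],
}

for name in sorted(imagesize_0):
    x, y, z = imagesize_0[name]
    print name, "shape:", (x, y, z), "total voxels:", x * y * z

# Note also: Control258 has a nonzero z offset ([0, 0, 6]) while Fear199/Fear200 start at [0, 0, 0]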

3D Analysis: To improve our visualization models, I visualized the data in several software packages to find better ways of building a visualization model. Most of this was done with three tools:

  1. ITKSnap: ITKSnap is a powerful tool that we are using to try to create a surface mesh.

  2. MicroGL: (ignore the title - it is incorrect; the Volview result is below) MicroGL is used to create the model. Because it generates a properly dimensioned and segmented brain image, it is the template we are using for the brain encasing.

  3. Volview: Volview is very helpful for 3D visualization by scrolling through slices along the x, y, and z coordinates.

Currently I am using these three tools in a pipeline to create visualizations similar to http://f1000research.com/articles/4-466/v1.
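
To get a volume into these viewers it usually has to be in NIfTI format first. A minimal sketch of that conversion, assuming the raw volumes are Analyze-format .img/.hdr pairs that nibabel can read (nibabel and the output path are assumptions here, not part of the pipeline described above):

In [ ]:
import nibabel as nib

# Load one of the raw Analyze volumes (reads the .img/.hdr pair)
raw = nib.load('../data/raw/Fear199.img')
vol = raw.get_data()  # numpy array; uint16 according to the metadata above

# Re-wrap as NIfTI-1, keeping the original affine, and save for ITKSnap/MicroGL/Volview
nib.save(nib.Nifti1Image(vol, raw.affine), '../data/raw/Fear199.nii.gz')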

As a last-ditch effort - since the classification results were inconclusive due to the small sample size - I varied the number of histogram bins used for classification to see whether any additional information could be extracted.


In [1]:
import os
PATH="/Users/albertlee/claritycontrol/code/scripts" # use your own path
os.chdir(PATH)


import clarity as cl  # I wrote this module for easier operations on data

import matplotlib.pyplot as plt
import jgraph as ig

import clarity.resources as rs
import csv,gc  # garbage memory collection :)

import matplotlib
import numpy as np

from skimage import data, img_as_float
from skimage import exposure

from numpy import genfromtxt

BINS=32 # number of histogram bins
RANGE=(10.0,300.0) # intensity range covered by the histograms

matplotlib.rcParams['font.size'] = 8

my_data = genfromtxt('../data/hist/Fear199.csv', delimiter=',')
print my_data


/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
  warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
[  4.90000000e+01   5.80000000e+01   1.35000000e+02   2.99000000e+02
   7.36000000e+02   2.50000000e+03   7.66600000e+03   4.64490000e+04
   1.51772000e+06   1.16437106e+08   1.97147778e+08   5.00216770e+07
   3.75322990e+07   2.74382340e+07   1.78604610e+07   1.09035570e+07
   7.49919200e+06   4.36949700e+06   3.13623000e+06   2.34876700e+06
   1.79951900e+06   1.42698700e+06   1.15425800e+06   9.54144000e+05
   8.01040000e+05   6.74604000e+05   5.75535000e+05   4.91377000e+05
   4.22580000e+05   3.63628000e+05   3.17308000e+05   3.02971000e+05]
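
Before going further, a quick sanity check is to plot the loaded counts against bin centers reconstructed from the BINS/RANGE constants above (this assumes Fear199.csv was generated with those same settings):

In [ ]:
# Reconstruct bin centers from BINS/RANGE (assumes the CSV used the same settings)
edges = np.linspace(RANGE[0], RANGE[1], BINS + 1)
centers = 0.5 * (edges[:-1] + edges[1:])

plt.figure(figsize=(6, 3))
plt.bar(centers, my_data, width=edges[1] - edges[0], color='gray')
plt.yscale('log')  # counts span several orders of magnitude (see the array above)
plt.xlabel('intensity bin center')
plt.ylabel('voxel count')
plt.title('Fear199 intensity histogram (%d bins)' % BINS)
plt.show()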

In [7]:
BINS = 4  # the "4bin" filenames and the 4-value histogram printed below imply 4 bins here
for token in rs.TOKENS:
    c = cl.Clarity(token)
    fname = rs.HIST_DATA_PATH+"4bin"+token+".csv"
    hist, bin_edges = c.loadImg().getHistogram(bins=BINS,range=RANGE,density=False)
    np.savetxt(fname,hist,delimiter=',')
    print fname,"saved."
    del c
    my_data = genfromtxt(fname, delimiter=',')
    print my_data
    gc.collect()


Image Loaded: ../data/raw/Cocaine174.img
../data/hist/4binCocaine174.csv saved.
Image Loaded: ../data/raw/Cocaine175.img
../data/hist/4binCocaine175.csv saved.
Image Loaded: ../data/raw/Cocaine178.img
../data/hist/4binCocaine178.csv saved.
Image Loaded: ../data/raw/Control181.img
../data/hist/4binControl181.csv saved.
Image Loaded: ../data/raw/Control182.img
../data/hist/4binControl182.csv saved.
Image Loaded: ../data/raw/Control189.img
../data/hist/4binControl189.csv saved.
Image Loaded: ../data/raw/Control239.img
../data/hist/4binControl239.csv saved.
Image Loaded: ../data/raw/Control258.img
../data/hist/4binControl258.csv saved.
Image Loaded: ../data/raw/Fear187.img
../data/hist/4binFear187.csv saved.
Image Loaded: ../data/raw/Fear197.img
../data/hist/4binFear197.csv saved.
Image Loaded: ../data/raw/Fear199.img
../data/hist/4binFear199.csv saved.
Image Loaded: ../data/raw/Fear200.img
../data/hist/4binFear200.csv saved.
[  2.53130000e+04   1.96457382e+08   4.00785140e+07   9.07274100e+06]
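
The cell above uses a single bin setting; to actually vary the bin numbers as described earlier, the same loop can be swept over several counts. A sketch reusing the getHistogram call from the cell above (the specific bin counts are an arbitrary choice):

In [ ]:
# Sweep several bin counts using the same load/histogram/save pattern as above.
for nbins in [1, 4, 8, 16, 32]:  # arbitrary exploratory choices
    for token in rs.TOKENS:
        c = cl.Clarity(token)
        fname = rs.HIST_DATA_PATH + "%dbin%s.csv" % (nbins, token)
        hist, bin_edges = c.loadImg().getHistogram(bins=nbins, range=RANGE, density=False)
        np.savetxt(fname, hist, delimiter=',')
        print fname, "saved."
        del c
        gc.collect()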

In [8]:
import numpy as np
import clarity.resources as rs
features = np.empty(shape=(1,BINS))
for token in rs.TOKENS:
    fname = rs.HIST_DATA_PATH+"4bin"+token+".csv"
    data = np.loadtxt(fname,delimiter=',')
    features = np.vstack([features,data])
features = features[1:,]
minc = np.min(features)
maxc = np.max(features)
features = (features-minc)/(maxc-minc)
print features
np.savetxt(rs.HIST_DATA_PATH+"4binfeatures.csv",features,delimiter=',')


[[  5.90824742e-05   1.00000000e+00   1.22120148e-01   3.20605118e-02]
 [  4.02513940e-05   7.51001373e-01   7.80913612e-02   1.58931904e-02]
 [  5.33573911e-05   6.79104808e-01   1.37626229e-01   3.64573693e-02]
 [  2.78582353e-05   4.21585477e-01   9.71577021e-02   3.32082903e-02]
 [  5.92327137e-06   4.24638954e-01   8.67806085e-02   1.53308807e-02]
 [  5.73946775e-05   6.93202617e-01   4.52744512e-02   9.53192775e-03]
 [  6.05608946e-05   7.68823440e-01   8.30486167e-02   1.90907356e-02]
 [  7.38027465e-05   7.55535379e-01   1.10174701e-01   2.62104007e-02]
 [  4.78017270e-05   6.07689571e-01   8.28482612e-02   2.16572894e-02]
 [  3.81464430e-05   6.13776330e-01   1.01223540e-01   2.66232891e-02]
 [  5.20707658e-05   7.33350093e-01   3.62225482e-02   6.27126756e-03]
 [  0.00000000e+00   3.13955869e-01   6.40167239e-02   1.44604348e-02]]
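
Each row of this normalized matrix corresponds to one token, in the order of rs.TOKENS (the same order as the extraction output above); a quick sketch to keep track of which row belongs to which brain:

In [ ]:
# Pair each normalized feature row with its token (rows follow rs.TOKENS order)
for row_idx, token in enumerate(rs.TOKENS):
    print row_idx, token, features[row_idx]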

In [9]:
from sklearn import cross_validation
from sklearn.cross_validation import LeaveOneOut
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

%matplotlib inline

np.random.seed(12345678)  # for reproducibility, set random seed

# Cocaine = ["Cocaine174","Cocaine175","Cocaine178"]
# Control = ["Control181","Control182","Control189","Control239","Control258"]
# Fear = ["Fear187","Fear197","Fear199","Fear200"]

features = np.loadtxt(rs.HIST_DATA_PATH+"4binfeatures.csv",delimiter=',')
temp_mu = np.mean(features,axis=1)
temp_std = np.std(features,axis=1)

mu = [np.mean(temp_mu[0:3]),np.mean(temp_mu[3:8]),np.mean(temp_mu[8:12])]
std = [np.mean(temp_std[0:3]),np.mean(temp_std[3:8]),np.mean(temp_std[8:12])]
print mu
print std
std=[1,1,1]  # override the empirical per-class stds with unit variance for the simulated draws

# total number of simulated subjects per run (split evenly across the three classes below)
S = np.array((9, 21, 30, 39, 45, 63, 81, 96, 108, 210, 333))

names = ["Nearest Neighbors", "Linear SVM", "Random Forest",
         "Linear Discriminant Analysis", "Quadratic Discriminant Analysis"]

classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    LinearDiscriminantAnalysis()]


[0.23770897349869147, 0.17949098607239755, 0.16388957730669243]
[0.33325814318994945, 0.25244003850017155, 0.2346371954529729]

In [10]:
accuracy = np.zeros((len(S), len(classifiers), 2), dtype=np.dtype('float64'))
for idx1, s in enumerate(S):
    s0=s/3
    s1=s/3
    s2=s/3
    
    x0 = np.random.normal(mu[0],std[0],(s0,BINS))
    x1 = np.random.normal(mu[1],std[1],(s1,BINS))
    x2 = np.random.normal(mu[2],std[2],(s2,BINS))
    X = x0
    X = np.vstack([X,x1])
    X = np.vstack([X,x2])
    y = np.append(np.append(np.zeros(s0), np.ones(s1)),np.ones(s2)*2)
    for idx2, cla in enumerate(classifiers):
        X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.4, random_state=0)
        clf = cla.fit(X_train, y_train)
        loo = LeaveOneOut(len(X))
        scores = cross_validation.cross_val_score(clf, X, y, cv=loo)
        accuracy[idx1, idx2,] = [scores.mean(), scores.std()]
        print("Accuracy of %s: %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2))
    
print accuracy


Accuracy of Nearest Neighbors: 0.22 (+/- 0.83)
Accuracy of Linear SVM: 0.11 (+/- 0.63)
Accuracy of Random Forest: 0.22 (+/- 0.83)
Accuracy of Linear Discriminant Analysis: 0.33 (+/- 0.94)
Accuracy of Nearest Neighbors: 0.33 (+/- 0.94)
Accuracy of Linear SVM: 0.24 (+/- 0.85)
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/discriminant_analysis.py:387: UserWarning: Variables are collinear.
  warnings.warn("Variables are collinear.")
Accuracy of Random Forest: 0.29 (+/- 0.90)
Accuracy of Linear Discriminant Analysis: 0.33 (+/- 0.94)
Accuracy of Nearest Neighbors: 0.20 (+/- 0.80)
Accuracy of Linear SVM: 0.23 (+/- 0.85)
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/sklearn/discriminant_analysis.py:453: UserWarning: The priors do not sum to 1. Renormalizing
  UserWarning)
Accuracy of Random Forest: 0.30 (+/- 0.92)
Accuracy of Linear Discriminant Analysis: 0.33 (+/- 0.94)
Accuracy of Nearest Neighbors: 0.33 (+/- 0.94)
Accuracy of Linear SVM: 0.10 (+/- 0.61)
Accuracy of Random Forest: 0.26 (+/- 0.87)
Accuracy of Linear Discriminant Analysis: 0.13 (+/- 0.67)
Accuracy of Nearest Neighbors: 0.31 (+/- 0.93)
Accuracy of Linear SVM: 0.04 (+/- 0.41)
Accuracy of Random Forest: 0.31 (+/- 0.93)
Accuracy of Linear Discriminant Analysis: 0.16 (+/- 0.72)
Accuracy of Nearest Neighbors: 0.40 (+/- 0.98)
Accuracy of Linear SVM: 0.57 (+/- 0.99)
Accuracy of Random Forest: 0.40 (+/- 0.98)
Accuracy of Linear Discriminant Analysis: 0.49 (+/- 1.00)
Accuracy of Nearest Neighbors: 0.40 (+/- 0.98)
Accuracy of Linear SVM: 0.28 (+/- 0.90)
Accuracy of Random Forest: 0.23 (+/- 0.85)
Accuracy of Linear Discriminant Analysis: 0.28 (+/- 0.90)
Accuracy of Nearest Neighbors: 0.30 (+/- 0.92)
Accuracy of Linear SVM: 0.29 (+/- 0.91)
Accuracy of Random Forest: 0.29 (+/- 0.91)
Accuracy of Linear Discriminant Analysis: 0.25 (+/- 0.87)
Accuracy of Nearest Neighbors: 0.32 (+/- 0.94)
Accuracy of Linear SVM: 0.46 (+/- 1.00)
Accuracy of Random Forest: 0.31 (+/- 0.92)
Accuracy of Linear Discriminant Analysis: 0.44 (+/- 0.99)
Accuracy of Nearest Neighbors: 0.32 (+/- 0.94)
Accuracy of Linear SVM: 0.37 (+/- 0.96)
Accuracy of Random Forest: 0.35 (+/- 0.96)
Accuracy of Linear Discriminant Analysis: 0.37 (+/- 0.97)
Accuracy of Nearest Neighbors: 0.36 (+/- 0.96)
Accuracy of Linear SVM: 0.40 (+/- 0.98)
Accuracy of Random Forest: 0.30 (+/- 0.92)
Accuracy of Linear Discriminant Analysis: 0.38 (+/- 0.97)
[[[ 0.22222222  0.41573971]
  [ 0.11111111  0.31426968]
  [ 0.22222222  0.41573971]
  [ 0.33333333  0.47140452]]

 [[ 0.33333333  0.47140452]
  [ 0.23809524  0.42591771]
  [ 0.28571429  0.45175395]
  [ 0.33333333  0.47140452]]

 [[ 0.2         0.4       ]
  [ 0.23333333  0.42295258]
  [ 0.3         0.45825757]
  [ 0.33333333  0.47140452]]

 [[ 0.33333333  0.47140452]
  [ 0.1025641   0.30338871]
  [ 0.25641026  0.43665093]
  [ 0.12820513  0.33431807]]

 [[ 0.31111111  0.46294815]
  [ 0.04444444  0.20608041]
  [ 0.31111111  0.46294815]
  [ 0.15555556  0.36243348]]

 [[ 0.3968254   0.48923921]
  [ 0.57142857  0.49487166]
  [ 0.3968254   0.48923921]
  [ 0.49206349  0.49993701]]

 [[ 0.39506173  0.48886395]
  [ 0.28395062  0.45091314]
  [ 0.2345679   0.42372845]
  [ 0.28395062  0.45091314]]

 [[ 0.30208333  0.45916118]
  [ 0.29166667  0.45452967]
  [ 0.29166667  0.45452967]
  [ 0.25        0.4330127 ]]

 [[ 0.32407407  0.46802785]
  [ 0.46296296  0.49862637]
  [ 0.30555556  0.46064233]
  [ 0.43518519  0.49578124]]

 [[ 0.32380952  0.46792832]
  [ 0.36666667  0.48189441]
  [ 0.35238095  0.47771186]
  [ 0.37142857  0.4831867 ]]

 [[ 0.36036036  0.48010496]
  [ 0.3993994   0.48977497]
  [ 0.3033033   0.45968512]
  [ 0.38438438  0.48644941]]]

In [11]:
plt.errorbar(S, accuracy[:,0,0], yerr = accuracy[:,0,1], hold=True, label=names[0])
plt.errorbar(S, accuracy[:,1,0], yerr = accuracy[:,1,1], color='green', hold=True, label=names[1])
plt.errorbar(S, accuracy[:,2,0], yerr = accuracy[:,2,1], color='red', hold=True, label=names[2])
plt.errorbar(S, accuracy[:,3,0], yerr = accuracy[:,3,1], color='black', hold=True, label=names[3])
# plt.errorbar(S, accuracy[:,4,0], yerr = accuracy[:,4,1], color='brown', hold=True, label=names[4])
plt.xscale('log')
plt.xlabel('number of samples')
plt.ylabel('accuracy')
plt.title('Accuracy of classification under simulated data')
plt.axhline(1, color='red', linestyle='--')
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()



In [12]:
y=np.array([0,0,0,1,1,1,1,1,2,2,2,2])  # 0 = cocaine (3), 1 = control (5), 2 = fear (4), matching rs.TOKENS order
features = np.loadtxt(rs.HIST_DATA_PATH+"4binfeatures.csv",delimiter=',')

In [13]:
accuracy=np.zeros((len(classifiers),2))
for idx, cla in enumerate(classifiers):
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(features, y, test_size=0.4, random_state=0)
    clf = cla.fit(X_train, y_train)
    loo = LeaveOneOut(len(features))
    scores = cross_validation.cross_val_score(clf, features, y, cv=loo)
    accuracy[idx,] = [scores.mean(), scores.std()]
    print("Accuracy of %s: %0.2f (+/- %0.2f)" % (names[idx], scores.mean(), scores.std() * 2))


Accuracy of Nearest Neighbors: 0.00 (+/- 0.00)
Accuracy of Linear SVM: 0.25 (+/- 0.87)
Accuracy of Random Forest: 0.33 (+/- 0.94)
Accuracy of Linear Discriminant Analysis: 0.25 (+/- 0.87)
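
Accuracy alone hides which groups are getting mixed up; as one more diagnostic (a sketch, not something run above), the leave-one-out predictions can be turned into a confusion matrix:

In [ ]:
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import cross_val_predict

# Leave-one-out predictions for one classifier, then a 3x3 confusion matrix
# (rows = true class, columns = predicted class; 0 = cocaine, 1 = control, 2 = fear)
loo = LeaveOneOut(len(features))
preds = cross_val_predict(SVC(kernel="linear", C=0.5), features, y, cv=loo)
print confusion_matrix(y, preds)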

Now for the simplest case - a single histogram bin:


In [ ]:
BINS = 1  # simplest case: a single bin, matching the "1bin" filenames
for token in rs.TOKENS:
    c = cl.Clarity(token)
    fname = rs.HIST_DATA_PATH+"1bin"+token+".csv"
    hist, bin_edges = c.loadImg().getHistogram(bins=BINS,range=RANGE,density=False)
    np.savetxt(fname,hist,delimiter=',')
    print fname,"saved."
    del c
    my_data = genfromtxt(fname, delimiter=',')
    print my_data
    gc.collect()


Image Loaded: ../data/raw/Cocaine174.img

In [ ]: