In [14]:
import sys
import os
# NOTE(review): hardcoded absolute path to a local NiftyNet checkout — will only
# work on this machine; consider an env var or editable install instead.
niftynet_path = '/home/tom/phd/NiftyNet-Generator-PR/NiftyNet'
sys.path.append(niftynet_path)
# Hide all GPUs so TensorFlow runs on CPU (empty CUDA_VISIBLE_DEVICES).
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from niftynet.io.image_reader import ImageReader
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from collections import namedtuple
from niftynet.contrib.preprocessors.preprocessing import Preprocessing
from niftynet.contrib.csv_reader.sampler_csv_rows import ImageWindowDatasetCSV
from niftynet.contrib.csv_reader.sampler_resize_v2_csv import ResizeSamplerCSV as ResizeSampler
from niftynet.contrib.csv_reader.csv_reader import CSVReader
In [15]:
#### Some setup
NetParam = namedtuple('NetParam', 'normalise_foreground_only foreground_type multimod_foreground_type histogram_ref_file norm_type cutoff normalisation whitening')
ActionParam = namedtuple('ActionParam', 'random_flipping_axes scaling_percentage rotation_angle rotation_angle_x rotation_angle_y rotation_angle_z do_elastic_deformation num_ctrl_points deformation_sigma proportion_to_deform')
class TaskParam:
def __init__(self, classes):
self.image = classes
net_param = NetParam(normalise_foreground_only=False, foreground_type='threshold_plus', multimod_foreground_type = 'and', histogram_ref_file='mapping.txt', norm_type='percentile', cutoff=(0.05, 0.95), normalisation=False, whitening=True)
action_param = ActionParam(random_flipping_axes=[], scaling_percentage=[], rotation_angle=None, rotation_angle_x=None, rotation_angle_y=None, rotation_angle_z=None, do_elastic_deformation=False, num_ctrl_points=6, deformation_sigma=50, proportion_to_deform=0.9)
In [16]:
# Fetch the demo data set, then build a per-subject label CSV where the
# 'label' column is just the subject id — used below to exercise the
# CSVReader's one-hot encoding.
from niftynet.utilities.download import download
download('mr_ct_regression_model_zoo_data')
# NOTE(review): hardcoded user-specific path — assumes the download above
# landed under /home/tom/niftynet; TODO make configurable.
ct_dir = '/home/tom/niftynet/data/mr_ct_regression/CT_zero_mean'
subject_ids = [file.replace('.nii.gz', '')
               for file in os.listdir(ct_dir) if file.endswith('.nii.gz')]
# index=False (not index=None) is the idiomatic way to drop the frame index.
pd.DataFrame(data=[(sid, sid) for sid in subject_ids]).to_csv(
    'label.csv', index=False, header=['subject_id', 'label'])
pd.read_csv('label.csv')
Out[16]:
In [17]:
#### Testing the CSV Reader on labels
# Make sure we accept 'Label', 'label', 'LABEL'
# Single task image group built from the one CT modality.
task_param = TaskParam(['image'])
# NOTE(review): '~' path relies on the download cell above having run.
image_data_param = {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean', 'filename_contains': 'nii'}}
#csv_data_file is a csv with data
# 'to_ohe': True asks the CSVReader to one-hot encode the label column.
csv_data_param = {'label': {'csv_data_file': 'label.csv', 'to_ohe': True}}
grouping_param = {'image': (['CT'])}
# Partition the files, then build an image reader over the full file list.
image_sets_partitioner = ImageSetsPartitioner().initialise(image_data_param)
image_reader = ImageReader().initialise(image_data_param, grouping_param, file_list=image_sets_partitioner.all_files)
# Attach whitening (and any augmentation) layers defined by the params above.
preprocessing = Preprocessing(net_param, action_param, task_param)
normalisation_layers = preprocessing.prepare_normalisation_layers()
augmentation_layers = preprocessing.prepare_augmentation_layers()
image_reader.add_preprocessing_layers(normalisation_layers + augmentation_layers)
# CSV reader keyed by 'label'; file_list keeps it aligned with the image reader.
csv_reader = CSVReader(('label',)).initialise(csv_data_param, {'label': (['label'])}, file_list=image_sets_partitioner.all_files)
print('One sample from the csv_reader:', np.squeeze(csv_reader(idx=13)[1]['label']))
# Sample 100x100 2D windows; the label window is a single value per subject.
window_sizes = {'image': (100, 100, 1), 'label': (1, 1, 1)}
sampler = ResizeSampler(reader=image_reader,
csv_reader=csv_reader,
window_sizes=window_sizes,
num_threads=2,
smaller_final_batch_mode='drop',
batch_size=2,
queue_length=2)
# Pull one batch and show that image and label shapes line up with batch_size=2.
sample = next(sampler())
print(sample['image'].shape)
print(sample['label'].shape)
In [18]:
# Build a per-subject feature CSV with 10 random continuous columns, to
# exercise the CSVReader on non-one-hot data.
from niftynet.utilities.download import download
download('mr_ct_regression_model_zoo_data')
# Seed the generator so the feature CSV is reproducible across re-runs.
rng = np.random.RandomState(0)
# NOTE(review): hardcoded user-specific path — same caveat as the label cell.
ct_dir = '/home/tom/niftynet/data/mr_ct_regression/CT_zero_mean'
subject_ids = [file.replace('.nii.gz', '')
               for file in os.listdir(ct_dir) if file.endswith('.nii.gz')]
pd.DataFrame(
    data=[tuple([sid] + list(rng.randn(10))) for sid in subject_ids]
).to_csv('features.csv', index=False,
         header=['subject_id'] + [str(x) for x in range(10)])
pd.read_csv('features.csv')
Out[18]:
In [19]:
# Same pipeline as the label cell, but reading continuous features
# ('to_ohe': False) instead of one-hot labels.
task_param = TaskParam(['image'])
image_data_param = {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean', 'filename_contains': 'nii'}}
# Continuous values: no one-hot encoding.
csv_data_param = {'features': {'csv_data_file': 'features.csv', 'to_ohe': False}}
grouping_param = {'image': (['CT'])}
image_sets_partitioner = ImageSetsPartitioner().initialise(image_data_param)
image_reader = ImageReader().initialise(image_data_param, grouping_param, file_list=image_sets_partitioner.all_files)
preprocessing = Preprocessing(net_param, action_param, task_param)
normalisation_layers = preprocessing.prepare_normalisation_layers()
augmentation_layers = preprocessing.prepare_augmentation_layers()
image_reader.add_preprocessing_layers(normalisation_layers + augmentation_layers)
csv_reader = CSVReader(('features',)).initialise(csv_data_param, {'features': ['features']}, file_list=image_sets_partitioner.all_files)
print('One sample from the csv_reader:', np.squeeze(csv_reader(idx=13)[1]['features']))
window_sizes = {'image': (100, 100, 1), 'features': (1, 1, 1)}
sampler = ResizeSampler(reader=image_reader,
csv_reader=csv_reader,
window_sizes=window_sizes,
num_threads=2,
smaller_final_batch_mode='drop',
batch_size=2,
queue_length=2)
# One batch: image windows plus the per-subject feature vectors.
sample = next(sampler())
print(sample['image'].shape)
print(sample['features'].shape)
print(sample.keys())
In [20]:
# Make sure we accept 'Label', 'label', 'LABEL'
# Combined test: one CSVReader serving both the one-hot label and the
# continuous feature vector for each subject.
task_param = TaskParam(['image'])
image_data_param = {'CT': {'path_to_search': '~/niftynet/data/mr_ct_regression/CT_zero_mean', 'filename_contains': 'nii'}}
csv_data_param = {'label': {'csv_data_file': 'label.csv', 'to_ohe': True},
'features': {'csv_data_file': 'features.csv', 'to_ohe': False}}
grouping_param = {'image': (['CT'])}
image_sets_partitioner = ImageSetsPartitioner().initialise(image_data_param)
image_reader = ImageReader().initialise(image_data_param, grouping_param, file_list=image_sets_partitioner.all_files)
preprocessing = Preprocessing(net_param, action_param, task_param)
normalisation_layers = preprocessing.prepare_normalisation_layers()
augmentation_layers = preprocessing.prepare_augmentation_layers()
image_reader.add_preprocessing_layers(normalisation_layers + augmentation_layers)
# Two CSV sources registered under their own output names.
csv_reader = CSVReader(('label', 'features')).initialise(csv_data_param,
{'label': (['label']), 'features': (['features'])},
file_list=image_sets_partitioner.all_files)
print('One sample from the csv_reader:', np.squeeze(csv_reader(idx=13)[1]['label']))
window_sizes = {'image': (100, 100, 1), 'label': (1, 1, 1)}
sampler = ResizeSampler(reader=image_reader,
csv_reader=csv_reader,
window_sizes=window_sizes,
num_threads=2,
smaller_final_batch_mode='drop',
batch_size=2,
queue_length=2)
# One batch should carry image windows, labels and features together.
sample = next(sampler())
print(sample['image'].shape)
print(sample['label'].shape)
print(sample['features'].shape)
In [30]:
# Known BraTS modality tags; 't1ce.' is checked before 't1.' so contrast-
# enhanced files are not mislabelled as plain T1.
modalities = ['t1ce.', 't1.', 'flair.', 't2.']


def get_modality(string):
    """Return the first modality tag found in `string`, without its trailing dot.

    Raises:
        ValueError: if no known modality substring occurs in `string`
            (the original `.index(True)` idiom also raised ValueError,
            but with an opaque message).
    """
    for mod in modalities:
        if mod in string:
            return mod[:-1]
    raise ValueError('no known modality found in %r' % string)
# Build (subject_id, modality) pairs for every non-segmentation BraTS file.
# NOTE(review): both paths below are hardcoded absolute local paths — this
# cell only runs on this machine; TODO parameterize.
files = [(file.replace('.nii.gz', ''), get_modality(file)) \
for file in os.listdir('/home/tom/data/BRATS_18_SPLITS/train') if 'seg' not in file]
pd.DataFrame(data=files, columns=['subject_id', 'label']).to_csv('/home/tom/phd/NiftyNet-Generator-PR/NiftyNet/modality_labels.csv', index=None)
In [ ]: