In [ ]:
%load_ext autoreload
%autoreload 2
import os
import sys
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
sys.path.append("../../..")
from batchflow import *
from batchflow.opensets import MNIST
from batchflow.models.torch import *
from batchflow.models.torch.layers import *
In [ ]:
# Load MNIST once; downloaded on first use by batchflow.opensets.
mnist = MNIST()

# Defaults for interactive (notebook) execution.
BAR = True
PLOT = False

if __name__ == '__main__':
    # Overrides used when this file runs as a plain script (e.g. notebooks_test.py).
    MICROBATCH = None
    DEVICE = None
    BAR = 'n'
    PLOT = True

# NOTE(review): MICROBATCH and DEVICE are bound only inside the __main__ guard,
# so the prints below raise NameError on plain import — confirm this is intended.
print('\nMicrobatching is: {}'.format(MICROBATCH))
print('\nDevice is: {}'.format(DEVICE))
print('\nBar style is: {}'.format(BAR))
print('\nPlot results is: {}'.format(PLOT))
In [ ]:
# Channels-first MNIST image shape and default run sizes for the demo cells below.
IMAGE_SHAPE = (1, 28, 28)
N_ITERS = 10
BATCH_SIZE = 16
def get_classification_config(model_class, config):
    """ Assemble the pipeline config for training `model_class` as a classifier.

    User-supplied `config` entries override the defaults
    (cross-entropy loss, ten classes, channels-first MNIST images).
    """
    base = {
        'inputs/images/shape': IMAGE_SHAPE,    # can be commented
        'inputs/labels/classes': 10,
        'initial_block/inputs': 'images',      # can be commented
        'loss': 'ce',
        'microbatch': MICROBATCH,
        'device': DEVICE,
    }
    return {
        'model': model_class,
        'model_config': {**base, **config},
        'feed_dict': {'images': B('images'), 'targets': B('labels')},
        'gather': {'metrics_class': 'classification',
                   'fmt': 'logits',
                   'axis': 1,
                   'targets': B.labels},
        'evaluate': 'accuracy',
    }
def get_segmentation_config(model_class, config):
    """ Assemble the pipeline config for a segmentation-style run.

    The images themselves serve as targets (autoencoder-like setup);
    user-supplied `config` entries override the MSE-loss defaults.
    """
    base = {
        'inputs/images/shape': IMAGE_SHAPE,    # can be commented
        'inputs/masks/shape': IMAGE_SHAPE,     # can be commented
        'initial_block/inputs': 'images',      # can be commented
        'loss': 'mse',
        'microbatch': MICROBATCH,
        'device': DEVICE,
    }
    return {
        'model': model_class,
        'model_config': {**base, **config},
        'feed_dict': {'images': B('images'), 'targets': B('images')},
        'gather': {'metrics_class': 'segmentation',
                   'fmt': 'proba',
                   'axis': None,
                   'targets': B.images},
        'evaluate': 'jaccard',
    }
In [ ]:
def get_pipeline(pipeline_config):
    """ Build the train pipeline.

    Pipeline config must contain 'model', 'model_config', 'feed_dict' keys.

    Preprocessing casts images to channels-first float32 scaled to [0, 1];
    each train step appends the loss value to the 'loss_history' variable.
    """
    # NOTE: a previous revision materialized `pipeline_config['feed_dict'].values()`
    # into an unused local (`vals`); it is removed here along with dead code.
    pipeline = (Pipeline(config=pipeline_config)
                .init_variable('loss_history', [])
                .to_array(channels='first', dtype='float32')
                .multiply(multiplier=1/255., preserve_type=False)
                .init_model('dynamic', C('model'),
                            'MODEL', config=C('model_config'))
                .train_model('MODEL',
                             **pipeline_config['feed_dict'],
                             fetches='loss',
                             save_to=V('loss_history', mode='a'))
                )
    return pipeline
In [ ]:
def run(task, model_class, config, description, batch_size=BATCH_SIZE, n_iters=N_ITERS):
    """ Train a model on MNIST and return the finished train pipeline.

    Parameters
    ----------
    task : str
        Either 'classification' or 'segmentation'; selects the config builder.
    model_class : class
        Model to instantiate inside the pipeline.
    config : dict
        Model-config overrides merged over the task defaults.
    description : str
        Human-readable tag used in the completion message.
    batch_size, n_iters : int
        Training schedule.

    Raises
    ------
    ValueError
        If `task` is not one of the two supported values (previously this
        fell through to a confusing NameError).
    """
    if task == 'classification':
        pipeline_config = get_classification_config(model_class, config)
    elif task == 'segmentation':
        pipeline_config = get_segmentation_config(model_class, config)
    else:
        raise ValueError("task must be 'classification' or 'segmentation', got {!r}".format(task))
    train_pipeline = get_pipeline(pipeline_config) << mnist.train
    _ = train_pipeline.run(batch_size, n_iters=n_iters, bar=BAR,
                           bar_desc=W(V('loss_history')[-1].format('Loss is {:7.7}')))
    print('{} {} is done'.format(task, description))
    return train_pipeline
In [ ]:
def show_some_results(ppl, task, size=10):
    """ Display `size` random examples from a finished test pipeline.

    classification -- print a true-vs-predicted label table;
    segmentation   -- plot target and predicted images side by side.
    """
    targets = ppl.v('targets')
    predictions = ppl.v('predictions')
    batch_ind = np.random.randint(len(targets))
    true = targets[batch_ind]
    pred = predictions[batch_ind]
    image_ind = np.random.choice(len(true), size=size, replace=False)

    if task == 'classification':
        table = pd.DataFrame({'true': true[image_ind],
                              'pred': np.argmax(pred[image_ind], axis=1)})
        print(table.to_string(index=False))
    elif task == 'segmentation':
        pass # for the sake of parsing by notebooks_test.py
        # NOTE(review): suite indentation was lost in this copy of the file; the
        # plotting is placed under the segmentation branch because imshow only
        # makes sense for image targets (labels/logits are not 2-D) — confirm.
        fig, ax = plt.subplots(2, size, figsize=(10, 5))
        for axis in ax.ravel():
            axis.set_axis_off()
        for col, image_num in enumerate(image_ind):
            ax[0, col].imshow(true[image_num][0], cmap='gray', vmin=0, vmax=1)
            ax[1, col].imshow(pred[image_num][0], cmap='gray', vmin=0, vmax=1)
In [ ]:
def test(pipeline, show_results=PLOT):
    """ Evaluate a trained pipeline's model on the MNIST test set.

    Imports 'MODEL' from `pipeline`, accumulates targets/predictions/metrics
    over one epoch, optionally shows sample results, then prints the mean of
    the metric named by the pipeline's 'evaluate' config entry.
    """
    test_pipeline = (mnist.test.p
                     .import_model('MODEL', pipeline)
                     .init_variable('targets', default=[])
                     .init_variable('predictions', default=[])
                     .init_variable('metrics', default=[])
                     .to_array(channels='first', dtype='float32')
                     .multiply(multiplier=1/255., preserve_type=False)
                     .update(V('targets', mode='a'), pipeline.config['feed_dict/targets'])
                     .predict_model('MODEL',
                                    feed_dict=pipeline.config['feed_dict'],
                                    fetches='predictions',
                                    save_to=V('predictions', mode='a'))
                     .gather_metrics(**pipeline.config['gather'],
                                     predictions=V.predictions[-1],
                                     save_to=V('metrics', mode='a'))
                     .run(64, shuffle=False, n_epochs=1, drop_last=False, bar=BAR)
                     )
    if show_results:
        show_some_results(test_pipeline, pipeline.config['gather/metrics_class'])
    metrics = test_pipeline.get_variable('metrics')
    metric_name = pipeline.config['evaluate']
    score = np.mean([m.evaluate(metric_name) for m in metrics])
    print('{0} metrics is: {1:.3}'.format(metric_name, score))
    return test_pipeline
In [ ]:
# A plain fully-connected classifier; layout strings ('f', 'a', ...) are
# interpreted by batchflow's layer parser — TODO confirm letter meanings there.
config = {
'initial_block': {'layout': 'fa'*2,
'units': [64, 128],},
'body': {'layout': 'fa'*2,
'units': [256, 512]},
'head': {'layout': 'faf',
'units': [600, 10]},
}
ppl = run('classification', TorchModel, config, 'simple fc', n_iters=50, batch_size=64)
# test(ppl)
In [ ]:
# Encoder model: five downsampling stages and a single dense head layer.
config = {
'body/encoder/num_stages': 5,
'head': {'layout': 'f'}
}
ppl = run('classification', Encoder, config, 'encoder')
In [ ]:
# Two train steps over a custom block order; the tuple presumably reads as
# (name, config-key, block-maker) — verify against TorchModel's 'order' handling.
config = {
'initial_block': {'layout': 'fafaf', 'units': [128, 256, 10]},
'order': ['initial_block', ('ib_2', 'initial_block', TorchModel.initial_block)],
'loss': ['ce', 'ce'],
'decay': 'exp',
'n_iters': 25,
'train_steps': {'ts_1': {}, 'ts_2': {}},
}
ppl = run('classification', TorchModel, config, 'train steps and order')
In [ ]:
# Feed the same image tensor twice and merge the copies elementwise ('+').
config = {
'initial_block/inputs': ['images', 'images'],
# note that we can't directly assign this module to `initial_block`
'initial_block/module': Combine(op='+'),
'body/encoder': {'num_stages': 5},
'head': {'layout': 'faf', 'units': [50, 10]}
}
ppl = run('classification', Encoder, config, 'duo input')
In [ ]:
# ResNet customized via config: per-stage block repeats, no bottleneck,
# squeeze-and-excitation ('se') attention.
config = {
'initial_block/filters': 6,
'body/encoder/blocks/n_reps': [1, 1, 2, 1],
'body/encoder/blocks/bottleneck': False,
'body/encoder/blocks/attention': 'se',
}
ppl = run('classification', ResNet, config, 'resnet with config')
In [ ]:
# Summary of the trained model.
ppl.m('MODEL').info
In [ ]:
# Named architectures shipped with batchflow; DenseNet121 left disabled.
ppl = run('classification', ResNet18, {}, 'resnet18')
ppl = run('classification', SEResNeXt18, {}, 'SE-resneXt18')
# ppl = run('classification', DenseNet121, {}, 'DenseNet121')
In [ ]:
ppl = run('classification', VGG7, {}, 'vgg7')
In [ ]:
# reusing encoder from model from the previous cell
# The already-trained encoder module is dropped in as the new model's initial block.
config = {
'initial_block': ppl.m('MODEL').model.body.encoder,
'head' : {'layout': 'Vf'},
}
ppl = run('classification', TorchModel, config, 'reused encoder')
In [ ]:
# Wrap a pretrained torchvision ResNet-18 as the body: the initial block's conv
# maps 1-channel MNIST input to the 3 channels ResNet expects, and the final fc
# layer is replaced by Identity so the head below receives raw features.
import torchvision.models as models
resnet18 = models.resnet18(pretrained=True)
resnet18.fc = torch.nn.Identity()
config = {
'initial_block': {'layout': 'cna',
'filters': 3},
'body': resnet18,
'head': {'layout': 'Dnfaf',
'units': [50, 10],
'dropout_rate': 0.3,
'multisample': 0.3},
}
ppl = run('classification', TorchModel, config, 'pretrained resnet')
In [ ]:
# Decoder-only segmentation model; factor [1, 1, 1] presumably keeps spatial
# size fixed across the three decoder stages — confirm in Decoder docs.
config = {
'initial_block': {'layout': 'cna', 'filters': 8},
'body/decoder/num_stages': 3,
'body/decoder/factor': [1, 1, 1],
}
ppl = run('segmentation', Decoder, config, 'decoder')
In [ ]:
# U-Net spelled out as a single layout string; letters mark branch/merge points,
# pooling and transposed convs (semantics per batchflow's layout parser — verify
# there). 'same' filter counts are resolved by the framework at build time.
config = {
'step_on_each': 1,
'initial_block': {
'layout': 'cnaRp cnaRp tna+ tna+ BScna+ cnac',
'filters': [16, 32, 32, 16, 'same', 8, 1],
'transposed_conv': {'kernel_size': 2, 'strides': 2},
'branch': {'layout': 'ca', 'filters': 'same'}
},
}
ppl = run('segmentation', TorchModel, config, 'hardcoded unet')
In [ ]:
# Encoder-decoder with an ASPP (atrous spatial pyramid pooling) embedding
# between the two halves.
config = {
'body/encoder/num_stages': 2,
'body/embedding': {'base': ASPP, 'pyramid': (2, 4, 8)},
}
ppl = run('segmentation', EncoderDecoder, config, 'unet-like with ASPP')
In [ ]:
# Classic U-Net; 'same*2' / 'same//2' presumably scale filter counts relative to
# the incoming width, doubling on the way down and halving on the way up.
config = {
'initial_block/filters': 128,
'body/encoder/num_stages': 3,
'body/encoder/blocks/filters': 'same*2',
'body/embedding/filters': 'same',
'body/decoder/blocks/filters': 'same//2',
}
ppl = run('segmentation', UNet, config, 'unet')
# ppl = run('segmentation', ResUNet, config, 'unet with residual blocks')
In [ ]:
# U-Net variant built from dense blocks.
config = {
'initial_block/filters': 16,
'body/encoder/num_stages': 2,
'body/embedding/filters': 6,
'body/decoder/blocks/filters': 6,
}
ppl = run('segmentation', DenseUNet, config, 'unet with dense blocks')
In [ ]:
# Encoder-decoder whose encoder is a ResNet18 built with overridden block kwargs.
config = {
'body/encoder/base_model': ResNet18,
'body/encoder/base_model_kwargs/blocks/filters': 7,
'body/decoder/blocks/filters': 'same//4'
}
ppl = run('segmentation', EncoderDecoder, config, 'encoder-decoder with resnet18 backbone')
In [ ]:
# Model summary.
ppl.m('MODEL').info
In [ ]:
# Inspect the underlying torch module; debug-mode toggle left disabled.
# ppl.m('MODEL').set_debug_mode(True)
ppl.m('MODEL').model