In [ ]:
# Notebook setup cell: IPython autoreload magics, imports, and log/warning suppression.
%load_ext autoreload
%autoreload 2
import os
import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Make the local batchflow package importable from the repository root.
sys.path.append("../../..")
from batchflow import *
from batchflow.opensets import MNIST
from batchflow.models.tf import *
# Silence TF 1.x Python-level and C++-level log noise, plus Python warnings.
tf.logging.set_verbosity(tf.logging.ERROR)
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
In [ ]:
# Load MNIST once for every run below; batches behave as image batches.
mnist = MNIST(batch_class=ImagesBatch)

if __name__ == '__main__':
    # Global knobs picked up by the config builders below.
    MICROBATCH = None
    DEVICE = None

    print('\nMicrobatching is: {}'.format(MICROBATCH))
    print('\nDevice is: {}'.format(DEVICE))
In [ ]:
def get_classification_config(model_class, config):
    """ Assemble a pipeline config for training a classifier on MNIST.

    Entries in `config` override the defaults below. The result carries the
    model class, the merged model config, and the feed dict mapping model
    inputs to batch components.
    """
    base = {
        'inputs/images/shape': (28, 28, 1),
        'inputs/labels/classes': 10,
        'initial_block/inputs': 'images',
        'loss': 'ce',
        'microbatch': MICROBATCH,
        'device': DEVICE,
    }
    # User-supplied keys win over the defaults.
    base.update(config)
    return {
        'model': model_class,
        'model_config': base,
        'feed_dict': {'images': B('images'), 'labels': B('labels')},
    }
def get_segmentation_config(model_class, config):
    """ Assemble a pipeline config for training a segmentation model on MNIST.

    Entries in `config` override the defaults below. The images themselves
    double as the target masks, turning the task into an autoencoder-style
    sanity check.
    """
    base = {
        'inputs/images/shape': (28, 28, 1),
        'inputs/masks/shape': (28, 28, 1),
        'initial_block/inputs': 'images',
        # for some reason `concat` is not working from within pytest
        'body/decoder/blocks/combine_op': 'softsum',
        'loss': 'mse',
        'microbatch': MICROBATCH,
        'device': DEVICE,
    }
    # User-supplied keys win over the defaults.
    base.update(config)
    return {
        'model': model_class,
        'model_config': base,
        'feed_dict': {'images': B('images'), 'masks': B('images')},
    }
In [ ]:
def get_pipeline(pipeline_config):
    """ Pipeline config must contain 'model', 'model_config', 'feed_dict' keys. """
    return (
        Pipeline(config=pipeline_config)
        .init_variable('loss_history', [])
        # Scale uint8 pixels into [0, 1] floats before feeding the model.
        .multiply(multiplier=1/255., preserve_type=False)
        .to_array()
        # The model class and its config are resolved from the pipeline config at run time.
        .init_model('dynamic', C('model'), 'MODEL', config=C('model_config'))
        # Append the per-batch loss to the pipeline variable.
        .train_model('MODEL', fetches='loss',
                     feed_dict=C('feed_dict'),
                     save_to=V('loss_history', mode='a'))
    )
In [ ]:
def run(task, model_class, config, description, batch_size=16, n_iters=10):
    """ Train `model_class` on MNIST for a few iterations and report progress.

    Parameters
    ----------
    task : str
        Task name: anything starting with 'c' is treated as classification,
        anything starting with 's' as segmentation.
    model_class : class
        Model class to instantiate inside the pipeline.
    config : dict
        Extra model config merged over the task defaults.
    description : str
        Human-readable label printed when the run finishes.
    batch_size : int
        Number of items per training batch.
    n_iters : int
        Number of training iterations.

    Returns
    -------
    Pipeline
        The executed training pipeline; per-batch losses are stored in its
        'loss_history' variable.

    Raises
    ------
    ValueError
        If `task` names neither a classification nor a segmentation task.
    """
    if task.startswith('c'):
        pipeline_config = get_classification_config(model_class, config)
    elif task.startswith('s'):
        pipeline_config = get_segmentation_config(model_class, config)
    else:
        # Previously an unrecognized task fell through and raised an obscure
        # NameError on `pipeline_config`; fail fast with a clear message instead.
        raise ValueError('Unknown task: {}'.format(task))

    train_pipeline = get_pipeline(pipeline_config) << mnist.train
    _ = train_pipeline.run(batch_size, n_iters=n_iters, bar=True,
                           bar_desc=W(V('loss_history')[-1].format('Loss is {:7.7}')))
    print('{} {} is done'.format(task, description))
    return train_pipeline
In [ ]:
# Plain fully-connected classifier built from the generic TFModel:
# two dense+activation pairs in the initial block and body, dense head.
config = {
    'initial_block': {'layout': 'fa' * 2, 'units': [64, 128]},
    'body': {'layout': 'fa' * 2, 'units': [256, 512]},
    'head': {'layout': 'faf', 'units': [600, 10]},
}
ppl = run('classification', TFModel, config, 'simple fc')
In [ ]:
# Small Xception with a single fully-connected head layer.
config = {'head/layout': 'f'}
ppl = run('classification', XceptionS, config, 'Xception')
In [ ]:
# MobileNet v3 (small) with its stock architecture config.
ppl = run('classification', MobileNet_v3_small, {}, 'MobileNet_v3_small')
In [ ]:
# Shrunken UNet: few filters and a shallow 3-stage encoder keep the run fast.
config = {
    'initial_block/filters': 4,
    'body/encoder': {'num_stages': 3},
}
ppl = run('segmentation', UNet, config, 'unet')
In [ ]:
# Encoder-decoder whose encoder follows the ResNet18 layout with tiny filter counts.
config = {
    'initial_block': {'layout': 'cna', 'filters': 2},
    'body/encoder': {'base': ResNet18, 'filters': [4] * 4},
    'body/embedding': [{'layout': 'cna', 'filters': 16}] * 4,
}
ppl = run('segmentation', EncoderDecoder, config, 'encoder-decoder with ResNet18 backbone')
In [ ]:
# Heterogeneous encoder-decoder: ResNet encoder, DenseNet blocks in the decoder.
config = {
    'initial_block': {'layout': 'cna', 'filters': 4},
    'body/encoder': {'base': ResNet,
                     'num_blocks': [2, 2, 2, 2, 2],
                     'filters': [2, 4, 8, 16, 32]},
    'body/embedding': {'filters': 32},
    'body/decoder': {'num_stages': 5,
                     'factor': 32,
                     'skip': True,
                     'upsample': {'layout': 'X'},
                     'blocks': {'base': DenseNet.block,
                                'num_layers': [2, 2, 2, 2, 2],
                                'growth_rate': 2,
                                'skip': False}},
}
ppl = run('segmentation', EncoderDecoder, config, 'encoder-decoder with ResNet, DenseNet blocks')

# Same setup, but switch the encoder's residual blocks to their ResNeXt variant.
config['body/encoder/block/resnext'] = True
ppl = run('segmentation', EncoderDecoder, config, 'encoder-decoder with ResNeXt, DenseNet blocks')
In [ ]:
# DeepLab (XS variant) with its stock architecture config.
ppl = run('segmentation', DeepLabXS, {}, 'DeepLab XS')