In [120]:
from __future__ import print_function

%matplotlib inline

import sys
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.contrib.framework.python.ops import variables

from collections import OrderedDict

import numpy as np
import scipy.misc
from input import *
from model import *
from train import *
import utils
import matplotlib.pyplot as plt
tf.reset_default_graph()
NB_CLASSES = 151

In [121]:
iterator, filename = get_train_inputs(batch_size=1,
                                      repeat=True,
                                      num_classes=1)

In [147]:
_show = True
if _show:
    with tf.Session() as sess:
        sess.run(iterator.initializer, feed_dict={filename: ['data/adek20_training.tfecord']})
        next_element = iterator.get_next()
        i = 1
        while i < 10:
            i += 1
            print("*"*10)
            image, label = sess.run(next_element)
            print(type(image))
            print(image.shape, label.shape)
            plt.imshow(np.uint8(image))
            plt.imshow(label[:,:,0], cmap='jet', alpha=0.5)
            plt.show()


**********
<class 'numpy.ndarray'>
(512, 683, 3) (512, 683, 1)
**********
<class 'numpy.ndarray'>
(512, 711, 3) (512, 711, 1)
**********
<class 'numpy.ndarray'>
(512, 683, 3) (512, 683, 1)
**********
<class 'numpy.ndarray'>
(251, 384, 3) (251, 384, 1)
**********
<class 'numpy.ndarray'>
(512, 683, 3) (512, 683, 1)
**********
<class 'numpy.ndarray'>
(973, 512, 3) (973, 512, 1)
**********
<class 'numpy.ndarray'>
(735, 512, 3) (735, 512, 1)
**********
<class 'numpy.ndarray'>
(512, 538, 3) (512, 538, 1)
**********
<class 'numpy.ndarray'>
(512, 683, 3) (512, 683, 1)

In [160]:
if _show:
    with tf.Session() as sess:
        sess.run(iterator.initializer, feed_dict={filename: ['data/adek20_training.tfecord']})
        img, _ = iterator.get_next()
        print(img)
        img = img.set_shape([500,200,3])
        print(img)


Tensor("IteratorGetNext_15:0", shape=(?, ?, 3), dtype=uint8)
None

In [123]:
# Model
encoder = SlimModelEncoder(name="vgg_16", num_classes=NB_CLASSES, is_training=True)
image, label = iterator.get_next()
image = tf.to_float(image)
restore_fn, end_points = encoder.build(image=image)
decoder = FCNDecoder(end_points, nb_classes=NB_CLASSES, scope='decoder')

tensors_to_connect = OrderedDict()
tensors_to_connect["vgg_16/fc8"] = (2,2)
tensors_to_connect['vgg_16/pool4'] = (2,2)
tensors_to_connect['vgg_16/pool3'] = (8,8)
net = decoder.build(tensors_to_connect)

# Train
trainer = Trainer(nb_classes=NB_CLASSES, optimizer=tf.train.AdamOptimizer, learning_rate=1e-4)
trainer.build(predictions=net, labels=label)
trainer.train(iterator,
              restore_fn=restore_fn,
              number_of_steps=2500,
              filename=['data/adek20_training.tfecord'])


restore following variables: [<tf.Variable 'vgg_16/conv1/conv1_1/weights:0' shape=(3, 3, 3, 64) dtype=float32_ref>, <tf.Variable 'vgg_16/conv1/conv1_1/biases:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv1/conv1_2/weights:0' shape=(3, 3, 64, 64) dtype=float32_ref>, <tf.Variable 'vgg_16/conv1/conv1_2/biases:0' shape=(64,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv2/conv2_1/weights:0' shape=(3, 3, 64, 128) dtype=float32_ref>, <tf.Variable 'vgg_16/conv2/conv2_1/biases:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv2/conv2_2/weights:0' shape=(3, 3, 128, 128) dtype=float32_ref>, <tf.Variable 'vgg_16/conv2/conv2_2/biases:0' shape=(128,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv3/conv3_1/weights:0' shape=(3, 3, 128, 256) dtype=float32_ref>, <tf.Variable 'vgg_16/conv3/conv3_1/biases:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv3/conv3_2/weights:0' shape=(3, 3, 256, 256) dtype=float32_ref>, <tf.Variable 'vgg_16/conv3/conv3_2/biases:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv3/conv3_3/weights:0' shape=(3, 3, 256, 256) dtype=float32_ref>, <tf.Variable 'vgg_16/conv3/conv3_3/biases:0' shape=(256,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv4/conv4_1/weights:0' shape=(3, 3, 256, 512) dtype=float32_ref>, <tf.Variable 'vgg_16/conv4/conv4_1/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv4/conv4_2/weights:0' shape=(3, 3, 512, 512) dtype=float32_ref>, <tf.Variable 'vgg_16/conv4/conv4_2/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv4/conv4_3/weights:0' shape=(3, 3, 512, 512) dtype=float32_ref>, <tf.Variable 'vgg_16/conv4/conv4_3/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv5/conv5_1/weights:0' shape=(3, 3, 512, 512) dtype=float32_ref>, <tf.Variable 'vgg_16/conv5/conv5_1/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv5/conv5_2/weights:0' shape=(3, 3, 512, 512) dtype=float32_ref>, <tf.Variable 'vgg_16/conv5/conv5_2/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'vgg_16/conv5/conv5_3/weights:0' shape=(3, 3, 512, 512) dtype=float32_ref>, <tf.Variable 'vgg_16/conv5/conv5_3/biases:0' shape=(512,) dtype=float32_ref>, <tf.Variable 'vgg_16/fc6/weights:0' shape=(7, 7, 512, 4096) dtype=float32_ref>, <tf.Variable 'vgg_16/fc6/biases:0' shape=(4096,) dtype=float32_ref>, <tf.Variable 'vgg_16/fc7/weights:0' shape=(1, 1, 4096, 4096) dtype=float32_ref>, <tf.Variable 'vgg_16/fc7/biases:0' shape=(4096,) dtype=float32_ref>]

In [91]:
# _image_path, _label_path = "data/images/training/", "data/annotations_sceneparsing/training/"
# _file_path = 'data/images/training.txt'
# image_names = [img.split('/')[1] for img in pd.read_csv(_file_path, header=None)[0].tolist()]
# image_paths = [_image_path + image_name for image_name in image_names]
# label_paths = [_label_path + image_name.replace(".jpg",".png") for image_name in image_names]
# i = 67
# image_path, label_path = image_paths[i], label_paths[i]
# image = scipy.misc.imread(image_path, mode='RGB')
# gt_image = scipy.misc.imread(label_path, mode='RGB')
# gt_image = gt_image[:, :, 0]
# plt.imshow(image)
# plt.show()
# plt.imshow(gt_image)
# plt.show()
# np.unique(gt_image)


Out[91]:
array([  0,   1,   6,  11,  15,  28,  48,  71,  82,  90, 135], dtype=uint8)

In [144]:
gt_image


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-144-cef88ff2f3bd> in <module>()
----> 1 gt_image.get_shape()

AttributeError: 'numpy.ndarray' object has no attribute 'get_shape'

In [90]:
_show = np.zeros_like(gt_image)

idx=(gt_image==0)
_show[idx]=gt_image[idx]
plt.imshow(_show)


Out[90]:
<matplotlib.image.AxesImage at 0x11c0b2a20>

In [130]:
oh = tf.one_hot(gt_image,depth=151)

In [131]:
with tf.Session() as sess: oh_ = sess.run(oh)

In [132]:
oh_.shape, gt_image.shape


Out[132]:
((512, 683, 151), (512, 683))

In [136]:
a = tf.constant([1,2])

In [141]:
with tf.Session as sess: sess.run(a)


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-141-433c78020d99> in <module>()
----> 1 with tf.Session as sess: sess.run(a)

AttributeError: __enter__

In [139]:
a.eval()


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-139-dc3247d68d8e> in <module>()
----> 1 a.eval()

/Users/denizzorlu/anaconda2/envs/py3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in eval(self, feed_dict, session)
    539 
    540     """
--> 541     return _eval_using_default_session(self, feed_dict, self.graph, session)
    542 
    543 

/Users/denizzorlu/anaconda2/envs/py3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _eval_using_default_session(tensors, feed_dict, graph, session)
   4069     session = get_default_session()
   4070     if session is None:
-> 4071       raise ValueError("Cannot evaluate tensor using `eval()`: No default "
   4072                        "session is registered. Use `with "
   4073                        "sess.as_default()` or pass an explicit session to "

ValueError: Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`

In [ ]: