In [1]:
from __future__ import print_function
%matplotlib inline  
import matplotlib.pyplot as plt

import cv2
import numpy as np
import os

In [2]:
class DataSource(object):
    def __init__(self, imageSource, path=''):
        print('Reading images.')
        self.readImages(imageSource, path)
        print('Reading images --- DONE.')

    def readImages(self, imageSource, path=''):
        self.images = []
        with open(imageSource, 'r') as f:
            for line in f:
                line = line.strip()
                base = os.path.join(path, line)
                image = cv2.imread(base + '_RGB.png')
                labelMask = cv2.imread(base + '_label.tif')
                if image is None or labelMask is None:
                    # cv2.imread signals failure by returning None, not by raising
                    print('ERROR: Unable to read image or label file', base)
                    continue
                # pack the three thresholded label channels into a single
                # 3-bit class index in [0, 7]
                labelMask = np.uint8(labelMask > 128)
                labelMask = labelMask[:, :, 0] + labelMask[:, :, 1] * 2 + labelMask[:, :, 2] * 4
                self.images.append((image, labelMask))

    def updateCrops(self, resolution=128, sDev=0.05, cropsPerImage=200):
        self.crops = []
        for image, labels in self.images:
            for i in range(cropsPerImage):
                while True:
                    dst = np.array([
                        [0, 0],
                        [resolution - 1, 0],
                        [resolution - 1, resolution - 1],
                        [0, resolution - 1]], dtype=np.float32)

                    # center the source patch around 0
                    src = dst - resolution / 2

                    # random patch rotation
                    angle = np.random.uniform() * np.pi * 2
                    R = np.array([
                        [np.cos(angle), -np.sin(angle)],
                        [np.sin(angle),  np.cos(angle)]
                        ], dtype=np.float32)
                    src = src.dot(R)   

                    # Shift the crop.
                    position = np.array([
                        [np.random.randint(image.shape[1]), np.random.randint(image.shape[0])]
                        ], dtype=np.float32)
                    src += position

                    # add small random corner position noise
                    noise = resolution * sDev * np.random.normal(size=dst.shape).astype(np.float32)
                    src = src + noise

                    # accept only crops that lie fully inside the image
                    if np.all(src >= 0) and np.all(src[:, 1] < image.shape[0]) and np.all(src[:, 0] < image.shape[1]):
                        break

                # compute transformation
                T = cv2.getPerspectiveTransform(src, dst)
                croppedImage = cv2.warpPerspective(  # interpolate image
                    image, T, (resolution, resolution),
                    flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

                croppedLabels = cv2.warpPerspective(  # DON'T interpolate labels
                    labels, T, (resolution, resolution),
                    flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT)

                self.crops.append((croppedImage, croppedLabels[:, :, np.newaxis], src))
                    
    def getBatch(self, count=32):
        # sample indices from the crops, not from the source images
        idx = np.random.choice(len(self.crops), count)
        data = [self.crops[i][0] for i in idx]
        labels = [self.crops[i][1] for i in idx]
        data = np.stack(data)
        labels = np.stack(labels)

        return data, labels
    
    def getAllCrops(self):
        
        data = [c[0] for c in self.crops]
        labels = [c[1] for c in self.crops]
        data = np.stack(data)
        labels = np.stack(labels)
        
        return data, labels
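
Note on the label encoding in readImages: the three thresholded color channels of the ground-truth mask are packed into a single 3-bit class index, so every pixel gets an integer label in [0, 7]. A minimal round-trip sketch (the 2x2 mask values here are made up for illustration):

mask = np.array([[[255, 0, 0], [0, 255, 0]],
                 [[0, 0, 255], [255, 255, 0]]], dtype=np.uint8)

bits = np.uint8(mask > 128)                 # binarize each channel
classes = bits[:, :, 0] + bits[:, :, 1] * 2 + bits[:, :, 2] * 4
print(classes)                              # [[1 2]
                                            #  [4 3]]

# decoding: recover the three bits from the class index
decoded = np.stack([classes % 2, (classes // 2) % 2, (classes // 4) % 2], axis=-1)
assert np.array_equal(decoded, bits)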

In [3]:
sourceTrn = DataSource('files.trn', path='')
sourceVal = DataSource('files.tst', path='')
sourceTrn.updateCrops(resolution=128, sDev=0.05, cropsPerImage=20)


Reading images.
Reading images --- DONE.
Reading images.
Reading images --- DONE.

In [4]:
i = 0

In [6]:
plt.imshow(sourceTrn.crops[i][0][:, :, ::-1])  # BGR -> RGB for matplotlib
print(sourceTrn.crops[i][2].T, sourceTrn.crops[i][0].shape)
i += 1


[[ 449.59146118  465.05001831  575.74841309  575.72717285]
 [ 582.34753418  450.16619873  458.43762207  578.89880371]] (128, 128, 3)

In [7]:
from keras.layers import Input, Reshape, Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Activation, Conv2D, MaxPooling2D, PReLU, UpSampling2D
from keras.layers import Concatenate
from keras.models import Model
from keras import regularizers


Using TensorFlow backend.

In [8]:
def build_VGG_block(net, channels, layers, prefix):
    # plain VGG-style block without batch norm (unused below, kept for reference)
    for i in range(layers):
        net = Conv2D(channels, 3, padding='same',
                     name='{}.{}'.format(prefix, i))(net)
        net = PReLU(shared_axes=[1, 2])(net)
    return net

def build_VGG_Bnorm_block(net, channels, layers, prefix):
    for i in range(layers):
        net = Conv2D(channels, 3, padding='same',
                    name='{}.{}'.format(prefix, i))(net)
        net = BatchNormalization()(net)
        net = PReLU(shared_axes=[1, 2])(net)
    return net

def build_hourglass(input_data, block_channels=[16, 32, 64], block_layers=[2, 2, 3]):

    net = input_data
    bypasses = []
    for i, (cCount, lCount) in enumerate(zip(block_channels, block_layers)):
        net = build_VGG_Bnorm_block(net, cCount, lCount, 'conv{}'.format(i))
        if i != len(block_channels)-1:
            bypasses.append(net)
            net = MaxPooling2D(2, 2, padding="same")(net)
        net = Dropout(rate=0.2)(net)
        
    bypasses = list(reversed(bypasses))

    for i, (cCount, lCount) in enumerate(zip(reversed(block_channels), reversed(block_layers))):
        if i > 0:
            net = UpSampling2D(size=(2, 2))(net)
            net = Concatenate()([net, bypasses.pop(0)])
        net = build_VGG_Bnorm_block(net, cCount, lCount, 'decoder_conv{}'.format(i))
        
    net = Conv2D(8, 3, activation='softmax', padding='same', name='classProb')(net)
    net = Reshape((-1, 8))(net)
    return net
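
The trailing Reshape flattens the per-pixel softmax to (batch, H*W, 8) so that it lines up with integer label maps flattened to (batch, H*W, 1), which is what sparse_categorical_crossentropy (used below) expects. A quick shape check, under the assumption of a small two-block configuration:

probe = Input(shape=(128, 128, 3))
probeModel = Model(inputs=[probe],
                   outputs=[build_hourglass(probe, block_channels=[16, 32], block_layers=[1, 1])])
print(probeModel.output_shape)   # (None, 16384, 8): one 8-way softmax per pixel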

In [9]:
from keras import optimizers
from keras.models import Model
from keras import losses
from keras import metrics

input_data = Input(shape=(128, 128, 3), name='data')
net = build_hourglass(input_data, block_channels=[24,48,48,48], block_layers=[2,2,2,3])
model = Model(inputs=[input_data], outputs=[net])

print('Model')
model.summary()

model.compile(
    loss=losses.sparse_categorical_crossentropy, 
    optimizer=optimizers.Adam(lr=0.001), 
    metrics=[metrics.sparse_categorical_accuracy])


Model
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
data (InputLayer)               (None, 128, 128, 3)  0                                            
__________________________________________________________________________________________________
conv0.0 (Conv2D)                (None, 128, 128, 24) 672         data[0][0]                       
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 128, 128, 24) 96          conv0.0[0][0]                    
__________________________________________________________________________________________________
p_re_lu_1 (PReLU)               (None, 128, 128, 24) 24          batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv0.1 (Conv2D)                (None, 128, 128, 24) 5208        p_re_lu_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 128, 128, 24) 96          conv0.1[0][0]                    
__________________________________________________________________________________________________
p_re_lu_2 (PReLU)               (None, 128, 128, 24) 24          batch_normalization_2[0][0]      
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 64, 64, 24)   0           p_re_lu_2[0][0]                  
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 64, 64, 24)   0           max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
conv1.0 (Conv2D)                (None, 64, 64, 48)   10416       dropout_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 64, 64, 48)   192         conv1.0[0][0]                    
__________________________________________________________________________________________________
p_re_lu_3 (PReLU)               (None, 64, 64, 48)   48          batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv1.1 (Conv2D)                (None, 64, 64, 48)   20784       p_re_lu_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 64, 64, 48)   192         conv1.1[0][0]                    
__________________________________________________________________________________________________
p_re_lu_4 (PReLU)               (None, 64, 64, 48)   48          batch_normalization_4[0][0]      
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 32, 32, 48)   0           p_re_lu_4[0][0]                  
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 32, 32, 48)   0           max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
conv2.0 (Conv2D)                (None, 32, 32, 48)   20784       dropout_2[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 32, 32, 48)   192         conv2.0[0][0]                    
__________________________________________________________________________________________________
p_re_lu_5 (PReLU)               (None, 32, 32, 48)   48          batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2.1 (Conv2D)                (None, 32, 32, 48)   20784       p_re_lu_5[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 32, 32, 48)   192         conv2.1[0][0]                    
__________________________________________________________________________________________________
p_re_lu_6 (PReLU)               (None, 32, 32, 48)   48          batch_normalization_6[0][0]      
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 16, 16, 48)   0           p_re_lu_6[0][0]                  
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 16, 16, 48)   0           max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
conv3.0 (Conv2D)                (None, 16, 16, 48)   20784       dropout_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 16, 16, 48)   192         conv3.0[0][0]                    
__________________________________________________________________________________________________
p_re_lu_7 (PReLU)               (None, 16, 16, 48)   48          batch_normalization_7[0][0]      
__________________________________________________________________________________________________
conv3.1 (Conv2D)                (None, 16, 16, 48)   20784       p_re_lu_7[0][0]                  
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 16, 16, 48)   192         conv3.1[0][0]                    
__________________________________________________________________________________________________
p_re_lu_8 (PReLU)               (None, 16, 16, 48)   48          batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv3.2 (Conv2D)                (None, 16, 16, 48)   20784       p_re_lu_8[0][0]                  
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 16, 16, 48)   192         conv3.2[0][0]                    
__________________________________________________________________________________________________
p_re_lu_9 (PReLU)               (None, 16, 16, 48)   48          batch_normalization_9[0][0]      
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 16, 16, 48)   0           p_re_lu_9[0][0]                  
__________________________________________________________________________________________________
decoder_conv0.0 (Conv2D)        (None, 16, 16, 48)   20784       dropout_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 16, 16, 48)   192         decoder_conv0.0[0][0]            
__________________________________________________________________________________________________
p_re_lu_10 (PReLU)              (None, 16, 16, 48)   48          batch_normalization_10[0][0]     
__________________________________________________________________________________________________
decoder_conv0.1 (Conv2D)        (None, 16, 16, 48)   20784       p_re_lu_10[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 16, 16, 48)   192         decoder_conv0.1[0][0]            
__________________________________________________________________________________________________
p_re_lu_11 (PReLU)              (None, 16, 16, 48)   48          batch_normalization_11[0][0]     
__________________________________________________________________________________________________
decoder_conv0.2 (Conv2D)        (None, 16, 16, 48)   20784       p_re_lu_11[0][0]                 
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 16, 16, 48)   192         decoder_conv0.2[0][0]            
__________________________________________________________________________________________________
p_re_lu_12 (PReLU)              (None, 16, 16, 48)   48          batch_normalization_12[0][0]     
__________________________________________________________________________________________________
up_sampling2d_1 (UpSampling2D)  (None, 32, 32, 48)   0           p_re_lu_12[0][0]                 
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 32, 32, 96)   0           up_sampling2d_1[0][0]            
                                                                 p_re_lu_6[0][0]                  
__________________________________________________________________________________________________
decoder_conv1.0 (Conv2D)        (None, 32, 32, 48)   41520       concatenate_1[0][0]              
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 32, 32, 48)   192         decoder_conv1.0[0][0]            
__________________________________________________________________________________________________
p_re_lu_13 (PReLU)              (None, 32, 32, 48)   48          batch_normalization_13[0][0]     
__________________________________________________________________________________________________
decoder_conv1.1 (Conv2D)        (None, 32, 32, 48)   20784       p_re_lu_13[0][0]                 
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 32, 32, 48)   192         decoder_conv1.1[0][0]            
__________________________________________________________________________________________________
p_re_lu_14 (PReLU)              (None, 32, 32, 48)   48          batch_normalization_14[0][0]     
__________________________________________________________________________________________________
up_sampling2d_2 (UpSampling2D)  (None, 64, 64, 48)   0           p_re_lu_14[0][0]                 
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 64, 64, 96)   0           up_sampling2d_2[0][0]            
                                                                 p_re_lu_4[0][0]                  
__________________________________________________________________________________________________
decoder_conv2.0 (Conv2D)        (None, 64, 64, 48)   41520       concatenate_2[0][0]              
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 64, 64, 48)   192         decoder_conv2.0[0][0]            
__________________________________________________________________________________________________
p_re_lu_15 (PReLU)              (None, 64, 64, 48)   48          batch_normalization_15[0][0]     
__________________________________________________________________________________________________
decoder_conv2.1 (Conv2D)        (None, 64, 64, 48)   20784       p_re_lu_15[0][0]                 
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 64, 64, 48)   192         decoder_conv2.1[0][0]            
__________________________________________________________________________________________________
p_re_lu_16 (PReLU)              (None, 64, 64, 48)   48          batch_normalization_16[0][0]     
__________________________________________________________________________________________________
up_sampling2d_3 (UpSampling2D)  (None, 128, 128, 48) 0           p_re_lu_16[0][0]                 
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 128, 128, 72) 0           up_sampling2d_3[0][0]            
                                                                 p_re_lu_2[0][0]                  
__________________________________________________________________________________________________
decoder_conv3.0 (Conv2D)        (None, 128, 128, 24) 15576       concatenate_3[0][0]              
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 128, 128, 24) 96          decoder_conv3.0[0][0]            
__________________________________________________________________________________________________
p_re_lu_17 (PReLU)              (None, 128, 128, 24) 24          batch_normalization_17[0][0]     
__________________________________________________________________________________________________
decoder_conv3.1 (Conv2D)        (None, 128, 128, 24) 5208        p_re_lu_17[0][0]                 
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 128, 128, 24) 96          decoder_conv3.1[0][0]            
__________________________________________________________________________________________________
p_re_lu_18 (PReLU)              (None, 128, 128, 24) 24          batch_normalization_18[0][0]     
__________________________________________________________________________________________________
classProb (Conv2D)              (None, 128, 128, 8)  1736        p_re_lu_18[0][0]                 
__________________________________________________________________________________________________
reshape_1 (Reshape)             (None, 16384, 8)     0           classProb[0][0]                  
==================================================================================================
Total params: 354,320
Trainable params: 352,784
Non-trainable params: 1,536
__________________________________________________________________________________________________
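
As a sanity check on the summary, a 3x3 Conv2D with bias has in_channels * 9 * out_channels + out_channels parameters; for conv0.0 that is 3 * 9 * 24 + 24 = 672, matching the table. In code:

def conv2d_params(c_in, c_out, k=3):
    # k*k*c_in weights per output channel, plus one bias each
    return k * k * c_in * c_out + c_out

print(conv2d_params(3, 24))    # 672   -> conv0.0
print(conv2d_params(96, 48))   # 41520 -> decoder_conv1.0 (96 channels after concatenation)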

In [10]:
sourceVal.updateCrops(resolution=128, sDev=0.05, cropsPerImage=50)
val_img, val_labels = sourceVal.getAllCrops()
# flatten label maps to (batch, H*W, 1) to match the model's (batch, H*W, 8) output
val_labels = val_labels.reshape(val_labels.shape[0], -1, 1)

In [ ]:
for i in range(1000):
    # regenerate a fresh set of random training crops for every epoch
    sourceTrn.updateCrops(resolution=128, sDev=0.05, cropsPerImage=200)
    trn_img, trn_labels = sourceTrn.getAllCrops()
    trn_labels = trn_labels.reshape(trn_labels.shape[0], -1, 1)
    # epochs=i+1 with initial_epoch=i runs exactly one epoch per iteration
    model.fit(x=trn_img, y=trn_labels, batch_size=8, epochs=i+1,
              validation_data=(val_img, val_labels), shuffle=True, initial_epoch=i)


Train on 4200 samples, validate on 150 samples
Epoch 1/1
4200/4200 [==============================] - 26s 6ms/step - loss: 0.6770 - sparse_categorical_accuracy: 0.7443 - val_loss: 0.7700 - val_sparse_categorical_accuracy: 0.6960
Train on 4200 samples, validate on 150 samples
Epoch 2/2
4200/4200 [==============================] - 26s 6ms/step - loss: 0.6390 - sparse_categorical_accuracy: 0.7580 - val_loss: 0.8497 - val_sparse_categorical_accuracy: 0.6542
Train on 4200 samples, validate on 150 samples
Epoch 3/3
1880/4200 [============>.................] - ETA: 14s - loss: 0.6107 - sparse_categorical_accuracy: 0.7696
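
The loop above leans on the epochs=i+1 / initial_epoch=i trick to run exactly one epoch per freshly cropped set. A keras.utils.Sequence expresses the same idea more directly by re-cropping in on_epoch_end; a minimal sketch (CropSequence is a made-up name, not part of this notebook):

from keras.utils import Sequence

class CropSequence(Sequence):
    """Serves random crop batches and regenerates the crops after every epoch."""
    def __init__(self, source, batchSize=8):
        self.source = source
        self.batchSize = batchSize
        self.source.updateCrops(resolution=128, sDev=0.05, cropsPerImage=200)

    def __len__(self):
        return len(self.source.crops) // self.batchSize

    def __getitem__(self, idx):
        # idx is ignored; batches are drawn at random from the current crops
        data, labels = self.source.getBatch(self.batchSize)
        return data, labels.reshape(labels.shape[0], -1, 1)

    def on_epoch_end(self):
        # draw a completely new set of augmented crops for the next epoch
        self.source.updateCrops(resolution=128, sDev=0.05, cropsPerImage=200)

# model.fit_generator(CropSequence(sourceTrn), epochs=1000,
#                     validation_data=(val_img, val_labels))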

In [12]:
model.save_weights('model.hdf5')
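
save_weights stores only the parameter tensors, which is why the architecture is rebuilt below before load_weights. If the graph should travel with the weights, model.save / load_model persists both; a minimal sketch:

from keras.models import load_model

model.save('model_full.hdf5')             # architecture + weights + optimizer state
restored = load_model('model_full.hdf5')  # no need to rebuild the graph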

In [13]:
import keras

In [14]:
input_data = Input(shape=(736, 736, 3), name='data')
net = build_hourglass(input_data, block_channels=[24,48,48,48], block_layers=[2,2,2,3])
testModel = Model(inputs=[input_data], outputs=[net])
testModel.load_weights('model.hdf5')
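
The network is fully convolutional, so weights trained on 128x128 crops load directly into the 736x736 model. The one constraint is that the input side must survive the three 2x poolings and come back to the same size after upsampling, i.e. be divisible by 2**3 = 8 (736 = 8 * 92). A small guard, assuming the four-block configuration used here:

def check_input_side(side, pool_steps=3):
    # each MaxPooling2D halves the grid and each UpSampling2D doubles it,
    # so shapes only line back up when side is divisible by 2**pool_steps
    assert side % (2 ** pool_steps) == 0, 'side must be divisible by %d' % (2 ** pool_steps)

check_input_side(736)   # OK
check_input_side(128)   # OK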

In [15]:
with open('./testImages.txt', 'r') as f:
    for line in f:
        line = line.strip()
        img = cv2.imread(line + '_RGB.png').astype(np.float32)[np.newaxis, :736, :736, :]
        
        out = testModel.predict(img).reshape(1, 736, 736, 8)
        print(out.shape)
        # boolean mask marking the most probable class at each pixel
        best = np.max(out, axis=3)[..., np.newaxis]
        out = out == best
        labels = np.zeros(img.shape[1:], dtype=np.uint8)
        for i in range(out.shape[3]):
            # unpack the 3-bit class index back into its R, G, B bits
            R = i % 2
            G = (i // 2) % 2
            B = (i // 4) % 2
            color = np.asarray([R, G, B]) * 255
            labels[out[0, :, :, i]] = color.reshape(1, 1, 3)
        plt.figure(figsize=(15,15))
        plt.subplot(1, 2, 1)
        plt.imshow(labels)
        plt.subplot(1, 2, 2)
        plt.imshow(img[0, :, :, ::-1] / 255)
        plt.show()


(1, 736, 736, 8)
(1, 736, 736, 8)
(1, 736, 736, 8)
(1, 736, 736, 8)
(1, 736, 736, 8)
(1, 736, 736, 8)
(1, 736, 736, 8)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-15-4750de0a6b52> in <module>()
     22         plt.subplot(1, 2, 2)
     23         plt.imshow(img[0, :, :, ::-1] / 255)
---> 24         plt.show()

(matplotlib rendering frames omitted; the loop was interrupted manually while drawing a figure)

KeyboardInterrupt: 
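
The max-and-compare decoding above can mark a pixel in several channels when probabilities tie; an argmax-based decode avoids that and the per-class loop. A minimal sketch, starting from the reshaped prediction out before the comparison step:

classIdx = np.argmax(out[0], axis=-1)                    # (736, 736), class per pixel
labels = np.stack([classIdx % 2,                         # R bit
                   (classIdx // 2) % 2,                  # G bit
                   (classIdx // 4) % 2], axis=-1).astype(np.uint8) * 255
plt.imshow(labels)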

In [ ]: