In [1]:
import sys
sys.path.append('..')

from deepgraph.utils.logging import log
from deepgraph.utils.common import batch_parallel, ConfigMixin, shuffle_in_unison_inplace, pickle_dump
from deepgraph.utils.image import batch_pad_mirror
from deepgraph.constants import *
from deepgraph.conf import rng

from deepgraph.pipeline import Processor, Packet


Using gpu device 0: GeForce GTX TITAN X (CNMeM is enabled with initial size: 90.0% of memory, CuDNN 3007)

  _____                _____                 _
 |  _  \              |  __ \               | |
 | | | |___  ___ _ __ | |  \/_ __ __ _ _ __ | |__
 | | | / _ \/ _ \ '_ \| | __| '__/ _` | '_ \| '_ \
 | |/ /  __/  __/ |_) | |_\ \ | | (_| | |_) | | | |
 |___/ \___|\___| .__/ \____/_|  \__,_| .__/|_| |_|
                | |                   | |
                |_|                   |_|


Available on GitHub: https://github.com/sebastian-schlecht/deepgraph


In [2]:
from deepgraph.nn.init import *
class Transformer(Processor):
    """
    Apply online random augmentation.

    Pipeline stage: pulls ``(data, label)`` packets from the upstream
    stage, normalizes them (mean subtraction, optional scale and label
    offset) and, for training packets only, applies a random horizontal
    flip and per-channel color jitter before pushing the result to the
    next stage.  Rotation and random cropping are currently disabled
    (commented out below).
    """
    def __init__(self, name, shapes, config, buffer_size=10):
        super(Transformer, self).__init__(name, shapes, config, buffer_size)
        # Per-pixel mean image; loaded lazily in init() when "mean_file" is set.
        self.mean = None

    def init(self):
        # One-time setup hook: load the dataset mean from disk if configured.
        # NOTE(review): `np` is not imported in this cell — presumably it
        # leaks in via one of the wildcard imports above; confirm.
        if self.conf("mean_file") is not None:
            self.mean = np.load(self.conf("mean_file"))
        else:
            log("Transformer - No mean file specified.", LOG_LEVEL_WARNING)

    def process(self):
        # Pull one packet from the upstream queue; a falsy packet means the
        # producer is done, so signal the pipeline to stop this stage.
        packet = self.pull()
        # Return if no data is there
        if not packet:
            return False
        # Unpack
        data, label = packet.data
        # Do processing
        log("Transformer - Processing data", LOG_LEVEL_VERBOSE)

        # Target spatial size of the augmented batch.
        # NOTE(review): hard-coded although the constructor receives `shapes`
        # — keep in sync with the graph's (240, 320) input resolution.
        h = 240
        w = 320

        start = time.time()
        # Mean
        if packet.phase == PHASE_TRAIN or packet.phase == PHASE_VAL:
            data = data.astype(np.float32)
            if self.mean is not None:
                std = self.conf("std")
                for idx in range(data.shape[0]):
                    # Subtract mean
                    data[idx] = data[idx] - self.mean.astype(np.float32)
                    if std is not None:
                        # NOTE(review): multiplies by "std" rather than
                        # dividing — the config value is presumably a
                        # reciprocal scale factor; confirm.
                        data[idx] =  data[idx] * std
            if self.conf("offset") is not None:
                label -= self.conf("offset")

        if packet.phase == PHASE_TRAIN:
             # Do elementwise operations
            data_old = data
            label_old = label
            # Fresh output buffers at the target resolution; the per-sample
            # loop below copies (and optionally flips/jitters) into them.
            data = np.zeros((data_old.shape[0], data_old.shape[1], h, w), dtype=np.float32)
            label = np.zeros((label_old.shape[0], h, w), dtype=np.float32)
            for idx in range(data.shape[0]):
                # Rotate
                # We rotate before cropping to be able to get filled corners
                # Maybe even adjust the border after rotating
                # NOTE(review): `deg` is currently unused — the rotation
                # calls below are commented out.
                deg = np.random.randint(-5,6)
                # Operate on old data. Careful - data is already in float so we need to normalize and rescale afterwards
                # data_old[idx] = 255. * rotate_transformer_rgb_uint8(data_old[idx] * 0.003921568627, deg).astype(np.float32)
                # label_old[idx] = rotate_transformer_scalar_float32(label_old[idx], deg)
                
                # Take care of any empty areas, we crop on a smaller surface depending on the angle
                # TODO Remove this once loss supports masking
                shift = 0 #np.tan((deg/180.) * math.pi)
                # Random crops
                #cy = rng.randint(data_old.shape[2] - h - shift, size=1)
                #cx = rng.randint(data_old.shape[3] - w - shift, size=1)

                data[idx] = data_old[idx]
                label[idx] = label_old[idx]

                # Flip horizontally with probability 0.5
                p = rng.randint(2)
                if p > 0:
                    # Reverse the last (width) axis of image and depth map
                    # together so they stay aligned.
                    data[idx] = data[idx, :, :, ::-1]
                    label[idx] = label[idx, :, ::-1]

                # RGB we mult with a random value between 0.8 and 1.2
                r = rng.randint(80,121) / 100.
                g = rng.randint(80,121) / 100.
                b = rng.randint(80,121) / 100.
                data[idx, 0] = data[idx, 0] * r
                data[idx, 1] = data[idx, 1] * g
                data[idx, 2] = data[idx, 2] * b
                
            # Shuffle
            data, label = shuffle_in_unison_inplace(data, label)
            
        elif packet.phase == PHASE_VAL:
            # Center crop
            pass
            #cy = (data.shape[2] - h) // 2
            #cx = (data.shape[3] - w) // 2
            #data = data[:, :, cy:cy+h, cx:cx+w]
            #label = label[:, cy:cy+h, cx:cx+w]
            
        end = time.time()
        log("Transformer - Processing took " + str(end - start) + " seconds.", LOG_LEVEL_VERBOSE)
        # Try to push into queue as long as thread should not terminate
        self.push(Packet(identifier=packet.id, phase=packet.phase, num=2, data=(data, label)))
        return True

    def setup_defaults(self):
        # Register config keys with defaults so conf() lookups are valid
        # even when the caller omits them.
        super(Transformer, self).setup_defaults()
        self.conf_default("mean_file", None)
        self.conf_default("offset", None)
        self.conf_default("std", 1.0)

In [ ]:
from theano.tensor.nnet import relu

from deepgraph.graph import *
from deepgraph.nn.core import *
from deepgraph.nn.conv import *
from deepgraph.nn.loss import *
from deepgraph.solver import *
from deepgraph.nn.init import *

from deepgraph.pipeline import Optimizer, H5DBLoader, Pipeline


def _relu_conv(graph, name, channels):
    """3x3 same-padding conv with ReLU activation and relu-gain Xavier init."""
    return Conv2D(
        graph,
        name,
        config={
            "channels": channels,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )


def _proj_conv(graph, name, channels):
    """3x3 conv placed directly after an upsample node.

    Mirrors the original configs exactly: integer ``border_mode`` and no
    ``"activation"`` key at all (the library default then applies —
    NOTE(review): confirm the Conv2D default activation is what was
    intended for these projection convs).
    """
    return Conv2D(
        graph,
        name,
        config={
            "channels": channels,
            "kernel": (3, 3),
            "border_mode": 1,
            "weight_filler": xavier(),
            "bias_filler": constant(0)
        }
    )


def _pool(graph, name):
    """2x2 pooling stage (library default pooling operation)."""
    return Pool(graph, name, config={
        "kernel": (2, 2)
    })


def _up(graph, name, **extra):
    """2x2 upsampling stage; ``extra`` carries per-node config additions
    (used by up_20 for its ``"mode": "constant"`` entry)."""
    config = {"kernel": (2, 2)}
    config.update(extra)
    return Upsample(graph, name, config=config)


def _concat(graph, name):
    """Channel-axis concatenation used for the U-net skip connections."""
    return Concatenate(graph, name, config={
        "axis": 1
    })


def build_u_graph():
    """
    Build the U-shaped depth-prediction graph.

    Contractive path: conv/conv/pool stages doubling channels 64→512.
    Bottleneck: flatten → two dense layers with dropout → reshape back
    to a (64, 15, 20) feature map.
    Expansive path: upsample → projection conv → skip-concat → two ReLU
    convs per resolution, ending in a 1x1 linear conv producing one
    depth channel.  Trained with a Euclidean loss; RMSE is reported.

    Returns:
        Graph: the fully wired "u_depth" graph, ready for the Optimizer.
    """
    graph = Graph("u_depth")

    """
    Inputs
    """
    data = Data(graph, "data", T.ftensor4, shape=(-1, 3, 240, 320))
    label = Data(graph, "label", T.ftensor3, shape=(-1, 1, 240, 320), config={
        "phase": PHASE_TRAIN
    })

    """
    Contractive part
    """
    conv_1 = _relu_conv(graph, "conv_1", 64)
    conv_2 = _relu_conv(graph, "conv_2", 64)
    pool_2 = _pool(graph, "pool_2")
    conv_3 = _relu_conv(graph, "conv_3", 128)
    conv_4 = _relu_conv(graph, "conv_4", 128)
    pool_4 = _pool(graph, "pool_4")
    conv_5 = _relu_conv(graph, "conv_5", 256)
    conv_6 = _relu_conv(graph, "conv_6", 256)
    pool_6 = _pool(graph, "pool_6")
    conv_7 = _relu_conv(graph, "conv_7", 512)
    conv_8 = _relu_conv(graph, "conv_8", 512)
    pool_8 = _pool(graph, "pool_8")

    # Bottleneck: flatten the 512x15x20 maps, pass through two dense
    # layers with dropout, and reshape back into a 64-channel map.
    fl = Flatten(graph, "fl", config={
        "dims": 2
    })
    fc_8 = Dense(graph, "fc_8", config={
        "out": 4096,
        "activation": relu,
        "weight_filler": xavier(),
        "bias_filler": constant(0.1)
    })
    dp_8 = Dropout(graph, "dp_8")
    fc_9 = Dense(graph, "fc_9", config={
        # 19200 = 64 * 15 * 20 — must match the reshape below.
        "out": 19200,
        "activation": relu,
        "weight_filler": xavier(),
        "bias_filler": constant(0.1)
    })
    dp_9 = Dropout(graph, "dp_9")
    rs_10 = Reshape(graph, "rs_10", config={
        "shape": (-1, 64, 15, 20)
    })

    """
    Expansive path
    """
    up_11 = _up(graph, "up_11")
    conv_11 = _proj_conv(graph, "conv_11", 512)
    conv_12 = _relu_conv(graph, "conv_12", 512)
    conv_13 = _relu_conv(graph, "conv_13", 512)
    up_14 = _up(graph, "up_14")
    conv_14 = _proj_conv(graph, "conv_14", 256)
    conv_15 = _relu_conv(graph, "conv_15", 256)
    conv_16 = _relu_conv(graph, "conv_16", 256)
    up_17 = _up(graph, "up_17")
    conv_17 = _proj_conv(graph, "conv_17", 128)
    conv_18 = _relu_conv(graph, "conv_18", 128)
    conv_19 = _relu_conv(graph, "conv_19", 128)
    up_20 = _up(graph, "up_20", mode="constant")
    conv_20 = _proj_conv(graph, "conv_20", 64)
    conv_21 = _relu_conv(graph, "conv_21", 64)
    conv_22 = _relu_conv(graph, "conv_22", 64)

    # Final 1x1 linear projection down to a single depth channel.
    conv_23 = Conv2D(
        graph,
        "conv_23",
        config={
            "channels": 1,
            "kernel": (1, 1),
            "activation": None,
            "weight_filler": xavier(),
            "bias_filler": constant(0)
        }
    )

    """
    Skip connections (channel-wise concatenation of contractive and
    expansive features at matching resolutions)
    """
    concat_20 = _concat(graph, "concat_20")
    concat_17 = _concat(graph, "concat_17")
    concat_14 = _concat(graph, "concat_14")
    concat_11 = _concat(graph, "concat_11")

    """
    Losses / Error
    """
    loss = EuclideanLoss(graph, "loss")

    error = MSE(graph, "mse", config={
        "root": True,
        "is_output": True,
        "phase": PHASE_TRAIN
    })


    """
    Make connections
    """
    data.connect(conv_1)
    conv_1.connect(conv_2)
    conv_2.connect(concat_20)
    conv_2.connect(pool_2)
    pool_2.connect(conv_3)
    conv_3.connect(conv_4)
    conv_4.connect(concat_17)
    conv_4.connect(pool_4)
    pool_4.connect(conv_5)
    conv_5.connect(conv_6)
    conv_6.connect(concat_14)
    conv_6.connect(pool_6)
    pool_6.connect(conv_7)
    conv_7.connect(conv_8)
    conv_8.connect(concat_11)
    conv_8.connect(pool_8)
    pool_8.connect(fl)
    fl.connect(fc_8)
    fc_8.connect(dp_8)
    dp_8.connect(fc_9)
    fc_9.connect(dp_9)
    dp_9.connect(rs_10)
    rs_10.connect(up_11)
    up_11.connect(conv_11)
    conv_11.connect(concat_11)
    concat_11.connect(conv_12)
    conv_12.connect(conv_13)
    conv_13.connect(up_14)
    up_14.connect(conv_14)
    conv_14.connect(concat_14)
    concat_14.connect(conv_15)
    conv_15.connect(conv_16)
    conv_16.connect(up_17)
    up_17.connect(conv_17)
    conv_17.connect(concat_17)
    concat_17.connect(conv_18)
    conv_18.connect(conv_19)
    conv_19.connect(up_20)
    up_20.connect(conv_20)
    conv_20.connect(concat_20)
    concat_20.connect(conv_21)
    conv_21.connect(conv_22)
    conv_22.connect(conv_23)

    conv_23.connect(loss)
    label.connect(loss)

    conv_23.connect(error)
    label.connect(error)

    return graph


if __name__ == "__main__":

    # Chunk = one pipeline transfer unit; ten mini-batches per chunk.
    batch_size = 4
    chunk_size = 10*batch_size
    # Shapes AFTER the Transformer stage: (N, 3, 240, 320) images and
    # (N, 240, 320) depth maps.
    transfer_shape = ((chunk_size, 3, 240, 320), (chunk_size, 240, 320))

    g = build_u_graph()

    # Build the training pipeline
    # Loader reads full-resolution (480x640) samples from HDF5.
    # NOTE(review): absolute, machine-specific paths below — parameterize
    # before running elsewhere.
    db_loader = H5DBLoader("db", ((chunk_size, 3, 480, 640), (chunk_size, 1, 480, 640)), config={
        "db": "/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.hdf5",
        # "db": '../data/nyu_depth_unet_large.hdf5',
        "key_data": "images",
        "key_label": "depths",
        "chunk_size": chunk_size
    })
    transformer = Transformer("tr", transfer_shape, config={
        # Measured empirically for the data-set
        # "offset": 2.7321029
        "mean_file": "/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.npy",
    })
    optimizer = Optimizer("opt", g, transfer_shape, config={
        "batch_size":  batch_size,
        "chunk_size": chunk_size,
        "learning_rate": 0.0001,# for step 1
        # "learning_rate": 0.00001, # for step 2
        "momentum": 0.9,
        "weight_decay": 0.0005,
        "print_freq": 20,
        "save_freq": 15000,
        # Warm-start from a previous run's snapshot; snapshots of this run
        # are written with the save_prefix below.
        "weights": "/data/vnet2_pretrained_with_low_lr_batch2_iter_18000.zip",
        "save_prefix": "/data/vnet2_pretrained_with_low_lr_step2"
    })

    # Assemble loader -> transformer -> optimizer and run for 3100 cycles,
    # validating every 20 cycles.
    p = Pipeline(config={
        "validation_frequency": 20,
        "cycles": 3100
    })
    p.add(db_loader)
    p.add(transformer)
    p.add(optimizer)
    p.run()


[2016-04-16 10:38:28] INFO: H5DBLoader - Caching DB in memory
[2016-04-16 10:38:28] INFO: Pipeline - Starting computation
[2016-04-16 10:40:33] INFO: Graph - Loading parameters from file '/data/vnet2_pretrained_with_low_lr_batch2_iter_18000.zip'
[2016-04-16 10:40:33] INFO: Graph - Setting up graph
[2016-04-16 10:40:33] INFO: Node - data has shape (-1, 3, 240, 320)
[2016-04-16 10:40:33] INFO: Node - label has shape (-1, 1, 240, 320)
[2016-04-16 10:40:33] INFO: Node - conv_1 has shape (-1, 64, 240, 320)
[2016-04-16 10:40:33] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:33] INFO: Node - conv_2 has shape (-1, 64, 240, 320)
[2016-04-16 10:40:33] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:33] INFO: Node - pool_2 has shape (-1, 64, 120, 160)
[2016-04-16 10:40:33] INFO: Pool - Using DNN CUDA Module
[2016-04-16 10:40:33] INFO: Node - conv_3 has shape (-1, 128, 120, 160)
[2016-04-16 10:40:33] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:33] INFO: Node - conv_4 has shape (-1, 128, 120, 160)
[2016-04-16 10:40:33] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:33] INFO: Node - pool_4 has shape (-1, 128, 60, 80)
[2016-04-16 10:40:33] INFO: Pool - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_5 has shape (-1, 256, 60, 80)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_6 has shape (-1, 256, 60, 80)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - pool_6 has shape (-1, 256, 30, 40)
[2016-04-16 10:40:34] INFO: Pool - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_7 has shape (-1, 512, 30, 40)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_8 has shape (-1, 512, 30, 40)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - pool_8 has shape (-1, 512, 15, 20)
[2016-04-16 10:40:34] INFO: Pool - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - fl has shape (-1, 153600)
[2016-04-16 10:40:34] INFO: Node - fc_8 has shape (-1, 4096)
[2016-04-16 10:40:34] INFO: Node - dp_8 has shape (-1, 4096)
[2016-04-16 10:40:34] INFO: Node - fc_9 has shape (-1, 19200)
[2016-04-16 10:40:34] INFO: Node - dp_9 has shape (-1, 19200)
[2016-04-16 10:40:34] INFO: Node - rs_10 has shape (-1, 64, 15, 20)
[2016-04-16 10:40:34] INFO: Node - up_11 has shape (-1, 64, 30, 40)
[2016-04-16 10:40:34] INFO: Node - conv_11 has shape (-1, 512, 30, 40)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - concat_11 has shape (-1, 1024, 30, 40)
[2016-04-16 10:40:34] INFO: Node - conv_12 has shape (-1, 512, 30, 40)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_13 has shape (-1, 512, 30, 40)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - up_14 has shape (-1, 512, 60, 80)
[2016-04-16 10:40:34] INFO: Node - conv_14 has shape (-1, 256, 60, 80)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - concat_14 has shape (-1, 512, 60, 80)
[2016-04-16 10:40:34] INFO: Node - conv_15 has shape (-1, 256, 60, 80)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_16 has shape (-1, 256, 60, 80)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - up_17 has shape (-1, 256, 120, 160)
[2016-04-16 10:40:34] INFO: Node - conv_17 has shape (-1, 128, 120, 160)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - concat_17 has shape (-1, 256, 120, 160)
[2016-04-16 10:40:34] INFO: Node - conv_18 has shape (-1, 128, 120, 160)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_19 has shape (-1, 128, 120, 160)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - up_20 has shape (-1, 128, 240, 320)
[2016-04-16 10:40:34] INFO: Node - conv_20 has shape (-1, 64, 240, 320)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - concat_20 has shape (-1, 128, 240, 320)
[2016-04-16 10:40:34] INFO: Node - conv_21 has shape (-1, 64, 240, 320)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_22 has shape (-1, 64, 240, 320)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - conv_23 has shape (-1, 1, 240, 320)
[2016-04-16 10:40:34] INFO: Conv2D - Using DNN CUDA Module
[2016-04-16 10:40:34] INFO: Node - loss has shape (1,)
[2016-04-16 10:40:34] INFO: Node - mse has shape (1,)
[2016-04-16 10:41:03] INFO: Graph - Invoking Theano compiler
[2016-04-16 10:41:41] INFO: Optimizer - Compilation finished
[2016-04-16 10:42:01] INFO: Optimizer - Training score at iteration 20: {'loss': array(1.03914475440979, dtype=float32), 'mse': array(1.019384503364563, dtype=float32)}
[2016-04-16 10:42:21] INFO: Optimizer - Training score at iteration 40: {'loss': array(1.092673420906067, dtype=float32), 'mse': array(1.0453102588653564, dtype=float32)}
[2016-04-16 10:42:42] INFO: Optimizer - Training score at iteration 60: {'loss': array(0.5158506631851196, dtype=float32), 'mse': array(0.7182274460792542, dtype=float32)}
[2016-04-16 10:43:03] INFO: Optimizer - Training score at iteration 80: {'loss': array(1.06766676902771, dtype=float32), 'mse': array(1.033279538154602, dtype=float32)}
[2016-04-16 10:43:25] INFO: Optimizer - Training score at iteration 100: {'loss': array(1.4331656694412231, dtype=float32), 'mse': array(1.1971489191055298, dtype=float32)}
[2016-04-16 10:43:46] INFO: Optimizer - Training score at iteration 120: {'loss': array(0.6923319697380066, dtype=float32), 'mse': array(0.8320648670196533, dtype=float32)}
[2016-04-16 10:44:07] INFO: Optimizer - Training score at iteration 140: {'loss': array(0.3766331374645233, dtype=float32), 'mse': array(0.6137044429779053, dtype=float32)}
[2016-04-16 10:44:28] INFO: Optimizer - Training score at iteration 160: {'loss': array(0.5955070853233337, dtype=float32), 'mse': array(0.7716910243034363, dtype=float32)}
[2016-04-16 10:44:49] INFO: Optimizer - Training score at iteration 180: {'loss': array(0.5297645926475525, dtype=float32), 'mse': array(0.7278493046760559, dtype=float32)}
[2016-04-16 10:45:10] INFO: Optimizer - Training score at iteration 200: {'loss': array(0.6483050584793091, dtype=float32), 'mse': array(0.805173933506012, dtype=float32)}
[2016-04-16 10:45:32] INFO: Optimizer - Mean loss values for validation at iteration 200 is: {'loss': 0.8761586, 'mse': 0.90960628}
[2016-04-16 10:45:52] INFO: Optimizer - Training score at iteration 220: {'loss': array(0.5500811338424683, dtype=float32), 'mse': array(0.741674542427063, dtype=float32)}
[2016-04-16 10:46:13] INFO: Optimizer - Training score at iteration 240: {'loss': array(0.837603747844696, dtype=float32), 'mse': array(0.9152069091796875, dtype=float32)}
[2016-04-16 10:46:34] INFO: Optimizer - Training score at iteration 260: {'loss': array(1.0848230123519897, dtype=float32), 'mse': array(1.0415483713150024, dtype=float32)}
[2016-04-16 10:46:54] INFO: Optimizer - Training score at iteration 280: {'loss': array(0.42648181319236755, dtype=float32), 'mse': array(0.653055727481842, dtype=float32)}
[2016-04-16 10:47:15] INFO: Optimizer - Training score at iteration 300: {'loss': array(0.6030213832855225, dtype=float32), 'mse': array(0.7765445113182068, dtype=float32)}
[2016-04-16 10:47:36] INFO: Optimizer - Training score at iteration 320: {'loss': array(0.5377848148345947, dtype=float32), 'mse': array(0.7333381175994873, dtype=float32)}
[2016-04-16 10:47:56] INFO: Optimizer - Training score at iteration 340: {'loss': array(0.9547203183174133, dtype=float32), 'mse': array(0.9770978689193726, dtype=float32)}
[2016-04-16 10:48:17] INFO: Optimizer - Training score at iteration 360: {'loss': array(0.6608562469482422, dtype=float32), 'mse': array(0.8129306435585022, dtype=float32)}
[2016-04-16 10:48:38] INFO: Optimizer - Training score at iteration 380: {'loss': array(0.7703433036804199, dtype=float32), 'mse': array(0.8776920437812805, dtype=float32)}
[2016-04-16 10:48:58] INFO: Optimizer - Training score at iteration 400: {'loss': array(0.7967088222503662, dtype=float32), 'mse': array(0.8925854563713074, dtype=float32)}
[2016-04-16 10:49:20] INFO: Optimizer - Mean loss values for validation at iteration 400 is: {'loss': 0.84534043, 'mse': 0.89433795}
[2016-04-16 10:49:40] INFO: Optimizer - Training score at iteration 420: {'loss': array(0.7389243841171265, dtype=float32), 'mse': array(0.8596071004867554, dtype=float32)}
[2016-04-16 10:50:00] INFO: Optimizer - Training score at iteration 440: {'loss': array(0.791595458984375, dtype=float32), 'mse': array(0.8897165060043335, dtype=float32)}
[2016-04-16 10:50:21] INFO: Optimizer - Training score at iteration 460: {'loss': array(0.4212435185909271, dtype=float32), 'mse': array(0.6490327715873718, dtype=float32)}
[2016-04-16 10:50:42] INFO: Optimizer - Training score at iteration 480: {'loss': array(0.8897281885147095, dtype=float32), 'mse': array(0.9432540535926819, dtype=float32)}
[2016-04-16 10:51:02] INFO: Optimizer - Training score at iteration 500: {'loss': array(0.6607567071914673, dtype=float32), 'mse': array(0.8128694295883179, dtype=float32)}
[2016-04-16 10:51:23] INFO: Optimizer - Training score at iteration 520: {'loss': array(0.7615388631820679, dtype=float32), 'mse': array(0.8726619482040405, dtype=float32)}
[2016-04-16 10:51:43] INFO: Optimizer - Training score at iteration 540: {'loss': array(0.2140781134366989, dtype=float32), 'mse': array(0.462685763835907, dtype=float32)}
[2016-04-16 10:52:04] INFO: Optimizer - Training score at iteration 560: {'loss': array(0.6067139506340027, dtype=float32), 'mse': array(0.778918445110321, dtype=float32)}
[2016-04-16 10:52:24] INFO: Optimizer - Training score at iteration 580: {'loss': array(0.8675040602684021, dtype=float32), 'mse': array(0.9313989877700806, dtype=float32)}
[2016-04-16 10:52:45] INFO: Optimizer - Training score at iteration 600: {'loss': array(0.5419451594352722, dtype=float32), 'mse': array(0.7361692190170288, dtype=float32)}
[2016-04-16 10:53:07] INFO: Optimizer - Mean loss values for validation at iteration 600 is: {'loss': 0.86547768, 'mse': 0.90665233}
[2016-04-16 10:53:27] INFO: Optimizer - Training score at iteration 620: {'loss': array(1.1687124967575073, dtype=float32), 'mse': array(1.081070065498352, dtype=float32)}
[2016-04-16 10:53:48] INFO: Optimizer - Training score at iteration 640: {'loss': array(0.6695969700813293, dtype=float32), 'mse': array(0.818289041519165, dtype=float32)}
[2016-04-16 10:54:08] INFO: Optimizer - Training score at iteration 660: {'loss': array(0.5775451064109802, dtype=float32), 'mse': array(0.759963870048523, dtype=float32)}
[2016-04-16 10:54:29] INFO: Optimizer - Training score at iteration 680: {'loss': array(0.7739944458007812, dtype=float32), 'mse': array(0.879769504070282, dtype=float32)}
[2016-04-16 10:54:49] INFO: Optimizer - Training score at iteration 700: {'loss': array(1.3531413078308105, dtype=float32), 'mse': array(1.1632460355758667, dtype=float32)}
[2016-04-16 10:55:10] INFO: Optimizer - Training score at iteration 720: {'loss': array(1.1869434118270874, dtype=float32), 'mse': array(1.089469313621521, dtype=float32)}
[2016-04-16 10:55:31] INFO: Optimizer - Training score at iteration 740: {'loss': array(0.9367290735244751, dtype=float32), 'mse': array(0.9678476452827454, dtype=float32)}
[2016-04-16 10:55:51] INFO: Optimizer - Training score at iteration 760: {'loss': array(0.9825435876846313, dtype=float32), 'mse': array(0.9912333488464355, dtype=float32)}
[2016-04-16 10:56:12] INFO: Optimizer - Training score at iteration 780: {'loss': array(0.47716832160949707, dtype=float32), 'mse': array(0.6907737255096436, dtype=float32)}
[2016-04-16 10:56:33] INFO: Optimizer - Training score at iteration 800: {'loss': array(0.5131669640541077, dtype=float32), 'mse': array(0.7163567543029785, dtype=float32)}
[2016-04-16 10:56:54] INFO: Optimizer - Mean loss values for validation at iteration 800 is: {'loss': 0.86217195, 'mse': 0.90388191}
[2016-04-16 10:57:14] INFO: Optimizer - Training score at iteration 820: {'loss': array(0.7633434534072876, dtype=float32), 'mse': array(0.8736952543258667, dtype=float32)}
[2016-04-16 10:57:35] INFO: Optimizer - Training score at iteration 840: {'loss': array(0.3636421859264374, dtype=float32), 'mse': array(0.6030275225639343, dtype=float32)}
[2016-04-16 10:57:55] INFO: Optimizer - Training score at iteration 860: {'loss': array(0.4544912874698639, dtype=float32), 'mse': array(0.6741596460342407, dtype=float32)}
[2016-04-16 10:58:16] INFO: Optimizer - Training score at iteration 880: {'loss': array(0.8699121475219727, dtype=float32), 'mse': array(0.9326907992362976, dtype=float32)}
[2016-04-16 10:58:37] INFO: Optimizer - Training score at iteration 900: {'loss': array(0.5640856623649597, dtype=float32), 'mse': array(0.7510563731193542, dtype=float32)}
[2016-04-16 10:58:57] INFO: Optimizer - Training score at iteration 920: {'loss': array(0.8300798535346985, dtype=float32), 'mse': array(0.911087155342102, dtype=float32)}
[2016-04-16 10:59:18] INFO: Optimizer - Training score at iteration 940: {'loss': array(0.3357769548892975, dtype=float32), 'mse': array(0.5794626474380493, dtype=float32)}
[2016-04-16 10:59:38] INFO: Optimizer - Training score at iteration 960: {'loss': array(0.5607156753540039, dtype=float32), 'mse': array(0.7488095164299011, dtype=float32)}
[2016-04-16 10:59:59] INFO: Optimizer - Training score at iteration 980: {'loss': array(0.9165178537368774, dtype=float32), 'mse': array(0.9573493599891663, dtype=float32)}
[2016-04-16 11:00:40] INFO: Optimizer - Mean loss values for validation at iteration 999 is: {'loss': 0.89347649, 'mse': 0.91543537}
[2016-04-16 11:00:41] INFO: Optimizer - Training score at iteration 1000: {'loss': array(0.8947082757949829, dtype=float32), 'mse': array(0.9458901882171631, dtype=float32)}
[2016-04-16 11:01:01] INFO: Optimizer - Training score at iteration 1020: {'loss': array(0.4414142072200775, dtype=float32), 'mse': array(0.6643900871276855, dtype=float32)}
[2016-04-16 11:01:22] INFO: Optimizer - Training score at iteration 1040: {'loss': array(0.5006342530250549, dtype=float32), 'mse': array(0.7075551152229309, dtype=float32)}
[2016-04-16 11:01:42] INFO: Optimizer - Training score at iteration 1060: {'loss': array(1.4043015241622925, dtype=float32), 'mse': array(1.1850322484970093, dtype=float32)}
[2016-04-16 11:02:03] INFO: Optimizer - Training score at iteration 1080: {'loss': array(0.7062894105911255, dtype=float32), 'mse': array(0.8404102325439453, dtype=float32)}
[2016-04-16 11:02:23] INFO: Optimizer - Training score at iteration 1100: {'loss': array(1.1474363803863525, dtype=float32), 'mse': array(1.071184515953064, dtype=float32)}
[2016-04-16 11:02:44] INFO: Optimizer - Training score at iteration 1120: {'loss': array(0.7201094031333923, dtype=float32), 'mse': array(0.8485925793647766, dtype=float32)}
[2016-04-16 11:03:05] INFO: Optimizer - Training score at iteration 1140: {'loss': array(0.6592743396759033, dtype=float32), 'mse': array(0.811957061290741, dtype=float32)}
[2016-04-16 11:03:25] INFO: Optimizer - Training score at iteration 1160: {'loss': array(0.8886898159980774, dtype=float32), 'mse': array(0.9427034854888916, dtype=float32)}
[2016-04-16 11:03:46] INFO: Optimizer - Training score at iteration 1180: {'loss': array(1.4022555351257324, dtype=float32), 'mse': array(1.1841686964035034, dtype=float32)}
[2016-04-16 11:04:27] INFO: Optimizer - Mean loss values for validation at iteration 1199 is: {'loss': 0.92432934, 'mse': 0.93053615}
[2016-04-16 11:04:28] INFO: Optimizer - Training score at iteration 1200: {'loss': array(0.8377403616905212, dtype=float32), 'mse': array(0.9152815341949463, dtype=float32)}
[2016-04-16 11:04:48] INFO: Optimizer - Training score at iteration 1220: {'loss': array(1.18159818649292, dtype=float32), 'mse': array(1.0870133638381958, dtype=float32)}
[2016-04-16 11:05:09] INFO: Optimizer - Training score at iteration 1240: {'loss': array(0.6860515475273132, dtype=float32), 'mse': array(0.8282822966575623, dtype=float32)}
[2016-04-16 11:05:30] INFO: Optimizer - Training score at iteration 1260: {'loss': array(1.3753563165664673, dtype=float32), 'mse': array(1.1727558374404907, dtype=float32)}
[2016-04-16 11:05:50] INFO: Optimizer - Training score at iteration 1280: {'loss': array(0.6669328212738037, dtype=float32), 'mse': array(0.8166595101356506, dtype=float32)}
[2016-04-16 11:06:11] INFO: Optimizer - Training score at iteration 1300: {'loss': array(0.7899040579795837, dtype=float32), 'mse': array(0.8887654542922974, dtype=float32)}
[2016-04-16 11:06:31] INFO: Optimizer - Training score at iteration 1320: {'loss': array(0.360726922750473, dtype=float32), 'mse': array(0.6006054282188416, dtype=float32)}
[2016-04-16 11:06:52] INFO: Optimizer - Training score at iteration 1340: {'loss': array(0.5138194561004639, dtype=float32), 'mse': array(0.716812014579773, dtype=float32)}
[2016-04-16 11:07:13] INFO: Optimizer - Training score at iteration 1360: {'loss': array(0.9502920508384705, dtype=float32), 'mse': array(0.9748292565345764, dtype=float32)}
[2016-04-16 11:07:33] INFO: Optimizer - Training score at iteration 1380: {'loss': array(0.28788191080093384, dtype=float32), 'mse': array(0.5365462899208069, dtype=float32)}
[2016-04-16 11:08:15] INFO: Optimizer - Mean loss values for validation at iteration 1399 is: {'loss': 0.87484843, 'mse': 0.9061048}
[2016-04-16 11:08:15] INFO: Optimizer - Training score at iteration 1400: {'loss': array(0.39781758189201355, dtype=float32), 'mse': array(0.6307278275489807, dtype=float32)}
[2016-04-16 11:08:36] INFO: Optimizer - Training score at iteration 1420: {'loss': array(0.7992634177207947, dtype=float32), 'mse': array(0.8940153121948242, dtype=float32)}
[2016-04-16 11:08:56] INFO: Optimizer - Training score at iteration 1440: {'loss': array(0.3676409423351288, dtype=float32), 'mse': array(0.6063340306282043, dtype=float32)}
[2016-04-16 11:09:17] INFO: Optimizer - Training score at iteration 1460: {'loss': array(0.27579107880592346, dtype=float32), 'mse': array(0.525158166885376, dtype=float32)}
[2016-04-16 11:09:38] INFO: Optimizer - Training score at iteration 1480: {'loss': array(0.8618680834770203, dtype=float32), 'mse': array(0.9283685088157654, dtype=float32)}
[2016-04-16 11:09:58] INFO: Optimizer - Training score at iteration 1500: {'loss': array(0.4817989468574524, dtype=float32), 'mse': array(0.6941173672676086, dtype=float32)}
[2016-04-16 11:10:19] INFO: Optimizer - Training score at iteration 1520: {'loss': array(0.6992504596710205, dtype=float32), 'mse': array(0.8362119793891907, dtype=float32)}
[2016-04-16 11:10:40] INFO: Optimizer - Training score at iteration 1540: {'loss': array(1.094907283782959, dtype=float32), 'mse': array(1.0463781356811523, dtype=float32)}
[2016-04-16 11:11:00] INFO: Optimizer - Training score at iteration 1560: {'loss': array(0.7314178347587585, dtype=float32), 'mse': array(0.8552296757698059, dtype=float32)}
[2016-04-16 11:11:21] INFO: Optimizer - Training score at iteration 1580: {'loss': array(0.796359121799469, dtype=float32), 'mse': array(0.8923895359039307, dtype=float32)}
[2016-04-16 11:12:02] INFO: Optimizer - Mean loss values for validation at iteration 1599 is: {'loss': 0.86458039, 'mse': 0.90536898}
[2016-04-16 11:12:03] INFO: Optimizer - Training score at iteration 1600: {'loss': array(0.5854623317718506, dtype=float32), 'mse': array(0.7651550769805908, dtype=float32)}
[2016-04-16 11:12:23] INFO: Optimizer - Training score at iteration 1620: {'loss': array(1.2497929334640503, dtype=float32), 'mse': array(1.1179413795471191, dtype=float32)}
[2016-04-16 11:12:44] INFO: Optimizer - Training score at iteration 1640: {'loss': array(0.5110294222831726, dtype=float32), 'mse': array(0.7148631811141968, dtype=float32)}
[2016-04-16 11:13:05] INFO: Optimizer - Training score at iteration 1660: {'loss': array(0.3234402537345886, dtype=float32), 'mse': array(0.5687180757522583, dtype=float32)}
[2016-04-16 11:13:25] INFO: Optimizer - Training score at iteration 1680: {'loss': array(0.7834725379943848, dtype=float32), 'mse': array(0.8851398229598999, dtype=float32)}
[2016-04-16 11:13:46] INFO: Optimizer - Training score at iteration 1700: {'loss': array(0.863593578338623, dtype=float32), 'mse': array(0.9292973279953003, dtype=float32)}
[2016-04-16 11:14:06] INFO: Optimizer - Training score at iteration 1720: {'loss': array(0.5272631645202637, dtype=float32), 'mse': array(0.726128876209259, dtype=float32)}
[2016-04-16 11:14:27] INFO: Optimizer - Training score at iteration 1740: {'loss': array(0.494700163602829, dtype=float32), 'mse': array(0.703349232673645, dtype=float32)}
[2016-04-16 11:14:48] INFO: Optimizer - Training score at iteration 1760: {'loss': array(1.6024001836776733, dtype=float32), 'mse': array(1.2658594846725464, dtype=float32)}
[2016-04-16 11:15:08] INFO: Optimizer - Training score at iteration 1780: {'loss': array(0.5284398794174194, dtype=float32), 'mse': array(0.7269387245178223, dtype=float32)}
[2016-04-16 11:15:48] INFO: Optimizer - Mean loss values for validation at iteration 1798 is: {'loss': 0.85163772, 'mse': 0.89549851}
[2016-04-16 11:15:50] INFO: Optimizer - Training score at iteration 1800: {'loss': array(0.5437886118888855, dtype=float32), 'mse': array(0.7374202609062195, dtype=float32)}
[2016-04-16 11:16:11] INFO: Optimizer - Training score at iteration 1820: {'loss': array(0.4145874083042145, dtype=float32), 'mse': array(0.6438845992088318, dtype=float32)}
[2016-04-16 11:16:31] INFO: Optimizer - Training score at iteration 1840: {'loss': array(0.5861783623695374, dtype=float32), 'mse': array(0.7656228542327881, dtype=float32)}
[2016-04-16 11:16:52] INFO: Optimizer - Training score at iteration 1860: {'loss': array(0.5745732188224792, dtype=float32), 'mse': array(0.7580060958862305, dtype=float32)}
[2016-04-16 11:17:13] INFO: Optimizer - Training score at iteration 1880: {'loss': array(1.1425668001174927, dtype=float32), 'mse': array(1.0689091682434082, dtype=float32)}
[2016-04-16 11:17:33] INFO: Optimizer - Training score at iteration 1900: {'loss': array(0.9136889576911926, dtype=float32), 'mse': array(0.9558708071708679, dtype=float32)}
[2016-04-16 11:17:54] INFO: Optimizer - Training score at iteration 1920: {'loss': array(0.4060489237308502, dtype=float32), 'mse': array(0.6372197270393372, dtype=float32)}
[2016-04-16 11:18:15] INFO: Optimizer - Training score at iteration 1940: {'loss': array(0.6022108197212219, dtype=float32), 'mse': array(0.7760224342346191, dtype=float32)}
[2016-04-16 11:18:35] INFO: Optimizer - Training score at iteration 1960: {'loss': array(0.703931450843811, dtype=float32), 'mse': array(0.8390061855316162, dtype=float32)}
[2016-04-16 11:18:56] INFO: Optimizer - Training score at iteration 1980: {'loss': array(0.3420170247554779, dtype=float32), 'mse': array(0.5848221778869629, dtype=float32)}
[2016-04-16 11:19:36] INFO: Optimizer - Mean loss values for validation at iteration 1998 is: {'loss': 0.84396541, 'mse': 0.89251399}
[2016-04-16 11:19:38] INFO: Optimizer - Training score at iteration 2000: {'loss': array(0.48847007751464844, dtype=float32), 'mse': array(0.6989063024520874, dtype=float32)}
[2016-04-16 11:19:58] INFO: Optimizer - Training score at iteration 2020: {'loss': array(0.7234501242637634, dtype=float32), 'mse': array(0.8505586981773376, dtype=float32)}
[2016-04-16 11:20:19] INFO: Optimizer - Training score at iteration 2040: {'loss': array(0.6148700714111328, dtype=float32), 'mse': array(0.7841365337371826, dtype=float32)}
[2016-04-16 11:20:40] INFO: Optimizer - Training score at iteration 2060: {'loss': array(0.8854165077209473, dtype=float32), 'mse': array(0.9409657120704651, dtype=float32)}
[2016-04-16 11:21:00] INFO: Optimizer - Training score at iteration 2080: {'loss': array(0.8777594566345215, dtype=float32), 'mse': array(0.9368881583213806, dtype=float32)}
[2016-04-16 11:21:21] INFO: Optimizer - Training score at iteration 2100: {'loss': array(0.4003113806247711, dtype=float32), 'mse': array(0.6327016353607178, dtype=float32)}
[2016-04-16 11:21:41] INFO: Optimizer - Training score at iteration 2120: {'loss': array(0.7541088461875916, dtype=float32), 'mse': array(0.8683943748474121, dtype=float32)}
[2016-04-16 11:22:02] INFO: Optimizer - Training score at iteration 2140: {'loss': array(1.0679835081100464, dtype=float32), 'mse': array(1.0334328413009644, dtype=float32)}
[2016-04-16 11:22:23] INFO: Optimizer - Training score at iteration 2160: {'loss': array(1.1560243368148804, dtype=float32), 'mse': array(1.0751856565475464, dtype=float32)}
[2016-04-16 11:22:43] INFO: Optimizer - Training score at iteration 2180: {'loss': array(0.6402382850646973, dtype=float32), 'mse': array(0.8001489043235779, dtype=float32)}
[2016-04-16 11:23:23] INFO: Optimizer - Mean loss values for validation at iteration 2198 is: {'loss': 0.83599919, 'mse': 0.88616639}
[2016-04-16 11:23:25] INFO: Optimizer - Training score at iteration 2200: {'loss': array(1.0460705757141113, dtype=float32), 'mse': array(1.0227758884429932, dtype=float32)}
[2016-04-16 11:23:46] INFO: Optimizer - Training score at iteration 2220: {'loss': array(0.7355496287345886, dtype=float32), 'mse': array(0.857641875743866, dtype=float32)}
[2016-04-16 11:24:06] INFO: Optimizer - Training score at iteration 2240: {'loss': array(0.8113476634025574, dtype=float32), 'mse': array(0.9007483720779419, dtype=float32)}
[2016-04-16 11:24:27] INFO: Optimizer - Training score at iteration 2260: {'loss': array(0.8190271258354187, dtype=float32), 'mse': array(0.905001163482666, dtype=float32)}
[2016-04-16 11:24:48] INFO: Optimizer - Training score at iteration 2280: {'loss': array(0.47595807909965515, dtype=float32), 'mse': array(0.6898971796035767, dtype=float32)}
[2016-04-16 11:25:09] INFO: Optimizer - Training score at iteration 2300: {'loss': array(0.7388038039207458, dtype=float32), 'mse': array(0.8595369458198547, dtype=float32)}
[2016-04-16 11:25:30] INFO: Optimizer - Training score at iteration 2320: {'loss': array(0.2569638192653656, dtype=float32), 'mse': array(0.5069159865379333, dtype=float32)}
[2016-04-16 11:25:50] INFO: Optimizer - Training score at iteration 2340: {'loss': array(0.5822004675865173, dtype=float32), 'mse': array(0.7630206346511841, dtype=float32)}
[2016-04-16 11:26:11] INFO: Optimizer - Training score at iteration 2360: {'loss': array(1.0845355987548828, dtype=float32), 'mse': array(1.0414103269577026, dtype=float32)}
[2016-04-16 11:26:32] INFO: Optimizer - Training score at iteration 2380: {'loss': array(0.8248164057731628, dtype=float32), 'mse': array(0.9081940054893494, dtype=float32)}
[2016-04-16 11:27:13] INFO: Optimizer - Mean loss values for validation at iteration 2398 is: {'loss': 0.90201169, 'mse': 0.92243344}
[2016-04-16 11:27:15] INFO: Optimizer - Training score at iteration 2400: {'loss': array(1.2436774969100952, dtype=float32), 'mse': array(1.1152029037475586, dtype=float32)}
[2016-04-16 11:27:35] INFO: Optimizer - Training score at iteration 2420: {'loss': array(0.28895020484924316, dtype=float32), 'mse': array(0.537540853023529, dtype=float32)}
[2016-04-16 11:27:56] INFO: Optimizer - Training score at iteration 2440: {'loss': array(1.0821762084960938, dtype=float32), 'mse': array(1.0402770042419434, dtype=float32)}
[2016-04-16 11:28:17] INFO: Optimizer - Training score at iteration 2460: {'loss': array(0.6800169944763184, dtype=float32), 'mse': array(0.82463139295578, dtype=float32)}
[2016-04-16 11:28:38] INFO: Optimizer - Training score at iteration 2480: {'loss': array(1.0488876104354858, dtype=float32), 'mse': array(1.024152159690857, dtype=float32)}
[2016-04-16 11:28:59] INFO: Optimizer - Training score at iteration 2500: {'loss': array(0.49833616614341736, dtype=float32), 'mse': array(0.7059292793273926, dtype=float32)}
[2016-04-16 11:29:20] INFO: Optimizer - Training score at iteration 2520: {'loss': array(0.5470248460769653, dtype=float32), 'mse': array(0.7396112680435181, dtype=float32)}
[2016-04-16 11:29:41] INFO: Optimizer - Training score at iteration 2540: {'loss': array(0.9587820172309875, dtype=float32), 'mse': array(0.9791741371154785, dtype=float32)}
[2016-04-16 11:30:02] INFO: Optimizer - Training score at iteration 2560: {'loss': array(0.7851862907409668, dtype=float32), 'mse': array(0.8861073851585388, dtype=float32)}
[2016-04-16 11:30:24] INFO: Optimizer - Training score at iteration 2580: {'loss': array(0.7905115485191345, dtype=float32), 'mse': array(0.8891071677207947, dtype=float32)}
[2016-04-16 11:31:04] INFO: Optimizer - Mean loss values for validation at iteration 2597 is: {'loss': 0.9676441, 'mse': 0.94972622}
[2016-04-16 11:31:06] INFO: Optimizer - Training score at iteration 2600: {'loss': array(0.2529248893260956, dtype=float32), 'mse': array(0.5029163956642151, dtype=float32)}
[2016-04-16 11:31:28] INFO: Optimizer - Training score at iteration 2620: {'loss': array(0.45044299960136414, dtype=float32), 'mse': array(0.6711505055427551, dtype=float32)}
[2016-04-16 11:31:49] INFO: Optimizer - Training score at iteration 2640: {'loss': array(0.21419735252857208, dtype=float32), 'mse': array(0.462814599275589, dtype=float32)}
[2016-04-16 11:32:10] INFO: Optimizer - Training score at iteration 2660: {'loss': array(0.6125648617744446, dtype=float32), 'mse': array(0.7826651930809021, dtype=float32)}
[2016-04-16 11:32:31] INFO: Optimizer - Training score at iteration 2680: {'loss': array(0.6120321154594421, dtype=float32), 'mse': array(0.7823247909545898, dtype=float32)}
[2016-04-16 11:32:52] INFO: Optimizer - Training score at iteration 2700: {'loss': array(0.8564199805259705, dtype=float32), 'mse': array(0.92542964220047, dtype=float32)}
[2016-04-16 11:33:13] INFO: Optimizer - Training score at iteration 2720: {'loss': array(1.1057337522506714, dtype=float32), 'mse': array(1.0515388250350952, dtype=float32)}
[2016-04-16 11:33:34] INFO: Optimizer - Training score at iteration 2740: {'loss': array(1.3449207544326782, dtype=float32), 'mse': array(1.1597071886062622, dtype=float32)}
[2016-04-16 11:33:55] INFO: Optimizer - Training score at iteration 2760: {'loss': array(0.8225119709968567, dtype=float32), 'mse': array(0.9069244265556335, dtype=float32)}
[2016-04-16 11:34:16] INFO: Optimizer - Training score at iteration 2780: {'loss': array(0.440181702375412, dtype=float32), 'mse': array(0.6634619235992432, dtype=float32)}
[2016-04-16 11:34:56] INFO: Optimizer - Mean loss values for validation at iteration 2797 is: {'loss': 0.99415463, 'mse': 0.96423668}
[2016-04-16 11:34:59] INFO: Optimizer - Training score at iteration 2800: {'loss': array(1.0464515686035156, dtype=float32), 'mse': array(1.022962212562561, dtype=float32)}
[2016-04-16 11:35:20] INFO: Optimizer - Training score at iteration 2820: {'loss': array(0.9677244424819946, dtype=float32), 'mse': array(0.9837298393249512, dtype=float32)}
[2016-04-16 11:35:41] INFO: Optimizer - Training score at iteration 2840: {'loss': array(0.8525738716125488, dtype=float32), 'mse': array(0.9233492612838745, dtype=float32)}
[2016-04-16 11:36:02] INFO: Optimizer - Training score at iteration 2860: {'loss': array(0.6915889978408813, dtype=float32), 'mse': array(0.8316183090209961, dtype=float32)}
[2016-04-16 11:36:23] INFO: Optimizer - Training score at iteration 2880: {'loss': array(1.1980005502700806, dtype=float32), 'mse': array(1.0945321321487427, dtype=float32)}
[2016-04-16 11:36:44] INFO: Optimizer - Training score at iteration 2900: {'loss': array(0.6571524739265442, dtype=float32), 'mse': array(0.8106493949890137, dtype=float32)}
[2016-04-16 11:37:05] INFO: Optimizer - Training score at iteration 2920: {'loss': array(0.5175780057907104, dtype=float32), 'mse': array(0.7194289565086365, dtype=float32)}
[2016-04-16 11:37:26] INFO: Optimizer - Training score at iteration 2940: {'loss': array(0.8116200566291809, dtype=float32), 'mse': array(0.9008995890617371, dtype=float32)}
[2016-04-16 11:37:47] INFO: Optimizer - Training score at iteration 2960: {'loss': array(0.9373098611831665, dtype=float32), 'mse': array(0.9681476354598999, dtype=float32)}
[2016-04-16 11:38:08] INFO: Optimizer - Training score at iteration 2980: {'loss': array(0.5351184606552124, dtype=float32), 'mse': array(0.7315179109573364, dtype=float32)}
[2016-04-16 11:38:47] INFO: Optimizer - Mean loss values for validation at iteration 2997 is: {'loss': 0.89203054, 'mse': 0.9173429}
[2016-04-16 11:38:50] INFO: Optimizer - Training score at iteration 3000: {'loss': array(0.8618456125259399, dtype=float32), 'mse': array(0.928356409072876, dtype=float32)}
[2016-04-16 11:39:11] INFO: Optimizer - Training score at iteration 3020: {'loss': array(0.8181484341621399, dtype=float32), 'mse': array(0.9045155644416809, dtype=float32)}
[2016-04-16 11:39:32] INFO: Optimizer - Training score at iteration 3040: {'loss': array(0.7630519270896912, dtype=float32), 'mse': array(0.8735284209251404, dtype=float32)}
[2016-04-16 11:39:53] INFO: Optimizer - Training score at iteration 3060: {'loss': array(0.2973107695579529, dtype=float32), 'mse': array(0.5452620983123779, dtype=float32)}
[2016-04-16 11:40:14] INFO: Optimizer - Training score at iteration 3080: {'loss': array(0.6643085479736328, dtype=float32), 'mse': array(0.815051257610321, dtype=float32)}
[2016-04-16 11:40:34] INFO: Optimizer - Training score at iteration 3100: {'loss': array(0.8164324760437012, dtype=float32), 'mse': array(0.9035665392875671, dtype=float32)}
[2016-04-16 11:40:55] INFO: Optimizer - Training score at iteration 3120: {'loss': array(0.9355095028877258, dtype=float32), 'mse': array(0.9672173857688904, dtype=float32)}
[2016-04-16 11:41:16] INFO: Optimizer - Training score at iteration 3140: {'loss': array(0.7747471332550049, dtype=float32), 'mse': array(0.8801971673965454, dtype=float32)}
[2016-04-16 11:41:37] INFO: Optimizer - Training score at iteration 3160: {'loss': array(0.4809752404689789, dtype=float32), 'mse': array(0.6935237646102905, dtype=float32)}
[2016-04-16 11:41:58] INFO: Optimizer - Training score at iteration 3180: {'loss': array(1.0745576620101929, dtype=float32), 'mse': array(1.0366086959838867, dtype=float32)}
[2016-04-16 11:42:38] INFO: Optimizer - Mean loss values for validation at iteration 3197 is: {'loss': 1.0363681, 'mse': 0.98454261}
[2016-04-16 11:42:40] INFO: Optimizer - Training score at iteration 3200: {'loss': array(0.29091060161590576, dtype=float32), 'mse': array(0.539361298084259, dtype=float32)}
[2016-04-16 11:43:01] INFO: Optimizer - Training score at iteration 3220: {'loss': array(0.41896605491638184, dtype=float32), 'mse': array(0.6472758650779724, dtype=float32)}
[2016-04-16 11:43:22] INFO: Optimizer - Training score at iteration 3240: {'loss': array(0.5684781670570374, dtype=float32), 'mse': array(0.7539749145507812, dtype=float32)}
[2016-04-16 11:43:43] INFO: Optimizer - Training score at iteration 3260: {'loss': array(0.4342929422855377, dtype=float32), 'mse': array(0.6590090394020081, dtype=float32)}
[2016-04-16 11:44:04] INFO: Optimizer - Training score at iteration 3280: {'loss': array(0.5724388957023621, dtype=float32), 'mse': array(0.7565969228744507, dtype=float32)}
[2016-04-16 11:44:24] INFO: Optimizer - Training score at iteration 3300: {'loss': array(0.3658236265182495, dtype=float32), 'mse': array(0.6048335433006287, dtype=float32)}
[2016-04-16 11:44:45] INFO: Optimizer - Training score at iteration 3320: {'loss': array(0.9309087991714478, dtype=float32), 'mse': array(0.9648361206054688, dtype=float32)}
[2016-04-16 11:45:06] INFO: Optimizer - Training score at iteration 3340: {'loss': array(1.0195001363754272, dtype=float32), 'mse': array(1.0097029209136963, dtype=float32)}
[2016-04-16 11:45:27] INFO: Optimizer - Training score at iteration 3360: {'loss': array(0.5618533492088318, dtype=float32), 'mse': array(0.7495688199996948, dtype=float32)}
[2016-04-16 11:45:48] INFO: Optimizer - Training score at iteration 3380: {'loss': array(0.871097207069397, dtype=float32), 'mse': array(0.9333258867263794, dtype=float32)}
[2016-04-16 11:46:26] INFO: Optimizer - Mean loss values for validation at iteration 3396 is: {'loss': 0.99298555, 'mse': 0.96434325}
[2016-04-16 11:46:30] INFO: Optimizer - Training score at iteration 3400: {'loss': array(0.9022714495658875, dtype=float32), 'mse': array(0.9498797059059143, dtype=float32)}
[2016-04-16 11:46:50] INFO: Optimizer - Training score at iteration 3420: {'loss': array(1.2347146272659302, dtype=float32), 'mse': array(1.1111770868301392, dtype=float32)}
[2016-04-16 11:47:11] INFO: Optimizer - Training score at iteration 3440: {'loss': array(1.4893033504486084, dtype=float32), 'mse': array(1.2203701734542847, dtype=float32)}
[2016-04-16 11:47:32] INFO: Optimizer - Training score at iteration 3460: {'loss': array(0.27142566442489624, dtype=float32), 'mse': array(0.5209852457046509, dtype=float32)}
[2016-04-16 11:47:53] INFO: Optimizer - Training score at iteration 3480: {'loss': array(0.5466387271881104, dtype=float32), 'mse': array(0.7393501996994019, dtype=float32)}
[2016-04-16 11:48:14] INFO: Optimizer - Training score at iteration 3500: {'loss': array(0.5261529088020325, dtype=float32), 'mse': array(0.7253639698028564, dtype=float32)}
[2016-04-16 11:48:34] INFO: Optimizer - Training score at iteration 3520: {'loss': array(0.6891206502914429, dtype=float32), 'mse': array(0.8301329016685486, dtype=float32)}
[2016-04-16 11:48:55] INFO: Optimizer - Training score at iteration 3540: {'loss': array(1.3014065027236938, dtype=float32), 'mse': array(1.1407920122146606, dtype=float32)}
[2016-04-16 11:49:16] INFO: Optimizer - Training score at iteration 3560: {'loss': array(0.24917840957641602, dtype=float32), 'mse': array(0.4991777241230011, dtype=float32)}
[2016-04-16 11:49:36] INFO: Optimizer - Training score at iteration 3580: {'loss': array(0.5619775652885437, dtype=float32), 'mse': array(0.7496516704559326, dtype=float32)}
[2016-04-16 11:50:14] INFO: Optimizer - Mean loss values for validation at iteration 3596 is: {'loss': 0.95763898, 'mse': 0.94750708}
[2016-04-16 11:50:18] INFO: Optimizer - Training score at iteration 3600: {'loss': array(1.1699761152267456, dtype=float32), 'mse': array(1.0816543102264404, dtype=float32)}

In [ ]:
%matplotlib inline
import matplotlib.pyplot as plt
l = np.array([s["loss"] for s in optimizer.losses])
e = np.array([s["mse"] for s in optimizer.losses])
print l.mean()
plt.plot(l)
plt.show()
plt.plot(e)
plt.show()

In [ ]: