In [1]:
import sys
sys.path.append('..')

from deepgraph.utils.logging import log
from deepgraph.utils.common import batch_parallel, ConfigMixin, shuffle_in_unison_inplace, pickle_dump
from deepgraph.utils.image import batch_pad_mirror, rotate_transformer_scalar_float32, rotate_transformer_rgb_uint8
from deepgraph.constants import *
from deepgraph.conf import rng
from deepgraph.nn.core import Dropout

from deepgraph.pipeline import *


Using gpu device 0: GeForce GTX TITAN X (CNMeM is enabled with initial size: 85.0% of memory, CuDNN 3007)

  _____                _____                 _
 |  _  \              |  __ \               | |
 | | | |___  ___ _ __ | |  \/_ __ __ _ _ __ | |__
 | | | / _ \/ _ \ '_ \| | __| '__/ _` | '_ \| '_ \
 | |/ /  __/  __/ |_) | |_\ \ | | (_| | |_) | | | |
 |___/ \___|\___| .__/ \____/_|  \__,_| .__/|_| |_|
                | |                   | |
                |_|                   |_|


Available on GitHub: https://github.com/sebastian-schlecht/deepgraph


In [2]:
import math
class Transformer(Processor):
    """
    Pipeline processor that applies online random augmentation to
    (image, depth) training batches:

    - mean subtraction and optional label offset (train + validation)
    - random crop to (228, 304), random horizontal flip and per-channel
      RGB color jitter (train only)
    - center crop (validation only)
    """
    def __init__(self, name, shapes, config, buffer_size=10):
        super(Transformer, self).__init__(name, shapes, config, buffer_size)
        # Dataset mean image; loaded in init() when a "mean_file" is configured.
        self.mean = None

    def init(self):
        """Load the dataset mean image if a 'mean_file' path was configured."""
        if self.conf("mean_file") is not None:
            self.mean = np.load(self.conf("mean_file"))
        else:
            log("Transformer - No mean file specified.", LOG_LEVEL_WARNING)

    def process(self):
        """
        Pull one packet, augment it according to its phase and push the
        result downstream.

        Returns
        -------
        bool : False when no packet was available, True otherwise.
        """
        packet = self.pull()
        # Return if no data is there
        if not packet:
            return False
        # Unpack
        data, label = packet.data
        # Do processing
        log("Transformer - Processing data", LOG_LEVEL_VERBOSE)
        # Target crop resolution for the network input ...
        i_h = 228
        i_w = 304
        # ... and for the depth label (same size here; pooled later in-graph).
        d_h = 228
        d_w = 304

        start = time.time()
        # Mean subtraction / label offset applies to both train and validation.
        if packet.phase == PHASE_TRAIN or packet.phase == PHASE_VAL:
            data = data.astype(np.float32)
            if self.mean is not None:
                # Hoist the loop-invariant dtype conversion out of the loop.
                mean_f32 = self.mean.astype(np.float32)
                for idx in range(data.shape[0]):
                    # Subtract mean
                    data[idx] = data[idx] - mean_f32
            if self.conf("offset") is not None:
                label -= self.conf("offset")

        if packet.phase == PHASE_TRAIN:
            # Do elementwise operations

            data_old = data
            label_old = label
            data = np.zeros((data_old.shape[0], data_old.shape[1], i_h, i_w), dtype=np.float32)
            label = np.zeros((label_old.shape[0], d_h, d_w), dtype=np.float32)
            for idx in range(data.shape[0]):
                # Rotate
                # We rotate before cropping to be able to get filled corners
                # Maybe even adjust the border after rotating
                deg = np.random.randint(-5, 6)
                # Operate on old data. Careful - data is already in float so we need to normalize and rescale afterwards
                # data_old[idx] = 255. * rotate_transformer_rgb_uint8(data_old[idx] * 0.003921568627, deg).astype(np.float32)
                # label_old[idx] = rotate_transformer_scalar_float32(label_old[idx], deg)

                # Take care of any empty areas, we crop on a smaller surface depending on the angle
                # TODO Remove this once loss supports masking
                shift = 0  # np.tan((deg/180.) * math.pi)
                # Random crop origin. Draw scalar ints instead of size=1 arrays:
                # indexing with 1-element arrays is deprecated in numpy and was
                # emitting DeprecationWarnings during training.
                cy = int(rng.randint(data_old.shape[2] - d_h - shift))
                cx = int(rng.randint(data_old.shape[3] - d_w - shift))

                data[idx] = data_old[idx, :, cy:cy+i_h, cx:cx+i_w]
                label[idx] = label_old[idx, cy:cy+d_h, cx:cx+d_w]

                # Flip horizontally with probability 0.5
                p = rng.randint(2)
                if p > 0:
                    data[idx] = data[idx, :, :, ::-1]
                    label[idx] = label[idx, :, ::-1]

                # RGB we mult with a random value between 0.8 and 1.2
                r = rng.randint(80, 121) / 100.
                g = rng.randint(80, 121) / 100.
                b = rng.randint(80, 121) / 100.
                data[idx, 0] = data[idx, 0] * r
                data[idx, 1] = data[idx, 1] * g
                data[idx, 2] = data[idx, 2] * b

            # Shuffle
            # data, label = shuffle_in_unison_inplace(data, label)
        elif packet.phase == PHASE_VAL:
            # Center crop
            cy = (data.shape[2] - i_h) // 2
            cx = (data.shape[3] - i_w) // 2
            data = data[:, :, cy:cy+i_h, cx:cx+i_w]
            label = label[:, cy:cy+d_h, cx:cx+d_w]
        end = time.time()
        log("Transformer - Processing took " + str(end - start) + " seconds.", LOG_LEVEL_VERBOSE)
        # Try to push into queue as long as thread should not terminate
        self.push(Packet(identifier=packet.id, phase=packet.phase, num=2, data=(data, label)))
        return True

    def setup_defaults(self):
        """Register configuration keys (and defaults) used by this processor."""
        super(Transformer, self).setup_defaults()
        self.conf_default("mean_file", None)
        self.conf_default("offset", None)

In [3]:
from theano.tensor.nnet import relu

from deepgraph.graph import *
from deepgraph.nn.core import *
from deepgraph.nn.conv import *
from deepgraph.nn.loss import *
from deepgraph.solver import *

from deepgraph.pipeline import Optimizer, H5DBLoader, Pipeline


def build_graph():
    """
    Build the coarse depth-prediction network: an AlexNet-style
    convolutional feature extractor followed by two fully connected
    layers, reshaped to a (57, 76) depth map. The ground-truth label
    is average-pooled to the same resolution and fed, together with
    the prediction, into a Euclidean loss and an MSE metric.
    """
    graph = Graph("depth_predictor")

    # Inputs: RGB image and (train-only) depth label.
    data = Data(graph, "data", T.ftensor4, shape=(-1, 3, 228, 304))
    label = Data(graph, "label", T.ftensor3, shape=(-1, 1, 228, 304), config={
        "phase": PHASE_TRAIN
    })
    # Downsample the label by 4x to match the prediction resolution.
    pool_label = Pool(graph, "pool_label", config={
        "kernel": (4, 4),
        "mode": "average_exc_pad",
        "phase": PHASE_TRAIN
    })

    # AlexNet-style convolutional trunk.
    conv_0 = Conv2D(graph, "conv_0", config={
        "channels": 96,
        "kernel": (11, 11),
        "subsample": (4, 4),
        "activation": relu
    })
    pool_0 = Pool(graph, "pool_0", config={
        "kernel": (3, 3),
        "stride": (2, 2)
    })
    lrn_0 = LRN(graph, "lrn_0")
    conv_1 = Conv2D(graph, "conv_1", config={
        "channels": 256,
        "kernel": (5, 5),
        "border_mode": 2,
        "activation": relu
    })
    pool_1 = Pool(graph, "pool_1", config={
        "kernel": (3, 3),
        "stride": (2, 2)
    })
    lrn_1 = LRN(graph, "lrn_1")
    conv_2 = Conv2D(graph, "conv_2", config={
        "channels": 384,
        "kernel": (3, 3),
        "border_mode": 1,
        "activation": relu
    })
    conv_3 = Conv2D(graph, "conv_3", config={
        "channels": 384,
        "kernel": (3, 3),
        "border_mode": 1,
        "activation": relu
    })
    conv_4 = Conv2D(graph, "conv_4", config={
        "channels": 256,
        "kernel": (3, 3),
        "border_mode": 1,
        "activation": relu
    })
    pool_4 = Pool(graph, "pool_4", config={
        "kernel": (3, 3),
        "stride": (2, 2)
    })

    # Fully connected head producing a flattened 57x76 depth map.
    flatten = Flatten(graph, "flatten", config={
        "dims": 2
    })
    fc_0 = Dense(graph, "fc_0", config={
        "out": 4096,
        "activation": None
    })
    dp_0 = Dropout(graph, "dp_0")
    fc_1 = Dense(graph, "fc_1", config={
        "out": 4332,
        "activation": None
    })
    reshape_0 = Reshape(graph, "reshape_0", config={
        "shape": (-1, 1, 57, 76),
        "is_output": True
    })

    loss = EuclideanLoss(graph, "loss")
    error = MSE(graph, "mse", config={
        "root": True,
        "is_output": True,
        "phase": PHASE_TRAIN
    })

    # Wire the main feed-forward chain in order.
    trunk = [data, conv_0, pool_0, lrn_0, conv_1, pool_1, lrn_1,
             conv_2, conv_3, conv_4, pool_4, flatten, fc_0, dp_0,
             fc_1, reshape_0]
    for src, dst in zip(trunk, trunk[1:]):
        src.connect(dst)

    # Pooled ground truth feeds both loss and metric.
    label.connect(pool_label)

    reshape_0.connect(loss)
    pool_label.connect(loss)

    pool_label.connect(error)
    reshape_0.connect(error)

    return graph

In [ ]:
if __name__ == "__main__":

    batch_size = 64
    # A chunk is the unit of data moved between pipeline stages (10 batches).
    chunk_size = 10 * batch_size
    # Shapes of (data, label) as handed from the transformer to the optimizer.
    transfer_shape = ((chunk_size, 3, 228, 304), (chunk_size, 228, 304))

    graph = build_graph()

    # Assemble the training pipeline: HDF5 loader -> augmentation -> optimizer.
    # NOTE(review): absolute paths below are machine-specific; consider making
    # them configurable.
    db_loader = H5DBLoader("db", ((chunk_size, 3, 480, 640), (chunk_size, 1, 480, 640)), config={
        "db": '/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.hdf5',
        # "db": '../data/nyu_depth_unet_large.hdf5',
        "key_data": "images",
        "key_label": "depths",
        "chunk_size": chunk_size
    })
    transformer = Transformer("tr", transfer_shape, config={
        # Measured for the data-set
        # "offset": 2.7321029
        "mean_file": "/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.npy"
    })
    optimizer = Optimizer("opt", graph, transfer_shape, config={
        "batch_size": batch_size,
        "chunk_size": chunk_size,
        "learning_rate": 0.01,
        "momentum": 0.9,
        "weight_decay": 0.0005,
        "print_freq": 100,
        "save_freq": 5000,
        # "weights": "../data/alexnet_combined_iter_10000.zip",
        "save_prefix": "../data/alexnet_scale_1"
    })

    pipeline = Pipeline(config={
        "validation_frequency": 50,
        "cycles": 6500
    })
    for stage in (db_loader, transformer, optimizer):
        pipeline.add(stage)
    pipeline.run()


[2016-04-11 08:55:25] INFO: Pipeline - Starting computation
[2016-04-11 08:55:26] INFO: Graph - Setting up graph
[2016-04-11 08:55:26] INFO: Node - data has shape (-1, 3, 228, 304)
[2016-04-11 08:55:26] INFO: Node - label has shape (-1, 1, 228, 304)
[2016-04-11 08:55:26] INFO: Node - conv_0 has shape (-1, 96, 55, 74)
[2016-04-11 08:55:26] INFO: Node - pool_label has shape (-1, 1, 57, 76)
[2016-04-11 08:55:26] INFO: Node - pool_0 has shape (-1, 96, 27, 36)
[2016-04-11 08:55:26] INFO: Node - lrn_0 has shape (-1, 96, 27, 36)
[2016-04-11 08:55:26] INFO: Node - conv_1 has shape (-1, 256, 27, 36)
[2016-04-11 08:55:26] INFO: Node - pool_1 has shape (-1, 256, 13, 17)
[2016-04-11 08:55:26] INFO: Node - lrn_1 has shape (-1, 256, 13, 17)
[2016-04-11 08:55:26] INFO: Node - conv_2 has shape (-1, 384, 13, 17)
[2016-04-11 08:55:26] INFO: Node - conv_3 has shape (-1, 384, 13, 17)
[2016-04-11 08:55:26] INFO: Node - conv_4 has shape (-1, 256, 13, 17)
[2016-04-11 08:55:26] INFO: Node - pool_4 has shape (-1, 256, 6, 8)
[2016-04-11 08:55:26] INFO: Node - flatten has shape (-1, 12288)
[2016-04-11 08:55:29] INFO: Node - fc_0 has shape (-1, 4096)
[2016-04-11 08:55:29] INFO: Node - dp_0 has shape (-1, 4096)
[2016-04-11 08:55:30] INFO: Node - fc_1 has shape (-1, 4332)
[2016-04-11 08:55:30] INFO: Node - reshape_0 has shape (-1, 1, 57, 76)
[2016-04-11 08:55:30] INFO: Node - loss has shape (1,)
[2016-04-11 08:55:30] INFO: Node - mse has shape (1,)
[2016-04-11 08:55:31] INFO: Graph - Invoking Theano compiler
[2016-04-11 08:55:49] INFO: Optimizer - Compilation finished
/home/ga29mix/anaconda/envs/deep/lib/python2.7/site-packages/ipykernel/__main__.py:65: DeprecationWarning: converting an array with ndim > 0 to an index will result in an error in the future
/home/ga29mix/anaconda/envs/deep/lib/python2.7/site-packages/ipykernel/__main__.py:66: DeprecationWarning: converting an array with ndim > 0 to an index will result in an error in the future
[2016-04-11 08:59:22] INFO: Optimizer - Training score at iteration 100: {'loss': array(2.72184681892395, dtype=float32), 'mse': array(1.6498020887374878, dtype=float32)}
[2016-04-11 08:59:48] INFO: Optimizer - Training score at iteration 200: {'loss': array(2.0933027267456055, dtype=float32), 'mse': array(1.4468250274658203, dtype=float32)}
[2016-04-11 09:00:38] INFO: Optimizer - Training score at iteration 300: {'loss': array(2.1278207302093506, dtype=float32), 'mse': array(1.458705186843872, dtype=float32)}
[2016-04-11 09:01:05] INFO: Optimizer - Training score at iteration 400: {'loss': array(1.4080692529678345, dtype=float32), 'mse': array(1.1866209506988525, dtype=float32)}
[2016-04-11 09:01:15] INFO: Optimizer - Mean loss values for validation at iteration 436 is: {'loss': 1.5417058, 'mse': 1.2376715}
[2016-04-11 09:01:33] INFO: Optimizer - Training score at iteration 500: {'loss': array(1.2969697713851929, dtype=float32), 'mse': array(1.1388458013534546, dtype=float32)}
[2016-04-11 09:02:01] INFO: Optimizer - Training score at iteration 600: {'loss': array(1.154619574546814, dtype=float32), 'mse': array(1.0745322704315186, dtype=float32)}
[2016-04-11 09:02:28] INFO: Optimizer - Training score at iteration 700: {'loss': array(1.4452918767929077, dtype=float32), 'mse': array(1.2022029161453247, dtype=float32)}
[2016-04-11 09:02:55] INFO: Optimizer - Training score at iteration 800: {'loss': array(1.1393506526947021, dtype=float32), 'mse': array(1.0674036741256714, dtype=float32)}
[2016-04-11 09:03:15] INFO: Optimizer - Mean loss values for validation at iteration 872 is: {'loss': 1.0747951, 'mse': 1.0338618}
[2016-04-11 09:03:23] INFO: Optimizer - Training score at iteration 900: {'loss': array(1.1636834144592285, dtype=float32), 'mse': array(1.0787415504455566, dtype=float32)}
[2016-04-11 09:03:50] INFO: Optimizer - Training score at iteration 1000: {'loss': array(0.9537573456764221, dtype=float32), 'mse': array(0.9766049981117249, dtype=float32)}
[2016-04-11 09:03:50] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:04:06] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_1000.zip
[2016-04-11 09:04:44] INFO: Optimizer - Training score at iteration 1100: {'loss': array(0.9363116025924683, dtype=float32), 'mse': array(0.9676319360733032, dtype=float32)}
[2016-04-11 09:05:12] INFO: Optimizer - Training score at iteration 1200: {'loss': array(1.101688027381897, dtype=float32), 'mse': array(1.0496132373809814, dtype=float32)}
[2016-04-11 09:05:39] INFO: Optimizer - Training score at iteration 1300: {'loss': array(0.6646350622177124, dtype=float32), 'mse': array(0.8152515292167664, dtype=float32)}
[2016-04-11 09:05:40] INFO: Optimizer - Mean loss values for validation at iteration 1300 is: {'loss': 0.98818111, 'mse': 0.98944587}
[2016-04-11 09:06:08] INFO: Optimizer - Training score at iteration 1400: {'loss': array(1.034612774848938, dtype=float32), 'mse': array(1.017159104347229, dtype=float32)}
[2016-04-11 09:06:36] INFO: Optimizer - Training score at iteration 1500: {'loss': array(0.7486947774887085, dtype=float32), 'mse': array(0.8652715086936951, dtype=float32)}
[2016-04-11 09:07:03] INFO: Optimizer - Training score at iteration 1600: {'loss': array(0.9873155355453491, dtype=float32), 'mse': array(0.9936375021934509, dtype=float32)}
[2016-04-11 09:07:30] INFO: Optimizer - Training score at iteration 1700: {'loss': array(0.7173669934272766, dtype=float32), 'mse': array(0.8469752073287964, dtype=float32)}
[2016-04-11 09:07:41] INFO: Optimizer - Mean loss values for validation at iteration 1736 is: {'loss': 0.84670436, 'mse': 0.91618794}
[2016-04-11 09:07:59] INFO: Optimizer - Training score at iteration 1800: {'loss': array(0.6354448199272156, dtype=float32), 'mse': array(0.7971479296684265, dtype=float32)}
[2016-04-11 09:08:26] INFO: Optimizer - Training score at iteration 1900: {'loss': array(0.6726561188697815, dtype=float32), 'mse': array(0.8201561570167542, dtype=float32)}
[2016-04-11 09:08:53] INFO: Optimizer - Training score at iteration 2000: {'loss': array(0.8593543171882629, dtype=float32), 'mse': array(0.927013635635376, dtype=float32)}
[2016-04-11 09:08:53] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:09:07] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_2000.zip
[2016-04-11 09:09:48] INFO: Optimizer - Training score at iteration 2100: {'loss': array(0.6912788152694702, dtype=float32), 'mse': array(0.8314317464828491, dtype=float32)}
[2016-04-11 09:10:08] INFO: Optimizer - Mean loss values for validation at iteration 2172 is: {'loss': 0.81204855, 'mse': 0.89987832}
[2016-04-11 09:10:17] INFO: Optimizer - Training score at iteration 2200: {'loss': array(0.6961596608161926, dtype=float32), 'mse': array(0.8343617916107178, dtype=float32)}
[2016-04-11 09:10:44] INFO: Optimizer - Training score at iteration 2300: {'loss': array(0.6076697707176208, dtype=float32), 'mse': array(0.7795317769050598, dtype=float32)}
[2016-04-11 09:11:13] INFO: Optimizer - Training score at iteration 2400: {'loss': array(0.5431404709815979, dtype=float32), 'mse': array(0.7369806170463562, dtype=float32)}
[2016-04-11 09:11:41] INFO: Optimizer - Training score at iteration 2500: {'loss': array(0.8153773546218872, dtype=float32), 'mse': array(0.9029824733734131, dtype=float32)}
[2016-04-11 09:12:08] INFO: Optimizer - Training score at iteration 2600: {'loss': array(0.4436475336551666, dtype=float32), 'mse': array(0.6660687327384949, dtype=float32)}
[2016-04-11 09:12:09] INFO: Optimizer - Mean loss values for validation at iteration 2600 is: {'loss': 0.76014853, 'mse': 0.86818057}
[2016-04-11 09:12:37] INFO: Optimizer - Training score at iteration 2700: {'loss': array(0.7577621340751648, dtype=float32), 'mse': array(0.8704953193664551, dtype=float32)}
[2016-04-11 09:13:04] INFO: Optimizer - Training score at iteration 2800: {'loss': array(0.45294615626335144, dtype=float32), 'mse': array(0.6730127334594727, dtype=float32)}
[2016-04-11 09:13:31] INFO: Optimizer - Training score at iteration 2900: {'loss': array(0.5994579195976257, dtype=float32), 'mse': array(0.7742466926574707, dtype=float32)}
[2016-04-11 09:13:59] INFO: Optimizer - Training score at iteration 3000: {'loss': array(0.4932905435562134, dtype=float32), 'mse': array(0.7023464441299438, dtype=float32)}
[2016-04-11 09:13:59] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:14:14] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_3000.zip
[2016-04-11 09:14:38] INFO: Optimizer - Mean loss values for validation at iteration 3036 is: {'loss': 0.68489081, 'mse': 0.8243742}
[2016-04-11 09:14:56] INFO: Optimizer - Training score at iteration 3100: {'loss': array(0.5189505815505981, dtype=float32), 'mse': array(0.7203822135925293, dtype=float32)}
[2016-04-11 09:15:25] INFO: Optimizer - Training score at iteration 3200: {'loss': array(0.4746943414211273, dtype=float32), 'mse': array(0.6889806389808655, dtype=float32)}
[2016-04-11 09:15:54] INFO: Optimizer - Training score at iteration 3300: {'loss': array(0.5439378619194031, dtype=float32), 'mse': array(0.7375214099884033, dtype=float32)}
[2016-04-11 09:16:21] INFO: Optimizer - Training score at iteration 3400: {'loss': array(0.6138433218002319, dtype=float32), 'mse': array(0.7834815382957458, dtype=float32)}
[2016-04-11 09:16:41] INFO: Optimizer - Mean loss values for validation at iteration 3472 is: {'loss': 0.71313697, 'mse': 0.8430109}
[2016-04-11 09:16:49] INFO: Optimizer - Training score at iteration 3500: {'loss': array(0.5481426119804382, dtype=float32), 'mse': array(0.740366518497467, dtype=float32)}
[2016-04-11 09:17:17] INFO: Optimizer - Training score at iteration 3600: {'loss': array(0.5173214077949524, dtype=float32), 'mse': array(0.7192505598068237, dtype=float32)}
[2016-04-11 09:17:45] INFO: Optimizer - Training score at iteration 3700: {'loss': array(0.49371638894081116, dtype=float32), 'mse': array(0.7026495337486267, dtype=float32)}
[2016-04-11 09:18:12] INFO: Optimizer - Training score at iteration 3800: {'loss': array(0.7710192799568176, dtype=float32), 'mse': array(0.8780770301818848, dtype=float32)}
[2016-04-11 09:18:41] INFO: Optimizer - Training score at iteration 3900: {'loss': array(0.38194647431373596, dtype=float32), 'mse': array(0.6180181503295898, dtype=float32)}
[2016-04-11 09:18:42] INFO: Optimizer - Mean loss values for validation at iteration 3900 is: {'loss': 0.76942712, 'mse': 0.8738575}
[2016-04-11 09:19:09] INFO: Optimizer - Training score at iteration 4000: {'loss': array(0.5708481073379517, dtype=float32), 'mse': array(0.755544900894165, dtype=float32)}
[2016-04-11 09:19:09] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:19:24] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_4000.zip
[2016-04-11 09:19:58] INFO: Optimizer - Training score at iteration 4100: {'loss': array(0.38562944531440735, dtype=float32), 'mse': array(0.6209906935691833, dtype=float32)}
[2016-04-11 09:20:26] INFO: Optimizer - Training score at iteration 4200: {'loss': array(0.4718899130821228, dtype=float32), 'mse': array(0.6869423985481262, dtype=float32)}
[2016-04-11 09:20:54] INFO: Optimizer - Training score at iteration 4300: {'loss': array(0.404473215341568, dtype=float32), 'mse': array(0.635982096195221, dtype=float32)}
[2016-04-11 09:21:05] INFO: Optimizer - Mean loss values for validation at iteration 4336 is: {'loss': 0.62419796, 'mse': 0.78800452}
[2016-04-11 09:21:23] INFO: Optimizer - Training score at iteration 4400: {'loss': array(0.39756080508232117, dtype=float32), 'mse': array(0.630524218082428, dtype=float32)}
[2016-04-11 09:21:51] INFO: Optimizer - Training score at iteration 4500: {'loss': array(0.3993688225746155, dtype=float32), 'mse': array(0.6319563388824463, dtype=float32)}
[2016-04-11 09:22:19] INFO: Optimizer - Training score at iteration 4600: {'loss': array(0.4370363652706146, dtype=float32), 'mse': array(0.6610872745513916, dtype=float32)}
[2016-04-11 09:22:46] INFO: Optimizer - Training score at iteration 4700: {'loss': array(0.4386187195777893, dtype=float32), 'mse': array(0.6622829437255859, dtype=float32)}
[2016-04-11 09:23:07] INFO: Optimizer - Mean loss values for validation at iteration 4772 is: {'loss': 0.63021052, 'mse': 0.79072106}
[2016-04-11 09:23:15] INFO: Optimizer - Training score at iteration 4800: {'loss': array(0.4434914290904999, dtype=float32), 'mse': array(0.6659514904022217, dtype=float32)}
[2016-04-11 09:23:42] INFO: Optimizer - Training score at iteration 4900: {'loss': array(0.3881930708885193, dtype=float32), 'mse': array(0.6230514049530029, dtype=float32)}
[2016-04-11 09:24:10] INFO: Optimizer - Training score at iteration 5000: {'loss': array(0.37540191411972046, dtype=float32), 'mse': array(0.6127004623413086, dtype=float32)}
[2016-04-11 09:24:10] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:24:27] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_5000.zip
[2016-04-11 09:25:13] INFO: Optimizer - Training score at iteration 5100: {'loss': array(0.5698191523551941, dtype=float32), 'mse': array(0.7548636794090271, dtype=float32)}
[2016-04-11 09:25:40] INFO: Optimizer - Training score at iteration 5200: {'loss': array(0.2951453924179077, dtype=float32), 'mse': array(0.543272852897644, dtype=float32)}
[2016-04-11 09:25:41] INFO: Optimizer - Mean loss values for validation at iteration 5200 is: {'loss': 0.71296954, 'mse': 0.84336841}
[2016-04-11 09:26:09] INFO: Optimizer - Training score at iteration 5300: {'loss': array(0.6365317702293396, dtype=float32), 'mse': array(0.7978293895721436, dtype=float32)}
[2016-04-11 09:26:37] INFO: Optimizer - Training score at iteration 5400: {'loss': array(0.41052699089050293, dtype=float32), 'mse': array(0.6407237648963928, dtype=float32)}
[2016-04-11 09:27:05] INFO: Optimizer - Training score at iteration 5500: {'loss': array(0.4854618310928345, dtype=float32), 'mse': array(0.6967508792877197, dtype=float32)}
[2016-04-11 09:27:32] INFO: Optimizer - Training score at iteration 5600: {'loss': array(0.4467310309410095, dtype=float32), 'mse': array(0.6683793663978577, dtype=float32)}
[2016-04-11 09:27:43] INFO: Optimizer - Mean loss values for validation at iteration 5636 is: {'loss': 0.67708069, 'mse': 0.82119721}
[2016-04-11 09:28:01] INFO: Optimizer - Training score at iteration 5700: {'loss': array(0.43769514560699463, dtype=float32), 'mse': array(0.6615853309631348, dtype=float32)}
[2016-04-11 09:28:28] INFO: Optimizer - Training score at iteration 5800: {'loss': array(0.5240344405174255, dtype=float32), 'mse': array(0.7239022254943848, dtype=float32)}
[2016-04-11 09:28:56] INFO: Optimizer - Training score at iteration 5900: {'loss': array(0.5030854940414429, dtype=float32), 'mse': array(0.7092851996421814, dtype=float32)}
[2016-04-11 09:29:23] INFO: Optimizer - Training score at iteration 6000: {'loss': array(0.42857053875923157, dtype=float32), 'mse': array(0.654653012752533, dtype=float32)}
[2016-04-11 09:29:23] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:29:37] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_6000.zip
[2016-04-11 09:30:22] INFO: Optimizer - Mean loss values for validation at iteration 6072 is: {'loss': 0.62760437, 'mse': 0.78871334}
[2016-04-11 09:30:30] INFO: Optimizer - Training score at iteration 6100: {'loss': array(0.44019564986228943, dtype=float32), 'mse': array(0.6634724140167236, dtype=float32)}
[2016-04-11 09:30:58] INFO: Optimizer - Training score at iteration 6200: {'loss': array(0.3546451926231384, dtype=float32), 'mse': array(0.5955209136009216, dtype=float32)}
[2016-04-11 09:31:25] INFO: Optimizer - Training score at iteration 6300: {'loss': array(0.3288096785545349, dtype=float32), 'mse': array(0.5734192728996277, dtype=float32)}
[2016-04-11 09:31:53] INFO: Optimizer - Training score at iteration 6400: {'loss': array(0.46882063150405884, dtype=float32), 'mse': array(0.6847047805786133, dtype=float32)}
[2016-04-11 09:32:20] INFO: Optimizer - Training score at iteration 6500: {'loss': array(0.2593466341495514, dtype=float32), 'mse': array(0.5092608332633972, dtype=float32)}
[2016-04-11 09:32:21] INFO: Optimizer - Mean loss values for validation at iteration 6500 is: {'loss': 0.58923447, 'mse': 0.76537591}
[2016-04-11 09:32:49] INFO: Optimizer - Training score at iteration 6600: {'loss': array(0.4472500681877136, dtype=float32), 'mse': array(0.6687675714492798, dtype=float32)}
[2016-04-11 09:33:17] INFO: Optimizer - Training score at iteration 6700: {'loss': array(0.30090293288230896, dtype=float32), 'mse': array(0.5485461950302124, dtype=float32)}
[2016-04-11 09:33:44] INFO: Optimizer - Training score at iteration 6800: {'loss': array(0.40360406041145325, dtype=float32), 'mse': array(0.6352983713150024, dtype=float32)}
[2016-04-11 09:34:12] INFO: Optimizer - Training score at iteration 6900: {'loss': array(0.3240443766117096, dtype=float32), 'mse': array(0.5692489743232727, dtype=float32)}
[2016-04-11 09:34:22] INFO: Optimizer - Mean loss values for validation at iteration 6936 is: {'loss': 0.59004509, 'mse': 0.7659412}
[2016-04-11 09:34:40] INFO: Optimizer - Training score at iteration 7000: {'loss': array(0.31659477949142456, dtype=float32), 'mse': array(0.5626675486564636, dtype=float32)}
[2016-04-11 09:34:40] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:34:54] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_7000.zip
[2016-04-11 09:35:42] INFO: Optimizer - Training score at iteration 7100: {'loss': array(0.376440167427063, dtype=float32), 'mse': array(0.6135472059249878, dtype=float32)}
[2016-04-11 09:36:10] INFO: Optimizer - Training score at iteration 7200: {'loss': array(0.4365072250366211, dtype=float32), 'mse': array(0.6606869101524353, dtype=float32)}
[2016-04-11 09:36:38] INFO: Optimizer - Training score at iteration 7300: {'loss': array(0.38537806272506714, dtype=float32), 'mse': array(0.6207882761955261, dtype=float32)}
[2016-04-11 09:36:58] INFO: Optimizer - Mean loss values for validation at iteration 7372 is: {'loss': 0.65749103, 'mse': 0.81004632}
[2016-04-11 09:37:07] INFO: Optimizer - Training score at iteration 7400: {'loss': array(0.3900099992752075, dtype=float32), 'mse': array(0.6245077848434448, dtype=float32)}
[2016-04-11 09:37:35] INFO: Optimizer - Training score at iteration 7500: {'loss': array(0.3719244599342346, dtype=float32), 'mse': array(0.6098560690879822, dtype=float32)}
[2016-04-11 09:38:03] INFO: Optimizer - Training score at iteration 7600: {'loss': array(0.35038474202156067, dtype=float32), 'mse': array(0.5919330716133118, dtype=float32)}
[2016-04-11 09:38:30] INFO: Optimizer - Training score at iteration 7700: {'loss': array(0.42975226044654846, dtype=float32), 'mse': array(0.6555548906326294, dtype=float32)}
[2016-04-11 09:38:57] INFO: Optimizer - Training score at iteration 7800: {'loss': array(0.24114352464675903, dtype=float32), 'mse': array(0.49106365442276, dtype=float32)}
[2016-04-11 09:38:58] INFO: Optimizer - Mean loss values for validation at iteration 7800 is: {'loss': 0.56156504, 'mse': 0.74628019}
[2016-04-11 09:39:26] INFO: Optimizer - Training score at iteration 7900: {'loss': array(0.4253649413585663, dtype=float32), 'mse': array(0.6522000432014465, dtype=float32)}
[2016-04-11 09:39:53] INFO: Optimizer - Training score at iteration 8000: {'loss': array(0.32935765385627747, dtype=float32), 'mse': array(0.5738968849182129, dtype=float32)}
[2016-04-11 09:39:53] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:40:09] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_8000.zip
[2016-04-11 09:40:59] INFO: Optimizer - Training score at iteration 8100: {'loss': array(0.34381821751594543, dtype=float32), 'mse': array(0.5863600969314575, dtype=float32)}
[2016-04-11 09:41:28] INFO: Optimizer - Training score at iteration 8200: {'loss': array(0.3054974377155304, dtype=float32), 'mse': array(0.5527182221412659, dtype=float32)}
[2016-04-11 09:41:38] INFO: Optimizer - Mean loss values for validation at iteration 8236 is: {'loss': 0.60127223, 'mse': 0.77265847}
[2016-04-11 09:41:57] INFO: Optimizer - Training score at iteration 8300: {'loss': array(0.3068808913230896, dtype=float32), 'mse': array(0.5539683103561401, dtype=float32)}
[2016-04-11 09:42:24] INFO: Optimizer - Training score at iteration 8400: {'loss': array(0.36001908779144287, dtype=float32), 'mse': array(0.6000158786773682, dtype=float32)}
[2016-04-11 09:42:52] INFO: Optimizer - Training score at iteration 8500: {'loss': array(0.3857685923576355, dtype=float32), 'mse': array(0.6211027503013611, dtype=float32)}
[2016-04-11 09:43:19] INFO: Optimizer - Training score at iteration 8600: {'loss': array(0.3687487244606018, dtype=float32), 'mse': array(0.6072468161582947, dtype=float32)}
[2016-04-11 09:43:40] INFO: Optimizer - Mean loss values for validation at iteration 8672 is: {'loss': 0.63695413, 'mse': 0.793917}
[2016-04-11 09:43:48] INFO: Optimizer - Training score at iteration 8700: {'loss': array(0.3512827157974243, dtype=float32), 'mse': array(0.5926910638809204, dtype=float32)}
[2016-04-11 09:44:16] INFO: Optimizer - Training score at iteration 8800: {'loss': array(0.364767849445343, dtype=float32), 'mse': array(0.6039601564407349, dtype=float32)}
[2016-04-11 09:44:43] INFO: Optimizer - Training score at iteration 8900: {'loss': array(0.35147958993911743, dtype=float32), 'mse': array(0.5928571224212646, dtype=float32)}
[2016-04-11 09:45:11] INFO: Optimizer - Training score at iteration 9000: {'loss': array(0.438964307308197, dtype=float32), 'mse': array(0.6625438332557678, dtype=float32)}
[2016-04-11 09:45:11] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:45:28] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_9000.zip
[2016-04-11 09:46:09] INFO: Optimizer - Training score at iteration 9100: {'loss': array(0.27542442083358765, dtype=float32), 'mse': array(0.524808943271637, dtype=float32)}
[2016-04-11 09:46:10] INFO: Optimizer - Mean loss values for validation at iteration 9100 is: {'loss': 0.6020509, 'mse': 0.77455866}
[2016-04-11 09:46:38] INFO: Optimizer - Training score at iteration 9200: {'loss': array(0.44535791873931885, dtype=float32), 'mse': array(0.6673514246940613, dtype=float32)}
[2016-04-11 09:47:06] INFO: Optimizer - Training score at iteration 9300: {'loss': array(0.33979353308677673, dtype=float32), 'mse': array(0.582918107509613, dtype=float32)}
[2016-04-11 09:47:33] INFO: Optimizer - Training score at iteration 9400: {'loss': array(0.3864721357822418, dtype=float32), 'mse': array(0.621668815612793, dtype=float32)}
[2016-04-11 09:48:01] INFO: Optimizer - Training score at iteration 9500: {'loss': array(0.39214321970939636, dtype=float32), 'mse': array(0.6262133717536926, dtype=float32)}
[2016-04-11 09:48:12] INFO: Optimizer - Mean loss values for validation at iteration 9536 is: {'loss': 0.62724078, 'mse': 0.79130381}
[2016-04-11 09:48:30] INFO: Optimizer - Training score at iteration 9600: {'loss': array(0.3149096965789795, dtype=float32), 'mse': array(0.5611681342124939, dtype=float32)}
[2016-04-11 09:48:57] INFO: Optimizer - Training score at iteration 9700: {'loss': array(0.35541510581970215, dtype=float32), 'mse': array(0.5961669683456421, dtype=float32)}
[2016-04-11 09:49:25] INFO: Optimizer - Training score at iteration 9800: {'loss': array(0.4054698050022125, dtype=float32), 'mse': array(0.6367651224136353, dtype=float32)}
[2016-04-11 09:49:53] INFO: Optimizer - Training score at iteration 9900: {'loss': array(0.35963982343673706, dtype=float32), 'mse': array(0.5996997356414795, dtype=float32)}
[2016-04-11 09:50:13] INFO: Optimizer - Mean loss values for validation at iteration 9972 is: {'loss': 0.62307048, 'mse': 0.78847271}
[2016-04-11 09:50:21] INFO: Optimizer - Training score at iteration 10000: {'loss': array(0.354429692029953, dtype=float32), 'mse': array(0.5953399538993835, dtype=float32)}
[2016-04-11 09:50:21] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:50:35] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_10000.zip
[2016-04-11 09:51:17] INFO: Optimizer - Training score at iteration 10100: {'loss': array(0.3106748163700104, dtype=float32), 'mse': array(0.5573821067810059, dtype=float32)}
[2016-04-11 09:51:46] INFO: Optimizer - Training score at iteration 10200: {'loss': array(0.28788667917251587, dtype=float32), 'mse': array(0.5365507006645203, dtype=float32)}
[2016-04-11 09:52:14] INFO: Optimizer - Training score at iteration 10300: {'loss': array(0.4017873704433441, dtype=float32), 'mse': array(0.6338670253753662, dtype=float32)}
[2016-04-11 09:52:42] INFO: Optimizer - Training score at iteration 10400: {'loss': array(0.22882291674613953, dtype=float32), 'mse': array(0.47835439443588257, dtype=float32)}
[2016-04-11 09:52:43] INFO: Optimizer - Mean loss values for validation at iteration 10400 is: {'loss': 0.61050951, 'mse': 0.77748239}
[2016-04-11 09:53:11] INFO: Optimizer - Training score at iteration 10500: {'loss': array(0.417693555355072, dtype=float32), 'mse': array(0.6462921500205994, dtype=float32)}
[2016-04-11 09:53:39] INFO: Optimizer - Training score at iteration 10600: {'loss': array(0.25850215554237366, dtype=float32), 'mse': array(0.508431077003479, dtype=float32)}
[2016-04-11 09:54:07] INFO: Optimizer - Training score at iteration 10700: {'loss': array(0.3622443675994873, dtype=float32), 'mse': array(0.6018673777580261, dtype=float32)}
[2016-04-11 09:54:34] INFO: Optimizer - Training score at iteration 10800: {'loss': array(0.3137059807777405, dtype=float32), 'mse': array(0.5600945949554443, dtype=float32)}
[2016-04-11 09:54:45] INFO: Optimizer - Mean loss values for validation at iteration 10836 is: {'loss': 0.53664535, 'mse': 0.72988546}
[2016-04-11 09:55:04] INFO: Optimizer - Training score at iteration 10900: {'loss': array(0.27089595794677734, dtype=float32), 'mse': array(0.5204766392707825, dtype=float32)}
[2016-04-11 09:55:31] INFO: Optimizer - Training score at iteration 11000: {'loss': array(0.3216956853866577, dtype=float32), 'mse': array(0.5671822428703308, dtype=float32)}
[2016-04-11 09:55:31] INFO: Optimizer - Saving intermediate model state
[2016-04-11 09:55:46] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_11000.zip
[2016-04-11 09:56:37] INFO: Optimizer - Training score at iteration 11100: {'loss': array(0.3248574733734131, dtype=float32), 'mse': array(0.5699626803398132, dtype=float32)}
[2016-04-11 09:57:05] INFO: Optimizer - Training score at iteration 11200: {'loss': array(0.30780327320098877, dtype=float32), 'mse': array(0.5548002123832703, dtype=float32)}
[2016-04-11 09:57:25] INFO: Optimizer - Mean loss values for validation at iteration 11272 is: {'loss': 0.55003583, 'mse': 0.73961431}
[2016-04-11 09:57:33] INFO: Optimizer - Training score at iteration 11300: {'loss': array(0.3180873692035675, dtype=float32), 'mse': array(0.5639923214912415, dtype=float32)}
[2016-04-11 09:58:01] INFO: Optimizer - Training score at iteration 11400: {'loss': array(0.2658234238624573, dtype=float32), 'mse': array(0.5155806541442871, dtype=float32)}
[2016-04-11 09:58:28] INFO: Optimizer - Training score at iteration 11500: {'loss': array(0.2321750819683075, dtype=float32), 'mse': array(0.481845498085022, dtype=float32)}
[2016-04-11 09:58:56] INFO: Optimizer - Training score at iteration 11600: {'loss': array(0.35041651129722595, dtype=float32), 'mse': array(0.5919598937034607, dtype=float32)}
[2016-04-11 09:59:23] INFO: Optimizer - Training score at iteration 11700: {'loss': array(0.20731735229492188, dtype=float32), 'mse': array(0.45532113313674927, dtype=float32)}
[2016-04-11 09:59:24] INFO: Optimizer - Mean loss values for validation at iteration 11700 is: {'loss': 0.54343522, 'mse': 0.73375607}
[2016-04-11 09:59:52] INFO: Optimizer - Training score at iteration 11800: {'loss': array(0.33792462944984436, dtype=float32), 'mse': array(0.5813128352165222, dtype=float32)}
[2016-04-11 10:00:20] INFO: Optimizer - Training score at iteration 11900: {'loss': array(0.24481110274791718, dtype=float32), 'mse': array(0.494783878326416, dtype=float32)}
[2016-04-11 10:00:47] INFO: Optimizer - Training score at iteration 12000: {'loss': array(0.30708959698677063, dtype=float32), 'mse': array(0.5541566610336304, dtype=float32)}
[2016-04-11 10:00:47] INFO: Optimizer - Saving intermediate model state
[2016-04-11 10:01:02] INFO: Graph - Model file saved as: ../data/alexnet_scale_1_iter_12000.zip
[2016-04-11 10:01:44] INFO: Optimizer - Training score at iteration 12100: {'loss': array(0.2721773684024811, dtype=float32), 'mse': array(0.521706223487854, dtype=float32)}
[2016-04-11 10:01:54] INFO: Optimizer - Mean loss values for validation at iteration 12136 is: {'loss': 0.53597677, 'mse': 0.73020774}
[2016-04-11 10:02:13] INFO: Optimizer - Training score at iteration 12200: {'loss': array(0.28789544105529785, dtype=float32), 'mse': array(0.5365588665008545, dtype=float32)}
[2016-04-11 10:02:41] INFO: Optimizer - Training score at iteration 12300: {'loss': array(0.2865902781486511, dtype=float32), 'mse': array(0.5353412628173828, dtype=float32)}
[2016-04-11 10:03:08] INFO: Optimizer - Training score at iteration 12400: {'loss': array(0.310097336769104, dtype=float32), 'mse': array(0.5568638443946838, dtype=float32)}
[2016-04-11 10:03:37] INFO: Optimizer - Training score at iteration 12500: {'loss': array(0.29807189106941223, dtype=float32), 'mse': array(0.5459595918655396, dtype=float32)}
[2016-04-11 10:03:58] INFO: Optimizer - Mean loss values for validation at iteration 12572 is: {'loss': 0.54975039, 'mse': 0.73810208}
[2016-04-11 10:04:06] INFO: Optimizer - Training score at iteration 12600: {'loss': array(0.307096391916275, dtype=float32), 'mse': array(0.5541628003120422, dtype=float32)}
[2016-04-11 10:04:34] INFO: Optimizer - Training score at iteration 12700: {'loss': array(0.2761175036430359, dtype=float32), 'mse': array(0.5254688262939453, dtype=float32)}
[2016-04-11 10:05:02] INFO: Optimizer - Training score at iteration 12800: {'loss': array(0.22946830093860626, dtype=float32), 'mse': array(0.47902849316596985, dtype=float32)}

In [4]:
# Rebuild the network graph (build_graph is defined in an earlier cell).
g = build_graph()
# Restore the parameters saved at training iteration 19000 (see log above).
g.load_weights("../data/alexnet_scale_1_iter_19000.zip")
# Compile the Theano inference functions; shapes are logged per node below.
g.compile()


[2016-04-11 14:05:09] INFO: Graph - Loading parameters from file '../data/alexnet_scale_1_iter_19000.zip'
[2016-04-11 14:05:09] INFO: Graph - Setting up graph
[2016-04-11 14:05:09] INFO: Node - data has shape (-1, 3, 228, 304)
[2016-04-11 14:05:09] INFO: Node - label has shape (-1, 1, 228, 304)
[2016-04-11 14:05:09] INFO: Node - conv_0 has shape (-1, 96, 55, 74)
[2016-04-11 14:05:09] INFO: Node - pool_label has shape (-1, 1, 57, 76)
[2016-04-11 14:05:09] INFO: Node - pool_0 has shape (-1, 96, 27, 36)
[2016-04-11 14:05:09] INFO: Node - lrn_0 has shape (-1, 96, 27, 36)
[2016-04-11 14:05:09] INFO: Node - conv_1 has shape (-1, 256, 27, 36)
[2016-04-11 14:05:09] INFO: Node - pool_1 has shape (-1, 256, 13, 17)
[2016-04-11 14:05:09] INFO: Node - lrn_1 has shape (-1, 256, 13, 17)
[2016-04-11 14:05:09] INFO: Node - conv_2 has shape (-1, 384, 13, 17)
[2016-04-11 14:05:09] INFO: Node - conv_3 has shape (-1, 384, 13, 17)
[2016-04-11 14:05:09] INFO: Node - conv_4 has shape (-1, 256, 13, 17)
[2016-04-11 14:05:09] INFO: Node - pool_4 has shape (-1, 256, 6, 8)
[2016-04-11 14:05:09] INFO: Node - flatten has shape (-1, 12288)
[2016-04-11 14:05:09] INFO: Node - fc_0 has shape (-1, 4096)
[2016-04-11 14:05:09] INFO: Node - dp_0 has shape (-1, 4096)
[2016-04-11 14:05:09] INFO: Node - fc_1 has shape (-1, 4332)
[2016-04-11 14:05:09] INFO: Node - reshape_0 has shape (-1, 1, 57, 76)
[2016-04-11 14:05:09] INFO: Node - loss has shape (1,)
[2016-04-11 14:05:09] INFO: Node - mse has shape (1,)
[2016-04-11 14:05:11] INFO: Graph - Invoking Theano compiler

In [5]:
import h5py, numpy as np

f = h5py.File("/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.hdf5")

b = int(f["images"].shape[0] * 0.9)
images = np.array(f["images"][b:])
depths = np.array(f["depths"][b:])
print images.shape
mean = np.load("/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.npy")


(374, 3, 240, 320)

In [10]:
%matplotlib inline
import matplotlib.pyplot as plt
from deepgraph.nn.core import Dropout
w = 304
h = 228
plot = True
idx = 100
diffs = []
Dropout.set_dp_off()
for image in images[100:120]:
    tmp = image.astype(np.float32)
    tmp -= mean
    cy = (tmp.shape[1] - h) // 2
    cx = (tmp.shape[2] - w) // 2
    crop = tmp[:,cy:cy+h, cx:cx+w]
    res = g.infer([crop.reshape((1,3,228,304))])["reshape_0"]
    res = res.squeeze()
    depth = depths[idx][cy:cy + h, cx:cx + w]
    depth = depth[::4,::4]
    if plot and idx % 5 == 0:
        
        plt.imshow(image.transpose((1,2,0)).astype(np.uint8))
        plt.show()
        plt.imshow(depth)
        plt.show()
        plt.imshow(res)
        plt.show()
        print "RMSE: " + str(np.sqrt(np.mean((res-depth)**2)))
    diffs.append(res - depth)
    
    idx += 1
    
diffs = np.array(diffs)
rmse = np.sqrt(np.mean(diffs ** 2))
print "Accumulated RMSE: " + str(rmse)


RMSE: 0.305881
RMSE: 0.608154
RMSE: 0.875155
RMSE: 0.521884
Accumulated RMSE: 0.601685

In [ ]: