In [4]:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
In [5]:
digits = datasets.load_digits()
In [25]:
# courtesy caffe devs
def vis_square(data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imshow(data, cmap = plt.get_cmap('gray') )
In [27]:
vis_square( digits.data[ 0:100 ].reshape(100,8,8) )
In [31]:
from scipy.ndimage.interpolation import rotate
i2 = rotate( input=digits.data[ 1 ].reshape( 8, 8 ), angle=-30. )
plt.imshow( i2, cmap = plt.get_cmap('gray') )
Out[31]:
<matplotlib.image.AxesImage at 0x7fd9386ee950>
In [30]:
%timeit rotate( input=digits.data[ 1 ].reshape( 8, 8 ), angle=30. )
The slowest run took 6.92 times longer than the fastest. This could mean that an intermediate result is being cached
10000 loops, best of 3: 99.5 µs per loop
In [34]:
from sklearn.cross_validation import train_test_split
# split dataset into train and test
X_train, X_test, y_train, y_test = train_test_split( digits.data,
digits.target,
test_size=0.33 )
In [74]:
import lmdb
import sys
sys.path.append( '/localhome/marcino/work/caffe/python' )
import caffe
env = lmdb.open( 'digits_train_lmdb', map_size=1000000000L )
NUM_OF_ROTATIONS = 16
rotations = np.random.uniform( low= -30., high=30., size=( X_train.shape[0], NUM_OF_ROTATIONS ) )
k = 0
for i in range( X_train.shape[0] ):
x_in = X_train[ i ].reshape( (8,8) )
# add original digit
datum = caffe.proto.caffe_pb2.Datum()
datum.channels = 1
datum.height = 8
datum.width = 8
datum.data = x_in.tostring()
datum.label = int( y_train[i] )
str_id = '{:08}'.format( k )
k += 1
with env.begin( write=True ) as txn:
txn.put( str_id.encode('ascii'), datum.SerializeToString() )
# add rotations
for j in range( rotations.shape[ 1 ] ):
rotated_x = rotate( input=x_in, angle=rotations[ i, j ] )
datum = caffe.proto.caffe_pb2.Datum()
datum.channels = 1
datum.height = 8
datum.width = 8
datum.data = rotated_x.tostring()
datum.label = int( y_train[i] )
str_id = '{:08}'.format( k )
k += 1
with env.begin( write=True ) as txn:
txn.put( str_id.encode('ascii'), datum.SerializeToString() )
print( env.stat() )
env.close()
env = lmdb.open( 'digits_test_lmdb', map_size=1000000000L )
k = 0
for i in range( X_test.shape[0] ):
# add original digit
datum = caffe.proto.caffe_pb2.Datum()
datum.channels = 1
datum.height = 8
datum.width = 8
datum.data = X_test[ i ].tostring()
datum.label = int( y_test[i] )
str_id = '{:08}'.format( k )
k += 1
with env.begin( write=True ) as txn:
txn.put( str_id.encode('ascii'), datum.SerializeToString() )
print( env.stat() )
env.close()
{'branch_pages': 26L, 'leaf_pages': 5451L, 'overflow_pages': 0L, 'psize': 4096L, 'depth': 3L, 'entries': 20451L}
{'branch_pages': 1L, 'leaf_pages': 99L, 'overflow_pages': 0L, 'psize': 4096L, 'depth': 2L, 'entries': 594L}
In [75]:
solver_txt = """
# The train/test net protocol buffer definition
net: "digits_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# the batch is 100 images, covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "digits"
# solver mode: CPU or GPU
solver_mode: GPU
"""
open( 'digits_solver.prototxt', 'wt' ).write( solver_txt )
digits_train_test = """
name: "digits"
layer {
name: "digits_data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.0625
}
data_param {
source: "digits_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "digits_data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.0625
}
data_param {
source: "digits_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
"""
open( 'digits_train_test.prototxt', 'wt' ).write( digits_train_test )
In [76]:
!./caffe/build/tools/caffe train -solver digits_solver.prototxt
I0811 20:54:40.812274 29555 caffe.cpp:113] Use GPU with device ID 0
I0811 20:54:41.175806 29555 caffe.cpp:121] Starting Optimization
I0811 20:54:41.175945 29555 solver.cpp:32] Initializing solver from parameters:
test_iter: 100
test_interval: 500
base_lr: 0.01
display: 100
max_iter: 10000
lr_policy: "inv"
gamma: 0.0001
power: 0.75
momentum: 0.9
weight_decay: 0.0005
snapshot: 5000
snapshot_prefix: "digits"
solver_mode: GPU
net: "digits_train_test.prototxt"
I0811 20:54:41.176013 29555 solver.cpp:70] Creating training net from net file: digits_train_test.prototxt
I0811 20:54:41.176491 29555 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer digits_data
I0811 20:54:41.176518 29555 net.cpp:257] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0811 20:54:41.176619 29555 net.cpp:42] Initializing net from parameters:
name: "digits"
state {
phase: TRAIN
}
layer {
name: "digits_data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.0625
}
data_param {
source: "digits_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I0811 20:54:41.177028 29555 layer_factory.hpp:74] Creating layer digits_data
I0811 20:54:41.177053 29555 net.cpp:84] Creating Layer digits_data
I0811 20:54:41.177067 29555 net.cpp:338] digits_data -> data
I0811 20:54:41.177105 29555 net.cpp:338] digits_data -> label
I0811 20:54:41.177121 29555 net.cpp:113] Setting up digits_data
I0811 20:54:41.177229 29555 db.cpp:34] Opened lmdb digits_train_lmdb
I0811 20:54:41.177278 29555 data_layer.cpp:67] output data size: 100,1,8,8
I0811 20:54:41.177428 29555 net.cpp:120] Top shape: 100 1 8 8 (6400)
I0811 20:54:41.177443 29555 net.cpp:120] Top shape: 100 (100)
I0811 20:54:41.177459 29555 layer_factory.hpp:74] Creating layer conv1
I0811 20:54:41.177477 29555 net.cpp:84] Creating Layer conv1
I0811 20:54:41.177490 29555 net.cpp:380] conv1 <- data
I0811 20:54:41.177561 29555 net.cpp:338] conv1 -> conv1
I0811 20:54:41.177584 29555 net.cpp:113] Setting up conv1
I0811 20:54:41.177994 29555 net.cpp:120] Top shape: 100 20 6 6 (72000)
I0811 20:54:41.178025 29555 layer_factory.hpp:74] Creating layer pool1
I0811 20:54:41.178040 29555 net.cpp:84] Creating Layer pool1
I0811 20:54:41.178050 29555 net.cpp:380] pool1 <- conv1
I0811 20:54:41.178062 29555 net.cpp:338] pool1 -> pool1
I0811 20:54:41.178076 29555 net.cpp:113] Setting up pool1
I0811 20:54:41.178100 29555 net.cpp:120] Top shape: 100 20 3 3 (18000)
I0811 20:54:41.178145 29555 layer_factory.hpp:74] Creating layer conv2
I0811 20:54:41.178165 29555 net.cpp:84] Creating Layer conv2
I0811 20:54:41.178174 29555 net.cpp:380] conv2 <- pool1
I0811 20:54:41.178205 29555 net.cpp:338] conv2 -> conv2
I0811 20:54:41.178220 29555 net.cpp:113] Setting up conv2
I0811 20:54:41.178503 29555 net.cpp:120] Top shape: 100 50 1 1 (5000)
I0811 20:54:41.178524 29555 layer_factory.hpp:74] Creating layer pool2
I0811 20:54:41.178539 29555 net.cpp:84] Creating Layer pool2
I0811 20:54:41.178549 29555 net.cpp:380] pool2 <- conv2
I0811 20:54:41.178560 29555 net.cpp:338] pool2 -> pool2
I0811 20:54:41.178572 29555 net.cpp:113] Setting up pool2
I0811 20:54:41.178586 29555 net.cpp:120] Top shape: 100 50 1 1 (5000)
I0811 20:54:41.178596 29555 layer_factory.hpp:74] Creating layer ip1
I0811 20:54:41.178614 29555 net.cpp:84] Creating Layer ip1
I0811 20:54:41.178624 29555 net.cpp:380] ip1 <- pool2
I0811 20:54:41.178639 29555 net.cpp:338] ip1 -> ip1
I0811 20:54:41.178655 29555 net.cpp:113] Setting up ip1
I0811 20:54:41.179036 29555 net.cpp:120] Top shape: 100 500 (50000)
I0811 20:54:41.179059 29555 layer_factory.hpp:74] Creating layer relu1
I0811 20:54:41.179071 29555 net.cpp:84] Creating Layer relu1
I0811 20:54:41.179081 29555 net.cpp:380] relu1 <- ip1
I0811 20:54:41.179095 29555 net.cpp:327] relu1 -> ip1 (in-place)
I0811 20:54:41.179107 29555 net.cpp:113] Setting up relu1
I0811 20:54:41.179121 29555 net.cpp:120] Top shape: 100 500 (50000)
I0811 20:54:41.179131 29555 layer_factory.hpp:74] Creating layer ip2
I0811 20:54:41.179143 29555 net.cpp:84] Creating Layer ip2
I0811 20:54:41.179152 29555 net.cpp:380] ip2 <- ip1
I0811 20:54:41.179167 29555 net.cpp:338] ip2 -> ip2
I0811 20:54:41.179180 29555 net.cpp:113] Setting up ip2
I0811 20:54:41.179265 29555 net.cpp:120] Top shape: 100 10 (1000)
I0811 20:54:41.179278 29555 layer_factory.hpp:74] Creating layer loss
I0811 20:54:41.179293 29555 net.cpp:84] Creating Layer loss
I0811 20:54:41.179302 29555 net.cpp:380] loss <- ip2
I0811 20:54:41.179312 29555 net.cpp:380] loss <- label
I0811 20:54:41.179328 29555 net.cpp:338] loss -> loss
I0811 20:54:41.179343 29555 net.cpp:113] Setting up loss
I0811 20:54:41.179358 29555 layer_factory.hpp:74] Creating layer loss
I0811 20:54:41.179389 29555 net.cpp:120] Top shape: (1)
I0811 20:54:41.179400 29555 net.cpp:122] with loss weight 1
I0811 20:54:41.179466 29555 net.cpp:167] loss needs backward computation.
I0811 20:54:41.179481 29555 net.cpp:167] ip2 needs backward computation.
I0811 20:54:41.179491 29555 net.cpp:167] relu1 needs backward computation.
I0811 20:54:41.179500 29555 net.cpp:167] ip1 needs backward computation.
I0811 20:54:41.179510 29555 net.cpp:167] pool2 needs backward computation.
I0811 20:54:41.179519 29555 net.cpp:167] conv2 needs backward computation.
I0811 20:54:41.179529 29555 net.cpp:167] pool1 needs backward computation.
I0811 20:54:41.179538 29555 net.cpp:167] conv1 needs backward computation.
I0811 20:54:41.179548 29555 net.cpp:169] digits_data does not need backward computation.
I0811 20:54:41.179556 29555 net.cpp:205] This network produces output loss
I0811 20:54:41.179574 29555 net.cpp:447] Collecting Learning Rate and Weight Decay.
I0811 20:54:41.179586 29555 net.cpp:217] Network initialization done.
I0811 20:54:41.179595 29555 net.cpp:218] Memory required for data: 830004
I0811 20:54:41.180055 29555 solver.cpp:154] Creating test net (#0) specified by net file: digits_train_test.prototxt
I0811 20:54:41.180095 29555 net.cpp:257] The NetState phase (1) differed from the phase (0) specified by a rule in layer digits_data
I0811 20:54:41.180223 29555 net.cpp:42] Initializing net from parameters:
name: "digits"
state {
phase: TEST
}
layer {
name: "digits_data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.0625
}
data_param {
source: "digits_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
I0811 20:54:41.180748 29555 layer_factory.hpp:74] Creating layer digits_data
I0811 20:54:41.180765 29555 net.cpp:84] Creating Layer digits_data
I0811 20:54:41.180778 29555 net.cpp:338] digits_data -> data
I0811 20:54:41.180793 29555 net.cpp:338] digits_data -> label
I0811 20:54:41.180806 29555 net.cpp:113] Setting up digits_data
I0811 20:54:41.180893 29555 db.cpp:34] Opened lmdb digits_test_lmdb
I0811 20:54:41.180933 29555 data_layer.cpp:67] output data size: 100,1,8,8
I0811 20:54:41.181000 29555 net.cpp:120] Top shape: 100 1 8 8 (6400)
I0811 20:54:41.181015 29555 net.cpp:120] Top shape: 100 (100)
I0811 20:54:41.181025 29555 layer_factory.hpp:74] Creating layer label_digits_data_1_split
I0811 20:54:41.181038 29555 net.cpp:84] Creating Layer label_digits_data_1_split
I0811 20:54:41.181048 29555 net.cpp:380] label_digits_data_1_split <- label
I0811 20:54:41.181061 29555 net.cpp:338] label_digits_data_1_split -> label_digits_data_1_split_0
I0811 20:54:41.181077 29555 net.cpp:338] label_digits_data_1_split -> label_digits_data_1_split_1
I0811 20:54:41.181089 29555 net.cpp:113] Setting up label_digits_data_1_split
I0811 20:54:41.181105 29555 net.cpp:120] Top shape: 100 (100)
I0811 20:54:41.181116 29555 net.cpp:120] Top shape: 100 (100)
I0811 20:54:41.181126 29555 layer_factory.hpp:74] Creating layer conv1
I0811 20:54:41.181145 29555 net.cpp:84] Creating Layer conv1
I0811 20:54:41.181155 29555 net.cpp:380] conv1 <- data
I0811 20:54:41.181167 29555 net.cpp:338] conv1 -> conv1
I0811 20:54:41.181181 29555 net.cpp:113] Setting up conv1
I0811 20:54:41.181210 29555 net.cpp:120] Top shape: 100 20 6 6 (72000)
I0811 20:54:41.181227 29555 layer_factory.hpp:74] Creating layer pool1
I0811 20:54:41.181241 29555 net.cpp:84] Creating Layer pool1
I0811 20:54:41.181252 29555 net.cpp:380] pool1 <- conv1
I0811 20:54:41.181263 29555 net.cpp:338] pool1 -> pool1
I0811 20:54:41.181278 29555 net.cpp:113] Setting up pool1
I0811 20:54:41.181293 29555 net.cpp:120] Top shape: 100 20 3 3 (18000)
I0811 20:54:41.181304 29555 layer_factory.hpp:74] Creating layer conv2
I0811 20:54:41.181320 29555 net.cpp:84] Creating Layer conv2
I0811 20:54:41.181330 29555 net.cpp:380] conv2 <- pool1
I0811 20:54:41.181342 29555 net.cpp:338] conv2 -> conv2
I0811 20:54:41.181355 29555 net.cpp:113] Setting up conv2
I0811 20:54:41.181486 29555 net.cpp:120] Top shape: 100 50 1 1 (5000)
I0811 20:54:41.181501 29555 layer_factory.hpp:74] Creating layer pool2
I0811 20:54:41.181515 29555 net.cpp:84] Creating Layer pool2
I0811 20:54:41.181538 29555 net.cpp:380] pool2 <- conv2
I0811 20:54:41.181551 29555 net.cpp:338] pool2 -> pool2
I0811 20:54:41.181565 29555 net.cpp:113] Setting up pool2
I0811 20:54:41.181581 29555 net.cpp:120] Top shape: 100 50 1 1 (5000)
I0811 20:54:41.181591 29555 layer_factory.hpp:74] Creating layer ip1
I0811 20:54:41.181605 29555 net.cpp:84] Creating Layer ip1
I0811 20:54:41.181614 29555 net.cpp:380] ip1 <- pool2
I0811 20:54:41.181627 29555 net.cpp:338] ip1 -> ip1
I0811 20:54:41.181640 29555 net.cpp:113] Setting up ip1
I0811 20:54:41.181951 29555 net.cpp:120] Top shape: 100 500 (50000)
I0811 20:54:41.181972 29555 layer_factory.hpp:74] Creating layer relu1
I0811 20:54:41.181984 29555 net.cpp:84] Creating Layer relu1
I0811 20:54:41.181993 29555 net.cpp:380] relu1 <- ip1
I0811 20:54:41.182008 29555 net.cpp:327] relu1 -> ip1 (in-place)
I0811 20:54:41.182020 29555 net.cpp:113] Setting up relu1
I0811 20:54:41.182032 29555 net.cpp:120] Top shape: 100 500 (50000)
I0811 20:54:41.182042 29555 layer_factory.hpp:74] Creating layer ip2
I0811 20:54:41.182055 29555 net.cpp:84] Creating Layer ip2
I0811 20:54:41.182065 29555 net.cpp:380] ip2 <- ip1
I0811 20:54:41.182080 29555 net.cpp:338] ip2 -> ip2
I0811 20:54:41.182093 29555 net.cpp:113] Setting up ip2
I0811 20:54:41.182173 29555 net.cpp:120] Top shape: 100 10 (1000)
I0811 20:54:41.182188 29555 layer_factory.hpp:74] Creating layer ip2_ip2_0_split
I0811 20:54:41.182199 29555 net.cpp:84] Creating Layer ip2_ip2_0_split
I0811 20:54:41.182209 29555 net.cpp:380] ip2_ip2_0_split <- ip2
I0811 20:54:41.182222 29555 net.cpp:338] ip2_ip2_0_split -> ip2_ip2_0_split_0
I0811 20:54:41.182236 29555 net.cpp:338] ip2_ip2_0_split -> ip2_ip2_0_split_1
I0811 20:54:41.182248 29555 net.cpp:113] Setting up ip2_ip2_0_split
I0811 20:54:41.182261 29555 net.cpp:120] Top shape: 100 10 (1000)
I0811 20:54:41.182272 29555 net.cpp:120] Top shape: 100 10 (1000)
I0811 20:54:41.182282 29555 layer_factory.hpp:74] Creating layer accuracy
I0811 20:54:41.182297 29555 net.cpp:84] Creating Layer accuracy
I0811 20:54:41.182307 29555 net.cpp:380] accuracy <- ip2_ip2_0_split_0
I0811 20:54:41.182317 29555 net.cpp:380] accuracy <- label_digits_data_1_split_0
I0811 20:54:41.182332 29555 net.cpp:338] accuracy -> accuracy
I0811 20:54:41.182346 29555 net.cpp:113] Setting up accuracy
I0811 20:54:41.182363 29555 net.cpp:120] Top shape: (1)
I0811 20:54:41.182373 29555 layer_factory.hpp:74] Creating layer loss
I0811 20:54:41.182384 29555 net.cpp:84] Creating Layer loss
I0811 20:54:41.182394 29555 net.cpp:380] loss <- ip2_ip2_0_split_1
I0811 20:54:41.182404 29555 net.cpp:380] loss <- label_digits_data_1_split_1
I0811 20:54:41.182416 29555 net.cpp:338] loss -> loss
I0811 20:54:41.182428 29555 net.cpp:113] Setting up loss
I0811 20:54:41.182440 29555 layer_factory.hpp:74] Creating layer loss
I0811 20:54:41.182464 29555 net.cpp:120] Top shape: (1)
I0811 20:54:41.182476 29555 net.cpp:122] with loss weight 1
I0811 20:54:41.182490 29555 net.cpp:167] loss needs backward computation.
I0811 20:54:41.182502 29555 net.cpp:169] accuracy does not need backward computation.
I0811 20:54:41.182512 29555 net.cpp:167] ip2_ip2_0_split needs backward computation.
I0811 20:54:41.182523 29555 net.cpp:167] ip2 needs backward computation.
I0811 20:54:41.182533 29555 net.cpp:167] relu1 needs backward computation.
I0811 20:54:41.182541 29555 net.cpp:167] ip1 needs backward computation.
I0811 20:54:41.182551 29555 net.cpp:167] pool2 needs backward computation.
I0811 20:54:41.182560 29555 net.cpp:167] conv2 needs backward computation.
I0811 20:54:41.182570 29555 net.cpp:167] pool1 needs backward computation.
I0811 20:54:41.182580 29555 net.cpp:167] conv1 needs backward computation.
I0811 20:54:41.182590 29555 net.cpp:169] label_digits_data_1_split does not need backward computation.
I0811 20:54:41.182600 29555 net.cpp:169] digits_data does not need backward computation.
I0811 20:54:41.182610 29555 net.cpp:205] This network produces output accuracy
I0811 20:54:41.182620 29555 net.cpp:205] This network produces output loss
I0811 20:54:41.182651 29555 net.cpp:447] Collecting Learning Rate and Weight Decay.
I0811 20:54:41.182663 29555 net.cpp:217] Network initialization done.
I0811 20:54:41.182672 29555 net.cpp:218] Memory required for data: 838808
I0811 20:54:41.182739 29555 solver.cpp:42] Solver scaffolding done.
I0811 20:54:41.182773 29555 solver.cpp:222] Solving digits
I0811 20:54:41.182783 29555 solver.cpp:223] Learning Rate Policy: inv
I0811 20:54:41.182796 29555 solver.cpp:266] Iteration 0, Testing net (#0)
I0811 20:54:41.685890 29555 solver.cpp:315] Test net output #0: accuracy = 0.1035
I0811 20:54:41.685941 29555 solver.cpp:315] Test net output #1: loss = 2.74477 (* 1 = 2.74477 loss)
I0811 20:54:41.694865 29555 solver.cpp:189] Iteration 0, loss = 2.70895
I0811 20:54:41.694905 29555 solver.cpp:204] Train net output #0: loss = 2.70895 (* 1 = 2.70895 loss)
I0811 20:54:41.694932 29555 solver.cpp:464] Iteration 0, lr = 0.01
I0811 20:54:42.554281 29555 solver.cpp:189] Iteration 100, loss = 2.43753
I0811 20:54:42.554334 29555 solver.cpp:204] Train net output #0: loss = 2.43753 (* 1 = 2.43753 loss)
I0811 20:54:42.554349 29555 solver.cpp:464] Iteration 100, lr = 0.00992565
I0811 20:54:43.412153 29555 solver.cpp:189] Iteration 200, loss = 2.29525
I0811 20:54:43.412204 29555 solver.cpp:204] Train net output #0: loss = 2.29525 (* 1 = 2.29525 loss)
I0811 20:54:43.412219 29555 solver.cpp:464] Iteration 200, lr = 0.00985258
I0811 20:54:44.270221 29555 solver.cpp:189] Iteration 300, loss = 2.29265
I0811 20:54:44.270274 29555 solver.cpp:204] Train net output #0: loss = 2.29265 (* 1 = 2.29265 loss)
I0811 20:54:44.270288 29555 solver.cpp:464] Iteration 300, lr = 0.00978075
I0811 20:54:45.128087 29555 solver.cpp:189] Iteration 400, loss = 2.27222
I0811 20:54:45.128137 29555 solver.cpp:204] Train net output #0: loss = 2.27222 (* 1 = 2.27222 loss)
I0811 20:54:45.128152 29555 solver.cpp:464] Iteration 400, lr = 0.00971013
I0811 20:54:45.977355 29555 solver.cpp:266] Iteration 500, Testing net (#0)
I0811 20:54:46.453042 29555 solver.cpp:315] Test net output #0: accuracy = 0.2712
I0811 20:54:46.453084 29555 solver.cpp:315] Test net output #1: loss = 1.97285 (* 1 = 1.97285 loss)
I0811 20:54:46.461392 29555 solver.cpp:189] Iteration 500, loss = 2.16202
I0811 20:54:46.461428 29555 solver.cpp:204] Train net output #0: loss = 2.16202 (* 1 = 2.16202 loss)
I0811 20:54:46.461442 29555 solver.cpp:464] Iteration 500, lr = 0.00964069
I0811 20:54:47.321872 29555 solver.cpp:189] Iteration 600, loss = 2.33224
I0811 20:54:47.321926 29555 solver.cpp:204] Train net output #0: loss = 2.33224 (* 1 = 2.33224 loss)
I0811 20:54:47.321940 29555 solver.cpp:464] Iteration 600, lr = 0.0095724
I0811 20:54:48.180279 29555 solver.cpp:189] Iteration 700, loss = 2.3451
I0811 20:54:48.180330 29555 solver.cpp:204] Train net output #0: loss = 2.3451 (* 1 = 2.3451 loss)
I0811 20:54:48.180344 29555 solver.cpp:464] Iteration 700, lr = 0.00950522
I0811 20:54:49.039413 29555 solver.cpp:189] Iteration 800, loss = 2.28788
I0811 20:54:49.039472 29555 solver.cpp:204] Train net output #0: loss = 2.28788 (* 1 = 2.28788 loss)
I0811 20:54:49.039487 29555 solver.cpp:464] Iteration 800, lr = 0.00943913
I0811 20:54:49.898066 29555 solver.cpp:189] Iteration 900, loss = 2.20559
I0811 20:54:49.898118 29555 solver.cpp:204] Train net output #0: loss = 2.20559 (* 1 = 2.20559 loss)
I0811 20:54:49.898133 29555 solver.cpp:464] Iteration 900, lr = 0.00937411
I0811 20:54:50.748605 29555 solver.cpp:266] Iteration 1000, Testing net (#0)
I0811 20:54:51.208335 29555 solver.cpp:315] Test net output #0: accuracy = 0.325
I0811 20:54:51.208377 29555 solver.cpp:315] Test net output #1: loss = 1.84973 (* 1 = 1.84973 loss)
I0811 20:54:51.216523 29555 solver.cpp:189] Iteration 1000, loss = 2.27287
I0811 20:54:51.216559 29555 solver.cpp:204] Train net output #0: loss = 2.27287 (* 1 = 2.27287 loss)
I0811 20:54:51.216574 29555 solver.cpp:464] Iteration 1000, lr = 0.00931012
I0811 20:54:52.076309 29555 solver.cpp:189] Iteration 1100, loss = 2.25423
I0811 20:54:52.076359 29555 solver.cpp:204] Train net output #0: loss = 2.25423 (* 1 = 2.25423 loss)
I0811 20:54:52.076406 29555 solver.cpp:464] Iteration 1100, lr = 0.00924715
I0811 20:54:52.935225 29555 solver.cpp:189] Iteration 1200, loss = 2.2991
I0811 20:54:52.935277 29555 solver.cpp:204] Train net output #0: loss = 2.2991 (* 1 = 2.2991 loss)
I0811 20:54:52.935292 29555 solver.cpp:464] Iteration 1200, lr = 0.00918515
I0811 20:54:53.794421 29555 solver.cpp:189] Iteration 1300, loss = 2.21525
I0811 20:54:53.794472 29555 solver.cpp:204] Train net output #0: loss = 2.21525 (* 1 = 2.21525 loss)
I0811 20:54:53.794486 29555 solver.cpp:464] Iteration 1300, lr = 0.00912412
I0811 20:54:54.652734 29555 solver.cpp:189] Iteration 1400, loss = 2.31546
I0811 20:54:54.652786 29555 solver.cpp:204] Train net output #0: loss = 2.31546 (* 1 = 2.31546 loss)
I0811 20:54:54.652799 29555 solver.cpp:464] Iteration 1400, lr = 0.00906403
I0811 20:54:55.503060 29555 solver.cpp:266] Iteration 1500, Testing net (#0)
I0811 20:54:55.962208 29555 solver.cpp:315] Test net output #0: accuracy = 0.3519
I0811 20:54:55.962251 29555 solver.cpp:315] Test net output #1: loss = 1.80561 (* 1 = 1.80561 loss)
I0811 20:54:55.970348 29555 solver.cpp:189] Iteration 1500, loss = 2.23058
I0811 20:54:55.970386 29555 solver.cpp:204] Train net output #0: loss = 2.23058 (* 1 = 2.23058 loss)
I0811 20:54:55.970399 29555 solver.cpp:464] Iteration 1500, lr = 0.00900485
I0811 20:54:56.832316 29555 solver.cpp:189] Iteration 1600, loss = 2.14683
I0811 20:54:56.832368 29555 solver.cpp:204] Train net output #0: loss = 2.14683 (* 1 = 2.14683 loss)
I0811 20:54:56.832382 29555 solver.cpp:464] Iteration 1600, lr = 0.00894657
I0811 20:54:57.691293 29555 solver.cpp:189] Iteration 1700, loss = 2.2434
I0811 20:54:57.691345 29555 solver.cpp:204] Train net output #0: loss = 2.2434 (* 1 = 2.2434 loss)
I0811 20:54:57.691359 29555 solver.cpp:464] Iteration 1700, lr = 0.00888916
I0811 20:54:58.550518 29555 solver.cpp:189] Iteration 1800, loss = 2.28233
I0811 20:54:58.550568 29555 solver.cpp:204] Train net output #0: loss = 2.28233 (* 1 = 2.28233 loss)
I0811 20:54:58.550583 29555 solver.cpp:464] Iteration 1800, lr = 0.0088326
I0811 20:54:59.409492 29555 solver.cpp:189] Iteration 1900, loss = 2.15489
I0811 20:54:59.409539 29555 solver.cpp:204] Train net output #0: loss = 2.15489 (* 1 = 2.15489 loss)
I0811 20:54:59.409554 29555 solver.cpp:464] Iteration 1900, lr = 0.00877687
I0811 20:55:00.259659 29555 solver.cpp:266] Iteration 2000, Testing net (#0)
I0811 20:55:00.720285 29555 solver.cpp:315] Test net output #0: accuracy = 0.369
I0811 20:55:00.720341 29555 solver.cpp:315] Test net output #1: loss = 1.75725 (* 1 = 1.75725 loss)
I0811 20:55:00.728525 29555 solver.cpp:189] Iteration 2000, loss = 2.17241
I0811 20:55:00.728562 29555 solver.cpp:204] Train net output #0: loss = 2.17241 (* 1 = 2.17241 loss)
I0811 20:55:00.728579 29555 solver.cpp:464] Iteration 2000, lr = 0.00872196
I0811 20:55:01.588526 29555 solver.cpp:189] Iteration 2100, loss = 2.1433
I0811 20:55:01.588578 29555 solver.cpp:204] Train net output #0: loss = 2.1433 (* 1 = 2.1433 loss)
I0811 20:55:01.588593 29555 solver.cpp:464] Iteration 2100, lr = 0.00866784
I0811 20:55:02.446952 29555 solver.cpp:189] Iteration 2200, loss = 2.27747
I0811 20:55:02.447002 29555 solver.cpp:204] Train net output #0: loss = 2.27747 (* 1 = 2.27747 loss)
I0811 20:55:02.447017 29555 solver.cpp:464] Iteration 2200, lr = 0.0086145
I0811 20:55:03.305249 29555 solver.cpp:189] Iteration 2300, loss = 2.11908
I0811 20:55:03.305302 29555 solver.cpp:204] Train net output #0: loss = 2.11908 (* 1 = 2.11908 loss)
I0811 20:55:03.305316 29555 solver.cpp:464] Iteration 2300, lr = 0.00856192
I0811 20:55:04.163856 29555 solver.cpp:189] Iteration 2400, loss = 2.16381
I0811 20:55:04.163908 29555 solver.cpp:204] Train net output #0: loss = 2.16381 (* 1 = 2.16381 loss)
I0811 20:55:04.163923 29555 solver.cpp:464] Iteration 2400, lr = 0.00851008
I0811 20:55:05.014049 29555 solver.cpp:266] Iteration 2500, Testing net (#0)
I0811 20:55:05.473345 29555 solver.cpp:315] Test net output #0: accuracy = 0.386
I0811 20:55:05.473422 29555 solver.cpp:315] Test net output #1: loss = 1.80856 (* 1 = 1.80856 loss)
I0811 20:55:05.481649 29555 solver.cpp:189] Iteration 2500, loss = 2.10935
I0811 20:55:05.481685 29555 solver.cpp:204] Train net output #0: loss = 2.10935 (* 1 = 2.10935 loss)
I0811 20:55:05.481699 29555 solver.cpp:464] Iteration 2500, lr = 0.00845897
I0811 20:55:06.340351 29555 solver.cpp:189] Iteration 2600, loss = 2.02131
I0811 20:55:06.340404 29555 solver.cpp:204] Train net output #0: loss = 2.02131 (* 1 = 2.02131 loss)
I0811 20:55:06.340420 29555 solver.cpp:464] Iteration 2600, lr = 0.00840857
I0811 20:55:07.198812 29555 solver.cpp:189] Iteration 2700, loss = 2.06152
I0811 20:55:07.198864 29555 solver.cpp:204] Train net output #0: loss = 2.06152 (* 1 = 2.06152 loss)
I0811 20:55:07.198879 29555 solver.cpp:464] Iteration 2700, lr = 0.00835886
I0811 20:55:08.057360 29555 solver.cpp:189] Iteration 2800, loss = 2.09238
I0811 20:55:08.057413 29555 solver.cpp:204] Train net output #0: loss = 2.09238 (* 1 = 2.09238 loss)
I0811 20:55:08.057428 29555 solver.cpp:464] Iteration 2800, lr = 0.00830984
I0811 20:55:08.916071 29555 solver.cpp:189] Iteration 2900, loss = 2.19304
I0811 20:55:08.916122 29555 solver.cpp:204] Train net output #0: loss = 2.19304 (* 1 = 2.19304 loss)
I0811 20:55:08.916137 29555 solver.cpp:464] Iteration 2900, lr = 0.00826148
I0811 20:55:09.766052 29555 solver.cpp:266] Iteration 3000, Testing net (#0)
I0811 20:55:10.222827 29555 solver.cpp:315] Test net output #0: accuracy = 0.3438
I0811 20:55:10.222870 29555 solver.cpp:315] Test net output #1: loss = 1.74611 (* 1 = 1.74611 loss)
I0811 20:55:10.231190 29555 solver.cpp:189] Iteration 3000, loss = 2.02648
I0811 20:55:10.231230 29555 solver.cpp:204] Train net output #0: loss = 2.02648 (* 1 = 2.02648 loss)
I0811 20:55:10.231245 29555 solver.cpp:464] Iteration 3000, lr = 0.00821377
I0811 20:55:11.090653 29555 solver.cpp:189] Iteration 3100, loss = 2.25156
I0811 20:55:11.091014 29555 solver.cpp:204] Train net output #0: loss = 2.25156 (* 1 = 2.25156 loss)
I0811 20:55:11.091029 29555 solver.cpp:464] Iteration 3100, lr = 0.0081667
I0811 20:55:11.949393 29555 solver.cpp:189] Iteration 3200, loss = 2.22924
I0811 20:55:11.949445 29555 solver.cpp:204] Train net output #0: loss = 2.22924 (* 1 = 2.22924 loss)
I0811 20:55:11.949458 29555 solver.cpp:464] Iteration 3200, lr = 0.00812025
I0811 20:55:12.808414 29555 solver.cpp:189] Iteration 3300, loss = 2.13704
I0811 20:55:12.808466 29555 solver.cpp:204] Train net output #0: loss = 2.13704 (* 1 = 2.13704 loss)
I0811 20:55:12.808482 29555 solver.cpp:464] Iteration 3300, lr = 0.00807442
I0811 20:55:13.667326 29555 solver.cpp:189] Iteration 3400, loss = 2.01636
I0811 20:55:13.667379 29555 solver.cpp:204] Train net output #0: loss = 2.01636 (* 1 = 2.01636 loss)
I0811 20:55:13.667393 29555 solver.cpp:464] Iteration 3400, lr = 0.00802918
I0811 20:55:14.517276 29555 solver.cpp:266] Iteration 3500, Testing net (#0)
I0811 20:55:14.981115 29555 solver.cpp:315] Test net output #0: accuracy = 0.3871
I0811 20:55:14.981160 29555 solver.cpp:315] Test net output #1: loss = 1.72104 (* 1 = 1.72104 loss)
I0811 20:55:14.989481 29555 solver.cpp:189] Iteration 3500, loss = 2.15729
I0811 20:55:14.989518 29555 solver.cpp:204] Train net output #0: loss = 2.15729 (* 1 = 2.15729 loss)
I0811 20:55:14.989532 29555 solver.cpp:464] Iteration 3500, lr = 0.00798454
I0811 20:55:15.849408 29555 solver.cpp:189] Iteration 3600, loss = 2.04148
I0811 20:55:15.849459 29555 solver.cpp:204] Train net output #0: loss = 2.04148 (* 1 = 2.04148 loss)
I0811 20:55:15.849473 29555 solver.cpp:464] Iteration 3600, lr = 0.00794046
I0811 20:55:16.707579 29555 solver.cpp:189] Iteration 3700, loss = 2.04606
I0811 20:55:16.707630 29555 solver.cpp:204] Train net output #0: loss = 2.04606 (* 1 = 2.04606 loss)
I0811 20:55:16.707645 29555 solver.cpp:464] Iteration 3700, lr = 0.00789695
I0811 20:55:17.565687 29555 solver.cpp:189] Iteration 3800, loss = 2.07367
I0811 20:55:17.565737 29555 solver.cpp:204] Train net output #0: loss = 2.07367 (* 1 = 2.07367 loss)
I0811 20:55:17.565752 29555 solver.cpp:464] Iteration 3800, lr = 0.007854
I0811 20:55:18.423382 29555 solver.cpp:189] Iteration 3900, loss = 2.14974
I0811 20:55:18.423429 29555 solver.cpp:204] Train net output #0: loss = 2.14974 (* 1 = 2.14974 loss)
I0811 20:55:18.423446 29555 solver.cpp:464] Iteration 3900, lr = 0.00781158
I0811 20:55:19.273260 29555 solver.cpp:266] Iteration 4000, Testing net (#0)
I0811 20:55:19.736945 29555 solver.cpp:315] Test net output #0: accuracy = 0.4112
I0811 20:55:19.736989 29555 solver.cpp:315] Test net output #1: loss = 1.68489 (* 1 = 1.68489 loss)
I0811 20:55:19.745043 29555 solver.cpp:189] Iteration 4000, loss = 2.10444
I0811 20:55:19.745079 29555 solver.cpp:204] Train net output #0: loss = 2.10444 (* 1 = 2.10444 loss)
I0811 20:55:19.745093 29555 solver.cpp:464] Iteration 4000, lr = 0.0077697
I0811 20:55:20.604315 29555 solver.cpp:189] Iteration 4100, loss = 2.11185
I0811 20:55:20.604367 29555 solver.cpp:204] Train net output #0: loss = 2.11185 (* 1 = 2.11185 loss)
I0811 20:55:20.604382 29555 solver.cpp:464] Iteration 4100, lr = 0.00772833
I0811 20:55:21.463248 29555 solver.cpp:189] Iteration 4200, loss = 2.07392
I0811 20:55:21.463301 29555 solver.cpp:204] Train net output #0: loss = 2.07392 (* 1 = 2.07392 loss)
I0811 20:55:21.463315 29555 solver.cpp:464] Iteration 4200, lr = 0.00768748
I0811 20:55:22.321562 29555 solver.cpp:189] Iteration 4300, loss = 2.15033
I0811 20:55:22.321614 29555 solver.cpp:204] Train net output #0: loss = 2.15033 (* 1 = 2.15033 loss)
I0811 20:55:22.321630 29555 solver.cpp:464] Iteration 4300, lr = 0.00764712
I0811 20:55:23.179580 29555 solver.cpp:189] Iteration 4400, loss = 2.10429
I0811 20:55:23.179630 29555 solver.cpp:204] Train net output #0: loss = 2.10429 (* 1 = 2.10429 loss)
I0811 20:55:23.179643 29555 solver.cpp:464] Iteration 4400, lr = 0.00760726
I0811 20:55:24.030283 29555 solver.cpp:266] Iteration 4500, Testing net (#0)
I0811 20:55:24.488862 29555 solver.cpp:315] Test net output #0: accuracy = 0.3803
I0811 20:55:24.488903 29555 solver.cpp:315] Test net output #1: loss = 1.77492 (* 1 = 1.77492 loss)
I0811 20:55:24.497139 29555 solver.cpp:189] Iteration 4500, loss = 2.12447
I0811 20:55:24.497176 29555 solver.cpp:204] Train net output #0: loss = 2.12447 (* 1 = 2.12447 loss)
I0811 20:55:24.497190 29555 solver.cpp:464] Iteration 4500, lr = 0.00756788
I0811 20:55:25.355633 29555 solver.cpp:189] Iteration 4600, loss = 2.03514
I0811 20:55:25.355685 29555 solver.cpp:204] Train net output #0: loss = 2.03514 (* 1 = 2.03514 loss)
I0811 20:55:25.355700 29555 solver.cpp:464] Iteration 4600, lr = 0.00752897
I0811 20:55:26.214128 29555 solver.cpp:189] Iteration 4700, loss = 2.07297
I0811 20:55:26.214180 29555 solver.cpp:204] Train net output #0: loss = 2.07297 (* 1 = 2.07297 loss)
I0811 20:55:26.214195 29555 solver.cpp:464] Iteration 4700, lr = 0.00749052
I0811 20:55:27.072558 29555 solver.cpp:189] Iteration 4800, loss = 2.02426
I0811 20:55:27.072608 29555 solver.cpp:204] Train net output #0: loss = 2.02426 (* 1 = 2.02426 loss)
I0811 20:55:27.072623 29555 solver.cpp:464] Iteration 4800, lr = 0.00745253
I0811 20:55:27.931026 29555 solver.cpp:189] Iteration 4900, loss = 2.17643
I0811 20:55:27.931077 29555 solver.cpp:204] Train net output #0: loss = 2.17643 (* 1 = 2.17643 loss)
I0811 20:55:27.931092 29555 solver.cpp:464] Iteration 4900, lr = 0.00741498
I0811 20:55:28.781815 29555 solver.cpp:334] Snapshotting to digits_iter_5000.caffemodel
I0811 20:55:28.782841 29555 solver.cpp:342] Snapshotting solver state to digits_iter_5000.solverstate
I0811 20:55:28.783231 29555 solver.cpp:266] Iteration 5000, Testing net (#0)
I0811 20:55:29.240682 29555 solver.cpp:315] Test net output #0: accuracy = 0.3821
I0811 20:55:29.240723 29555 solver.cpp:315] Test net output #1: loss = 1.75909 (* 1 = 1.75909 loss)
I0811 20:55:29.249155 29555 solver.cpp:189] Iteration 5000, loss = 1.86019
I0811 20:55:29.249192 29555 solver.cpp:204] Train net output #0: loss = 1.86019 (* 1 = 1.86019 loss)
I0811 20:55:29.249207 29555 solver.cpp:464] Iteration 5000, lr = 0.00737788
I0811 20:55:30.107553 29555 solver.cpp:189] Iteration 5100, loss = 2.01188
I0811 20:55:30.107605 29555 solver.cpp:204] Train net output #0: loss = 2.01188 (* 1 = 2.01188 loss)
I0811 20:55:30.107620 29555 solver.cpp:464] Iteration 5100, lr = 0.0073412
I0811 20:55:30.965950 29555 solver.cpp:189] Iteration 5200, loss = 2.17599
I0811 20:55:30.966001 29555 solver.cpp:204] Train net output #0: loss = 2.17599 (* 1 = 2.17599 loss)
I0811 20:55:30.966015 29555 solver.cpp:464] Iteration 5200, lr = 0.00730495
I0811 20:55:31.824430 29555 solver.cpp:189] Iteration 5300, loss = 2.07408
I0811 20:55:31.824483 29555 solver.cpp:204] Train net output #0: loss = 2.07408 (* 1 = 2.07408 loss)
I0811 20:55:31.824498 29555 solver.cpp:464] Iteration 5300, lr = 0.00726911
I0811 20:55:32.682723 29555 solver.cpp:189] Iteration 5400, loss = 2.15006
I0811 20:55:32.682775 29555 solver.cpp:204] Train net output #0: loss = 2.15006 (* 1 = 2.15006 loss)
I0811 20:55:32.682790 29555 solver.cpp:464] Iteration 5400, lr = 0.00723368
I0811 20:55:33.533351 29555 solver.cpp:266] Iteration 5500, Testing net (#0)
I0811 20:55:33.991721 29555 solver.cpp:315] Test net output #0: accuracy = 0.3701
I0811 20:55:33.991765 29555 solver.cpp:315] Test net output #1: loss = 1.7315 (* 1 = 1.7315 loss)
I0811 20:55:33.999974 29555 solver.cpp:189] Iteration 5500, loss = 1.99779
I0811 20:55:34.000007 29555 solver.cpp:204] Train net output #0: loss = 1.99779 (* 1 = 1.99779 loss)
I0811 20:55:34.000021 29555 solver.cpp:464] Iteration 5500, lr = 0.00719865
I0811 20:55:34.858438 29555 solver.cpp:189] Iteration 5600, loss = 2.12441
I0811 20:55:34.858489 29555 solver.cpp:204] Train net output #0: loss = 2.12441 (* 1 = 2.12441 loss)
I0811 20:55:34.858503 29555 solver.cpp:464] Iteration 5600, lr = 0.00716402
I0811 20:55:35.717233 29555 solver.cpp:189] Iteration 5700, loss = 2.1342
I0811 20:55:35.717286 29555 solver.cpp:204] Train net output #0: loss = 2.1342 (* 1 = 2.1342 loss)
I0811 20:55:35.717336 29555 solver.cpp:464] Iteration 5700, lr = 0.00712977
I0811 20:55:36.576200 29555 solver.cpp:189] Iteration 5800, loss = 2.10612
I0811 20:55:36.576251 29555 solver.cpp:204] Train net output #0: loss = 2.10612 (* 1 = 2.10612 loss)
I0811 20:55:36.576266 29555 solver.cpp:464] Iteration 5800, lr = 0.0070959
I0811 20:55:37.434979 29555 solver.cpp:189] Iteration 5900, loss = 1.99698
I0811 20:55:37.435031 29555 solver.cpp:204] Train net output #0: loss = 1.99698 (* 1 = 1.99698 loss)
I0811 20:55:37.435045 29555 solver.cpp:464] Iteration 5900, lr = 0.0070624
I0811 20:55:38.285215 29555 solver.cpp:266] Iteration 6000, Testing net (#0)
I0811 20:55:38.748976 29555 solver.cpp:315] Test net output #0: accuracy = 0.356
I0811 20:55:38.749017 29555 solver.cpp:315] Test net output #1: loss = 1.7347 (* 1 = 1.7347 loss)
I0811 20:55:38.757190 29555 solver.cpp:189] Iteration 6000, loss = 1.96201
I0811 20:55:38.757226 29555 solver.cpp:204] Train net output #0: loss = 1.96201 (* 1 = 1.96201 loss)
I0811 20:55:38.757241 29555 solver.cpp:464] Iteration 6000, lr = 0.00702927
I0811 20:55:39.615289 29555 solver.cpp:189] Iteration 6100, loss = 1.83075
I0811 20:55:39.615342 29555 solver.cpp:204] Train net output #0: loss = 1.83075 (* 1 = 1.83075 loss)
I0811 20:55:39.615357 29555 solver.cpp:464] Iteration 6100, lr = 0.0069965
I0811 20:55:40.473820 29555 solver.cpp:189] Iteration 6200, loss = 2.03572
I0811 20:55:40.473870 29555 solver.cpp:204] Train net output #0: loss = 2.03572 (* 1 = 2.03572 loss)
I0811 20:55:40.473884 29555 solver.cpp:464] Iteration 6200, lr = 0.00696408
I0811 20:55:41.333091 29555 solver.cpp:189] Iteration 6300, loss = 2.0723
I0811 20:55:41.333348 29555 solver.cpp:204] Train net output #0: loss = 2.0723 (* 1 = 2.0723 loss)
I0811 20:55:41.333364 29555 solver.cpp:464] Iteration 6300, lr = 0.00693201
I0811 20:55:42.191498 29555 solver.cpp:189] Iteration 6400, loss = 2.10261
I0811 20:55:42.191548 29555 solver.cpp:204] Train net output #0: loss = 2.10261 (* 1 = 2.10261 loss)
I0811 20:55:42.191563 29555 solver.cpp:464] Iteration 6400, lr = 0.00690029
I0811 20:55:43.041978 29555 solver.cpp:266] Iteration 6500, Testing net (#0)
I0811 20:55:43.500319 29555 solver.cpp:315] Test net output #0: accuracy = 0.3951
I0811 20:55:43.500360 29555 solver.cpp:315] Test net output #1: loss = 1.70358 (* 1 = 1.70358 loss)
I0811 20:55:43.508532 29555 solver.cpp:189] Iteration 6500, loss = 1.88797
I0811 20:55:43.508568 29555 solver.cpp:204] Train net output #0: loss = 1.88797 (* 1 = 1.88797 loss)
I0811 20:55:43.508580 29555 solver.cpp:464] Iteration 6500, lr = 0.0068689
I0811 20:55:44.367552 29555 solver.cpp:189] Iteration 6600, loss = 1.85622
I0811 20:55:44.367605 29555 solver.cpp:204] Train net output #0: loss = 1.85622 (* 1 = 1.85622 loss)
I0811 20:55:44.367619 29555 solver.cpp:464] Iteration 6600, lr = 0.00683784
I0811 20:55:45.225965 29555 solver.cpp:189] Iteration 6700, loss = 1.99971
I0811 20:55:45.226016 29555 solver.cpp:204] Train net output #0: loss = 1.99971 (* 1 = 1.99971 loss)
I0811 20:55:45.226030 29555 solver.cpp:464] Iteration 6700, lr = 0.00680711
I0811 20:55:46.084535 29555 solver.cpp:189] Iteration 6800, loss = 1.97541
I0811 20:55:46.084588 29555 solver.cpp:204] Train net output #0: loss = 1.97541 (* 1 = 1.97541 loss)
I0811 20:55:46.084601 29555 solver.cpp:464] Iteration 6800, lr = 0.0067767
I0811 20:55:46.943415 29555 solver.cpp:189] Iteration 6900, loss = 2.05481
I0811 20:55:46.943475 29555 solver.cpp:204] Train net output #0: loss = 2.05481 (* 1 = 2.05481 loss)
I0811 20:55:46.943490 29555 solver.cpp:464] Iteration 6900, lr = 0.0067466
I0811 20:55:47.793455 29555 solver.cpp:266] Iteration 7000, Testing net (#0)
I0811 20:55:48.257940 29555 solver.cpp:315] Test net output #0: accuracy = 0.409
I0811 20:55:48.257982 29555 solver.cpp:315] Test net output #1: loss = 1.73167 (* 1 = 1.73167 loss)
I0811 20:55:48.266615 29555 solver.cpp:189] Iteration 7000, loss = 2.03954
I0811 20:55:48.266654 29555 solver.cpp:204] Train net output #0: loss = 2.03954 (* 1 = 2.03954 loss)
I0811 20:55:48.266667 29555 solver.cpp:464] Iteration 7000, lr = 0.00671681
I0811 20:55:49.127663 29555 solver.cpp:189] Iteration 7100, loss = 2.07085
I0811 20:55:49.127717 29555 solver.cpp:204] Train net output #0: loss = 2.07085 (* 1 = 2.07085 loss)
I0811 20:55:49.127732 29555 solver.cpp:464] Iteration 7100, lr = 0.00668733
I0811 20:55:49.986162 29555 solver.cpp:189] Iteration 7200, loss = 1.85722
I0811 20:55:49.986214 29555 solver.cpp:204] Train net output #0: loss = 1.85722 (* 1 = 1.85722 loss)
I0811 20:55:49.986229 29555 solver.cpp:464] Iteration 7200, lr = 0.00665815
I0811 20:55:50.845194 29555 solver.cpp:189] Iteration 7300, loss = 1.904
I0811 20:55:50.845254 29555 solver.cpp:204] Train net output #0: loss = 1.904 (* 1 = 1.904 loss)
I0811 20:55:50.845270 29555 solver.cpp:464] Iteration 7300, lr = 0.00662927
I0811 20:55:51.703320 29555 solver.cpp:189] Iteration 7400, loss = 1.96442
I0811 20:55:51.703371 29555 solver.cpp:204] Train net output #0: loss = 1.96442 (* 1 = 1.96442 loss)
I0811 20:55:51.703384 29555 solver.cpp:464] Iteration 7400, lr = 0.00660067
I0811 20:55:52.553474 29555 solver.cpp:266] Iteration 7500, Testing net (#0)
I0811 20:55:53.012869 29555 solver.cpp:315] Test net output #0: accuracy = 0.3427
I0811 20:55:53.012910 29555 solver.cpp:315] Test net output #1: loss = 1.77808 (* 1 = 1.77808 loss)
I0811 20:55:53.021317 29555 solver.cpp:189] Iteration 7500, loss = 1.98527
I0811 20:55:53.021354 29555 solver.cpp:204] Train net output #0: loss = 1.98527 (* 1 = 1.98527 loss)
I0811 20:55:53.021368 29555 solver.cpp:464] Iteration 7500, lr = 0.00657236
I0811 20:55:53.879947 29555 solver.cpp:189] Iteration 7600, loss = 1.93372
I0811 20:55:53.880031 29555 solver.cpp:204] Train net output #0: loss = 1.93372 (* 1 = 1.93372 loss)
I0811 20:55:53.880046 29555 solver.cpp:464] Iteration 7600, lr = 0.00654433
I0811 20:55:54.737836 29555 solver.cpp:189] Iteration 7700, loss = 1.94742
I0811 20:55:54.737889 29555 solver.cpp:204] Train net output #0: loss = 1.94742 (* 1 = 1.94742 loss)
I0811 20:55:54.737903 29555 solver.cpp:464] Iteration 7700, lr = 0.00651658
I0811 20:55:55.595763 29555 solver.cpp:189] Iteration 7800, loss = 2.09409
I0811 20:55:55.595814 29555 solver.cpp:204] Train net output #0: loss = 2.09409 (* 1 = 2.09409 loss)
I0811 20:55:55.595829 29555 solver.cpp:464] Iteration 7800, lr = 0.00648911
I0811 20:55:56.453917 29555 solver.cpp:189] Iteration 7900, loss = 1.96904
I0811 20:55:56.453970 29555 solver.cpp:204] Train net output #0: loss = 1.96904 (* 1 = 1.96904 loss)
I0811 20:55:56.453984 29555 solver.cpp:464] Iteration 7900, lr = 0.0064619
I0811 20:55:57.303846 29555 solver.cpp:266] Iteration 8000, Testing net (#0)
I0811 20:55:57.766616 29555 solver.cpp:315] Test net output #0: accuracy = 0.3955
I0811 20:55:57.766659 29555 solver.cpp:315] Test net output #1: loss = 1.72714 (* 1 = 1.72714 loss)
I0811 20:55:57.774893 29555 solver.cpp:189] Iteration 8000, loss = 2.14621
I0811 20:55:57.774930 29555 solver.cpp:204] Train net output #0: loss = 2.14621 (* 1 = 2.14621 loss)
I0811 20:55:57.774945 29555 solver.cpp:464] Iteration 8000, lr = 0.00643496
I0811 20:55:58.643158 29555 solver.cpp:189] Iteration 8100, loss = 1.99159
I0811 20:55:58.643218 29555 solver.cpp:204] Train net output #0: loss = 1.99159 (* 1 = 1.99159 loss)
I0811 20:55:58.643232 29555 solver.cpp:464] Iteration 8100, lr = 0.00640827
I0811 20:55:59.502285 29555 solver.cpp:189] Iteration 8200, loss = 1.90481
I0811 20:55:59.502337 29555 solver.cpp:204] Train net output #0: loss = 1.90481 (* 1 = 1.90481 loss)
I0811 20:55:59.502352 29555 solver.cpp:464] Iteration 8200, lr = 0.00638185
I0811 20:56:00.360939 29555 solver.cpp:189] Iteration 8300, loss = 1.92683
I0811 20:56:00.360992 29555 solver.cpp:204] Train net output #0: loss = 1.92683 (* 1 = 1.92683 loss)
I0811 20:56:00.361006 29555 solver.cpp:464] Iteration 8300, lr = 0.00635567
I0811 20:56:01.220257 29555 solver.cpp:189] Iteration 8400, loss = 1.99772
I0811 20:56:01.220312 29555 solver.cpp:204] Train net output #0: loss = 1.99772 (* 1 = 1.99772 loss)
I0811 20:56:01.220327 29555 solver.cpp:464] Iteration 8400, lr = 0.00632975
I0811 20:56:02.070257 29555 solver.cpp:266] Iteration 8500, Testing net (#0)
I0811 20:56:02.546972 29555 solver.cpp:315] Test net output #0: accuracy = 0.395
I0811 20:56:02.547019 29555 solver.cpp:315] Test net output #1: loss = 1.72118 (* 1 = 1.72118 loss)
I0811 20:56:02.555199 29555 solver.cpp:189] Iteration 8500, loss = 1.94328
I0811 20:56:02.555238 29555 solver.cpp:204] Train net output #0: loss = 1.94328 (* 1 = 1.94328 loss)
I0811 20:56:02.555253 29555 solver.cpp:464] Iteration 8500, lr = 0.00630407
I0811 20:56:03.414355 29555 solver.cpp:189] Iteration 8600, loss = 2.11372
I0811 20:56:03.414407 29555 solver.cpp:204] Train net output #0: loss = 2.11372 (* 1 = 2.11372 loss)
I0811 20:56:03.414420 29555 solver.cpp:464] Iteration 8600, lr = 0.00627864
I0811 20:56:04.273253 29555 solver.cpp:189] Iteration 8700, loss = 2.10433
I0811 20:56:04.273306 29555 solver.cpp:204] Train net output #0: loss = 2.10433 (* 1 = 2.10433 loss)
I0811 20:56:04.273320 29555 solver.cpp:464] Iteration 8700, lr = 0.00625344
I0811 20:56:05.131646 29555 solver.cpp:189] Iteration 8800, loss = 1.91455
I0811 20:56:05.131695 29555 solver.cpp:204] Train net output #0: loss = 1.91455 (* 1 = 1.91455 loss)
I0811 20:56:05.131710 29555 solver.cpp:464] Iteration 8800, lr = 0.00622847
I0811 20:56:05.989825 29555 solver.cpp:189] Iteration 8900, loss = 1.98412
I0811 20:56:05.989876 29555 solver.cpp:204] Train net output #0: loss = 1.98412 (* 1 = 1.98412 loss)
I0811 20:56:05.989892 29555 solver.cpp:464] Iteration 8900, lr = 0.00620374
I0811 20:56:06.840550 29555 solver.cpp:266] Iteration 9000, Testing net (#0)
I0811 20:56:07.312862 29555 solver.cpp:315] Test net output #0: accuracy = 0.4091
I0811 20:56:07.312906 29555 solver.cpp:315] Test net output #1: loss = 1.72417 (* 1 = 1.72417 loss)
I0811 20:56:07.321004 29555 solver.cpp:189] Iteration 9000, loss = 1.95881
I0811 20:56:07.321038 29555 solver.cpp:204] Train net output #0: loss = 1.95881 (* 1 = 1.95881 loss)
I0811 20:56:07.321053 29555 solver.cpp:464] Iteration 9000, lr = 0.00617924
I0811 20:56:08.183089 29555 solver.cpp:189] Iteration 9100, loss = 1.97844
I0811 20:56:08.183150 29555 solver.cpp:204] Train net output #0: loss = 1.97844 (* 1 = 1.97844 loss)
I0811 20:56:08.183163 29555 solver.cpp:464] Iteration 9100, lr = 0.00615496
I0811 20:56:09.042196 29555 solver.cpp:189] Iteration 9200, loss = 1.95315
I0811 20:56:09.042248 29555 solver.cpp:204] Train net output #0: loss = 1.95315 (* 1 = 1.95315 loss)
I0811 20:56:09.042261 29555 solver.cpp:464] Iteration 9200, lr = 0.0061309
I0811 20:56:09.901109 29555 solver.cpp:189] Iteration 9300, loss = 1.97307
I0811 20:56:09.901165 29555 solver.cpp:204] Train net output #0: loss = 1.97307 (* 1 = 1.97307 loss)
I0811 20:56:09.901178 29555 solver.cpp:464] Iteration 9300, lr = 0.00610706
I0811 20:56:10.759769 29555 solver.cpp:189] Iteration 9400, loss = 1.9234
I0811 20:56:10.759820 29555 solver.cpp:204] Train net output #0: loss = 1.9234 (* 1 = 1.9234 loss)
I0811 20:56:10.759834 29555 solver.cpp:464] Iteration 9400, lr = 0.00608343
I0811 20:56:11.609840 29555 solver.cpp:266] Iteration 9500, Testing net (#0)
I0811 20:56:12.085965 29555 solver.cpp:315] Test net output #0: accuracy = 0.4003
I0811 20:56:12.086010 29555 solver.cpp:315] Test net output #1: loss = 1.71518 (* 1 = 1.71518 loss)
I0811 20:56:12.094187 29555 solver.cpp:189] Iteration 9500, loss = 2.10593
I0811 20:56:12.094224 29555 solver.cpp:204] Train net output #0: loss = 2.10593 (* 1 = 2.10593 loss)
I0811 20:56:12.094238 29555 solver.cpp:464] Iteration 9500, lr = 0.00606002
I0811 20:56:12.953382 29555 solver.cpp:189] Iteration 9600, loss = 2.02941
I0811 20:56:12.953434 29555 solver.cpp:204] Train net output #0: loss = 2.02941 (* 1 = 2.02941 loss)
I0811 20:56:12.953449 29555 solver.cpp:464] Iteration 9600, lr = 0.00603682
I0811 20:56:13.812620 29555 solver.cpp:189] Iteration 9700, loss = 2.09021
I0811 20:56:13.812674 29555 solver.cpp:204] Train net output #0: loss = 2.09021 (* 1 = 2.09021 loss)
I0811 20:56:13.812687 29555 solver.cpp:464] Iteration 9700, lr = 0.00601382
I0811 20:56:14.671519 29555 solver.cpp:189] Iteration 9800, loss = 2.05441
I0811 20:56:14.671571 29555 solver.cpp:204] Train net output #0: loss = 2.05441 (* 1 = 2.05441 loss)
I0811 20:56:14.671584 29555 solver.cpp:464] Iteration 9800, lr = 0.00599102
I0811 20:56:15.530724 29555 solver.cpp:189] Iteration 9900, loss = 2.15287
I0811 20:56:15.530776 29555 solver.cpp:204] Train net output #0: loss = 2.15287 (* 1 = 2.15287 loss)
I0811 20:56:15.530789 29555 solver.cpp:464] Iteration 9900, lr = 0.00596843
I0811 20:56:16.381554 29555 solver.cpp:334] Snapshotting to digits_iter_10000.caffemodel
I0811 20:56:16.382349 29555 solver.cpp:342] Snapshotting solver state to digits_iter_10000.solverstate
I0811 20:56:16.387269 29555 solver.cpp:248] Iteration 10000, loss = 1.9078
I0811 20:56:16.387306 29555 solver.cpp:266] Iteration 10000, Testing net (#0)
I0811 20:56:16.865006 29555 solver.cpp:315] Test net output #0: accuracy = 0.3877
I0811 20:56:16.865047 29555 solver.cpp:315] Test net output #1: loss = 1.66508 (* 1 = 1.66508 loss)
I0811 20:56:16.865061 29555 solver.cpp:253] Optimization Done.
I0811 20:56:16.865070 29555 caffe.cpp:134] Optimization Done.
In [88]:
!./caffe/build/tools/caffe test -model digits_iter_10000.caffemodel -weights digits_iter_10000.solverstate
I0811 20:59:02.457888 9346 caffe.cpp:151] Use CPU.
[libprotobuf ERROR google/protobuf/text_format.cc:245] Error parsing text-format caffe.NetParameter: 2:1: Invalid control characters encountered in text.
[libprotobuf ERROR google/protobuf/text_format.cc:245] Error parsing text-format caffe.NetParameter: 2:8: Message type "caffe.NetParameter" has no field named "digits".
F0811 20:59:02.797425 9346 upgrade_proto.cpp:928] Check failed: ReadProtoFromTextFile(param_file, param) Failed to parse NetParameter file: digits_iter_10000.caffemodel
*** Check failure stack trace: ***
@ 0x7fef2871fdaa (unknown)
@ 0x7fef2871fce4 (unknown)
@ 0x7fef2871f6e6 (unknown)
@ 0x7fef28722687 (unknown)
@ 0x7fef28a68c3e caffe::ReadNetParamsFromTextFileOrDie()
@ 0x7fef28a95ec0 caffe::Net<>::Net()
@ 0x40529f test()
@ 0x404a21 main
@ 0x7fef27c31ec5 (unknown)
@ 0x404fcd (unknown)
@ (nil) (unknown)
Aborted (core dumped)
In [83]:
!./caffe/build/tools/caffe
caffe: command line brew
usage: caffe <command> <args>
commands:
train train or finetune a model
test score a model
device_query show GPU diagnostic information
time benchmark model execution time
Flags from tools/caffe.cpp:
-gpu (Run in GPU mode on given device ID.) type: int32 default: -1
-iterations (The number of iterations to run.) type: int32 default: 50
-model (The model definition protocol buffer text file..) type: string
default: ""
-snapshot (Optional; the snapshot solver state to resume training.)
type: string default: ""
-solver (The solver definition protocol buffer text file.) type: string
default: ""
-weights (Optional; the pretrained weights to initialize finetuning. Cannot
be set simultaneously with snapshot.) type: string default: ""
In [87]:
ls -l
total 1618120
-rw-rw-r-- 1 marcino marcino 977224 Aug 11 14:47 00-classification.ipynb
-rw-rw-r-- 1 marcino marcino 1120495 Aug 10 21:58 00-classification.ipynb.1
-rw-rw-r-- 1 marcino marcino 1120495 Aug 10 21:58 00-classification.ipynb.2
-rw-rw-r-- 1 marcino marcino 1120495 Aug 10 21:59 00-classification.ipynb.3
-rw-rw-r-- 1 marcino marcino 154190693 Jul 15 22:25 2015-07-15-22-23-34.bag
drwxrwxr-x 2 marcino marcino 4096 Jun 14 11:43 3d/
-rw-rw-r-- 1 marcino marcino 194334 Aug 10 21:17 Classification_Ensemble.html
-rw-rw-r-- 1 marcino marcino 9403 Aug 10 21:29 Classification_Ensemble.ipynb
-rw-rw-r-- 1 marcino marcino 163317 Aug 11 20:58 Classification_With_NN.ipynb
drwxrwxr-x 12 marcino marcino 4096 Dec 26 2014 NVIDIA_CUDA-6.5_Samples/
drwxrwxr-x 9 marcino marcino 4096 Jun 14 10:13 Printrun/
drwxr-xr-x 6 marcino marcino 4096 Aug 4 2014 Slic3r/
-rw-rw-r-- 1 marcino marcino 74293 Aug 10 22:17 Untitled.ipynb
-rw-rw-r-- 1 marcino marcino 4407 Aug 10 22:20 Untitled1.ipynb
drwxr-xr-x 4 marcino marcino 4096 Apr 28 2014 ale_0.4.4/
drwxrwxr-x 12 marcino marcino 4096 Jul 16 06:27 android-sdk-linux/
-rw-rw-r-- 1 marcino marcino 309109716 Jun 18 18:00 android-sdk_r24.3.3-linux.tgz
drwxrwxr-x 10 marcino marcino 4096 Jul 17 06:15 android_firewall/
drwxrwxr-x 16 marcino marcino 4096 Apr 25 09:24 caffe/
-rw-rw-r-- 1 marcino marcino 2106895 Aug 10 21:58 caffe-classification.ipynb
-rw-rw-r-- 1 marcino marcino 1193 Mar 19 17:41 conv.tar.bz2
-rw-rw-r-- 1 marcino marcino 1185903388 Sep 12 2014 cuda-repo-ubuntu1404-6-5-prod_6.5-19_amd64.deb
drwxrwxr-x 4 marcino marcino 4096 Apr 6 22:30 data/
drwxrwxr-x 3 marcino marcino 4096 Apr 9 17:54 deeprgbd/
-rw-rw-r-- 1 marcino marcino 159718 Aug 11 20:56 digits_iter_10000.caffemodel
-rw-rw-r-- 1 marcino marcino 159176 Aug 11 20:56 digits_iter_10000.solverstate
-rw-rw-r-- 1 marcino marcino 159718 Aug 11 20:55 digits_iter_5000.caffemodel
-rw-rw-r-- 1 marcino marcino 159175 Aug 11 20:55 digits_iter_5000.solverstate
-rw-rw-r-- 1 marcino marcino 712 Aug 11 20:54 digits_solver.prototxt
drwxr-xr-x 2 marcino marcino 4096 Aug 11 20:53 digits_test_lmdb/
drwxr-xr-x 2 marcino marcino 4096 Aug 11 20:53 digits_train_lmdb/
-rw-rw-r-- 1 marcino marcino 2261 Aug 11 20:54 digits_train_test.prototxt
drwxrwxr-x 5 marcino marcino 4096 Apr 25 08:52 dqn-in-the-caffe/
drwxrwxr-x 4 marcino marcino 4096 Apr 8 21:22 driving/
drwxrwxr-x 26 marcino marcino 4096 May 31 22:00 kernel/
drwxr-xr-x 3 marcino marcino 4096 Nov 6 2014 lib/
drwxrwxr-x 3 marcino marcino 4096 Jul 30 22:10 pilco/
drwxrwxr-x 12 marcino marcino 4096 Jul 30 22:15 pycuda/
drwxrwxr-x 5 marcino marcino 4096 Jul 15 22:09 ros/
drwxrwxr-x 3 marcino marcino 4096 Jul 17 06:29 sandbox/
-rw-rw-r-- 1 marcino marcino 79560 Nov 6 2014 xhci-firmware-2014.11.06.00.00.tbz2
In [57]:
1/16.
Out[57]:
0.0625
In [ ]:
Content source: marcino239/notebooks
Similar notebooks: