DIGITS-CNN/cars/data-aug-investigations/rot-5-256batch/caffe_output.log

I0419 22:06:56.483145 8388 upgrade_proto.cpp:1082] Attempting to upgrade input file specified using deprecated 'solver_type' field (enum)': /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-220655-77eb/solver.prototxt
I0419 22:06:56.483283 8388 upgrade_proto.cpp:1089] Successfully upgraded file specified using deprecated 'solver_type' field (enum) to 'type' field (string).
W0419 22:06:56.483287 8388 upgrade_proto.cpp:1091] Note that future Caffe releases will only support 'type' field (string) for a solver's type.
I0419 22:06:56.483350 8388 caffe.cpp:218] Using GPUs 2
I0419 22:06:56.524782 8388 caffe.cpp:223] GPU 2: GeForce RTX 2080
I0419 22:06:56.852980 8388 solver.cpp:44] Initializing solver from parameters:
test_iter: 7
test_interval: 102
base_lr: 0.01
display: 12
max_iter: 3060
lr_policy: "exp"
gamma: 0.99934
momentum: 0.9
weight_decay: 0.0001
snapshot: 102
snapshot_prefix: "snapshot"
solver_mode: GPU
device_id: 2
net: "train_val.prototxt"
train_state {
level: 0
stage: ""
}
type: "SGD"
I0419 22:06:56.853807 8388 solver.cpp:87] Creating training net from net file: train_val.prototxt
I0419 22:06:56.854460 8388 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer val-data
I0419 22:06:56.854472 8388 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0419 22:06:56.854599 8388 net.cpp:51] Initializing net from parameters:
state {
phase: TRAIN
level: 0
stage: ""
}
layer {
name: "train-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/train_db"
batch_size: 256
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
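
The layer setup that follows reports a "Top shape" for every blob; the spatial sizes there can be reproduced from the convolution/pooling parameters above using Caffe's usual output-size arithmetic (floor division for convolutions, ceiling for pooling), starting from the 227x227 crop. A small sketch, with illustrative helper names:

import math

def conv_out(i, k, s=1, p=0):
    # Caffe convolution output size: floor((i + 2p - k) / s) + 1
    return (i + 2 * p - k) // s + 1

def pool_out(i, k, s=1, p=0):
    # Caffe pooling output size: ceil((i + 2p - k) / s) + 1
    return int(math.ceil((i + 2 * p - k) / float(s))) + 1

x = 227                    # crop_size
x = conv_out(x, 11, 4)     # conv1 -> 55
x = pool_out(x, 3, 2)      # pool1 -> 27
x = conv_out(x, 5, 1, 2)   # conv2 -> 27
x = pool_out(x, 3, 2)      # pool2 -> 13
x = conv_out(x, 3, 1, 1)   # conv3 (and conv4, conv5) -> 13
x = pool_out(x, 3, 2)      # pool5 -> 6
print(x)                   # 6: fc6 therefore sees 256*6*6 = 9216 inputs per image
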
I0419 22:06:56.854689 8388 layer_factory.hpp:77] Creating layer train-data
I0419 22:06:56.870759 8388 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/train_db
I0419 22:06:56.902539 8388 net.cpp:84] Creating Layer train-data
I0419 22:06:56.902570 8388 net.cpp:380] train-data -> data
I0419 22:06:56.902606 8388 net.cpp:380] train-data -> label
I0419 22:06:56.902628 8388 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/mean.binaryproto
I0419 22:06:56.921988 8388 data_layer.cpp:45] output data size: 256,3,227,227
I0419 22:06:57.172726 8388 net.cpp:122] Setting up train-data
I0419 22:06:57.172749 8388 net.cpp:129] Top shape: 256 3 227 227 (39574272)
I0419 22:06:57.172752 8388 net.cpp:129] Top shape: 256 (256)
I0419 22:06:57.172755 8388 net.cpp:137] Memory required for data: 158298112
I0419 22:06:57.172765 8388 layer_factory.hpp:77] Creating layer conv1
I0419 22:06:57.172804 8388 net.cpp:84] Creating Layer conv1
I0419 22:06:57.172811 8388 net.cpp:406] conv1 <- data
I0419 22:06:57.172822 8388 net.cpp:380] conv1 -> conv1
I0419 22:06:58.055094 8388 net.cpp:122] Setting up conv1
I0419 22:06:58.055115 8388 net.cpp:129] Top shape: 256 96 55 55 (74342400)
I0419 22:06:58.055119 8388 net.cpp:137] Memory required for data: 455667712
I0419 22:06:58.055136 8388 layer_factory.hpp:77] Creating layer relu1
I0419 22:06:58.055148 8388 net.cpp:84] Creating Layer relu1
I0419 22:06:58.055152 8388 net.cpp:406] relu1 <- conv1
I0419 22:06:58.055157 8388 net.cpp:367] relu1 -> conv1 (in-place)
I0419 22:06:58.055536 8388 net.cpp:122] Setting up relu1
I0419 22:06:58.055547 8388 net.cpp:129] Top shape: 256 96 55 55 (74342400)
I0419 22:06:58.055549 8388 net.cpp:137] Memory required for data: 753037312
I0419 22:06:58.055552 8388 layer_factory.hpp:77] Creating layer norm1
I0419 22:06:58.055560 8388 net.cpp:84] Creating Layer norm1
I0419 22:06:58.055563 8388 net.cpp:406] norm1 <- conv1
I0419 22:06:58.055593 8388 net.cpp:380] norm1 -> norm1
I0419 22:06:58.056195 8388 net.cpp:122] Setting up norm1
I0419 22:06:58.056205 8388 net.cpp:129] Top shape: 256 96 55 55 (74342400)
I0419 22:06:58.056207 8388 net.cpp:137] Memory required for data: 1050406912
I0419 22:06:58.056210 8388 layer_factory.hpp:77] Creating layer pool1
I0419 22:06:58.056217 8388 net.cpp:84] Creating Layer pool1
I0419 22:06:58.056222 8388 net.cpp:406] pool1 <- norm1
I0419 22:06:58.056227 8388 net.cpp:380] pool1 -> pool1
I0419 22:06:58.056262 8388 net.cpp:122] Setting up pool1
I0419 22:06:58.056267 8388 net.cpp:129] Top shape: 256 96 27 27 (17915904)
I0419 22:06:58.056269 8388 net.cpp:137] Memory required for data: 1122070528
I0419 22:06:58.056272 8388 layer_factory.hpp:77] Creating layer conv2
I0419 22:06:58.056283 8388 net.cpp:84] Creating Layer conv2
I0419 22:06:58.056286 8388 net.cpp:406] conv2 <- pool1
I0419 22:06:58.056291 8388 net.cpp:380] conv2 -> conv2
I0419 22:06:58.064507 8388 net.cpp:122] Setting up conv2
I0419 22:06:58.064524 8388 net.cpp:129] Top shape: 256 256 27 27 (47775744)
I0419 22:06:58.064527 8388 net.cpp:137] Memory required for data: 1313173504
I0419 22:06:58.064536 8388 layer_factory.hpp:77] Creating layer relu2
I0419 22:06:58.064546 8388 net.cpp:84] Creating Layer relu2
I0419 22:06:58.064550 8388 net.cpp:406] relu2 <- conv2
I0419 22:06:58.064555 8388 net.cpp:367] relu2 -> conv2 (in-place)
I0419 22:06:58.065127 8388 net.cpp:122] Setting up relu2
I0419 22:06:58.065138 8388 net.cpp:129] Top shape: 256 256 27 27 (47775744)
I0419 22:06:58.065141 8388 net.cpp:137] Memory required for data: 1504276480
I0419 22:06:58.065145 8388 layer_factory.hpp:77] Creating layer norm2
I0419 22:06:58.065150 8388 net.cpp:84] Creating Layer norm2
I0419 22:06:58.065153 8388 net.cpp:406] norm2 <- conv2
I0419 22:06:58.065160 8388 net.cpp:380] norm2 -> norm2
I0419 22:06:58.065552 8388 net.cpp:122] Setting up norm2
I0419 22:06:58.065562 8388 net.cpp:129] Top shape: 256 256 27 27 (47775744)
I0419 22:06:58.065564 8388 net.cpp:137] Memory required for data: 1695379456
I0419 22:06:58.065567 8388 layer_factory.hpp:77] Creating layer pool2
I0419 22:06:58.065575 8388 net.cpp:84] Creating Layer pool2
I0419 22:06:58.065578 8388 net.cpp:406] pool2 <- norm2
I0419 22:06:58.065583 8388 net.cpp:380] pool2 -> pool2
I0419 22:06:58.065610 8388 net.cpp:122] Setting up pool2
I0419 22:06:58.065615 8388 net.cpp:129] Top shape: 256 256 13 13 (11075584)
I0419 22:06:58.065618 8388 net.cpp:137] Memory required for data: 1739681792
I0419 22:06:58.065620 8388 layer_factory.hpp:77] Creating layer conv3
I0419 22:06:58.065630 8388 net.cpp:84] Creating Layer conv3
I0419 22:06:58.065634 8388 net.cpp:406] conv3 <- pool2
I0419 22:06:58.065639 8388 net.cpp:380] conv3 -> conv3
I0419 22:06:58.076313 8388 net.cpp:122] Setting up conv3
I0419 22:06:58.076326 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.076329 8388 net.cpp:137] Memory required for data: 1806135296
I0419 22:06:58.076339 8388 layer_factory.hpp:77] Creating layer relu3
I0419 22:06:58.076349 8388 net.cpp:84] Creating Layer relu3
I0419 22:06:58.076351 8388 net.cpp:406] relu3 <- conv3
I0419 22:06:58.076359 8388 net.cpp:367] relu3 -> conv3 (in-place)
I0419 22:06:58.076949 8388 net.cpp:122] Setting up relu3
I0419 22:06:58.076958 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.076961 8388 net.cpp:137] Memory required for data: 1872588800
I0419 22:06:58.076964 8388 layer_factory.hpp:77] Creating layer conv4
I0419 22:06:58.076974 8388 net.cpp:84] Creating Layer conv4
I0419 22:06:58.076977 8388 net.cpp:406] conv4 <- conv3
I0419 22:06:58.076984 8388 net.cpp:380] conv4 -> conv4
I0419 22:06:58.088205 8388 net.cpp:122] Setting up conv4
I0419 22:06:58.088222 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.088225 8388 net.cpp:137] Memory required for data: 1939042304
I0419 22:06:58.088232 8388 layer_factory.hpp:77] Creating layer relu4
I0419 22:06:58.088239 8388 net.cpp:84] Creating Layer relu4
I0419 22:06:58.088264 8388 net.cpp:406] relu4 <- conv4
I0419 22:06:58.088271 8388 net.cpp:367] relu4 -> conv4 (in-place)
I0419 22:06:58.088814 8388 net.cpp:122] Setting up relu4
I0419 22:06:58.088822 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.088825 8388 net.cpp:137] Memory required for data: 2005495808
I0419 22:06:58.088829 8388 layer_factory.hpp:77] Creating layer conv5
I0419 22:06:58.088840 8388 net.cpp:84] Creating Layer conv5
I0419 22:06:58.088843 8388 net.cpp:406] conv5 <- conv4
I0419 22:06:58.088848 8388 net.cpp:380] conv5 -> conv5
I0419 22:06:58.098453 8388 net.cpp:122] Setting up conv5
I0419 22:06:58.098469 8388 net.cpp:129] Top shape: 256 256 13 13 (11075584)
I0419 22:06:58.098472 8388 net.cpp:137] Memory required for data: 2049798144
I0419 22:06:58.098484 8388 layer_factory.hpp:77] Creating layer relu5
I0419 22:06:58.098492 8388 net.cpp:84] Creating Layer relu5
I0419 22:06:58.098496 8388 net.cpp:406] relu5 <- conv5
I0419 22:06:58.098503 8388 net.cpp:367] relu5 -> conv5 (in-place)
I0419 22:06:58.099056 8388 net.cpp:122] Setting up relu5
I0419 22:06:58.099064 8388 net.cpp:129] Top shape: 256 256 13 13 (11075584)
I0419 22:06:58.099067 8388 net.cpp:137] Memory required for data: 2094100480
I0419 22:06:58.099071 8388 layer_factory.hpp:77] Creating layer pool5
I0419 22:06:58.099077 8388 net.cpp:84] Creating Layer pool5
I0419 22:06:58.099081 8388 net.cpp:406] pool5 <- conv5
I0419 22:06:58.099087 8388 net.cpp:380] pool5 -> pool5
I0419 22:06:58.099120 8388 net.cpp:122] Setting up pool5
I0419 22:06:58.099128 8388 net.cpp:129] Top shape: 256 256 6 6 (2359296)
I0419 22:06:58.099129 8388 net.cpp:137] Memory required for data: 2103537664
I0419 22:06:58.099133 8388 layer_factory.hpp:77] Creating layer fc6
I0419 22:06:58.099141 8388 net.cpp:84] Creating Layer fc6
I0419 22:06:58.099144 8388 net.cpp:406] fc6 <- pool5
I0419 22:06:58.099149 8388 net.cpp:380] fc6 -> fc6
I0419 22:06:58.458634 8388 net.cpp:122] Setting up fc6
I0419 22:06:58.458652 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:58.458655 8388 net.cpp:137] Memory required for data: 2107731968
I0419 22:06:58.458663 8388 layer_factory.hpp:77] Creating layer relu6
I0419 22:06:58.458674 8388 net.cpp:84] Creating Layer relu6
I0419 22:06:58.458678 8388 net.cpp:406] relu6 <- fc6
I0419 22:06:58.458683 8388 net.cpp:367] relu6 -> fc6 (in-place)
I0419 22:06:58.459411 8388 net.cpp:122] Setting up relu6
I0419 22:06:58.459424 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:58.459426 8388 net.cpp:137] Memory required for data: 2111926272
I0419 22:06:58.459429 8388 layer_factory.hpp:77] Creating layer drop6
I0419 22:06:58.459435 8388 net.cpp:84] Creating Layer drop6
I0419 22:06:58.459439 8388 net.cpp:406] drop6 <- fc6
I0419 22:06:58.459442 8388 net.cpp:367] drop6 -> fc6 (in-place)
I0419 22:06:58.459472 8388 net.cpp:122] Setting up drop6
I0419 22:06:58.459477 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:58.459479 8388 net.cpp:137] Memory required for data: 2116120576
I0419 22:06:58.459482 8388 layer_factory.hpp:77] Creating layer fc7
I0419 22:06:58.459489 8388 net.cpp:84] Creating Layer fc7
I0419 22:06:58.459492 8388 net.cpp:406] fc7 <- fc6
I0419 22:06:58.459496 8388 net.cpp:380] fc7 -> fc7
I0419 22:06:58.618659 8388 net.cpp:122] Setting up fc7
I0419 22:06:58.618677 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:58.618680 8388 net.cpp:137] Memory required for data: 2120314880
I0419 22:06:58.618690 8388 layer_factory.hpp:77] Creating layer relu7
I0419 22:06:58.618696 8388 net.cpp:84] Creating Layer relu7
I0419 22:06:58.618700 8388 net.cpp:406] relu7 <- fc7
I0419 22:06:58.618708 8388 net.cpp:367] relu7 -> fc7 (in-place)
I0419 22:06:58.619190 8388 net.cpp:122] Setting up relu7
I0419 22:06:58.619199 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:58.619201 8388 net.cpp:137] Memory required for data: 2124509184
I0419 22:06:58.619204 8388 layer_factory.hpp:77] Creating layer drop7
I0419 22:06:58.619210 8388 net.cpp:84] Creating Layer drop7
I0419 22:06:58.619232 8388 net.cpp:406] drop7 <- fc7
I0419 22:06:58.619238 8388 net.cpp:367] drop7 -> fc7 (in-place)
I0419 22:06:58.619261 8388 net.cpp:122] Setting up drop7
I0419 22:06:58.619264 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:58.619267 8388 net.cpp:137] Memory required for data: 2128703488
I0419 22:06:58.619271 8388 layer_factory.hpp:77] Creating layer fc8
I0419 22:06:58.619277 8388 net.cpp:84] Creating Layer fc8
I0419 22:06:58.619280 8388 net.cpp:406] fc8 <- fc7
I0419 22:06:58.619285 8388 net.cpp:380] fc8 -> fc8
I0419 22:06:58.627069 8388 net.cpp:122] Setting up fc8
I0419 22:06:58.627079 8388 net.cpp:129] Top shape: 256 196 (50176)
I0419 22:06:58.627082 8388 net.cpp:137] Memory required for data: 2128904192
I0419 22:06:58.627089 8388 layer_factory.hpp:77] Creating layer loss
I0419 22:06:58.627096 8388 net.cpp:84] Creating Layer loss
I0419 22:06:58.627099 8388 net.cpp:406] loss <- fc8
I0419 22:06:58.627104 8388 net.cpp:406] loss <- label
I0419 22:06:58.627110 8388 net.cpp:380] loss -> loss
I0419 22:06:58.627118 8388 layer_factory.hpp:77] Creating layer loss
I0419 22:06:58.627857 8388 net.cpp:122] Setting up loss
I0419 22:06:58.627866 8388 net.cpp:129] Top shape: (1)
I0419 22:06:58.627869 8388 net.cpp:132] with loss weight 1
I0419 22:06:58.627887 8388 net.cpp:137] Memory required for data: 2128904196
I0419 22:06:58.627890 8388 net.cpp:198] loss needs backward computation.
I0419 22:06:58.627897 8388 net.cpp:198] fc8 needs backward computation.
I0419 22:06:58.627899 8388 net.cpp:198] drop7 needs backward computation.
I0419 22:06:58.627902 8388 net.cpp:198] relu7 needs backward computation.
I0419 22:06:58.627904 8388 net.cpp:198] fc7 needs backward computation.
I0419 22:06:58.627907 8388 net.cpp:198] drop6 needs backward computation.
I0419 22:06:58.627909 8388 net.cpp:198] relu6 needs backward computation.
I0419 22:06:58.627912 8388 net.cpp:198] fc6 needs backward computation.
I0419 22:06:58.627915 8388 net.cpp:198] pool5 needs backward computation.
I0419 22:06:58.627918 8388 net.cpp:198] relu5 needs backward computation.
I0419 22:06:58.627921 8388 net.cpp:198] conv5 needs backward computation.
I0419 22:06:58.627923 8388 net.cpp:198] relu4 needs backward computation.
I0419 22:06:58.627926 8388 net.cpp:198] conv4 needs backward computation.
I0419 22:06:58.627929 8388 net.cpp:198] relu3 needs backward computation.
I0419 22:06:58.627931 8388 net.cpp:198] conv3 needs backward computation.
I0419 22:06:58.627934 8388 net.cpp:198] pool2 needs backward computation.
I0419 22:06:58.627938 8388 net.cpp:198] norm2 needs backward computation.
I0419 22:06:58.627940 8388 net.cpp:198] relu2 needs backward computation.
I0419 22:06:58.627943 8388 net.cpp:198] conv2 needs backward computation.
I0419 22:06:58.627945 8388 net.cpp:198] pool1 needs backward computation.
I0419 22:06:58.627948 8388 net.cpp:198] norm1 needs backward computation.
I0419 22:06:58.627950 8388 net.cpp:198] relu1 needs backward computation.
I0419 22:06:58.627954 8388 net.cpp:198] conv1 needs backward computation.
I0419 22:06:58.627960 8388 net.cpp:200] train-data does not need backward computation.
I0419 22:06:58.627964 8388 net.cpp:242] This network produces output loss
I0419 22:06:58.627975 8388 net.cpp:255] Network initialization done.
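
The running "Memory required for data" counter above is simply 4 bytes per float summed over every top blob printed so far, with in-place ReLU/Dropout tops counted separately; the element counts shown in parentheses reproduce the final figure exactly. A quick check:

top_elements = [
    39574272, 256,                            # train-data: data, label
    74342400, 74342400, 74342400, 17915904,   # conv1, relu1, norm1, pool1
    47775744, 47775744, 47775744, 11075584,   # conv2, relu2, norm2, pool2
    16613376, 16613376,                       # conv3, relu3
    16613376, 16613376,                       # conv4, relu4
    11075584, 11075584, 2359296,              # conv5, relu5, pool5
    1048576, 1048576, 1048576,                # fc6, relu6, drop6
    1048576, 1048576, 1048576,                # fc7, relu7, drop7
    50176, 1,                                 # fc8, loss
]
print(4 * sum(top_elements))                  # 2128904196 bytes, matching the counter above
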
I0419 22:06:58.628432 8388 solver.cpp:172] Creating test net (#0) specified by net file: train_val.prototxt
I0419 22:06:58.628460 8388 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer train-data
I0419 22:06:58.628594 8388 net.cpp:51] Initializing net from parameters:
state {
phase: TEST
}
layer {
name: "val-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/val_db"
batch_size: 256
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
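
With test_iter: 7 and batch_size: 256, each "Testing net (#0)" pass below averages accuracy and loss over 7 * 256 = 1,792 validation images (assuming the usual Caffe averaging over test_iter batches); the reported accuracies are consistent with that, e.g. 0.00669643 at iteration 102 is 12/1792. A brief sanity check:

test_iter, batch_size = 7, 256
images_per_pass = test_iter * batch_size
print(images_per_pass)                        # 1792
print(f"{12 / images_per_pass:.8f}")          # 0.00669643, the accuracy logged at iteration 102
print(round(0.0178571 * images_per_pass))     # 32 correct predictions at iteration 306
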
I0419 22:06:58.628697 8388 layer_factory.hpp:77] Creating layer val-data
I0419 22:06:58.632910 8388 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/val_db
I0419 22:06:58.647346 8388 net.cpp:84] Creating Layer val-data
I0419 22:06:58.647372 8388 net.cpp:380] val-data -> data
I0419 22:06:58.647387 8388 net.cpp:380] val-data -> label
I0419 22:06:58.647397 8388 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-135836-fd84/mean.binaryproto
I0419 22:06:58.658825 8388 data_layer.cpp:45] output data size: 256,3,227,227
I0419 22:06:58.912772 8388 net.cpp:122] Setting up val-data
I0419 22:06:58.912793 8388 net.cpp:129] Top shape: 256 3 227 227 (39574272)
I0419 22:06:58.912797 8388 net.cpp:129] Top shape: 256 (256)
I0419 22:06:58.912801 8388 net.cpp:137] Memory required for data: 158298112
I0419 22:06:58.912806 8388 layer_factory.hpp:77] Creating layer label_val-data_1_split
I0419 22:06:58.912817 8388 net.cpp:84] Creating Layer label_val-data_1_split
I0419 22:06:58.912820 8388 net.cpp:406] label_val-data_1_split <- label
I0419 22:06:58.912827 8388 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_0
I0419 22:06:58.912834 8388 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_1
I0419 22:06:58.912879 8388 net.cpp:122] Setting up label_val-data_1_split
I0419 22:06:58.912884 8388 net.cpp:129] Top shape: 256 (256)
I0419 22:06:58.912887 8388 net.cpp:129] Top shape: 256 (256)
I0419 22:06:58.912889 8388 net.cpp:137] Memory required for data: 158300160
I0419 22:06:58.912892 8388 layer_factory.hpp:77] Creating layer conv1
I0419 22:06:58.912902 8388 net.cpp:84] Creating Layer conv1
I0419 22:06:58.912905 8388 net.cpp:406] conv1 <- data
I0419 22:06:58.912910 8388 net.cpp:380] conv1 -> conv1
I0419 22:06:58.916296 8388 net.cpp:122] Setting up conv1
I0419 22:06:58.916307 8388 net.cpp:129] Top shape: 256 96 55 55 (74342400)
I0419 22:06:58.916311 8388 net.cpp:137] Memory required for data: 455669760
I0419 22:06:58.916319 8388 layer_factory.hpp:77] Creating layer relu1
I0419 22:06:58.916325 8388 net.cpp:84] Creating Layer relu1
I0419 22:06:58.916328 8388 net.cpp:406] relu1 <- conv1
I0419 22:06:58.916332 8388 net.cpp:367] relu1 -> conv1 (in-place)
I0419 22:06:58.916653 8388 net.cpp:122] Setting up relu1
I0419 22:06:58.916662 8388 net.cpp:129] Top shape: 256 96 55 55 (74342400)
I0419 22:06:58.916666 8388 net.cpp:137] Memory required for data: 753039360
I0419 22:06:58.916668 8388 layer_factory.hpp:77] Creating layer norm1
I0419 22:06:58.916676 8388 net.cpp:84] Creating Layer norm1
I0419 22:06:58.916678 8388 net.cpp:406] norm1 <- conv1
I0419 22:06:58.916683 8388 net.cpp:380] norm1 -> norm1
I0419 22:06:58.917205 8388 net.cpp:122] Setting up norm1
I0419 22:06:58.917215 8388 net.cpp:129] Top shape: 256 96 55 55 (74342400)
I0419 22:06:58.917217 8388 net.cpp:137] Memory required for data: 1050408960
I0419 22:06:58.917220 8388 layer_factory.hpp:77] Creating layer pool1
I0419 22:06:58.917227 8388 net.cpp:84] Creating Layer pool1
I0419 22:06:58.917230 8388 net.cpp:406] pool1 <- norm1
I0419 22:06:58.917234 8388 net.cpp:380] pool1 -> pool1
I0419 22:06:58.917259 8388 net.cpp:122] Setting up pool1
I0419 22:06:58.917263 8388 net.cpp:129] Top shape: 256 96 27 27 (17915904)
I0419 22:06:58.917266 8388 net.cpp:137] Memory required for data: 1122072576
I0419 22:06:58.917268 8388 layer_factory.hpp:77] Creating layer conv2
I0419 22:06:58.917276 8388 net.cpp:84] Creating Layer conv2
I0419 22:06:58.917297 8388 net.cpp:406] conv2 <- pool1
I0419 22:06:58.917302 8388 net.cpp:380] conv2 -> conv2
I0419 22:06:58.937960 8388 net.cpp:122] Setting up conv2
I0419 22:06:58.937980 8388 net.cpp:129] Top shape: 256 256 27 27 (47775744)
I0419 22:06:58.937984 8388 net.cpp:137] Memory required for data: 1313175552
I0419 22:06:58.937997 8388 layer_factory.hpp:77] Creating layer relu2
I0419 22:06:58.938006 8388 net.cpp:84] Creating Layer relu2
I0419 22:06:58.938010 8388 net.cpp:406] relu2 <- conv2
I0419 22:06:58.938017 8388 net.cpp:367] relu2 -> conv2 (in-place)
I0419 22:06:58.938915 8388 net.cpp:122] Setting up relu2
I0419 22:06:58.938930 8388 net.cpp:129] Top shape: 256 256 27 27 (47775744)
I0419 22:06:58.938933 8388 net.cpp:137] Memory required for data: 1504278528
I0419 22:06:58.938938 8388 layer_factory.hpp:77] Creating layer norm2
I0419 22:06:58.938953 8388 net.cpp:84] Creating Layer norm2
I0419 22:06:58.938958 8388 net.cpp:406] norm2 <- conv2
I0419 22:06:58.938966 8388 net.cpp:380] norm2 -> norm2
I0419 22:06:58.940097 8388 net.cpp:122] Setting up norm2
I0419 22:06:58.940114 8388 net.cpp:129] Top shape: 256 256 27 27 (47775744)
I0419 22:06:58.940119 8388 net.cpp:137] Memory required for data: 1695381504
I0419 22:06:58.940124 8388 layer_factory.hpp:77] Creating layer pool2
I0419 22:06:58.940131 8388 net.cpp:84] Creating Layer pool2
I0419 22:06:58.940136 8388 net.cpp:406] pool2 <- norm2
I0419 22:06:58.940145 8388 net.cpp:380] pool2 -> pool2
I0419 22:06:58.940187 8388 net.cpp:122] Setting up pool2
I0419 22:06:58.940196 8388 net.cpp:129] Top shape: 256 256 13 13 (11075584)
I0419 22:06:58.940198 8388 net.cpp:137] Memory required for data: 1739683840
I0419 22:06:58.940203 8388 layer_factory.hpp:77] Creating layer conv3
I0419 22:06:58.940218 8388 net.cpp:84] Creating Layer conv3
I0419 22:06:58.940222 8388 net.cpp:406] conv3 <- pool2
I0419 22:06:58.940232 8388 net.cpp:380] conv3 -> conv3
I0419 22:06:58.955765 8388 net.cpp:122] Setting up conv3
I0419 22:06:58.955783 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.955786 8388 net.cpp:137] Memory required for data: 1806137344
I0419 22:06:58.955798 8388 layer_factory.hpp:77] Creating layer relu3
I0419 22:06:58.955806 8388 net.cpp:84] Creating Layer relu3
I0419 22:06:58.955811 8388 net.cpp:406] relu3 <- conv3
I0419 22:06:58.955816 8388 net.cpp:367] relu3 -> conv3 (in-place)
I0419 22:06:58.956411 8388 net.cpp:122] Setting up relu3
I0419 22:06:58.956421 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.956424 8388 net.cpp:137] Memory required for data: 1872590848
I0419 22:06:58.956427 8388 layer_factory.hpp:77] Creating layer conv4
I0419 22:06:58.956439 8388 net.cpp:84] Creating Layer conv4
I0419 22:06:58.956441 8388 net.cpp:406] conv4 <- conv3
I0419 22:06:58.956449 8388 net.cpp:380] conv4 -> conv4
I0419 22:06:58.966859 8388 net.cpp:122] Setting up conv4
I0419 22:06:58.966876 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.966878 8388 net.cpp:137] Memory required for data: 1939044352
I0419 22:06:58.966886 8388 layer_factory.hpp:77] Creating layer relu4
I0419 22:06:58.966893 8388 net.cpp:84] Creating Layer relu4
I0419 22:06:58.966897 8388 net.cpp:406] relu4 <- conv4
I0419 22:06:58.966902 8388 net.cpp:367] relu4 -> conv4 (in-place)
I0419 22:06:58.967289 8388 net.cpp:122] Setting up relu4
I0419 22:06:58.967299 8388 net.cpp:129] Top shape: 256 384 13 13 (16613376)
I0419 22:06:58.967303 8388 net.cpp:137] Memory required for data: 2005497856
I0419 22:06:58.967305 8388 layer_factory.hpp:77] Creating layer conv5
I0419 22:06:58.967315 8388 net.cpp:84] Creating Layer conv5
I0419 22:06:58.967319 8388 net.cpp:406] conv5 <- conv4
I0419 22:06:58.967324 8388 net.cpp:380] conv5 -> conv5
I0419 22:06:58.977345 8388 net.cpp:122] Setting up conv5
I0419 22:06:58.977361 8388 net.cpp:129] Top shape: 256 256 13 13 (11075584)
I0419 22:06:58.977365 8388 net.cpp:137] Memory required for data: 2049800192
I0419 22:06:58.977376 8388 layer_factory.hpp:77] Creating layer relu5
I0419 22:06:58.977401 8388 net.cpp:84] Creating Layer relu5
I0419 22:06:58.977404 8388 net.cpp:406] relu5 <- conv5
I0419 22:06:58.977411 8388 net.cpp:367] relu5 -> conv5 (in-place)
I0419 22:06:58.977977 8388 net.cpp:122] Setting up relu5
I0419 22:06:58.977988 8388 net.cpp:129] Top shape: 256 256 13 13 (11075584)
I0419 22:06:58.977991 8388 net.cpp:137] Memory required for data: 2094102528
I0419 22:06:58.977994 8388 layer_factory.hpp:77] Creating layer pool5
I0419 22:06:58.978004 8388 net.cpp:84] Creating Layer pool5
I0419 22:06:58.978008 8388 net.cpp:406] pool5 <- conv5
I0419 22:06:58.978013 8388 net.cpp:380] pool5 -> pool5
I0419 22:06:58.978049 8388 net.cpp:122] Setting up pool5
I0419 22:06:58.978053 8388 net.cpp:129] Top shape: 256 256 6 6 (2359296)
I0419 22:06:58.978056 8388 net.cpp:137] Memory required for data: 2103539712
I0419 22:06:58.978060 8388 layer_factory.hpp:77] Creating layer fc6
I0419 22:06:58.978070 8388 net.cpp:84] Creating Layer fc6
I0419 22:06:58.978072 8388 net.cpp:406] fc6 <- pool5
I0419 22:06:58.978077 8388 net.cpp:380] fc6 -> fc6
I0419 22:06:59.337400 8388 net.cpp:122] Setting up fc6
I0419 22:06:59.337419 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:59.337422 8388 net.cpp:137] Memory required for data: 2107734016
I0419 22:06:59.337431 8388 layer_factory.hpp:77] Creating layer relu6
I0419 22:06:59.337440 8388 net.cpp:84] Creating Layer relu6
I0419 22:06:59.337443 8388 net.cpp:406] relu6 <- fc6
I0419 22:06:59.337450 8388 net.cpp:367] relu6 -> fc6 (in-place)
I0419 22:06:59.338204 8388 net.cpp:122] Setting up relu6
I0419 22:06:59.338214 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:59.338217 8388 net.cpp:137] Memory required for data: 2111928320
I0419 22:06:59.338220 8388 layer_factory.hpp:77] Creating layer drop6
I0419 22:06:59.338227 8388 net.cpp:84] Creating Layer drop6
I0419 22:06:59.338229 8388 net.cpp:406] drop6 <- fc6
I0419 22:06:59.338235 8388 net.cpp:367] drop6 -> fc6 (in-place)
I0419 22:06:59.338260 8388 net.cpp:122] Setting up drop6
I0419 22:06:59.338265 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:59.338268 8388 net.cpp:137] Memory required for data: 2116122624
I0419 22:06:59.338270 8388 layer_factory.hpp:77] Creating layer fc7
I0419 22:06:59.338276 8388 net.cpp:84] Creating Layer fc7
I0419 22:06:59.338279 8388 net.cpp:406] fc7 <- fc6
I0419 22:06:59.338284 8388 net.cpp:380] fc7 -> fc7
I0419 22:06:59.498278 8388 net.cpp:122] Setting up fc7
I0419 22:06:59.498297 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:59.498301 8388 net.cpp:137] Memory required for data: 2120316928
I0419 22:06:59.498308 8388 layer_factory.hpp:77] Creating layer relu7
I0419 22:06:59.498317 8388 net.cpp:84] Creating Layer relu7
I0419 22:06:59.498320 8388 net.cpp:406] relu7 <- fc7
I0419 22:06:59.498327 8388 net.cpp:367] relu7 -> fc7 (in-place)
I0419 22:06:59.498823 8388 net.cpp:122] Setting up relu7
I0419 22:06:59.498834 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:59.498837 8388 net.cpp:137] Memory required for data: 2124511232
I0419 22:06:59.498840 8388 layer_factory.hpp:77] Creating layer drop7
I0419 22:06:59.498847 8388 net.cpp:84] Creating Layer drop7
I0419 22:06:59.498850 8388 net.cpp:406] drop7 <- fc7
I0419 22:06:59.498854 8388 net.cpp:367] drop7 -> fc7 (in-place)
I0419 22:06:59.498880 8388 net.cpp:122] Setting up drop7
I0419 22:06:59.498885 8388 net.cpp:129] Top shape: 256 4096 (1048576)
I0419 22:06:59.498888 8388 net.cpp:137] Memory required for data: 2128705536
I0419 22:06:59.498890 8388 layer_factory.hpp:77] Creating layer fc8
I0419 22:06:59.498898 8388 net.cpp:84] Creating Layer fc8
I0419 22:06:59.498900 8388 net.cpp:406] fc8 <- fc7
I0419 22:06:59.498905 8388 net.cpp:380] fc8 -> fc8
I0419 22:06:59.506745 8388 net.cpp:122] Setting up fc8
I0419 22:06:59.506759 8388 net.cpp:129] Top shape: 256 196 (50176)
I0419 22:06:59.506762 8388 net.cpp:137] Memory required for data: 2128906240
I0419 22:06:59.506769 8388 layer_factory.hpp:77] Creating layer fc8_fc8_0_split
I0419 22:06:59.506775 8388 net.cpp:84] Creating Layer fc8_fc8_0_split
I0419 22:06:59.506796 8388 net.cpp:406] fc8_fc8_0_split <- fc8
I0419 22:06:59.506803 8388 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_0
I0419 22:06:59.506809 8388 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_1
I0419 22:06:59.506844 8388 net.cpp:122] Setting up fc8_fc8_0_split
I0419 22:06:59.506848 8388 net.cpp:129] Top shape: 256 196 (50176)
I0419 22:06:59.506850 8388 net.cpp:129] Top shape: 256 196 (50176)
I0419 22:06:59.506853 8388 net.cpp:137] Memory required for data: 2129307648
I0419 22:06:59.506855 8388 layer_factory.hpp:77] Creating layer accuracy
I0419 22:06:59.506862 8388 net.cpp:84] Creating Layer accuracy
I0419 22:06:59.506865 8388 net.cpp:406] accuracy <- fc8_fc8_0_split_0
I0419 22:06:59.506868 8388 net.cpp:406] accuracy <- label_val-data_1_split_0
I0419 22:06:59.506875 8388 net.cpp:380] accuracy -> accuracy
I0419 22:06:59.506881 8388 net.cpp:122] Setting up accuracy
I0419 22:06:59.506884 8388 net.cpp:129] Top shape: (1)
I0419 22:06:59.506886 8388 net.cpp:137] Memory required for data: 2129307652
I0419 22:06:59.506889 8388 layer_factory.hpp:77] Creating layer loss
I0419 22:06:59.506893 8388 net.cpp:84] Creating Layer loss
I0419 22:06:59.506896 8388 net.cpp:406] loss <- fc8_fc8_0_split_1
I0419 22:06:59.506899 8388 net.cpp:406] loss <- label_val-data_1_split_1
I0419 22:06:59.506903 8388 net.cpp:380] loss -> loss
I0419 22:06:59.506909 8388 layer_factory.hpp:77] Creating layer loss
I0419 22:06:59.507728 8388 net.cpp:122] Setting up loss
I0419 22:06:59.507737 8388 net.cpp:129] Top shape: (1)
I0419 22:06:59.507740 8388 net.cpp:132] with loss weight 1
I0419 22:06:59.507750 8388 net.cpp:137] Memory required for data: 2129307656
I0419 22:06:59.507753 8388 net.cpp:198] loss needs backward computation.
I0419 22:06:59.507757 8388 net.cpp:200] accuracy does not need backward computation.
I0419 22:06:59.507761 8388 net.cpp:198] fc8_fc8_0_split needs backward computation.
I0419 22:06:59.507763 8388 net.cpp:198] fc8 needs backward computation.
I0419 22:06:59.507766 8388 net.cpp:198] drop7 needs backward computation.
I0419 22:06:59.507769 8388 net.cpp:198] relu7 needs backward computation.
I0419 22:06:59.507771 8388 net.cpp:198] fc7 needs backward computation.
I0419 22:06:59.507774 8388 net.cpp:198] drop6 needs backward computation.
I0419 22:06:59.507776 8388 net.cpp:198] relu6 needs backward computation.
I0419 22:06:59.507779 8388 net.cpp:198] fc6 needs backward computation.
I0419 22:06:59.507783 8388 net.cpp:198] pool5 needs backward computation.
I0419 22:06:59.507786 8388 net.cpp:198] relu5 needs backward computation.
I0419 22:06:59.507788 8388 net.cpp:198] conv5 needs backward computation.
I0419 22:06:59.507791 8388 net.cpp:198] relu4 needs backward computation.
I0419 22:06:59.507793 8388 net.cpp:198] conv4 needs backward computation.
I0419 22:06:59.507797 8388 net.cpp:198] relu3 needs backward computation.
I0419 22:06:59.507799 8388 net.cpp:198] conv3 needs backward computation.
I0419 22:06:59.507802 8388 net.cpp:198] pool2 needs backward computation.
I0419 22:06:59.507805 8388 net.cpp:198] norm2 needs backward computation.
I0419 22:06:59.507808 8388 net.cpp:198] relu2 needs backward computation.
I0419 22:06:59.507812 8388 net.cpp:198] conv2 needs backward computation.
I0419 22:06:59.507814 8388 net.cpp:198] pool1 needs backward computation.
I0419 22:06:59.507817 8388 net.cpp:198] norm1 needs backward computation.
I0419 22:06:59.507819 8388 net.cpp:198] relu1 needs backward computation.
I0419 22:06:59.507822 8388 net.cpp:198] conv1 needs backward computation.
I0419 22:06:59.507825 8388 net.cpp:200] label_val-data_1_split does not need backward computation.
I0419 22:06:59.507828 8388 net.cpp:200] val-data does not need backward computation.
I0419 22:06:59.507831 8388 net.cpp:242] This network produces output accuracy
I0419 22:06:59.507834 8388 net.cpp:242] This network produces output loss
I0419 22:06:59.507850 8388 net.cpp:255] Network initialization done.
I0419 22:06:59.507917 8388 solver.cpp:56] Solver scaffolding done.
I0419 22:06:59.508343 8388 caffe.cpp:248] Starting Optimization
I0419 22:06:59.508352 8388 solver.cpp:272] Solving
I0419 22:06:59.508354 8388 solver.cpp:273] Learning Rate Policy: exp
I0419 22:06:59.509974 8388 solver.cpp:330] Iteration 0, Testing net (#0)
I0419 22:06:59.509982 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:06:59.536379 8388 blocking_queue.cpp:49] Waiting for data
I0419 22:07:03.676674 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:07:04.192658 8388 solver.cpp:397] Test net output #0: accuracy = 0.00502232
I0419 22:07:04.192687 8388 solver.cpp:397] Test net output #1: loss = 5.28011 (* 1 = 5.28011 loss)
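
The iteration-0 test numbers are about what an untrained 196-way softmax should give: accuracy near chance (1/196) and loss near the uniform cross-entropy ln(196). A one-line check:

import math
print(1 / 196, math.log(196))   # ~0.0051 and ~5.278, vs. the logged 0.00502232 and 5.28011
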
I0419 22:07:04.370927 8388 solver.cpp:218] Iteration 0 (-1.14906e-43 iter/s, 4.86255s/12 iters), loss = 5.27742
I0419 22:07:04.370968 8388 solver.cpp:237] Train net output #0: loss = 5.27742 (* 1 = 5.27742 loss)
I0419 22:07:04.370988 8388 sgd_solver.cpp:105] Iteration 0, lr = 0.01
I0419 22:07:11.987108 8388 solver.cpp:218] Iteration 12 (1.5756 iter/s, 7.61616s/12 iters), loss = 5.28636
I0419 22:07:11.987147 8388 solver.cpp:237] Train net output #0: loss = 5.28636 (* 1 = 5.28636 loss)
I0419 22:07:11.987156 8388 sgd_solver.cpp:105] Iteration 12, lr = 0.00992109
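
The throughput figure in each display block is just the 12-iteration display interval divided by the elapsed wall time, e.g. 12 / 7.61616 s for the block above; at the roughly 1.2 iter/s seen once training settles, the 3060-iteration run works out to around 42 minutes of pure training time, before snapshotting and testing overhead (a rough estimate, not from the log):

display, elapsed = 12, 7.61616   # values from the block above
print(display / elapsed)         # ~1.5756 iter/s, as logged
print(3060 / 1.2 / 60)           # ~42.5 minutes at ~1.2 iter/s (rough estimate)
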
I0419 22:07:22.447084 8388 solver.cpp:218] Iteration 24 (1.14723 iter/s, 10.46s/12 iters), loss = 5.29546
I0419 22:07:22.447124 8388 solver.cpp:237] Train net output #0: loss = 5.29546 (* 1 = 5.29546 loss)
I0419 22:07:22.447134 8388 sgd_solver.cpp:105] Iteration 24, lr = 0.0098428
I0419 22:07:32.886793 8388 solver.cpp:218] Iteration 36 (1.14946 iter/s, 10.4397s/12 iters), loss = 5.28789
I0419 22:07:32.886873 8388 solver.cpp:237] Train net output #0: loss = 5.28789 (* 1 = 5.28789 loss)
I0419 22:07:32.886881 8388 sgd_solver.cpp:105] Iteration 36, lr = 0.00976512
I0419 22:07:44.231516 8388 solver.cpp:218] Iteration 48 (1.05776 iter/s, 11.3447s/12 iters), loss = 5.27882
I0419 22:07:44.231555 8388 solver.cpp:237] Train net output #0: loss = 5.27882 (* 1 = 5.27882 loss)
I0419 22:07:44.231564 8388 sgd_solver.cpp:105] Iteration 48, lr = 0.00968806
I0419 22:07:54.341483 8388 solver.cpp:218] Iteration 60 (1.18695 iter/s, 10.11s/12 iters), loss = 5.28659
I0419 22:07:54.341528 8388 solver.cpp:237] Train net output #0: loss = 5.28659 (* 1 = 5.28659 loss)
I0419 22:07:54.341536 8388 sgd_solver.cpp:105] Iteration 60, lr = 0.00961161
I0419 22:08:04.359741 8388 solver.cpp:218] Iteration 72 (1.19781 iter/s, 10.0182s/12 iters), loss = 5.25948
I0419 22:08:04.359864 8388 solver.cpp:237] Train net output #0: loss = 5.25948 (* 1 = 5.25948 loss)
I0419 22:08:04.359874 8388 sgd_solver.cpp:105] Iteration 72, lr = 0.00953576
I0419 22:08:15.763921 8388 solver.cpp:218] Iteration 84 (1.05225 iter/s, 11.4041s/12 iters), loss = 5.27071
I0419 22:08:15.763963 8388 solver.cpp:237] Train net output #0: loss = 5.27071 (* 1 = 5.27071 loss)
I0419 22:08:15.763972 8388 sgd_solver.cpp:105] Iteration 84, lr = 0.00946051
I0419 22:08:30.372925 8388 solver.cpp:218] Iteration 96 (0.82141 iter/s, 14.609s/12 iters), loss = 5.28336
I0419 22:08:30.372970 8388 solver.cpp:237] Train net output #0: loss = 5.28336 (* 1 = 5.28336 loss)
I0419 22:08:30.372979 8388 sgd_solver.cpp:105] Iteration 96, lr = 0.00938586
I0419 22:08:34.068722 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:08:34.683528 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_102.caffemodel
I0419 22:08:38.636963 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_102.solverstate
I0419 22:08:41.274389 8388 solver.cpp:330] Iteration 102, Testing net (#0)
I0419 22:08:41.274402 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:08:43.282207 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:08:44.306905 8388 solver.cpp:397] Test net output #0: accuracy = 0.00669643
I0419 22:08:44.306938 8388 solver.cpp:397] Test net output #1: loss = 5.28053 (* 1 = 5.28053 loss)
I0419 22:08:47.794473 8388 solver.cpp:218] Iteration 108 (0.688801 iter/s, 17.4216s/12 iters), loss = 5.2805
I0419 22:08:47.794520 8388 solver.cpp:237] Train net output #0: loss = 5.2805 (* 1 = 5.2805 loss)
I0419 22:08:47.794528 8388 sgd_solver.cpp:105] Iteration 108, lr = 0.00931179
I0419 22:08:57.640952 8388 solver.cpp:218] Iteration 120 (1.21871 iter/s, 9.84646s/12 iters), loss = 5.26157
I0419 22:08:57.640992 8388 solver.cpp:237] Train net output #0: loss = 5.26157 (* 1 = 5.26157 loss)
I0419 22:08:57.641000 8388 sgd_solver.cpp:105] Iteration 120, lr = 0.00923831
I0419 22:09:07.402696 8388 solver.cpp:218] Iteration 132 (1.22929 iter/s, 9.76174s/12 iters), loss = 5.22241
I0419 22:09:07.402845 8388 solver.cpp:237] Train net output #0: loss = 5.22241 (* 1 = 5.22241 loss)
I0419 22:09:07.402855 8388 sgd_solver.cpp:105] Iteration 132, lr = 0.0091654
I0419 22:09:17.390041 8388 solver.cpp:218] Iteration 144 (1.20153 iter/s, 9.98724s/12 iters), loss = 5.17911
I0419 22:09:17.390084 8388 solver.cpp:237] Train net output #0: loss = 5.17911 (* 1 = 5.17911 loss)
I0419 22:09:17.390091 8388 sgd_solver.cpp:105] Iteration 144, lr = 0.00909308
I0419 22:09:27.227888 8388 solver.cpp:218] Iteration 156 (1.21978 iter/s, 9.83784s/12 iters), loss = 5.17918
I0419 22:09:27.227932 8388 solver.cpp:237] Train net output #0: loss = 5.17918 (* 1 = 5.17918 loss)
I0419 22:09:27.227941 8388 sgd_solver.cpp:105] Iteration 156, lr = 0.00902132
I0419 22:09:36.834864 8388 solver.cpp:218] Iteration 168 (1.24909 iter/s, 9.60697s/12 iters), loss = 5.20648
I0419 22:09:36.834909 8388 solver.cpp:237] Train net output #0: loss = 5.20648 (* 1 = 5.20648 loss)
I0419 22:09:36.834916 8388 sgd_solver.cpp:105] Iteration 168, lr = 0.00895013
I0419 22:09:46.611712 8388 solver.cpp:218] Iteration 180 (1.22739 iter/s, 9.77684s/12 iters), loss = 5.20365
I0419 22:09:46.611807 8388 solver.cpp:237] Train net output #0: loss = 5.20365 (* 1 = 5.20365 loss)
I0419 22:09:46.611816 8388 sgd_solver.cpp:105] Iteration 180, lr = 0.0088795
I0419 22:09:56.472009 8388 solver.cpp:218] Iteration 192 (1.21701 iter/s, 9.86025s/12 iters), loss = 5.13487
I0419 22:09:56.472054 8388 solver.cpp:237] Train net output #0: loss = 5.13487 (* 1 = 5.13487 loss)
I0419 22:09:56.472061 8388 sgd_solver.cpp:105] Iteration 192, lr = 0.00880943
I0419 22:10:03.939743 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:10:05.279071 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_204.caffemodel
I0419 22:10:09.336427 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_204.solverstate
I0419 22:10:12.999265 8388 solver.cpp:330] Iteration 204, Testing net (#0)
I0419 22:10:12.999284 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:10:14.515853 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:10:16.073765 8388 solver.cpp:397] Test net output #0: accuracy = 0.00948661
I0419 22:10:16.073799 8388 solver.cpp:397] Test net output #1: loss = 5.13957 (* 1 = 5.13957 loss)
I0419 22:10:16.244982 8388 solver.cpp:218] Iteration 204 (0.606887 iter/s, 19.773s/12 iters), loss = 5.19456
I0419 22:10:16.245023 8388 solver.cpp:237] Train net output #0: loss = 5.19456 (* 1 = 5.19456 loss)
I0419 22:10:16.245031 8388 sgd_solver.cpp:105] Iteration 204, lr = 0.00873991
I0419 22:10:24.500116 8388 solver.cpp:218] Iteration 216 (1.45364 iter/s, 8.25512s/12 iters), loss = 5.10501
I0419 22:10:24.500247 8388 solver.cpp:237] Train net output #0: loss = 5.10501 (* 1 = 5.10501 loss)
I0419 22:10:24.500257 8388 sgd_solver.cpp:105] Iteration 216, lr = 0.00867094
I0419 22:10:34.230397 8388 solver.cpp:218] Iteration 228 (1.23328 iter/s, 9.73019s/12 iters), loss = 5.18554
I0419 22:10:34.230440 8388 solver.cpp:237] Train net output #0: loss = 5.18554 (* 1 = 5.18554 loss)
I0419 22:10:34.230449 8388 sgd_solver.cpp:105] Iteration 228, lr = 0.00860252
I0419 22:10:43.910948 8388 solver.cpp:218] Iteration 240 (1.2396 iter/s, 9.68055s/12 iters), loss = 5.13257
I0419 22:10:43.910991 8388 solver.cpp:237] Train net output #0: loss = 5.13257 (* 1 = 5.13257 loss)
I0419 22:10:43.911000 8388 sgd_solver.cpp:105] Iteration 240, lr = 0.00853463
I0419 22:10:53.649382 8388 solver.cpp:218] Iteration 252 (1.23223 iter/s, 9.73844s/12 iters), loss = 5.11076
I0419 22:10:53.649430 8388 solver.cpp:237] Train net output #0: loss = 5.11076 (* 1 = 5.11076 loss)
I0419 22:10:53.649438 8388 sgd_solver.cpp:105] Iteration 252, lr = 0.00846728
I0419 22:11:03.364590 8388 solver.cpp:218] Iteration 264 (1.23518 iter/s, 9.7152s/12 iters), loss = 5.03355
I0419 22:11:03.364744 8388 solver.cpp:237] Train net output #0: loss = 5.03355 (* 1 = 5.03355 loss)
I0419 22:11:03.364755 8388 sgd_solver.cpp:105] Iteration 264, lr = 0.00840046
I0419 22:11:13.108999 8388 solver.cpp:218] Iteration 276 (1.23149 iter/s, 9.7443s/12 iters), loss = 5.0978
I0419 22:11:13.109045 8388 solver.cpp:237] Train net output #0: loss = 5.0978 (* 1 = 5.0978 loss)
I0419 22:11:13.109053 8388 sgd_solver.cpp:105] Iteration 276, lr = 0.00833417
I0419 22:11:22.917641 8388 solver.cpp:218] Iteration 288 (1.22341 iter/s, 9.80865s/12 iters), loss = 5.02466
I0419 22:11:22.917685 8388 solver.cpp:237] Train net output #0: loss = 5.02466 (* 1 = 5.02466 loss)
I0419 22:11:22.917691 8388 sgd_solver.cpp:105] Iteration 288, lr = 0.00826841
I0419 22:11:33.041905 8388 solver.cpp:218] Iteration 300 (1.18527 iter/s, 10.1243s/12 iters), loss = 5.14892
I0419 22:11:33.041949 8388 solver.cpp:237] Train net output #0: loss = 5.14892 (* 1 = 5.14892 loss)
I0419 22:11:33.041956 8388 sgd_solver.cpp:105] Iteration 300, lr = 0.00820316
I0419 22:11:35.067457 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:11:37.284912 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_306.caffemodel
I0419 22:11:40.398605 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_306.solverstate
I0419 22:11:42.776551 8388 solver.cpp:330] Iteration 306, Testing net (#0)
I0419 22:11:42.776571 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:11:43.839759 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:11:45.802906 8388 solver.cpp:397] Test net output #0: accuracy = 0.0178571
I0419 22:11:45.802958 8388 solver.cpp:397] Test net output #1: loss = 5.08393 (* 1 = 5.08393 loss)
I0419 22:11:49.328936 8388 solver.cpp:218] Iteration 312 (0.73678 iter/s, 16.2871s/12 iters), loss = 5.03789
I0419 22:11:49.328981 8388 solver.cpp:237] Train net output #0: loss = 5.03789 (* 1 = 5.03789 loss)
I0419 22:11:49.328989 8388 sgd_solver.cpp:105] Iteration 312, lr = 0.00813842
I0419 22:11:59.111582 8388 solver.cpp:218] Iteration 324 (1.22666 iter/s, 9.78266s/12 iters), loss = 5.04894
I0419 22:11:59.111622 8388 solver.cpp:237] Train net output #0: loss = 5.04894 (* 1 = 5.04894 loss)
I0419 22:11:59.111630 8388 sgd_solver.cpp:105] Iteration 324, lr = 0.0080742
I0419 22:12:08.916244 8388 solver.cpp:218] Iteration 336 (1.22391 iter/s, 9.80468s/12 iters), loss = 5.06625
I0419 22:12:08.916357 8388 solver.cpp:237] Train net output #0: loss = 5.06625 (* 1 = 5.06625 loss)
I0419 22:12:08.916366 8388 sgd_solver.cpp:105] Iteration 336, lr = 0.00801048
I0419 22:12:18.739184 8388 solver.cpp:218] Iteration 348 (1.22164 iter/s, 9.82289s/12 iters), loss = 5.02382
I0419 22:12:18.739226 8388 solver.cpp:237] Train net output #0: loss = 5.02382 (* 1 = 5.02382 loss)
I0419 22:12:18.739235 8388 sgd_solver.cpp:105] Iteration 348, lr = 0.00794727
I0419 22:12:28.547685 8388 solver.cpp:218] Iteration 360 (1.22343 iter/s, 9.80852s/12 iters), loss = 5.01463
I0419 22:12:28.547729 8388 solver.cpp:237] Train net output #0: loss = 5.01463 (* 1 = 5.01463 loss)
I0419 22:12:28.547736 8388 sgd_solver.cpp:105] Iteration 360, lr = 0.00788456
I0419 22:12:38.392830 8388 solver.cpp:218] Iteration 372 (1.21887 iter/s, 9.84516s/12 iters), loss = 5.09938
I0419 22:12:38.392868 8388 solver.cpp:237] Train net output #0: loss = 5.09938 (* 1 = 5.09938 loss)
I0419 22:12:38.392876 8388 sgd_solver.cpp:105] Iteration 372, lr = 0.00782234
I0419 22:12:48.324082 8388 solver.cpp:218] Iteration 384 (1.20831 iter/s, 9.93127s/12 iters), loss = 5.00052
I0419 22:12:48.324270 8388 solver.cpp:237] Train net output #0: loss = 5.00052 (* 1 = 5.00052 loss)
I0419 22:12:48.324285 8388 sgd_solver.cpp:105] Iteration 384, lr = 0.00776061
I0419 22:12:58.162381 8388 solver.cpp:218] Iteration 396 (1.21974 iter/s, 9.83818s/12 iters), loss = 4.97863
I0419 22:12:58.162422 8388 solver.cpp:237] Train net output #0: loss = 4.97863 (* 1 = 4.97863 loss)
I0419 22:12:58.162431 8388 sgd_solver.cpp:105] Iteration 396, lr = 0.00769937
I0419 22:13:04.270758 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:13:07.035266 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_408.caffemodel
I0419 22:13:14.929359 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_408.solverstate
I0419 22:13:21.026894 8388 solver.cpp:330] Iteration 408, Testing net (#0)
I0419 22:13:21.026968 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:13:21.569077 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:13:24.067196 8388 solver.cpp:397] Test net output #0: accuracy = 0.0245536
I0419 22:13:24.067234 8388 solver.cpp:397] Test net output #1: loss = 5.00928 (* 1 = 5.00928 loss)
I0419 22:13:24.237728 8388 solver.cpp:218] Iteration 408 (0.460202 iter/s, 26.0755s/12 iters), loss = 5.03614
I0419 22:13:24.237769 8388 solver.cpp:237] Train net output #0: loss = 5.03614 (* 1 = 5.03614 loss)
I0419 22:13:24.237778 8388 sgd_solver.cpp:105] Iteration 408, lr = 0.00763861
I0419 22:13:26.040987 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:13:32.448987 8388 solver.cpp:218] Iteration 420 (1.46141 iter/s, 8.21126s/12 iters), loss = 5.03698
I0419 22:13:32.449030 8388 solver.cpp:237] Train net output #0: loss = 5.03698 (* 1 = 5.03698 loss)
I0419 22:13:32.449039 8388 sgd_solver.cpp:105] Iteration 420, lr = 0.00757833
I0419 22:13:42.054199 8388 solver.cpp:218] Iteration 432 (1.24932 iter/s, 9.60522s/12 iters), loss = 4.95438
I0419 22:13:42.054242 8388 solver.cpp:237] Train net output #0: loss = 4.95438 (* 1 = 4.95438 loss)
I0419 22:13:42.054251 8388 sgd_solver.cpp:105] Iteration 432, lr = 0.00751852
I0419 22:13:51.893339 8388 solver.cpp:218] Iteration 444 (1.21962 iter/s, 9.83915s/12 iters), loss = 4.87829
I0419 22:13:51.893465 8388 solver.cpp:237] Train net output #0: loss = 4.87829 (* 1 = 4.87829 loss)
I0419 22:13:51.893474 8388 sgd_solver.cpp:105] Iteration 444, lr = 0.00745919
I0419 22:14:01.872618 8388 solver.cpp:218] Iteration 456 (1.2025 iter/s, 9.97921s/12 iters), loss = 4.90401
I0419 22:14:01.872659 8388 solver.cpp:237] Train net output #0: loss = 4.90401 (* 1 = 4.90401 loss)
I0419 22:14:01.872668 8388 sgd_solver.cpp:105] Iteration 456, lr = 0.00740033
I0419 22:14:11.785838 8388 solver.cpp:218] Iteration 468 (1.2105 iter/s, 9.91324s/12 iters), loss = 5.00549
I0419 22:14:11.785876 8388 solver.cpp:237] Train net output #0: loss = 5.00549 (* 1 = 5.00549 loss)
I0419 22:14:11.785884 8388 sgd_solver.cpp:105] Iteration 468, lr = 0.00734193
I0419 22:14:21.412230 8388 solver.cpp:218] Iteration 480 (1.24657 iter/s, 9.62641s/12 iters), loss = 4.84857
I0419 22:14:21.412277 8388 solver.cpp:237] Train net output #0: loss = 4.84857 (* 1 = 4.84857 loss)
I0419 22:14:21.412286 8388 sgd_solver.cpp:105] Iteration 480, lr = 0.00728399
I0419 22:14:31.245781 8388 solver.cpp:218] Iteration 492 (1.22031 iter/s, 9.83356s/12 iters), loss = 4.87707
I0419 22:14:31.245918 8388 solver.cpp:237] Train net output #0: loss = 4.87707 (* 1 = 4.87707 loss)
I0419 22:14:31.245929 8388 sgd_solver.cpp:105] Iteration 492, lr = 0.00722651
I0419 22:14:41.128540 8388 solver.cpp:218] Iteration 504 (1.21424 iter/s, 9.88269s/12 iters), loss = 4.91282
I0419 22:14:41.128579 8388 solver.cpp:237] Train net output #0: loss = 4.91282 (* 1 = 4.91282 loss)
I0419 22:14:41.128587 8388 sgd_solver.cpp:105] Iteration 504, lr = 0.00716949
I0419 22:14:41.608240 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:14:45.213430 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_510.caffemodel
I0419 22:14:48.339941 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_510.solverstate
I0419 22:14:50.737144 8388 solver.cpp:330] Iteration 510, Testing net (#0)
I0419 22:14:50.737165 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:14:53.778944 8388 solver.cpp:397] Test net output #0: accuracy = 0.0379464
I0419 22:14:53.778973 8388 solver.cpp:397] Test net output #1: loss = 4.92743 (* 1 = 4.92743 loss)
I0419 22:14:55.302525 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:14:57.260371 8388 solver.cpp:218] Iteration 516 (0.743867 iter/s, 16.1319s/12 iters), loss = 4.91347
I0419 22:14:57.260416 8388 solver.cpp:237] Train net output #0: loss = 4.91347 (* 1 = 4.91347 loss)
I0419 22:14:57.260424 8388 sgd_solver.cpp:105] Iteration 516, lr = 0.00711291
I0419 22:15:07.122664 8388 solver.cpp:218] Iteration 528 (1.21675 iter/s, 9.86231s/12 iters), loss = 4.88541
I0419 22:15:07.122802 8388 solver.cpp:237] Train net output #0: loss = 4.88541 (* 1 = 4.88541 loss)
I0419 22:15:07.122812 8388 sgd_solver.cpp:105] Iteration 528, lr = 0.00705678
I0419 22:15:16.861132 8388 solver.cpp:218] Iteration 540 (1.23224 iter/s, 9.73839s/12 iters), loss = 4.80057
I0419 22:15:16.861181 8388 solver.cpp:237] Train net output #0: loss = 4.80057 (* 1 = 4.80057 loss)
I0419 22:15:16.861189 8388 sgd_solver.cpp:105] Iteration 540, lr = 0.00700109
I0419 22:15:26.560159 8388 solver.cpp:218] Iteration 552 (1.23724 iter/s, 9.69904s/12 iters), loss = 4.8823
I0419 22:15:26.560205 8388 solver.cpp:237] Train net output #0: loss = 4.8823 (* 1 = 4.8823 loss)
I0419 22:15:26.560214 8388 sgd_solver.cpp:105] Iteration 552, lr = 0.00694584
I0419 22:15:36.282104 8388 solver.cpp:218] Iteration 564 (1.23432 iter/s, 9.72197s/12 iters), loss = 4.89687
I0419 22:15:36.282142 8388 solver.cpp:237] Train net output #0: loss = 4.89687 (* 1 = 4.89687 loss)
I0419 22:15:36.282151 8388 sgd_solver.cpp:105] Iteration 564, lr = 0.00689103
I0419 22:15:46.158918 8388 solver.cpp:218] Iteration 576 (1.21496 iter/s, 9.87684s/12 iters), loss = 4.8168
I0419 22:15:46.159042 8388 solver.cpp:237] Train net output #0: loss = 4.8168 (* 1 = 4.8168 loss)
I0419 22:15:46.159052 8388 sgd_solver.cpp:105] Iteration 576, lr = 0.00683665
I0419 22:15:55.851758 8388 solver.cpp:218] Iteration 588 (1.23803 iter/s, 9.69278s/12 iters), loss = 4.86145
I0419 22:15:55.851799 8388 solver.cpp:237] Train net output #0: loss = 4.86145 (* 1 = 4.86145 loss)
I0419 22:15:55.851807 8388 sgd_solver.cpp:105] Iteration 588, lr = 0.0067827
I0419 22:16:05.680584 8388 solver.cpp:218] Iteration 600 (1.2209 iter/s, 9.82885s/12 iters), loss = 4.73482
I0419 22:16:05.680625 8388 solver.cpp:237] Train net output #0: loss = 4.73482 (* 1 = 4.73482 loss)
I0419 22:16:05.680634 8388 sgd_solver.cpp:105] Iteration 600, lr = 0.00672918
I0419 22:16:10.303166 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:16:14.512370 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_612.caffemodel
I0419 22:16:18.717360 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_612.solverstate
I0419 22:16:22.080000 8388 solver.cpp:330] Iteration 612, Testing net (#0)
I0419 22:16:22.080024 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:16:25.093509 8388 solver.cpp:397] Test net output #0: accuracy = 0.0485491
I0419 22:16:25.093538 8388 solver.cpp:397] Test net output #1: loss = 4.78141 (* 1 = 4.78141 loss)
I0419 22:16:25.262876 8388 solver.cpp:218] Iteration 612 (0.612795 iter/s, 19.5824s/12 iters), loss = 4.80115
I0419 22:16:25.262925 8388 solver.cpp:237] Train net output #0: loss = 4.80115 (* 1 = 4.80115 loss)
I0419 22:16:25.262938 8388 sgd_solver.cpp:105] Iteration 612, lr = 0.00667608
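Note: the steady-state windows above run at roughly 1.22 iter/s (about 9.8 s per 12 iterations on this GPU), while any 12-iteration window that spans a snapshot plus a test pass is much slower; the 0.612795 iter/s reported at iteration 612 is simply 12 iterations over the 19.5824 s window, which also covers writing snapshot_iter_612 (the .caffemodel weights plus the .solverstate needed to resume training) and the 7 test iterations. A quick check of that arithmetic:

    # solver.cpp:218 reports wall-clock time per 12-iteration window
    print(12 / 9.86231)   # ~1.2168 iter/s, a plain training window (iteration 528)
    print(12 / 19.5824)   # ~0.6128 iter/s, a window containing snapshot + test (iteration 612)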
I0419 22:16:26.159557 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:16:33.420145 8388 solver.cpp:218] Iteration 624 (1.47108 iter/s, 8.15727s/12 iters), loss = 4.67514
I0419 22:16:33.420189 8388 solver.cpp:237] Train net output #0: loss = 4.67514 (* 1 = 4.67514 loss)
I0419 22:16:33.420197 8388 sgd_solver.cpp:105] Iteration 624, lr = 0.00662339
I0419 22:16:43.229065 8388 solver.cpp:218] Iteration 636 (1.22337 iter/s, 9.80893s/12 iters), loss = 4.63829
I0419 22:16:43.229113 8388 solver.cpp:237] Train net output #0: loss = 4.63829 (* 1 = 4.63829 loss)
I0419 22:16:43.229121 8388 sgd_solver.cpp:105] Iteration 636, lr = 0.00657113
I0419 22:16:52.955930 8388 solver.cpp:218] Iteration 648 (1.23369 iter/s, 9.72688s/12 iters), loss = 4.65555
I0419 22:16:52.956104 8388 solver.cpp:237] Train net output #0: loss = 4.65555 (* 1 = 4.65555 loss)
I0419 22:16:52.956113 8388 sgd_solver.cpp:105] Iteration 648, lr = 0.00651927
I0419 22:17:02.860975 8388 solver.cpp:218] Iteration 660 (1.21152 iter/s, 9.90494s/12 iters), loss = 4.60032
I0419 22:17:02.861021 8388 solver.cpp:237] Train net output #0: loss = 4.60032 (* 1 = 4.60032 loss)
I0419 22:17:02.861028 8388 sgd_solver.cpp:105] Iteration 660, lr = 0.00646782
I0419 22:17:12.729957 8388 solver.cpp:218] Iteration 672 (1.21593 iter/s, 9.869s/12 iters), loss = 4.65899
I0419 22:17:12.730000 8388 solver.cpp:237] Train net output #0: loss = 4.65899 (* 1 = 4.65899 loss)
I0419 22:17:12.730007 8388 sgd_solver.cpp:105] Iteration 672, lr = 0.00641678
I0419 22:17:22.591028 8388 solver.cpp:218] Iteration 684 (1.2169 iter/s, 9.86109s/12 iters), loss = 4.68494
I0419 22:17:22.591071 8388 solver.cpp:237] Train net output #0: loss = 4.68494 (* 1 = 4.68494 loss)
I0419 22:17:22.591080 8388 sgd_solver.cpp:105] Iteration 684, lr = 0.00636615
I0419 22:17:32.314834 8388 solver.cpp:218] Iteration 696 (1.23408 iter/s, 9.72383s/12 iters), loss = 4.66963
I0419 22:17:32.314961 8388 solver.cpp:237] Train net output #0: loss = 4.66963 (* 1 = 4.66963 loss)
I0419 22:17:32.314971 8388 sgd_solver.cpp:105] Iteration 696, lr = 0.00631591
I0419 22:17:41.576464 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:17:42.369652 8388 solver.cpp:218] Iteration 708 (1.19346 iter/s, 10.0548s/12 iters), loss = 4.72233
I0419 22:17:42.369693 8388 solver.cpp:237] Train net output #0: loss = 4.72233 (* 1 = 4.72233 loss)
I0419 22:17:42.369701 8388 sgd_solver.cpp:105] Iteration 708, lr = 0.00626607
I0419 22:17:46.342007 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_714.caffemodel
I0419 22:17:49.465385 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_714.solverstate
I0419 22:17:51.828152 8388 solver.cpp:330] Iteration 714, Testing net (#0)
I0419 22:17:51.828171 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:17:54.851297 8388 solver.cpp:397] Test net output #0: accuracy = 0.063058
I0419 22:17:54.851336 8388 solver.cpp:397] Test net output #1: loss = 4.61546 (* 1 = 4.61546 loss)
I0419 22:17:55.459297 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:17:58.412201 8388 solver.cpp:218] Iteration 720 (0.748007 iter/s, 16.0426s/12 iters), loss = 4.56281
I0419 22:17:58.412247 8388 solver.cpp:237] Train net output #0: loss = 4.56281 (* 1 = 4.56281 loss)
I0419 22:17:58.412256 8388 sgd_solver.cpp:105] Iteration 720, lr = 0.00621662
I0419 22:18:07.979898 8388 solver.cpp:218] Iteration 732 (1.25422 iter/s, 9.56771s/12 iters), loss = 4.51193
I0419 22:18:07.980023 8388 solver.cpp:237] Train net output #0: loss = 4.51193 (* 1 = 4.51193 loss)
I0419 22:18:07.980033 8388 sgd_solver.cpp:105] Iteration 732, lr = 0.00616756
I0419 22:18:17.781811 8388 solver.cpp:218] Iteration 744 (1.22426 iter/s, 9.80185s/12 iters), loss = 4.65188
I0419 22:18:17.781855 8388 solver.cpp:237] Train net output #0: loss = 4.65188 (* 1 = 4.65188 loss)
I0419 22:18:17.781864 8388 sgd_solver.cpp:105] Iteration 744, lr = 0.00611889
I0419 22:18:27.519151 8388 solver.cpp:218] Iteration 756 (1.23237 iter/s, 9.73736s/12 iters), loss = 4.64757
I0419 22:18:27.519196 8388 solver.cpp:237] Train net output #0: loss = 4.64757 (* 1 = 4.64757 loss)
I0419 22:18:27.519203 8388 sgd_solver.cpp:105] Iteration 756, lr = 0.00607061
I0419 22:18:37.268318 8388 solver.cpp:218] Iteration 768 (1.23087 iter/s, 9.74918s/12 iters), loss = 4.57396
I0419 22:18:37.268359 8388 solver.cpp:237] Train net output #0: loss = 4.57396 (* 1 = 4.57396 loss)
I0419 22:18:37.268368 8388 sgd_solver.cpp:105] Iteration 768, lr = 0.0060227
I0419 22:18:47.094866 8388 solver.cpp:218] Iteration 780 (1.22118 iter/s, 9.82657s/12 iters), loss = 4.38618
I0419 22:18:47.095028 8388 solver.cpp:237] Train net output #0: loss = 4.38618 (* 1 = 4.38618 loss)
I0419 22:18:47.095038 8388 sgd_solver.cpp:105] Iteration 780, lr = 0.00597517
I0419 22:18:56.750957 8388 solver.cpp:218] Iteration 792 (1.24275 iter/s, 9.656s/12 iters), loss = 4.54611
I0419 22:18:56.751121 8388 solver.cpp:237] Train net output #0: loss = 4.54611 (* 1 = 4.54611 loss)
I0419 22:18:56.751132 8388 sgd_solver.cpp:105] Iteration 792, lr = 0.00592802
I0419 22:19:06.570677 8388 solver.cpp:218] Iteration 804 (1.22204 iter/s, 9.81962s/12 iters), loss = 4.42251
I0419 22:19:06.570724 8388 solver.cpp:237] Train net output #0: loss = 4.42251 (* 1 = 4.42251 loss)
I0419 22:19:06.570732 8388 sgd_solver.cpp:105] Iteration 804, lr = 0.00588124
I0419 22:19:09.988018 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:19:15.360080 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_816.caffemodel
I0419 22:19:18.538602 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_816.solverstate
I0419 22:19:20.952364 8388 solver.cpp:330] Iteration 816, Testing net (#0)
I0419 22:19:20.952381 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:19:23.996727 8388 solver.cpp:397] Test net output #0: accuracy = 0.0753348
I0419 22:19:23.996757 8388 solver.cpp:397] Test net output #1: loss = 4.44134 (* 1 = 4.44134 loss)
I0419 22:19:24.169198 8388 solver.cpp:218] Iteration 816 (0.681872 iter/s, 17.5986s/12 iters), loss = 4.25633
I0419 22:19:24.169243 8388 solver.cpp:237] Train net output #0: loss = 4.25633 (* 1 = 4.25633 loss)
I0419 22:19:24.169251 8388 sgd_solver.cpp:105] Iteration 816, lr = 0.00583483
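Note: with test_interval: 102 and test_iter: 7, the solver runs a 7-batch validation pass every 102 training iterations (iterations 510, 612, 714, 816, ...), and the logged accuracy is the mean over those batches; so far it has climbed 0.0379 -> 0.0485 -> 0.0631 -> 0.0753. A small sketch for pulling the (iteration, accuracy) pairs out of this file for plotting, assuming the log is saved as caffe_output.log:

    import re
    # collect (iteration, accuracy) pairs from the test-phase lines of this log
    pat_iter = re.compile(r"Iteration (\d+), Testing net")
    pat_acc  = re.compile(r"Test net output #0: accuracy = ([\d.]+)")
    points, cur = [], None
    with open("caffe_output.log") as f:
        for line in f:
            m = pat_iter.search(line)
            if m:
                cur = int(m.group(1))
            m = pat_acc.search(line)
            if m and cur is not None:
                points.append((cur, float(m.group(1))))
    print(points)  # [(510, 0.0379464), (612, 0.0485491), (714, 0.063058), (816, 0.0753348), ...]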
I0419 22:19:24.183120 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:19:32.335815 8388 solver.cpp:218] Iteration 828 (1.4694 iter/s, 8.16662s/12 iters), loss = 4.44123
I0419 22:19:32.335858 8388 solver.cpp:237] Train net output #0: loss = 4.44123 (* 1 = 4.44123 loss)
I0419 22:19:32.335865 8388 sgd_solver.cpp:105] Iteration 828, lr = 0.00578879
I0419 22:19:42.160807 8388 solver.cpp:218] Iteration 840 (1.22137 iter/s, 9.82502s/12 iters), loss = 4.24344
I0419 22:19:42.160849 8388 solver.cpp:237] Train net output #0: loss = 4.24344 (* 1 = 4.24344 loss)
I0419 22:19:42.160858 8388 sgd_solver.cpp:105] Iteration 840, lr = 0.00574311
I0419 22:19:51.896054 8388 solver.cpp:218] Iteration 852 (1.23263 iter/s, 9.73526s/12 iters), loss = 4.1888
I0419 22:19:51.896175 8388 solver.cpp:237] Train net output #0: loss = 4.1888 (* 1 = 4.1888 loss)
I0419 22:19:51.896185 8388 sgd_solver.cpp:105] Iteration 852, lr = 0.00569778
I0419 22:20:01.642796 8388 solver.cpp:218] Iteration 864 (1.23119 iter/s, 9.74669s/12 iters), loss = 4.34775
I0419 22:20:01.642838 8388 solver.cpp:237] Train net output #0: loss = 4.34775 (* 1 = 4.34775 loss)
I0419 22:20:01.642846 8388 sgd_solver.cpp:105] Iteration 864, lr = 0.00565282
I0419 22:20:11.453861 8388 solver.cpp:218] Iteration 876 (1.22311 iter/s, 9.81109s/12 iters), loss = 4.40992
I0419 22:20:11.453907 8388 solver.cpp:237] Train net output #0: loss = 4.40992 (* 1 = 4.40992 loss)
I0419 22:20:11.453914 8388 sgd_solver.cpp:105] Iteration 876, lr = 0.00560821
I0419 22:20:21.257522 8388 solver.cpp:218] Iteration 888 (1.22403 iter/s, 9.80368s/12 iters), loss = 4.19431
I0419 22:20:21.257570 8388 solver.cpp:237] Train net output #0: loss = 4.19431 (* 1 = 4.19431 loss)
I0419 22:20:21.257577 8388 sgd_solver.cpp:105] Iteration 888, lr = 0.00556396
I0419 22:20:31.047910 8388 solver.cpp:218] Iteration 900 (1.22569 iter/s, 9.79041s/12 iters), loss = 4.12859
I0419 22:20:31.048089 8388 solver.cpp:237] Train net output #0: loss = 4.12859 (* 1 = 4.12859 loss)
I0419 22:20:31.048099 8388 sgd_solver.cpp:105] Iteration 900, lr = 0.00552005
I0419 22:20:38.446794 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:20:40.648494 8388 solver.cpp:218] Iteration 912 (1.24994 iter/s, 9.60047s/12 iters), loss = 4.22121
I0419 22:20:40.648537 8388 solver.cpp:237] Train net output #0: loss = 4.22121 (* 1 = 4.22121 loss)
I0419 22:20:40.648548 8388 sgd_solver.cpp:105] Iteration 912, lr = 0.00547649
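Note: the recurring "Restarting data prefetching from start" messages from data_layer.cpp mean the training LMDB has been read through once and wrapped around; here they land roughly every ~101 iterations (around iterations 505, 606, 707, 808, 909, judging by the timestamps). With batch_size: 256 that puts one pass over the training split at roughly 26k images, though this is only an estimate read off the log, not an exact count:

    # rough epoch-size estimate from the spacing of the prefetch-restart messages
    batch_size = 256
    restart_iters = [505, 606, 707, 808, 909]   # approximate, inferred from the timestamps above
    spacing = (restart_iters[-1] - restart_iters[0]) / (len(restart_iters) - 1)
    print(spacing * batch_size)   # ~25,856 images per epoch (estimate)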
I0419 22:20:44.622812 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_918.caffemodel
I0419 22:20:47.748584 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_918.solverstate
I0419 22:20:50.859928 8388 solver.cpp:330] Iteration 918, Testing net (#0)
I0419 22:20:50.859947 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:20:53.534390 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:20:53.922487 8388 solver.cpp:397] Test net output #0: accuracy = 0.104911
I0419 22:20:53.922518 8388 solver.cpp:397] Test net output #1: loss = 4.2927 (* 1 = 4.2927 loss)
I0419 22:20:57.571473 8388 solver.cpp:218] Iteration 924 (0.709091 iter/s, 16.9231s/12 iters), loss = 4.22238
I0419 22:20:57.571518 8388 solver.cpp:237] Train net output #0: loss = 4.22238 (* 1 = 4.22238 loss)
I0419 22:20:57.571527 8388 sgd_solver.cpp:105] Iteration 924, lr = 0.00543327
I0419 22:21:07.360793 8388 solver.cpp:218] Iteration 936 (1.22582 iter/s, 9.78934s/12 iters), loss = 4.21416
I0419 22:21:07.360913 8388 solver.cpp:237] Train net output #0: loss = 4.21416 (* 1 = 4.21416 loss)
I0419 22:21:07.360922 8388 sgd_solver.cpp:105] Iteration 936, lr = 0.0053904
I0419 22:21:17.155289 8388 solver.cpp:218] Iteration 948 (1.22518 iter/s, 9.79445s/12 iters), loss = 4.17344
I0419 22:21:17.155329 8388 solver.cpp:237] Train net output #0: loss = 4.17344 (* 1 = 4.17344 loss)
I0419 22:21:17.155337 8388 sgd_solver.cpp:105] Iteration 948, lr = 0.00534786
I0419 22:21:26.851521 8388 solver.cpp:218] Iteration 960 (1.23759 iter/s, 9.69625s/12 iters), loss = 4.04022
I0419 22:21:26.851565 8388 solver.cpp:237] Train net output #0: loss = 4.04022 (* 1 = 4.04022 loss)
I0419 22:21:26.851572 8388 sgd_solver.cpp:105] Iteration 960, lr = 0.00530566
I0419 22:21:36.642643 8388 solver.cpp:218] Iteration 972 (1.2256 iter/s, 9.79114s/12 iters), loss = 3.90668
I0419 22:21:36.642693 8388 solver.cpp:237] Train net output #0: loss = 3.90668 (* 1 = 3.90668 loss)
I0419 22:21:36.642701 8388 sgd_solver.cpp:105] Iteration 972, lr = 0.00526379
I0419 22:21:46.464399 8388 solver.cpp:218] Iteration 984 (1.22178 iter/s, 9.82177s/12 iters), loss = 3.93735
I0419 22:21:46.464527 8388 solver.cpp:237] Train net output #0: loss = 3.93735 (* 1 = 3.93735 loss)
I0419 22:21:46.464537 8388 sgd_solver.cpp:105] Iteration 984, lr = 0.00522225
I0419 22:21:48.803463 8388 blocking_queue.cpp:49] Waiting for data
I0419 22:21:56.272891 8388 solver.cpp:218] Iteration 996 (1.22344 iter/s, 9.80844s/12 iters), loss = 3.93039
I0419 22:21:56.272929 8388 solver.cpp:237] Train net output #0: loss = 3.93039 (* 1 = 3.93039 loss)
I0419 22:21:56.272939 8388 sgd_solver.cpp:105] Iteration 996, lr = 0.00518104
I0419 22:22:05.994793 8388 solver.cpp:218] Iteration 1008 (1.23432 iter/s, 9.72193s/12 iters), loss = 4.0363
I0419 22:22:05.994837 8388 solver.cpp:237] Train net output #0: loss = 4.0363 (* 1 = 4.0363 loss)
I0419 22:22:05.994845 8388 sgd_solver.cpp:105] Iteration 1008, lr = 0.00514015
I0419 22:22:07.926429 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:22:14.820166 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1020.caffemodel
I0419 22:22:17.905823 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1020.solverstate
I0419 22:22:20.872442 8388 solver.cpp:330] Iteration 1020, Testing net (#0)
I0419 22:22:20.872467 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:22:22.977860 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:22:23.872515 8388 solver.cpp:397] Test net output #0: accuracy = 0.125
I0419 22:22:23.872546 8388 solver.cpp:397] Test net output #1: loss = 4.08602 (* 1 = 4.08602 loss)
I0419 22:22:24.046663 8388 solver.cpp:218] Iteration 1020 (0.664747 iter/s, 18.052s/12 iters), loss = 3.79699
I0419 22:22:24.046705 8388 solver.cpp:237] Train net output #0: loss = 3.79699 (* 1 = 3.79699 loss)
I0419 22:22:24.046715 8388 sgd_solver.cpp:105] Iteration 1020, lr = 0.00509959
I0419 22:22:32.249048 8388 solver.cpp:218] Iteration 1032 (1.46299 iter/s, 8.2024s/12 iters), loss = 3.88068
I0419 22:22:32.249089 8388 solver.cpp:237] Train net output #0: loss = 3.88068 (* 1 = 3.88068 loss)
I0419 22:22:32.249097 8388 sgd_solver.cpp:105] Iteration 1032, lr = 0.00505935
I0419 22:22:42.060923 8388 solver.cpp:218] Iteration 1044 (1.223 iter/s, 9.8119s/12 iters), loss = 3.90509
I0419 22:22:42.060966 8388 solver.cpp:237] Train net output #0: loss = 3.90509 (* 1 = 3.90509 loss)
I0419 22:22:42.060973 8388 sgd_solver.cpp:105] Iteration 1044, lr = 0.00501942
I0419 22:22:51.798135 8388 solver.cpp:218] Iteration 1056 (1.23238 iter/s, 9.73724s/12 iters), loss = 3.86523
I0419 22:22:51.798266 8388 solver.cpp:237] Train net output #0: loss = 3.86523 (* 1 = 3.86523 loss)
I0419 22:22:51.798275 8388 sgd_solver.cpp:105] Iteration 1056, lr = 0.00497981
I0419 22:23:01.596972 8388 solver.cpp:218] Iteration 1068 (1.22464 iter/s, 9.79877s/12 iters), loss = 3.74652
I0419 22:23:01.597016 8388 solver.cpp:237] Train net output #0: loss = 3.74652 (* 1 = 3.74652 loss)
I0419 22:23:01.597024 8388 sgd_solver.cpp:105] Iteration 1068, lr = 0.00494052
I0419 22:23:11.559576 8388 solver.cpp:218] Iteration 1080 (1.2045 iter/s, 9.96263s/12 iters), loss = 3.8482
I0419 22:23:11.559617 8388 solver.cpp:237] Train net output #0: loss = 3.8482 (* 1 = 3.8482 loss)
I0419 22:23:11.559625 8388 sgd_solver.cpp:105] Iteration 1080, lr = 0.00490153
I0419 22:23:21.465250 8388 solver.cpp:218] Iteration 1092 (1.21142 iter/s, 9.9057s/12 iters), loss = 3.81767
I0419 22:23:21.465296 8388 solver.cpp:237] Train net output #0: loss = 3.81767 (* 1 = 3.81767 loss)
I0419 22:23:21.465304 8388 sgd_solver.cpp:105] Iteration 1092, lr = 0.00486285
I0419 22:23:31.374925 8388 solver.cpp:218] Iteration 1104 (1.21093 iter/s, 9.9097s/12 iters), loss = 3.85051
I0419 22:23:31.375039 8388 solver.cpp:237] Train net output #0: loss = 3.85051 (* 1 = 3.85051 loss)
I0419 22:23:31.375049 8388 sgd_solver.cpp:105] Iteration 1104, lr = 0.00482448
I0419 22:23:37.570214 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:23:41.292498 8388 solver.cpp:218] Iteration 1116 (1.20998 iter/s, 9.91753s/12 iters), loss = 4.02951
I0419 22:23:41.292541 8388 solver.cpp:237] Train net output #0: loss = 4.02951 (* 1 = 4.02951 loss)
I0419 22:23:41.292551 8388 sgd_solver.cpp:105] Iteration 1116, lr = 0.0047864
I0419 22:23:45.280167 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1122.caffemodel
I0419 22:23:48.396533 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1122.solverstate
I0419 22:23:50.755777 8388 solver.cpp:330] Iteration 1122, Testing net (#0)
I0419 22:23:50.755800 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:23:52.446493 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:23:53.785688 8388 solver.cpp:397] Test net output #0: accuracy = 0.15067
I0419 22:23:53.785719 8388 solver.cpp:397] Test net output #1: loss = 3.95934 (* 1 = 3.95934 loss)
I0419 22:23:57.366051 8388 solver.cpp:218] Iteration 1128 (0.746564 iter/s, 16.0736s/12 iters), loss = 3.85931
I0419 22:23:57.366094 8388 solver.cpp:237] Train net output #0: loss = 3.85931 (* 1 = 3.85931 loss)
I0419 22:23:57.366103 8388 sgd_solver.cpp:105] Iteration 1128, lr = 0.00474863
I0419 22:24:07.036546 8388 solver.cpp:218] Iteration 1140 (1.24089 iter/s, 9.67051s/12 iters), loss = 3.69307
I0419 22:24:07.036721 8388 solver.cpp:237] Train net output #0: loss = 3.69307 (* 1 = 3.69307 loss)
I0419 22:24:07.036731 8388 sgd_solver.cpp:105] Iteration 1140, lr = 0.00471116
I0419 22:24:16.775583 8388 solver.cpp:218] Iteration 1152 (1.23217 iter/s, 9.73893s/12 iters), loss = 3.50139
I0419 22:24:16.775624 8388 solver.cpp:237] Train net output #0: loss = 3.50139 (* 1 = 3.50139 loss)
I0419 22:24:16.775632 8388 sgd_solver.cpp:105] Iteration 1152, lr = 0.00467398
I0419 22:24:26.605556 8388 solver.cpp:218] Iteration 1164 (1.22075 iter/s, 9.83s/12 iters), loss = 3.85523
I0419 22:24:26.605602 8388 solver.cpp:237] Train net output #0: loss = 3.85523 (* 1 = 3.85523 loss)
I0419 22:24:26.605608 8388 sgd_solver.cpp:105] Iteration 1164, lr = 0.0046371
I0419 22:24:36.354758 8388 solver.cpp:218] Iteration 1176 (1.23087 iter/s, 9.74922s/12 iters), loss = 3.67101
I0419 22:24:36.354806 8388 solver.cpp:237] Train net output #0: loss = 3.67101 (* 1 = 3.67101 loss)
I0419 22:24:36.354815 8388 sgd_solver.cpp:105] Iteration 1176, lr = 0.00460051
I0419 22:24:46.051432 8388 solver.cpp:218] Iteration 1188 (1.23754 iter/s, 9.69669s/12 iters), loss = 3.51516
I0419 22:24:46.051532 8388 solver.cpp:237] Train net output #0: loss = 3.51516 (* 1 = 3.51516 loss)
I0419 22:24:46.051546 8388 sgd_solver.cpp:105] Iteration 1188, lr = 0.0045642
I0419 22:24:55.871601 8388 solver.cpp:218] Iteration 1200 (1.22198 iter/s, 9.82014s/12 iters), loss = 3.68767
I0419 22:24:55.871644 8388 solver.cpp:237] Train net output #0: loss = 3.68767 (* 1 = 3.68767 loss)
I0419 22:24:55.871651 8388 sgd_solver.cpp:105] Iteration 1200, lr = 0.00452818
I0419 22:25:05.834386 8388 solver.cpp:218] Iteration 1212 (1.20448 iter/s, 9.96281s/12 iters), loss = 3.54728
I0419 22:25:05.834432 8388 solver.cpp:237] Train net output #0: loss = 3.54728 (* 1 = 3.54728 loss)
I0419 22:25:05.834441 8388 sgd_solver.cpp:105] Iteration 1212, lr = 0.00449245
I0419 22:25:06.374996 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:25:14.741755 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1224.caffemodel
I0419 22:25:17.770177 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1224.solverstate
I0419 22:25:22.704339 8388 solver.cpp:330] Iteration 1224, Testing net (#0)
I0419 22:25:22.704358 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:25:23.896768 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:25:25.749557 8388 solver.cpp:397] Test net output #0: accuracy = 0.158482
I0419 22:25:25.749588 8388 solver.cpp:397] Test net output #1: loss = 3.80681 (* 1 = 3.80681 loss)
I0419 22:25:25.927866 8388 solver.cpp:218] Iteration 1224 (0.597205 iter/s, 20.0936s/12 iters), loss = 3.48562
I0419 22:25:25.927906 8388 solver.cpp:237] Train net output #0: loss = 3.48562 (* 1 = 3.48562 loss)
I0419 22:25:25.927917 8388 sgd_solver.cpp:105] Iteration 1224, lr = 0.004457
I0419 22:25:34.027281 8388 solver.cpp:218] Iteration 1236 (1.48159 iter/s, 8.09942s/12 iters), loss = 3.65521
I0419 22:25:34.027325 8388 solver.cpp:237] Train net output #0: loss = 3.65521 (* 1 = 3.65521 loss)
I0419 22:25:34.027333 8388 sgd_solver.cpp:105] Iteration 1236, lr = 0.00442183
I0419 22:25:43.764396 8388 solver.cpp:218] Iteration 1248 (1.2324 iter/s, 9.73713s/12 iters), loss = 3.5639
I0419 22:25:43.764441 8388 solver.cpp:237] Train net output #0: loss = 3.5639 (* 1 = 3.5639 loss)
I0419 22:25:43.764449 8388 sgd_solver.cpp:105] Iteration 1248, lr = 0.00438693
I0419 22:25:53.579524 8388 solver.cpp:218] Iteration 1260 (1.2226 iter/s, 9.81515s/12 iters), loss = 3.56153
I0419 22:25:53.579686 8388 solver.cpp:237] Train net output #0: loss = 3.56153 (* 1 = 3.56153 loss)
I0419 22:25:53.579696 8388 sgd_solver.cpp:105] Iteration 1260, lr = 0.00435231
I0419 22:26:03.233553 8388 solver.cpp:218] Iteration 1272 (1.24302 iter/s, 9.65393s/12 iters), loss = 3.676
I0419 22:26:03.233594 8388 solver.cpp:237] Train net output #0: loss = 3.676 (* 1 = 3.676 loss)
I0419 22:26:03.233603 8388 sgd_solver.cpp:105] Iteration 1272, lr = 0.00431797
I0419 22:26:13.024641 8388 solver.cpp:218] Iteration 1284 (1.2256 iter/s, 9.79111s/12 iters), loss = 3.35556
I0419 22:26:13.024684 8388 solver.cpp:237] Train net output #0: loss = 3.35556 (* 1 = 3.35556 loss)
I0419 22:26:13.024693 8388 sgd_solver.cpp:105] Iteration 1284, lr = 0.00428389
I0419 22:26:22.812793 8388 solver.cpp:218] Iteration 1296 (1.22597 iter/s, 9.78817s/12 iters), loss = 3.35142
I0419 22:26:22.812835 8388 solver.cpp:237] Train net output #0: loss = 3.35142 (* 1 = 3.35142 loss)
I0419 22:26:22.812844 8388 sgd_solver.cpp:105] Iteration 1296, lr = 0.00425009
I0419 22:26:32.589087 8388 solver.cpp:218] Iteration 1308 (1.22746 iter/s, 9.77631s/12 iters), loss = 3.2421
I0419 22:26:32.589244 8388 solver.cpp:237] Train net output #0: loss = 3.2421 (* 1 = 3.2421 loss)
I0419 22:26:32.589258 8388 sgd_solver.cpp:105] Iteration 1308, lr = 0.00421655
I0419 22:26:37.478966 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:26:42.247272 8388 solver.cpp:218] Iteration 1320 (1.24248 iter/s, 9.65809s/12 iters), loss = 3.21937
I0419 22:26:42.247328 8388 solver.cpp:237] Train net output #0: loss = 3.21937 (* 1 = 3.21937 loss)
I0419 22:26:42.247340 8388 sgd_solver.cpp:105] Iteration 1320, lr = 0.00418328
I0419 22:26:46.111183 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1326.caffemodel
I0419 22:26:49.383445 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1326.solverstate
I0419 22:26:51.741848 8388 solver.cpp:330] Iteration 1326, Testing net (#0)
I0419 22:26:51.741871 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:26:52.462412 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:26:54.965144 8388 solver.cpp:397] Test net output #0: accuracy = 0.174107
I0419 22:26:54.965175 8388 solver.cpp:397] Test net output #1: loss = 3.62477 (* 1 = 3.62477 loss)
I0419 22:26:58.431097 8388 solver.cpp:218] Iteration 1332 (0.741478 iter/s, 16.1839s/12 iters), loss = 3.18707
I0419 22:26:58.431138 8388 solver.cpp:237] Train net output #0: loss = 3.18707 (* 1 = 3.18707 loss)
I0419 22:26:58.431146 8388 sgd_solver.cpp:105] Iteration 1332, lr = 0.00415026
I0419 22:27:08.176178 8388 solver.cpp:218] Iteration 1344 (1.23139 iter/s, 9.7451s/12 iters), loss = 3.23452
I0419 22:27:08.176311 8388 solver.cpp:237] Train net output #0: loss = 3.23452 (* 1 = 3.23452 loss)
I0419 22:27:08.176326 8388 sgd_solver.cpp:105] Iteration 1344, lr = 0.00411751
I0419 22:27:18.015789 8388 solver.cpp:218] Iteration 1356 (1.21957 iter/s, 9.83955s/12 iters), loss = 3.21101
I0419 22:27:18.015828 8388 solver.cpp:237] Train net output #0: loss = 3.21101 (* 1 = 3.21101 loss)
I0419 22:27:18.015836 8388 sgd_solver.cpp:105] Iteration 1356, lr = 0.00408502
I0419 22:27:27.807385 8388 solver.cpp:218] Iteration 1368 (1.22554 iter/s, 9.79162s/12 iters), loss = 3.25041
I0419 22:27:27.807430 8388 solver.cpp:237] Train net output #0: loss = 3.25041 (* 1 = 3.25041 loss)
I0419 22:27:27.807440 8388 sgd_solver.cpp:105] Iteration 1368, lr = 0.00405278
I0419 22:27:37.482811 8388 solver.cpp:218] Iteration 1380 (1.24025 iter/s, 9.67545s/12 iters), loss = 3.29612
I0419 22:27:37.482851 8388 solver.cpp:237] Train net output #0: loss = 3.29612 (* 1 = 3.29612 loss)
I0419 22:27:37.482859 8388 sgd_solver.cpp:105] Iteration 1380, lr = 0.0040208
I0419 22:27:47.294028 8388 solver.cpp:218] Iteration 1392 (1.22309 iter/s, 9.81124s/12 iters), loss = 3.02213
I0419 22:27:47.294211 8388 solver.cpp:237] Train net output #0: loss = 3.02213 (* 1 = 3.02213 loss)
I0419 22:27:47.294222 8388 sgd_solver.cpp:105] Iteration 1392, lr = 0.00398907
I0419 22:27:57.097543 8388 solver.cpp:218] Iteration 1404 (1.22406 iter/s, 9.80341s/12 iters), loss = 2.81259
I0419 22:27:57.097580 8388 solver.cpp:237] Train net output #0: loss = 2.81259 (* 1 = 2.81259 loss)
I0419 22:27:57.097589 8388 sgd_solver.cpp:105] Iteration 1404, lr = 0.00395759
I0419 22:28:06.247995 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:28:06.944599 8388 solver.cpp:218] Iteration 1416 (1.21863 iter/s, 9.84709s/12 iters), loss = 3.05076
I0419 22:28:06.944648 8388 solver.cpp:237] Train net output #0: loss = 3.05076 (* 1 = 3.05076 loss)
I0419 22:28:06.944659 8388 sgd_solver.cpp:105] Iteration 1416, lr = 0.00392636
I0419 22:28:15.831040 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1428.caffemodel
I0419 22:28:18.710727 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1428.solverstate
I0419 22:28:21.821646 8388 solver.cpp:330] Iteration 1428, Testing net (#0)
I0419 22:28:21.821672 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:28:22.069674 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:28:24.865481 8388 solver.cpp:397] Test net output #0: accuracy = 0.205357
I0419 22:28:24.865518 8388 solver.cpp:397] Test net output #1: loss = 3.51016 (* 1 = 3.51016 loss)
I0419 22:28:25.039078 8388 solver.cpp:218] Iteration 1428 (0.663182 iter/s, 18.0946s/12 iters), loss = 2.75677
I0419 22:28:25.039124 8388 solver.cpp:237] Train net output #0: loss = 2.75677 (* 1 = 2.75677 loss)
I0419 22:28:25.039134 8388 sgd_solver.cpp:105] Iteration 1428, lr = 0.00389538
I0419 22:28:26.586690 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:28:33.717829 8388 solver.cpp:218] Iteration 1440 (1.38269 iter/s, 8.67876s/12 iters), loss = 2.84985
I0419 22:28:33.717872 8388 solver.cpp:237] Train net output #0: loss = 2.84985 (* 1 = 2.84985 loss)
I0419 22:28:33.717880 8388 sgd_solver.cpp:105] Iteration 1440, lr = 0.00386464
I0419 22:28:43.519527 8388 solver.cpp:218] Iteration 1452 (1.22428 iter/s, 9.80172s/12 iters), loss = 3.08693
I0419 22:28:43.519570 8388 solver.cpp:237] Train net output #0: loss = 3.08693 (* 1 = 3.08693 loss)
I0419 22:28:43.519578 8388 sgd_solver.cpp:105] Iteration 1452, lr = 0.00383414
I0419 22:28:53.362607 8388 solver.cpp:218] Iteration 1464 (1.21913 iter/s, 9.8431s/12 iters), loss = 2.7765
I0419 22:28:53.362725 8388 solver.cpp:237] Train net output #0: loss = 2.7765 (* 1 = 2.7765 loss)
I0419 22:28:53.362735 8388 sgd_solver.cpp:105] Iteration 1464, lr = 0.00380388
I0419 22:29:03.263828 8388 solver.cpp:218] Iteration 1476 (1.21198 iter/s, 9.90117s/12 iters), loss = 2.88889
I0419 22:29:03.263870 8388 solver.cpp:237] Train net output #0: loss = 2.88889 (* 1 = 2.88889 loss)
I0419 22:29:03.263878 8388 sgd_solver.cpp:105] Iteration 1476, lr = 0.00377387
I0419 22:29:13.129465 8388 solver.cpp:218] Iteration 1488 (1.21634 iter/s, 9.86566s/12 iters), loss = 2.89791
I0419 22:29:13.129505 8388 solver.cpp:237] Train net output #0: loss = 2.89791 (* 1 = 2.89791 loss)
I0419 22:29:13.129514 8388 sgd_solver.cpp:105] Iteration 1488, lr = 0.00374409
I0419 22:29:22.850618 8388 solver.cpp:218] Iteration 1500 (1.23442 iter/s, 9.72118s/12 iters), loss = 2.91509
I0419 22:29:22.850668 8388 solver.cpp:237] Train net output #0: loss = 2.91509 (* 1 = 2.91509 loss)
I0419 22:29:22.850677 8388 sgd_solver.cpp:105] Iteration 1500, lr = 0.00371454
I0419 22:29:32.649875 8388 solver.cpp:218] Iteration 1512 (1.22458 iter/s, 9.79927s/12 iters), loss = 2.86182
I0419 22:29:32.661799 8388 solver.cpp:237] Train net output #0: loss = 2.86182 (* 1 = 2.86182 loss)
I0419 22:29:32.661811 8388 sgd_solver.cpp:105] Iteration 1512, lr = 0.00368523
I0419 22:29:36.058912 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:29:42.372839 8388 solver.cpp:218] Iteration 1524 (1.2357 iter/s, 9.71112s/12 iters), loss = 2.9262
I0419 22:29:42.372881 8388 solver.cpp:237] Train net output #0: loss = 2.9262 (* 1 = 2.9262 loss)
I0419 22:29:42.372889 8388 sgd_solver.cpp:105] Iteration 1524, lr = 0.00365615
I0419 22:29:46.351883 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1530.caffemodel
I0419 22:29:49.498188 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1530.solverstate
I0419 22:29:52.253917 8388 solver.cpp:330] Iteration 1530, Testing net (#0)
I0419 22:29:52.253942 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:29:55.490345 8388 solver.cpp:397] Test net output #0: accuracy = 0.230469
I0419 22:29:55.490386 8388 solver.cpp:397] Test net output #1: loss = 3.41484 (* 1 = 3.41484 loss)
I0419 22:29:56.741845 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:29:59.078377 8388 solver.cpp:218] Iteration 1536 (0.718321 iter/s, 16.7056s/12 iters), loss = 2.84205
I0419 22:29:59.078420 8388 solver.cpp:237] Train net output #0: loss = 2.84205 (* 1 = 2.84205 loss)
I0419 22:29:59.078429 8388 sgd_solver.cpp:105] Iteration 1536, lr = 0.00362729
I0419 22:30:08.985533 8388 solver.cpp:218] Iteration 1548 (1.21124 iter/s, 9.90718s/12 iters), loss = 2.49034
I0419 22:30:08.985680 8388 solver.cpp:237] Train net output #0: loss = 2.49034 (* 1 = 2.49034 loss)
I0419 22:30:08.985692 8388 sgd_solver.cpp:105] Iteration 1548, lr = 0.00359867
I0419 22:30:18.963770 8388 solver.cpp:218] Iteration 1560 (1.20263 iter/s, 9.97816s/12 iters), loss = 2.53481
I0419 22:30:18.963812 8388 solver.cpp:237] Train net output #0: loss = 2.53481 (* 1 = 2.53481 loss)
I0419 22:30:18.963820 8388 sgd_solver.cpp:105] Iteration 1560, lr = 0.00357027
I0419 22:30:28.806850 8388 solver.cpp:218] Iteration 1572 (1.21913 iter/s, 9.84311s/12 iters), loss = 2.5729
I0419 22:30:28.806885 8388 solver.cpp:237] Train net output #0: loss = 2.5729 (* 1 = 2.5729 loss)
I0419 22:30:28.806892 8388 sgd_solver.cpp:105] Iteration 1572, lr = 0.0035421
I0419 22:30:38.750676 8388 solver.cpp:218] Iteration 1584 (1.20678 iter/s, 9.94386s/12 iters), loss = 2.65253
I0419 22:30:38.750716 8388 solver.cpp:237] Train net output #0: loss = 2.65253 (* 1 = 2.65253 loss)
I0419 22:30:38.750725 8388 sgd_solver.cpp:105] Iteration 1584, lr = 0.00351415
I0419 22:30:48.568722 8388 solver.cpp:218] Iteration 1596 (1.22224 iter/s, 9.81807s/12 iters), loss = 2.43483
I0419 22:30:48.568843 8388 solver.cpp:237] Train net output #0: loss = 2.43483 (* 1 = 2.43483 loss)
I0419 22:30:48.568852 8388 sgd_solver.cpp:105] Iteration 1596, lr = 0.00348641
I0419 22:30:58.399544 8388 solver.cpp:218] Iteration 1608 (1.22066 iter/s, 9.83077s/12 iters), loss = 2.49191
I0419 22:30:58.399585 8388 solver.cpp:237] Train net output #0: loss = 2.49191 (* 1 = 2.49191 loss)
I0419 22:30:58.399593 8388 sgd_solver.cpp:105] Iteration 1608, lr = 0.0034589
I0419 22:31:06.074707 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:31:08.206964 8388 solver.cpp:218] Iteration 1620 (1.22356 iter/s, 9.80745s/12 iters), loss = 2.75473
I0419 22:31:08.207008 8388 solver.cpp:237] Train net output #0: loss = 2.75473 (* 1 = 2.75473 loss)
I0419 22:31:08.207016 8388 sgd_solver.cpp:105] Iteration 1620, lr = 0.00343161
I0419 22:31:17.039129 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1632.caffemodel
I0419 22:31:22.041005 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1632.solverstate
I0419 22:31:24.760371 8388 solver.cpp:330] Iteration 1632, Testing net (#0)
I0419 22:31:24.760392 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:31:27.884402 8388 solver.cpp:397] Test net output #0: accuracy = 0.243304
I0419 22:31:27.884439 8388 solver.cpp:397] Test net output #1: loss = 3.46093 (* 1 = 3.46093 loss)
I0419 22:31:28.057490 8388 solver.cpp:218] Iteration 1632 (0.604515 iter/s, 19.8506s/12 iters), loss = 2.68275
I0419 22:31:28.057539 8388 solver.cpp:237] Train net output #0: loss = 2.68275 (* 1 = 2.68275 loss)
I0419 22:31:28.057548 8388 sgd_solver.cpp:105] Iteration 1632, lr = 0.00340453
I0419 22:31:28.653399 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:31:36.165235 8388 solver.cpp:218] Iteration 1644 (1.48007 iter/s, 8.10774s/12 iters), loss = 2.59135
I0419 22:31:36.165279 8388 solver.cpp:237] Train net output #0: loss = 2.59135 (* 1 = 2.59135 loss)
I0419 22:31:36.165288 8388 sgd_solver.cpp:105] Iteration 1644, lr = 0.00337766
I0419 22:31:45.957475 8388 solver.cpp:218] Iteration 1656 (1.22546 iter/s, 9.79226s/12 iters), loss = 2.64973
I0419 22:31:45.957515 8388 solver.cpp:237] Train net output #0: loss = 2.64973 (* 1 = 2.64973 loss)
I0419 22:31:45.957523 8388 sgd_solver.cpp:105] Iteration 1656, lr = 0.00335101
I0419 22:31:55.697950 8388 solver.cpp:218] Iteration 1668 (1.23197 iter/s, 9.7405s/12 iters), loss = 2.31952
I0419 22:31:55.698122 8388 solver.cpp:237] Train net output #0: loss = 2.31952 (* 1 = 2.31952 loss)
I0419 22:31:55.698132 8388 sgd_solver.cpp:105] Iteration 1668, lr = 0.00332456
I0419 22:32:05.569921 8388 solver.cpp:218] Iteration 1680 (1.21558 iter/s, 9.87187s/12 iters), loss = 2.34403
I0419 22:32:05.569962 8388 solver.cpp:237] Train net output #0: loss = 2.34403 (* 1 = 2.34403 loss)
I0419 22:32:05.569968 8388 sgd_solver.cpp:105] Iteration 1680, lr = 0.00329833
I0419 22:32:15.624895 8388 solver.cpp:218] Iteration 1692 (1.19344 iter/s, 10.055s/12 iters), loss = 2.29323
I0419 22:32:15.624943 8388 solver.cpp:237] Train net output #0: loss = 2.29323 (* 1 = 2.29323 loss)
I0419 22:32:15.624950 8388 sgd_solver.cpp:105] Iteration 1692, lr = 0.0032723
I0419 22:32:25.423933 8388 solver.cpp:218] Iteration 1704 (1.22461 iter/s, 9.79906s/12 iters), loss = 2.31681
I0419 22:32:25.423979 8388 solver.cpp:237] Train net output #0: loss = 2.31681 (* 1 = 2.31681 loss)
I0419 22:32:25.423986 8388 sgd_solver.cpp:105] Iteration 1704, lr = 0.00324648
I0419 22:32:35.330992 8388 solver.cpp:218] Iteration 1716 (1.21125 iter/s, 9.90708s/12 iters), loss = 2.28551
I0419 22:32:35.331084 8388 solver.cpp:237] Train net output #0: loss = 2.28551 (* 1 = 2.28551 loss)
I0419 22:32:35.331094 8388 sgd_solver.cpp:105] Iteration 1716, lr = 0.00322086
I0419 22:32:37.363232 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:32:45.139684 8388 solver.cpp:218] Iteration 1728 (1.22341 iter/s, 9.80867s/12 iters), loss = 2.15263
I0419 22:32:45.139722 8388 solver.cpp:237] Train net output #0: loss = 2.15263 (* 1 = 2.15263 loss)
I0419 22:32:45.139731 8388 sgd_solver.cpp:105] Iteration 1728, lr = 0.00319544
I0419 22:32:49.103799 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1734.caffemodel
I0419 22:32:53.723433 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1734.solverstate
I0419 22:32:58.150080 8388 solver.cpp:330] Iteration 1734, Testing net (#0)
I0419 22:32:58.150105 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:33:01.179518 8388 solver.cpp:397] Test net output #0: accuracy = 0.254464
I0419 22:33:01.179559 8388 solver.cpp:397] Test net output #1: loss = 3.31051 (* 1 = 3.31051 loss)
I0419 22:33:01.496866 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:33:04.739768 8388 solver.cpp:218] Iteration 1740 (0.612239 iter/s, 19.6002s/12 iters), loss = 2.11149
I0419 22:33:04.739816 8388 solver.cpp:237] Train net output #0: loss = 2.11149 (* 1 = 2.11149 loss)
I0419 22:33:04.739825 8388 sgd_solver.cpp:105] Iteration 1740, lr = 0.00317022
I0419 22:33:14.660483 8388 solver.cpp:218] Iteration 1752 (1.20959 iter/s, 9.92074s/12 iters), loss = 2.34614
I0419 22:33:14.660619 8388 solver.cpp:237] Train net output #0: loss = 2.34614 (* 1 = 2.34614 loss)
I0419 22:33:14.660629 8388 sgd_solver.cpp:105] Iteration 1752, lr = 0.00314521
I0419 22:33:24.635685 8388 solver.cpp:218] Iteration 1764 (1.20299 iter/s, 9.97513s/12 iters), loss = 2.2315
I0419 22:33:24.635733 8388 solver.cpp:237] Train net output #0: loss = 2.2315 (* 1 = 2.2315 loss)
I0419 22:33:24.635740 8388 sgd_solver.cpp:105] Iteration 1764, lr = 0.00312039
I0419 22:33:34.390826 8388 solver.cpp:218] Iteration 1776 (1.23012 iter/s, 9.75516s/12 iters), loss = 2.35968
I0419 22:33:34.390867 8388 solver.cpp:237] Train net output #0: loss = 2.35968 (* 1 = 2.35968 loss)
I0419 22:33:34.390875 8388 sgd_solver.cpp:105] Iteration 1776, lr = 0.00309576
I0419 22:33:44.177677 8388 solver.cpp:218] Iteration 1788 (1.22613 iter/s, 9.78688s/12 iters), loss = 2.18109
I0419 22:33:44.177721 8388 solver.cpp:237] Train net output #0: loss = 2.18109 (* 1 = 2.18109 loss)
I0419 22:33:44.177728 8388 sgd_solver.cpp:105] Iteration 1788, lr = 0.00307133
I0419 22:33:53.989173 8388 solver.cpp:218] Iteration 1800 (1.22305 iter/s, 9.81152s/12 iters), loss = 2.19509
I0419 22:33:53.989322 8388 solver.cpp:237] Train net output #0: loss = 2.19509 (* 1 = 2.19509 loss)
I0419 22:33:53.989332 8388 sgd_solver.cpp:105] Iteration 1800, lr = 0.0030471
I0419 22:34:03.836587 8388 solver.cpp:218] Iteration 1812 (1.2186 iter/s, 9.84733s/12 iters), loss = 2.25706
I0419 22:34:03.836630 8388 solver.cpp:237] Train net output #0: loss = 2.25706 (* 1 = 2.25706 loss)
I0419 22:34:03.836638 8388 sgd_solver.cpp:105] Iteration 1812, lr = 0.00302305
I0419 22:34:10.085228 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:34:13.665434 8388 solver.cpp:218] Iteration 1824 (1.22089 iter/s, 9.82886s/12 iters), loss = 2.01615
I0419 22:34:13.665491 8388 solver.cpp:237] Train net output #0: loss = 2.01615 (* 1 = 2.01615 loss)
I0419 22:34:13.665506 8388 sgd_solver.cpp:105] Iteration 1824, lr = 0.00299919
I0419 22:34:22.560498 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1836.caffemodel
I0419 22:34:26.105542 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1836.solverstate
I0419 22:34:28.658558 8388 solver.cpp:330] Iteration 1836, Testing net (#0)
I0419 22:34:28.658577 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:34:31.590047 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:34:31.863845 8388 solver.cpp:397] Test net output #0: accuracy = 0.261161
I0419 22:34:31.863878 8388 solver.cpp:397] Test net output #1: loss = 3.27935 (* 1 = 3.27935 loss)
I0419 22:34:32.037916 8388 solver.cpp:218] Iteration 1836 (0.653148 iter/s, 18.3726s/12 iters), loss = 1.96707
I0419 22:34:32.037962 8388 solver.cpp:237] Train net output #0: loss = 1.96707 (* 1 = 1.96707 loss)
I0419 22:34:32.037969 8388 sgd_solver.cpp:105] Iteration 1836, lr = 0.00297553
I0419 22:34:40.116690 8388 solver.cpp:218] Iteration 1848 (1.48537 iter/s, 8.07878s/12 iters), loss = 2.0347
I0419 22:34:40.116732 8388 solver.cpp:237] Train net output #0: loss = 2.0347 (* 1 = 2.0347 loss)
I0419 22:34:40.116739 8388 sgd_solver.cpp:105] Iteration 1848, lr = 0.00295205
I0419 22:34:49.859625 8388 solver.cpp:218] Iteration 1860 (1.23166 iter/s, 9.74296s/12 iters), loss = 1.73106
I0419 22:34:49.859669 8388 solver.cpp:237] Train net output #0: loss = 1.73106 (* 1 = 1.73106 loss)
I0419 22:34:49.859678 8388 sgd_solver.cpp:105] Iteration 1860, lr = 0.00292875
I0419 22:34:59.602984 8388 solver.cpp:218] Iteration 1872 (1.23161 iter/s, 9.74338s/12 iters), loss = 2.12766
I0419 22:34:59.603118 8388 solver.cpp:237] Train net output #0: loss = 2.12766 (* 1 = 2.12766 loss)
I0419 22:34:59.603128 8388 sgd_solver.cpp:105] Iteration 1872, lr = 0.00290564
I0419 22:35:09.399390 8388 solver.cpp:218] Iteration 1884 (1.22495 iter/s, 9.79634s/12 iters), loss = 1.92189
I0419 22:35:09.399436 8388 solver.cpp:237] Train net output #0: loss = 1.92189 (* 1 = 1.92189 loss)
I0419 22:35:09.399446 8388 sgd_solver.cpp:105] Iteration 1884, lr = 0.00288271
I0419 22:35:19.160270 8388 solver.cpp:218] Iteration 1896 (1.2294 iter/s, 9.76089s/12 iters), loss = 1.9632
I0419 22:35:19.160320 8388 solver.cpp:237] Train net output #0: loss = 1.9632 (* 1 = 1.9632 loss)
I0419 22:35:19.160328 8388 sgd_solver.cpp:105] Iteration 1896, lr = 0.00285996
I0419 22:35:28.981942 8388 solver.cpp:218] Iteration 1908 (1.22179 iter/s, 9.82169s/12 iters), loss = 2.12492
I0419 22:35:28.981997 8388 solver.cpp:237] Train net output #0: loss = 2.12492 (* 1 = 2.12492 loss)
I0419 22:35:28.982012 8388 sgd_solver.cpp:105] Iteration 1908, lr = 0.00283739
I0419 22:35:38.696141 8388 solver.cpp:218] Iteration 1920 (1.2353 iter/s, 9.71421s/12 iters), loss = 2.22292
I0419 22:35:38.696310 8388 solver.cpp:237] Train net output #0: loss = 2.22292 (* 1 = 2.22292 loss)
I0419 22:35:38.696319 8388 sgd_solver.cpp:105] Iteration 1920, lr = 0.002815
I0419 22:35:39.295141 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:35:48.503829 8388 solver.cpp:218] Iteration 1932 (1.22354 iter/s, 9.80759s/12 iters), loss = 2.15333
I0419 22:35:48.503867 8388 solver.cpp:237] Train net output #0: loss = 2.15333 (* 1 = 2.15333 loss)
I0419 22:35:48.503875 8388 sgd_solver.cpp:105] Iteration 1932, lr = 0.00279279
I0419 22:35:52.489390 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1938.caffemodel
I0419 22:35:55.628623 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1938.solverstate
I0419 22:35:58.831880 8388 solver.cpp:330] Iteration 1938, Testing net (#0)
I0419 22:35:58.831900 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:36:01.216876 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:36:01.905382 8388 solver.cpp:397] Test net output #0: accuracy = 0.277344
I0419 22:36:01.905411 8388 solver.cpp:397] Test net output #1: loss = 3.27299 (* 1 = 3.27299 loss)
I0419 22:36:05.473835 8388 solver.cpp:218] Iteration 1944 (0.707126 iter/s, 16.9701s/12 iters), loss = 1.78337
I0419 22:36:05.473879 8388 solver.cpp:237] Train net output #0: loss = 1.78337 (* 1 = 1.78337 loss)
I0419 22:36:05.473888 8388 sgd_solver.cpp:105] Iteration 1944, lr = 0.00277075
I0419 22:36:15.302402 8388 solver.cpp:218] Iteration 1956 (1.22093 iter/s, 9.82858s/12 iters), loss = 1.91436
I0419 22:36:15.302536 8388 solver.cpp:237] Train net output #0: loss = 1.91436 (* 1 = 1.91436 loss)
I0419 22:36:15.302546 8388 sgd_solver.cpp:105] Iteration 1956, lr = 0.00274888
I0419 22:36:25.166011 8388 solver.cpp:218] Iteration 1968 (1.2166 iter/s, 9.86354s/12 iters), loss = 1.69343
I0419 22:36:25.166056 8388 solver.cpp:237] Train net output #0: loss = 1.69343 (* 1 = 1.69343 loss)
I0419 22:36:25.166065 8388 sgd_solver.cpp:105] Iteration 1968, lr = 0.00272719
I0419 22:36:32.518841 8388 blocking_queue.cpp:49] Waiting for data
I0419 22:36:35.081259 8388 solver.cpp:218] Iteration 1980 (1.21025 iter/s, 9.91527s/12 iters), loss = 2.14981
I0419 22:36:35.081298 8388 solver.cpp:237] Train net output #0: loss = 2.14981 (* 1 = 2.14981 loss)
I0419 22:36:35.081306 8388 sgd_solver.cpp:105] Iteration 1980, lr = 0.00270567
I0419 22:36:44.934294 8388 solver.cpp:218] Iteration 1992 (1.2179 iter/s, 9.85306s/12 iters), loss = 1.88374
I0419 22:36:44.934340 8388 solver.cpp:237] Train net output #0: loss = 1.88374 (* 1 = 1.88374 loss)
I0419 22:36:44.934347 8388 sgd_solver.cpp:105] Iteration 1992, lr = 0.00268432
I0419 22:36:54.721544 8388 solver.cpp:218] Iteration 2004 (1.22608 iter/s, 9.78728s/12 iters), loss = 1.73964
I0419 22:36:54.721675 8388 solver.cpp:237] Train net output #0: loss = 1.73964 (* 1 = 1.73964 loss)
I0419 22:36:54.721685 8388 sgd_solver.cpp:105] Iteration 2004, lr = 0.00266313
I0419 22:37:04.544394 8388 solver.cpp:218] Iteration 2016 (1.22165 iter/s, 9.82279s/12 iters), loss = 1.93405
I0419 22:37:04.544437 8388 solver.cpp:237] Train net output #0: loss = 1.93405 (* 1 = 1.93405 loss)
I0419 22:37:04.544445 8388 sgd_solver.cpp:105] Iteration 2016, lr = 0.00264212
I0419 22:37:09.526237 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:37:14.346760 8388 solver.cpp:218] Iteration 2028 (1.22419 iter/s, 9.80239s/12 iters), loss = 2.0608
I0419 22:37:14.346794 8388 solver.cpp:237] Train net output #0: loss = 2.0608 (* 1 = 2.0608 loss)
I0419 22:37:14.346802 8388 sgd_solver.cpp:105] Iteration 2028, lr = 0.00262127
I0419 22:37:23.322456 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2040.caffemodel
I0419 22:37:26.393826 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2040.solverstate
I0419 22:37:28.745287 8388 solver.cpp:330] Iteration 2040, Testing net (#0)
I0419 22:37:28.745311 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:37:30.623817 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:37:31.875039 8388 solver.cpp:397] Test net output #0: accuracy = 0.301897
I0419 22:37:31.875072 8388 solver.cpp:397] Test net output #1: loss = 3.12177 (* 1 = 3.12177 loss)
I0419 22:37:32.048074 8388 solver.cpp:218] Iteration 2040 (0.677912 iter/s, 17.7014s/12 iters), loss = 1.81332
I0419 22:37:32.048116 8388 solver.cpp:237] Train net output #0: loss = 1.81332 (* 1 = 1.81332 loss)
I0419 22:37:32.048125 8388 sgd_solver.cpp:105] Iteration 2040, lr = 0.00260058
I0419 22:37:40.224575 8388 solver.cpp:218] Iteration 2052 (1.46762 iter/s, 8.17652s/12 iters), loss = 1.71558
I0419 22:37:40.224607 8388 solver.cpp:237] Train net output #0: loss = 1.71558 (* 1 = 1.71558 loss)
I0419 22:37:40.224614 8388 sgd_solver.cpp:105] Iteration 2052, lr = 0.00258006
I0419 22:37:50.017153 8388 solver.cpp:218] Iteration 2064 (1.22541 iter/s, 9.79261s/12 iters), loss = 1.68036
I0419 22:37:50.017196 8388 solver.cpp:237] Train net output #0: loss = 1.68036 (* 1 = 1.68036 loss)
I0419 22:37:50.017205 8388 sgd_solver.cpp:105] Iteration 2064, lr = 0.0025597
I0419 22:37:59.660641 8388 solver.cpp:218] Iteration 2076 (1.24436 iter/s, 9.64351s/12 iters), loss = 1.54389
I0419 22:37:59.660775 8388 solver.cpp:237] Train net output #0: loss = 1.54389 (* 1 = 1.54389 loss)
I0419 22:37:59.660784 8388 sgd_solver.cpp:105] Iteration 2076, lr = 0.0025395
I0419 22:38:09.577085 8388 solver.cpp:218] Iteration 2088 (1.21012 iter/s, 9.91638s/12 iters), loss = 1.78695
I0419 22:38:09.577127 8388 solver.cpp:237] Train net output #0: loss = 1.78695 (* 1 = 1.78695 loss)
I0419 22:38:09.577136 8388 sgd_solver.cpp:105] Iteration 2088, lr = 0.00251946
I0419 22:38:19.285251 8388 solver.cpp:218] Iteration 2100 (1.23607 iter/s, 9.70819s/12 iters), loss = 1.60938
I0419 22:38:19.285290 8388 solver.cpp:237] Train net output #0: loss = 1.60938 (* 1 = 1.60938 loss)
I0419 22:38:19.285298 8388 sgd_solver.cpp:105] Iteration 2100, lr = 0.00249958
I0419 22:38:29.112932 8388 solver.cpp:218] Iteration 2112 (1.22104 iter/s, 9.82771s/12 iters), loss = 1.73885
I0419 22:38:29.112983 8388 solver.cpp:237] Train net output #0: loss = 1.73885 (* 1 = 1.73885 loss)
I0419 22:38:29.112998 8388 sgd_solver.cpp:105] Iteration 2112, lr = 0.00247986
I0419 22:38:38.277842 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:38:38.914078 8388 solver.cpp:218] Iteration 2124 (1.22434 iter/s, 9.80117s/12 iters), loss = 1.73088
I0419 22:38:38.914124 8388 solver.cpp:237] Train net output #0: loss = 1.73088 (* 1 = 1.73088 loss)
I0419 22:38:38.914131 8388 sgd_solver.cpp:105] Iteration 2124, lr = 0.00246029
I0419 22:38:48.726398 8388 solver.cpp:218] Iteration 2136 (1.22295 iter/s, 9.81234s/12 iters), loss = 1.49182
I0419 22:38:48.726444 8388 solver.cpp:237] Train net output #0: loss = 1.49182 (* 1 = 1.49182 loss)
I0419 22:38:48.726452 8388 sgd_solver.cpp:105] Iteration 2136, lr = 0.00244087
I0419 22:38:52.707587 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2142.caffemodel
I0419 22:38:55.952075 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2142.solverstate
I0419 22:38:59.652680 8388 solver.cpp:330] Iteration 2142, Testing net (#0)
I0419 22:38:59.652699 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:39:01.000797 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:39:02.751741 8388 solver.cpp:397] Test net output #0: accuracy = 0.299665
I0419 22:39:02.751771 8388 solver.cpp:397] Test net output #1: loss = 3.21212 (* 1 = 3.21212 loss)
I0419 22:39:06.503928 8388 solver.cpp:218] Iteration 2148 (0.675006 iter/s, 17.7776s/12 iters), loss = 1.55448
I0419 22:39:06.503994 8388 solver.cpp:237] Train net output #0: loss = 1.55448 (* 1 = 1.55448 loss)
I0419 22:39:06.504014 8388 sgd_solver.cpp:105] Iteration 2148, lr = 0.00242161
I0419 22:39:16.308874 8388 solver.cpp:218] Iteration 2160 (1.22387 iter/s, 9.80495s/12 iters), loss = 1.45883
I0419 22:39:16.309082 8388 solver.cpp:237] Train net output #0: loss = 1.45883 (* 1 = 1.45883 loss)
I0419 22:39:16.309094 8388 sgd_solver.cpp:105] Iteration 2160, lr = 0.0024025
I0419 22:39:26.119699 8388 solver.cpp:218] Iteration 2172 (1.22316 iter/s, 9.81068s/12 iters), loss = 1.29528
I0419 22:39:26.119742 8388 solver.cpp:237] Train net output #0: loss = 1.29528 (* 1 = 1.29528 loss)
I0419 22:39:26.119751 8388 sgd_solver.cpp:105] Iteration 2172, lr = 0.00238354
I0419 22:39:36.030683 8388 solver.cpp:218] Iteration 2184 (1.21078 iter/s, 9.911s/12 iters), loss = 1.46441
I0419 22:39:36.030730 8388 solver.cpp:237] Train net output #0: loss = 1.46441 (* 1 = 1.46441 loss)
I0419 22:39:36.030738 8388 sgd_solver.cpp:105] Iteration 2184, lr = 0.00236473
I0419 22:39:45.855113 8388 solver.cpp:218] Iteration 2196 (1.22144 iter/s, 9.82445s/12 iters), loss = 1.55591
I0419 22:39:45.855154 8388 solver.cpp:237] Train net output #0: loss = 1.55591 (* 1 = 1.55591 loss)
I0419 22:39:45.855162 8388 sgd_solver.cpp:105] Iteration 2196, lr = 0.00234607
I0419 22:39:55.670076 8388 solver.cpp:218] Iteration 2208 (1.22262 iter/s, 9.81499s/12 iters), loss = 1.30249
I0419 22:39:55.670199 8388 solver.cpp:237] Train net output #0: loss = 1.30249 (* 1 = 1.30249 loss)
I0419 22:39:55.670208 8388 sgd_solver.cpp:105] Iteration 2208, lr = 0.00232756
I0419 22:40:05.537498 8388 solver.cpp:218] Iteration 2220 (1.21613 iter/s, 9.86737s/12 iters), loss = 1.38986
I0419 22:40:05.537547 8388 solver.cpp:237] Train net output #0: loss = 1.38986 (* 1 = 1.38986 loss)
I0419 22:40:05.537556 8388 sgd_solver.cpp:105] Iteration 2220, lr = 0.00230919
I0419 22:40:09.054734 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:40:15.299449 8388 solver.cpp:218] Iteration 2232 (1.22926 iter/s, 9.76197s/12 iters), loss = 1.49019
I0419 22:40:15.299496 8388 solver.cpp:237] Train net output #0: loss = 1.49019 (* 1 = 1.49019 loss)
I0419 22:40:15.299506 8388 sgd_solver.cpp:105] Iteration 2232, lr = 0.00229097
I0419 22:40:24.193723 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2244.caffemodel
I0419 22:40:27.289888 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2244.solverstate
I0419 22:40:30.391096 8388 solver.cpp:330] Iteration 2244, Testing net (#0)
I0419 22:40:30.391119 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:40:31.270980 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:40:33.412510 8388 solver.cpp:397] Test net output #0: accuracy = 0.308594
I0419 22:40:33.412540 8388 solver.cpp:397] Test net output #1: loss = 3.16502 (* 1 = 3.16502 loss)
I0419 22:40:33.585014 8388 solver.cpp:218] Iteration 2244 (0.656252 iter/s, 18.2857s/12 iters), loss = 1.38202
I0419 22:40:33.585055 8388 solver.cpp:237] Train net output #0: loss = 1.38202 (* 1 = 1.38202 loss)
I0419 22:40:33.585063 8388 sgd_solver.cpp:105] Iteration 2244, lr = 0.00227289
I0419 22:40:41.768937 8388 solver.cpp:218] Iteration 2256 (1.46629 iter/s, 8.18394s/12 iters), loss = 1.30427
I0419 22:40:41.768977 8388 solver.cpp:237] Train net output #0: loss = 1.30427 (* 1 = 1.30427 loss)
I0419 22:40:41.768986 8388 sgd_solver.cpp:105] Iteration 2256, lr = 0.00225495
I0419 22:40:51.592474 8388 solver.cpp:218] Iteration 2268 (1.22155 iter/s, 9.82356s/12 iters), loss = 1.37883
I0419 22:40:51.592515 8388 solver.cpp:237] Train net output #0: loss = 1.37883 (* 1 = 1.37883 loss)
I0419 22:40:51.592525 8388 sgd_solver.cpp:105] Iteration 2268, lr = 0.00223716
I0419 22:41:01.384368 8388 solver.cpp:218] Iteration 2280 (1.2255 iter/s, 9.79192s/12 iters), loss = 1.30331
I0419 22:41:01.384503 8388 solver.cpp:237] Train net output #0: loss = 1.30331 (* 1 = 1.30331 loss)
I0419 22:41:01.384513 8388 sgd_solver.cpp:105] Iteration 2280, lr = 0.0022195
I0419 22:41:11.074999 8388 solver.cpp:218] Iteration 2292 (1.23832 iter/s, 9.69057s/12 iters), loss = 1.30134
I0419 22:41:11.075040 8388 solver.cpp:237] Train net output #0: loss = 1.30134 (* 1 = 1.30134 loss)
I0419 22:41:11.075048 8388 sgd_solver.cpp:105] Iteration 2292, lr = 0.00220199
I0419 22:41:20.924155 8388 solver.cpp:218] Iteration 2304 (1.21838 iter/s, 9.84918s/12 iters), loss = 1.14105
I0419 22:41:20.924193 8388 solver.cpp:237] Train net output #0: loss = 1.14105 (* 1 = 1.14105 loss)
I0419 22:41:20.924201 8388 sgd_solver.cpp:105] Iteration 2304, lr = 0.00218461
I0419 22:41:30.694681 8388 solver.cpp:218] Iteration 2316 (1.22818 iter/s, 9.77055s/12 iters), loss = 1.18595
I0419 22:41:30.694722 8388 solver.cpp:237] Train net output #0: loss = 1.18595 (* 1 = 1.18595 loss)
I0419 22:41:30.694730 8388 sgd_solver.cpp:105] Iteration 2316, lr = 0.00216737
I0419 22:41:38.356743 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:41:40.445322 8388 solver.cpp:218] Iteration 2328 (1.23068 iter/s, 9.75067s/12 iters), loss = 1.39826
I0419 22:41:40.445360 8388 solver.cpp:237] Train net output #0: loss = 1.39826 (* 1 = 1.39826 loss)
I0419 22:41:40.445367 8388 sgd_solver.cpp:105] Iteration 2328, lr = 0.00215027
I0419 22:41:50.245400 8388 solver.cpp:218] Iteration 2340 (1.22448 iter/s, 9.80011s/12 iters), loss = 1.21815
I0419 22:41:50.245445 8388 solver.cpp:237] Train net output #0: loss = 1.21815 (* 1 = 1.21815 loss)
I0419 22:41:50.245453 8388 sgd_solver.cpp:105] Iteration 2340, lr = 0.0021333
I0419 22:41:54.219383 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2346.caffemodel
I0419 22:41:57.359977 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2346.solverstate
I0419 22:42:01.384526 8388 solver.cpp:330] Iteration 2346, Testing net (#0)
I0419 22:42:01.384546 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:42:01.746960 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:42:04.450785 8388 solver.cpp:397] Test net output #0: accuracy = 0.327009
I0419 22:42:04.450820 8388 solver.cpp:397] Test net output #1: loss = 3.23813 (* 1 = 3.23813 loss)
I0419 22:42:06.248158 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:42:08.106499 8388 solver.cpp:218] Iteration 2352 (0.671848 iter/s, 17.8612s/12 iters), loss = 1.19837
I0419 22:42:08.106539 8388 solver.cpp:237] Train net output #0: loss = 1.19837 (* 1 = 1.19837 loss)
I0419 22:42:08.106547 8388 sgd_solver.cpp:105] Iteration 2352, lr = 0.00211647
I0419 22:42:17.802978 8388 solver.cpp:218] Iteration 2364 (1.23756 iter/s, 9.69651s/12 iters), loss = 1.06662
I0419 22:42:17.803089 8388 solver.cpp:237] Train net output #0: loss = 1.06662 (* 1 = 1.06662 loss)
I0419 22:42:17.803099 8388 sgd_solver.cpp:105] Iteration 2364, lr = 0.00209976
I0419 22:42:27.647151 8388 solver.cpp:218] Iteration 2376 (1.219 iter/s, 9.84413s/12 iters), loss = 1.13939
I0419 22:42:27.647194 8388 solver.cpp:237] Train net output #0: loss = 1.13939 (* 1 = 1.13939 loss)
I0419 22:42:27.647202 8388 sgd_solver.cpp:105] Iteration 2376, lr = 0.00208319
I0419 22:42:37.463788 8388 solver.cpp:218] Iteration 2388 (1.22241 iter/s, 9.81666s/12 iters), loss = 1.23545
I0419 22:42:37.463829 8388 solver.cpp:237] Train net output #0: loss = 1.23545 (* 1 = 1.23545 loss)
I0419 22:42:37.463836 8388 sgd_solver.cpp:105] Iteration 2388, lr = 0.00206675
I0419 22:42:47.152652 8388 solver.cpp:218] Iteration 2400 (1.23853 iter/s, 9.68889s/12 iters), loss = 1.25383
I0419 22:42:47.152695 8388 solver.cpp:237] Train net output #0: loss = 1.25383 (* 1 = 1.25383 loss)
I0419 22:42:47.152702 8388 sgd_solver.cpp:105] Iteration 2400, lr = 0.00205044
I0419 22:42:56.863220 8388 solver.cpp:218] Iteration 2412 (1.23576 iter/s, 9.71059s/12 iters), loss = 0.9237
I0419 22:42:56.863379 8388 solver.cpp:237] Train net output #0: loss = 0.9237 (* 1 = 0.9237 loss)
I0419 22:42:56.863389 8388 sgd_solver.cpp:105] Iteration 2412, lr = 0.00203426
I0419 22:43:06.663940 8388 solver.cpp:218] Iteration 2424 (1.22441 iter/s, 9.80063s/12 iters), loss = 1.0121
I0419 22:43:06.663980 8388 solver.cpp:237] Train net output #0: loss = 1.0121 (* 1 = 1.0121 loss)
I0419 22:43:06.663987 8388 sgd_solver.cpp:105] Iteration 2424, lr = 0.00201821
I0419 22:43:08.757719 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:43:16.257896 8388 solver.cpp:218] Iteration 2436 (1.25078 iter/s, 9.59398s/12 iters), loss = 1.01503
I0419 22:43:16.257942 8388 solver.cpp:237] Train net output #0: loss = 1.01503 (* 1 = 1.01503 loss)
I0419 22:43:16.257951 8388 sgd_solver.cpp:105] Iteration 2436, lr = 0.00200228
I0419 22:43:25.120759 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2448.caffemodel
I0419 22:43:28.219221 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2448.solverstate
I0419 22:43:30.617496 8388 solver.cpp:330] Iteration 2448, Testing net (#0)
I0419 22:43:30.617518 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:43:33.710171 8388 solver.cpp:397] Test net output #0: accuracy = 0.338728
I0419 22:43:33.710209 8388 solver.cpp:397] Test net output #1: loss = 3.18856 (* 1 = 3.18856 loss)
I0419 22:43:33.884225 8388 solver.cpp:218] Iteration 2448 (0.680796 iter/s, 17.6264s/12 iters), loss = 0.922137
I0419 22:43:33.884268 8388 solver.cpp:237] Train net output #0: loss = 0.922137 (* 1 = 0.922137 loss)
I0419 22:43:33.884276 8388 sgd_solver.cpp:105] Iteration 2448, lr = 0.00198648
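The learning-rate values printed by sgd_solver.cpp above fall by a constant factor each iteration, which is the behaviour of Caffe's exponential policy (lr = base_lr * gamma^iter). A minimal check in Python, using two lr values copied from the display lines above (the variable names are illustrative, not part of Caffe):

    # lr at iterations 2256 and 2268 (12 iterations apart), taken from the lines above
    lr_2256 = 0.00225495
    lr_2268 = 0.00223716
    per_iter_factor = (lr_2268 / lr_2256) ** (1.0 / 12)
    print(per_iter_factor)  # ~0.99934, i.e. the lr shrinks by roughly 0.066% per iteration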
I0419 22:43:35.046147 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:43:42.020509 8388 solver.cpp:218] Iteration 2460 (1.47487 iter/s, 8.1363s/12 iters), loss = 0.934932
I0419 22:43:42.020551 8388 solver.cpp:237] Train net output #0: loss = 0.934932 (* 1 = 0.934932 loss)
I0419 22:43:42.020560 8388 sgd_solver.cpp:105] Iteration 2460, lr = 0.00197081
I0419 22:43:51.718760 8388 solver.cpp:218] Iteration 2472 (1.23733 iter/s, 9.69827s/12 iters), loss = 0.866401
I0419 22:43:51.718802 8388 solver.cpp:237] Train net output #0: loss = 0.866401 (* 1 = 0.866401 loss)
I0419 22:43:51.718811 8388 sgd_solver.cpp:105] Iteration 2472, lr = 0.00195526
I0419 22:44:01.541172 8388 solver.cpp:218] Iteration 2484 (1.22169 iter/s, 9.82243s/12 iters), loss = 0.884973
I0419 22:44:01.541301 8388 solver.cpp:237] Train net output #0: loss = 0.884973 (* 1 = 0.884973 loss)
I0419 22:44:01.541309 8388 sgd_solver.cpp:105] Iteration 2484, lr = 0.00193983
I0419 22:44:11.288493 8388 solver.cpp:218] Iteration 2496 (1.23111 iter/s, 9.74726s/12 iters), loss = 0.939078
I0419 22:44:11.288533 8388 solver.cpp:237] Train net output #0: loss = 0.939078 (* 1 = 0.939078 loss)
I0419 22:44:11.288542 8388 sgd_solver.cpp:105] Iteration 2496, lr = 0.00192452
I0419 22:44:20.875571 8388 solver.cpp:218] Iteration 2508 (1.25168 iter/s, 9.5871s/12 iters), loss = 0.881123
I0419 22:44:20.875618 8388 solver.cpp:237] Train net output #0: loss = 0.881123 (* 1 = 0.881123 loss)
I0419 22:44:20.875627 8388 sgd_solver.cpp:105] Iteration 2508, lr = 0.00190933
I0419 22:44:30.594472 8388 solver.cpp:218] Iteration 2520 (1.2347 iter/s, 9.71892s/12 iters), loss = 0.970213
I0419 22:44:30.594516 8388 solver.cpp:237] Train net output #0: loss = 0.970213 (* 1 = 0.970213 loss)
I0419 22:44:30.594524 8388 sgd_solver.cpp:105] Iteration 2520, lr = 0.00189426
I0419 22:44:36.782052 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:44:40.412081 8388 solver.cpp:218] Iteration 2532 (1.22229 iter/s, 9.81763s/12 iters), loss = 0.897572
I0419 22:44:40.412128 8388 solver.cpp:237] Train net output #0: loss = 0.897572 (* 1 = 0.897572 loss)
I0419 22:44:40.412137 8388 sgd_solver.cpp:105] Iteration 2532, lr = 0.00187932
I0419 22:44:50.133280 8388 solver.cpp:218] Iteration 2544 (1.23441 iter/s, 9.72122s/12 iters), loss = 0.902187
I0419 22:44:50.133322 8388 solver.cpp:237] Train net output #0: loss = 0.902187 (* 1 = 0.902187 loss)
I0419 22:44:50.133330 8388 sgd_solver.cpp:105] Iteration 2544, lr = 0.00186449
I0419 22:44:54.067863 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2550.caffemodel
I0419 22:44:57.808172 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2550.solverstate
I0419 22:45:02.258446 8388 solver.cpp:330] Iteration 2550, Testing net (#0)
I0419 22:45:02.258468 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:45:05.264864 8388 solver.cpp:397] Test net output #0: accuracy = 0.334821
I0419 22:45:05.264896 8388 solver.cpp:397] Test net output #1: loss = 3.23214 (* 1 = 3.23214 loss)
I0419 22:45:06.165951 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:45:08.863885 8388 solver.cpp:218] Iteration 2556 (0.640659 iter/s, 18.7307s/12 iters), loss = 0.789631
I0419 22:45:08.864049 8388 solver.cpp:237] Train net output #0: loss = 0.789631 (* 1 = 0.789631 loss)
I0419 22:45:08.864059 8388 sgd_solver.cpp:105] Iteration 2556, lr = 0.00184977
I0419 22:45:18.635654 8388 solver.cpp:218] Iteration 2568 (1.22804 iter/s, 9.77168s/12 iters), loss = 0.902336
I0419 22:45:18.635699 8388 solver.cpp:237] Train net output #0: loss = 0.902336 (* 1 = 0.902336 loss)
I0419 22:45:18.635707 8388 sgd_solver.cpp:105] Iteration 2568, lr = 0.00183517
I0419 22:45:28.377905 8388 solver.cpp:218] Iteration 2580 (1.23175 iter/s, 9.74227s/12 iters), loss = 0.939977
I0419 22:45:28.377946 8388 solver.cpp:237] Train net output #0: loss = 0.939977 (* 1 = 0.939977 loss)
I0419 22:45:28.377954 8388 sgd_solver.cpp:105] Iteration 2580, lr = 0.00182069
I0419 22:45:38.192561 8388 solver.cpp:218] Iteration 2592 (1.22266 iter/s, 9.81468s/12 iters), loss = 0.858514
I0419 22:45:38.192602 8388 solver.cpp:237] Train net output #0: loss = 0.858514 (* 1 = 0.858514 loss)
I0419 22:45:38.192610 8388 sgd_solver.cpp:105] Iteration 2592, lr = 0.00180633
I0419 22:45:48.030848 8388 solver.cpp:218] Iteration 2604 (1.21972 iter/s, 9.83831s/12 iters), loss = 0.750458
I0419 22:45:48.030958 8388 solver.cpp:237] Train net output #0: loss = 0.750458 (* 1 = 0.750458 loss)
I0419 22:45:48.030967 8388 sgd_solver.cpp:105] Iteration 2604, lr = 0.00179207
I0419 22:45:57.965318 8388 solver.cpp:218] Iteration 2616 (1.20792 iter/s, 9.93443s/12 iters), loss = 0.797696
I0419 22:45:57.965358 8388 solver.cpp:237] Train net output #0: loss = 0.797696 (* 1 = 0.797696 loss)
I0419 22:45:57.965366 8388 sgd_solver.cpp:105] Iteration 2616, lr = 0.00177793
I0419 22:46:08.003269 8388 solver.cpp:218] Iteration 2628 (1.19546 iter/s, 10.038s/12 iters), loss = 0.872876
I0419 22:46:08.003312 8388 solver.cpp:237] Train net output #0: loss = 0.872876 (* 1 = 0.872876 loss)
I0419 22:46:08.003321 8388 sgd_solver.cpp:105] Iteration 2628, lr = 0.0017639
I0419 22:46:08.895804 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:46:18.045796 8388 solver.cpp:218] Iteration 2640 (1.19492 iter/s, 10.0426s/12 iters), loss = 0.951447
I0419 22:46:18.045980 8388 solver.cpp:237] Train net output #0: loss = 0.951447 (* 1 = 0.951447 loss)
I0419 22:46:18.045991 8388 sgd_solver.cpp:105] Iteration 2640, lr = 0.00174998
I0419 22:46:26.942122 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2652.caffemodel
I0419 22:46:30.177189 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2652.solverstate
I0419 22:46:33.047574 8388 solver.cpp:330] Iteration 2652, Testing net (#0)
I0419 22:46:33.047602 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:46:36.077044 8388 solver.cpp:397] Test net output #0: accuracy = 0.340402
I0419 22:46:36.077075 8388 solver.cpp:397] Test net output #1: loss = 3.30123 (* 1 = 3.30123 loss)
I0419 22:46:36.249749 8388 solver.cpp:218] Iteration 2652 (0.659199 iter/s, 18.2039s/12 iters), loss = 0.749444
I0419 22:46:36.249789 8388 solver.cpp:237] Train net output #0: loss = 0.749444 (* 1 = 0.749444 loss)
I0419 22:46:36.249799 8388 sgd_solver.cpp:105] Iteration 2652, lr = 0.00173617
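The two timing figures in each display line are mutually consistent: the iter/s value is simply the display interval (12 iterations) divided by the elapsed seconds. A one-line sanity check against the iteration 2652 line above (illustrative only):

    print(12 / 18.2039)  # ~0.659199 iter/s, matching the value logged at iteration 2652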
I0419 22:46:36.512521 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:46:44.492962 8388 solver.cpp:218] Iteration 2664 (1.45574 iter/s, 8.24322s/12 iters), loss = 0.802865
I0419 22:46:44.493021 8388 solver.cpp:237] Train net output #0: loss = 0.802865 (* 1 = 0.802865 loss)
I0419 22:46:44.493036 8388 sgd_solver.cpp:105] Iteration 2664, lr = 0.00172247
I0419 22:46:54.758210 8388 solver.cpp:218] Iteration 2676 (1.16899 iter/s, 10.2653s/12 iters), loss = 0.810625
I0419 22:46:54.758373 8388 solver.cpp:237] Train net output #0: loss = 0.810625 (* 1 = 0.810625 loss)
I0419 22:46:54.758383 8388 sgd_solver.cpp:105] Iteration 2676, lr = 0.00170888
I0419 22:47:04.771196 8388 solver.cpp:218] Iteration 2688 (1.19845 iter/s, 10.0129s/12 iters), loss = 0.885112
I0419 22:47:04.771239 8388 solver.cpp:237] Train net output #0: loss = 0.885112 (* 1 = 0.885112 loss)
I0419 22:47:04.771246 8388 sgd_solver.cpp:105] Iteration 2688, lr = 0.00169539
I0419 22:47:14.607976 8388 solver.cpp:218] Iteration 2700 (1.21991 iter/s, 9.8368s/12 iters), loss = 0.725293
I0419 22:47:14.608017 8388 solver.cpp:237] Train net output #0: loss = 0.725293 (* 1 = 0.725293 loss)
I0419 22:47:14.608026 8388 sgd_solver.cpp:105] Iteration 2700, lr = 0.00168201
I0419 22:47:24.366281 8388 solver.cpp:218] Iteration 2712 (1.22972 iter/s, 9.75833s/12 iters), loss = 0.750635
I0419 22:47:24.366328 8388 solver.cpp:237] Train net output #0: loss = 0.750635 (* 1 = 0.750635 loss)
I0419 22:47:24.366335 8388 sgd_solver.cpp:105] Iteration 2712, lr = 0.00166874
I0419 22:47:34.164583 8388 solver.cpp:218] Iteration 2724 (1.2247 iter/s, 9.79832s/12 iters), loss = 0.681103
I0419 22:47:34.164701 8388 solver.cpp:237] Train net output #0: loss = 0.681103 (* 1 = 0.681103 loss)
I0419 22:47:34.164712 8388 sgd_solver.cpp:105] Iteration 2724, lr = 0.00165557
I0419 22:47:39.205572 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:47:43.967484 8388 solver.cpp:218] Iteration 2736 (1.22413 iter/s, 9.80285s/12 iters), loss = 0.810941
I0419 22:47:43.967543 8388 solver.cpp:237] Train net output #0: loss = 0.810941 (* 1 = 0.810941 loss)
I0419 22:47:43.967558 8388 sgd_solver.cpp:105] Iteration 2736, lr = 0.00164251
I0419 22:47:53.669380 8388 solver.cpp:218] Iteration 2748 (1.23687 iter/s, 9.7019s/12 iters), loss = 0.664888
I0419 22:47:53.669421 8388 solver.cpp:237] Train net output #0: loss = 0.664888 (* 1 = 0.664888 loss)
I0419 22:47:53.669430 8388 sgd_solver.cpp:105] Iteration 2748, lr = 0.00162954
I0419 22:47:57.579953 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2754.caffemodel
I0419 22:48:01.370183 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2754.solverstate
I0419 22:48:03.736696 8388 solver.cpp:330] Iteration 2754, Testing net (#0)
I0419 22:48:03.736716 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:48:06.646694 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:48:06.776448 8388 solver.cpp:397] Test net output #0: accuracy = 0.339844
I0419 22:48:06.776479 8388 solver.cpp:397] Test net output #1: loss = 3.33529 (* 1 = 3.33529 loss)
I0419 22:48:10.266917 8388 solver.cpp:218] Iteration 2760 (0.722995 iter/s, 16.5976s/12 iters), loss = 0.585073
I0419 22:48:10.266980 8388 solver.cpp:237] Train net output #0: loss = 0.585073 (* 1 = 0.585073 loss)
I0419 22:48:10.266993 8388 sgd_solver.cpp:105] Iteration 2760, lr = 0.00161668
I0419 22:48:20.090080 8388 solver.cpp:218] Iteration 2772 (1.2216 iter/s, 9.82317s/12 iters), loss = 0.692769
I0419 22:48:20.090123 8388 solver.cpp:237] Train net output #0: loss = 0.692769 (* 1 = 0.692769 loss)
I0419 22:48:20.090131 8388 sgd_solver.cpp:105] Iteration 2772, lr = 0.00160393
I0419 22:48:29.951762 8388 solver.cpp:218] Iteration 2784 (1.21683 iter/s, 9.8617s/12 iters), loss = 0.569652
I0419 22:48:29.951815 8388 solver.cpp:237] Train net output #0: loss = 0.569652 (* 1 = 0.569652 loss)
I0419 22:48:29.951826 8388 sgd_solver.cpp:105] Iteration 2784, lr = 0.00159127
I0419 22:48:39.668905 8388 solver.cpp:218] Iteration 2796 (1.23493 iter/s, 9.71715s/12 iters), loss = 0.747644
I0419 22:48:39.669054 8388 solver.cpp:237] Train net output #0: loss = 0.747644 (* 1 = 0.747644 loss)
I0419 22:48:39.669064 8388 sgd_solver.cpp:105] Iteration 2796, lr = 0.00157871
I0419 22:48:49.481741 8388 solver.cpp:218] Iteration 2808 (1.2229 iter/s, 9.81276s/12 iters), loss = 0.710364
I0419 22:48:49.481786 8388 solver.cpp:237] Train net output #0: loss = 0.710364 (* 1 = 0.710364 loss)
I0419 22:48:49.481796 8388 sgd_solver.cpp:105] Iteration 2808, lr = 0.00156625
I0419 22:48:59.359920 8388 solver.cpp:218] Iteration 2820 (1.2148 iter/s, 9.87821s/12 iters), loss = 0.4777
I0419 22:48:59.359964 8388 solver.cpp:237] Train net output #0: loss = 0.4777 (* 1 = 0.4777 loss)
I0419 22:48:59.359972 8388 sgd_solver.cpp:105] Iteration 2820, lr = 0.00155389
I0419 22:49:08.605335 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:49:09.203742 8388 solver.cpp:218] Iteration 2832 (1.21904 iter/s, 9.84384s/12 iters), loss = 0.57086
I0419 22:49:09.203785 8388 solver.cpp:237] Train net output #0: loss = 0.57086 (* 1 = 0.57086 loss)
I0419 22:49:09.203794 8388 sgd_solver.cpp:105] Iteration 2832, lr = 0.00154163
I0419 22:49:19.054204 8388 solver.cpp:218] Iteration 2844 (1.21821 iter/s, 9.85049s/12 iters), loss = 0.648551
I0419 22:49:19.054322 8388 solver.cpp:237] Train net output #0: loss = 0.648551 (* 1 = 0.648551 loss)
I0419 22:49:19.054337 8388 sgd_solver.cpp:105] Iteration 2844, lr = 0.00152947
I0419 22:49:27.918875 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2856.caffemodel
I0419 22:49:31.815085 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2856.solverstate
I0419 22:49:34.793996 8388 solver.cpp:330] Iteration 2856, Testing net (#0)
I0419 22:49:34.794014 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:49:37.263738 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:49:37.824700 8388 solver.cpp:397] Test net output #0: accuracy = 0.34933
I0419 22:49:37.824733 8388 solver.cpp:397] Test net output #1: loss = 3.33626 (* 1 = 3.33626 loss)
I0419 22:49:37.997040 8388 solver.cpp:218] Iteration 2856 (0.633484 iter/s, 18.9429s/12 iters), loss = 0.54696
I0419 22:49:37.997081 8388 solver.cpp:237] Train net output #0: loss = 0.54696 (* 1 = 0.54696 loss)
I0419 22:49:37.997089 8388 sgd_solver.cpp:105] Iteration 2856, lr = 0.0015174
I0419 22:49:46.265630 8388 solver.cpp:218] Iteration 2868 (1.45127 iter/s, 8.2686s/12 iters), loss = 0.563716
I0419 22:49:46.265679 8388 solver.cpp:237] Train net output #0: loss = 0.563716 (* 1 = 0.563716 loss)
I0419 22:49:46.265687 8388 sgd_solver.cpp:105] Iteration 2868, lr = 0.00150542
I0419 22:49:56.156960 8388 solver.cpp:218] Iteration 2880 (1.21318 iter/s, 9.89134s/12 iters), loss = 0.594555
I0419 22:49:56.157083 8388 solver.cpp:237] Train net output #0: loss = 0.594555 (* 1 = 0.594555 loss)
I0419 22:49:56.157091 8388 sgd_solver.cpp:105] Iteration 2880, lr = 0.00149354
I0419 22:50:05.899399 8388 solver.cpp:218] Iteration 2892 (1.23173 iter/s, 9.74238s/12 iters), loss = 0.571303
I0419 22:50:05.899441 8388 solver.cpp:237] Train net output #0: loss = 0.571303 (* 1 = 0.571303 loss)
I0419 22:50:05.899448 8388 sgd_solver.cpp:105] Iteration 2892, lr = 0.00148176
I0419 22:50:15.855947 8388 solver.cpp:218] Iteration 2904 (1.20523 iter/s, 9.95658s/12 iters), loss = 0.430634
I0419 22:50:15.855990 8388 solver.cpp:237] Train net output #0: loss = 0.430634 (* 1 = 0.430634 loss)
I0419 22:50:15.855998 8388 sgd_solver.cpp:105] Iteration 2904, lr = 0.00147006
I0419 22:50:25.686061 8388 solver.cpp:218] Iteration 2916 (1.22074 iter/s, 9.83013s/12 iters), loss = 0.53815
I0419 22:50:25.686111 8388 solver.cpp:237] Train net output #0: loss = 0.53815 (* 1 = 0.53815 loss)
I0419 22:50:25.686120 8388 sgd_solver.cpp:105] Iteration 2916, lr = 0.00145846
I0419 22:50:35.540338 8388 solver.cpp:218] Iteration 2928 (1.21774 iter/s, 9.85429s/12 iters), loss = 0.67309
I0419 22:50:35.540498 8388 solver.cpp:237] Train net output #0: loss = 0.67309 (* 1 = 0.67309 loss)
I0419 22:50:35.540508 8388 sgd_solver.cpp:105] Iteration 2928, lr = 0.00144695
I0419 22:50:39.136034 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:50:45.362958 8388 solver.cpp:218] Iteration 2940 (1.22168 iter/s, 9.82253s/12 iters), loss = 0.485759
I0419 22:50:45.363000 8388 solver.cpp:237] Train net output #0: loss = 0.485759 (* 1 = 0.485759 loss)
I0419 22:50:45.363009 8388 sgd_solver.cpp:105] Iteration 2940, lr = 0.00143554
I0419 22:50:55.074352 8388 solver.cpp:218] Iteration 2952 (1.23566 iter/s, 9.71142s/12 iters), loss = 0.572076
I0419 22:50:55.074402 8388 solver.cpp:237] Train net output #0: loss = 0.572076 (* 1 = 0.572076 loss)
I0419 22:50:55.074411 8388 sgd_solver.cpp:105] Iteration 2952, lr = 0.00142421
I0419 22:50:59.112805 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2958.caffemodel
I0419 22:51:02.238713 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2958.solverstate
I0419 22:51:05.171911 8388 solver.cpp:330] Iteration 2958, Testing net (#0)
I0419 22:51:05.171931 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:51:07.120378 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:51:08.194213 8388 solver.cpp:397] Test net output #0: accuracy = 0.351004
I0419 22:51:08.194247 8388 solver.cpp:397] Test net output #1: loss = 3.38003 (* 1 = 3.38003 loss)
I0419 22:51:11.645649 8388 solver.cpp:218] Iteration 2964 (0.72414 iter/s, 16.5714s/12 iters), loss = 0.436507
I0419 22:51:11.645699 8388 solver.cpp:237] Train net output #0: loss = 0.436507 (* 1 = 0.436507 loss)
I0419 22:51:11.645709 8388 sgd_solver.cpp:105] Iteration 2964, lr = 0.00141297
I0419 22:51:13.992421 8388 blocking_queue.cpp:49] Waiting for data
I0419 22:51:21.367017 8388 solver.cpp:218] Iteration 2976 (1.23439 iter/s, 9.72138s/12 iters), loss = 0.411845
I0419 22:51:21.367058 8388 solver.cpp:237] Train net output #0: loss = 0.411845 (* 1 = 0.411845 loss)
I0419 22:51:21.367066 8388 sgd_solver.cpp:105] Iteration 2976, lr = 0.00140182
I0419 22:51:31.201329 8388 solver.cpp:218] Iteration 2988 (1.22021 iter/s, 9.83434s/12 iters), loss = 0.557631
I0419 22:51:31.201372 8388 solver.cpp:237] Train net output #0: loss = 0.557631 (* 1 = 0.557631 loss)
I0419 22:51:31.201380 8388 sgd_solver.cpp:105] Iteration 2988, lr = 0.00139076
I0419 22:51:40.914418 8388 solver.cpp:218] Iteration 3000 (1.23544 iter/s, 9.71311s/12 iters), loss = 0.486616
I0419 22:51:40.914543 8388 solver.cpp:237] Train net output #0: loss = 0.486616 (* 1 = 0.486616 loss)
I0419 22:51:40.914553 8388 sgd_solver.cpp:105] Iteration 3000, lr = 0.00137978
I0419 22:51:50.602695 8388 solver.cpp:218] Iteration 3012 (1.23862 iter/s, 9.68822s/12 iters), loss = 0.506703
I0419 22:51:50.602741 8388 solver.cpp:237] Train net output #0: loss = 0.506703 (* 1 = 0.506703 loss)
I0419 22:51:50.602747 8388 sgd_solver.cpp:105] Iteration 3012, lr = 0.00136889
I0419 22:52:00.423067 8388 solver.cpp:218] Iteration 3024 (1.22195 iter/s, 9.82039s/12 iters), loss = 0.387212
I0419 22:52:00.423125 8388 solver.cpp:237] Train net output #0: loss = 0.387212 (* 1 = 0.387212 loss)
I0419 22:52:00.423139 8388 sgd_solver.cpp:105] Iteration 3024, lr = 0.00135809
I0419 22:52:08.151093 8400 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:52:10.021952 8388 solver.cpp:218] Iteration 3036 (1.25014 iter/s, 9.5989s/12 iters), loss = 0.651075
I0419 22:52:10.021996 8388 solver.cpp:237] Train net output #0: loss = 0.651075 (* 1 = 0.651075 loss)
I0419 22:52:10.022003 8388 sgd_solver.cpp:105] Iteration 3036, lr = 0.00134737
I0419 22:52:19.727632 8388 solver.cpp:218] Iteration 3048 (1.23639 iter/s, 9.7057s/12 iters), loss = 0.52133
I0419 22:52:19.727761 8388 solver.cpp:237] Train net output #0: loss = 0.52133 (* 1 = 0.52133 loss)
I0419 22:52:19.727771 8388 sgd_solver.cpp:105] Iteration 3048, lr = 0.00133674
I0419 22:52:28.588091 8388 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3060.caffemodel
I0419 22:52:31.731410 8388 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3060.solverstate
I0419 22:52:34.167346 8388 solver.cpp:310] Iteration 3060, loss = 0.459728
I0419 22:52:34.167373 8388 solver.cpp:330] Iteration 3060, Testing net (#0)
I0419 22:52:34.167378 8388 net.cpp:676] Ignoring source layer train-data
I0419 22:52:35.631995 8411 data_layer.cpp:73] Restarting data prefetching from start.
I0419 22:52:37.216853 8388 solver.cpp:397] Test net output #0: accuracy = 0.347656
I0419 22:52:37.216886 8388 solver.cpp:397] Test net output #1: loss = 3.34689 (* 1 = 3.34689 loss)
I0419 22:52:37.216892 8388 solver.cpp:315] Optimization Done.
I0419 22:52:37.216894 8388 caffe.cpp:259] Optimization Done.
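For post-processing, the interesting lines in this log follow fixed patterns: solver.cpp:218 lines carry the training loss reported every 12 iterations, solver.cpp:397 lines carry the test accuracy and test loss, and sgd_solver.cpp:105 lines carry the learning rate. A minimal parsing sketch in Python, assuming the log is saved as caffe_output.log in the working directory (the filename and regular expressions are assumptions, not Caffe output):

    import re

    # Patterns matching the three kinds of display lines seen in this log.
    train_re = re.compile(r"Iteration (\d+) \(.*\), loss = ([\d.]+)")
    acc_re = re.compile(r"Test net output #0: accuracy = ([\d.]+)")
    lr_re = re.compile(r"Iteration (\d+), lr = ([\d.e-]+)")

    train_loss, test_acc, lrs = [], [], []
    with open("caffe_output.log") as f:
        for line in f:
            if (m := train_re.search(line)):
                train_loss.append((int(m.group(1)), float(m.group(2))))
            if (m := acc_re.search(line)):
                test_acc.append(float(m.group(1)))
            if (m := lr_re.search(line)):
                lrs.append((int(m.group(1)), float(m.group(2))))

    # Last logged training loss, test accuracy, and learning rate of the run.
    print(train_loss[-1], test_acc[-1], lrs[-1])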