DIGITS-CNN/cars/architecture-investigations/conv/nonlinear/l5/2parts/caffe_output.log

I0412 12:43:14.715986 6200 upgrade_proto.cpp:1082] Attempting to upgrade input file specified using deprecated 'solver_type' field (enum)': /mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210412-124312-7270/solver.prototxt
I0412 12:43:14.717805 6200 upgrade_proto.cpp:1089] Successfully upgraded file specified using deprecated 'solver_type' field (enum) to 'type' field (string).
W0412 12:43:14.717823 6200 upgrade_proto.cpp:1091] Note that future Caffe releases will only support 'type' field (string) for a solver's type.
I0412 12:43:14.718022 6200 caffe.cpp:218] Using GPUs 0
I0412 12:43:14.747117 6200 caffe.cpp:223] GPU 0: GeForce GTX 1080 Ti
I0412 12:43:15.048785 6200 solver.cpp:44] Initializing solver from parameters:
test_iter: 51
test_interval: 102
base_lr: 0.01
display: 12
max_iter: 10200
lr_policy: "exp"
gamma: 0.99980193
momentum: 0.9
weight_decay: 0.0001
snapshot: 102
snapshot_prefix: "snapshot"
solver_mode: GPU
device_id: 0
net: "train_val.prototxt"
train_state {
level: 0
stage: ""
}
type: "SGD"
I0412 12:43:15.049546 6200 solver.cpp:87] Creating training net from net file: train_val.prototxt
I0412 12:43:15.050199 6200 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer val-data
I0412 12:43:15.050215 6200 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0412 12:43:15.050369 6200 net.cpp:51] Initializing net from parameters:
state {
phase: TRAIN
level: 0
stage: ""
}
layer {
name: "train-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/train_db"
batch_size: 128
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "conv5.2"
type: "Convolution"
bottom: "conv5"
top: "conv5.2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5.2"
type: "ReLU"
bottom: "conv5.2"
top: "conv5.2"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5.2"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
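
Annotation (not log output): the spatial sizes in the "Top shape" lines that follow can be reproduced from the layer parameters above with the usual output-size formula out = floor((in + 2*pad - kernel)/stride) + 1 (Caffe rounds pooling sizes up, but every division here is exact). A minimal Python check:

def out_size(size, kernel, stride=1, pad=0):
    # Output spatial extent of a convolution/pooling window
    return (size + 2 * pad - kernel) // stride + 1

s = out_size(227, 11, stride=4)   # conv1  -> 55
s = out_size(s, 3, stride=2)      # pool1  -> 27
s = out_size(s, 5, pad=2)         # conv2  -> 27
s = out_size(s, 3, stride=2)      # pool2  -> 13
s = out_size(s, 3, pad=1)         # conv3..conv5.2 -> 13
print(out_size(s, 3, stride=2))   # pool5  -> 6, so fc6 sees 128*6*6 inputs
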
I0412 12:43:15.050464 6200 layer_factory.hpp:77] Creating layer train-data
I0412 12:43:15.070906 6200 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/train_db
I0412 12:43:15.092752 6200 net.cpp:84] Creating Layer train-data
I0412 12:43:15.092767 6200 net.cpp:380] train-data -> data
I0412 12:43:15.092788 6200 net.cpp:380] train-data -> label
I0412 12:43:15.092801 6200 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/mean.binaryproto
I0412 12:43:15.149091 6200 data_layer.cpp:45] output data size: 128,3,227,227
I0412 12:43:15.333086 6200 net.cpp:122] Setting up train-data
I0412 12:43:15.333112 6200 net.cpp:129] Top shape: 128 3 227 227 (19787136)
I0412 12:43:15.333118 6200 net.cpp:129] Top shape: 128 (128)
I0412 12:43:15.333122 6200 net.cpp:137] Memory required for data: 79149056
I0412 12:43:15.333132 6200 layer_factory.hpp:77] Creating layer conv1
I0412 12:43:15.333155 6200 net.cpp:84] Creating Layer conv1
I0412 12:43:15.333163 6200 net.cpp:406] conv1 <- data
I0412 12:43:15.333175 6200 net.cpp:380] conv1 -> conv1
I0412 12:43:15.910151 6200 net.cpp:122] Setting up conv1
I0412 12:43:15.910173 6200 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0412 12:43:15.910177 6200 net.cpp:137] Memory required for data: 227833856
I0412 12:43:15.910197 6200 layer_factory.hpp:77] Creating layer relu1
I0412 12:43:15.910207 6200 net.cpp:84] Creating Layer relu1
I0412 12:43:15.910233 6200 net.cpp:406] relu1 <- conv1
I0412 12:43:15.910239 6200 net.cpp:367] relu1 -> conv1 (in-place)
I0412 12:43:15.910578 6200 net.cpp:122] Setting up relu1
I0412 12:43:15.910586 6200 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0412 12:43:15.910590 6200 net.cpp:137] Memory required for data: 376518656
I0412 12:43:15.910594 6200 layer_factory.hpp:77] Creating layer norm1
I0412 12:43:15.910604 6200 net.cpp:84] Creating Layer norm1
I0412 12:43:15.910607 6200 net.cpp:406] norm1 <- conv1
I0412 12:43:15.910612 6200 net.cpp:380] norm1 -> norm1
I0412 12:43:15.911109 6200 net.cpp:122] Setting up norm1
I0412 12:43:15.911119 6200 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0412 12:43:15.911123 6200 net.cpp:137] Memory required for data: 525203456
I0412 12:43:15.911128 6200 layer_factory.hpp:77] Creating layer pool1
I0412 12:43:15.911135 6200 net.cpp:84] Creating Layer pool1
I0412 12:43:15.911139 6200 net.cpp:406] pool1 <- norm1
I0412 12:43:15.911145 6200 net.cpp:380] pool1 -> pool1
I0412 12:43:15.911183 6200 net.cpp:122] Setting up pool1
I0412 12:43:15.911190 6200 net.cpp:129] Top shape: 128 96 27 27 (8957952)
I0412 12:43:15.911195 6200 net.cpp:137] Memory required for data: 561035264
I0412 12:43:15.911197 6200 layer_factory.hpp:77] Creating layer conv2
I0412 12:43:15.911207 6200 net.cpp:84] Creating Layer conv2
I0412 12:43:15.911211 6200 net.cpp:406] conv2 <- pool1
I0412 12:43:15.911216 6200 net.cpp:380] conv2 -> conv2
I0412 12:43:15.918716 6200 net.cpp:122] Setting up conv2
I0412 12:43:15.918731 6200 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0412 12:43:15.918735 6200 net.cpp:137] Memory required for data: 656586752
I0412 12:43:15.918746 6200 layer_factory.hpp:77] Creating layer relu2
I0412 12:43:15.918754 6200 net.cpp:84] Creating Layer relu2
I0412 12:43:15.918759 6200 net.cpp:406] relu2 <- conv2
I0412 12:43:15.918764 6200 net.cpp:367] relu2 -> conv2 (in-place)
I0412 12:43:15.919384 6200 net.cpp:122] Setting up relu2
I0412 12:43:15.919394 6200 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0412 12:43:15.919399 6200 net.cpp:137] Memory required for data: 752138240
I0412 12:43:15.919402 6200 layer_factory.hpp:77] Creating layer norm2
I0412 12:43:15.919409 6200 net.cpp:84] Creating Layer norm2
I0412 12:43:15.919414 6200 net.cpp:406] norm2 <- conv2
I0412 12:43:15.919420 6200 net.cpp:380] norm2 -> norm2
I0412 12:43:15.919749 6200 net.cpp:122] Setting up norm2
I0412 12:43:15.919757 6200 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0412 12:43:15.919761 6200 net.cpp:137] Memory required for data: 847689728
I0412 12:43:15.919765 6200 layer_factory.hpp:77] Creating layer pool2
I0412 12:43:15.919773 6200 net.cpp:84] Creating Layer pool2
I0412 12:43:15.919777 6200 net.cpp:406] pool2 <- norm2
I0412 12:43:15.919782 6200 net.cpp:380] pool2 -> pool2
I0412 12:43:15.919811 6200 net.cpp:122] Setting up pool2
I0412 12:43:15.919817 6200 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0412 12:43:15.919821 6200 net.cpp:137] Memory required for data: 869840896
I0412 12:43:15.919823 6200 layer_factory.hpp:77] Creating layer conv3
I0412 12:43:15.919833 6200 net.cpp:84] Creating Layer conv3
I0412 12:43:15.919836 6200 net.cpp:406] conv3 <- pool2
I0412 12:43:15.919842 6200 net.cpp:380] conv3 -> conv3
I0412 12:43:15.930610 6200 net.cpp:122] Setting up conv3
I0412 12:43:15.930622 6200 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0412 12:43:15.930626 6200 net.cpp:137] Memory required for data: 903067648
I0412 12:43:15.930636 6200 layer_factory.hpp:77] Creating layer relu3
I0412 12:43:15.930642 6200 net.cpp:84] Creating Layer relu3
I0412 12:43:15.930646 6200 net.cpp:406] relu3 <- conv3
I0412 12:43:15.930651 6200 net.cpp:367] relu3 -> conv3 (in-place)
I0412 12:43:15.931288 6200 net.cpp:122] Setting up relu3
I0412 12:43:15.931298 6200 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0412 12:43:15.931303 6200 net.cpp:137] Memory required for data: 936294400
I0412 12:43:15.931306 6200 layer_factory.hpp:77] Creating layer conv4
I0412 12:43:15.931318 6200 net.cpp:84] Creating Layer conv4
I0412 12:43:15.931339 6200 net.cpp:406] conv4 <- conv3
I0412 12:43:15.931346 6200 net.cpp:380] conv4 -> conv4
I0412 12:43:15.942237 6200 net.cpp:122] Setting up conv4
I0412 12:43:15.942250 6200 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0412 12:43:15.942255 6200 net.cpp:137] Memory required for data: 969521152
I0412 12:43:15.942262 6200 layer_factory.hpp:77] Creating layer relu4
I0412 12:43:15.942270 6200 net.cpp:84] Creating Layer relu4
I0412 12:43:15.942273 6200 net.cpp:406] relu4 <- conv4
I0412 12:43:15.942278 6200 net.cpp:367] relu4 -> conv4 (in-place)
I0412 12:43:15.942585 6200 net.cpp:122] Setting up relu4
I0412 12:43:15.942595 6200 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0412 12:43:15.942597 6200 net.cpp:137] Memory required for data: 1002747904
I0412 12:43:15.942601 6200 layer_factory.hpp:77] Creating layer conv5
I0412 12:43:15.942610 6200 net.cpp:84] Creating Layer conv5
I0412 12:43:15.942615 6200 net.cpp:406] conv5 <- conv4
I0412 12:43:15.942620 6200 net.cpp:380] conv5 -> conv5
I0412 12:43:15.948632 6200 net.cpp:122] Setting up conv5
I0412 12:43:15.948644 6200 net.cpp:129] Top shape: 128 128 13 13 (2768896)
I0412 12:43:15.948648 6200 net.cpp:137] Memory required for data: 1013823488
I0412 12:43:15.948660 6200 layer_factory.hpp:77] Creating layer relu5
I0412 12:43:15.948668 6200 net.cpp:84] Creating Layer relu5
I0412 12:43:15.948671 6200 net.cpp:406] relu5 <- conv5
I0412 12:43:15.948676 6200 net.cpp:367] relu5 -> conv5 (in-place)
I0412 12:43:15.949141 6200 net.cpp:122] Setting up relu5
I0412 12:43:15.949152 6200 net.cpp:129] Top shape: 128 128 13 13 (2768896)
I0412 12:43:15.949156 6200 net.cpp:137] Memory required for data: 1024899072
I0412 12:43:15.949160 6200 layer_factory.hpp:77] Creating layer conv5.2
I0412 12:43:15.949169 6200 net.cpp:84] Creating Layer conv5.2
I0412 12:43:15.949173 6200 net.cpp:406] conv5.2 <- conv5
I0412 12:43:15.949179 6200 net.cpp:380] conv5.2 -> conv5.2
I0412 12:43:15.952937 6200 net.cpp:122] Setting up conv5.2
I0412 12:43:15.952948 6200 net.cpp:129] Top shape: 128 128 13 13 (2768896)
I0412 12:43:15.952952 6200 net.cpp:137] Memory required for data: 1035974656
I0412 12:43:15.952960 6200 layer_factory.hpp:77] Creating layer relu5.2
I0412 12:43:15.952967 6200 net.cpp:84] Creating Layer relu5.2
I0412 12:43:15.952971 6200 net.cpp:406] relu5.2 <- conv5.2
I0412 12:43:15.952977 6200 net.cpp:367] relu5.2 -> conv5.2 (in-place)
I0412 12:43:15.953434 6200 net.cpp:122] Setting up relu5.2
I0412 12:43:15.953444 6200 net.cpp:129] Top shape: 128 128 13 13 (2768896)
I0412 12:43:15.953449 6200 net.cpp:137] Memory required for data: 1047050240
I0412 12:43:15.953452 6200 layer_factory.hpp:77] Creating layer pool5
I0412 12:43:15.953460 6200 net.cpp:84] Creating Layer pool5
I0412 12:43:15.953464 6200 net.cpp:406] pool5 <- conv5.2
I0412 12:43:15.953469 6200 net.cpp:380] pool5 -> pool5
I0412 12:43:15.953507 6200 net.cpp:122] Setting up pool5
I0412 12:43:15.953513 6200 net.cpp:129] Top shape: 128 128 6 6 (589824)
I0412 12:43:15.953516 6200 net.cpp:137] Memory required for data: 1049409536
I0412 12:43:15.953521 6200 layer_factory.hpp:77] Creating layer fc6
I0412 12:43:15.953527 6200 net.cpp:84] Creating Layer fc6
I0412 12:43:15.953531 6200 net.cpp:406] fc6 <- pool5
I0412 12:43:15.953536 6200 net.cpp:380] fc6 -> fc6
I0412 12:43:16.149281 6200 net.cpp:122] Setting up fc6
I0412 12:43:16.149302 6200 net.cpp:129] Top shape: 128 4096 (524288)
I0412 12:43:16.149305 6200 net.cpp:137] Memory required for data: 1051506688
I0412 12:43:16.149315 6200 layer_factory.hpp:77] Creating layer relu6
I0412 12:43:16.149324 6200 net.cpp:84] Creating Layer relu6
I0412 12:43:16.149328 6200 net.cpp:406] relu6 <- fc6
I0412 12:43:16.149335 6200 net.cpp:367] relu6 -> fc6 (in-place)
I0412 12:43:16.149691 6200 net.cpp:122] Setting up relu6
I0412 12:43:16.149699 6200 net.cpp:129] Top shape: 128 4096 (524288)
I0412 12:43:16.149703 6200 net.cpp:137] Memory required for data: 1053603840
I0412 12:43:16.149706 6200 layer_factory.hpp:77] Creating layer drop6
I0412 12:43:16.149732 6200 net.cpp:84] Creating Layer drop6
I0412 12:43:16.149736 6200 net.cpp:406] drop6 <- fc6
I0412 12:43:16.149741 6200 net.cpp:367] drop6 -> fc6 (in-place)
I0412 12:43:16.149770 6200 net.cpp:122] Setting up drop6
I0412 12:43:16.149775 6200 net.cpp:129] Top shape: 128 4096 (524288)
I0412 12:43:16.149778 6200 net.cpp:137] Memory required for data: 1055700992
I0412 12:43:16.149781 6200 layer_factory.hpp:77] Creating layer fc7
I0412 12:43:16.149788 6200 net.cpp:84] Creating Layer fc7
I0412 12:43:16.149791 6200 net.cpp:406] fc7 <- fc6
I0412 12:43:16.149797 6200 net.cpp:380] fc7 -> fc7
I0412 12:43:16.309814 6200 net.cpp:122] Setting up fc7
I0412 12:43:16.309834 6200 net.cpp:129] Top shape: 128 4096 (524288)
I0412 12:43:16.309837 6200 net.cpp:137] Memory required for data: 1057798144
I0412 12:43:16.309846 6200 layer_factory.hpp:77] Creating layer relu7
I0412 12:43:16.309854 6200 net.cpp:84] Creating Layer relu7
I0412 12:43:16.309859 6200 net.cpp:406] relu7 <- fc7
I0412 12:43:16.309865 6200 net.cpp:367] relu7 -> fc7 (in-place)
I0412 12:43:16.311455 6200 net.cpp:122] Setting up relu7
I0412 12:43:16.311465 6200 net.cpp:129] Top shape: 128 4096 (524288)
I0412 12:43:16.311468 6200 net.cpp:137] Memory required for data: 1059895296
I0412 12:43:16.311472 6200 layer_factory.hpp:77] Creating layer drop7
I0412 12:43:16.311480 6200 net.cpp:84] Creating Layer drop7
I0412 12:43:16.311483 6200 net.cpp:406] drop7 <- fc7
I0412 12:43:16.311488 6200 net.cpp:367] drop7 -> fc7 (in-place)
I0412 12:43:16.311511 6200 net.cpp:122] Setting up drop7
I0412 12:43:16.311517 6200 net.cpp:129] Top shape: 128 4096 (524288)
I0412 12:43:16.311519 6200 net.cpp:137] Memory required for data: 1061992448
I0412 12:43:16.311523 6200 layer_factory.hpp:77] Creating layer fc8
I0412 12:43:16.311529 6200 net.cpp:84] Creating Layer fc8
I0412 12:43:16.311533 6200 net.cpp:406] fc8 <- fc7
I0412 12:43:16.311538 6200 net.cpp:380] fc8 -> fc8
I0412 12:43:16.319265 6200 net.cpp:122] Setting up fc8
I0412 12:43:16.319274 6200 net.cpp:129] Top shape: 128 196 (25088)
I0412 12:43:16.319278 6200 net.cpp:137] Memory required for data: 1062092800
I0412 12:43:16.319288 6200 layer_factory.hpp:77] Creating layer loss
I0412 12:43:16.319295 6200 net.cpp:84] Creating Layer loss
I0412 12:43:16.319299 6200 net.cpp:406] loss <- fc8
I0412 12:43:16.319303 6200 net.cpp:406] loss <- label
I0412 12:43:16.319309 6200 net.cpp:380] loss -> loss
I0412 12:43:16.319319 6200 layer_factory.hpp:77] Creating layer loss
I0412 12:43:16.319839 6200 net.cpp:122] Setting up loss
I0412 12:43:16.319847 6200 net.cpp:129] Top shape: (1)
I0412 12:43:16.319850 6200 net.cpp:132] with loss weight 1
I0412 12:43:16.319869 6200 net.cpp:137] Memory required for data: 1062092804
I0412 12:43:16.319872 6200 net.cpp:198] loss needs backward computation.
I0412 12:43:16.319880 6200 net.cpp:198] fc8 needs backward computation.
I0412 12:43:16.319882 6200 net.cpp:198] drop7 needs backward computation.
I0412 12:43:16.319885 6200 net.cpp:198] relu7 needs backward computation.
I0412 12:43:16.319888 6200 net.cpp:198] fc7 needs backward computation.
I0412 12:43:16.319892 6200 net.cpp:198] drop6 needs backward computation.
I0412 12:43:16.319895 6200 net.cpp:198] relu6 needs backward computation.
I0412 12:43:16.319898 6200 net.cpp:198] fc6 needs backward computation.
I0412 12:43:16.319902 6200 net.cpp:198] pool5 needs backward computation.
I0412 12:43:16.319905 6200 net.cpp:198] relu5.2 needs backward computation.
I0412 12:43:16.319909 6200 net.cpp:198] conv5.2 needs backward computation.
I0412 12:43:16.319912 6200 net.cpp:198] relu5 needs backward computation.
I0412 12:43:16.319916 6200 net.cpp:198] conv5 needs backward computation.
I0412 12:43:16.319919 6200 net.cpp:198] relu4 needs backward computation.
I0412 12:43:16.319923 6200 net.cpp:198] conv4 needs backward computation.
I0412 12:43:16.319926 6200 net.cpp:198] relu3 needs backward computation.
I0412 12:43:16.319929 6200 net.cpp:198] conv3 needs backward computation.
I0412 12:43:16.319933 6200 net.cpp:198] pool2 needs backward computation.
I0412 12:43:16.319955 6200 net.cpp:198] norm2 needs backward computation.
I0412 12:43:16.319959 6200 net.cpp:198] relu2 needs backward computation.
I0412 12:43:16.319962 6200 net.cpp:198] conv2 needs backward computation.
I0412 12:43:16.319967 6200 net.cpp:198] pool1 needs backward computation.
I0412 12:43:16.319969 6200 net.cpp:198] norm1 needs backward computation.
I0412 12:43:16.319973 6200 net.cpp:198] relu1 needs backward computation.
I0412 12:43:16.319977 6200 net.cpp:198] conv1 needs backward computation.
I0412 12:43:16.319980 6200 net.cpp:200] train-data does not need backward computation.
I0412 12:43:16.319983 6200 net.cpp:242] This network produces output loss
I0412 12:43:16.319996 6200 net.cpp:255] Network initialization done.
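
Annotation (not log output): each running "Memory required for data" figure above is the cumulative element count of all top blobs so far times 4 bytes (single-precision float). A minimal Python check against the first two layers:

elements = 19787136 + 128        # train-data tops: data (128x3x227x227) + label (128)
print(elements * 4)              # 79149056, as logged after train-data
elements += 37171200             # conv1 top: 128x96x55x55
print(elements * 4)              # 227833856, as logged after conv1
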
I0412 12:43:16.320430 6200 solver.cpp:172] Creating test net (#0) specified by net file: train_val.prototxt
I0412 12:43:16.320461 6200 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer train-data
I0412 12:43:16.320607 6200 net.cpp:51] Initializing net from parameters:
state {
phase: TEST
}
layer {
name: "val-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/val_db"
batch_size: 32
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "conv5.2"
type: "Convolution"
bottom: "conv5"
top: "conv5.2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5.2"
type: "ReLU"
bottom: "conv5.2"
top: "conv5.2"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5.2"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
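
Annotation (not log output): with test_iter 51 from the solver and the validation batch_size 32 above, each "Testing net" pass below scores 51 * 32 = 1632 images; for a 196-way classifier, chance-level accuracy is about 1/196 ~ 0.0051, which is roughly where the early test accuracies reported below sit. A minimal Python check of that arithmetic:

test_iter, val_batch, num_classes = 51, 32, 196
print(test_iter * val_batch)      # 1632 images per validation pass
print(round(1 / num_classes, 4))  # 0.0051 chance-level accuracy
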
I0412 12:43:16.320694 6200 layer_factory.hpp:77] Creating layer val-data
I0412 12:43:16.353837 6200 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/val_db
I0412 12:43:16.402576 6200 net.cpp:84] Creating Layer val-data
I0412 12:43:16.402595 6200 net.cpp:380] val-data -> data
I0412 12:43:16.402609 6200 net.cpp:380] val-data -> label
I0412 12:43:16.402621 6200 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-MAN-2/digits/jobs/20210407-214532-d396/mean.binaryproto
I0412 12:43:16.410495 6200 data_layer.cpp:45] output data size: 32,3,227,227
I0412 12:43:16.457258 6200 net.cpp:122] Setting up val-data
I0412 12:43:16.457280 6200 net.cpp:129] Top shape: 32 3 227 227 (4946784)
I0412 12:43:16.457286 6200 net.cpp:129] Top shape: 32 (32)
I0412 12:43:16.457290 6200 net.cpp:137] Memory required for data: 19787264
I0412 12:43:16.457298 6200 layer_factory.hpp:77] Creating layer label_val-data_1_split
I0412 12:43:16.457311 6200 net.cpp:84] Creating Layer label_val-data_1_split
I0412 12:43:16.457316 6200 net.cpp:406] label_val-data_1_split <- label
I0412 12:43:16.457324 6200 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_0
I0412 12:43:16.457334 6200 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_1
I0412 12:43:16.457393 6200 net.cpp:122] Setting up label_val-data_1_split
I0412 12:43:16.457399 6200 net.cpp:129] Top shape: 32 (32)
I0412 12:43:16.457404 6200 net.cpp:129] Top shape: 32 (32)
I0412 12:43:16.457427 6200 net.cpp:137] Memory required for data: 19787520
I0412 12:43:16.457432 6200 layer_factory.hpp:77] Creating layer conv1
I0412 12:43:16.457445 6200 net.cpp:84] Creating Layer conv1
I0412 12:43:16.457449 6200 net.cpp:406] conv1 <- data
I0412 12:43:16.457455 6200 net.cpp:380] conv1 -> conv1
I0412 12:43:16.459827 6200 net.cpp:122] Setting up conv1
I0412 12:43:16.459841 6200 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0412 12:43:16.459844 6200 net.cpp:137] Memory required for data: 56958720
I0412 12:43:16.459856 6200 layer_factory.hpp:77] Creating layer relu1
I0412 12:43:16.459863 6200 net.cpp:84] Creating Layer relu1
I0412 12:43:16.459867 6200 net.cpp:406] relu1 <- conv1
I0412 12:43:16.459873 6200 net.cpp:367] relu1 -> conv1 (in-place)
I0412 12:43:16.460402 6200 net.cpp:122] Setting up relu1
I0412 12:43:16.460412 6200 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0412 12:43:16.460417 6200 net.cpp:137] Memory required for data: 94129920
I0412 12:43:16.460420 6200 layer_factory.hpp:77] Creating layer norm1
I0412 12:43:16.460430 6200 net.cpp:84] Creating Layer norm1
I0412 12:43:16.460434 6200 net.cpp:406] norm1 <- conv1
I0412 12:43:16.460440 6200 net.cpp:380] norm1 -> norm1
I0412 12:43:16.460965 6200 net.cpp:122] Setting up norm1
I0412 12:43:16.460976 6200 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0412 12:43:16.460980 6200 net.cpp:137] Memory required for data: 131301120
I0412 12:43:16.460984 6200 layer_factory.hpp:77] Creating layer pool1
I0412 12:43:16.460992 6200 net.cpp:84] Creating Layer pool1
I0412 12:43:16.460996 6200 net.cpp:406] pool1 <- norm1
I0412 12:43:16.461002 6200 net.cpp:380] pool1 -> pool1
I0412 12:43:16.461038 6200 net.cpp:122] Setting up pool1
I0412 12:43:16.461045 6200 net.cpp:129] Top shape: 32 96 27 27 (2239488)
I0412 12:43:16.461048 6200 net.cpp:137] Memory required for data: 140259072
I0412 12:43:16.461051 6200 layer_factory.hpp:77] Creating layer conv2
I0412 12:43:16.461061 6200 net.cpp:84] Creating Layer conv2
I0412 12:43:16.461066 6200 net.cpp:406] conv2 <- pool1
I0412 12:43:16.461071 6200 net.cpp:380] conv2 -> conv2
I0412 12:43:16.470021 6200 net.cpp:122] Setting up conv2
I0412 12:43:16.470036 6200 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0412 12:43:16.470041 6200 net.cpp:137] Memory required for data: 164146944
I0412 12:43:16.470052 6200 layer_factory.hpp:77] Creating layer relu2
I0412 12:43:16.470059 6200 net.cpp:84] Creating Layer relu2
I0412 12:43:16.470063 6200 net.cpp:406] relu2 <- conv2
I0412 12:43:16.470069 6200 net.cpp:367] relu2 -> conv2 (in-place)
I0412 12:43:16.470405 6200 net.cpp:122] Setting up relu2
I0412 12:43:16.470425 6200 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0412 12:43:16.470429 6200 net.cpp:137] Memory required for data: 188034816
I0412 12:43:16.470433 6200 layer_factory.hpp:77] Creating layer norm2
I0412 12:43:16.470443 6200 net.cpp:84] Creating Layer norm2
I0412 12:43:16.470446 6200 net.cpp:406] norm2 <- conv2
I0412 12:43:16.470453 6200 net.cpp:380] norm2 -> norm2
I0412 12:43:16.470968 6200 net.cpp:122] Setting up norm2
I0412 12:43:16.470978 6200 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0412 12:43:16.470983 6200 net.cpp:137] Memory required for data: 211922688
I0412 12:43:16.470988 6200 layer_factory.hpp:77] Creating layer pool2
I0412 12:43:16.470994 6200 net.cpp:84] Creating Layer pool2
I0412 12:43:16.470999 6200 net.cpp:406] pool2 <- norm2
I0412 12:43:16.471004 6200 net.cpp:380] pool2 -> pool2
I0412 12:43:16.471037 6200 net.cpp:122] Setting up pool2
I0412 12:43:16.471043 6200 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0412 12:43:16.471046 6200 net.cpp:137] Memory required for data: 217460480
I0412 12:43:16.471050 6200 layer_factory.hpp:77] Creating layer conv3
I0412 12:43:16.471060 6200 net.cpp:84] Creating Layer conv3
I0412 12:43:16.471063 6200 net.cpp:406] conv3 <- pool2
I0412 12:43:16.471068 6200 net.cpp:380] conv3 -> conv3
I0412 12:43:16.482520 6200 net.cpp:122] Setting up conv3
I0412 12:43:16.482533 6200 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0412 12:43:16.482555 6200 net.cpp:137] Memory required for data: 225767168
I0412 12:43:16.482566 6200 layer_factory.hpp:77] Creating layer relu3
I0412 12:43:16.482573 6200 net.cpp:84] Creating Layer relu3
I0412 12:43:16.482578 6200 net.cpp:406] relu3 <- conv3
I0412 12:43:16.482583 6200 net.cpp:367] relu3 -> conv3 (in-place)
I0412 12:43:16.482904 6200 net.cpp:122] Setting up relu3
I0412 12:43:16.482913 6200 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0412 12:43:16.482918 6200 net.cpp:137] Memory required for data: 234073856
I0412 12:43:16.482921 6200 layer_factory.hpp:77] Creating layer conv4
I0412 12:43:16.482930 6200 net.cpp:84] Creating Layer conv4
I0412 12:43:16.482934 6200 net.cpp:406] conv4 <- conv3
I0412 12:43:16.482940 6200 net.cpp:380] conv4 -> conv4
I0412 12:43:16.494225 6200 net.cpp:122] Setting up conv4
I0412 12:43:16.494238 6200 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0412 12:43:16.494242 6200 net.cpp:137] Memory required for data: 242380544
I0412 12:43:16.494251 6200 layer_factory.hpp:77] Creating layer relu4
I0412 12:43:16.494259 6200 net.cpp:84] Creating Layer relu4
I0412 12:43:16.494263 6200 net.cpp:406] relu4 <- conv4
I0412 12:43:16.494269 6200 net.cpp:367] relu4 -> conv4 (in-place)
I0412 12:43:16.494733 6200 net.cpp:122] Setting up relu4
I0412 12:43:16.494743 6200 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0412 12:43:16.494747 6200 net.cpp:137] Memory required for data: 250687232
I0412 12:43:16.494751 6200 layer_factory.hpp:77] Creating layer conv5
I0412 12:43:16.494760 6200 net.cpp:84] Creating Layer conv5
I0412 12:43:16.494765 6200 net.cpp:406] conv5 <- conv4
I0412 12:43:16.494771 6200 net.cpp:380] conv5 -> conv5
I0412 12:43:16.501685 6200 net.cpp:122] Setting up conv5
I0412 12:43:16.501698 6200 net.cpp:129] Top shape: 32 128 13 13 (692224)
I0412 12:43:16.501703 6200 net.cpp:137] Memory required for data: 253456128
I0412 12:43:16.501713 6200 layer_factory.hpp:77] Creating layer relu5
I0412 12:43:16.501721 6200 net.cpp:84] Creating Layer relu5
I0412 12:43:16.501725 6200 net.cpp:406] relu5 <- conv5
I0412 12:43:16.501732 6200 net.cpp:367] relu5 -> conv5 (in-place)
I0412 12:43:16.502280 6200 net.cpp:122] Setting up relu5
I0412 12:43:16.502290 6200 net.cpp:129] Top shape: 32 128 13 13 (692224)
I0412 12:43:16.502295 6200 net.cpp:137] Memory required for data: 256225024
I0412 12:43:16.502297 6200 layer_factory.hpp:77] Creating layer conv5.2
I0412 12:43:16.502312 6200 net.cpp:84] Creating Layer conv5.2
I0412 12:43:16.502316 6200 net.cpp:406] conv5.2 <- conv5
I0412 12:43:16.502323 6200 net.cpp:380] conv5.2 -> conv5.2
I0412 12:43:16.506263 6200 net.cpp:122] Setting up conv5.2
I0412 12:43:16.506274 6200 net.cpp:129] Top shape: 32 128 13 13 (692224)
I0412 12:43:16.506278 6200 net.cpp:137] Memory required for data: 258993920
I0412 12:43:16.506285 6200 layer_factory.hpp:77] Creating layer relu5.2
I0412 12:43:16.506294 6200 net.cpp:84] Creating Layer relu5.2
I0412 12:43:16.506297 6200 net.cpp:406] relu5.2 <- conv5.2
I0412 12:43:16.506302 6200 net.cpp:367] relu5.2 -> conv5.2 (in-place)
I0412 12:43:16.506839 6200 net.cpp:122] Setting up relu5.2
I0412 12:43:16.506848 6200 net.cpp:129] Top shape: 32 128 13 13 (692224)
I0412 12:43:16.506852 6200 net.cpp:137] Memory required for data: 261762816
I0412 12:43:16.506856 6200 layer_factory.hpp:77] Creating layer pool5
I0412 12:43:16.506863 6200 net.cpp:84] Creating Layer pool5
I0412 12:43:16.506867 6200 net.cpp:406] pool5 <- conv5.2
I0412 12:43:16.506873 6200 net.cpp:380] pool5 -> pool5
I0412 12:43:16.506912 6200 net.cpp:122] Setting up pool5
I0412 12:43:16.506919 6200 net.cpp:129] Top shape: 32 128 6 6 (147456)
I0412 12:43:16.506923 6200 net.cpp:137] Memory required for data: 262352640
I0412 12:43:16.506927 6200 layer_factory.hpp:77] Creating layer fc6
I0412 12:43:16.506933 6200 net.cpp:84] Creating Layer fc6
I0412 12:43:16.506937 6200 net.cpp:406] fc6 <- pool5
I0412 12:43:16.506943 6200 net.cpp:380] fc6 -> fc6
I0412 12:43:16.684170 6200 net.cpp:122] Setting up fc6
I0412 12:43:16.684190 6200 net.cpp:129] Top shape: 32 4096 (131072)
I0412 12:43:16.684212 6200 net.cpp:137] Memory required for data: 262876928
I0412 12:43:16.684221 6200 layer_factory.hpp:77] Creating layer relu6
I0412 12:43:16.684231 6200 net.cpp:84] Creating Layer relu6
I0412 12:43:16.684234 6200 net.cpp:406] relu6 <- fc6
I0412 12:43:16.684240 6200 net.cpp:367] relu6 -> fc6 (in-place)
I0412 12:43:16.686084 6200 net.cpp:122] Setting up relu6
I0412 12:43:16.686095 6200 net.cpp:129] Top shape: 32 4096 (131072)
I0412 12:43:16.686098 6200 net.cpp:137] Memory required for data: 263401216
I0412 12:43:16.686103 6200 layer_factory.hpp:77] Creating layer drop6
I0412 12:43:16.686111 6200 net.cpp:84] Creating Layer drop6
I0412 12:43:16.686115 6200 net.cpp:406] drop6 <- fc6
I0412 12:43:16.686122 6200 net.cpp:367] drop6 -> fc6 (in-place)
I0412 12:43:16.686149 6200 net.cpp:122] Setting up drop6
I0412 12:43:16.686154 6200 net.cpp:129] Top shape: 32 4096 (131072)
I0412 12:43:16.686157 6200 net.cpp:137] Memory required for data: 263925504
I0412 12:43:16.686161 6200 layer_factory.hpp:77] Creating layer fc7
I0412 12:43:16.686169 6200 net.cpp:84] Creating Layer fc7
I0412 12:43:16.686172 6200 net.cpp:406] fc7 <- fc6
I0412 12:43:16.686178 6200 net.cpp:380] fc7 -> fc7
I0412 12:43:16.843058 6200 net.cpp:122] Setting up fc7
I0412 12:43:16.843080 6200 net.cpp:129] Top shape: 32 4096 (131072)
I0412 12:43:16.843082 6200 net.cpp:137] Memory required for data: 264449792
I0412 12:43:16.843091 6200 layer_factory.hpp:77] Creating layer relu7
I0412 12:43:16.843099 6200 net.cpp:84] Creating Layer relu7
I0412 12:43:16.843104 6200 net.cpp:406] relu7 <- fc7
I0412 12:43:16.843112 6200 net.cpp:367] relu7 -> fc7 (in-place)
I0412 12:43:16.843772 6200 net.cpp:122] Setting up relu7
I0412 12:43:16.843781 6200 net.cpp:129] Top shape: 32 4096 (131072)
I0412 12:43:16.843784 6200 net.cpp:137] Memory required for data: 264974080
I0412 12:43:16.843788 6200 layer_factory.hpp:77] Creating layer drop7
I0412 12:43:16.843796 6200 net.cpp:84] Creating Layer drop7
I0412 12:43:16.843799 6200 net.cpp:406] drop7 <- fc7
I0412 12:43:16.843804 6200 net.cpp:367] drop7 -> fc7 (in-place)
I0412 12:43:16.843830 6200 net.cpp:122] Setting up drop7
I0412 12:43:16.843837 6200 net.cpp:129] Top shape: 32 4096 (131072)
I0412 12:43:16.843839 6200 net.cpp:137] Memory required for data: 265498368
I0412 12:43:16.843842 6200 layer_factory.hpp:77] Creating layer fc8
I0412 12:43:16.843849 6200 net.cpp:84] Creating Layer fc8
I0412 12:43:16.843852 6200 net.cpp:406] fc8 <- fc7
I0412 12:43:16.843858 6200 net.cpp:380] fc8 -> fc8
I0412 12:43:16.851505 6200 net.cpp:122] Setting up fc8
I0412 12:43:16.851514 6200 net.cpp:129] Top shape: 32 196 (6272)
I0412 12:43:16.851517 6200 net.cpp:137] Memory required for data: 265523456
I0412 12:43:16.851527 6200 layer_factory.hpp:77] Creating layer fc8_fc8_0_split
I0412 12:43:16.851536 6200 net.cpp:84] Creating Layer fc8_fc8_0_split
I0412 12:43:16.851539 6200 net.cpp:406] fc8_fc8_0_split <- fc8
I0412 12:43:16.851544 6200 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_0
I0412 12:43:16.851550 6200 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_1
I0412 12:43:16.851583 6200 net.cpp:122] Setting up fc8_fc8_0_split
I0412 12:43:16.851588 6200 net.cpp:129] Top shape: 32 196 (6272)
I0412 12:43:16.851591 6200 net.cpp:129] Top shape: 32 196 (6272)
I0412 12:43:16.851594 6200 net.cpp:137] Memory required for data: 265573632
I0412 12:43:16.851598 6200 layer_factory.hpp:77] Creating layer accuracy
I0412 12:43:16.851604 6200 net.cpp:84] Creating Layer accuracy
I0412 12:43:16.851608 6200 net.cpp:406] accuracy <- fc8_fc8_0_split_0
I0412 12:43:16.851611 6200 net.cpp:406] accuracy <- label_val-data_1_split_0
I0412 12:43:16.851616 6200 net.cpp:380] accuracy -> accuracy
I0412 12:43:16.851624 6200 net.cpp:122] Setting up accuracy
I0412 12:43:16.851627 6200 net.cpp:129] Top shape: (1)
I0412 12:43:16.851630 6200 net.cpp:137] Memory required for data: 265573636
I0412 12:43:16.851634 6200 layer_factory.hpp:77] Creating layer loss
I0412 12:43:16.851639 6200 net.cpp:84] Creating Layer loss
I0412 12:43:16.851660 6200 net.cpp:406] loss <- fc8_fc8_0_split_1
I0412 12:43:16.851663 6200 net.cpp:406] loss <- label_val-data_1_split_1
I0412 12:43:16.851670 6200 net.cpp:380] loss -> loss
I0412 12:43:16.851676 6200 layer_factory.hpp:77] Creating layer loss
I0412 12:43:16.852113 6200 net.cpp:122] Setting up loss
I0412 12:43:16.852121 6200 net.cpp:129] Top shape: (1)
I0412 12:43:16.852124 6200 net.cpp:132] with loss weight 1
I0412 12:43:16.852134 6200 net.cpp:137] Memory required for data: 265573640
I0412 12:43:16.852138 6200 net.cpp:198] loss needs backward computation.
I0412 12:43:16.852142 6200 net.cpp:200] accuracy does not need backward computation.
I0412 12:43:16.852146 6200 net.cpp:198] fc8_fc8_0_split needs backward computation.
I0412 12:43:16.852149 6200 net.cpp:198] fc8 needs backward computation.
I0412 12:43:16.852154 6200 net.cpp:198] drop7 needs backward computation.
I0412 12:43:16.852156 6200 net.cpp:198] relu7 needs backward computation.
I0412 12:43:16.852159 6200 net.cpp:198] fc7 needs backward computation.
I0412 12:43:16.852162 6200 net.cpp:198] drop6 needs backward computation.
I0412 12:43:16.852165 6200 net.cpp:198] relu6 needs backward computation.
I0412 12:43:16.852169 6200 net.cpp:198] fc6 needs backward computation.
I0412 12:43:16.852172 6200 net.cpp:198] pool5 needs backward computation.
I0412 12:43:16.852176 6200 net.cpp:198] relu5.2 needs backward computation.
I0412 12:43:16.852180 6200 net.cpp:198] conv5.2 needs backward computation.
I0412 12:43:16.852185 6200 net.cpp:198] relu5 needs backward computation.
I0412 12:43:16.852187 6200 net.cpp:198] conv5 needs backward computation.
I0412 12:43:16.852191 6200 net.cpp:198] relu4 needs backward computation.
I0412 12:43:16.852195 6200 net.cpp:198] conv4 needs backward computation.
I0412 12:43:16.852197 6200 net.cpp:198] relu3 needs backward computation.
I0412 12:43:16.852201 6200 net.cpp:198] conv3 needs backward computation.
I0412 12:43:16.852205 6200 net.cpp:198] pool2 needs backward computation.
I0412 12:43:16.852208 6200 net.cpp:198] norm2 needs backward computation.
I0412 12:43:16.852211 6200 net.cpp:198] relu2 needs backward computation.
I0412 12:43:16.852214 6200 net.cpp:198] conv2 needs backward computation.
I0412 12:43:16.852218 6200 net.cpp:198] pool1 needs backward computation.
I0412 12:43:16.852221 6200 net.cpp:198] norm1 needs backward computation.
I0412 12:43:16.852226 6200 net.cpp:198] relu1 needs backward computation.
I0412 12:43:16.852228 6200 net.cpp:198] conv1 needs backward computation.
I0412 12:43:16.852232 6200 net.cpp:200] label_val-data_1_split does not need backward computation.
I0412 12:43:16.852236 6200 net.cpp:200] val-data does not need backward computation.
I0412 12:43:16.852239 6200 net.cpp:242] This network produces output accuracy
I0412 12:43:16.852243 6200 net.cpp:242] This network produces output loss
I0412 12:43:16.852259 6200 net.cpp:255] Network initialization done.
I0412 12:43:16.852334 6200 solver.cpp:56] Solver scaffolding done.
I0412 12:43:16.852816 6200 caffe.cpp:248] Starting Optimization
I0412 12:43:16.852824 6200 solver.cpp:272] Solving
I0412 12:43:16.852828 6200 solver.cpp:273] Learning Rate Policy: exp
I0412 12:43:16.854113 6200 solver.cpp:330] Iteration 0, Testing net (#0)
I0412 12:43:16.854122 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:43:16.908701 6200 blocking_queue.cpp:49] Waiting for data
I0412 12:43:21.233322 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:43:21.277779 6200 solver.cpp:397] Test net output #0: accuracy = 0.00919118
I0412 12:43:21.277822 6200 solver.cpp:397] Test net output #1: loss = 5.27962 (* 1 = 5.27962 loss)
I0412 12:43:21.367172 6200 solver.cpp:218] Iteration 0 (2.82817e+37 iter/s, 4.51416s/12 iters), loss = 5.29421
I0412 12:43:21.368685 6200 solver.cpp:237] Train net output #0: loss = 5.29421 (* 1 = 5.29421 loss)
I0412 12:43:21.368711 6200 sgd_solver.cpp:105] Iteration 0, lr = 0.01
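
Annotation (not log output): a minimal Python sketch for pulling the training curve out of this file; it assumes the log is saved locally as caffe_output.log and relies only on the "solver.cpp:218] Iteration N ..., loss = X" and "sgd_solver.cpp:105] Iteration N, lr = Y" line formats shown above:

import re

loss_re = re.compile(r"Iteration (\d+) .*?, loss = ([0-9.]+)")
lr_re = re.compile(r"Iteration (\d+), lr = ([0-9.e-]+)")

losses, lrs = [], []
with open("caffe_output.log") as f:
    for line in f:
        if "solver.cpp:218]" in line:          # per-display training loss lines
            m = loss_re.search(line)
            if m:
                losses.append((int(m.group(1)), float(m.group(2))))
        elif "sgd_solver.cpp:105]" in line:    # per-display learning-rate lines
            m = lr_re.search(line)
            if m:
                lrs.append((int(m.group(1)), float(m.group(2))))

print(losses[:3])   # [(0, 5.29421), (12, 5.28238), (24, 5.292)]
print(lrs[:3])      # [(0, 0.01), (12, 0.00997626), (24, 0.00995257)]
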
I0412 12:43:25.279996 6200 solver.cpp:218] Iteration 12 (3.06814 iter/s, 3.91116s/12 iters), loss = 5.28238
I0412 12:43:25.280053 6200 solver.cpp:237] Train net output #0: loss = 5.28238 (* 1 = 5.28238 loss)
I0412 12:43:25.280062 6200 sgd_solver.cpp:105] Iteration 12, lr = 0.00997626
I0412 12:43:30.309818 6200 solver.cpp:218] Iteration 24 (2.38588 iter/s, 5.02959s/12 iters), loss = 5.292
I0412 12:43:30.309861 6200 solver.cpp:237] Train net output #0: loss = 5.292 (* 1 = 5.292 loss)
I0412 12:43:30.309875 6200 sgd_solver.cpp:105] Iteration 24, lr = 0.00995257
I0412 12:43:35.480439 6200 solver.cpp:218] Iteration 36 (2.3209 iter/s, 5.1704s/12 iters), loss = 5.27949
I0412 12:43:35.480479 6200 solver.cpp:237] Train net output #0: loss = 5.27949 (* 1 = 5.27949 loss)
I0412 12:43:35.480489 6200 sgd_solver.cpp:105] Iteration 36, lr = 0.00992894
I0412 12:43:40.457831 6200 solver.cpp:218] Iteration 48 (2.411 iter/s, 4.97719s/12 iters), loss = 5.30789
I0412 12:43:40.457864 6200 solver.cpp:237] Train net output #0: loss = 5.30789 (* 1 = 5.30789 loss)
I0412 12:43:40.457872 6200 sgd_solver.cpp:105] Iteration 48, lr = 0.00990537
I0412 12:43:45.454850 6200 solver.cpp:218] Iteration 60 (2.40153 iter/s, 4.99681s/12 iters), loss = 5.27876
I0412 12:43:45.455178 6200 solver.cpp:237] Train net output #0: loss = 5.27876 (* 1 = 5.27876 loss)
I0412 12:43:45.455191 6200 sgd_solver.cpp:105] Iteration 60, lr = 0.00988185
I0412 12:43:50.476509 6200 solver.cpp:218] Iteration 72 (2.38989 iter/s, 5.02116s/12 iters), loss = 5.30815
I0412 12:43:50.476557 6200 solver.cpp:237] Train net output #0: loss = 5.30815 (* 1 = 5.30815 loss)
I0412 12:43:50.476568 6200 sgd_solver.cpp:105] Iteration 72, lr = 0.00985839
I0412 12:43:55.868820 6200 solver.cpp:218] Iteration 84 (2.22549 iter/s, 5.39208s/12 iters), loss = 5.30584
I0412 12:43:55.868875 6200 solver.cpp:237] Train net output #0: loss = 5.30584 (* 1 = 5.30584 loss)
I0412 12:43:55.868891 6200 sgd_solver.cpp:105] Iteration 84, lr = 0.00983498
I0412 12:44:01.332108 6200 solver.cpp:218] Iteration 96 (2.19658 iter/s, 5.46305s/12 iters), loss = 5.32383
I0412 12:44:01.332162 6200 solver.cpp:237] Train net output #0: loss = 5.32383 (* 1 = 5.32383 loss)
I0412 12:44:01.332177 6200 sgd_solver.cpp:105] Iteration 96, lr = 0.00981163
I0412 12:44:03.006876 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:44:03.314685 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_102.caffemodel
I0412 12:44:05.437461 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_102.solverstate
I0412 12:44:08.245488 6200 solver.cpp:330] Iteration 102, Testing net (#0)
I0412 12:44:08.245522 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:44:12.612149 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:44:12.688344 6200 solver.cpp:397] Test net output #0: accuracy = 0.00551471
I0412 12:44:12.688387 6200 solver.cpp:397] Test net output #1: loss = 5.29089 (* 1 = 5.29089 loss)
I0412 12:44:14.528246 6200 solver.cpp:218] Iteration 108 (0.909391 iter/s, 13.1956s/12 iters), loss = 5.31051
I0412 12:44:14.528292 6200 solver.cpp:237] Train net output #0: loss = 5.31051 (* 1 = 5.31051 loss)
I0412 12:44:14.528303 6200 sgd_solver.cpp:105] Iteration 108, lr = 0.00978834
I0412 12:44:19.456842 6200 solver.cpp:218] Iteration 120 (2.43488 iter/s, 4.92838s/12 iters), loss = 5.27997
I0412 12:44:19.457868 6200 solver.cpp:237] Train net output #0: loss = 5.27997 (* 1 = 5.27997 loss)
I0412 12:44:19.457883 6200 sgd_solver.cpp:105] Iteration 120, lr = 0.0097651
I0412 12:44:24.292282 6200 solver.cpp:218] Iteration 132 (2.48229 iter/s, 4.83425s/12 iters), loss = 5.24165
I0412 12:44:24.292333 6200 solver.cpp:237] Train net output #0: loss = 5.24165 (* 1 = 5.24165 loss)
I0412 12:44:24.292348 6200 sgd_solver.cpp:105] Iteration 132, lr = 0.00974192
I0412 12:44:29.086052 6200 solver.cpp:218] Iteration 144 (2.50336 iter/s, 4.79355s/12 iters), loss = 5.31108
I0412 12:44:29.086098 6200 solver.cpp:237] Train net output #0: loss = 5.31108 (* 1 = 5.31108 loss)
I0412 12:44:29.086110 6200 sgd_solver.cpp:105] Iteration 144, lr = 0.00971879
I0412 12:44:34.028210 6200 solver.cpp:218] Iteration 156 (2.4282 iter/s, 4.94194s/12 iters), loss = 5.26109
I0412 12:44:34.028254 6200 solver.cpp:237] Train net output #0: loss = 5.26109 (* 1 = 5.26109 loss)
I0412 12:44:34.028267 6200 sgd_solver.cpp:105] Iteration 156, lr = 0.00969571
I0412 12:44:38.957546 6200 solver.cpp:218] Iteration 168 (2.43451 iter/s, 4.92912s/12 iters), loss = 5.27746
I0412 12:44:38.957588 6200 solver.cpp:237] Train net output #0: loss = 5.27746 (* 1 = 5.27746 loss)
I0412 12:44:38.957600 6200 sgd_solver.cpp:105] Iteration 168, lr = 0.00967269
I0412 12:44:43.841759 6200 solver.cpp:218] Iteration 180 (2.457 iter/s, 4.884s/12 iters), loss = 5.27427
I0412 12:44:43.841806 6200 solver.cpp:237] Train net output #0: loss = 5.27427 (* 1 = 5.27427 loss)
I0412 12:44:43.841818 6200 sgd_solver.cpp:105] Iteration 180, lr = 0.00964973
I0412 12:44:48.716380 6200 solver.cpp:218] Iteration 192 (2.46184 iter/s, 4.8744s/12 iters), loss = 5.27359
I0412 12:44:48.716423 6200 solver.cpp:237] Train net output #0: loss = 5.27359 (* 1 = 5.27359 loss)
I0412 12:44:48.716434 6200 sgd_solver.cpp:105] Iteration 192, lr = 0.00962682
I0412 12:44:52.483530 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:44:53.206496 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_204.caffemodel
I0412 12:44:55.231020 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_204.solverstate
I0412 12:44:56.796512 6200 solver.cpp:330] Iteration 204, Testing net (#0)
I0412 12:44:56.796543 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:45:01.106988 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:45:01.228603 6200 solver.cpp:397] Test net output #0: accuracy = 0.00551471
I0412 12:45:01.228648 6200 solver.cpp:397] Test net output #1: loss = 5.28902 (* 1 = 5.28902 loss)
I0412 12:45:01.313827 6200 solver.cpp:218] Iteration 204 (0.95261 iter/s, 12.597s/12 iters), loss = 5.2775
I0412 12:45:01.313871 6200 solver.cpp:237] Train net output #0: loss = 5.2775 (* 1 = 5.2775 loss)
I0412 12:45:01.313882 6200 sgd_solver.cpp:105] Iteration 204, lr = 0.00960396
I0412 12:45:05.481495 6200 solver.cpp:218] Iteration 216 (2.87944 iter/s, 4.16747s/12 iters), loss = 5.2861
I0412 12:45:05.481541 6200 solver.cpp:237] Train net output #0: loss = 5.2861 (* 1 = 5.2861 loss)
I0412 12:45:05.481554 6200 sgd_solver.cpp:105] Iteration 216, lr = 0.00958116
I0412 12:45:10.388808 6200 solver.cpp:218] Iteration 228 (2.44544 iter/s, 4.90709s/12 iters), loss = 5.27528
I0412 12:45:10.388854 6200 solver.cpp:237] Train net output #0: loss = 5.27528 (* 1 = 5.27528 loss)
I0412 12:45:10.388865 6200 sgd_solver.cpp:105] Iteration 228, lr = 0.00955841
I0412 12:45:15.199846 6200 solver.cpp:218] Iteration 240 (2.49438 iter/s, 4.81082s/12 iters), loss = 5.30681
I0412 12:45:15.199889 6200 solver.cpp:237] Train net output #0: loss = 5.30681 (* 1 = 5.30681 loss)
I0412 12:45:15.199901 6200 sgd_solver.cpp:105] Iteration 240, lr = 0.00953572
I0412 12:45:20.014143 6200 solver.cpp:218] Iteration 252 (2.49269 iter/s, 4.81408s/12 iters), loss = 5.27832
I0412 12:45:20.014199 6200 solver.cpp:237] Train net output #0: loss = 5.27832 (* 1 = 5.27832 loss)
I0412 12:45:20.014214 6200 sgd_solver.cpp:105] Iteration 252, lr = 0.00951308
I0412 12:45:24.888717 6200 solver.cpp:218] Iteration 264 (2.46187 iter/s, 4.87435s/12 iters), loss = 5.27575
I0412 12:45:24.888823 6200 solver.cpp:237] Train net output #0: loss = 5.27575 (* 1 = 5.27575 loss)
I0412 12:45:24.888833 6200 sgd_solver.cpp:105] Iteration 264, lr = 0.00949049
I0412 12:45:29.867291 6200 solver.cpp:218] Iteration 276 (2.41046 iter/s, 4.97829s/12 iters), loss = 5.30589
I0412 12:45:29.867336 6200 solver.cpp:237] Train net output #0: loss = 5.30589 (* 1 = 5.30589 loss)
I0412 12:45:29.867348 6200 sgd_solver.cpp:105] Iteration 276, lr = 0.00946796
I0412 12:45:34.712762 6200 solver.cpp:218] Iteration 288 (2.47665 iter/s, 4.84526s/12 iters), loss = 5.29002
I0412 12:45:34.712803 6200 solver.cpp:237] Train net output #0: loss = 5.29002 (* 1 = 5.29002 loss)
I0412 12:45:34.712812 6200 sgd_solver.cpp:105] Iteration 288, lr = 0.00944548
I0412 12:45:39.559490 6200 solver.cpp:218] Iteration 300 (2.47601 iter/s, 4.84652s/12 iters), loss = 5.29276
I0412 12:45:39.559525 6200 solver.cpp:237] Train net output #0: loss = 5.29276 (* 1 = 5.29276 loss)
I0412 12:45:39.559535 6200 sgd_solver.cpp:105] Iteration 300, lr = 0.00942305
I0412 12:45:40.524821 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:45:41.534845 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_306.caffemodel
I0412 12:45:43.528228 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_306.solverstate
I0412 12:45:45.828728 6200 solver.cpp:330] Iteration 306, Testing net (#0)
I0412 12:45:45.828763 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:45:50.104923 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:45:50.261370 6200 solver.cpp:397] Test net output #0: accuracy = 0.00551471
I0412 12:45:50.261416 6200 solver.cpp:397] Test net output #1: loss = 5.28511 (* 1 = 5.28511 loss)
I0412 12:45:52.202020 6200 solver.cpp:218] Iteration 312 (0.949212 iter/s, 12.6421s/12 iters), loss = 5.28548
I0412 12:45:52.202067 6200 solver.cpp:237] Train net output #0: loss = 5.28548 (* 1 = 5.28548 loss)
I0412 12:45:52.202080 6200 sgd_solver.cpp:105] Iteration 312, lr = 0.00940068
I0412 12:45:57.124382 6200 solver.cpp:218] Iteration 324 (2.43796 iter/s, 4.92214s/12 iters), loss = 5.24686
I0412 12:45:57.124529 6200 solver.cpp:237] Train net output #0: loss = 5.24686 (* 1 = 5.24686 loss)
I0412 12:45:57.124542 6200 sgd_solver.cpp:105] Iteration 324, lr = 0.00937836
I0412 12:46:01.949796 6200 solver.cpp:218] Iteration 336 (2.487 iter/s, 4.8251s/12 iters), loss = 5.25637
I0412 12:46:01.949843 6200 solver.cpp:237] Train net output #0: loss = 5.25637 (* 1 = 5.25637 loss)
I0412 12:46:01.949856 6200 sgd_solver.cpp:105] Iteration 336, lr = 0.0093561
I0412 12:46:06.754177 6200 solver.cpp:218] Iteration 348 (2.49783 iter/s, 4.80417s/12 iters), loss = 5.263
I0412 12:46:06.754225 6200 solver.cpp:237] Train net output #0: loss = 5.263 (* 1 = 5.263 loss)
I0412 12:46:06.754237 6200 sgd_solver.cpp:105] Iteration 348, lr = 0.00933388
I0412 12:46:11.632820 6200 solver.cpp:218] Iteration 360 (2.45981 iter/s, 4.87843s/12 iters), loss = 5.26957
I0412 12:46:11.632856 6200 solver.cpp:237] Train net output #0: loss = 5.26957 (* 1 = 5.26957 loss)
I0412 12:46:11.632865 6200 sgd_solver.cpp:105] Iteration 360, lr = 0.00931172
I0412 12:46:16.528729 6200 solver.cpp:218] Iteration 372 (2.45113 iter/s, 4.8957s/12 iters), loss = 5.20652
I0412 12:46:16.528769 6200 solver.cpp:237] Train net output #0: loss = 5.20652 (* 1 = 5.20652 loss)
I0412 12:46:16.528779 6200 sgd_solver.cpp:105] Iteration 372, lr = 0.00928961
I0412 12:46:21.418817 6200 solver.cpp:218] Iteration 384 (2.45405 iter/s, 4.88988s/12 iters), loss = 5.20417
I0412 12:46:21.418859 6200 solver.cpp:237] Train net output #0: loss = 5.20417 (* 1 = 5.20417 loss)
I0412 12:46:21.418869 6200 sgd_solver.cpp:105] Iteration 384, lr = 0.00926756
I0412 12:46:26.283644 6200 solver.cpp:218] Iteration 396 (2.46679 iter/s, 4.86462s/12 iters), loss = 5.12691
I0412 12:46:26.283689 6200 solver.cpp:237] Train net output #0: loss = 5.12691 (* 1 = 5.12691 loss)
I0412 12:46:26.283701 6200 sgd_solver.cpp:105] Iteration 396, lr = 0.00924556
I0412 12:46:29.259817 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:46:30.644049 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_408.caffemodel
I0412 12:46:33.265967 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_408.solverstate
I0412 12:46:35.385210 6200 solver.cpp:330] Iteration 408, Testing net (#0)
I0412 12:46:35.385236 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:46:39.625635 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:46:39.827380 6200 solver.cpp:397] Test net output #0: accuracy = 0.00796569
I0412 12:46:39.827423 6200 solver.cpp:397] Test net output #1: loss = 5.19194 (* 1 = 5.19194 loss)
I0412 12:46:39.912551 6200 solver.cpp:218] Iteration 408 (0.880514 iter/s, 13.6284s/12 iters), loss = 5.25409
I0412 12:46:39.912595 6200 solver.cpp:237] Train net output #0: loss = 5.25409 (* 1 = 5.25409 loss)
I0412 12:46:39.912606 6200 sgd_solver.cpp:105] Iteration 408, lr = 0.00922361
I0412 12:46:44.089007 6200 solver.cpp:218] Iteration 420 (2.87338 iter/s, 4.17626s/12 iters), loss = 5.25
I0412 12:46:44.089051 6200 solver.cpp:237] Train net output #0: loss = 5.25 (* 1 = 5.25 loss)
I0412 12:46:44.089061 6200 sgd_solver.cpp:105] Iteration 420, lr = 0.00920171
I0412 12:46:48.860289 6200 solver.cpp:218] Iteration 432 (2.51516 iter/s, 4.77107s/12 iters), loss = 5.17866
I0412 12:46:48.860335 6200 solver.cpp:237] Train net output #0: loss = 5.17866 (* 1 = 5.17866 loss)
I0412 12:46:48.860347 6200 sgd_solver.cpp:105] Iteration 432, lr = 0.00917986
I0412 12:46:53.758358 6200 solver.cpp:218] Iteration 444 (2.45005 iter/s, 4.89785s/12 iters), loss = 5.15224
I0412 12:46:53.758400 6200 solver.cpp:237] Train net output #0: loss = 5.15224 (* 1 = 5.15224 loss)
I0412 12:46:53.758412 6200 sgd_solver.cpp:105] Iteration 444, lr = 0.00915807
I0412 12:46:58.573980 6200 solver.cpp:218] Iteration 456 (2.492 iter/s, 4.81542s/12 iters), loss = 5.25666
I0412 12:46:58.574019 6200 solver.cpp:237] Train net output #0: loss = 5.25666 (* 1 = 5.25666 loss)
I0412 12:46:58.574029 6200 sgd_solver.cpp:105] Iteration 456, lr = 0.00913632
I0412 12:47:03.505192 6200 solver.cpp:218] Iteration 468 (2.43358 iter/s, 4.931s/12 iters), loss = 5.16849
I0412 12:47:03.505316 6200 solver.cpp:237] Train net output #0: loss = 5.16849 (* 1 = 5.16849 loss)
I0412 12:47:03.505329 6200 sgd_solver.cpp:105] Iteration 468, lr = 0.00911463
I0412 12:47:08.295526 6200 solver.cpp:218] Iteration 480 (2.50519 iter/s, 4.79005s/12 iters), loss = 5.13989
I0412 12:47:08.295568 6200 solver.cpp:237] Train net output #0: loss = 5.13989 (* 1 = 5.13989 loss)
I0412 12:47:08.295580 6200 sgd_solver.cpp:105] Iteration 480, lr = 0.00909299
I0412 12:47:13.125854 6200 solver.cpp:218] Iteration 492 (2.48441 iter/s, 4.83012s/12 iters), loss = 5.18368
I0412 12:47:13.125895 6200 solver.cpp:237] Train net output #0: loss = 5.18368 (* 1 = 5.18368 loss)
I0412 12:47:13.125906 6200 sgd_solver.cpp:105] Iteration 492, lr = 0.0090714
I0412 12:47:17.914045 6200 solver.cpp:218] Iteration 504 (2.50628 iter/s, 4.78798s/12 iters), loss = 5.19726
I0412 12:47:17.914090 6200 solver.cpp:237] Train net output #0: loss = 5.19726 (* 1 = 5.19726 loss)
I0412 12:47:17.914103 6200 sgd_solver.cpp:105] Iteration 504, lr = 0.00904986
I0412 12:47:18.173023 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:47:19.867420 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_510.caffemodel
I0412 12:47:24.484581 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_510.solverstate
I0412 12:47:29.363060 6200 solver.cpp:330] Iteration 510, Testing net (#0)
I0412 12:47:29.363082 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:47:33.559221 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:47:33.795363 6200 solver.cpp:397] Test net output #0: accuracy = 0.00857843
I0412 12:47:33.795409 6200 solver.cpp:397] Test net output #1: loss = 5.15661 (* 1 = 5.15661 loss)
I0412 12:47:35.732496 6200 solver.cpp:218] Iteration 516 (0.673483 iter/s, 17.8178s/12 iters), loss = 5.13855
I0412 12:47:35.732547 6200 solver.cpp:237] Train net output #0: loss = 5.13855 (* 1 = 5.13855 loss)
I0412 12:47:35.732558 6200 sgd_solver.cpp:105] Iteration 516, lr = 0.00902838
I0412 12:47:40.628947 6200 solver.cpp:218] Iteration 528 (2.45086 iter/s, 4.89623s/12 iters), loss = 5.18965
I0412 12:47:40.628994 6200 solver.cpp:237] Train net output #0: loss = 5.18965 (* 1 = 5.18965 loss)
I0412 12:47:40.629007 6200 sgd_solver.cpp:105] Iteration 528, lr = 0.00900694
I0412 12:47:45.496062 6200 solver.cpp:218] Iteration 540 (2.46563 iter/s, 4.86691s/12 iters), loss = 5.15013
I0412 12:47:45.496098 6200 solver.cpp:237] Train net output #0: loss = 5.15013 (* 1 = 5.15013 loss)
I0412 12:47:45.496106 6200 sgd_solver.cpp:105] Iteration 540, lr = 0.00898556
I0412 12:47:50.282233 6200 solver.cpp:218] Iteration 552 (2.50733 iter/s, 4.78597s/12 iters), loss = 5.1101
I0412 12:47:50.282275 6200 solver.cpp:237] Train net output #0: loss = 5.1101 (* 1 = 5.1101 loss)
I0412 12:47:50.282287 6200 sgd_solver.cpp:105] Iteration 552, lr = 0.00896423
I0412 12:47:55.179790 6200 solver.cpp:218] Iteration 564 (2.45031 iter/s, 4.89735s/12 iters), loss = 5.16008
I0412 12:47:55.179836 6200 solver.cpp:237] Train net output #0: loss = 5.16008 (* 1 = 5.16008 loss)
I0412 12:47:55.179847 6200 sgd_solver.cpp:105] Iteration 564, lr = 0.00894294
I0412 12:48:00.058307 6200 solver.cpp:218] Iteration 576 (2.45987 iter/s, 4.87831s/12 iters), loss = 5.0977
I0412 12:48:00.058355 6200 solver.cpp:237] Train net output #0: loss = 5.0977 (* 1 = 5.0977 loss)
I0412 12:48:00.058368 6200 sgd_solver.cpp:105] Iteration 576, lr = 0.00892171
I0412 12:48:04.845983 6200 solver.cpp:218] Iteration 588 (2.50654 iter/s, 4.78747s/12 iters), loss = 5.11338
I0412 12:48:04.846139 6200 solver.cpp:237] Train net output #0: loss = 5.11338 (* 1 = 5.11338 loss)
I0412 12:48:04.846153 6200 sgd_solver.cpp:105] Iteration 588, lr = 0.00890053
I0412 12:48:09.773476 6200 solver.cpp:218] Iteration 600 (2.43547 iter/s, 4.92718s/12 iters), loss = 5.12066
I0412 12:48:09.773517 6200 solver.cpp:237] Train net output #0: loss = 5.12066 (* 1 = 5.12066 loss)
I0412 12:48:09.773527 6200 sgd_solver.cpp:105] Iteration 600, lr = 0.0088794
I0412 12:48:12.081336 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:48:14.179677 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_612.caffemodel
I0412 12:48:16.196909 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_612.solverstate
I0412 12:48:17.758641 6200 solver.cpp:330] Iteration 612, Testing net (#0)
I0412 12:48:17.758666 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:48:21.870329 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:48:22.153467 6200 solver.cpp:397] Test net output #0: accuracy = 0.0128676
I0412 12:48:22.153501 6200 solver.cpp:397] Test net output #1: loss = 5.11622 (* 1 = 5.11622 loss)
I0412 12:48:22.238582 6200 solver.cpp:218] Iteration 612 (0.962722 iter/s, 12.4647s/12 iters), loss = 5.13863
I0412 12:48:22.238620 6200 solver.cpp:237] Train net output #0: loss = 5.13863 (* 1 = 5.13863 loss)
I0412 12:48:22.238631 6200 sgd_solver.cpp:105] Iteration 612, lr = 0.00885831
I0412 12:48:26.406509 6200 solver.cpp:218] Iteration 624 (2.87926 iter/s, 4.16774s/12 iters), loss = 5.11865
I0412 12:48:26.406555 6200 solver.cpp:237] Train net output #0: loss = 5.11865 (* 1 = 5.11865 loss)
I0412 12:48:26.406565 6200 sgd_solver.cpp:105] Iteration 624, lr = 0.00883728
I0412 12:48:31.158201 6200 solver.cpp:218] Iteration 636 (2.52553 iter/s, 4.75149s/12 iters), loss = 5.0215
I0412 12:48:31.158243 6200 solver.cpp:237] Train net output #0: loss = 5.0215 (* 1 = 5.0215 loss)
I0412 12:48:31.158255 6200 sgd_solver.cpp:105] Iteration 636, lr = 0.0088163
I0412 12:48:35.913324 6200 solver.cpp:218] Iteration 648 (2.5237 iter/s, 4.75492s/12 iters), loss = 5.17387
I0412 12:48:35.913441 6200 solver.cpp:237] Train net output #0: loss = 5.17387 (* 1 = 5.17387 loss)
I0412 12:48:35.913453 6200 sgd_solver.cpp:105] Iteration 648, lr = 0.00879537
I0412 12:48:40.696321 6200 solver.cpp:218] Iteration 660 (2.50903 iter/s, 4.78272s/12 iters), loss = 5.07469
I0412 12:48:40.696368 6200 solver.cpp:237] Train net output #0: loss = 5.07469 (* 1 = 5.07469 loss)
I0412 12:48:40.696379 6200 sgd_solver.cpp:105] Iteration 660, lr = 0.00877449
I0412 12:48:45.499676 6200 solver.cpp:218] Iteration 672 (2.49836 iter/s, 4.80314s/12 iters), loss = 5.10536
I0412 12:48:45.499733 6200 solver.cpp:237] Train net output #0: loss = 5.10536 (* 1 = 5.10536 loss)
I0412 12:48:45.499749 6200 sgd_solver.cpp:105] Iteration 672, lr = 0.00875366
I0412 12:48:50.306782 6200 solver.cpp:218] Iteration 684 (2.49642 iter/s, 4.80689s/12 iters), loss = 4.91974
I0412 12:48:50.306824 6200 solver.cpp:237] Train net output #0: loss = 4.91974 (* 1 = 4.91974 loss)
I0412 12:48:50.306835 6200 sgd_solver.cpp:105] Iteration 684, lr = 0.00873287
I0412 12:48:50.666425 6200 blocking_queue.cpp:49] Waiting for data
I0412 12:48:55.200419 6200 solver.cpp:218] Iteration 696 (2.45227 iter/s, 4.89343s/12 iters), loss = 5.06842
I0412 12:48:55.200466 6200 solver.cpp:237] Train net output #0: loss = 5.06842 (* 1 = 5.06842 loss)
I0412 12:48:55.200479 6200 sgd_solver.cpp:105] Iteration 696, lr = 0.00871214
I0412 12:48:59.710296 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:49:00.082803 6200 solver.cpp:218] Iteration 708 (2.45792 iter/s, 4.88217s/12 iters), loss = 5.13213
I0412 12:49:00.082851 6200 solver.cpp:237] Train net output #0: loss = 5.13213 (* 1 = 5.13213 loss)
I0412 12:49:00.082863 6200 sgd_solver.cpp:105] Iteration 708, lr = 0.00869145
I0412 12:49:02.061528 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_714.caffemodel
I0412 12:49:04.085319 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_714.solverstate
I0412 12:49:05.645586 6200 solver.cpp:330] Iteration 714, Testing net (#0)
I0412 12:49:05.645610 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:49:09.634178 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:49:09.951778 6200 solver.cpp:397] Test net output #0: accuracy = 0.0122549
I0412 12:49:09.951822 6200 solver.cpp:397] Test net output #1: loss = 5.06603 (* 1 = 5.06603 loss)
I0412 12:49:11.729269 6200 solver.cpp:218] Iteration 720 (1.03039 iter/s, 11.646s/12 iters), loss = 5.14771
I0412 12:49:11.729307 6200 solver.cpp:237] Train net output #0: loss = 5.14771 (* 1 = 5.14771 loss)
I0412 12:49:11.729316 6200 sgd_solver.cpp:105] Iteration 720, lr = 0.00867082
I0412 12:49:16.543406 6200 solver.cpp:218] Iteration 732 (2.49277 iter/s, 4.81393s/12 iters), loss = 4.97012
I0412 12:49:16.543457 6200 solver.cpp:237] Train net output #0: loss = 4.97012 (* 1 = 4.97012 loss)
I0412 12:49:16.543467 6200 sgd_solver.cpp:105] Iteration 732, lr = 0.00865023
I0412 12:49:21.436856 6200 solver.cpp:218] Iteration 744 (2.45236 iter/s, 4.89324s/12 iters), loss = 5.03528
I0412 12:49:21.436898 6200 solver.cpp:237] Train net output #0: loss = 5.03528 (* 1 = 5.03528 loss)
I0412 12:49:21.436909 6200 sgd_solver.cpp:105] Iteration 744, lr = 0.0086297
I0412 12:49:26.200538 6200 solver.cpp:218] Iteration 756 (2.51917 iter/s, 4.76347s/12 iters), loss = 5.03601
I0412 12:49:26.200593 6200 solver.cpp:237] Train net output #0: loss = 5.03601 (* 1 = 5.03601 loss)
I0412 12:49:26.200606 6200 sgd_solver.cpp:105] Iteration 756, lr = 0.00860921
I0412 12:49:31.085985 6200 solver.cpp:218] Iteration 768 (2.45639 iter/s, 4.88522s/12 iters), loss = 5.08884
I0412 12:49:31.086021 6200 solver.cpp:237] Train net output #0: loss = 5.08884 (* 1 = 5.08884 loss)
I0412 12:49:31.086030 6200 sgd_solver.cpp:105] Iteration 768, lr = 0.00858877
I0412 12:49:35.955350 6200 solver.cpp:218] Iteration 780 (2.46449 iter/s, 4.86916s/12 iters), loss = 5.08089
I0412 12:49:35.955389 6200 solver.cpp:237] Train net output #0: loss = 5.08089 (* 1 = 5.08089 loss)
I0412 12:49:35.955397 6200 sgd_solver.cpp:105] Iteration 780, lr = 0.00856838
I0412 12:49:40.845616 6200 solver.cpp:218] Iteration 792 (2.45396 iter/s, 4.89006s/12 iters), loss = 4.98639
I0412 12:49:40.845753 6200 solver.cpp:237] Train net output #0: loss = 4.98639 (* 1 = 4.98639 loss)
I0412 12:49:40.845767 6200 sgd_solver.cpp:105] Iteration 792, lr = 0.00854803
I0412 12:49:45.664124 6200 solver.cpp:218] Iteration 804 (2.49055 iter/s, 4.81821s/12 iters), loss = 5.06424
I0412 12:49:45.664170 6200 solver.cpp:237] Train net output #0: loss = 5.06424 (* 1 = 5.06424 loss)
I0412 12:49:45.664183 6200 sgd_solver.cpp:105] Iteration 804, lr = 0.00852774
I0412 12:49:47.315809 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:49:49.985700 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_816.caffemodel
I0412 12:49:52.542279 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_816.solverstate
I0412 12:49:55.174886 6200 solver.cpp:330] Iteration 816, Testing net (#0)
I0412 12:49:55.174911 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:49:59.314908 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:49:59.667682 6200 solver.cpp:397] Test net output #0: accuracy = 0.0140931
I0412 12:49:59.667729 6200 solver.cpp:397] Test net output #1: loss = 5.03356 (* 1 = 5.03356 loss)
I0412 12:49:59.752951 6200 solver.cpp:218] Iteration 816 (0.851769 iter/s, 14.0883s/12 iters), loss = 5.10569
I0412 12:49:59.753005 6200 solver.cpp:237] Train net output #0: loss = 5.10569 (* 1 = 5.10569 loss)
I0412 12:49:59.753016 6200 sgd_solver.cpp:105] Iteration 816, lr = 0.00850749
I0412 12:50:03.989770 6200 solver.cpp:218] Iteration 828 (2.83245 iter/s, 4.23661s/12 iters), loss = 5.10642
I0412 12:50:03.989817 6200 solver.cpp:237] Train net output #0: loss = 5.10642 (* 1 = 5.10642 loss)
I0412 12:50:03.989830 6200 sgd_solver.cpp:105] Iteration 828, lr = 0.00848729
I0412 12:50:08.839556 6200 solver.cpp:218] Iteration 840 (2.47444 iter/s, 4.84958s/12 iters), loss = 4.93866
I0412 12:50:08.839602 6200 solver.cpp:237] Train net output #0: loss = 4.93866 (* 1 = 4.93866 loss)
I0412 12:50:08.839615 6200 sgd_solver.cpp:105] Iteration 840, lr = 0.00846714
I0412 12:50:13.640779 6200 solver.cpp:218] Iteration 852 (2.49947 iter/s, 4.80102s/12 iters), loss = 4.90426
I0412 12:50:13.640919 6200 solver.cpp:237] Train net output #0: loss = 4.90426 (* 1 = 4.90426 loss)
I0412 12:50:13.640933 6200 sgd_solver.cpp:105] Iteration 852, lr = 0.00844704
I0412 12:50:18.428814 6200 solver.cpp:218] Iteration 864 (2.5064 iter/s, 4.78774s/12 iters), loss = 5.00598
I0412 12:50:18.428865 6200 solver.cpp:237] Train net output #0: loss = 5.00598 (* 1 = 5.00598 loss)
I0412 12:50:18.428879 6200 sgd_solver.cpp:105] Iteration 864, lr = 0.00842698
I0412 12:50:23.270452 6200 solver.cpp:218] Iteration 876 (2.47861 iter/s, 4.84143s/12 iters), loss = 5.04408
I0412 12:50:23.270498 6200 solver.cpp:237] Train net output #0: loss = 5.04408 (* 1 = 5.04408 loss)
I0412 12:50:23.270510 6200 sgd_solver.cpp:105] Iteration 876, lr = 0.00840698
I0412 12:50:28.105100 6200 solver.cpp:218] Iteration 888 (2.48219 iter/s, 4.83444s/12 iters), loss = 4.94274
I0412 12:50:28.105147 6200 solver.cpp:237] Train net output #0: loss = 4.94274 (* 1 = 4.94274 loss)
I0412 12:50:28.105160 6200 sgd_solver.cpp:105] Iteration 888, lr = 0.00838702
I0412 12:50:32.977931 6200 solver.cpp:218] Iteration 900 (2.46274 iter/s, 4.87262s/12 iters), loss = 5.07907
I0412 12:50:32.977988 6200 solver.cpp:237] Train net output #0: loss = 5.07907 (* 1 = 5.07907 loss)
I0412 12:50:32.978001 6200 sgd_solver.cpp:105] Iteration 900, lr = 0.0083671
I0412 12:50:36.719875 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:50:37.824852 6200 solver.cpp:218] Iteration 912 (2.47591 iter/s, 4.84671s/12 iters), loss = 4.8915
I0412 12:50:37.824899 6200 solver.cpp:237] Train net output #0: loss = 4.8915 (* 1 = 4.8915 loss)
I0412 12:50:37.824913 6200 sgd_solver.cpp:105] Iteration 912, lr = 0.00834724
I0412 12:50:39.753726 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_918.caffemodel
I0412 12:50:41.865799 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_918.solverstate
I0412 12:50:43.416007 6200 solver.cpp:330] Iteration 918, Testing net (#0)
I0412 12:50:43.416028 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:50:47.480468 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:50:47.884318 6200 solver.cpp:397] Test net output #0: accuracy = 0.0226716
I0412 12:50:47.884366 6200 solver.cpp:397] Test net output #1: loss = 4.96989 (* 1 = 4.96989 loss)
I0412 12:50:49.660306 6200 solver.cpp:218] Iteration 924 (1.01394 iter/s, 11.835s/12 iters), loss = 4.96917
I0412 12:50:49.660353 6200 solver.cpp:237] Train net output #0: loss = 4.96917 (* 1 = 4.96917 loss)
I0412 12:50:49.660365 6200 sgd_solver.cpp:105] Iteration 924, lr = 0.00832742
I0412 12:50:54.485672 6200 solver.cpp:218] Iteration 936 (2.48697 iter/s, 4.82516s/12 iters), loss = 4.97233
I0412 12:50:54.485730 6200 solver.cpp:237] Train net output #0: loss = 4.97233 (* 1 = 4.97233 loss)
I0412 12:50:54.485744 6200 sgd_solver.cpp:105] Iteration 936, lr = 0.00830765
I0412 12:50:59.218894 6200 solver.cpp:218] Iteration 948 (2.53539 iter/s, 4.73301s/12 iters), loss = 4.97604
I0412 12:50:59.218945 6200 solver.cpp:237] Train net output #0: loss = 4.97604 (* 1 = 4.97604 loss)
I0412 12:50:59.218955 6200 sgd_solver.cpp:105] Iteration 948, lr = 0.00828793
I0412 12:51:03.972944 6200 solver.cpp:218] Iteration 960 (2.52427 iter/s, 4.75385s/12 iters), loss = 4.89748
I0412 12:51:03.972991 6200 solver.cpp:237] Train net output #0: loss = 4.89748 (* 1 = 4.89748 loss)
I0412 12:51:03.973002 6200 sgd_solver.cpp:105] Iteration 960, lr = 0.00826825
I0412 12:51:08.690605 6200 solver.cpp:218] Iteration 972 (2.54374 iter/s, 4.71746s/12 iters), loss = 4.94121
I0412 12:51:08.690647 6200 solver.cpp:237] Train net output #0: loss = 4.94121 (* 1 = 4.94121 loss)
I0412 12:51:08.690658 6200 sgd_solver.cpp:105] Iteration 972, lr = 0.00824862
I0412 12:51:13.456575 6200 solver.cpp:218] Iteration 984 (2.51796 iter/s, 4.76577s/12 iters), loss = 4.90821
I0412 12:51:13.456634 6200 solver.cpp:237] Train net output #0: loss = 4.90821 (* 1 = 4.90821 loss)
I0412 12:51:13.456647 6200 sgd_solver.cpp:105] Iteration 984, lr = 0.00822903
I0412 12:51:18.287834 6200 solver.cpp:218] Iteration 996 (2.48393 iter/s, 4.83105s/12 iters), loss = 4.79568
I0412 12:51:18.287935 6200 solver.cpp:237] Train net output #0: loss = 4.79568 (* 1 = 4.79568 loss)
I0412 12:51:18.287945 6200 sgd_solver.cpp:105] Iteration 996, lr = 0.0082095
I0412 12:51:23.213214 6200 solver.cpp:218] Iteration 1008 (2.43649 iter/s, 4.92512s/12 iters), loss = 4.95068
I0412 12:51:23.213263 6200 solver.cpp:237] Train net output #0: loss = 4.95068 (* 1 = 4.95068 loss)
I0412 12:51:23.213276 6200 sgd_solver.cpp:105] Iteration 1008, lr = 0.00819001
I0412 12:51:24.192101 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:51:27.630270 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1020.caffemodel
I0412 12:51:29.787312 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1020.solverstate
I0412 12:51:31.355614 6200 solver.cpp:330] Iteration 1020, Testing net (#0)
I0412 12:51:31.355641 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:51:35.358956 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:51:35.788372 6200 solver.cpp:397] Test net output #0: accuracy = 0.0251225
I0412 12:51:35.788419 6200 solver.cpp:397] Test net output #1: loss = 4.93609 (* 1 = 4.93609 loss)
I0412 12:51:35.873895 6200 solver.cpp:218] Iteration 1020 (0.947849 iter/s, 12.6602s/12 iters), loss = 4.83221
I0412 12:51:35.873950 6200 solver.cpp:237] Train net output #0: loss = 4.83221 (* 1 = 4.83221 loss)
I0412 12:51:35.873976 6200 sgd_solver.cpp:105] Iteration 1020, lr = 0.00817056
I0412 12:51:39.814534 6200 solver.cpp:218] Iteration 1032 (3.04533 iter/s, 3.94045s/12 iters), loss = 4.85799
I0412 12:51:39.814582 6200 solver.cpp:237] Train net output #0: loss = 4.85799 (* 1 = 4.85799 loss)
I0412 12:51:39.814595 6200 sgd_solver.cpp:105] Iteration 1032, lr = 0.00815116
I0412 12:51:45.027765 6200 solver.cpp:218] Iteration 1044 (2.30193 iter/s, 5.21302s/12 iters), loss = 4.96661
I0412 12:51:45.027812 6200 solver.cpp:237] Train net output #0: loss = 4.96661 (* 1 = 4.96661 loss)
I0412 12:51:45.027823 6200 sgd_solver.cpp:105] Iteration 1044, lr = 0.00813181
I0412 12:51:49.867318 6200 solver.cpp:218] Iteration 1056 (2.47968 iter/s, 4.83934s/12 iters), loss = 4.8684
I0412 12:51:49.867484 6200 solver.cpp:237] Train net output #0: loss = 4.8684 (* 1 = 4.8684 loss)
I0412 12:51:49.867504 6200 sgd_solver.cpp:105] Iteration 1056, lr = 0.0081125
I0412 12:51:54.724697 6200 solver.cpp:218] Iteration 1068 (2.47063 iter/s, 4.85707s/12 iters), loss = 4.9322
I0412 12:51:54.724741 6200 solver.cpp:237] Train net output #0: loss = 4.9322 (* 1 = 4.9322 loss)
I0412 12:51:54.724751 6200 sgd_solver.cpp:105] Iteration 1068, lr = 0.00809324
I0412 12:51:59.591224 6200 solver.cpp:218] Iteration 1080 (2.46593 iter/s, 4.86633s/12 iters), loss = 4.75652
I0412 12:51:59.591264 6200 solver.cpp:237] Train net output #0: loss = 4.75652 (* 1 = 4.75652 loss)
I0412 12:51:59.591271 6200 sgd_solver.cpp:105] Iteration 1080, lr = 0.00807403
I0412 12:52:04.455893 6200 solver.cpp:218] Iteration 1092 (2.46687 iter/s, 4.86447s/12 iters), loss = 4.87008
I0412 12:52:04.455940 6200 solver.cpp:237] Train net output #0: loss = 4.87008 (* 1 = 4.87008 loss)
I0412 12:52:04.455952 6200 sgd_solver.cpp:105] Iteration 1092, lr = 0.00805486
I0412 12:52:09.302680 6200 solver.cpp:218] Iteration 1104 (2.47597 iter/s, 4.84659s/12 iters), loss = 4.85103
I0412 12:52:09.302731 6200 solver.cpp:237] Train net output #0: loss = 4.85103 (* 1 = 4.85103 loss)
I0412 12:52:09.302742 6200 sgd_solver.cpp:105] Iteration 1104, lr = 0.00803573
I0412 12:52:12.272256 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:52:14.055140 6200 solver.cpp:218] Iteration 1116 (2.52511 iter/s, 4.75226s/12 iters), loss = 4.86526
I0412 12:52:14.055176 6200 solver.cpp:237] Train net output #0: loss = 4.86526 (* 1 = 4.86526 loss)
I0412 12:52:14.055186 6200 sgd_solver.cpp:105] Iteration 1116, lr = 0.00801666
I0412 12:52:16.072424 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1122.caffemodel
I0412 12:52:18.114478 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1122.solverstate
I0412 12:52:20.221258 6200 solver.cpp:330] Iteration 1122, Testing net (#0)
I0412 12:52:20.221336 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:52:24.053294 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:52:24.526584 6200 solver.cpp:397] Test net output #0: accuracy = 0.0398284
I0412 12:52:24.526631 6200 solver.cpp:397] Test net output #1: loss = 4.82837 (* 1 = 4.82837 loss)
I0412 12:52:26.380666 6200 solver.cpp:218] Iteration 1128 (0.973622 iter/s, 12.3251s/12 iters), loss = 4.90611
I0412 12:52:26.380715 6200 solver.cpp:237] Train net output #0: loss = 4.90611 (* 1 = 4.90611 loss)
I0412 12:52:26.380728 6200 sgd_solver.cpp:105] Iteration 1128, lr = 0.00799762
I0412 12:52:31.235987 6200 solver.cpp:218] Iteration 1140 (2.47162 iter/s, 4.85512s/12 iters), loss = 4.83103
I0412 12:52:31.236021 6200 solver.cpp:237] Train net output #0: loss = 4.83103 (* 1 = 4.83103 loss)
I0412 12:52:31.236030 6200 sgd_solver.cpp:105] Iteration 1140, lr = 0.00797863
I0412 12:52:36.049161 6200 solver.cpp:218] Iteration 1152 (2.49326 iter/s, 4.81298s/12 iters), loss = 4.73801
I0412 12:52:36.049202 6200 solver.cpp:237] Train net output #0: loss = 4.73801 (* 1 = 4.73801 loss)
I0412 12:52:36.049211 6200 sgd_solver.cpp:105] Iteration 1152, lr = 0.00795969
I0412 12:52:40.861441 6200 solver.cpp:218] Iteration 1164 (2.49372 iter/s, 4.81209s/12 iters), loss = 4.69134
I0412 12:52:40.861486 6200 solver.cpp:237] Train net output #0: loss = 4.69134 (* 1 = 4.69134 loss)
I0412 12:52:40.861497 6200 sgd_solver.cpp:105] Iteration 1164, lr = 0.00794079
I0412 12:52:45.587611 6200 solver.cpp:218] Iteration 1176 (2.53916 iter/s, 4.72598s/12 iters), loss = 4.83549
I0412 12:52:45.587652 6200 solver.cpp:237] Train net output #0: loss = 4.83549 (* 1 = 4.83549 loss)
I0412 12:52:45.587663 6200 sgd_solver.cpp:105] Iteration 1176, lr = 0.00792194
I0412 12:52:50.466256 6200 solver.cpp:218] Iteration 1188 (2.4598 iter/s, 4.87845s/12 iters), loss = 4.76906
I0412 12:52:50.466413 6200 solver.cpp:237] Train net output #0: loss = 4.76906 (* 1 = 4.76906 loss)
I0412 12:52:50.466424 6200 sgd_solver.cpp:105] Iteration 1188, lr = 0.00790313
I0412 12:52:55.251955 6200 solver.cpp:218] Iteration 1200 (2.50763 iter/s, 4.78539s/12 iters), loss = 4.73812
I0412 12:52:55.252004 6200 solver.cpp:237] Train net output #0: loss = 4.73812 (* 1 = 4.73812 loss)
I0412 12:52:55.252017 6200 sgd_solver.cpp:105] Iteration 1200, lr = 0.00788437
I0412 12:53:00.169983 6200 solver.cpp:218] Iteration 1212 (2.4401 iter/s, 4.91782s/12 iters), loss = 4.70471
I0412 12:53:00.170029 6200 solver.cpp:237] Train net output #0: loss = 4.70471 (* 1 = 4.70471 loss)
I0412 12:53:00.170040 6200 sgd_solver.cpp:105] Iteration 1212, lr = 0.00786565
I0412 12:53:00.460319 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:53:04.500243 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1224.caffemodel
I0412 12:53:07.313792 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1224.solverstate
I0412 12:53:09.958847 6200 solver.cpp:330] Iteration 1224, Testing net (#0)
I0412 12:53:09.958875 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:53:13.896950 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:53:14.438614 6200 solver.cpp:397] Test net output #0: accuracy = 0.0392157
I0412 12:53:14.438664 6200 solver.cpp:397] Test net output #1: loss = 4.71964 (* 1 = 4.71964 loss)
I0412 12:53:14.523952 6200 solver.cpp:218] Iteration 1224 (0.836034 iter/s, 14.3535s/12 iters), loss = 4.74197
I0412 12:53:14.524008 6200 solver.cpp:237] Train net output #0: loss = 4.74197 (* 1 = 4.74197 loss)
I0412 12:53:14.524020 6200 sgd_solver.cpp:105] Iteration 1224, lr = 0.00784697
I0412 12:53:18.672845 6200 solver.cpp:218] Iteration 1236 (2.89247 iter/s, 4.14871s/12 iters), loss = 4.80712
I0412 12:53:18.672885 6200 solver.cpp:237] Train net output #0: loss = 4.80712 (* 1 = 4.80712 loss)
I0412 12:53:18.672895 6200 sgd_solver.cpp:105] Iteration 1236, lr = 0.00782834
I0412 12:53:23.501721 6200 solver.cpp:218] Iteration 1248 (2.48515 iter/s, 4.82869s/12 iters), loss = 4.66801
I0412 12:53:23.501806 6200 solver.cpp:237] Train net output #0: loss = 4.66801 (* 1 = 4.66801 loss)
I0412 12:53:23.501816 6200 sgd_solver.cpp:105] Iteration 1248, lr = 0.00780976
I0412 12:53:28.417883 6200 solver.cpp:218] Iteration 1260 (2.44105 iter/s, 4.91592s/12 iters), loss = 4.63375
I0412 12:53:28.417932 6200 solver.cpp:237] Train net output #0: loss = 4.63375 (* 1 = 4.63375 loss)
I0412 12:53:28.417944 6200 sgd_solver.cpp:105] Iteration 1260, lr = 0.00779122
I0412 12:53:33.329792 6200 solver.cpp:218] Iteration 1272 (2.44314 iter/s, 4.91171s/12 iters), loss = 4.58707
I0412 12:53:33.329830 6200 solver.cpp:237] Train net output #0: loss = 4.58707 (* 1 = 4.58707 loss)
I0412 12:53:33.329839 6200 sgd_solver.cpp:105] Iteration 1272, lr = 0.00777272
I0412 12:53:38.170421 6200 solver.cpp:218] Iteration 1284 (2.47912 iter/s, 4.84044s/12 iters), loss = 4.64647
I0412 12:53:38.170470 6200 solver.cpp:237] Train net output #0: loss = 4.64647 (* 1 = 4.64647 loss)
I0412 12:53:38.170483 6200 sgd_solver.cpp:105] Iteration 1284, lr = 0.00775426
I0412 12:53:42.932113 6200 solver.cpp:218] Iteration 1296 (2.52022 iter/s, 4.76149s/12 iters), loss = 4.55544
I0412 12:53:42.932169 6200 solver.cpp:237] Train net output #0: loss = 4.55544 (* 1 = 4.55544 loss)
I0412 12:53:42.932185 6200 sgd_solver.cpp:105] Iteration 1296, lr = 0.00773585
I0412 12:53:47.796079 6200 solver.cpp:218] Iteration 1308 (2.46723 iter/s, 4.86376s/12 iters), loss = 4.4895
I0412 12:53:47.796130 6200 solver.cpp:237] Train net output #0: loss = 4.4895 (* 1 = 4.4895 loss)
I0412 12:53:47.796144 6200 sgd_solver.cpp:105] Iteration 1308, lr = 0.00771749
I0412 12:53:50.267814 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:53:52.681386 6200 solver.cpp:218] Iteration 1320 (2.45645 iter/s, 4.8851s/12 iters), loss = 4.59599
I0412 12:53:52.681432 6200 solver.cpp:237] Train net output #0: loss = 4.59599 (* 1 = 4.59599 loss)
I0412 12:53:52.681444 6200 sgd_solver.cpp:105] Iteration 1320, lr = 0.00769916
I0412 12:53:54.664109 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1326.caffemodel
I0412 12:53:57.008929 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1326.solverstate
I0412 12:53:58.560588 6200 solver.cpp:330] Iteration 1326, Testing net (#0)
I0412 12:53:58.560614 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:54:02.403553 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:54:02.957298 6200 solver.cpp:397] Test net output #0: accuracy = 0.0447304
I0412 12:54:02.957355 6200 solver.cpp:397] Test net output #1: loss = 4.69486 (* 1 = 4.69486 loss)
I0412 12:54:04.890967 6200 solver.cpp:218] Iteration 1332 (0.982867 iter/s, 12.2092s/12 iters), loss = 4.50128
I0412 12:54:04.891003 6200 solver.cpp:237] Train net output #0: loss = 4.50128 (* 1 = 4.50128 loss)
I0412 12:54:04.891011 6200 sgd_solver.cpp:105] Iteration 1332, lr = 0.00768088
I0412 12:54:09.704994 6200 solver.cpp:218] Iteration 1344 (2.49281 iter/s, 4.81384s/12 iters), loss = 4.42197
I0412 12:54:09.705031 6200 solver.cpp:237] Train net output #0: loss = 4.42197 (* 1 = 4.42197 loss)
I0412 12:54:09.705039 6200 sgd_solver.cpp:105] Iteration 1344, lr = 0.00766265
I0412 12:54:14.579286 6200 solver.cpp:218] Iteration 1356 (2.46199 iter/s, 4.8741s/12 iters), loss = 4.76751
I0412 12:54:14.579340 6200 solver.cpp:237] Train net output #0: loss = 4.76751 (* 1 = 4.76751 loss)
I0412 12:54:14.579355 6200 sgd_solver.cpp:105] Iteration 1356, lr = 0.00764446
I0412 12:54:19.420652 6200 solver.cpp:218] Iteration 1368 (2.47875 iter/s, 4.84116s/12 iters), loss = 4.63932
I0412 12:54:19.420711 6200 solver.cpp:237] Train net output #0: loss = 4.63932 (* 1 = 4.63932 loss)
I0412 12:54:19.420725 6200 sgd_solver.cpp:105] Iteration 1368, lr = 0.00762631
I0412 12:54:20.188743 6200 blocking_queue.cpp:49] Waiting for data
I0412 12:54:24.329597 6200 solver.cpp:218] Iteration 1380 (2.44462 iter/s, 4.90873s/12 iters), loss = 4.42182
I0412 12:54:24.329641 6200 solver.cpp:237] Train net output #0: loss = 4.42182 (* 1 = 4.42182 loss)
I0412 12:54:24.329651 6200 sgd_solver.cpp:105] Iteration 1380, lr = 0.0076082
I0412 12:54:29.206840 6200 solver.cpp:218] Iteration 1392 (2.4605 iter/s, 4.87705s/12 iters), loss = 4.573
I0412 12:54:29.206925 6200 solver.cpp:237] Train net output #0: loss = 4.573 (* 1 = 4.573 loss)
I0412 12:54:29.206935 6200 sgd_solver.cpp:105] Iteration 1392, lr = 0.00759014
I0412 12:54:34.099941 6200 solver.cpp:218] Iteration 1404 (2.45255 iter/s, 4.89287s/12 iters), loss = 4.5114
I0412 12:54:34.099977 6200 solver.cpp:237] Train net output #0: loss = 4.5114 (* 1 = 4.5114 loss)
I0412 12:54:34.099985 6200 sgd_solver.cpp:105] Iteration 1404, lr = 0.00757212
I0412 12:54:38.610390 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:54:38.954963 6200 solver.cpp:218] Iteration 1416 (2.47177 iter/s, 4.85483s/12 iters), loss = 4.29999
I0412 12:54:38.955004 6200 solver.cpp:237] Train net output #0: loss = 4.29999 (* 1 = 4.29999 loss)
I0412 12:54:38.955013 6200 sgd_solver.cpp:105] Iteration 1416, lr = 0.00755414
I0412 12:54:43.424198 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1428.caffemodel
I0412 12:54:45.534541 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1428.solverstate
I0412 12:54:47.108049 6200 solver.cpp:330] Iteration 1428, Testing net (#0)
I0412 12:54:47.108076 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:54:50.971729 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:54:51.559166 6200 solver.cpp:397] Test net output #0: accuracy = 0.0686275
I0412 12:54:51.559206 6200 solver.cpp:397] Test net output #1: loss = 4.5088 (* 1 = 4.5088 loss)
I0412 12:54:51.644826 6200 solver.cpp:218] Iteration 1428 (0.945668 iter/s, 12.6894s/12 iters), loss = 4.45603
I0412 12:54:51.644865 6200 solver.cpp:237] Train net output #0: loss = 4.45603 (* 1 = 4.45603 loss)
I0412 12:54:51.644874 6200 sgd_solver.cpp:105] Iteration 1428, lr = 0.0075362
I0412 12:54:55.804155 6200 solver.cpp:218] Iteration 1440 (2.8852 iter/s, 4.15916s/12 iters), loss = 4.35141
I0412 12:54:55.804203 6200 solver.cpp:237] Train net output #0: loss = 4.35141 (* 1 = 4.35141 loss)
I0412 12:54:55.804214 6200 sgd_solver.cpp:105] Iteration 1440, lr = 0.00751831
I0412 12:55:00.495710 6200 solver.cpp:218] Iteration 1452 (2.55789 iter/s, 4.69136s/12 iters), loss = 4.56454
I0412 12:55:00.495885 6200 solver.cpp:237] Train net output #0: loss = 4.56454 (* 1 = 4.56454 loss)
I0412 12:55:00.495903 6200 sgd_solver.cpp:105] Iteration 1452, lr = 0.00750046
I0412 12:55:05.253986 6200 solver.cpp:218] Iteration 1464 (2.52209 iter/s, 4.75796s/12 iters), loss = 4.45965
I0412 12:55:05.254035 6200 solver.cpp:237] Train net output #0: loss = 4.45965 (* 1 = 4.45965 loss)
I0412 12:55:05.254048 6200 sgd_solver.cpp:105] Iteration 1464, lr = 0.00748265
I0412 12:55:10.101449 6200 solver.cpp:218] Iteration 1476 (2.47562 iter/s, 4.84726s/12 iters), loss = 4.52329
I0412 12:55:10.101497 6200 solver.cpp:237] Train net output #0: loss = 4.52329 (* 1 = 4.52329 loss)
I0412 12:55:10.101508 6200 sgd_solver.cpp:105] Iteration 1476, lr = 0.00746489
I0412 12:55:15.018771 6200 solver.cpp:218] Iteration 1488 (2.44045 iter/s, 4.91712s/12 iters), loss = 4.4598
I0412 12:55:15.018810 6200 solver.cpp:237] Train net output #0: loss = 4.4598 (* 1 = 4.4598 loss)
I0412 12:55:15.018819 6200 sgd_solver.cpp:105] Iteration 1488, lr = 0.00744716
I0412 12:55:19.887727 6200 solver.cpp:218] Iteration 1500 (2.46469 iter/s, 4.86876s/12 iters), loss = 4.05085
I0412 12:55:19.887780 6200 solver.cpp:237] Train net output #0: loss = 4.05085 (* 1 = 4.05085 loss)
I0412 12:55:19.887794 6200 sgd_solver.cpp:105] Iteration 1500, lr = 0.00742948
I0412 12:55:24.746826 6200 solver.cpp:218] Iteration 1512 (2.4697 iter/s, 4.85889s/12 iters), loss = 4.40082
I0412 12:55:24.746881 6200 solver.cpp:237] Train net output #0: loss = 4.40082 (* 1 = 4.40082 loss)
I0412 12:55:24.746894 6200 sgd_solver.cpp:105] Iteration 1512, lr = 0.00741184
I0412 12:55:26.433367 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:55:29.612779 6200 solver.cpp:218] Iteration 1524 (2.46622 iter/s, 4.86575s/12 iters), loss = 4.29602
I0412 12:55:29.612828 6200 solver.cpp:237] Train net output #0: loss = 4.29602 (* 1 = 4.29602 loss)
I0412 12:55:29.612840 6200 sgd_solver.cpp:105] Iteration 1524, lr = 0.00739425
I0412 12:55:31.652442 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1530.caffemodel
I0412 12:55:33.709980 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1530.solverstate
I0412 12:55:35.287567 6200 solver.cpp:330] Iteration 1530, Testing net (#0)
I0412 12:55:35.287595 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:55:39.111040 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:55:39.743353 6200 solver.cpp:397] Test net output #0: accuracy = 0.067402
I0412 12:55:39.743402 6200 solver.cpp:397] Test net output #1: loss = 4.4591 (* 1 = 4.4591 loss)
I0412 12:55:41.513095 6200 solver.cpp:218] Iteration 1536 (1.00841 iter/s, 11.8999s/12 iters), loss = 4.3919
I0412 12:55:41.513146 6200 solver.cpp:237] Train net output #0: loss = 4.3919 (* 1 = 4.3919 loss)
I0412 12:55:41.513159 6200 sgd_solver.cpp:105] Iteration 1536, lr = 0.00737669
I0412 12:55:46.368311 6200 solver.cpp:218] Iteration 1548 (2.47167 iter/s, 4.85501s/12 iters), loss = 4.05868
I0412 12:55:46.368356 6200 solver.cpp:237] Train net output #0: loss = 4.05868 (* 1 = 4.05868 loss)
I0412 12:55:46.368368 6200 sgd_solver.cpp:105] Iteration 1548, lr = 0.00735918
I0412 12:55:51.157284 6200 solver.cpp:218] Iteration 1560 (2.50586 iter/s, 4.78878s/12 iters), loss = 4.36113
I0412 12:55:51.157336 6200 solver.cpp:237] Train net output #0: loss = 4.36113 (* 1 = 4.36113 loss)
I0412 12:55:51.157349 6200 sgd_solver.cpp:105] Iteration 1560, lr = 0.00734171
I0412 12:55:56.083235 6200 solver.cpp:218] Iteration 1572 (2.43618 iter/s, 4.92575s/12 iters), loss = 4.30841
I0412 12:55:56.083276 6200 solver.cpp:237] Train net output #0: loss = 4.30841 (* 1 = 4.30841 loss)
I0412 12:55:56.083287 6200 sgd_solver.cpp:105] Iteration 1572, lr = 0.00732427
I0412 12:56:00.897780 6200 solver.cpp:218] Iteration 1584 (2.49255 iter/s, 4.81435s/12 iters), loss = 4.36044
I0412 12:56:00.897830 6200 solver.cpp:237] Train net output #0: loss = 4.36044 (* 1 = 4.36044 loss)
I0412 12:56:00.897842 6200 sgd_solver.cpp:105] Iteration 1584, lr = 0.00730688
I0412 12:56:05.824137 6200 solver.cpp:218] Iteration 1596 (2.43598 iter/s, 4.92615s/12 iters), loss = 4.29792
I0412 12:56:05.824311 6200 solver.cpp:237] Train net output #0: loss = 4.29792 (* 1 = 4.29792 loss)
I0412 12:56:05.824324 6200 sgd_solver.cpp:105] Iteration 1596, lr = 0.00728954
I0412 12:56:10.722602 6200 solver.cpp:218] Iteration 1608 (2.44991 iter/s, 4.89814s/12 iters), loss = 4.1384
I0412 12:56:10.722651 6200 solver.cpp:237] Train net output #0: loss = 4.1384 (* 1 = 4.1384 loss)
I0412 12:56:10.722662 6200 sgd_solver.cpp:105] Iteration 1608, lr = 0.00727223
I0412 12:56:14.607614 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:56:15.692869 6200 solver.cpp:218] Iteration 1620 (2.41446 iter/s, 4.97006s/12 iters), loss = 4.34047
I0412 12:56:15.692926 6200 solver.cpp:237] Train net output #0: loss = 4.34047 (* 1 = 4.34047 loss)
I0412 12:56:15.692939 6200 sgd_solver.cpp:105] Iteration 1620, lr = 0.00725496
I0412 12:56:20.185415 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1632.caffemodel
I0412 12:56:22.360277 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1632.solverstate
I0412 12:56:25.606704 6200 solver.cpp:330] Iteration 1632, Testing net (#0)
I0412 12:56:25.606732 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:56:29.415462 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:56:30.083362 6200 solver.cpp:397] Test net output #0: accuracy = 0.0808824
I0412 12:56:30.083411 6200 solver.cpp:397] Test net output #1: loss = 4.32148 (* 1 = 4.32148 loss)
I0412 12:56:30.168756 6200 solver.cpp:218] Iteration 1632 (0.828993 iter/s, 14.4754s/12 iters), loss = 4.28532
I0412 12:56:30.168807 6200 solver.cpp:237] Train net output #0: loss = 4.28532 (* 1 = 4.28532 loss)
I0412 12:56:30.168819 6200 sgd_solver.cpp:105] Iteration 1632, lr = 0.00723774
I0412 12:56:34.341073 6200 solver.cpp:218] Iteration 1644 (2.87623 iter/s, 4.17213s/12 iters), loss = 4.15207
I0412 12:56:34.341122 6200 solver.cpp:237] Train net output #0: loss = 4.15207 (* 1 = 4.15207 loss)
I0412 12:56:34.341133 6200 sgd_solver.cpp:105] Iteration 1644, lr = 0.00722056
I0412 12:56:39.159179 6200 solver.cpp:218] Iteration 1656 (2.49071 iter/s, 4.8179s/12 iters), loss = 4.21007
I0412 12:56:39.159251 6200 solver.cpp:237] Train net output #0: loss = 4.21007 (* 1 = 4.21007 loss)
I0412 12:56:39.159260 6200 sgd_solver.cpp:105] Iteration 1656, lr = 0.00720341
I0412 12:56:44.090701 6200 solver.cpp:218] Iteration 1668 (2.43344 iter/s, 4.9313s/12 iters), loss = 4.02778
I0412 12:56:44.090736 6200 solver.cpp:237] Train net output #0: loss = 4.02778 (* 1 = 4.02778 loss)
I0412 12:56:44.090744 6200 sgd_solver.cpp:105] Iteration 1668, lr = 0.00718631
I0412 12:56:48.957314 6200 solver.cpp:218] Iteration 1680 (2.46588 iter/s, 4.86642s/12 iters), loss = 4.13667
I0412 12:56:48.957367 6200 solver.cpp:237] Train net output #0: loss = 4.13667 (* 1 = 4.13667 loss)
I0412 12:56:48.957381 6200 sgd_solver.cpp:105] Iteration 1680, lr = 0.00716925
I0412 12:56:53.774808 6200 solver.cpp:218] Iteration 1692 (2.49103 iter/s, 4.81729s/12 iters), loss = 4.09917
I0412 12:56:53.774858 6200 solver.cpp:237] Train net output #0: loss = 4.09917 (* 1 = 4.09917 loss)
I0412 12:56:53.774868 6200 sgd_solver.cpp:105] Iteration 1692, lr = 0.00715223
I0412 12:56:58.498981 6200 solver.cpp:218] Iteration 1704 (2.54024 iter/s, 4.72397s/12 iters), loss = 3.81291
I0412 12:56:58.499032 6200 solver.cpp:237] Train net output #0: loss = 3.81291 (* 1 = 3.81291 loss)
I0412 12:56:58.499044 6200 sgd_solver.cpp:105] Iteration 1704, lr = 0.00713525
I0412 12:57:03.433732 6200 solver.cpp:218] Iteration 1716 (2.43184 iter/s, 4.93455s/12 iters), loss = 4.27595
I0412 12:57:03.433792 6200 solver.cpp:237] Train net output #0: loss = 4.27595 (* 1 = 4.27595 loss)
I0412 12:57:03.433809 6200 sgd_solver.cpp:105] Iteration 1716, lr = 0.00711831
I0412 12:57:04.419112 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:57:08.161129 6200 solver.cpp:218] Iteration 1728 (2.5385 iter/s, 4.72719s/12 iters), loss = 3.9451
I0412 12:57:08.161186 6200 solver.cpp:237] Train net output #0: loss = 3.9451 (* 1 = 3.9451 loss)
I0412 12:57:08.161202 6200 sgd_solver.cpp:105] Iteration 1728, lr = 0.00710141
I0412 12:57:10.114619 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1734.caffemodel
I0412 12:57:12.141419 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1734.solverstate
I0412 12:57:13.695446 6200 solver.cpp:330] Iteration 1734, Testing net (#0)
I0412 12:57:13.695472 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:57:17.448534 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:57:18.150702 6200 solver.cpp:397] Test net output #0: accuracy = 0.0851716
I0412 12:57:18.150750 6200 solver.cpp:397] Test net output #1: loss = 4.18639 (* 1 = 4.18639 loss)
I0412 12:57:20.010023 6200 solver.cpp:218] Iteration 1740 (1.01279 iter/s, 11.8485s/12 iters), loss = 4.12872
I0412 12:57:20.010076 6200 solver.cpp:237] Train net output #0: loss = 4.12872 (* 1 = 4.12872 loss)
I0412 12:57:20.010089 6200 sgd_solver.cpp:105] Iteration 1740, lr = 0.00708455
I0412 12:57:24.882958 6200 solver.cpp:218] Iteration 1752 (2.46269 iter/s, 4.87273s/12 iters), loss = 4.24282
I0412 12:57:24.883011 6200 solver.cpp:237] Train net output #0: loss = 4.24282 (* 1 = 4.24282 loss)
I0412 12:57:24.883023 6200 sgd_solver.cpp:105] Iteration 1752, lr = 0.00706773
I0412 12:57:29.756269 6200 solver.cpp:218] Iteration 1764 (2.46249 iter/s, 4.87311s/12 iters), loss = 4.21739
I0412 12:57:29.756309 6200 solver.cpp:237] Train net output #0: loss = 4.21739 (* 1 = 4.21739 loss)
I0412 12:57:29.756317 6200 sgd_solver.cpp:105] Iteration 1764, lr = 0.00705094
I0412 12:57:34.594120 6200 solver.cpp:218] Iteration 1776 (2.48054 iter/s, 4.83766s/12 iters), loss = 4.16774
I0412 12:57:34.594163 6200 solver.cpp:237] Train net output #0: loss = 4.16774 (* 1 = 4.16774 loss)
I0412 12:57:34.594173 6200 sgd_solver.cpp:105] Iteration 1776, lr = 0.0070342
I0412 12:57:39.446763 6200 solver.cpp:218] Iteration 1788 (2.47298 iter/s, 4.85245s/12 iters), loss = 4.19436
I0412 12:57:39.446815 6200 solver.cpp:237] Train net output #0: loss = 4.19436 (* 1 = 4.19436 loss)
I0412 12:57:39.446830 6200 sgd_solver.cpp:105] Iteration 1788, lr = 0.0070175
I0412 12:57:44.303936 6200 solver.cpp:218] Iteration 1800 (2.47068 iter/s, 4.85697s/12 iters), loss = 3.96803
I0412 12:57:44.304039 6200 solver.cpp:237] Train net output #0: loss = 3.96803 (* 1 = 3.96803 loss)
I0412 12:57:44.304050 6200 sgd_solver.cpp:105] Iteration 1800, lr = 0.00700084
I0412 12:57:49.250707 6200 solver.cpp:218] Iteration 1812 (2.42595 iter/s, 4.94651s/12 iters), loss = 4.09143
I0412 12:57:49.250768 6200 solver.cpp:237] Train net output #0: loss = 4.09143 (* 1 = 4.09143 loss)
I0412 12:57:49.250790 6200 sgd_solver.cpp:105] Iteration 1812, lr = 0.00698422
I0412 12:57:52.355062 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:57:54.055918 6200 solver.cpp:218] Iteration 1824 (2.49739 iter/s, 4.80501s/12 iters), loss = 4.13338
I0412 12:57:54.055960 6200 solver.cpp:237] Train net output #0: loss = 4.13338 (* 1 = 4.13338 loss)
I0412 12:57:54.055970 6200 sgd_solver.cpp:105] Iteration 1824, lr = 0.00696764
I0412 12:57:58.464684 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1836.caffemodel
I0412 12:58:02.321904 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1836.solverstate
I0412 12:58:07.423067 6200 solver.cpp:330] Iteration 1836, Testing net (#0)
I0412 12:58:07.423091 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:58:11.375706 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:58:12.122112 6200 solver.cpp:397] Test net output #0: accuracy = 0.0998775
I0412 12:58:12.122160 6200 solver.cpp:397] Test net output #1: loss = 4.13278 (* 1 = 4.13278 loss)
I0412 12:58:12.207701 6200 solver.cpp:218] Iteration 1836 (0.661113 iter/s, 18.1512s/12 iters), loss = 4.10363
I0412 12:58:12.207752 6200 solver.cpp:237] Train net output #0: loss = 4.10363 (* 1 = 4.10363 loss)
I0412 12:58:12.207762 6200 sgd_solver.cpp:105] Iteration 1836, lr = 0.0069511
I0412 12:58:16.470357 6200 solver.cpp:218] Iteration 1848 (2.81527 iter/s, 4.26247s/12 iters), loss = 4.11907
I0412 12:58:16.470542 6200 solver.cpp:237] Train net output #0: loss = 4.11907 (* 1 = 4.11907 loss)
I0412 12:58:16.470559 6200 sgd_solver.cpp:105] Iteration 1848, lr = 0.00693459
I0412 12:58:21.559499 6200 solver.cpp:218] Iteration 1860 (2.35812 iter/s, 5.0888s/12 iters), loss = 3.79214
I0412 12:58:21.559549 6200 solver.cpp:237] Train net output #0: loss = 3.79214 (* 1 = 3.79214 loss)
I0412 12:58:21.559561 6200 sgd_solver.cpp:105] Iteration 1860, lr = 0.00691813
I0412 12:58:26.564468 6200 solver.cpp:218] Iteration 1872 (2.39772 iter/s, 5.00476s/12 iters), loss = 4.0046
I0412 12:58:26.564522 6200 solver.cpp:237] Train net output #0: loss = 4.0046 (* 1 = 4.0046 loss)
I0412 12:58:26.564535 6200 sgd_solver.cpp:105] Iteration 1872, lr = 0.0069017
I0412 12:58:31.495548 6200 solver.cpp:218] Iteration 1884 (2.43365 iter/s, 4.93087s/12 iters), loss = 4.07379
I0412 12:58:31.495604 6200 solver.cpp:237] Train net output #0: loss = 4.07379 (* 1 = 4.07379 loss)
I0412 12:58:31.495617 6200 sgd_solver.cpp:105] Iteration 1884, lr = 0.00688532
I0412 12:58:36.308724 6200 solver.cpp:218] Iteration 1896 (2.49326 iter/s, 4.81298s/12 iters), loss = 3.979
I0412 12:58:36.308759 6200 solver.cpp:237] Train net output #0: loss = 3.979 (* 1 = 3.979 loss)
I0412 12:58:36.308768 6200 sgd_solver.cpp:105] Iteration 1896, lr = 0.00686897
I0412 12:58:41.185585 6200 solver.cpp:218] Iteration 1908 (2.46069 iter/s, 4.87667s/12 iters), loss = 3.97853
I0412 12:58:41.185636 6200 solver.cpp:237] Train net output #0: loss = 3.97853 (* 1 = 3.97853 loss)
I0412 12:58:41.185648 6200 sgd_solver.cpp:105] Iteration 1908, lr = 0.00685266
I0412 12:58:46.017805 6200 solver.cpp:218] Iteration 1920 (2.48343 iter/s, 4.83202s/12 iters), loss = 3.8948
I0412 12:58:46.017844 6200 solver.cpp:237] Train net output #0: loss = 3.8948 (* 1 = 3.8948 loss)
I0412 12:58:46.017853 6200 sgd_solver.cpp:105] Iteration 1920, lr = 0.00683639
I0412 12:58:46.336777 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:58:50.742425 6200 solver.cpp:218] Iteration 1932 (2.53999 iter/s, 4.72443s/12 iters), loss = 3.81387
I0412 12:58:50.742516 6200 solver.cpp:237] Train net output #0: loss = 3.81387 (* 1 = 3.81387 loss)
I0412 12:58:50.742525 6200 sgd_solver.cpp:105] Iteration 1932, lr = 0.00682016
I0412 12:58:52.709978 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1938.caffemodel
I0412 12:58:55.758438 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1938.solverstate
I0412 12:58:57.333679 6200 solver.cpp:330] Iteration 1938, Testing net (#0)
I0412 12:58:57.333711 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:59:00.995924 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:59:01.778154 6200 solver.cpp:397] Test net output #0: accuracy = 0.118873
I0412 12:59:01.778195 6200 solver.cpp:397] Test net output #1: loss = 3.98084 (* 1 = 3.98084 loss)
I0412 12:59:03.646323 6200 solver.cpp:218] Iteration 1944 (0.929985 iter/s, 12.9034s/12 iters), loss = 3.84614
I0412 12:59:03.646384 6200 solver.cpp:237] Train net output #0: loss = 3.84614 (* 1 = 3.84614 loss)
I0412 12:59:03.646397 6200 sgd_solver.cpp:105] Iteration 1944, lr = 0.00680397
I0412 12:59:08.574615 6200 solver.cpp:218] Iteration 1956 (2.43503 iter/s, 4.92808s/12 iters), loss = 3.65558
I0412 12:59:08.574666 6200 solver.cpp:237] Train net output #0: loss = 3.65558 (* 1 = 3.65558 loss)
I0412 12:59:08.574676 6200 sgd_solver.cpp:105] Iteration 1956, lr = 0.00678782
I0412 12:59:13.376659 6200 solver.cpp:218] Iteration 1968 (2.49904 iter/s, 4.80184s/12 iters), loss = 3.69098
I0412 12:59:13.376704 6200 solver.cpp:237] Train net output #0: loss = 3.69098 (* 1 = 3.69098 loss)
I0412 12:59:13.376716 6200 sgd_solver.cpp:105] Iteration 1968, lr = 0.0067717
I0412 12:59:18.317255 6200 solver.cpp:218] Iteration 1980 (2.42896 iter/s, 4.94039s/12 iters), loss = 3.6636
I0412 12:59:18.317306 6200 solver.cpp:237] Train net output #0: loss = 3.6636 (* 1 = 3.6636 loss)
I0412 12:59:18.317317 6200 sgd_solver.cpp:105] Iteration 1980, lr = 0.00675562
I0412 12:59:23.398351 6200 solver.cpp:218] Iteration 1992 (2.36179 iter/s, 5.08089s/12 iters), loss = 3.85722
I0412 12:59:23.398520 6200 solver.cpp:237] Train net output #0: loss = 3.85722 (* 1 = 3.85722 loss)
I0412 12:59:23.398535 6200 sgd_solver.cpp:105] Iteration 1992, lr = 0.00673958
I0412 12:59:28.320991 6200 solver.cpp:218] Iteration 2004 (2.43787 iter/s, 4.92233s/12 iters), loss = 3.61896
I0412 12:59:28.321031 6200 solver.cpp:237] Train net output #0: loss = 3.61896 (* 1 = 3.61896 loss)
I0412 12:59:28.321040 6200 sgd_solver.cpp:105] Iteration 2004, lr = 0.00672358
I0412 12:59:33.226212 6200 solver.cpp:218] Iteration 2016 (2.44647 iter/s, 4.90503s/12 iters), loss = 3.88117
I0412 12:59:33.226249 6200 solver.cpp:237] Train net output #0: loss = 3.88117 (* 1 = 3.88117 loss)
I0412 12:59:33.226258 6200 sgd_solver.cpp:105] Iteration 2016, lr = 0.00670762
I0412 12:59:35.697896 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:59:38.100064 6200 solver.cpp:218] Iteration 2028 (2.46221 iter/s, 4.87366s/12 iters), loss = 3.7257
I0412 12:59:38.100109 6200 solver.cpp:237] Train net output #0: loss = 3.7257 (* 1 = 3.7257 loss)
I0412 12:59:38.100118 6200 sgd_solver.cpp:105] Iteration 2028, lr = 0.00669169
I0412 12:59:42.511909 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2040.caffemodel
I0412 12:59:45.452201 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2040.solverstate
I0412 12:59:48.255045 6200 solver.cpp:330] Iteration 2040, Testing net (#0)
I0412 12:59:48.255070 6200 net.cpp:676] Ignoring source layer train-data
I0412 12:59:51.856765 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 12:59:52.819557 6200 solver.cpp:397] Test net output #0: accuracy = 0.130515
I0412 12:59:52.819607 6200 solver.cpp:397] Test net output #1: loss = 3.84461 (* 1 = 3.84461 loss)
I0412 12:59:52.905133 6200 solver.cpp:218] Iteration 2040 (0.810559 iter/s, 14.8046s/12 iters), loss = 3.65474
I0412 12:59:52.905210 6200 solver.cpp:237] Train net output #0: loss = 3.65474 (* 1 = 3.65474 loss)
I0412 12:59:52.905225 6200 sgd_solver.cpp:105] Iteration 2040, lr = 0.00667581
I0412 12:59:57.037629 6200 solver.cpp:218] Iteration 2052 (2.90396 iter/s, 4.1323s/12 iters), loss = 3.73139
I0412 12:59:57.037808 6200 solver.cpp:237] Train net output #0: loss = 3.73139 (* 1 = 3.73139 loss)
I0412 12:59:57.037820 6200 sgd_solver.cpp:105] Iteration 2052, lr = 0.00665996
I0412 12:59:58.204133 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:00:01.876716 6200 solver.cpp:218] Iteration 2064 (2.47997 iter/s, 4.83876s/12 iters), loss = 3.56459
I0412 13:00:01.876772 6200 solver.cpp:237] Train net output #0: loss = 3.56459 (* 1 = 3.56459 loss)
I0412 13:00:01.876785 6200 sgd_solver.cpp:105] Iteration 2064, lr = 0.00664414
I0412 13:00:06.717013 6200 solver.cpp:218] Iteration 2076 (2.47929 iter/s, 4.84009s/12 iters), loss = 3.76532
I0412 13:00:06.717072 6200 solver.cpp:237] Train net output #0: loss = 3.76532 (* 1 = 3.76532 loss)
I0412 13:00:06.717085 6200 sgd_solver.cpp:105] Iteration 2076, lr = 0.00662837
I0412 13:00:11.555438 6200 solver.cpp:218] Iteration 2088 (2.48025 iter/s, 4.83821s/12 iters), loss = 3.74486
I0412 13:00:11.555495 6200 solver.cpp:237] Train net output #0: loss = 3.74486 (* 1 = 3.74486 loss)
I0412 13:00:11.555505 6200 sgd_solver.cpp:105] Iteration 2088, lr = 0.00661263
I0412 13:00:16.375128 6200 solver.cpp:218] Iteration 2100 (2.48989 iter/s, 4.81949s/12 iters), loss = 3.73121
I0412 13:00:16.375185 6200 solver.cpp:237] Train net output #0: loss = 3.73121 (* 1 = 3.73121 loss)
I0412 13:00:16.375197 6200 sgd_solver.cpp:105] Iteration 2100, lr = 0.00659693
I0412 13:00:21.257336 6200 solver.cpp:218] Iteration 2112 (2.45801 iter/s, 4.882s/12 iters), loss = 3.59104
I0412 13:00:21.257391 6200 solver.cpp:237] Train net output #0: loss = 3.59104 (* 1 = 3.59104 loss)
I0412 13:00:21.257403 6200 sgd_solver.cpp:105] Iteration 2112, lr = 0.00658127
I0412 13:00:25.786295 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:00:26.101682 6200 solver.cpp:218] Iteration 2124 (2.47722 iter/s, 4.84414s/12 iters), loss = 3.19751
I0412 13:00:26.101738 6200 solver.cpp:237] Train net output #0: loss = 3.19751 (* 1 = 3.19751 loss)
I0412 13:00:26.101752 6200 sgd_solver.cpp:105] Iteration 2124, lr = 0.00656564
I0412 13:00:30.929906 6200 solver.cpp:218] Iteration 2136 (2.48549 iter/s, 4.82802s/12 iters), loss = 3.68515
I0412 13:00:30.930047 6200 solver.cpp:237] Train net output #0: loss = 3.68515 (* 1 = 3.68515 loss)
I0412 13:00:30.930058 6200 sgd_solver.cpp:105] Iteration 2136, lr = 0.00655006
I0412 13:00:33.053889 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2142.caffemodel
I0412 13:00:37.822796 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2142.solverstate
I0412 13:00:40.146412 6200 solver.cpp:330] Iteration 2142, Testing net (#0)
I0412 13:00:40.146435 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:00:43.823769 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:00:44.686580 6200 solver.cpp:397] Test net output #0: accuracy = 0.150735
I0412 13:00:44.686609 6200 solver.cpp:397] Test net output #1: loss = 3.72943 (* 1 = 3.72943 loss)
I0412 13:00:46.434412 6200 solver.cpp:218] Iteration 2148 (0.773997 iter/s, 15.5039s/12 iters), loss = 3.63465
I0412 13:00:46.434445 6200 solver.cpp:237] Train net output #0: loss = 3.63465 (* 1 = 3.63465 loss)
I0412 13:00:46.434454 6200 sgd_solver.cpp:105] Iteration 2148, lr = 0.00653451
I0412 13:00:51.378113 6200 solver.cpp:218] Iteration 2160 (2.42743 iter/s, 4.94351s/12 iters), loss = 3.93478
I0412 13:00:51.378168 6200 solver.cpp:237] Train net output #0: loss = 3.93478 (* 1 = 3.93478 loss)
I0412 13:00:51.378185 6200 sgd_solver.cpp:105] Iteration 2160, lr = 0.00651899
I0412 13:00:56.283787 6200 solver.cpp:218] Iteration 2172 (2.44625 iter/s, 4.90548s/12 iters), loss = 3.74247
I0412 13:00:56.283830 6200 solver.cpp:237] Train net output #0: loss = 3.74247 (* 1 = 3.74247 loss)
I0412 13:00:56.283840 6200 sgd_solver.cpp:105] Iteration 2172, lr = 0.00650351
I0412 13:01:01.060235 6200 solver.cpp:218] Iteration 2184 (2.51243 iter/s, 4.77626s/12 iters), loss = 3.62868
I0412 13:01:01.060372 6200 solver.cpp:237] Train net output #0: loss = 3.62868 (* 1 = 3.62868 loss)
I0412 13:01:01.060386 6200 sgd_solver.cpp:105] Iteration 2184, lr = 0.00648807
I0412 13:01:05.978330 6200 solver.cpp:218] Iteration 2196 (2.44011 iter/s, 4.91781s/12 iters), loss = 3.43749
I0412 13:01:05.978380 6200 solver.cpp:237] Train net output #0: loss = 3.43749 (* 1 = 3.43749 loss)
I0412 13:01:05.978391 6200 sgd_solver.cpp:105] Iteration 2196, lr = 0.00647267
I0412 13:01:10.742480 6200 solver.cpp:218] Iteration 2208 (2.51892 iter/s, 4.76395s/12 iters), loss = 3.31356
I0412 13:01:10.742522 6200 solver.cpp:237] Train net output #0: loss = 3.31356 (* 1 = 3.31356 loss)
I0412 13:01:10.742529 6200 sgd_solver.cpp:105] Iteration 2208, lr = 0.0064573
I0412 13:01:15.608604 6200 solver.cpp:218] Iteration 2220 (2.46612 iter/s, 4.86593s/12 iters), loss = 3.40995
I0412 13:01:15.608652 6200 solver.cpp:237] Train net output #0: loss = 3.40995 (* 1 = 3.40995 loss)
I0412 13:01:15.608664 6200 sgd_solver.cpp:105] Iteration 2220, lr = 0.00644197
I0412 13:01:17.393555 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:01:20.750353 6200 solver.cpp:218] Iteration 2232 (2.33393 iter/s, 5.14155s/12 iters), loss = 3.54891
I0412 13:01:20.750416 6200 solver.cpp:237] Train net output #0: loss = 3.54891 (* 1 = 3.54891 loss)
I0412 13:01:20.750428 6200 sgd_solver.cpp:105] Iteration 2232, lr = 0.00642668
I0412 13:01:25.417413 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2244.caffemodel
I0412 13:01:27.855428 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2244.solverstate
I0412 13:01:31.986517 6200 solver.cpp:330] Iteration 2244, Testing net (#0)
I0412 13:01:31.986618 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:01:35.532194 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:01:36.437872 6200 solver.cpp:397] Test net output #0: accuracy = 0.139706
I0412 13:01:36.437922 6200 solver.cpp:397] Test net output #1: loss = 3.75479 (* 1 = 3.75479 loss)
I0412 13:01:36.523473 6200 solver.cpp:218] Iteration 2244 (0.760813 iter/s, 15.7726s/12 iters), loss = 3.52498
I0412 13:01:36.523526 6200 solver.cpp:237] Train net output #0: loss = 3.52498 (* 1 = 3.52498 loss)
I0412 13:01:36.523538 6200 sgd_solver.cpp:105] Iteration 2244, lr = 0.00641142
I0412 13:01:40.685428 6200 solver.cpp:218] Iteration 2256 (2.88339 iter/s, 4.16177s/12 iters), loss = 3.24965
I0412 13:01:40.685480 6200 solver.cpp:237] Train net output #0: loss = 3.24965 (* 1 = 3.24965 loss)
I0412 13:01:40.685492 6200 sgd_solver.cpp:105] Iteration 2256, lr = 0.0063962
I0412 13:01:45.527181 6200 solver.cpp:218] Iteration 2268 (2.47854 iter/s, 4.84156s/12 iters), loss = 3.43718
I0412 13:01:45.527223 6200 solver.cpp:237] Train net output #0: loss = 3.43718 (* 1 = 3.43718 loss)
I0412 13:01:45.527232 6200 sgd_solver.cpp:105] Iteration 2268, lr = 0.00638101
I0412 13:01:50.452044 6200 solver.cpp:218] Iteration 2280 (2.43672 iter/s, 4.92466s/12 iters), loss = 3.17585
I0412 13:01:50.452116 6200 solver.cpp:237] Train net output #0: loss = 3.17585 (* 1 = 3.17585 loss)
I0412 13:01:50.452133 6200 sgd_solver.cpp:105] Iteration 2280, lr = 0.00636586
I0412 13:01:55.266127 6200 solver.cpp:218] Iteration 2292 (2.4928 iter/s, 4.81387s/12 iters), loss = 3.49098
I0412 13:01:55.266187 6200 solver.cpp:237] Train net output #0: loss = 3.49098 (* 1 = 3.49098 loss)
I0412 13:01:55.266199 6200 sgd_solver.cpp:105] Iteration 2292, lr = 0.00635075
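
Note: the solver.cpp:218 lines give throughput over each 12-iteration display window; with the training batch size of 128 from the train-data layer definition earlier in this log, they convert directly to images per second. A sketch for the window ending at iteration 2292:

    # Convert the logged iter/s into images/s, assuming batch_size = 128.
    iters, seconds, batch = 12, 4.81387, 128
    iter_per_s = iters / seconds           # ~2.4928, as logged for iteration 2292
    print(iter_per_s * batch)              # ~319 images/s
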
I0412 13:02:00.230705 6200 solver.cpp:218] Iteration 2304 (2.41723 iter/s, 4.96437s/12 iters), loss = 3.38532
I0412 13:02:00.230754 6200 solver.cpp:237] Train net output #0: loss = 3.38532 (* 1 = 3.38532 loss)
I0412 13:02:00.230765 6200 sgd_solver.cpp:105] Iteration 2304, lr = 0.00633567
I0412 13:02:05.016957 6200 solver.cpp:218] Iteration 2316 (2.50728 iter/s, 4.78606s/12 iters), loss = 3.4181
I0412 13:02:05.017047 6200 solver.cpp:237] Train net output #0: loss = 3.4181 (* 1 = 3.4181 loss)
I0412 13:02:05.017055 6200 sgd_solver.cpp:105] Iteration 2316, lr = 0.00632063
I0412 13:02:08.885938 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:02:09.931866 6200 solver.cpp:218] Iteration 2328 (2.44167 iter/s, 4.91467s/12 iters), loss = 3.26388
I0412 13:02:09.931915 6200 solver.cpp:237] Train net output #0: loss = 3.26388 (* 1 = 3.26388 loss)
I0412 13:02:09.931927 6200 sgd_solver.cpp:105] Iteration 2328, lr = 0.00630562
I0412 13:02:14.836369 6200 solver.cpp:218] Iteration 2340 (2.44683 iter/s, 4.90431s/12 iters), loss = 3.39874
I0412 13:02:14.836407 6200 solver.cpp:237] Train net output #0: loss = 3.39874 (* 1 = 3.39874 loss)
I0412 13:02:14.836416 6200 sgd_solver.cpp:105] Iteration 2340, lr = 0.00629065
I0412 13:02:16.851104 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2346.caffemodel
I0412 13:02:18.825981 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2346.solverstate
I0412 13:02:20.412753 6200 solver.cpp:330] Iteration 2346, Testing net (#0)
I0412 13:02:20.412781 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:02:23.903087 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:02:24.843008 6200 solver.cpp:397] Test net output #0: accuracy = 0.172181
I0412 13:02:24.843039 6200 solver.cpp:397] Test net output #1: loss = 3.56142 (* 1 = 3.56142 loss)
I0412 13:02:26.575763 6200 solver.cpp:218] Iteration 2352 (1.02223 iter/s, 11.739s/12 iters), loss = 3.3434
I0412 13:02:26.575824 6200 solver.cpp:237] Train net output #0: loss = 3.3434 (* 1 = 3.3434 loss)
I0412 13:02:26.575836 6200 sgd_solver.cpp:105] Iteration 2352, lr = 0.00627571
I0412 13:02:31.353382 6200 solver.cpp:218] Iteration 2364 (2.51182 iter/s, 4.77742s/12 iters), loss = 3.02418
I0412 13:02:31.353433 6200 solver.cpp:237] Train net output #0: loss = 3.02418 (* 1 = 3.02418 loss)
I0412 13:02:31.353446 6200 sgd_solver.cpp:105] Iteration 2364, lr = 0.00626081
I0412 13:02:36.322523 6200 solver.cpp:218] Iteration 2376 (2.415 iter/s, 4.96894s/12 iters), loss = 3.17566
I0412 13:02:36.322664 6200 solver.cpp:237] Train net output #0: loss = 3.17566 (* 1 = 3.17566 loss)
I0412 13:02:36.322679 6200 sgd_solver.cpp:105] Iteration 2376, lr = 0.00624595
I0412 13:02:41.278627 6200 solver.cpp:218] Iteration 2388 (2.4214 iter/s, 4.95582s/12 iters), loss = 3.05121
I0412 13:02:41.278687 6200 solver.cpp:237] Train net output #0: loss = 3.05121 (* 1 = 3.05121 loss)
I0412 13:02:41.278703 6200 sgd_solver.cpp:105] Iteration 2388, lr = 0.00623112
I0412 13:02:46.788759 6200 solver.cpp:218] Iteration 2400 (2.1779 iter/s, 5.50991s/12 iters), loss = 3.14047
I0412 13:02:46.788807 6200 solver.cpp:237] Train net output #0: loss = 3.14047 (* 1 = 3.14047 loss)
I0412 13:02:46.788817 6200 sgd_solver.cpp:105] Iteration 2400, lr = 0.00621633
I0412 13:02:51.674806 6200 solver.cpp:218] Iteration 2412 (2.45607 iter/s, 4.88585s/12 iters), loss = 2.92771
I0412 13:02:51.674849 6200 solver.cpp:237] Train net output #0: loss = 2.92771 (* 1 = 2.92771 loss)
I0412 13:02:51.674857 6200 sgd_solver.cpp:105] Iteration 2412, lr = 0.00620157
I0412 13:02:56.794514 6200 solver.cpp:218] Iteration 2424 (2.34397 iter/s, 5.11951s/12 iters), loss = 3.25821
I0412 13:02:56.794555 6200 solver.cpp:237] Train net output #0: loss = 3.25821 (* 1 = 3.25821 loss)
I0412 13:02:56.794562 6200 sgd_solver.cpp:105] Iteration 2424, lr = 0.00618684
I0412 13:02:57.929378 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:03:01.794303 6200 solver.cpp:218] Iteration 2436 (2.4002 iter/s, 4.99959s/12 iters), loss = 2.90309
I0412 13:03:01.794363 6200 solver.cpp:237] Train net output #0: loss = 2.90309 (* 1 = 2.90309 loss)
I0412 13:03:01.794378 6200 sgd_solver.cpp:105] Iteration 2436, lr = 0.00617215
I0412 13:03:06.288046 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2448.caffemodel
I0412 13:03:09.899281 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2448.solverstate
I0412 13:03:11.794760 6200 solver.cpp:330] Iteration 2448, Testing net (#0)
I0412 13:03:11.794790 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:03:15.601737 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:03:16.655788 6200 solver.cpp:397] Test net output #0: accuracy = 0.192402
I0412 13:03:16.655838 6200 solver.cpp:397] Test net output #1: loss = 3.49399 (* 1 = 3.49399 loss)
I0412 13:03:16.741036 6200 solver.cpp:218] Iteration 2448 (0.802877 iter/s, 14.9463s/12 iters), loss = 3.12943
I0412 13:03:16.741091 6200 solver.cpp:237] Train net output #0: loss = 3.12943 (* 1 = 3.12943 loss)
I0412 13:03:16.741102 6200 sgd_solver.cpp:105] Iteration 2448, lr = 0.0061575
I0412 13:03:20.947703 6200 solver.cpp:218] Iteration 2460 (2.85274 iter/s, 4.20648s/12 iters), loss = 2.93879
I0412 13:03:20.947753 6200 solver.cpp:237] Train net output #0: loss = 2.93879 (* 1 = 2.93879 loss)
I0412 13:03:20.947767 6200 sgd_solver.cpp:105] Iteration 2460, lr = 0.00614288
I0412 13:03:25.832646 6200 solver.cpp:218] Iteration 2472 (2.45663 iter/s, 4.88475s/12 iters), loss = 3.28912
I0412 13:03:25.832696 6200 solver.cpp:237] Train net output #0: loss = 3.28912 (* 1 = 3.28912 loss)
I0412 13:03:25.832707 6200 sgd_solver.cpp:105] Iteration 2472, lr = 0.0061283
I0412 13:03:30.845971 6200 solver.cpp:218] Iteration 2484 (2.39372 iter/s, 5.01311s/12 iters), loss = 3.12713
I0412 13:03:30.846026 6200 solver.cpp:237] Train net output #0: loss = 3.12713 (* 1 = 3.12713 loss)
I0412 13:03:30.846038 6200 sgd_solver.cpp:105] Iteration 2484, lr = 0.00611375
I0412 13:03:35.781821 6200 solver.cpp:218] Iteration 2496 (2.43129 iter/s, 4.93565s/12 iters), loss = 2.98234
I0412 13:03:35.781865 6200 solver.cpp:237] Train net output #0: loss = 2.98234 (* 1 = 2.98234 loss)
I0412 13:03:35.781874 6200 sgd_solver.cpp:105] Iteration 2496, lr = 0.00609923
I0412 13:03:40.724555 6200 solver.cpp:218] Iteration 2508 (2.4279 iter/s, 4.94254s/12 iters), loss = 3.13256
I0412 13:03:40.724659 6200 solver.cpp:237] Train net output #0: loss = 3.13256 (* 1 = 3.13256 loss)
I0412 13:03:40.724673 6200 sgd_solver.cpp:105] Iteration 2508, lr = 0.00608475
I0412 13:03:45.701122 6200 solver.cpp:218] Iteration 2520 (2.41142 iter/s, 4.97632s/12 iters), loss = 3.20172
I0412 13:03:45.701179 6200 solver.cpp:237] Train net output #0: loss = 3.20172 (* 1 = 3.20172 loss)
I0412 13:03:45.701190 6200 sgd_solver.cpp:105] Iteration 2520, lr = 0.0060703
I0412 13:03:48.874255 6204 data_layer.cpp:73] Restarting data prefetching from start.
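
Note: the "Restarting data prefetching from start" messages from the training prefetch thread (6204; thread 6205 appears only around test passes) mark the train LMDB wrapping around, i.e. an epoch boundary, and they recur roughly every ~100 iterations here. A back-of-envelope estimate of the epoch size, assuming each restart is one full pass over the training set (the exact image count is not logged, so this is only an approximation):

    # Rough epoch size implied by the prefetch restarts; the iteration numbers
    # are approximate, read off the restart messages above, not exact counts.
    restart_iters = [2226, 2322, 2430, 2526]
    spacing = (restart_iters[-1] - restart_iters[0]) / (len(restart_iters) - 1)
    print(spacing, spacing * 128)          # ~100 iters/epoch, ~12,800 images
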
I0412 13:03:50.559671 6200 solver.cpp:218] Iteration 2532 (2.46997 iter/s, 4.85835s/12 iters), loss = 3.68074
I0412 13:03:50.559715 6200 solver.cpp:237] Train net output #0: loss = 3.68074 (* 1 = 3.68074 loss)
I0412 13:03:50.559725 6200 sgd_solver.cpp:105] Iteration 2532, lr = 0.00605589
I0412 13:03:55.450031 6200 solver.cpp:218] Iteration 2544 (2.4539 iter/s, 4.89017s/12 iters), loss = 2.99494
I0412 13:03:55.450074 6200 solver.cpp:237] Train net output #0: loss = 2.99494 (* 1 = 2.99494 loss)
I0412 13:03:55.450083 6200 sgd_solver.cpp:105] Iteration 2544, lr = 0.00604151
I0412 13:03:57.463657 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2550.caffemodel
I0412 13:03:59.477732 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2550.solverstate
I0412 13:04:01.045488 6200 solver.cpp:330] Iteration 2550, Testing net (#0)
I0412 13:04:01.045516 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:04:04.432981 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:04:05.454339 6200 solver.cpp:397] Test net output #0: accuracy = 0.195466
I0412 13:04:05.454382 6200 solver.cpp:397] Test net output #1: loss = 3.46052 (* 1 = 3.46052 loss)
I0412 13:04:07.361995 6200 solver.cpp:218] Iteration 2556 (1.00742 iter/s, 11.9116s/12 iters), loss = 3.39051
I0412 13:04:07.362044 6200 solver.cpp:237] Train net output #0: loss = 3.39051 (* 1 = 3.39051 loss)
I0412 13:04:07.362052 6200 sgd_solver.cpp:105] Iteration 2556, lr = 0.00602717
I0412 13:04:12.356827 6200 solver.cpp:218] Iteration 2568 (2.40258 iter/s, 4.99464s/12 iters), loss = 2.81593
I0412 13:04:12.356950 6200 solver.cpp:237] Train net output #0: loss = 2.81593 (* 1 = 2.81593 loss)
I0412 13:04:12.356961 6200 sgd_solver.cpp:105] Iteration 2568, lr = 0.00601286
I0412 13:04:17.292088 6200 solver.cpp:218] Iteration 2580 (2.43161 iter/s, 4.935s/12 iters), loss = 3.12515
I0412 13:04:17.292129 6200 solver.cpp:237] Train net output #0: loss = 3.12515 (* 1 = 3.12515 loss)
I0412 13:04:17.292140 6200 sgd_solver.cpp:105] Iteration 2580, lr = 0.00599858
I0412 13:04:22.286969 6200 solver.cpp:218] Iteration 2592 (2.40255 iter/s, 4.99469s/12 iters), loss = 3.38384
I0412 13:04:22.287001 6200 solver.cpp:237] Train net output #0: loss = 3.38384 (* 1 = 3.38384 loss)
I0412 13:04:22.287009 6200 sgd_solver.cpp:105] Iteration 2592, lr = 0.00598434
I0412 13:04:27.203590 6200 solver.cpp:218] Iteration 2604 (2.44079 iter/s, 4.91644s/12 iters), loss = 3.02023
I0412 13:04:27.203634 6200 solver.cpp:237] Train net output #0: loss = 3.02023 (* 1 = 3.02023 loss)
I0412 13:04:27.203644 6200 sgd_solver.cpp:105] Iteration 2604, lr = 0.00597013
I0412 13:04:32.155658 6200 solver.cpp:218] Iteration 2616 (2.42333 iter/s, 4.95187s/12 iters), loss = 3.14035
I0412 13:04:32.155711 6200 solver.cpp:237] Train net output #0: loss = 3.14035 (* 1 = 3.14035 loss)
I0412 13:04:32.155725 6200 sgd_solver.cpp:105] Iteration 2616, lr = 0.00595596
I0412 13:04:37.555377 6200 solver.cpp:218] Iteration 2628 (2.22243 iter/s, 5.3995s/12 iters), loss = 2.67373
I0412 13:04:37.555435 6200 solver.cpp:237] Train net output #0: loss = 2.67373 (* 1 = 2.67373 loss)
I0412 13:04:37.555447 6200 sgd_solver.cpp:105] Iteration 2628, lr = 0.00594182
I0412 13:04:37.958914 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:04:42.508576 6200 solver.cpp:218] Iteration 2640 (2.42278 iter/s, 4.95299s/12 iters), loss = 2.86862
I0412 13:04:42.508667 6200 solver.cpp:237] Train net output #0: loss = 2.86862 (* 1 = 2.86862 loss)
I0412 13:04:42.508678 6200 sgd_solver.cpp:105] Iteration 2640, lr = 0.00592771
I0412 13:04:47.161615 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2652.caffemodel
I0412 13:04:49.244729 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2652.solverstate
I0412 13:04:50.847900 6200 solver.cpp:330] Iteration 2652, Testing net (#0)
I0412 13:04:50.847926 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:04:54.630071 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:04:55.762367 6200 solver.cpp:397] Test net output #0: accuracy = 0.193015
I0412 13:04:55.762418 6200 solver.cpp:397] Test net output #1: loss = 3.5211 (* 1 = 3.5211 loss)
I0412 13:04:55.847784 6200 solver.cpp:218] Iteration 2652 (0.899635 iter/s, 13.3387s/12 iters), loss = 3.03223
I0412 13:04:55.847836 6200 solver.cpp:237] Train net output #0: loss = 3.03223 (* 1 = 3.03223 loss)
I0412 13:04:55.847848 6200 sgd_solver.cpp:105] Iteration 2652, lr = 0.00591364
I0412 13:04:59.873203 6200 solver.cpp:218] Iteration 2664 (2.98119 iter/s, 4.02524s/12 iters), loss = 2.92499
I0412 13:04:59.873245 6200 solver.cpp:237] Train net output #0: loss = 2.92499 (* 1 = 2.92499 loss)
I0412 13:04:59.873255 6200 sgd_solver.cpp:105] Iteration 2664, lr = 0.0058996
I0412 13:05:04.789626 6200 solver.cpp:218] Iteration 2676 (2.4409 iter/s, 4.91622s/12 iters), loss = 3.07726
I0412 13:05:04.789681 6200 solver.cpp:237] Train net output #0: loss = 3.07726 (* 1 = 3.07726 loss)
I0412 13:05:04.789693 6200 sgd_solver.cpp:105] Iteration 2676, lr = 0.00588559
I0412 13:05:09.916021 6200 solver.cpp:218] Iteration 2688 (2.34092 iter/s, 5.12619s/12 iters), loss = 2.95724
I0412 13:05:09.916070 6200 solver.cpp:237] Train net output #0: loss = 2.95724 (* 1 = 2.95724 loss)
I0412 13:05:09.916080 6200 sgd_solver.cpp:105] Iteration 2688, lr = 0.00587162
I0412 13:05:14.975471 6200 solver.cpp:218] Iteration 2700 (2.37189 iter/s, 5.05925s/12 iters), loss = 3.0331
I0412 13:05:14.975574 6200 solver.cpp:237] Train net output #0: loss = 3.0331 (* 1 = 3.0331 loss)
I0412 13:05:14.975584 6200 sgd_solver.cpp:105] Iteration 2700, lr = 0.00585768
I0412 13:05:19.883368 6200 solver.cpp:218] Iteration 2712 (2.44516 iter/s, 4.90765s/12 iters), loss = 2.72918
I0412 13:05:19.883419 6200 solver.cpp:237] Train net output #0: loss = 2.72918 (* 1 = 2.72918 loss)
I0412 13:05:19.883430 6200 sgd_solver.cpp:105] Iteration 2712, lr = 0.00584377
I0412 13:05:24.908582 6200 solver.cpp:218] Iteration 2724 (2.38805 iter/s, 5.02501s/12 iters), loss = 3.1136
I0412 13:05:24.908634 6200 solver.cpp:237] Train net output #0: loss = 3.1136 (* 1 = 3.1136 loss)
I0412 13:05:24.908646 6200 sgd_solver.cpp:105] Iteration 2724, lr = 0.0058299
I0412 13:05:27.361228 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:05:29.728132 6200 solver.cpp:218] Iteration 2736 (2.48996 iter/s, 4.81936s/12 iters), loss = 2.47546
I0412 13:05:29.728173 6200 solver.cpp:237] Train net output #0: loss = 2.47546 (* 1 = 2.47546 loss)
I0412 13:05:29.728183 6200 sgd_solver.cpp:105] Iteration 2736, lr = 0.00581605
I0412 13:05:34.636924 6200 solver.cpp:218] Iteration 2748 (2.44469 iter/s, 4.9086s/12 iters), loss = 3.05402
I0412 13:05:34.636970 6200 solver.cpp:237] Train net output #0: loss = 3.05402 (* 1 = 3.05402 loss)
I0412 13:05:34.636981 6200 sgd_solver.cpp:105] Iteration 2748, lr = 0.00580225
I0412 13:05:36.834470 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2754.caffemodel
I0412 13:05:39.308710 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2754.solverstate
I0412 13:05:43.251729 6200 solver.cpp:330] Iteration 2754, Testing net (#0)
I0412 13:05:43.251755 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:05:46.452663 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:05:46.787329 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:05:47.889926 6200 solver.cpp:397] Test net output #0: accuracy = 0.207108
I0412 13:05:47.889998 6200 solver.cpp:397] Test net output #1: loss = 3.45166 (* 1 = 3.45166 loss)
I0412 13:05:49.704869 6200 solver.cpp:218] Iteration 2760 (0.796417 iter/s, 15.0675s/12 iters), loss = 2.99502
I0412 13:05:49.704916 6200 solver.cpp:237] Train net output #0: loss = 2.99502 (* 1 = 2.99502 loss)
I0412 13:05:49.704926 6200 sgd_solver.cpp:105] Iteration 2760, lr = 0.00578847
I0412 13:05:54.557440 6200 solver.cpp:218] Iteration 2772 (2.47302 iter/s, 4.85238s/12 iters), loss = 3.12662
I0412 13:05:54.557487 6200 solver.cpp:237] Train net output #0: loss = 3.12662 (* 1 = 3.12662 loss)
I0412 13:05:54.557498 6200 sgd_solver.cpp:105] Iteration 2772, lr = 0.00577473
I0412 13:05:59.573279 6200 solver.cpp:218] Iteration 2784 (2.39252 iter/s, 5.01564s/12 iters), loss = 2.9619
I0412 13:05:59.573334 6200 solver.cpp:237] Train net output #0: loss = 2.9619 (* 1 = 2.9619 loss)
I0412 13:05:59.573345 6200 sgd_solver.cpp:105] Iteration 2784, lr = 0.00576102
I0412 13:06:04.422650 6200 solver.cpp:218] Iteration 2796 (2.47465 iter/s, 4.84917s/12 iters), loss = 2.7622
I0412 13:06:04.422705 6200 solver.cpp:237] Train net output #0: loss = 2.7622 (* 1 = 2.7622 loss)
I0412 13:06:04.422719 6200 sgd_solver.cpp:105] Iteration 2796, lr = 0.00574734
I0412 13:06:09.358773 6200 solver.cpp:218] Iteration 2808 (2.43116 iter/s, 4.93592s/12 iters), loss = 2.91258
I0412 13:06:09.358814 6200 solver.cpp:237] Train net output #0: loss = 2.91258 (* 1 = 2.91258 loss)
I0412 13:06:09.358824 6200 sgd_solver.cpp:105] Iteration 2808, lr = 0.00573369
I0412 13:06:14.282109 6200 solver.cpp:218] Iteration 2820 (2.43747 iter/s, 4.92315s/12 iters), loss = 2.65191
I0412 13:06:14.282155 6200 solver.cpp:237] Train net output #0: loss = 2.65191 (* 1 = 2.65191 loss)
I0412 13:06:14.282166 6200 sgd_solver.cpp:105] Iteration 2820, lr = 0.00572008
I0412 13:06:18.993984 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:06:19.281565 6200 solver.cpp:218] Iteration 2832 (2.40036 iter/s, 4.99926s/12 iters), loss = 2.52377
I0412 13:06:19.281610 6200 solver.cpp:237] Train net output #0: loss = 2.52377 (* 1 = 2.52377 loss)
I0412 13:06:19.281622 6200 sgd_solver.cpp:105] Iteration 2832, lr = 0.0057065
I0412 13:06:24.505867 6200 solver.cpp:218] Iteration 2844 (2.29705 iter/s, 5.2241s/12 iters), loss = 2.71268
I0412 13:06:24.505908 6200 solver.cpp:237] Train net output #0: loss = 2.71268 (* 1 = 2.71268 loss)
I0412 13:06:24.505918 6200 sgd_solver.cpp:105] Iteration 2844, lr = 0.00569295
I0412 13:06:28.983331 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2856.caffemodel
I0412 13:06:31.369505 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2856.solverstate
I0412 13:06:38.065737 6200 solver.cpp:330] Iteration 2856, Testing net (#0)
I0412 13:06:38.065757 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:06:41.387624 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:06:42.523780 6200 solver.cpp:397] Test net output #0: accuracy = 0.223039
I0412 13:06:42.523818 6200 solver.cpp:397] Test net output #1: loss = 3.39755 (* 1 = 3.39755 loss)
I0412 13:06:42.609083 6200 solver.cpp:218] Iteration 2856 (0.662885 iter/s, 18.1027s/12 iters), loss = 2.70552
I0412 13:06:42.609115 6200 solver.cpp:237] Train net output #0: loss = 2.70552 (* 1 = 2.70552 loss)
I0412 13:06:42.609124 6200 sgd_solver.cpp:105] Iteration 2856, lr = 0.00567944
I0412 13:06:47.029150 6200 solver.cpp:218] Iteration 2868 (2.715 iter/s, 4.4199s/12 iters), loss = 2.5875
I0412 13:06:47.029197 6200 solver.cpp:237] Train net output #0: loss = 2.5875 (* 1 = 2.5875 loss)
I0412 13:06:47.029206 6200 sgd_solver.cpp:105] Iteration 2868, lr = 0.00566595
I0412 13:06:52.065757 6200 solver.cpp:218] Iteration 2880 (2.38265 iter/s, 5.03641s/12 iters), loss = 3.05498
I0412 13:06:52.065876 6200 solver.cpp:237] Train net output #0: loss = 3.05498 (* 1 = 3.05498 loss)
I0412 13:06:52.065888 6200 sgd_solver.cpp:105] Iteration 2880, lr = 0.0056525
I0412 13:06:56.997177 6200 solver.cpp:218] Iteration 2892 (2.43351 iter/s, 4.93116s/12 iters), loss = 2.6739
I0412 13:06:56.997228 6200 solver.cpp:237] Train net output #0: loss = 2.6739 (* 1 = 2.6739 loss)
I0412 13:06:56.997241 6200 sgd_solver.cpp:105] Iteration 2892, lr = 0.00563908
I0412 13:07:01.935855 6200 solver.cpp:218] Iteration 2904 (2.4299 iter/s, 4.93848s/12 iters), loss = 2.58882
I0412 13:07:01.935904 6200 solver.cpp:237] Train net output #0: loss = 2.58882 (* 1 = 2.58882 loss)
I0412 13:07:01.935914 6200 sgd_solver.cpp:105] Iteration 2904, lr = 0.00562569
I0412 13:07:06.896994 6200 solver.cpp:218] Iteration 2916 (2.4189 iter/s, 4.96094s/12 iters), loss = 2.62976
I0412 13:07:06.897044 6200 solver.cpp:237] Train net output #0: loss = 2.62976 (* 1 = 2.62976 loss)
I0412 13:07:06.897055 6200 sgd_solver.cpp:105] Iteration 2916, lr = 0.00561233
I0412 13:07:12.101459 6200 solver.cpp:218] Iteration 2928 (2.3058 iter/s, 5.20426s/12 iters), loss = 2.69142
I0412 13:07:12.101521 6200 solver.cpp:237] Train net output #0: loss = 2.69142 (* 1 = 2.69142 loss)
I0412 13:07:12.101537 6200 sgd_solver.cpp:105] Iteration 2928, lr = 0.00559901
I0412 13:07:13.954957 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:07:17.151053 6200 solver.cpp:218] Iteration 2940 (2.37653 iter/s, 5.04939s/12 iters), loss = 2.47887
I0412 13:07:17.151104 6200 solver.cpp:237] Train net output #0: loss = 2.47887 (* 1 = 2.47887 loss)
I0412 13:07:17.151116 6200 sgd_solver.cpp:105] Iteration 2940, lr = 0.00558572
I0412 13:07:22.103678 6200 solver.cpp:218] Iteration 2952 (2.42305 iter/s, 4.95243s/12 iters), loss = 2.32672
I0412 13:07:22.103802 6200 solver.cpp:237] Train net output #0: loss = 2.32672 (* 1 = 2.32672 loss)
I0412 13:07:22.103816 6200 sgd_solver.cpp:105] Iteration 2952, lr = 0.00557245
I0412 13:07:24.051723 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2958.caffemodel
I0412 13:07:27.429741 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2958.solverstate
I0412 13:07:29.352272 6200 solver.cpp:330] Iteration 2958, Testing net (#0)
I0412 13:07:29.352299 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:07:32.584798 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:07:33.930264 6200 solver.cpp:397] Test net output #0: accuracy = 0.232843
I0412 13:07:33.930305 6200 solver.cpp:397] Test net output #1: loss = 3.2416 (* 1 = 3.2416 loss)
I0412 13:07:35.851274 6200 solver.cpp:218] Iteration 2964 (0.872912 iter/s, 13.7471s/12 iters), loss = 2.5172
I0412 13:07:35.851325 6200 solver.cpp:237] Train net output #0: loss = 2.5172 (* 1 = 2.5172 loss)
I0412 13:07:35.851336 6200 sgd_solver.cpp:105] Iteration 2964, lr = 0.00555922
I0412 13:07:40.823663 6200 solver.cpp:218] Iteration 2976 (2.41342 iter/s, 4.97219s/12 iters), loss = 2.66433
I0412 13:07:40.823705 6200 solver.cpp:237] Train net output #0: loss = 2.66433 (* 1 = 2.66433 loss)
I0412 13:07:40.823715 6200 sgd_solver.cpp:105] Iteration 2976, lr = 0.00554603
I0412 13:07:45.848714 6200 solver.cpp:218] Iteration 2988 (2.38813 iter/s, 5.02485s/12 iters), loss = 2.33945
I0412 13:07:45.848767 6200 solver.cpp:237] Train net output #0: loss = 2.33945 (* 1 = 2.33945 loss)
I0412 13:07:45.848776 6200 sgd_solver.cpp:105] Iteration 2988, lr = 0.00553286
I0412 13:07:50.864495 6200 solver.cpp:218] Iteration 3000 (2.39254 iter/s, 5.01558s/12 iters), loss = 2.94136
I0412 13:07:50.864544 6200 solver.cpp:237] Train net output #0: loss = 2.94136 (* 1 = 2.94136 loss)
I0412 13:07:50.864553 6200 sgd_solver.cpp:105] Iteration 3000, lr = 0.00551972
I0412 13:07:56.115691 6200 solver.cpp:218] Iteration 3012 (2.28528 iter/s, 5.25099s/12 iters), loss = 2.60589
I0412 13:07:56.115833 6200 solver.cpp:237] Train net output #0: loss = 2.60589 (* 1 = 2.60589 loss)
I0412 13:07:56.115846 6200 sgd_solver.cpp:105] Iteration 3012, lr = 0.00550662
I0412 13:08:01.043424 6200 solver.cpp:218] Iteration 3024 (2.43534 iter/s, 4.92744s/12 iters), loss = 2.62041
I0412 13:08:01.043483 6200 solver.cpp:237] Train net output #0: loss = 2.62041 (* 1 = 2.62041 loss)
I0412 13:08:01.043498 6200 sgd_solver.cpp:105] Iteration 3024, lr = 0.00549354
I0412 13:08:04.862406 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:08:05.877981 6200 solver.cpp:218] Iteration 3036 (2.48223 iter/s, 4.83436s/12 iters), loss = 2.43518
I0412 13:08:05.878031 6200 solver.cpp:237] Train net output #0: loss = 2.43518 (* 1 = 2.43518 loss)
I0412 13:08:05.878042 6200 sgd_solver.cpp:105] Iteration 3036, lr = 0.0054805
I0412 13:08:10.808564 6200 solver.cpp:218] Iteration 3048 (2.43389 iter/s, 4.93039s/12 iters), loss = 2.36292
I0412 13:08:10.808614 6200 solver.cpp:237] Train net output #0: loss = 2.36292 (* 1 = 2.36292 loss)
I0412 13:08:10.808625 6200 sgd_solver.cpp:105] Iteration 3048, lr = 0.00546749
I0412 13:08:15.258976 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3060.caffemodel
I0412 13:08:22.460780 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3060.solverstate
I0412 13:08:28.937294 6200 solver.cpp:330] Iteration 3060, Testing net (#0)
I0412 13:08:28.937381 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:08:32.131624 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:08:33.346084 6200 solver.cpp:397] Test net output #0: accuracy = 0.247549
I0412 13:08:33.346148 6200 solver.cpp:397] Test net output #1: loss = 3.15883 (* 1 = 3.15883 loss)
I0412 13:08:33.431931 6200 solver.cpp:218] Iteration 3060 (0.530441 iter/s, 22.6227s/12 iters), loss = 2.69366
I0412 13:08:33.431984 6200 solver.cpp:237] Train net output #0: loss = 2.69366 (* 1 = 2.69366 loss)
I0412 13:08:33.431996 6200 sgd_solver.cpp:105] Iteration 3060, lr = 0.00545451
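
Note: the periodic dips in the reported rate (here 0.530441 iter/s at iteration 3060) are a bookkeeping effect, not a slowdown in training: the 12-iteration display window spans the snapshot write and the 51-batch test pass, so their wall-clock time is included. A rough split of that window, using the values logged just above and the ~2.4 iter/s seen in plain training windows:

    # Split the slow window at iteration 3060 into training vs. snapshot+test time.
    window = 22.6227                 # s for 12 iters, as logged
    train_only = 12 / 2.43           # ~4.9 s at the typical ~2.4 iter/s
    print(window - train_only)       # ~17.7 s spent snapshotting and testing
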
I0412 13:08:37.611795 6200 solver.cpp:218] Iteration 3072 (2.87103 iter/s, 4.17969s/12 iters), loss = 2.36594
I0412 13:08:37.611838 6200 solver.cpp:237] Train net output #0: loss = 2.36594 (* 1 = 2.36594 loss)
I0412 13:08:37.611847 6200 sgd_solver.cpp:105] Iteration 3072, lr = 0.00544156
I0412 13:08:42.556823 6200 solver.cpp:218] Iteration 3084 (2.42677 iter/s, 4.94484s/12 iters), loss = 2.50406
I0412 13:08:42.556872 6200 solver.cpp:237] Train net output #0: loss = 2.50406 (* 1 = 2.50406 loss)
I0412 13:08:42.556883 6200 sgd_solver.cpp:105] Iteration 3084, lr = 0.00542864
I0412 13:08:47.388736 6200 solver.cpp:218] Iteration 3096 (2.48359 iter/s, 4.83172s/12 iters), loss = 2.40366
I0412 13:08:47.388790 6200 solver.cpp:237] Train net output #0: loss = 2.40366 (* 1 = 2.40366 loss)
I0412 13:08:47.388803 6200 sgd_solver.cpp:105] Iteration 3096, lr = 0.00541575
I0412 13:08:52.311476 6200 solver.cpp:218] Iteration 3108 (2.43777 iter/s, 4.92253s/12 iters), loss = 2.56544
I0412 13:08:52.311543 6200 solver.cpp:237] Train net output #0: loss = 2.56544 (* 1 = 2.56544 loss)
I0412 13:08:52.311560 6200 sgd_solver.cpp:105] Iteration 3108, lr = 0.00540289
I0412 13:08:57.299176 6200 solver.cpp:218] Iteration 3120 (2.40602 iter/s, 4.98749s/12 iters), loss = 2.27809
I0412 13:08:57.299227 6200 solver.cpp:237] Train net output #0: loss = 2.27809 (* 1 = 2.27809 loss)
I0412 13:08:57.299240 6200 sgd_solver.cpp:105] Iteration 3120, lr = 0.00539006
I0412 13:09:02.357821 6200 solver.cpp:218] Iteration 3132 (2.37227 iter/s, 5.05845s/12 iters), loss = 2.55079
I0412 13:09:02.357977 6200 solver.cpp:237] Train net output #0: loss = 2.55079 (* 1 = 2.55079 loss)
I0412 13:09:02.357988 6200 sgd_solver.cpp:105] Iteration 3132, lr = 0.00537727
I0412 13:09:03.479620 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:09:07.421458 6200 solver.cpp:218] Iteration 3144 (2.36997 iter/s, 5.06335s/12 iters), loss = 2.29663
I0412 13:09:07.421500 6200 solver.cpp:237] Train net output #0: loss = 2.29663 (* 1 = 2.29663 loss)
I0412 13:09:07.421510 6200 sgd_solver.cpp:105] Iteration 3144, lr = 0.0053645
I0412 13:09:12.629251 6200 solver.cpp:218] Iteration 3156 (2.30433 iter/s, 5.20759s/12 iters), loss = 2.44805
I0412 13:09:12.629302 6200 solver.cpp:237] Train net output #0: loss = 2.44805 (* 1 = 2.44805 loss)
I0412 13:09:12.629314 6200 sgd_solver.cpp:105] Iteration 3156, lr = 0.00535176
I0412 13:09:14.643954 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3162.caffemodel
I0412 13:09:21.740943 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3162.solverstate
I0412 13:09:23.304535 6200 solver.cpp:330] Iteration 3162, Testing net (#0)
I0412 13:09:23.304563 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:09:26.492379 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:09:27.751497 6200 solver.cpp:397] Test net output #0: accuracy = 0.25674
I0412 13:09:27.751544 6200 solver.cpp:397] Test net output #1: loss = 3.07159 (* 1 = 3.07159 loss)
I0412 13:09:29.675987 6200 solver.cpp:218] Iteration 3168 (0.703969 iter/s, 17.0462s/12 iters), loss = 2.10204
I0412 13:09:29.676048 6200 solver.cpp:237] Train net output #0: loss = 2.10204 (* 1 = 2.10204 loss)
I0412 13:09:29.676062 6200 sgd_solver.cpp:105] Iteration 3168, lr = 0.00533906
I0412 13:09:34.583052 6200 solver.cpp:218] Iteration 3180 (2.44556 iter/s, 4.90686s/12 iters), loss = 2.41534
I0412 13:09:34.583159 6200 solver.cpp:237] Train net output #0: loss = 2.41534 (* 1 = 2.41534 loss)
I0412 13:09:34.583173 6200 sgd_solver.cpp:105] Iteration 3180, lr = 0.00532638
I0412 13:09:39.414619 6200 solver.cpp:218] Iteration 3192 (2.4838 iter/s, 4.83132s/12 iters), loss = 2.60894
I0412 13:09:39.414671 6200 solver.cpp:237] Train net output #0: loss = 2.60894 (* 1 = 2.60894 loss)
I0412 13:09:39.414682 6200 sgd_solver.cpp:105] Iteration 3192, lr = 0.00531374
I0412 13:09:44.298485 6200 solver.cpp:218] Iteration 3204 (2.45717 iter/s, 4.88367s/12 iters), loss = 2.38113
I0412 13:09:44.298532 6200 solver.cpp:237] Train net output #0: loss = 2.38113 (* 1 = 2.38113 loss)
I0412 13:09:44.298542 6200 sgd_solver.cpp:105] Iteration 3204, lr = 0.00530112
I0412 13:09:49.406474 6200 solver.cpp:218] Iteration 3216 (2.34935 iter/s, 5.10779s/12 iters), loss = 2.25399
I0412 13:09:49.406513 6200 solver.cpp:237] Train net output #0: loss = 2.25399 (* 1 = 2.25399 loss)
I0412 13:09:49.406522 6200 sgd_solver.cpp:105] Iteration 3216, lr = 0.00528853
I0412 13:09:54.411772 6200 solver.cpp:218] Iteration 3228 (2.39755 iter/s, 5.0051s/12 iters), loss = 2.21078
I0412 13:09:54.411816 6200 solver.cpp:237] Train net output #0: loss = 2.21078 (* 1 = 2.21078 loss)
I0412 13:09:54.411826 6200 sgd_solver.cpp:105] Iteration 3228, lr = 0.00527598
I0412 13:09:57.904503 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:09:59.689318 6200 solver.cpp:218] Iteration 3240 (2.27387 iter/s, 5.27734s/12 iters), loss = 2.37598
I0412 13:09:59.689369 6200 solver.cpp:237] Train net output #0: loss = 2.37598 (* 1 = 2.37598 loss)
I0412 13:09:59.689381 6200 sgd_solver.cpp:105] Iteration 3240, lr = 0.00526345
I0412 13:10:04.691490 6200 solver.cpp:218] Iteration 3252 (2.39905 iter/s, 5.00198s/12 iters), loss = 2.35124
I0412 13:10:04.691608 6200 solver.cpp:237] Train net output #0: loss = 2.35124 (* 1 = 2.35124 loss)
I0412 13:10:04.691619 6200 sgd_solver.cpp:105] Iteration 3252, lr = 0.00525095
I0412 13:10:09.223750 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3264.caffemodel
I0412 13:10:11.333915 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3264.solverstate
I0412 13:10:16.268113 6200 solver.cpp:330] Iteration 3264, Testing net (#0)
I0412 13:10:16.268142 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:10:19.510546 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:10:20.885589 6200 solver.cpp:397] Test net output #0: accuracy = 0.258578
I0412 13:10:20.885639 6200 solver.cpp:397] Test net output #1: loss = 3.1808 (* 1 = 3.1808 loss)
I0412 13:10:20.971086 6200 solver.cpp:218] Iteration 3264 (0.737145 iter/s, 16.279s/12 iters), loss = 2.56171
I0412 13:10:20.971136 6200 solver.cpp:237] Train net output #0: loss = 2.56171 (* 1 = 2.56171 loss)
I0412 13:10:20.971146 6200 sgd_solver.cpp:105] Iteration 3264, lr = 0.00523849
I0412 13:10:25.183435 6200 solver.cpp:218] Iteration 3276 (2.84889 iter/s, 4.21217s/12 iters), loss = 2.37841
I0412 13:10:25.183480 6200 solver.cpp:237] Train net output #0: loss = 2.37841 (* 1 = 2.37841 loss)
I0412 13:10:25.183490 6200 sgd_solver.cpp:105] Iteration 3276, lr = 0.00522605
I0412 13:10:30.138195 6200 solver.cpp:218] Iteration 3288 (2.42201 iter/s, 4.95456s/12 iters), loss = 2.31435
I0412 13:10:30.138248 6200 solver.cpp:237] Train net output #0: loss = 2.31435 (* 1 = 2.31435 loss)
I0412 13:10:30.138262 6200 sgd_solver.cpp:105] Iteration 3288, lr = 0.00521364
I0412 13:10:35.256187 6200 solver.cpp:218] Iteration 3300 (2.34476 iter/s, 5.11779s/12 iters), loss = 2.26393
I0412 13:10:35.256316 6200 solver.cpp:237] Train net output #0: loss = 2.26393 (* 1 = 2.26393 loss)
I0412 13:10:35.256330 6200 sgd_solver.cpp:105] Iteration 3300, lr = 0.00520126
I0412 13:10:40.243651 6200 solver.cpp:218] Iteration 3312 (2.40617 iter/s, 4.98719s/12 iters), loss = 2.48209
I0412 13:10:40.243705 6200 solver.cpp:237] Train net output #0: loss = 2.48209 (* 1 = 2.48209 loss)
I0412 13:10:40.243716 6200 sgd_solver.cpp:105] Iteration 3312, lr = 0.00518892
I0412 13:10:45.266531 6200 solver.cpp:218] Iteration 3324 (2.38916 iter/s, 5.02268s/12 iters), loss = 2.2272
I0412 13:10:45.266583 6200 solver.cpp:237] Train net output #0: loss = 2.2272 (* 1 = 2.2272 loss)
I0412 13:10:45.266597 6200 sgd_solver.cpp:105] Iteration 3324, lr = 0.0051766
I0412 13:10:50.016104 6200 solver.cpp:218] Iteration 3336 (2.52664 iter/s, 4.74938s/12 iters), loss = 2.24411
I0412 13:10:50.016142 6200 solver.cpp:237] Train net output #0: loss = 2.24411 (* 1 = 2.24411 loss)
I0412 13:10:50.016151 6200 sgd_solver.cpp:105] Iteration 3336, lr = 0.00516431
I0412 13:10:50.471413 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:10:54.892397 6200 solver.cpp:218] Iteration 3348 (2.46098 iter/s, 4.87611s/12 iters), loss = 2.43541
I0412 13:10:54.892441 6200 solver.cpp:237] Train net output #0: loss = 2.43541 (* 1 = 2.43541 loss)
I0412 13:10:54.892449 6200 sgd_solver.cpp:105] Iteration 3348, lr = 0.00515204
I0412 13:10:59.851665 6200 solver.cpp:218] Iteration 3360 (2.41981 iter/s, 4.95908s/12 iters), loss = 2.29191
I0412 13:10:59.851706 6200 solver.cpp:237] Train net output #0: loss = 2.29191 (* 1 = 2.29191 loss)
I0412 13:10:59.851716 6200 sgd_solver.cpp:105] Iteration 3360, lr = 0.00513981
I0412 13:11:01.875685 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3366.caffemodel
I0412 13:11:05.117270 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3366.solverstate
I0412 13:11:07.504086 6200 solver.cpp:330] Iteration 3366, Testing net (#0)
I0412 13:11:07.504181 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:11:10.794409 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:11:12.257776 6200 solver.cpp:397] Test net output #0: accuracy = 0.241422
I0412 13:11:12.257819 6200 solver.cpp:397] Test net output #1: loss = 3.30236 (* 1 = 3.30236 loss)
I0412 13:11:14.129040 6200 solver.cpp:218] Iteration 3372 (0.840517 iter/s, 14.2769s/12 iters), loss = 2.14713
I0412 13:11:14.129086 6200 solver.cpp:237] Train net output #0: loss = 2.14713 (* 1 = 2.14713 loss)
I0412 13:11:14.129094 6200 sgd_solver.cpp:105] Iteration 3372, lr = 0.00512761
I0412 13:11:19.035984 6200 solver.cpp:218] Iteration 3384 (2.44561 iter/s, 4.90675s/12 iters), loss = 2.3567
I0412 13:11:19.036026 6200 solver.cpp:237] Train net output #0: loss = 2.3567 (* 1 = 2.3567 loss)
I0412 13:11:19.036036 6200 sgd_solver.cpp:105] Iteration 3384, lr = 0.00511544
I0412 13:11:23.936769 6200 solver.cpp:218] Iteration 3396 (2.44868 iter/s, 4.9006s/12 iters), loss = 2.44932
I0412 13:11:23.936821 6200 solver.cpp:237] Train net output #0: loss = 2.44932 (* 1 = 2.44932 loss)
I0412 13:11:23.936833 6200 sgd_solver.cpp:105] Iteration 3396, lr = 0.00510329
I0412 13:11:28.741448 6200 solver.cpp:218] Iteration 3408 (2.49767 iter/s, 4.80448s/12 iters), loss = 2.09486
I0412 13:11:28.741498 6200 solver.cpp:237] Train net output #0: loss = 2.09486 (* 1 = 2.09486 loss)
I0412 13:11:28.741511 6200 sgd_solver.cpp:105] Iteration 3408, lr = 0.00509117
I0412 13:11:33.614320 6200 solver.cpp:218] Iteration 3420 (2.46271 iter/s, 4.87267s/12 iters), loss = 1.88598
I0412 13:11:33.614369 6200 solver.cpp:237] Train net output #0: loss = 1.88598 (* 1 = 1.88598 loss)
I0412 13:11:33.614382 6200 sgd_solver.cpp:105] Iteration 3420, lr = 0.00507909
I0412 13:11:38.468274 6200 solver.cpp:218] Iteration 3432 (2.47231 iter/s, 4.85376s/12 iters), loss = 2.26411
I0412 13:11:38.468379 6200 solver.cpp:237] Train net output #0: loss = 2.26411 (* 1 = 2.26411 loss)
I0412 13:11:38.468389 6200 sgd_solver.cpp:105] Iteration 3432, lr = 0.00506703
I0412 13:11:41.048139 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:11:43.317423 6200 solver.cpp:218] Iteration 3444 (2.47479 iter/s, 4.8489s/12 iters), loss = 1.8577
I0412 13:11:43.317471 6200 solver.cpp:237] Train net output #0: loss = 1.8577 (* 1 = 1.8577 loss)
I0412 13:11:43.317481 6200 sgd_solver.cpp:105] Iteration 3444, lr = 0.005055
I0412 13:11:48.297600 6200 solver.cpp:218] Iteration 3456 (2.40965 iter/s, 4.97997s/12 iters), loss = 2.22141
I0412 13:11:48.297667 6200 solver.cpp:237] Train net output #0: loss = 2.22141 (* 1 = 2.22141 loss)
I0412 13:11:48.297683 6200 sgd_solver.cpp:105] Iteration 3456, lr = 0.005043
I0412 13:11:52.714627 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3468.caffemodel
I0412 13:11:55.225481 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3468.solverstate
I0412 13:11:56.788997 6200 solver.cpp:330] Iteration 3468, Testing net (#0)
I0412 13:11:56.789027 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:11:57.143707 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:11:59.854001 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:12:01.232137 6200 solver.cpp:397] Test net output #0: accuracy = 0.279412
I0412 13:12:01.232188 6200 solver.cpp:397] Test net output #1: loss = 3.07933 (* 1 = 3.07933 loss)
I0412 13:12:01.317610 6200 solver.cpp:218] Iteration 3468 (0.921689 iter/s, 13.0196s/12 iters), loss = 2.35436
I0412 13:12:01.317659 6200 solver.cpp:237] Train net output #0: loss = 2.35436 (* 1 = 2.35436 loss)
I0412 13:12:01.317669 6200 sgd_solver.cpp:105] Iteration 3468, lr = 0.00503102
I0412 13:12:05.742905 6200 solver.cpp:218] Iteration 3480 (2.7118 iter/s, 4.42511s/12 iters), loss = 2.08689
I0412 13:12:05.742959 6200 solver.cpp:237] Train net output #0: loss = 2.08689 (* 1 = 2.08689 loss)
I0412 13:12:05.742970 6200 sgd_solver.cpp:105] Iteration 3480, lr = 0.00501908
I0412 13:12:10.717841 6200 solver.cpp:218] Iteration 3492 (2.41219 iter/s, 4.97473s/12 iters), loss = 2.13867
I0412 13:12:10.717979 6200 solver.cpp:237] Train net output #0: loss = 2.13867 (* 1 = 2.13867 loss)
I0412 13:12:10.717990 6200 sgd_solver.cpp:105] Iteration 3492, lr = 0.00500716
I0412 13:12:15.775813 6200 solver.cpp:218] Iteration 3504 (2.37262 iter/s, 5.05771s/12 iters), loss = 1.94194
I0412 13:12:15.775864 6200 solver.cpp:237] Train net output #0: loss = 1.94194 (* 1 = 1.94194 loss)
I0412 13:12:15.775876 6200 sgd_solver.cpp:105] Iteration 3504, lr = 0.00499527
I0412 13:12:20.676303 6200 solver.cpp:218] Iteration 3516 (2.44883 iter/s, 4.90029s/12 iters), loss = 2.01775
I0412 13:12:20.676352 6200 solver.cpp:237] Train net output #0: loss = 2.01775 (* 1 = 2.01775 loss)
I0412 13:12:20.676360 6200 sgd_solver.cpp:105] Iteration 3516, lr = 0.00498341
I0412 13:12:25.601373 6200 solver.cpp:218] Iteration 3528 (2.43661 iter/s, 4.92487s/12 iters), loss = 2.00428
I0412 13:12:25.601428 6200 solver.cpp:237] Train net output #0: loss = 2.00428 (* 1 = 2.00428 loss)
I0412 13:12:25.601440 6200 sgd_solver.cpp:105] Iteration 3528, lr = 0.00497158
I0412 13:12:30.674916 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:12:30.933176 6200 solver.cpp:218] Iteration 3540 (2.25074 iter/s, 5.33159s/12 iters), loss = 1.87571
I0412 13:12:30.933291 6200 solver.cpp:237] Train net output #0: loss = 1.87571 (* 1 = 1.87571 loss)
I0412 13:12:30.933303 6200 sgd_solver.cpp:105] Iteration 3540, lr = 0.00495978
I0412 13:12:35.880482 6200 solver.cpp:218] Iteration 3552 (2.42569 iter/s, 4.94705s/12 iters), loss = 2.19048
I0412 13:12:35.880520 6200 solver.cpp:237] Train net output #0: loss = 2.19048 (* 1 = 2.19048 loss)
I0412 13:12:35.880530 6200 sgd_solver.cpp:105] Iteration 3552, lr = 0.004948
I0412 13:12:40.776993 6200 solver.cpp:218] Iteration 3564 (2.45082 iter/s, 4.89633s/12 iters), loss = 2.26086
I0412 13:12:40.777089 6200 solver.cpp:237] Train net output #0: loss = 2.26086 (* 1 = 2.26086 loss)
I0412 13:12:40.777098 6200 sgd_solver.cpp:105] Iteration 3564, lr = 0.00493626
I0412 13:12:42.791985 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3570.caffemodel
I0412 13:12:45.515316 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3570.solverstate
I0412 13:12:47.059898 6200 solver.cpp:330] Iteration 3570, Testing net (#0)
I0412 13:12:47.059921 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:12:50.304922 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:12:51.713634 6200 solver.cpp:397] Test net output #0: accuracy = 0.283701
I0412 13:12:51.713675 6200 solver.cpp:397] Test net output #1: loss = 3.11512 (* 1 = 3.11512 loss)
I0412 13:12:53.653695 6200 solver.cpp:218] Iteration 3576 (0.931949 iter/s, 12.8762s/12 iters), loss = 2.45859
I0412 13:12:53.653751 6200 solver.cpp:237] Train net output #0: loss = 2.45859 (* 1 = 2.45859 loss)
I0412 13:12:53.653764 6200 sgd_solver.cpp:105] Iteration 3576, lr = 0.00492454
I0412 13:12:58.626014 6200 solver.cpp:218] Iteration 3588 (2.41346 iter/s, 4.97211s/12 iters), loss = 1.92351
I0412 13:12:58.626065 6200 solver.cpp:237] Train net output #0: loss = 1.92351 (* 1 = 1.92351 loss)
I0412 13:12:58.626076 6200 sgd_solver.cpp:105] Iteration 3588, lr = 0.00491284
I0412 13:13:03.664935 6200 solver.cpp:218] Iteration 3600 (2.38156 iter/s, 5.03872s/12 iters), loss = 1.92521
I0412 13:13:03.664985 6200 solver.cpp:237] Train net output #0: loss = 1.92521 (* 1 = 1.92521 loss)
I0412 13:13:03.664996 6200 sgd_solver.cpp:105] Iteration 3600, lr = 0.00490118
I0412 13:13:08.649144 6200 solver.cpp:218] Iteration 3612 (2.4077 iter/s, 4.98401s/12 iters), loss = 2.04607
I0412 13:13:08.649189 6200 solver.cpp:237] Train net output #0: loss = 2.04607 (* 1 = 2.04607 loss)
I0412 13:13:08.649200 6200 sgd_solver.cpp:105] Iteration 3612, lr = 0.00488954
I0412 13:13:13.733772 6200 solver.cpp:218] Iteration 3624 (2.36015 iter/s, 5.08443s/12 iters), loss = 1.74302
I0412 13:13:13.733899 6200 solver.cpp:237] Train net output #0: loss = 1.74302 (* 1 = 1.74302 loss)
I0412 13:13:13.733909 6200 sgd_solver.cpp:105] Iteration 3624, lr = 0.00487793
I0412 13:13:18.629637 6200 solver.cpp:218] Iteration 3636 (2.45119 iter/s, 4.89559s/12 iters), loss = 1.7814
I0412 13:13:18.629694 6200 solver.cpp:237] Train net output #0: loss = 1.7814 (* 1 = 1.7814 loss)
I0412 13:13:18.629705 6200 sgd_solver.cpp:105] Iteration 3636, lr = 0.00486635
I0412 13:13:20.522550 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:13:23.577500 6200 solver.cpp:218] Iteration 3648 (2.42539 iter/s, 4.94765s/12 iters), loss = 1.84784
I0412 13:13:23.577562 6200 solver.cpp:237] Train net output #0: loss = 1.84784 (* 1 = 1.84784 loss)
I0412 13:13:23.577579 6200 sgd_solver.cpp:105] Iteration 3648, lr = 0.0048548
I0412 13:13:28.508965 6200 solver.cpp:218] Iteration 3660 (2.43345 iter/s, 4.93126s/12 iters), loss = 2.23228
I0412 13:13:28.509014 6200 solver.cpp:237] Train net output #0: loss = 2.23228 (* 1 = 2.23228 loss)
I0412 13:13:28.509025 6200 sgd_solver.cpp:105] Iteration 3660, lr = 0.00484327
I0412 13:13:32.995229 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3672.caffemodel
I0412 13:13:38.484129 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3672.solverstate
I0412 13:13:46.085163 6200 solver.cpp:330] Iteration 3672, Testing net (#0)
I0412 13:13:46.085232 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:13:49.141535 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:13:50.661312 6200 solver.cpp:397] Test net output #0: accuracy = 0.293505
I0412 13:13:50.661350 6200 solver.cpp:397] Test net output #1: loss = 2.9565 (* 1 = 2.9565 loss)
I0412 13:13:50.745421 6200 solver.cpp:218] Iteration 3672 (0.539671 iter/s, 22.2358s/12 iters), loss = 1.94892
I0412 13:13:50.745482 6200 solver.cpp:237] Train net output #0: loss = 1.94892 (* 1 = 1.94892 loss)
I0412 13:13:50.745496 6200 sgd_solver.cpp:105] Iteration 3672, lr = 0.00483177
I0412 13:13:54.867535 6200 solver.cpp:218] Iteration 3684 (2.91126 iter/s, 4.12193s/12 iters), loss = 1.94023
I0412 13:13:54.867589 6200 solver.cpp:237] Train net output #0: loss = 1.94023 (* 1 = 1.94023 loss)
I0412 13:13:54.867601 6200 sgd_solver.cpp:105] Iteration 3684, lr = 0.0048203
I0412 13:13:59.893694 6200 solver.cpp:218] Iteration 3696 (2.3876 iter/s, 5.02596s/12 iters), loss = 1.83941
I0412 13:13:59.893734 6200 solver.cpp:237] Train net output #0: loss = 1.83941 (* 1 = 1.83941 loss)
I0412 13:13:59.893744 6200 sgd_solver.cpp:105] Iteration 3696, lr = 0.00480886
I0412 13:14:04.874105 6200 solver.cpp:218] Iteration 3708 (2.40953 iter/s, 4.98022s/12 iters), loss = 1.90818
I0412 13:14:04.874148 6200 solver.cpp:237] Train net output #0: loss = 1.90818 (* 1 = 1.90818 loss)
I0412 13:14:04.874157 6200 sgd_solver.cpp:105] Iteration 3708, lr = 0.00479744
I0412 13:14:09.855233 6200 solver.cpp:218] Iteration 3720 (2.40919 iter/s, 4.98094s/12 iters), loss = 1.87409
I0412 13:14:09.855275 6200 solver.cpp:237] Train net output #0: loss = 1.87409 (* 1 = 1.87409 loss)
I0412 13:14:09.855284 6200 sgd_solver.cpp:105] Iteration 3720, lr = 0.00478605
I0412 13:14:14.725715 6200 solver.cpp:218] Iteration 3732 (2.46392 iter/s, 4.87029s/12 iters), loss = 1.80693
I0412 13:14:14.725769 6200 solver.cpp:237] Train net output #0: loss = 1.80693 (* 1 = 1.80693 loss)
I0412 13:14:14.725782 6200 sgd_solver.cpp:105] Iteration 3732, lr = 0.00477469
I0412 13:14:18.681594 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:14:19.676497 6200 solver.cpp:218] Iteration 3744 (2.42396 iter/s, 4.95058s/12 iters), loss = 1.59277
I0412 13:14:19.676539 6200 solver.cpp:237] Train net output #0: loss = 1.59277 (* 1 = 1.59277 loss)
I0412 13:14:19.676548 6200 sgd_solver.cpp:105] Iteration 3744, lr = 0.00476335
I0412 13:14:24.625574 6200 solver.cpp:218] Iteration 3756 (2.42479 iter/s, 4.94888s/12 iters), loss = 1.94835
I0412 13:14:24.625627 6200 solver.cpp:237] Train net output #0: loss = 1.94835 (* 1 = 1.94835 loss)
I0412 13:14:24.625639 6200 sgd_solver.cpp:105] Iteration 3756, lr = 0.00475204
I0412 13:14:29.534154 6200 solver.cpp:218] Iteration 3768 (2.4448 iter/s, 4.90838s/12 iters), loss = 1.95841
I0412 13:14:29.534211 6200 solver.cpp:237] Train net output #0: loss = 1.95841 (* 1 = 1.95841 loss)
I0412 13:14:29.534224 6200 sgd_solver.cpp:105] Iteration 3768, lr = 0.00474076
I0412 13:14:31.549358 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3774.caffemodel
I0412 13:14:38.023867 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3774.solverstate
I0412 13:14:41.369045 6200 solver.cpp:330] Iteration 3774, Testing net (#0)
I0412 13:14:41.369074 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:14:44.407197 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:14:45.896462 6200 solver.cpp:397] Test net output #0: accuracy = 0.322304
I0412 13:14:45.896502 6200 solver.cpp:397] Test net output #1: loss = 2.92107 (* 1 = 2.92107 loss)
I0412 13:14:47.851161 6200 solver.cpp:218] Iteration 3780 (0.655149 iter/s, 18.3164s/12 iters), loss = 1.727
I0412 13:14:47.851204 6200 solver.cpp:237] Train net output #0: loss = 1.727 (* 1 = 1.727 loss)
I0412 13:14:47.851213 6200 sgd_solver.cpp:105] Iteration 3780, lr = 0.00472951
I0412 13:14:52.852401 6200 solver.cpp:218] Iteration 3792 (2.3995 iter/s, 5.00104s/12 iters), loss = 1.87089
I0412 13:14:52.852522 6200 solver.cpp:237] Train net output #0: loss = 1.87089 (* 1 = 1.87089 loss)
I0412 13:14:52.852535 6200 sgd_solver.cpp:105] Iteration 3792, lr = 0.00471828
I0412 13:14:58.146970 6200 solver.cpp:218] Iteration 3804 (2.26659 iter/s, 5.2943s/12 iters), loss = 1.82894
I0412 13:14:58.147008 6200 solver.cpp:237] Train net output #0: loss = 1.82894 (* 1 = 1.82894 loss)
I0412 13:14:58.147017 6200 sgd_solver.cpp:105] Iteration 3804, lr = 0.00470707
I0412 13:15:03.086603 6200 solver.cpp:218] Iteration 3816 (2.42943 iter/s, 4.93944s/12 iters), loss = 1.82142
I0412 13:15:03.086661 6200 solver.cpp:237] Train net output #0: loss = 1.82142 (* 1 = 1.82142 loss)
I0412 13:15:03.086674 6200 sgd_solver.cpp:105] Iteration 3816, lr = 0.0046959
I0412 13:15:07.952564 6200 solver.cpp:218] Iteration 3828 (2.46621 iter/s, 4.86576s/12 iters), loss = 1.52478
I0412 13:15:07.952608 6200 solver.cpp:237] Train net output #0: loss = 1.52478 (* 1 = 1.52478 loss)
I0412 13:15:07.952617 6200 sgd_solver.cpp:105] Iteration 3828, lr = 0.00468475
I0412 13:15:12.865237 6200 solver.cpp:218] Iteration 3840 (2.44276 iter/s, 4.91248s/12 iters), loss = 2.04772
I0412 13:15:12.865285 6200 solver.cpp:237] Train net output #0: loss = 2.04772 (* 1 = 2.04772 loss)
I0412 13:15:12.865298 6200 sgd_solver.cpp:105] Iteration 3840, lr = 0.00467363
I0412 13:15:14.078382 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:15:18.043588 6200 solver.cpp:218] Iteration 3852 (2.31743 iter/s, 5.17814s/12 iters), loss = 1.82267
I0412 13:15:18.043644 6200 solver.cpp:237] Train net output #0: loss = 1.82267 (* 1 = 1.82267 loss)
I0412 13:15:18.043655 6200 sgd_solver.cpp:105] Iteration 3852, lr = 0.00466253
I0412 13:15:22.942288 6200 solver.cpp:218] Iteration 3864 (2.44973 iter/s, 4.8985s/12 iters), loss = 2.18083
I0412 13:15:22.942443 6200 solver.cpp:237] Train net output #0: loss = 2.18083 (* 1 = 2.18083 loss)
I0412 13:15:22.942456 6200 sgd_solver.cpp:105] Iteration 3864, lr = 0.00465146
I0412 13:15:27.355669 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3876.caffemodel
I0412 13:15:31.290812 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3876.solverstate
I0412 13:15:37.998009 6200 solver.cpp:330] Iteration 3876, Testing net (#0)
I0412 13:15:37.998035 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:15:40.902684 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:15:42.440080 6200 solver.cpp:397] Test net output #0: accuracy = 0.316789
I0412 13:15:42.440126 6200 solver.cpp:397] Test net output #1: loss = 2.86825 (* 1 = 2.86825 loss)
I0412 13:15:42.525233 6200 solver.cpp:218] Iteration 3876 (0.6128 iter/s, 19.5822s/12 iters), loss = 1.60368
I0412 13:15:42.525293 6200 solver.cpp:237] Train net output #0: loss = 1.60368 (* 1 = 1.60368 loss)
I0412 13:15:42.525305 6200 sgd_solver.cpp:105] Iteration 3876, lr = 0.00464042
I0412 13:15:46.677896 6200 solver.cpp:218] Iteration 3888 (2.88984 iter/s, 4.15247s/12 iters), loss = 1.73864
I0412 13:15:46.677940 6200 solver.cpp:237] Train net output #0: loss = 1.73864 (* 1 = 1.73864 loss)
I0412 13:15:46.677949 6200 sgd_solver.cpp:105] Iteration 3888, lr = 0.0046294
I0412 13:15:51.605105 6200 solver.cpp:218] Iteration 3900 (2.43555 iter/s, 4.92702s/12 iters), loss = 1.37004
I0412 13:15:51.605149 6200 solver.cpp:237] Train net output #0: loss = 1.37004 (* 1 = 1.37004 loss)
I0412 13:15:51.605159 6200 sgd_solver.cpp:105] Iteration 3900, lr = 0.00461841
I0412 13:15:56.551489 6200 solver.cpp:218] Iteration 3912 (2.42611 iter/s, 4.94619s/12 iters), loss = 1.69621
I0412 13:15:56.551564 6200 solver.cpp:237] Train net output #0: loss = 1.69621 (* 1 = 1.69621 loss)
I0412 13:15:56.551574 6200 sgd_solver.cpp:105] Iteration 3912, lr = 0.00460744
I0412 13:16:01.451869 6200 solver.cpp:218] Iteration 3924 (2.4489 iter/s, 4.90015s/12 iters), loss = 1.56703
I0412 13:16:01.451915 6200 solver.cpp:237] Train net output #0: loss = 1.56703 (* 1 = 1.56703 loss)
I0412 13:16:01.451925 6200 sgd_solver.cpp:105] Iteration 3924, lr = 0.0045965
I0412 13:16:06.382609 6200 solver.cpp:218] Iteration 3936 (2.43381 iter/s, 4.93055s/12 iters), loss = 1.86576
I0412 13:16:06.382658 6200 solver.cpp:237] Train net output #0: loss = 1.86576 (* 1 = 1.86576 loss)
I0412 13:16:06.382666 6200 sgd_solver.cpp:105] Iteration 3936, lr = 0.00458559
I0412 13:16:09.737738 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:16:11.328984 6200 solver.cpp:218] Iteration 3948 (2.42612 iter/s, 4.94618s/12 iters), loss = 1.78136
I0412 13:16:11.329025 6200 solver.cpp:237] Train net output #0: loss = 1.78136 (* 1 = 1.78136 loss)
I0412 13:16:11.329033 6200 sgd_solver.cpp:105] Iteration 3948, lr = 0.0045747
I0412 13:16:16.227593 6200 solver.cpp:218] Iteration 3960 (2.44977 iter/s, 4.89842s/12 iters), loss = 1.87592
I0412 13:16:16.227633 6200 solver.cpp:237] Train net output #0: loss = 1.87592 (* 1 = 1.87592 loss)
I0412 13:16:16.227643 6200 sgd_solver.cpp:105] Iteration 3960, lr = 0.00456384
I0412 13:16:21.158923 6200 solver.cpp:218] Iteration 3972 (2.43351 iter/s, 4.93114s/12 iters), loss = 1.72793
I0412 13:16:21.158967 6200 solver.cpp:237] Train net output #0: loss = 1.72793 (* 1 = 1.72793 loss)
I0412 13:16:21.158974 6200 sgd_solver.cpp:105] Iteration 3972, lr = 0.00455301
I0412 13:16:23.158425 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3978.caffemodel
I0412 13:16:25.198518 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3978.solverstate
I0412 13:16:26.763355 6200 solver.cpp:330] Iteration 3978, Testing net (#0)
I0412 13:16:26.763494 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:16:29.592303 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:16:31.167665 6200 solver.cpp:397] Test net output #0: accuracy = 0.310049
I0412 13:16:31.167713 6200 solver.cpp:397] Test net output #1: loss = 2.94004 (* 1 = 2.94004 loss)
I0412 13:16:33.073240 6200 solver.cpp:218] Iteration 3984 (1.00722 iter/s, 11.9139s/12 iters), loss = 1.53329
I0412 13:16:33.073298 6200 solver.cpp:237] Train net output #0: loss = 1.53329 (* 1 = 1.53329 loss)
I0412 13:16:33.073310 6200 sgd_solver.cpp:105] Iteration 3984, lr = 0.0045422
I0412 13:16:38.103420 6200 solver.cpp:218] Iteration 3996 (2.3857 iter/s, 5.02996s/12 iters), loss = 1.74968
I0412 13:16:38.103471 6200 solver.cpp:237] Train net output #0: loss = 1.74968 (* 1 = 1.74968 loss)
I0412 13:16:38.103485 6200 sgd_solver.cpp:105] Iteration 3996, lr = 0.00453141
I0412 13:16:43.046255 6200 solver.cpp:218] Iteration 4008 (2.42786 iter/s, 4.94263s/12 iters), loss = 1.57449
I0412 13:16:43.046306 6200 solver.cpp:237] Train net output #0: loss = 1.57449 (* 1 = 1.57449 loss)
I0412 13:16:43.046319 6200 sgd_solver.cpp:105] Iteration 4008, lr = 0.00452066
I0412 13:16:48.207845 6200 solver.cpp:218] Iteration 4020 (2.32496 iter/s, 5.16139s/12 iters), loss = 1.57105
I0412 13:16:48.207892 6200 solver.cpp:237] Train net output #0: loss = 1.57105 (* 1 = 1.57105 loss)
I0412 13:16:48.207903 6200 sgd_solver.cpp:105] Iteration 4020, lr = 0.00450992
I0412 13:16:53.632534 6200 solver.cpp:218] Iteration 4032 (2.21219 iter/s, 5.42448s/12 iters), loss = 1.48176
I0412 13:16:53.632586 6200 solver.cpp:237] Train net output #0: loss = 1.48176 (* 1 = 1.48176 loss)
I0412 13:16:53.632597 6200 sgd_solver.cpp:105] Iteration 4032, lr = 0.00449921
I0412 13:16:58.793462 6200 solver.cpp:218] Iteration 4044 (2.32526 iter/s, 5.16072s/12 iters), loss = 1.55195
I0412 13:16:58.795009 6200 solver.cpp:237] Train net output #0: loss = 1.55195 (* 1 = 1.55195 loss)
I0412 13:16:58.795020 6200 sgd_solver.cpp:105] Iteration 4044, lr = 0.00448853
I0412 13:16:59.302995 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:17:03.927651 6200 solver.cpp:218] Iteration 4056 (2.33805 iter/s, 5.13248s/12 iters), loss = 1.56384
I0412 13:17:03.927737 6200 solver.cpp:237] Train net output #0: loss = 1.56384 (* 1 = 1.56384 loss)
I0412 13:17:03.927765 6200 sgd_solver.cpp:105] Iteration 4056, lr = 0.00447788
I0412 13:17:08.785038 6200 solver.cpp:218] Iteration 4068 (2.47058 iter/s, 4.85716s/12 iters), loss = 1.91526
I0412 13:17:08.785086 6200 solver.cpp:237] Train net output #0: loss = 1.91526 (* 1 = 1.91526 loss)
I0412 13:17:08.785096 6200 sgd_solver.cpp:105] Iteration 4068, lr = 0.00446724
I0412 13:17:13.293713 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4080.caffemodel
I0412 13:17:17.212271 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4080.solverstate
I0412 13:17:20.714160 6200 solver.cpp:330] Iteration 4080, Testing net (#0)
I0412 13:17:20.714188 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:17:23.553887 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:17:25.163828 6200 solver.cpp:397] Test net output #0: accuracy = 0.32598
I0412 13:17:25.163858 6200 solver.cpp:397] Test net output #1: loss = 2.77216 (* 1 = 2.77216 loss)
I0412 13:17:25.248807 6200 solver.cpp:218] Iteration 4080 (0.728896 iter/s, 16.4633s/12 iters), loss = 1.70009
I0412 13:17:25.248849 6200 solver.cpp:237] Train net output #0: loss = 1.70009 (* 1 = 1.70009 loss)
I0412 13:17:25.248858 6200 sgd_solver.cpp:105] Iteration 4080, lr = 0.00445664
I0412 13:17:29.457474 6200 solver.cpp:218] Iteration 4092 (2.85137 iter/s, 4.2085s/12 iters), loss = 1.68407
I0412 13:17:29.457568 6200 solver.cpp:237] Train net output #0: loss = 1.68407 (* 1 = 1.68407 loss)
I0412 13:17:29.457578 6200 sgd_solver.cpp:105] Iteration 4092, lr = 0.00444606
I0412 13:17:34.240582 6200 solver.cpp:218] Iteration 4104 (2.50896 iter/s, 4.78287s/12 iters), loss = 1.43667
I0412 13:17:34.240629 6200 solver.cpp:237] Train net output #0: loss = 1.43667 (* 1 = 1.43667 loss)
I0412 13:17:34.240639 6200 sgd_solver.cpp:105] Iteration 4104, lr = 0.0044355
I0412 13:17:39.121121 6200 solver.cpp:218] Iteration 4116 (2.45884 iter/s, 4.88034s/12 iters), loss = 1.52072
I0412 13:17:39.121174 6200 solver.cpp:237] Train net output #0: loss = 1.52072 (* 1 = 1.52072 loss)
I0412 13:17:39.121186 6200 sgd_solver.cpp:105] Iteration 4116, lr = 0.00442497
I0412 13:17:43.952021 6200 solver.cpp:218] Iteration 4128 (2.48411 iter/s, 4.8307s/12 iters), loss = 1.45828
I0412 13:17:43.952067 6200 solver.cpp:237] Train net output #0: loss = 1.45828 (* 1 = 1.45828 loss)
I0412 13:17:43.952075 6200 sgd_solver.cpp:105] Iteration 4128, lr = 0.00441447
I0412 13:17:48.810750 6200 solver.cpp:218] Iteration 4140 (2.46988 iter/s, 4.85853s/12 iters), loss = 1.40583
I0412 13:17:48.810794 6200 solver.cpp:237] Train net output #0: loss = 1.40583 (* 1 = 1.40583 loss)
I0412 13:17:48.810803 6200 sgd_solver.cpp:105] Iteration 4140, lr = 0.00440398
I0412 13:17:51.414623 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:17:53.720487 6200 solver.cpp:218] Iteration 4152 (2.44422 iter/s, 4.90955s/12 iters), loss = 1.33717
I0412 13:17:53.720533 6200 solver.cpp:237] Train net output #0: loss = 1.33717 (* 1 = 1.33717 loss)
I0412 13:17:53.720543 6200 sgd_solver.cpp:105] Iteration 4152, lr = 0.00439353
I0412 13:17:54.922021 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:17:58.736858 6200 solver.cpp:218] Iteration 4164 (2.39226 iter/s, 5.01617s/12 iters), loss = 1.91626
I0412 13:17:58.736898 6200 solver.cpp:237] Train net output #0: loss = 1.91626 (* 1 = 1.91626 loss)
I0412 13:17:58.736907 6200 sgd_solver.cpp:105] Iteration 4164, lr = 0.0043831
I0412 13:18:03.668120 6200 solver.cpp:218] Iteration 4176 (2.43355 iter/s, 4.93108s/12 iters), loss = 1.70846
I0412 13:18:03.668243 6200 solver.cpp:237] Train net output #0: loss = 1.70846 (* 1 = 1.70846 loss)
I0412 13:18:03.668253 6200 sgd_solver.cpp:105] Iteration 4176, lr = 0.00437269
I0412 13:18:05.677134 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4182.caffemodel
I0412 13:18:07.749142 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4182.solverstate
I0412 13:18:10.179105 6200 solver.cpp:330] Iteration 4182, Testing net (#0)
I0412 13:18:10.179133 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:18:12.992367 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:18:14.654327 6200 solver.cpp:397] Test net output #0: accuracy = 0.327206
I0412 13:18:14.654381 6200 solver.cpp:397] Test net output #1: loss = 2.78056 (* 1 = 2.78056 loss)
I0412 13:18:16.445446 6200 solver.cpp:218] Iteration 4188 (0.939199 iter/s, 12.7768s/12 iters), loss = 1.24034
I0412 13:18:16.445500 6200 solver.cpp:237] Train net output #0: loss = 1.24034 (* 1 = 1.24034 loss)
I0412 13:18:16.445513 6200 sgd_solver.cpp:105] Iteration 4188, lr = 0.00436231
I0412 13:18:21.308205 6200 solver.cpp:218] Iteration 4200 (2.46784 iter/s, 4.86256s/12 iters), loss = 1.42011
I0412 13:18:21.308256 6200 solver.cpp:237] Train net output #0: loss = 1.42011 (* 1 = 1.42011 loss)
I0412 13:18:21.308269 6200 sgd_solver.cpp:105] Iteration 4200, lr = 0.00435195
I0412 13:18:26.257997 6200 solver.cpp:218] Iteration 4212 (2.42445 iter/s, 4.94959s/12 iters), loss = 1.35074
I0412 13:18:26.258062 6200 solver.cpp:237] Train net output #0: loss = 1.35074 (* 1 = 1.35074 loss)
I0412 13:18:26.258076 6200 sgd_solver.cpp:105] Iteration 4212, lr = 0.00434162
I0412 13:18:31.132208 6200 solver.cpp:218] Iteration 4224 (2.46204 iter/s, 4.87401s/12 iters), loss = 1.39763
I0412 13:18:31.132256 6200 solver.cpp:237] Train net output #0: loss = 1.39763 (* 1 = 1.39763 loss)
I0412 13:18:31.132267 6200 sgd_solver.cpp:105] Iteration 4224, lr = 0.00433131
I0412 13:18:36.336055 6200 solver.cpp:218] Iteration 4236 (2.30608 iter/s, 5.20364s/12 iters), loss = 1.08738
I0412 13:18:36.336210 6200 solver.cpp:237] Train net output #0: loss = 1.08738 (* 1 = 1.08738 loss)
I0412 13:18:36.336223 6200 sgd_solver.cpp:105] Iteration 4236, lr = 0.00432103
I0412 13:18:40.993713 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:18:41.223757 6200 solver.cpp:218] Iteration 4248 (2.45529 iter/s, 4.8874s/12 iters), loss = 1.57765
I0412 13:18:41.223819 6200 solver.cpp:237] Train net output #0: loss = 1.57765 (* 1 = 1.57765 loss)
I0412 13:18:41.223834 6200 sgd_solver.cpp:105] Iteration 4248, lr = 0.00431077
I0412 13:18:46.184885 6200 solver.cpp:218] Iteration 4260 (2.41891 iter/s, 4.96092s/12 iters), loss = 1.48112
I0412 13:18:46.184935 6200 solver.cpp:237] Train net output #0: loss = 1.48112 (* 1 = 1.48112 loss)
I0412 13:18:46.184947 6200 sgd_solver.cpp:105] Iteration 4260, lr = 0.00430053
I0412 13:18:51.048982 6200 solver.cpp:218] Iteration 4272 (2.46715 iter/s, 4.8639s/12 iters), loss = 1.73731
I0412 13:18:51.049027 6200 solver.cpp:237] Train net output #0: loss = 1.73731 (* 1 = 1.73731 loss)
I0412 13:18:51.049036 6200 sgd_solver.cpp:105] Iteration 4272, lr = 0.00429032
I0412 13:18:55.471868 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4284.caffemodel
I0412 13:18:58.594324 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4284.solverstate
I0412 13:19:01.374639 6200 solver.cpp:330] Iteration 4284, Testing net (#0)
I0412 13:19:01.374668 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:19:04.214464 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:19:05.900938 6200 solver.cpp:397] Test net output #0: accuracy = 0.344976
I0412 13:19:05.900987 6200 solver.cpp:397] Test net output #1: loss = 2.755 (* 1 = 2.755 loss)
I0412 13:19:05.986824 6200 solver.cpp:218] Iteration 4284 (0.803354 iter/s, 14.9374s/12 iters), loss = 1.54423
I0412 13:19:05.986871 6200 solver.cpp:237] Train net output #0: loss = 1.54423 (* 1 = 1.54423 loss)
I0412 13:19:05.986882 6200 sgd_solver.cpp:105] Iteration 4284, lr = 0.00428014
I0412 13:19:10.172178 6200 solver.cpp:218] Iteration 4296 (2.86726 iter/s, 4.18518s/12 iters), loss = 1.59636
I0412 13:19:10.172255 6200 solver.cpp:237] Train net output #0: loss = 1.59636 (* 1 = 1.59636 loss)
I0412 13:19:10.172267 6200 sgd_solver.cpp:105] Iteration 4296, lr = 0.00426998
I0412 13:19:15.254230 6200 solver.cpp:218] Iteration 4308 (2.36136 iter/s, 5.08182s/12 iters), loss = 1.37346
I0412 13:19:15.254287 6200 solver.cpp:237] Train net output #0: loss = 1.37346 (* 1 = 1.37346 loss)
I0412 13:19:15.254298 6200 sgd_solver.cpp:105] Iteration 4308, lr = 0.00425984
I0412 13:19:20.299073 6200 solver.cpp:218] Iteration 4320 (2.37877 iter/s, 5.04463s/12 iters), loss = 1.1776
I0412 13:19:20.299132 6200 solver.cpp:237] Train net output #0: loss = 1.1776 (* 1 = 1.1776 loss)
I0412 13:19:20.299142 6200 sgd_solver.cpp:105] Iteration 4320, lr = 0.00424972
I0412 13:19:25.379596 6200 solver.cpp:218] Iteration 4332 (2.36206 iter/s, 5.0803s/12 iters), loss = 1.36115
I0412 13:19:25.379657 6200 solver.cpp:237] Train net output #0: loss = 1.36115 (* 1 = 1.36115 loss)
I0412 13:19:25.379670 6200 sgd_solver.cpp:105] Iteration 4332, lr = 0.00423964
I0412 13:19:30.402947 6200 solver.cpp:218] Iteration 4344 (2.38894 iter/s, 5.02314s/12 iters), loss = 1.24255
I0412 13:19:30.403000 6200 solver.cpp:237] Train net output #0: loss = 1.24255 (* 1 = 1.24255 loss)
I0412 13:19:30.403010 6200 sgd_solver.cpp:105] Iteration 4344, lr = 0.00422957
I0412 13:19:32.334851 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:19:35.425356 6200 solver.cpp:218] Iteration 4356 (2.38939 iter/s, 5.0222s/12 iters), loss = 1.4675
I0412 13:19:35.425410 6200 solver.cpp:237] Train net output #0: loss = 1.4675 (* 1 = 1.4675 loss)
I0412 13:19:35.425422 6200 sgd_solver.cpp:105] Iteration 4356, lr = 0.00421953
I0412 13:19:40.321314 6200 solver.cpp:218] Iteration 4368 (2.45111 iter/s, 4.89575s/12 iters), loss = 1.33193
I0412 13:19:40.321483 6200 solver.cpp:237] Train net output #0: loss = 1.33193 (* 1 = 1.33193 loss)
I0412 13:19:40.321501 6200 sgd_solver.cpp:105] Iteration 4368, lr = 0.00420951
I0412 13:19:45.547155 6200 solver.cpp:218] Iteration 4380 (2.29642 iter/s, 5.22552s/12 iters), loss = 1.33101
I0412 13:19:45.547206 6200 solver.cpp:237] Train net output #0: loss = 1.33101 (* 1 = 1.33101 loss)
I0412 13:19:45.547217 6200 sgd_solver.cpp:105] Iteration 4380, lr = 0.00419952
I0412 13:19:47.498585 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4386.caffemodel
I0412 13:19:49.645618 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4386.solverstate
I0412 13:19:54.202057 6200 solver.cpp:330] Iteration 4386, Testing net (#0)
I0412 13:19:54.202078 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:19:57.008724 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:19:58.791054 6200 solver.cpp:397] Test net output #0: accuracy = 0.335784
I0412 13:19:58.791090 6200 solver.cpp:397] Test net output #1: loss = 2.82145 (* 1 = 2.82145 loss)
I0412 13:20:00.603735 6200 solver.cpp:218] Iteration 4392 (0.797019 iter/s, 15.0561s/12 iters), loss = 1.04072
I0412 13:20:00.603788 6200 solver.cpp:237] Train net output #0: loss = 1.04072 (* 1 = 1.04072 loss)
I0412 13:20:00.603801 6200 sgd_solver.cpp:105] Iteration 4392, lr = 0.00418954
I0412 13:20:05.855144 6200 solver.cpp:218] Iteration 4404 (2.28519 iter/s, 5.2512s/12 iters), loss = 1.28557
I0412 13:20:05.855187 6200 solver.cpp:237] Train net output #0: loss = 1.28557 (* 1 = 1.28557 loss)
I0412 13:20:05.855197 6200 sgd_solver.cpp:105] Iteration 4404, lr = 0.0041796
I0412 13:20:10.819720 6200 solver.cpp:218] Iteration 4416 (2.41722 iter/s, 4.96438s/12 iters), loss = 1.11276
I0412 13:20:10.819856 6200 solver.cpp:237] Train net output #0: loss = 1.11276 (* 1 = 1.11276 loss)
I0412 13:20:10.819870 6200 sgd_solver.cpp:105] Iteration 4416, lr = 0.00416967
I0412 13:20:15.687520 6200 solver.cpp:218] Iteration 4428 (2.46532 iter/s, 4.86752s/12 iters), loss = 1.20107
I0412 13:20:15.687574 6200 solver.cpp:237] Train net output #0: loss = 1.20107 (* 1 = 1.20107 loss)
I0412 13:20:15.687585 6200 sgd_solver.cpp:105] Iteration 4428, lr = 0.00415977
I0412 13:20:20.691205 6200 solver.cpp:218] Iteration 4440 (2.39833 iter/s, 5.00348s/12 iters), loss = 1.18679
I0412 13:20:20.691254 6200 solver.cpp:237] Train net output #0: loss = 1.18679 (* 1 = 1.18679 loss)
I0412 13:20:20.691264 6200 sgd_solver.cpp:105] Iteration 4440, lr = 0.0041499
I0412 13:20:24.759704 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:20:25.704932 6200 solver.cpp:218] Iteration 4452 (2.39353 iter/s, 5.01352s/12 iters), loss = 1.13738
I0412 13:20:25.704985 6200 solver.cpp:237] Train net output #0: loss = 1.13738 (* 1 = 1.13738 loss)
I0412 13:20:25.704996 6200 sgd_solver.cpp:105] Iteration 4452, lr = 0.00414005
I0412 13:20:30.620252 6200 solver.cpp:218] Iteration 4464 (2.44145 iter/s, 4.91512s/12 iters), loss = 1.1565
I0412 13:20:30.620296 6200 solver.cpp:237] Train net output #0: loss = 1.1565 (* 1 = 1.1565 loss)
I0412 13:20:30.620304 6200 sgd_solver.cpp:105] Iteration 4464, lr = 0.00413022
I0412 13:20:35.658291 6200 solver.cpp:218] Iteration 4476 (2.38198 iter/s, 5.03783s/12 iters), loss = 1.37167
I0412 13:20:35.658345 6200 solver.cpp:237] Train net output #0: loss = 1.37167 (* 1 = 1.37167 loss)
I0412 13:20:35.658356 6200 sgd_solver.cpp:105] Iteration 4476, lr = 0.00412041
I0412 13:20:40.142844 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4488.caffemodel
I0412 13:20:42.164866 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4488.solverstate
I0412 13:20:43.750258 6200 solver.cpp:330] Iteration 4488, Testing net (#0)
I0412 13:20:43.750280 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:20:46.404346 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:20:48.304252 6200 solver.cpp:397] Test net output #0: accuracy = 0.36152
I0412 13:20:48.304280 6200 solver.cpp:397] Test net output #1: loss = 2.86395 (* 1 = 2.86395 loss)
I0412 13:20:48.388660 6200 solver.cpp:218] Iteration 4488 (0.942659 iter/s, 12.73s/12 iters), loss = 1.29828
I0412 13:20:48.388706 6200 solver.cpp:237] Train net output #0: loss = 1.29828 (* 1 = 1.29828 loss)
I0412 13:20:48.388715 6200 sgd_solver.cpp:105] Iteration 4488, lr = 0.00411063
I0412 13:20:52.476598 6200 solver.cpp:218] Iteration 4500 (2.93559 iter/s, 4.08777s/12 iters), loss = 1.07058
I0412 13:20:52.476639 6200 solver.cpp:237] Train net output #0: loss = 1.07058 (* 1 = 1.07058 loss)
I0412 13:20:52.476647 6200 sgd_solver.cpp:105] Iteration 4500, lr = 0.00410087
I0412 13:20:57.412101 6200 solver.cpp:218] Iteration 4512 (2.43146 iter/s, 4.93531s/12 iters), loss = 1.09604
I0412 13:20:57.412142 6200 solver.cpp:237] Train net output #0: loss = 1.09604 (* 1 = 1.09604 loss)
I0412 13:20:57.412150 6200 sgd_solver.cpp:105] Iteration 4512, lr = 0.00409113
I0412 13:21:02.411872 6200 solver.cpp:218] Iteration 4524 (2.4002 iter/s, 4.99958s/12 iters), loss = 1.29068
I0412 13:21:02.411908 6200 solver.cpp:237] Train net output #0: loss = 1.29068 (* 1 = 1.29068 loss)
I0412 13:21:02.411916 6200 sgd_solver.cpp:105] Iteration 4524, lr = 0.00408142
I0412 13:21:07.489591 6200 solver.cpp:218] Iteration 4536 (2.36336 iter/s, 5.07752s/12 iters), loss = 1.10768
I0412 13:21:07.489645 6200 solver.cpp:237] Train net output #0: loss = 1.10768 (* 1 = 1.10768 loss)
I0412 13:21:07.489658 6200 sgd_solver.cpp:105] Iteration 4536, lr = 0.00407173
I0412 13:21:12.520031 6200 solver.cpp:218] Iteration 4548 (2.38557 iter/s, 5.03023s/12 iters), loss = 1.3241
I0412 13:21:12.520195 6200 solver.cpp:237] Train net output #0: loss = 1.3241 (* 1 = 1.3241 loss)
I0412 13:21:12.520208 6200 sgd_solver.cpp:105] Iteration 4548, lr = 0.00406206
I0412 13:21:13.787319 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:21:17.495405 6200 solver.cpp:218] Iteration 4560 (2.41203 iter/s, 4.97507s/12 iters), loss = 1.17322
I0412 13:21:17.495450 6200 solver.cpp:237] Train net output #0: loss = 1.17322 (* 1 = 1.17322 loss)
I0412 13:21:17.495460 6200 sgd_solver.cpp:105] Iteration 4560, lr = 0.00405242
I0412 13:21:22.418452 6200 solver.cpp:218] Iteration 4572 (2.43761 iter/s, 4.92286s/12 iters), loss = 1.15445
I0412 13:21:22.418493 6200 solver.cpp:237] Train net output #0: loss = 1.15445 (* 1 = 1.15445 loss)
I0412 13:21:22.418502 6200 sgd_solver.cpp:105] Iteration 4572, lr = 0.0040428
I0412 13:21:27.289098 6200 solver.cpp:218] Iteration 4584 (2.46383 iter/s, 4.87046s/12 iters), loss = 1.06203
I0412 13:21:27.289149 6200 solver.cpp:237] Train net output #0: loss = 1.06203 (* 1 = 1.06203 loss)
I0412 13:21:27.289161 6200 sgd_solver.cpp:105] Iteration 4584, lr = 0.0040332
I0412 13:21:29.292054 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4590.caffemodel
I0412 13:21:31.382745 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4590.solverstate
I0412 13:21:34.584173 6200 solver.cpp:330] Iteration 4590, Testing net (#0)
I0412 13:21:34.584201 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:21:37.187237 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:21:39.002621 6200 solver.cpp:397] Test net output #0: accuracy = 0.359681
I0412 13:21:39.002671 6200 solver.cpp:397] Test net output #1: loss = 2.86241 (* 1 = 2.86241 loss)
I0412 13:21:40.810122 6200 solver.cpp:218] Iteration 4596 (0.887535 iter/s, 13.5206s/12 iters), loss = 1.29101
I0412 13:21:40.810175 6200 solver.cpp:237] Train net output #0: loss = 1.29101 (* 1 = 1.29101 loss)
I0412 13:21:40.810189 6200 sgd_solver.cpp:105] Iteration 4596, lr = 0.00402362
I0412 13:21:45.746210 6200 solver.cpp:218] Iteration 4608 (2.43117 iter/s, 4.93589s/12 iters), loss = 1.00402
I0412 13:21:45.746356 6200 solver.cpp:237] Train net output #0: loss = 1.00402 (* 1 = 1.00402 loss)
I0412 13:21:45.746369 6200 sgd_solver.cpp:105] Iteration 4608, lr = 0.00401407
I0412 13:21:50.689853 6200 solver.cpp:218] Iteration 4620 (2.4275 iter/s, 4.94335s/12 iters), loss = 1.42075
I0412 13:21:50.689898 6200 solver.cpp:237] Train net output #0: loss = 1.42075 (* 1 = 1.42075 loss)
I0412 13:21:50.689908 6200 sgd_solver.cpp:105] Iteration 4620, lr = 0.00400454
I0412 13:21:55.789363 6200 solver.cpp:218] Iteration 4632 (2.35326 iter/s, 5.09932s/12 iters), loss = 0.976893
I0412 13:21:55.789403 6200 solver.cpp:237] Train net output #0: loss = 0.976893 (* 1 = 0.976893 loss)
I0412 13:21:55.789412 6200 sgd_solver.cpp:105] Iteration 4632, lr = 0.00399503
I0412 13:22:00.679875 6200 solver.cpp:218] Iteration 4644 (2.45382 iter/s, 4.89033s/12 iters), loss = 0.878939
I0412 13:22:00.679926 6200 solver.cpp:237] Train net output #0: loss = 0.878939 (* 1 = 0.878939 loss)
I0412 13:22:00.679936 6200 sgd_solver.cpp:105] Iteration 4644, lr = 0.00398555
I0412 13:22:04.017045 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:22:05.596501 6200 solver.cpp:218] Iteration 4656 (2.4408 iter/s, 4.91642s/12 iters), loss = 1.16417
I0412 13:22:05.596557 6200 solver.cpp:237] Train net output #0: loss = 1.16417 (* 1 = 1.16417 loss)
I0412 13:22:05.596570 6200 sgd_solver.cpp:105] Iteration 4656, lr = 0.00397608
I0412 13:22:10.560669 6200 solver.cpp:218] Iteration 4668 (2.41742 iter/s, 4.96396s/12 iters), loss = 1.22338
I0412 13:22:10.560727 6200 solver.cpp:237] Train net output #0: loss = 1.22338 (* 1 = 1.22338 loss)
I0412 13:22:10.560739 6200 sgd_solver.cpp:105] Iteration 4668, lr = 0.00396664
I0412 13:22:15.627908 6200 solver.cpp:218] Iteration 4680 (2.36825 iter/s, 5.06703s/12 iters), loss = 1.43541
I0412 13:22:15.627948 6200 solver.cpp:237] Train net output #0: loss = 1.43541 (* 1 = 1.43541 loss)
I0412 13:22:15.627956 6200 sgd_solver.cpp:105] Iteration 4680, lr = 0.00395723
I0412 13:22:20.096585 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4692.caffemodel
I0412 13:22:25.357482 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4692.solverstate
I0412 13:22:30.704747 6200 solver.cpp:330] Iteration 4692, Testing net (#0)
I0412 13:22:30.704773 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:22:33.470161 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:22:35.319021 6200 solver.cpp:397] Test net output #0: accuracy = 0.352328
I0412 13:22:35.319057 6200 solver.cpp:397] Test net output #1: loss = 2.93509 (* 1 = 2.93509 loss)
I0412 13:22:35.404237 6200 solver.cpp:218] Iteration 4692 (0.606804 iter/s, 19.7757s/12 iters), loss = 1.28809
I0412 13:22:35.404290 6200 solver.cpp:237] Train net output #0: loss = 1.28809 (* 1 = 1.28809 loss)
I0412 13:22:35.404301 6200 sgd_solver.cpp:105] Iteration 4692, lr = 0.00394783
I0412 13:22:39.373733 6200 solver.cpp:218] Iteration 4704 (3.02319 iter/s, 3.96932s/12 iters), loss = 0.882132
I0412 13:22:39.373771 6200 solver.cpp:237] Train net output #0: loss = 0.882132 (* 1 = 0.882132 loss)
I0412 13:22:39.373780 6200 sgd_solver.cpp:105] Iteration 4704, lr = 0.00393846
I0412 13:22:44.335218 6200 solver.cpp:218] Iteration 4716 (2.41872 iter/s, 4.96129s/12 iters), loss = 0.983371
I0412 13:22:44.335265 6200 solver.cpp:237] Train net output #0: loss = 0.983371 (* 1 = 0.983371 loss)
I0412 13:22:44.335276 6200 sgd_solver.cpp:105] Iteration 4716, lr = 0.00392911
I0412 13:22:49.209497 6200 solver.cpp:218] Iteration 4728 (2.462 iter/s, 4.87408s/12 iters), loss = 1.19672
I0412 13:22:49.209553 6200 solver.cpp:237] Train net output #0: loss = 1.19672 (* 1 = 1.19672 loss)
I0412 13:22:49.209566 6200 sgd_solver.cpp:105] Iteration 4728, lr = 0.00391978
I0412 13:22:54.131762 6200 solver.cpp:218] Iteration 4740 (2.438 iter/s, 4.92206s/12 iters), loss = 0.890768
I0412 13:22:54.131886 6200 solver.cpp:237] Train net output #0: loss = 0.890768 (* 1 = 0.890768 loss)
I0412 13:22:54.131896 6200 sgd_solver.cpp:105] Iteration 4740, lr = 0.00391047
I0412 13:22:59.006036 6200 solver.cpp:218] Iteration 4752 (2.46204 iter/s, 4.874s/12 iters), loss = 1.00948
I0412 13:22:59.006081 6200 solver.cpp:237] Train net output #0: loss = 1.00948 (* 1 = 1.00948 loss)
I0412 13:22:59.006091 6200 sgd_solver.cpp:105] Iteration 4752, lr = 0.00390119
I0412 13:22:59.563027 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:23:03.995215 6200 solver.cpp:218] Iteration 4764 (2.4053 iter/s, 4.98898s/12 iters), loss = 0.83105
I0412 13:23:03.995270 6200 solver.cpp:237] Train net output #0: loss = 0.83105 (* 1 = 0.83105 loss)
I0412 13:23:03.995282 6200 sgd_solver.cpp:105] Iteration 4764, lr = 0.00389193
I0412 13:23:09.090158 6200 solver.cpp:218] Iteration 4776 (2.35537 iter/s, 5.09473s/12 iters), loss = 1.07211
I0412 13:23:09.090224 6200 solver.cpp:237] Train net output #0: loss = 1.07211 (* 1 = 1.07211 loss)
I0412 13:23:09.090235 6200 sgd_solver.cpp:105] Iteration 4776, lr = 0.00388269
I0412 13:23:14.015450 6200 solver.cpp:218] Iteration 4788 (2.43651 iter/s, 4.92508s/12 iters), loss = 1.50812
I0412 13:23:14.015502 6200 solver.cpp:237] Train net output #0: loss = 1.50812 (* 1 = 1.50812 loss)
I0412 13:23:14.015513 6200 sgd_solver.cpp:105] Iteration 4788, lr = 0.00387347
I0412 13:23:16.057770 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4794.caffemodel
I0412 13:23:19.591933 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4794.solverstate
I0412 13:23:23.725173 6200 solver.cpp:330] Iteration 4794, Testing net (#0)
I0412 13:23:23.725196 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:23:26.252054 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:23:28.150799 6200 solver.cpp:397] Test net output #0: accuracy = 0.38174
I0412 13:23:28.150828 6200 solver.cpp:397] Test net output #1: loss = 2.6448 (* 1 = 2.6448 loss)
I0412 13:23:29.831564 6200 solver.cpp:218] Iteration 4800 (0.758744 iter/s, 15.8156s/12 iters), loss = 0.997368
I0412 13:23:29.831606 6200 solver.cpp:237] Train net output #0: loss = 0.997368 (* 1 = 0.997368 loss)
I0412 13:23:29.831615 6200 sgd_solver.cpp:105] Iteration 4800, lr = 0.00386427
I0412 13:23:34.712093 6200 solver.cpp:218] Iteration 4812 (2.45885 iter/s, 4.88033s/12 iters), loss = 1.08253
I0412 13:23:34.712141 6200 solver.cpp:237] Train net output #0: loss = 1.08253 (* 1 = 1.08253 loss)
I0412 13:23:34.712149 6200 sgd_solver.cpp:105] Iteration 4812, lr = 0.0038551
I0412 13:23:39.611907 6200 solver.cpp:218] Iteration 4824 (2.44917 iter/s, 4.89961s/12 iters), loss = 0.794042
I0412 13:23:39.611963 6200 solver.cpp:237] Train net output #0: loss = 0.794042 (* 1 = 0.794042 loss)
I0412 13:23:39.611972 6200 sgd_solver.cpp:105] Iteration 4824, lr = 0.00384594
I0412 13:23:44.538044 6200 solver.cpp:218] Iteration 4836 (2.43608 iter/s, 4.92594s/12 iters), loss = 0.936843
I0412 13:23:44.538085 6200 solver.cpp:237] Train net output #0: loss = 0.936843 (* 1 = 0.936843 loss)
I0412 13:23:44.538094 6200 sgd_solver.cpp:105] Iteration 4836, lr = 0.00383681
I0412 13:23:46.099050 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:23:49.372133 6200 solver.cpp:218] Iteration 4848 (2.48247 iter/s, 4.8339s/12 iters), loss = 0.775997
I0412 13:23:49.372184 6200 solver.cpp:237] Train net output #0: loss = 0.775997 (* 1 = 0.775997 loss)
I0412 13:23:49.372196 6200 sgd_solver.cpp:105] Iteration 4848, lr = 0.0038277
I0412 13:23:52.138597 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:23:54.572142 6200 solver.cpp:218] Iteration 4860 (2.30778 iter/s, 5.19981s/12 iters), loss = 0.833873
I0412 13:23:54.572183 6200 solver.cpp:237] Train net output #0: loss = 0.833873 (* 1 = 0.833873 loss)
I0412 13:23:54.572192 6200 sgd_solver.cpp:105] Iteration 4860, lr = 0.00381862
I0412 13:23:59.485922 6200 solver.cpp:218] Iteration 4872 (2.44221 iter/s, 4.91359s/12 iters), loss = 1.19292
I0412 13:23:59.486078 6200 solver.cpp:237] Train net output #0: loss = 1.19292 (* 1 = 1.19292 loss)
I0412 13:23:59.486090 6200 sgd_solver.cpp:105] Iteration 4872, lr = 0.00380955
I0412 13:24:04.569432 6200 solver.cpp:218] Iteration 4884 (2.36072 iter/s, 5.0832s/12 iters), loss = 1.33538
I0412 13:24:04.569484 6200 solver.cpp:237] Train net output #0: loss = 1.33538 (* 1 = 1.33538 loss)
I0412 13:24:04.569495 6200 sgd_solver.cpp:105] Iteration 4884, lr = 0.0038005
I0412 13:24:08.956952 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4896.caffemodel
I0412 13:24:15.878891 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4896.solverstate
I0412 13:24:17.446784 6200 solver.cpp:330] Iteration 4896, Testing net (#0)
I0412 13:24:17.446810 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:24:19.960629 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:24:21.888500 6200 solver.cpp:397] Test net output #0: accuracy = 0.386029
I0412 13:24:21.888540 6200 solver.cpp:397] Test net output #1: loss = 2.74874 (* 1 = 2.74874 loss)
I0412 13:24:21.973450 6200 solver.cpp:218] Iteration 4896 (0.689517 iter/s, 17.4035s/12 iters), loss = 1.08259
I0412 13:24:21.973493 6200 solver.cpp:237] Train net output #0: loss = 1.08259 (* 1 = 1.08259 loss)
I0412 13:24:21.973502 6200 sgd_solver.cpp:105] Iteration 4896, lr = 0.00379148
I0412 13:24:26.090812 6200 solver.cpp:218] Iteration 4908 (2.91461 iter/s, 4.11719s/12 iters), loss = 1.01546
I0412 13:24:26.090854 6200 solver.cpp:237] Train net output #0: loss = 1.01546 (* 1 = 1.01546 loss)
I0412 13:24:26.090863 6200 sgd_solver.cpp:105] Iteration 4908, lr = 0.00378248
I0412 13:24:31.033913 6200 solver.cpp:218] Iteration 4920 (2.42772 iter/s, 4.94291s/12 iters), loss = 0.921445
I0412 13:24:31.034030 6200 solver.cpp:237] Train net output #0: loss = 0.921445 (* 1 = 0.921445 loss)
I0412 13:24:31.034042 6200 sgd_solver.cpp:105] Iteration 4920, lr = 0.0037735
I0412 13:24:36.047780 6200 solver.cpp:218] Iteration 4932 (2.39349 iter/s, 5.0136s/12 iters), loss = 0.852309
I0412 13:24:36.047823 6200 solver.cpp:237] Train net output #0: loss = 0.852309 (* 1 = 0.852309 loss)
I0412 13:24:36.047832 6200 sgd_solver.cpp:105] Iteration 4932, lr = 0.00376454
I0412 13:24:41.121279 6200 solver.cpp:218] Iteration 4944 (2.36533 iter/s, 5.07329s/12 iters), loss = 1.05436
I0412 13:24:41.121331 6200 solver.cpp:237] Train net output #0: loss = 1.05436 (* 1 = 1.05436 loss)
I0412 13:24:41.121342 6200 sgd_solver.cpp:105] Iteration 4944, lr = 0.0037556
I0412 13:24:45.788722 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:24:45.983234 6200 solver.cpp:218] Iteration 4956 (2.46824 iter/s, 4.86176s/12 iters), loss = 1.15806
I0412 13:24:45.983284 6200 solver.cpp:237] Train net output #0: loss = 1.15806 (* 1 = 1.15806 loss)
I0412 13:24:45.983296 6200 sgd_solver.cpp:105] Iteration 4956, lr = 0.00374669
I0412 13:24:50.962064 6200 solver.cpp:218] Iteration 4968 (2.4103 iter/s, 4.97863s/12 iters), loss = 1.0577
I0412 13:24:50.962112 6200 solver.cpp:237] Train net output #0: loss = 1.0577 (* 1 = 1.0577 loss)
I0412 13:24:50.962124 6200 sgd_solver.cpp:105] Iteration 4968, lr = 0.00373779
I0412 13:24:55.925103 6200 solver.cpp:218] Iteration 4980 (2.41797 iter/s, 4.96285s/12 iters), loss = 1.16808
I0412 13:24:55.925143 6200 solver.cpp:237] Train net output #0: loss = 1.16808 (* 1 = 1.16808 loss)
I0412 13:24:55.925151 6200 sgd_solver.cpp:105] Iteration 4980, lr = 0.00372892
I0412 13:25:00.814808 6200 solver.cpp:218] Iteration 4992 (2.45423 iter/s, 4.88952s/12 iters), loss = 1.04107
I0412 13:25:00.814853 6200 solver.cpp:237] Train net output #0: loss = 1.04107 (* 1 = 1.04107 loss)
I0412 13:25:00.814867 6200 sgd_solver.cpp:105] Iteration 4992, lr = 0.00372006
I0412 13:25:02.965224 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4998.caffemodel
I0412 13:25:05.807942 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4998.solverstate
I0412 13:25:07.932971 6200 solver.cpp:330] Iteration 4998, Testing net (#0)
I0412 13:25:07.932997 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:25:10.529978 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:25:12.533320 6200 solver.cpp:397] Test net output #0: accuracy = 0.386642
I0412 13:25:12.533370 6200 solver.cpp:397] Test net output #1: loss = 2.82778 (* 1 = 2.82778 loss)
I0412 13:25:14.397444 6200 solver.cpp:218] Iteration 5004 (0.883509 iter/s, 13.5822s/12 iters), loss = 0.681317
I0412 13:25:14.397487 6200 solver.cpp:237] Train net output #0: loss = 0.681317 (* 1 = 0.681317 loss)
I0412 13:25:14.397496 6200 sgd_solver.cpp:105] Iteration 5004, lr = 0.00371123
I0412 13:25:19.405947 6200 solver.cpp:218] Iteration 5016 (2.39602 iter/s, 5.00831s/12 iters), loss = 0.82166
I0412 13:25:19.406018 6200 solver.cpp:237] Train net output #0: loss = 0.82166 (* 1 = 0.82166 loss)
I0412 13:25:19.406033 6200 sgd_solver.cpp:105] Iteration 5016, lr = 0.00370242
I0412 13:25:24.301154 6200 solver.cpp:218] Iteration 5028 (2.45149 iter/s, 4.89499s/12 iters), loss = 0.959226
I0412 13:25:24.301208 6200 solver.cpp:237] Train net output #0: loss = 0.959226 (* 1 = 0.959226 loss)
I0412 13:25:24.301219 6200 sgd_solver.cpp:105] Iteration 5028, lr = 0.00369363
I0412 13:25:29.176057 6200 solver.cpp:218] Iteration 5040 (2.46169 iter/s, 4.8747s/12 iters), loss = 0.853088
I0412 13:25:29.176110 6200 solver.cpp:237] Train net output #0: loss = 0.853088 (* 1 = 0.853088 loss)
I0412 13:25:29.176122 6200 sgd_solver.cpp:105] Iteration 5040, lr = 0.00368486
I0412 13:25:34.117532 6200 solver.cpp:218] Iteration 5052 (2.42852 iter/s, 4.94127s/12 iters), loss = 0.873383
I0412 13:25:34.117658 6200 solver.cpp:237] Train net output #0: loss = 0.873383 (* 1 = 0.873383 loss)
I0412 13:25:34.117671 6200 sgd_solver.cpp:105] Iteration 5052, lr = 0.00367611
I0412 13:25:36.027395 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:25:39.068697 6200 solver.cpp:218] Iteration 5064 (2.4238 iter/s, 4.95089s/12 iters), loss = 0.847218
I0412 13:25:39.068748 6200 solver.cpp:237] Train net output #0: loss = 0.847218 (* 1 = 0.847218 loss)
I0412 13:25:39.068759 6200 sgd_solver.cpp:105] Iteration 5064, lr = 0.00366738
I0412 13:25:44.084328 6200 solver.cpp:218] Iteration 5076 (2.39262 iter/s, 5.01543s/12 iters), loss = 0.759517
I0412 13:25:44.084388 6200 solver.cpp:237] Train net output #0: loss = 0.759517 (* 1 = 0.759517 loss)
I0412 13:25:44.084399 6200 sgd_solver.cpp:105] Iteration 5076, lr = 0.00365868
I0412 13:25:49.173346 6200 solver.cpp:218] Iteration 5088 (2.35811 iter/s, 5.08881s/12 iters), loss = 0.937437
I0412 13:25:49.173382 6200 solver.cpp:237] Train net output #0: loss = 0.937437 (* 1 = 0.937437 loss)
I0412 13:25:49.173390 6200 sgd_solver.cpp:105] Iteration 5088, lr = 0.00364999
I0412 13:25:53.776197 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5100.caffemodel
I0412 13:25:59.668903 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5100.solverstate
I0412 13:26:02.008484 6200 solver.cpp:330] Iteration 5100, Testing net (#0)
I0412 13:26:02.008509 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:26:04.454416 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:26:06.462481 6200 solver.cpp:397] Test net output #0: accuracy = 0.390931
I0412 13:26:06.462514 6200 solver.cpp:397] Test net output #1: loss = 2.8157 (* 1 = 2.8157 loss)
I0412 13:26:06.549156 6200 solver.cpp:218] Iteration 5100 (0.690636 iter/s, 17.3753s/12 iters), loss = 1.02524
I0412 13:26:06.549199 6200 solver.cpp:237] Train net output #0: loss = 1.02524 (* 1 = 1.02524 loss)
I0412 13:26:06.549207 6200 sgd_solver.cpp:105] Iteration 5100, lr = 0.00364132
I0412 13:26:10.789885 6200 solver.cpp:218] Iteration 5112 (2.82982 iter/s, 4.24055s/12 iters), loss = 0.800379
I0412 13:26:10.789934 6200 solver.cpp:237] Train net output #0: loss = 0.800379 (* 1 = 0.800379 loss)
I0412 13:26:10.789943 6200 sgd_solver.cpp:105] Iteration 5112, lr = 0.00363268
I0412 13:26:15.625847 6200 solver.cpp:218] Iteration 5124 (2.48151 iter/s, 4.83576s/12 iters), loss = 1.06391
I0412 13:26:15.625905 6200 solver.cpp:237] Train net output #0: loss = 1.06391 (* 1 = 1.06391 loss)
I0412 13:26:15.625918 6200 sgd_solver.cpp:105] Iteration 5124, lr = 0.00362405
I0412 13:26:20.588032 6200 solver.cpp:218] Iteration 5136 (2.41839 iter/s, 4.96198s/12 iters), loss = 0.805358
I0412 13:26:20.588078 6200 solver.cpp:237] Train net output #0: loss = 0.805358 (* 1 = 0.805358 loss)
I0412 13:26:20.588088 6200 sgd_solver.cpp:105] Iteration 5136, lr = 0.00361545
I0412 13:26:25.525552 6200 solver.cpp:218] Iteration 5148 (2.43047 iter/s, 4.93732s/12 iters), loss = 0.749542
I0412 13:26:25.525596 6200 solver.cpp:237] Train net output #0: loss = 0.749542 (* 1 = 0.749542 loss)
I0412 13:26:25.525606 6200 sgd_solver.cpp:105] Iteration 5148, lr = 0.00360687
I0412 13:26:29.531605 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:26:30.507444 6200 solver.cpp:218] Iteration 5160 (2.40882 iter/s, 4.9817s/12 iters), loss = 0.826272
I0412 13:26:30.507490 6200 solver.cpp:237] Train net output #0: loss = 0.826272 (* 1 = 0.826272 loss)
I0412 13:26:30.507500 6200 sgd_solver.cpp:105] Iteration 5160, lr = 0.0035983
I0412 13:26:35.531564 6200 solver.cpp:218] Iteration 5172 (2.38857 iter/s, 5.02392s/12 iters), loss = 0.712681
I0412 13:26:35.533020 6200 solver.cpp:237] Train net output #0: loss = 0.712681 (* 1 = 0.712681 loss)
I0412 13:26:35.533030 6200 sgd_solver.cpp:105] Iteration 5172, lr = 0.00358976
I0412 13:26:40.550905 6200 solver.cpp:218] Iteration 5184 (2.39152 iter/s, 5.01773s/12 iters), loss = 0.73236
I0412 13:26:40.550957 6200 solver.cpp:237] Train net output #0: loss = 0.73236 (* 1 = 0.73236 loss)
I0412 13:26:40.550968 6200 sgd_solver.cpp:105] Iteration 5184, lr = 0.00358124
I0412 13:26:45.537623 6200 solver.cpp:218] Iteration 5196 (2.40649 iter/s, 4.98652s/12 iters), loss = 0.918629
I0412 13:26:45.537678 6200 solver.cpp:237] Train net output #0: loss = 0.918629 (* 1 = 0.918629 loss)
I0412 13:26:45.537689 6200 sgd_solver.cpp:105] Iteration 5196, lr = 0.00357273
I0412 13:26:47.577733 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5202.caffemodel
I0412 13:26:51.313385 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5202.solverstate
I0412 13:26:54.927222 6200 solver.cpp:330] Iteration 5202, Testing net (#0)
I0412 13:26:54.927239 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:26:57.379007 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:26:59.459350 6200 solver.cpp:397] Test net output #0: accuracy = 0.375613
I0412 13:26:59.459388 6200 solver.cpp:397] Test net output #1: loss = 2.84689 (* 1 = 2.84689 loss)
I0412 13:27:01.374125 6200 solver.cpp:218] Iteration 5208 (0.757767 iter/s, 15.836s/12 iters), loss = 0.642052
I0412 13:27:01.374171 6200 solver.cpp:237] Train net output #0: loss = 0.642052 (* 1 = 0.642052 loss)
I0412 13:27:01.374182 6200 sgd_solver.cpp:105] Iteration 5208, lr = 0.00356425
I0412 13:27:06.221686 6200 solver.cpp:218] Iteration 5220 (2.47557 iter/s, 4.84737s/12 iters), loss = 0.577155
I0412 13:27:06.221783 6200 solver.cpp:237] Train net output #0: loss = 0.577155 (* 1 = 0.577155 loss)
I0412 13:27:06.221796 6200 sgd_solver.cpp:105] Iteration 5220, lr = 0.00355579
I0412 13:27:11.232549 6200 solver.cpp:218] Iteration 5232 (2.39491 iter/s, 5.01062s/12 iters), loss = 0.757778
I0412 13:27:11.232599 6200 solver.cpp:237] Train net output #0: loss = 0.757778 (* 1 = 0.757778 loss)
I0412 13:27:11.232609 6200 sgd_solver.cpp:105] Iteration 5232, lr = 0.00354735
I0412 13:27:16.549355 6200 solver.cpp:218] Iteration 5244 (2.25708 iter/s, 5.3166s/12 iters), loss = 0.944393
I0412 13:27:16.549401 6200 solver.cpp:237] Train net output #0: loss = 0.944393 (* 1 = 0.944393 loss)
I0412 13:27:16.549410 6200 sgd_solver.cpp:105] Iteration 5244, lr = 0.00353892
I0412 13:27:21.916147 6200 solver.cpp:218] Iteration 5256 (2.23606 iter/s, 5.36658s/12 iters), loss = 0.81184
I0412 13:27:21.916206 6200 solver.cpp:237] Train net output #0: loss = 0.81184 (* 1 = 0.81184 loss)
I0412 13:27:21.916219 6200 sgd_solver.cpp:105] Iteration 5256, lr = 0.00353052
I0412 13:27:23.386935 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:27:27.402578 6200 solver.cpp:218] Iteration 5268 (2.1873 iter/s, 5.48621s/12 iters), loss = 0.803746
I0412 13:27:27.402634 6200 solver.cpp:237] Train net output #0: loss = 0.803746 (* 1 = 0.803746 loss)
I0412 13:27:27.402645 6200 sgd_solver.cpp:105] Iteration 5268, lr = 0.00352214
I0412 13:27:32.605182 6200 solver.cpp:218] Iteration 5280 (2.30663 iter/s, 5.2024s/12 iters), loss = 0.92216
I0412 13:27:32.605228 6200 solver.cpp:237] Train net output #0: loss = 0.92216 (* 1 = 0.92216 loss)
I0412 13:27:32.605240 6200 sgd_solver.cpp:105] Iteration 5280, lr = 0.00351378
I0412 13:27:37.571481 6200 solver.cpp:218] Iteration 5292 (2.41638 iter/s, 4.9661s/12 iters), loss = 0.854658
I0412 13:27:37.571610 6200 solver.cpp:237] Train net output #0: loss = 0.854658 (* 1 = 0.854658 loss)
I0412 13:27:37.571620 6200 sgd_solver.cpp:105] Iteration 5292, lr = 0.00350544
I0412 13:27:42.063453 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5304.caffemodel
I0412 13:27:46.357069 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5304.solverstate
I0412 13:27:52.264140 6200 solver.cpp:330] Iteration 5304, Testing net (#0)
I0412 13:27:52.264169 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:27:54.621331 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:27:56.886390 6200 solver.cpp:397] Test net output #0: accuracy = 0.365809
I0412 13:27:56.886443 6200 solver.cpp:397] Test net output #1: loss = 3.01739 (* 1 = 3.01739 loss)
I0412 13:27:56.970258 6200 solver.cpp:218] Iteration 5304 (0.618617 iter/s, 19.3981s/12 iters), loss = 0.984758
I0412 13:27:56.970309 6200 solver.cpp:237] Train net output #0: loss = 0.984758 (* 1 = 0.984758 loss)
I0412 13:27:56.970321 6200 sgd_solver.cpp:105] Iteration 5304, lr = 0.00349711
I0412 13:28:01.041924 6200 solver.cpp:218] Iteration 5316 (2.94733 iter/s, 4.07149s/12 iters), loss = 0.828824
I0412 13:28:01.042001 6200 solver.cpp:237] Train net output #0: loss = 0.828824 (* 1 = 0.828824 loss)
I0412 13:28:01.042014 6200 sgd_solver.cpp:105] Iteration 5316, lr = 0.00348881
I0412 13:28:05.993705 6200 solver.cpp:218] Iteration 5328 (2.42348 iter/s, 4.95156s/12 iters), loss = 0.909378
I0412 13:28:05.993752 6200 solver.cpp:237] Train net output #0: loss = 0.909378 (* 1 = 0.909378 loss)
I0412 13:28:05.993762 6200 sgd_solver.cpp:105] Iteration 5328, lr = 0.00348053
I0412 13:28:11.109300 6200 solver.cpp:218] Iteration 5340 (2.34586 iter/s, 5.11539s/12 iters), loss = 0.816494
I0412 13:28:11.110752 6200 solver.cpp:237] Train net output #0: loss = 0.816494 (* 1 = 0.816494 loss)
I0412 13:28:11.110766 6200 sgd_solver.cpp:105] Iteration 5340, lr = 0.00347226
I0412 13:28:16.099841 6200 solver.cpp:218] Iteration 5352 (2.40532 iter/s, 4.98894s/12 iters), loss = 0.710438
I0412 13:28:16.099886 6200 solver.cpp:237] Train net output #0: loss = 0.710438 (* 1 = 0.710438 loss)
I0412 13:28:16.099895 6200 sgd_solver.cpp:105] Iteration 5352, lr = 0.00346402
I0412 13:28:19.514343 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:28:21.096841 6200 solver.cpp:218] Iteration 5364 (2.40153 iter/s, 4.99681s/12 iters), loss = 0.854988
I0412 13:28:21.096890 6200 solver.cpp:237] Train net output #0: loss = 0.854988 (* 1 = 0.854988 loss)
I0412 13:28:21.096902 6200 sgd_solver.cpp:105] Iteration 5364, lr = 0.0034558
I0412 13:28:26.159577 6200 solver.cpp:218] Iteration 5376 (2.37035 iter/s, 5.06253s/12 iters), loss = 0.984142
I0412 13:28:26.159628 6200 solver.cpp:237] Train net output #0: loss = 0.984142 (* 1 = 0.984142 loss)
I0412 13:28:26.159641 6200 sgd_solver.cpp:105] Iteration 5376, lr = 0.00344759
I0412 13:28:31.100956 6200 solver.cpp:218] Iteration 5388 (2.42857 iter/s, 4.94118s/12 iters), loss = 0.854936
I0412 13:28:31.101002 6200 solver.cpp:237] Train net output #0: loss = 0.854936 (* 1 = 0.854936 loss)
I0412 13:28:31.101012 6200 sgd_solver.cpp:105] Iteration 5388, lr = 0.00343941
I0412 13:28:35.982933 6200 solver.cpp:218] Iteration 5400 (2.45812 iter/s, 4.88178s/12 iters), loss = 0.881243
I0412 13:28:35.982987 6200 solver.cpp:237] Train net output #0: loss = 0.881243 (* 1 = 0.881243 loss)
I0412 13:28:35.983000 6200 sgd_solver.cpp:105] Iteration 5400, lr = 0.00343124
I0412 13:28:38.161571 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5406.caffemodel
I0412 13:28:40.182809 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5406.solverstate
I0412 13:28:41.750066 6200 solver.cpp:330] Iteration 5406, Testing net (#0)
I0412 13:28:41.751641 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:28:43.941673 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:28:46.067941 6200 solver.cpp:397] Test net output #0: accuracy = 0.377451
I0412 13:28:46.067986 6200 solver.cpp:397] Test net output #1: loss = 2.87138 (* 1 = 2.87138 loss)
I0412 13:28:47.765947 6200 solver.cpp:218] Iteration 5412 (1.01845 iter/s, 11.7826s/12 iters), loss = 0.930482
I0412 13:28:47.766024 6200 solver.cpp:237] Train net output #0: loss = 0.930482 (* 1 = 0.930482 loss)
I0412 13:28:47.766036 6200 sgd_solver.cpp:105] Iteration 5412, lr = 0.00342309
I0412 13:28:52.750259 6200 solver.cpp:218] Iteration 5424 (2.40766 iter/s, 4.98409s/12 iters), loss = 0.812466
I0412 13:28:52.750311 6200 solver.cpp:237] Train net output #0: loss = 0.812466 (* 1 = 0.812466 loss)
I0412 13:28:52.750324 6200 sgd_solver.cpp:105] Iteration 5424, lr = 0.00341497
I0412 13:28:57.771764 6200 solver.cpp:218] Iteration 5436 (2.38982 iter/s, 5.0213s/12 iters), loss = 0.767061
I0412 13:28:57.771821 6200 solver.cpp:237] Train net output #0: loss = 0.767061 (* 1 = 0.767061 loss)
I0412 13:28:57.771832 6200 sgd_solver.cpp:105] Iteration 5436, lr = 0.00340686
I0412 13:29:03.051980 6200 solver.cpp:218] Iteration 5448 (2.27273 iter/s, 5.28s/12 iters), loss = 0.841058
I0412 13:29:03.052038 6200 solver.cpp:237] Train net output #0: loss = 0.841058 (* 1 = 0.841058 loss)
I0412 13:29:03.052049 6200 sgd_solver.cpp:105] Iteration 5448, lr = 0.00339877
I0412 13:29:07.993659 6200 solver.cpp:218] Iteration 5460 (2.42843 iter/s, 4.94147s/12 iters), loss = 0.61544
I0412 13:29:07.993710 6200 solver.cpp:237] Train net output #0: loss = 0.61544 (* 1 = 0.61544 loss)
I0412 13:29:07.993721 6200 sgd_solver.cpp:105] Iteration 5460, lr = 0.0033907
I0412 13:29:08.543764 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:29:12.952319 6200 solver.cpp:218] Iteration 5472 (2.42011 iter/s, 4.95846s/12 iters), loss = 0.73547
I0412 13:29:12.952451 6200 solver.cpp:237] Train net output #0: loss = 0.73547 (* 1 = 0.73547 loss)
I0412 13:29:12.952469 6200 sgd_solver.cpp:105] Iteration 5472, lr = 0.00338265
I0412 13:29:17.848162 6200 solver.cpp:218] Iteration 5484 (2.4512 iter/s, 4.89557s/12 iters), loss = 0.44467
I0412 13:29:17.848212 6200 solver.cpp:237] Train net output #0: loss = 0.44467 (* 1 = 0.44467 loss)
I0412 13:29:17.848222 6200 sgd_solver.cpp:105] Iteration 5484, lr = 0.00337462
I0412 13:29:22.852073 6200 solver.cpp:218] Iteration 5496 (2.39822 iter/s, 5.00371s/12 iters), loss = 0.65648
I0412 13:29:22.852121 6200 solver.cpp:237] Train net output #0: loss = 0.65648 (* 1 = 0.65648 loss)
I0412 13:29:22.852133 6200 sgd_solver.cpp:105] Iteration 5496, lr = 0.00336661
I0412 13:29:27.503274 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5508.caffemodel
I0412 13:29:30.934669 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5508.solverstate
I0412 13:29:33.896201 6200 solver.cpp:330] Iteration 5508, Testing net (#0)
I0412 13:29:33.896232 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:29:36.156134 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:29:38.409682 6200 solver.cpp:397] Test net output #0: accuracy = 0.38174
I0412 13:29:38.409720 6200 solver.cpp:397] Test net output #1: loss = 3.05101 (* 1 = 3.05101 loss)
I0412 13:29:38.494813 6200 solver.cpp:218] Iteration 5508 (0.767153 iter/s, 15.6423s/12 iters), loss = 0.609104
I0412 13:29:38.494858 6200 solver.cpp:237] Train net output #0: loss = 0.609104 (* 1 = 0.609104 loss)
I0412 13:29:38.494868 6200 sgd_solver.cpp:105] Iteration 5508, lr = 0.00335861
I0412 13:29:42.856397 6200 solver.cpp:218] Iteration 5520 (2.75141 iter/s, 4.36141s/12 iters), loss = 0.782956
I0412 13:29:42.856446 6200 solver.cpp:237] Train net output #0: loss = 0.782956 (* 1 = 0.782956 loss)
I0412 13:29:42.856457 6200 sgd_solver.cpp:105] Iteration 5520, lr = 0.00335064
I0412 13:29:44.787976 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:29:47.715555 6200 solver.cpp:218] Iteration 5532 (2.46966 iter/s, 4.85896s/12 iters), loss = 0.621638
I0412 13:29:47.715607 6200 solver.cpp:237] Train net output #0: loss = 0.621638 (* 1 = 0.621638 loss)
I0412 13:29:47.715620 6200 sgd_solver.cpp:105] Iteration 5532, lr = 0.00334268
I0412 13:29:52.750898 6200 solver.cpp:218] Iteration 5544 (2.38325 iter/s, 5.03514s/12 iters), loss = 0.618093
I0412 13:29:52.750942 6200 solver.cpp:237] Train net output #0: loss = 0.618093 (* 1 = 0.618093 loss)
I0412 13:29:52.750952 6200 sgd_solver.cpp:105] Iteration 5544, lr = 0.00333475
I0412 13:29:57.796634 6200 solver.cpp:218] Iteration 5556 (2.37834 iter/s, 5.04554s/12 iters), loss = 0.574244
I0412 13:29:57.796686 6200 solver.cpp:237] Train net output #0: loss = 0.574244 (* 1 = 0.574244 loss)
I0412 13:29:57.796698 6200 sgd_solver.cpp:105] Iteration 5556, lr = 0.00332683
I0412 13:30:00.569507 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:30:02.924054 6200 solver.cpp:218] Iteration 5568 (2.34045 iter/s, 5.12721s/12 iters), loss = 0.715673
I0412 13:30:02.924120 6200 solver.cpp:237] Train net output #0: loss = 0.715673 (* 1 = 0.715673 loss)
I0412 13:30:02.924132 6200 sgd_solver.cpp:105] Iteration 5568, lr = 0.00331893
I0412 13:30:07.915489 6200 solver.cpp:218] Iteration 5580 (2.40422 iter/s, 4.99122s/12 iters), loss = 0.816599
I0412 13:30:07.915549 6200 solver.cpp:237] Train net output #0: loss = 0.816599 (* 1 = 0.816599 loss)
I0412 13:30:07.915560 6200 sgd_solver.cpp:105] Iteration 5580, lr = 0.00331105
I0412 13:30:12.806764 6200 solver.cpp:218] Iteration 5592 (2.45345 iter/s, 4.89107s/12 iters), loss = 0.610774
I0412 13:30:12.806819 6200 solver.cpp:237] Train net output #0: loss = 0.610774 (* 1 = 0.610774 loss)
I0412 13:30:12.806830 6200 sgd_solver.cpp:105] Iteration 5592, lr = 0.00330319
I0412 13:30:17.781397 6200 solver.cpp:218] Iteration 5604 (2.41233 iter/s, 4.97443s/12 iters), loss = 0.63411
I0412 13:30:17.781500 6200 solver.cpp:237] Train net output #0: loss = 0.63411 (* 1 = 0.63411 loss)
I0412 13:30:17.781508 6200 sgd_solver.cpp:105] Iteration 5604, lr = 0.00329535
I0412 13:30:19.932463 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5610.caffemodel
I0412 13:30:23.191056 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5610.solverstate
I0412 13:30:24.755563 6200 solver.cpp:330] Iteration 5610, Testing net (#0)
I0412 13:30:24.755591 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:30:27.012295 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:30:29.369057 6200 solver.cpp:397] Test net output #0: accuracy = 0.411765
I0412 13:30:29.369104 6200 solver.cpp:397] Test net output #1: loss = 2.82321 (* 1 = 2.82321 loss)
I0412 13:30:31.210984 6200 solver.cpp:218] Iteration 5616 (0.893582 iter/s, 13.4291s/12 iters), loss = 0.742404
I0412 13:30:31.211036 6200 solver.cpp:237] Train net output #0: loss = 0.742404 (* 1 = 0.742404 loss)
I0412 13:30:31.211047 6200 sgd_solver.cpp:105] Iteration 5616, lr = 0.00328752
I0412 13:30:36.189154 6200 solver.cpp:218] Iteration 5628 (2.41062 iter/s, 4.97797s/12 iters), loss = 0.678217
I0412 13:30:36.189211 6200 solver.cpp:237] Train net output #0: loss = 0.678217 (* 1 = 0.678217 loss)
I0412 13:30:36.189224 6200 sgd_solver.cpp:105] Iteration 5628, lr = 0.00327972
I0412 13:30:41.156706 6200 solver.cpp:218] Iteration 5640 (2.41578 iter/s, 4.96735s/12 iters), loss = 0.616884
I0412 13:30:41.156754 6200 solver.cpp:237] Train net output #0: loss = 0.616884 (* 1 = 0.616884 loss)
I0412 13:30:41.156764 6200 sgd_solver.cpp:105] Iteration 5640, lr = 0.00327193
I0412 13:30:46.271936 6200 solver.cpp:218] Iteration 5652 (2.34603 iter/s, 5.11503s/12 iters), loss = 0.604135
I0412 13:30:46.271996 6200 solver.cpp:237] Train net output #0: loss = 0.604135 (* 1 = 0.604135 loss)
I0412 13:30:46.272009 6200 sgd_solver.cpp:105] Iteration 5652, lr = 0.00326416
I0412 13:30:50.971371 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:30:51.139818 6200 solver.cpp:218] Iteration 5664 (2.46524 iter/s, 4.86768s/12 iters), loss = 0.69737
I0412 13:30:51.139865 6200 solver.cpp:237] Train net output #0: loss = 0.69737 (* 1 = 0.69737 loss)
I0412 13:30:51.139875 6200 sgd_solver.cpp:105] Iteration 5664, lr = 0.00325641
I0412 13:30:56.079025 6200 solver.cpp:218] Iteration 5676 (2.42964 iter/s, 4.93901s/12 iters), loss = 0.52716
I0412 13:30:56.079083 6200 solver.cpp:237] Train net output #0: loss = 0.52716 (* 1 = 0.52716 loss)
I0412 13:30:56.079097 6200 sgd_solver.cpp:105] Iteration 5676, lr = 0.00324868
I0412 13:31:01.093855 6200 solver.cpp:218] Iteration 5688 (2.393 iter/s, 5.01462s/12 iters), loss = 0.654212
I0412 13:31:01.093907 6200 solver.cpp:237] Train net output #0: loss = 0.654212 (* 1 = 0.654212 loss)
I0412 13:31:01.093919 6200 sgd_solver.cpp:105] Iteration 5688, lr = 0.00324097
I0412 13:31:06.179927 6200 solver.cpp:218] Iteration 5700 (2.35948 iter/s, 5.08587s/12 iters), loss = 0.559176
I0412 13:31:06.179986 6200 solver.cpp:237] Train net output #0: loss = 0.559176 (* 1 = 0.559176 loss)
I0412 13:31:06.179998 6200 sgd_solver.cpp:105] Iteration 5700, lr = 0.00323328
I0412 13:31:10.834775 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5712.caffemodel
I0412 13:31:16.357043 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5712.solverstate
I0412 13:31:23.825294 6200 solver.cpp:330] Iteration 5712, Testing net (#0)
I0412 13:31:23.825362 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:31:26.159900 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:31:28.577481 6200 solver.cpp:397] Test net output #0: accuracy = 0.407476
I0412 13:31:28.577522 6200 solver.cpp:397] Test net output #1: loss = 2.84606 (* 1 = 2.84606 loss)
I0412 13:31:28.662678 6200 solver.cpp:218] Iteration 5712 (0.533759 iter/s, 22.4821s/12 iters), loss = 0.421864
I0412 13:31:28.662724 6200 solver.cpp:237] Train net output #0: loss = 0.421864 (* 1 = 0.421864 loss)
I0412 13:31:28.662734 6200 sgd_solver.cpp:105] Iteration 5712, lr = 0.0032256
I0412 13:31:32.851112 6200 solver.cpp:218] Iteration 5724 (2.86515 iter/s, 4.18826s/12 iters), loss = 0.739523
I0412 13:31:32.851164 6200 solver.cpp:237] Train net output #0: loss = 0.739523 (* 1 = 0.739523 loss)
I0412 13:31:32.851176 6200 sgd_solver.cpp:105] Iteration 5724, lr = 0.00321794
I0412 13:31:37.924023 6200 solver.cpp:218] Iteration 5736 (2.3656 iter/s, 5.07271s/12 iters), loss = 0.4957
I0412 13:31:37.924070 6200 solver.cpp:237] Train net output #0: loss = 0.4957 (* 1 = 0.4957 loss)
I0412 13:31:37.924082 6200 sgd_solver.cpp:105] Iteration 5736, lr = 0.0032103
I0412 13:31:42.936642 6200 solver.cpp:218] Iteration 5748 (2.39406 iter/s, 5.01241s/12 iters), loss = 0.490067
I0412 13:31:42.936697 6200 solver.cpp:237] Train net output #0: loss = 0.490067 (* 1 = 0.490067 loss)
I0412 13:31:42.936710 6200 sgd_solver.cpp:105] Iteration 5748, lr = 0.00320268
I0412 13:31:47.895493 6200 solver.cpp:218] Iteration 5760 (2.42002 iter/s, 4.95864s/12 iters), loss = 0.720296
I0412 13:31:47.895535 6200 solver.cpp:237] Train net output #0: loss = 0.720296 (* 1 = 0.720296 loss)
I0412 13:31:47.895545 6200 sgd_solver.cpp:105] Iteration 5760, lr = 0.00319508
I0412 13:31:49.836963 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:31:52.811385 6200 solver.cpp:218] Iteration 5772 (2.44116 iter/s, 4.9157s/12 iters), loss = 0.566475
I0412 13:31:52.811441 6200 solver.cpp:237] Train net output #0: loss = 0.566475 (* 1 = 0.566475 loss)
I0412 13:31:52.811453 6200 sgd_solver.cpp:105] Iteration 5772, lr = 0.00318749
I0412 13:31:57.750135 6200 solver.cpp:218] Iteration 5784 (2.42986 iter/s, 4.93855s/12 iters), loss = 0.57002
I0412 13:31:57.750291 6200 solver.cpp:237] Train net output #0: loss = 0.57002 (* 1 = 0.57002 loss)
I0412 13:31:57.750304 6200 sgd_solver.cpp:105] Iteration 5784, lr = 0.00317992
I0412 13:32:02.532282 6200 solver.cpp:218] Iteration 5796 (2.50949 iter/s, 4.78185s/12 iters), loss = 0.603514
I0412 13:32:02.532344 6200 solver.cpp:237] Train net output #0: loss = 0.603514 (* 1 = 0.603514 loss)
I0412 13:32:02.532359 6200 sgd_solver.cpp:105] Iteration 5796, lr = 0.00317237
I0412 13:32:07.440452 6200 solver.cpp:218] Iteration 5808 (2.44501 iter/s, 4.90796s/12 iters), loss = 0.605744
I0412 13:32:07.440503 6200 solver.cpp:237] Train net output #0: loss = 0.605744 (* 1 = 0.605744 loss)
I0412 13:32:07.440515 6200 sgd_solver.cpp:105] Iteration 5808, lr = 0.00316484
I0412 13:32:09.471287 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5814.caffemodel
I0412 13:32:11.492013 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5814.solverstate
I0412 13:32:13.046455 6200 solver.cpp:330] Iteration 5814, Testing net (#0)
I0412 13:32:13.046483 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:32:15.210695 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:32:17.544986 6200 solver.cpp:397] Test net output #0: accuracy = 0.408088
I0412 13:32:17.545048 6200 solver.cpp:397] Test net output #1: loss = 2.91473 (* 1 = 2.91473 loss)
I0412 13:32:19.493425 6200 solver.cpp:218] Iteration 5820 (0.995638 iter/s, 12.0526s/12 iters), loss = 0.540865
I0412 13:32:19.493481 6200 solver.cpp:237] Train net output #0: loss = 0.540865 (* 1 = 0.540865 loss)
I0412 13:32:19.493494 6200 sgd_solver.cpp:105] Iteration 5820, lr = 0.00315733
I0412 13:32:24.521710 6200 solver.cpp:218] Iteration 5832 (2.3866 iter/s, 5.02808s/12 iters), loss = 0.610058
I0412 13:32:24.521770 6200 solver.cpp:237] Train net output #0: loss = 0.610058 (* 1 = 0.610058 loss)
I0412 13:32:24.521782 6200 sgd_solver.cpp:105] Iteration 5832, lr = 0.00314983
I0412 13:32:29.443854 6200 solver.cpp:218] Iteration 5844 (2.43807 iter/s, 4.92193s/12 iters), loss = 0.646589
I0412 13:32:29.443953 6200 solver.cpp:237] Train net output #0: loss = 0.646589 (* 1 = 0.646589 loss)
I0412 13:32:29.443964 6200 sgd_solver.cpp:105] Iteration 5844, lr = 0.00314235
I0412 13:32:34.482736 6200 solver.cpp:218] Iteration 5856 (2.3816 iter/s, 5.03863s/12 iters), loss = 0.44665
I0412 13:32:34.482784 6200 solver.cpp:237] Train net output #0: loss = 0.44665 (* 1 = 0.44665 loss)
I0412 13:32:34.482792 6200 sgd_solver.cpp:105] Iteration 5856, lr = 0.00313489
I0412 13:32:38.833248 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:32:39.648098 6200 solver.cpp:218] Iteration 5868 (2.32326 iter/s, 5.16516s/12 iters), loss = 0.552307
I0412 13:32:39.648141 6200 solver.cpp:237] Train net output #0: loss = 0.552307 (* 1 = 0.552307 loss)
I0412 13:32:39.648150 6200 sgd_solver.cpp:105] Iteration 5868, lr = 0.00312745
I0412 13:32:44.586799 6200 solver.cpp:218] Iteration 5880 (2.42988 iter/s, 4.93851s/12 iters), loss = 0.501762
I0412 13:32:44.586853 6200 solver.cpp:237] Train net output #0: loss = 0.501762 (* 1 = 0.501762 loss)
I0412 13:32:44.586867 6200 sgd_solver.cpp:105] Iteration 5880, lr = 0.00312002
I0412 13:32:49.486335 6200 solver.cpp:218] Iteration 5892 (2.44931 iter/s, 4.89934s/12 iters), loss = 0.466604
I0412 13:32:49.486382 6200 solver.cpp:237] Train net output #0: loss = 0.466604 (* 1 = 0.466604 loss)
I0412 13:32:49.486392 6200 sgd_solver.cpp:105] Iteration 5892, lr = 0.00311262
I0412 13:32:54.414944 6200 solver.cpp:218] Iteration 5904 (2.43486 iter/s, 4.92841s/12 iters), loss = 0.599248
I0412 13:32:54.414988 6200 solver.cpp:237] Train net output #0: loss = 0.599248 (* 1 = 0.599248 loss)
I0412 13:32:54.414996 6200 sgd_solver.cpp:105] Iteration 5904, lr = 0.00310523
I0412 13:32:58.832356 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5916.caffemodel
I0412 13:33:00.886046 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5916.solverstate
I0412 13:33:02.451752 6200 solver.cpp:330] Iteration 5916, Testing net (#0)
I0412 13:33:02.451781 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:33:04.842389 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:33:07.482606 6200 solver.cpp:397] Test net output #0: accuracy = 0.41973
I0412 13:33:07.482656 6200 solver.cpp:397] Test net output #1: loss = 2.8159 (* 1 = 2.8159 loss)
I0412 13:33:07.567569 6200 solver.cpp:218] Iteration 5916 (0.912394 iter/s, 13.1522s/12 iters), loss = 0.679706
I0412 13:33:07.567618 6200 solver.cpp:237] Train net output #0: loss = 0.679706 (* 1 = 0.679706 loss)
I0412 13:33:07.567629 6200 sgd_solver.cpp:105] Iteration 5916, lr = 0.00309785
I0412 13:33:11.682242 6200 solver.cpp:218] Iteration 5928 (2.91652 iter/s, 4.1145s/12 iters), loss = 0.55357
I0412 13:33:11.682288 6200 solver.cpp:237] Train net output #0: loss = 0.55357 (* 1 = 0.55357 loss)
I0412 13:33:11.682299 6200 sgd_solver.cpp:105] Iteration 5928, lr = 0.0030905
I0412 13:33:16.553436 6200 solver.cpp:218] Iteration 5940 (2.46356 iter/s, 4.871s/12 iters), loss = 0.57879
I0412 13:33:16.553486 6200 solver.cpp:237] Train net output #0: loss = 0.57879 (* 1 = 0.57879 loss)
I0412 13:33:16.553498 6200 sgd_solver.cpp:105] Iteration 5940, lr = 0.00308316
I0412 13:33:21.436775 6200 solver.cpp:218] Iteration 5952 (2.45743 iter/s, 4.88314s/12 iters), loss = 0.611226
I0412 13:33:21.436821 6200 solver.cpp:237] Train net output #0: loss = 0.611226 (* 1 = 0.611226 loss)
I0412 13:33:21.436833 6200 sgd_solver.cpp:105] Iteration 5952, lr = 0.00307584
I0412 13:33:26.431682 6200 solver.cpp:218] Iteration 5964 (2.40254 iter/s, 4.99471s/12 iters), loss = 0.39201
I0412 13:33:26.431730 6200 solver.cpp:237] Train net output #0: loss = 0.39201 (* 1 = 0.39201 loss)
I0412 13:33:26.431741 6200 sgd_solver.cpp:105] Iteration 5964, lr = 0.00306854
I0412 13:33:27.677052 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:33:31.351819 6200 solver.cpp:218] Iteration 5976 (2.43905 iter/s, 4.91994s/12 iters), loss = 0.531199
I0412 13:33:31.351894 6200 solver.cpp:237] Train net output #0: loss = 0.531199 (* 1 = 0.531199 loss)
I0412 13:33:31.351904 6200 sgd_solver.cpp:105] Iteration 5976, lr = 0.00306125
I0412 13:33:36.242936 6200 solver.cpp:218] Iteration 5988 (2.45354 iter/s, 4.89089s/12 iters), loss = 0.674719
I0412 13:33:36.242991 6200 solver.cpp:237] Train net output #0: loss = 0.674719 (* 1 = 0.674719 loss)
I0412 13:33:36.243002 6200 sgd_solver.cpp:105] Iteration 5988, lr = 0.00305398
I0412 13:33:41.136143 6200 solver.cpp:218] Iteration 6000 (2.45248 iter/s, 4.893s/12 iters), loss = 0.437853
I0412 13:33:41.136198 6200 solver.cpp:237] Train net output #0: loss = 0.437853 (* 1 = 0.437853 loss)
I0412 13:33:41.136210 6200 sgd_solver.cpp:105] Iteration 6000, lr = 0.00304673
I0412 13:33:46.035826 6200 solver.cpp:218] Iteration 6012 (2.44924 iter/s, 4.89948s/12 iters), loss = 0.392242
I0412 13:33:46.035879 6200 solver.cpp:237] Train net output #0: loss = 0.392242 (* 1 = 0.392242 loss)
I0412 13:33:46.035892 6200 sgd_solver.cpp:105] Iteration 6012, lr = 0.0030395
I0412 13:33:48.072078 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6018.caffemodel
I0412 13:33:50.181988 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6018.solverstate
I0412 13:33:51.758745 6200 solver.cpp:330] Iteration 6018, Testing net (#0)
I0412 13:33:51.758771 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:33:53.901911 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:33:56.267643 6200 solver.cpp:397] Test net output #0: accuracy = 0.422181
I0412 13:33:56.267683 6200 solver.cpp:397] Test net output #1: loss = 2.80319 (* 1 = 2.80319 loss)
I0412 13:33:58.182965 6200 solver.cpp:218] Iteration 6024 (0.987919 iter/s, 12.1467s/12 iters), loss = 0.526193
I0412 13:33:58.183024 6200 solver.cpp:237] Train net output #0: loss = 0.526193 (* 1 = 0.526193 loss)
I0412 13:33:58.183037 6200 sgd_solver.cpp:105] Iteration 6024, lr = 0.00303228
I0412 13:34:03.269029 6200 solver.cpp:218] Iteration 6036 (2.35949 iter/s, 5.08585s/12 iters), loss = 0.453887
I0412 13:34:03.269130 6200 solver.cpp:237] Train net output #0: loss = 0.453887 (* 1 = 0.453887 loss)
I0412 13:34:03.269141 6200 sgd_solver.cpp:105] Iteration 6036, lr = 0.00302508
I0412 13:34:08.297449 6200 solver.cpp:218] Iteration 6048 (2.38656 iter/s, 5.02817s/12 iters), loss = 0.62402
I0412 13:34:08.297504 6200 solver.cpp:237] Train net output #0: loss = 0.62402 (* 1 = 0.62402 loss)
I0412 13:34:08.297516 6200 sgd_solver.cpp:105] Iteration 6048, lr = 0.0030179
I0412 13:34:13.494758 6200 solver.cpp:218] Iteration 6060 (2.30898 iter/s, 5.1971s/12 iters), loss = 0.6825
I0412 13:34:13.494812 6200 solver.cpp:237] Train net output #0: loss = 0.6825 (* 1 = 0.6825 loss)
I0412 13:34:13.494823 6200 sgd_solver.cpp:105] Iteration 6060, lr = 0.00301074
I0412 13:34:17.193616 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:34:18.907692 6200 solver.cpp:218] Iteration 6072 (2.217 iter/s, 5.41272s/12 iters), loss = 0.478056
I0412 13:34:18.907747 6200 solver.cpp:237] Train net output #0: loss = 0.478056 (* 1 = 0.478056 loss)
I0412 13:34:18.907759 6200 sgd_solver.cpp:105] Iteration 6072, lr = 0.00300359
I0412 13:34:23.991811 6200 solver.cpp:218] Iteration 6084 (2.36039 iter/s, 5.08391s/12 iters), loss = 0.684065
I0412 13:34:23.991861 6200 solver.cpp:237] Train net output #0: loss = 0.684065 (* 1 = 0.684065 loss)
I0412 13:34:23.991874 6200 sgd_solver.cpp:105] Iteration 6084, lr = 0.00299646
I0412 13:34:29.001802 6200 solver.cpp:218] Iteration 6096 (2.39531 iter/s, 5.00978s/12 iters), loss = 0.472703
I0412 13:34:29.001857 6200 solver.cpp:237] Train net output #0: loss = 0.472703 (* 1 = 0.472703 loss)
I0412 13:34:29.001869 6200 sgd_solver.cpp:105] Iteration 6096, lr = 0.00298934
I0412 13:34:34.176664 6200 solver.cpp:218] Iteration 6108 (2.319 iter/s, 5.17465s/12 iters), loss = 0.569762
I0412 13:34:34.176759 6200 solver.cpp:237] Train net output #0: loss = 0.569762 (* 1 = 0.569762 loss)
I0412 13:34:34.176767 6200 sgd_solver.cpp:105] Iteration 6108, lr = 0.00298225
I0412 13:34:38.616053 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6120.caffemodel
I0412 13:34:40.939826 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6120.solverstate
I0412 13:34:43.162700 6200 solver.cpp:330] Iteration 6120, Testing net (#0)
I0412 13:34:43.162729 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:34:45.197342 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:34:47.595880 6200 solver.cpp:397] Test net output #0: accuracy = 0.442402
I0412 13:34:47.595929 6200 solver.cpp:397] Test net output #1: loss = 2.72583 (* 1 = 2.72583 loss)
I0412 13:34:47.680546 6200 solver.cpp:218] Iteration 6120 (0.888665 iter/s, 13.5034s/12 iters), loss = 0.362771
I0412 13:34:47.680598 6200 solver.cpp:237] Train net output #0: loss = 0.362771 (* 1 = 0.362771 loss)
I0412 13:34:47.680608 6200 sgd_solver.cpp:105] Iteration 6120, lr = 0.00297517
I0412 13:34:51.986532 6200 solver.cpp:218] Iteration 6132 (2.78693 iter/s, 4.30581s/12 iters), loss = 0.50115
I0412 13:34:51.986572 6200 solver.cpp:237] Train net output #0: loss = 0.50115 (* 1 = 0.50115 loss)
I0412 13:34:51.986580 6200 sgd_solver.cpp:105] Iteration 6132, lr = 0.0029681
I0412 13:34:56.929579 6200 solver.cpp:218] Iteration 6144 (2.42775 iter/s, 4.94286s/12 iters), loss = 0.49977
I0412 13:34:56.929630 6200 solver.cpp:237] Train net output #0: loss = 0.49977 (* 1 = 0.49977 loss)
I0412 13:34:56.929641 6200 sgd_solver.cpp:105] Iteration 6144, lr = 0.00296105
I0412 13:35:02.108011 6200 solver.cpp:218] Iteration 6156 (2.3174 iter/s, 5.17822s/12 iters), loss = 0.376119
I0412 13:35:02.108064 6200 solver.cpp:237] Train net output #0: loss = 0.376119 (* 1 = 0.376119 loss)
I0412 13:35:02.108076 6200 sgd_solver.cpp:105] Iteration 6156, lr = 0.00295402
I0412 13:35:07.237910 6200 solver.cpp:218] Iteration 6168 (2.33932 iter/s, 5.12969s/12 iters), loss = 0.687399
I0412 13:35:07.238039 6200 solver.cpp:237] Train net output #0: loss = 0.687399 (* 1 = 0.687399 loss)
I0412 13:35:07.238049 6200 sgd_solver.cpp:105] Iteration 6168, lr = 0.00294701
I0412 13:35:07.833817 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:35:12.310357 6200 solver.cpp:218] Iteration 6180 (2.36585 iter/s, 5.07217s/12 iters), loss = 0.701292
I0412 13:35:12.310405 6200 solver.cpp:237] Train net output #0: loss = 0.701292 (* 1 = 0.701292 loss)
I0412 13:35:12.310415 6200 sgd_solver.cpp:105] Iteration 6180, lr = 0.00294001
I0412 13:35:17.237780 6200 solver.cpp:218] Iteration 6192 (2.43545 iter/s, 4.92722s/12 iters), loss = 0.435302
I0412 13:35:17.237835 6200 solver.cpp:237] Train net output #0: loss = 0.435302 (* 1 = 0.435302 loss)
I0412 13:35:17.237848 6200 sgd_solver.cpp:105] Iteration 6192, lr = 0.00293303
I0412 13:35:22.317066 6200 solver.cpp:218] Iteration 6204 (2.36263 iter/s, 5.07908s/12 iters), loss = 0.519867
I0412 13:35:22.317113 6200 solver.cpp:237] Train net output #0: loss = 0.519867 (* 1 = 0.519867 loss)
I0412 13:35:22.317124 6200 sgd_solver.cpp:105] Iteration 6204, lr = 0.00292607
I0412 13:35:27.376570 6200 solver.cpp:218] Iteration 6216 (2.37189 iter/s, 5.05926s/12 iters), loss = 0.469665
I0412 13:35:27.376634 6200 solver.cpp:237] Train net output #0: loss = 0.469665 (* 1 = 0.469665 loss)
I0412 13:35:27.376647 6200 sgd_solver.cpp:105] Iteration 6216, lr = 0.00291912
I0412 13:35:29.435552 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6222.caffemodel
I0412 13:35:32.203891 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6222.solverstate
I0412 13:35:35.006954 6200 solver.cpp:330] Iteration 6222, Testing net (#0)
I0412 13:35:35.006980 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:35:36.986176 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:35:38.163902 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:35:39.425292 6200 solver.cpp:397] Test net output #0: accuracy = 0.413603
I0412 13:35:39.425343 6200 solver.cpp:397] Test net output #1: loss = 2.91351 (* 1 = 2.91351 loss)
I0412 13:35:41.382407 6200 solver.cpp:218] Iteration 6228 (0.856814 iter/s, 14.0054s/12 iters), loss = 0.41408
I0412 13:35:41.382454 6200 solver.cpp:237] Train net output #0: loss = 0.41408 (* 1 = 0.41408 loss)
I0412 13:35:41.382464 6200 sgd_solver.cpp:105] Iteration 6228, lr = 0.00291219
I0412 13:35:46.392688 6200 solver.cpp:218] Iteration 6240 (2.39517 iter/s, 5.01008s/12 iters), loss = 0.622726
I0412 13:35:46.392735 6200 solver.cpp:237] Train net output #0: loss = 0.622726 (* 1 = 0.622726 loss)
I0412 13:35:46.392743 6200 sgd_solver.cpp:105] Iteration 6240, lr = 0.00290528
I0412 13:35:51.359417 6200 solver.cpp:218] Iteration 6252 (2.41617 iter/s, 4.96653s/12 iters), loss = 0.595201
I0412 13:35:51.359467 6200 solver.cpp:237] Train net output #0: loss = 0.595201 (* 1 = 0.595201 loss)
I0412 13:35:51.359478 6200 sgd_solver.cpp:105] Iteration 6252, lr = 0.00289838
I0412 13:35:56.238442 6200 solver.cpp:218] Iteration 6264 (2.45961 iter/s, 4.87882s/12 iters), loss = 0.579341
I0412 13:35:56.238494 6200 solver.cpp:237] Train net output #0: loss = 0.579341 (* 1 = 0.579341 loss)
I0412 13:35:56.238507 6200 sgd_solver.cpp:105] Iteration 6264, lr = 0.0028915
I0412 13:35:58.911437 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:36:01.218152 6200 solver.cpp:218] Iteration 6276 (2.40988 iter/s, 4.9795s/12 iters), loss = 0.412613
I0412 13:36:01.218207 6200 solver.cpp:237] Train net output #0: loss = 0.412613 (* 1 = 0.412613 loss)
I0412 13:36:01.218219 6200 sgd_solver.cpp:105] Iteration 6276, lr = 0.00288463
I0412 13:36:06.100762 6200 solver.cpp:218] Iteration 6288 (2.4578 iter/s, 4.88241s/12 iters), loss = 0.424296
I0412 13:36:06.100811 6200 solver.cpp:237] Train net output #0: loss = 0.424296 (* 1 = 0.424296 loss)
I0412 13:36:06.100822 6200 sgd_solver.cpp:105] Iteration 6288, lr = 0.00287779
I0412 13:36:11.125828 6200 solver.cpp:218] Iteration 6300 (2.38812 iter/s, 5.02486s/12 iters), loss = 0.464567
I0412 13:36:11.125972 6200 solver.cpp:237] Train net output #0: loss = 0.464567 (* 1 = 0.464567 loss)
I0412 13:36:11.125984 6200 sgd_solver.cpp:105] Iteration 6300, lr = 0.00287095
I0412 13:36:15.948523 6200 solver.cpp:218] Iteration 6312 (2.48838 iter/s, 4.82242s/12 iters), loss = 0.536297
I0412 13:36:15.948576 6200 solver.cpp:237] Train net output #0: loss = 0.536297 (* 1 = 0.536297 loss)
I0412 13:36:15.948587 6200 sgd_solver.cpp:105] Iteration 6312, lr = 0.00286414
I0412 13:36:20.845237 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6324.caffemodel
I0412 13:36:22.943917 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6324.solverstate
I0412 13:36:24.497855 6200 solver.cpp:330] Iteration 6324, Testing net (#0)
I0412 13:36:24.497880 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:36:26.488106 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:36:29.016258 6200 solver.cpp:397] Test net output #0: accuracy = 0.41299
I0412 13:36:29.016305 6200 solver.cpp:397] Test net output #1: loss = 2.89682 (* 1 = 2.89682 loss)
I0412 13:36:29.101243 6200 solver.cpp:218] Iteration 6324 (0.912388 iter/s, 13.1523s/12 iters), loss = 0.466227
I0412 13:36:29.101291 6200 solver.cpp:237] Train net output #0: loss = 0.466227 (* 1 = 0.466227 loss)
I0412 13:36:29.101301 6200 sgd_solver.cpp:105] Iteration 6324, lr = 0.00285734
I0412 13:36:33.329802 6200 solver.cpp:218] Iteration 6336 (2.83796 iter/s, 4.22839s/12 iters), loss = 0.498663
I0412 13:36:33.329839 6200 solver.cpp:237] Train net output #0: loss = 0.498663 (* 1 = 0.498663 loss)
I0412 13:36:33.329847 6200 sgd_solver.cpp:105] Iteration 6336, lr = 0.00285055
I0412 13:36:38.334372 6200 solver.cpp:218] Iteration 6348 (2.3979 iter/s, 5.00437s/12 iters), loss = 0.356384
I0412 13:36:38.334420 6200 solver.cpp:237] Train net output #0: loss = 0.356384 (* 1 = 0.356384 loss)
I0412 13:36:38.334429 6200 sgd_solver.cpp:105] Iteration 6348, lr = 0.00284379
I0412 13:36:43.445503 6200 solver.cpp:218] Iteration 6360 (2.34791 iter/s, 5.11093s/12 iters), loss = 0.275827
I0412 13:36:43.445574 6200 solver.cpp:237] Train net output #0: loss = 0.275827 (* 1 = 0.275827 loss)
I0412 13:36:43.445585 6200 sgd_solver.cpp:105] Iteration 6360, lr = 0.00283703
I0412 13:36:48.422567 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:36:48.558320 6200 solver.cpp:218] Iteration 6372 (2.34714 iter/s, 5.11259s/12 iters), loss = 0.241558
I0412 13:36:48.558368 6200 solver.cpp:237] Train net output #0: loss = 0.241558 (* 1 = 0.241558 loss)
I0412 13:36:48.558377 6200 sgd_solver.cpp:105] Iteration 6372, lr = 0.0028303
I0412 13:36:53.617390 6200 solver.cpp:218] Iteration 6384 (2.37207 iter/s, 5.05887s/12 iters), loss = 0.482264
I0412 13:36:53.617434 6200 solver.cpp:237] Train net output #0: loss = 0.482264 (* 1 = 0.482264 loss)
I0412 13:36:53.617444 6200 sgd_solver.cpp:105] Iteration 6384, lr = 0.00282358
I0412 13:36:58.535681 6200 solver.cpp:218] Iteration 6396 (2.43997 iter/s, 4.9181s/12 iters), loss = 0.333614
I0412 13:36:58.535720 6200 solver.cpp:237] Train net output #0: loss = 0.333614 (* 1 = 0.333614 loss)
I0412 13:36:58.535729 6200 sgd_solver.cpp:105] Iteration 6396, lr = 0.00281687
I0412 13:37:03.551808 6200 solver.cpp:218] Iteration 6408 (2.39237 iter/s, 5.01594s/12 iters), loss = 0.516104
I0412 13:37:03.551856 6200 solver.cpp:237] Train net output #0: loss = 0.516104 (* 1 = 0.516104 loss)
I0412 13:37:03.551867 6200 sgd_solver.cpp:105] Iteration 6408, lr = 0.00281019
I0412 13:37:08.585688 6200 solver.cpp:218] Iteration 6420 (2.38394 iter/s, 5.03367s/12 iters), loss = 0.550839
I0412 13:37:08.585747 6200 solver.cpp:237] Train net output #0: loss = 0.550839 (* 1 = 0.550839 loss)
I0412 13:37:08.585758 6200 sgd_solver.cpp:105] Iteration 6420, lr = 0.00280351
I0412 13:37:10.571341 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6426.caffemodel
I0412 13:37:14.084465 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6426.solverstate
I0412 13:37:16.639395 6200 solver.cpp:330] Iteration 6426, Testing net (#0)
I0412 13:37:16.639425 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:37:18.583711 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:37:21.108467 6200 solver.cpp:397] Test net output #0: accuracy = 0.415441
I0412 13:37:21.108502 6200 solver.cpp:397] Test net output #1: loss = 2.79266 (* 1 = 2.79266 loss)
I0412 13:37:22.959442 6200 solver.cpp:218] Iteration 6432 (0.834882 iter/s, 14.3733s/12 iters), loss = 0.395962
I0412 13:37:22.959483 6200 solver.cpp:237] Train net output #0: loss = 0.395962 (* 1 = 0.395962 loss)
I0412 13:37:22.959492 6200 sgd_solver.cpp:105] Iteration 6432, lr = 0.00279686
I0412 13:37:27.841403 6200 solver.cpp:218] Iteration 6444 (2.45813 iter/s, 4.88177s/12 iters), loss = 0.476749
I0412 13:37:27.841460 6200 solver.cpp:237] Train net output #0: loss = 0.476749 (* 1 = 0.476749 loss)
I0412 13:37:27.841473 6200 sgd_solver.cpp:105] Iteration 6444, lr = 0.00279022
I0412 13:37:32.698235 6200 solver.cpp:218] Iteration 6456 (2.47085 iter/s, 4.85663s/12 iters), loss = 0.432693
I0412 13:37:32.698285 6200 solver.cpp:237] Train net output #0: loss = 0.432693 (* 1 = 0.432693 loss)
I0412 13:37:32.698297 6200 sgd_solver.cpp:105] Iteration 6456, lr = 0.00278359
I0412 13:37:37.772065 6200 solver.cpp:218] Iteration 6468 (2.36517 iter/s, 5.07363s/12 iters), loss = 0.666103
I0412 13:37:37.772119 6200 solver.cpp:237] Train net output #0: loss = 0.666103 (* 1 = 0.666103 loss)
I0412 13:37:37.772130 6200 sgd_solver.cpp:105] Iteration 6468, lr = 0.00277698
I0412 13:37:39.673898 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:37:42.713245 6200 solver.cpp:218] Iteration 6480 (2.42867 iter/s, 4.94098s/12 iters), loss = 0.33772
I0412 13:37:42.713301 6200 solver.cpp:237] Train net output #0: loss = 0.33772 (* 1 = 0.33772 loss)
I0412 13:37:42.713312 6200 sgd_solver.cpp:105] Iteration 6480, lr = 0.00277039
I0412 13:37:47.756630 6200 solver.cpp:218] Iteration 6492 (2.37946 iter/s, 5.04317s/12 iters), loss = 0.372162
I0412 13:37:47.756716 6200 solver.cpp:237] Train net output #0: loss = 0.372162 (* 1 = 0.372162 loss)
I0412 13:37:47.756727 6200 sgd_solver.cpp:105] Iteration 6492, lr = 0.00276381
I0412 13:37:52.617347 6200 solver.cpp:218] Iteration 6504 (2.46889 iter/s, 4.86049s/12 iters), loss = 0.257392
I0412 13:37:52.617398 6200 solver.cpp:237] Train net output #0: loss = 0.257392 (* 1 = 0.257392 loss)
I0412 13:37:52.617410 6200 sgd_solver.cpp:105] Iteration 6504, lr = 0.00275725
I0412 13:37:57.469441 6200 solver.cpp:218] Iteration 6516 (2.47326 iter/s, 4.8519s/12 iters), loss = 0.488492
I0412 13:37:57.469491 6200 solver.cpp:237] Train net output #0: loss = 0.488492 (* 1 = 0.488492 loss)
I0412 13:37:57.469503 6200 sgd_solver.cpp:105] Iteration 6516, lr = 0.00275071
I0412 13:38:01.980429 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6528.caffemodel
I0412 13:38:05.803671 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6528.solverstate
I0412 13:38:07.956662 6200 solver.cpp:330] Iteration 6528, Testing net (#0)
I0412 13:38:07.956692 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:38:09.891146 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:38:12.486959 6200 solver.cpp:397] Test net output #0: accuracy = 0.435049
I0412 13:38:12.487008 6200 solver.cpp:397] Test net output #1: loss = 2.78967 (* 1 = 2.78967 loss)
I0412 13:38:12.572284 6200 solver.cpp:218] Iteration 6528 (0.794577 iter/s, 15.1024s/12 iters), loss = 0.253056
I0412 13:38:12.572356 6200 solver.cpp:237] Train net output #0: loss = 0.253056 (* 1 = 0.253056 loss)
I0412 13:38:12.572373 6200 sgd_solver.cpp:105] Iteration 6528, lr = 0.00274418
I0412 13:38:16.764701 6200 solver.cpp:218] Iteration 6540 (2.86244 iter/s, 4.19223s/12 iters), loss = 0.532399
I0412 13:38:16.764760 6200 solver.cpp:237] Train net output #0: loss = 0.532399 (* 1 = 0.532399 loss)
I0412 13:38:16.764771 6200 sgd_solver.cpp:105] Iteration 6540, lr = 0.00273766
I0412 13:38:21.691598 6200 solver.cpp:218] Iteration 6552 (2.43571 iter/s, 4.9267s/12 iters), loss = 0.433366
I0412 13:38:21.691741 6200 solver.cpp:237] Train net output #0: loss = 0.433366 (* 1 = 0.433366 loss)
I0412 13:38:21.691752 6200 sgd_solver.cpp:105] Iteration 6552, lr = 0.00273116
I0412 13:38:26.589447 6200 solver.cpp:218] Iteration 6564 (2.4502 iter/s, 4.89757s/12 iters), loss = 0.336811
I0412 13:38:26.589504 6200 solver.cpp:237] Train net output #0: loss = 0.336811 (* 1 = 0.336811 loss)
I0412 13:38:26.589517 6200 sgd_solver.cpp:105] Iteration 6564, lr = 0.00272468
I0412 13:38:30.917033 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:38:31.692055 6200 solver.cpp:218] Iteration 6576 (2.35183 iter/s, 5.1024s/12 iters), loss = 0.398455
I0412 13:38:31.692098 6200 solver.cpp:237] Train net output #0: loss = 0.398455 (* 1 = 0.398455 loss)
I0412 13:38:31.692107 6200 sgd_solver.cpp:105] Iteration 6576, lr = 0.00271821
I0412 13:38:36.850387 6200 solver.cpp:218] Iteration 6588 (2.32642 iter/s, 5.15814s/12 iters), loss = 0.407941
I0412 13:38:36.850436 6200 solver.cpp:237] Train net output #0: loss = 0.407941 (* 1 = 0.407941 loss)
I0412 13:38:36.850445 6200 sgd_solver.cpp:105] Iteration 6588, lr = 0.00271175
I0412 13:38:42.100492 6200 solver.cpp:218] Iteration 6600 (2.28576 iter/s, 5.2499s/12 iters), loss = 0.427848
I0412 13:38:42.100539 6200 solver.cpp:237] Train net output #0: loss = 0.427848 (* 1 = 0.427848 loss)
I0412 13:38:42.100548 6200 sgd_solver.cpp:105] Iteration 6600, lr = 0.00270532
I0412 13:38:47.223748 6200 solver.cpp:218] Iteration 6612 (2.34235 iter/s, 5.12307s/12 iters), loss = 0.273452
I0412 13:38:47.223790 6200 solver.cpp:237] Train net output #0: loss = 0.273452 (* 1 = 0.273452 loss)
I0412 13:38:47.223799 6200 sgd_solver.cpp:105] Iteration 6612, lr = 0.00269889
I0412 13:38:52.300158 6200 solver.cpp:218] Iteration 6624 (2.36397 iter/s, 5.07622s/12 iters), loss = 0.508191
I0412 13:38:52.300251 6200 solver.cpp:237] Train net output #0: loss = 0.508191 (* 1 = 0.508191 loss)
I0412 13:38:52.300261 6200 sgd_solver.cpp:105] Iteration 6624, lr = 0.00269248
I0412 13:38:54.355192 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6630.caffemodel
I0412 13:39:00.460357 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6630.solverstate
I0412 13:39:04.026819 6200 solver.cpp:330] Iteration 6630, Testing net (#0)
I0412 13:39:04.026846 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:39:05.839602 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:39:08.492182 6200 solver.cpp:397] Test net output #0: accuracy = 0.431985
I0412 13:39:08.492230 6200 solver.cpp:397] Test net output #1: loss = 2.84087 (* 1 = 2.84087 loss)
I0412 13:39:10.336975 6200 solver.cpp:218] Iteration 6636 (0.665327 iter/s, 18.0362s/12 iters), loss = 0.488665
I0412 13:39:10.337049 6200 solver.cpp:237] Train net output #0: loss = 0.488665 (* 1 = 0.488665 loss)
I0412 13:39:10.337065 6200 sgd_solver.cpp:105] Iteration 6636, lr = 0.00268609
I0412 13:39:15.376099 6200 solver.cpp:218] Iteration 6648 (2.38146 iter/s, 5.03892s/12 iters), loss = 0.391102
I0412 13:39:15.376143 6200 solver.cpp:237] Train net output #0: loss = 0.391102 (* 1 = 0.391102 loss)
I0412 13:39:15.376152 6200 sgd_solver.cpp:105] Iteration 6648, lr = 0.00267971
I0412 13:39:20.506351 6200 solver.cpp:218] Iteration 6660 (2.33915 iter/s, 5.13007s/12 iters), loss = 0.561377
I0412 13:39:20.506392 6200 solver.cpp:237] Train net output #0: loss = 0.561377 (* 1 = 0.561377 loss)
I0412 13:39:20.506400 6200 sgd_solver.cpp:105] Iteration 6660, lr = 0.00267335
I0412 13:39:25.432407 6200 solver.cpp:218] Iteration 6672 (2.43612 iter/s, 4.92587s/12 iters), loss = 0.347879
I0412 13:39:25.432552 6200 solver.cpp:237] Train net output #0: loss = 0.347879 (* 1 = 0.347879 loss)
I0412 13:39:25.432564 6200 sgd_solver.cpp:105] Iteration 6672, lr = 0.00266701
I0412 13:39:26.792860 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:39:30.347090 6200 solver.cpp:218] Iteration 6684 (2.4418 iter/s, 4.9144s/12 iters), loss = 0.307613
I0412 13:39:30.347139 6200 solver.cpp:237] Train net output #0: loss = 0.307613 (* 1 = 0.307613 loss)
I0412 13:39:30.347152 6200 sgd_solver.cpp:105] Iteration 6684, lr = 0.00266067
I0412 13:39:35.424762 6200 solver.cpp:218] Iteration 6696 (2.36338 iter/s, 5.07748s/12 iters), loss = 0.363632
I0412 13:39:35.424811 6200 solver.cpp:237] Train net output #0: loss = 0.363632 (* 1 = 0.363632 loss)
I0412 13:39:35.424823 6200 sgd_solver.cpp:105] Iteration 6696, lr = 0.00265436
I0412 13:39:40.579880 6200 solver.cpp:218] Iteration 6708 (2.32787 iter/s, 5.15492s/12 iters), loss = 0.445656
I0412 13:39:40.579936 6200 solver.cpp:237] Train net output #0: loss = 0.445656 (* 1 = 0.445656 loss)
I0412 13:39:40.579948 6200 sgd_solver.cpp:105] Iteration 6708, lr = 0.00264805
I0412 13:39:45.700763 6200 solver.cpp:218] Iteration 6720 (2.34344 iter/s, 5.12068s/12 iters), loss = 0.335665
I0412 13:39:45.700817 6200 solver.cpp:237] Train net output #0: loss = 0.335665 (* 1 = 0.335665 loss)
I0412 13:39:45.700830 6200 sgd_solver.cpp:105] Iteration 6720, lr = 0.00264177
I0412 13:39:50.207459 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6732.caffemodel
I0412 13:39:52.259590 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6732.solverstate
I0412 13:40:07.499433 6200 solver.cpp:330] Iteration 6732, Testing net (#0)
I0412 13:40:07.499519 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:40:09.286564 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:40:11.930723 6200 solver.cpp:397] Test net output #0: accuracy = 0.442402
I0412 13:40:11.930771 6200 solver.cpp:397] Test net output #1: loss = 2.75519 (* 1 = 2.75519 loss)
I0412 13:40:12.016147 6200 solver.cpp:218] Iteration 6732 (0.45602 iter/s, 26.3146s/12 iters), loss = 0.351211
I0412 13:40:12.016193 6200 solver.cpp:237] Train net output #0: loss = 0.351211 (* 1 = 0.351211 loss)
I0412 13:40:12.016206 6200 sgd_solver.cpp:105] Iteration 6732, lr = 0.0026355
I0412 13:40:16.147814 6200 solver.cpp:218] Iteration 6744 (2.90452 iter/s, 4.1315s/12 iters), loss = 0.281198
I0412 13:40:16.147874 6200 solver.cpp:237] Train net output #0: loss = 0.281198 (* 1 = 0.281198 loss)
I0412 13:40:16.147889 6200 sgd_solver.cpp:105] Iteration 6744, lr = 0.00262924
I0412 13:40:21.239392 6200 solver.cpp:218] Iteration 6756 (2.35693 iter/s, 5.09137s/12 iters), loss = 0.310396
I0412 13:40:21.239452 6200 solver.cpp:237] Train net output #0: loss = 0.310396 (* 1 = 0.310396 loss)
I0412 13:40:21.239467 6200 sgd_solver.cpp:105] Iteration 6756, lr = 0.002623
I0412 13:40:26.599596 6200 solver.cpp:218] Iteration 6768 (2.23881 iter/s, 5.35999s/12 iters), loss = 0.411068
I0412 13:40:26.599640 6200 solver.cpp:237] Train net output #0: loss = 0.411068 (* 1 = 0.411068 loss)
I0412 13:40:26.599650 6200 sgd_solver.cpp:105] Iteration 6768, lr = 0.00261677
I0412 13:40:30.062091 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:40:31.545826 6200 solver.cpp:218] Iteration 6780 (2.42618 iter/s, 4.94604s/12 iters), loss = 0.374629
I0412 13:40:31.545881 6200 solver.cpp:237] Train net output #0: loss = 0.374629 (* 1 = 0.374629 loss)
I0412 13:40:31.545893 6200 sgd_solver.cpp:105] Iteration 6780, lr = 0.00261056
I0412 13:40:36.458994 6200 solver.cpp:218] Iteration 6792 (2.44251 iter/s, 4.91297s/12 iters), loss = 0.389929
I0412 13:40:36.459045 6200 solver.cpp:237] Train net output #0: loss = 0.389929 (* 1 = 0.389929 loss)
I0412 13:40:36.459056 6200 sgd_solver.cpp:105] Iteration 6792, lr = 0.00260436
I0412 13:40:41.359596 6200 solver.cpp:218] Iteration 6804 (2.44878 iter/s, 4.90041s/12 iters), loss = 0.329397
I0412 13:40:41.359764 6200 solver.cpp:237] Train net output #0: loss = 0.329397 (* 1 = 0.329397 loss)
I0412 13:40:41.359778 6200 sgd_solver.cpp:105] Iteration 6804, lr = 0.00259817
I0412 13:40:46.209923 6200 solver.cpp:218] Iteration 6816 (2.47422 iter/s, 4.85002s/12 iters), loss = 0.3137
I0412 13:40:46.209985 6200 solver.cpp:237] Train net output #0: loss = 0.3137 (* 1 = 0.3137 loss)
I0412 13:40:46.209997 6200 sgd_solver.cpp:105] Iteration 6816, lr = 0.00259201
I0412 13:40:51.185000 6200 solver.cpp:218] Iteration 6828 (2.41212 iter/s, 4.97487s/12 iters), loss = 0.356181
I0412 13:40:51.185048 6200 solver.cpp:237] Train net output #0: loss = 0.356181 (* 1 = 0.356181 loss)
I0412 13:40:51.185058 6200 sgd_solver.cpp:105] Iteration 6828, lr = 0.00258585
I0412 13:40:53.244485 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6834.caffemodel
I0412 13:40:55.364297 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6834.solverstate
I0412 13:40:59.147413 6200 solver.cpp:330] Iteration 6834, Testing net (#0)
I0412 13:40:59.147429 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:41:00.872344 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:41:03.593842 6200 solver.cpp:397] Test net output #0: accuracy = 0.441789
I0412 13:41:03.593892 6200 solver.cpp:397] Test net output #1: loss = 2.81608 (* 1 = 2.81608 loss)
I0412 13:41:05.430948 6200 solver.cpp:218] Iteration 6840 (0.842371 iter/s, 14.2455s/12 iters), loss = 0.277379
I0412 13:41:05.430999 6200 solver.cpp:237] Train net output #0: loss = 0.277379 (* 1 = 0.277379 loss)
I0412 13:41:05.431010 6200 sgd_solver.cpp:105] Iteration 6840, lr = 0.00257971
I0412 13:41:10.480049 6200 solver.cpp:218] Iteration 6852 (2.37675 iter/s, 5.0489s/12 iters), loss = 0.244675
I0412 13:41:10.480110 6200 solver.cpp:237] Train net output #0: loss = 0.244675 (* 1 = 0.244675 loss)
I0412 13:41:10.480124 6200 sgd_solver.cpp:105] Iteration 6852, lr = 0.00257359
I0412 13:41:15.622882 6200 solver.cpp:218] Iteration 6864 (2.33344 iter/s, 5.14263s/12 iters), loss = 0.347973
I0412 13:41:15.622979 6200 solver.cpp:237] Train net output #0: loss = 0.347973 (* 1 = 0.347973 loss)
I0412 13:41:15.622988 6200 sgd_solver.cpp:105] Iteration 6864, lr = 0.00256748
I0412 13:41:20.678181 6200 solver.cpp:218] Iteration 6876 (2.37386 iter/s, 5.05505s/12 iters), loss = 0.389372
I0412 13:41:20.678225 6200 solver.cpp:237] Train net output #0: loss = 0.389372 (* 1 = 0.389372 loss)
I0412 13:41:20.678232 6200 sgd_solver.cpp:105] Iteration 6876, lr = 0.00256138
I0412 13:41:21.289321 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:41:25.590576 6200 solver.cpp:218] Iteration 6888 (2.44289 iter/s, 4.91221s/12 iters), loss = 0.300391
I0412 13:41:25.590616 6200 solver.cpp:237] Train net output #0: loss = 0.300391 (* 1 = 0.300391 loss)
I0412 13:41:25.590626 6200 sgd_solver.cpp:105] Iteration 6888, lr = 0.0025553
I0412 13:41:30.527148 6200 solver.cpp:218] Iteration 6900 (2.43093 iter/s, 4.93639s/12 iters), loss = 0.215269
I0412 13:41:30.527186 6200 solver.cpp:237] Train net output #0: loss = 0.215269 (* 1 = 0.215269 loss)
I0412 13:41:30.527196 6200 sgd_solver.cpp:105] Iteration 6900, lr = 0.00254923
I0412 13:41:35.660920 6200 solver.cpp:218] Iteration 6912 (2.33755 iter/s, 5.13358s/12 iters), loss = 0.249273
I0412 13:41:35.660974 6200 solver.cpp:237] Train net output #0: loss = 0.249273 (* 1 = 0.249273 loss)
I0412 13:41:35.660986 6200 sgd_solver.cpp:105] Iteration 6912, lr = 0.00254318
I0412 13:41:40.575515 6200 solver.cpp:218] Iteration 6924 (2.4418 iter/s, 4.9144s/12 iters), loss = 0.312291
I0412 13:41:40.575557 6200 solver.cpp:237] Train net output #0: loss = 0.312291 (* 1 = 0.312291 loss)
I0412 13:41:40.575567 6200 sgd_solver.cpp:105] Iteration 6924, lr = 0.00253714
I0412 13:41:45.043056 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6936.caffemodel
I0412 13:41:47.028121 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6936.solverstate
I0412 13:41:48.588457 6200 solver.cpp:330] Iteration 6936, Testing net (#0)
I0412 13:41:48.588493 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:41:49.073585 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:41:50.226667 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:41:52.985313 6200 solver.cpp:397] Test net output #0: accuracy = 0.441789
I0412 13:41:52.985359 6200 solver.cpp:397] Test net output #1: loss = 2.78702 (* 1 = 2.78702 loss)
I0412 13:41:53.070785 6200 solver.cpp:218] Iteration 6936 (0.960393 iter/s, 12.4949s/12 iters), loss = 0.298272
I0412 13:41:53.070832 6200 solver.cpp:237] Train net output #0: loss = 0.298272 (* 1 = 0.298272 loss)
I0412 13:41:53.070842 6200 sgd_solver.cpp:105] Iteration 6936, lr = 0.00253112
I0412 13:41:57.255455 6200 solver.cpp:218] Iteration 6948 (2.86773 iter/s, 4.1845s/12 iters), loss = 0.320722
I0412 13:41:57.255511 6200 solver.cpp:237] Train net output #0: loss = 0.320722 (* 1 = 0.320722 loss)
I0412 13:41:57.255522 6200 sgd_solver.cpp:105] Iteration 6948, lr = 0.00252511
I0412 13:42:02.134471 6200 solver.cpp:218] Iteration 6960 (2.45962 iter/s, 4.87881s/12 iters), loss = 0.386698
I0412 13:42:02.134521 6200 solver.cpp:237] Train net output #0: loss = 0.386698 (* 1 = 0.386698 loss)
I0412 13:42:02.134534 6200 sgd_solver.cpp:105] Iteration 6960, lr = 0.00251911
I0412 13:42:07.025025 6200 solver.cpp:218] Iteration 6972 (2.45381 iter/s, 4.89036s/12 iters), loss = 0.328164
I0412 13:42:07.025076 6200 solver.cpp:237] Train net output #0: loss = 0.328164 (* 1 = 0.328164 loss)
I0412 13:42:07.025089 6200 sgd_solver.cpp:105] Iteration 6972, lr = 0.00251313
I0412 13:42:09.676502 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:42:12.018944 6200 solver.cpp:218] Iteration 6984 (2.40302 iter/s, 4.99372s/12 iters), loss = 0.34574
I0412 13:42:12.018987 6200 solver.cpp:237] Train net output #0: loss = 0.34574 (* 1 = 0.34574 loss)
I0412 13:42:12.018997 6200 sgd_solver.cpp:105] Iteration 6984, lr = 0.00250717
I0412 13:42:17.079146 6200 solver.cpp:218] Iteration 6996 (2.37154 iter/s, 5.06001s/12 iters), loss = 0.501575
I0412 13:42:17.079236 6200 solver.cpp:237] Train net output #0: loss = 0.501575 (* 1 = 0.501575 loss)
I0412 13:42:17.079244 6200 sgd_solver.cpp:105] Iteration 6996, lr = 0.00250121
I0412 13:42:21.972802 6200 solver.cpp:218] Iteration 7008 (2.45227 iter/s, 4.89343s/12 iters), loss = 0.404926
I0412 13:42:21.972846 6200 solver.cpp:237] Train net output #0: loss = 0.404926 (* 1 = 0.404926 loss)
I0412 13:42:21.972856 6200 sgd_solver.cpp:105] Iteration 7008, lr = 0.00249528
I0412 13:42:26.837697 6200 solver.cpp:218] Iteration 7020 (2.46675 iter/s, 4.86471s/12 iters), loss = 0.258778
I0412 13:42:26.837749 6200 solver.cpp:237] Train net output #0: loss = 0.258778 (* 1 = 0.258778 loss)
I0412 13:42:26.837760 6200 sgd_solver.cpp:105] Iteration 7020, lr = 0.00248935
I0412 13:42:31.827167 6200 solver.cpp:218] Iteration 7032 (2.40516 iter/s, 4.98927s/12 iters), loss = 0.233829
I0412 13:42:31.827219 6200 solver.cpp:237] Train net output #0: loss = 0.233829 (* 1 = 0.233829 loss)
I0412 13:42:31.827230 6200 sgd_solver.cpp:105] Iteration 7032, lr = 0.00248344
I0412 13:42:34.230073 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7038.caffemodel
I0412 13:42:36.682456 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7038.solverstate
I0412 13:42:40.966955 6200 solver.cpp:330] Iteration 7038, Testing net (#0)
I0412 13:42:40.966982 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:42:42.760049 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:42:45.635396 6200 solver.cpp:397] Test net output #0: accuracy = 0.452819
I0412 13:42:45.635427 6200 solver.cpp:397] Test net output #1: loss = 2.7847 (* 1 = 2.7847 loss)
I0412 13:42:47.456343 6200 solver.cpp:218] Iteration 7044 (0.767819 iter/s, 15.6287s/12 iters), loss = 0.295463
I0412 13:42:47.456547 6200 solver.cpp:237] Train net output #0: loss = 0.295463 (* 1 = 0.295463 loss)
I0412 13:42:47.456565 6200 sgd_solver.cpp:105] Iteration 7044, lr = 0.00247755
I0412 13:42:52.446571 6200 solver.cpp:218] Iteration 7056 (2.40486 iter/s, 4.98989s/12 iters), loss = 0.184745
I0412 13:42:52.446615 6200 solver.cpp:237] Train net output #0: loss = 0.184745 (* 1 = 0.184745 loss)
I0412 13:42:52.446625 6200 sgd_solver.cpp:105] Iteration 7056, lr = 0.00247166
I0412 13:42:57.383023 6200 solver.cpp:218] Iteration 7068 (2.43099 iter/s, 4.93626s/12 iters), loss = 0.390388
I0412 13:42:57.383078 6200 solver.cpp:237] Train net output #0: loss = 0.390388 (* 1 = 0.390388 loss)
I0412 13:42:57.383090 6200 sgd_solver.cpp:105] Iteration 7068, lr = 0.0024658
I0412 13:43:02.431241 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:43:02.535046 6200 solver.cpp:218] Iteration 7080 (2.32927 iter/s, 5.15182s/12 iters), loss = 0.353781
I0412 13:43:02.535097 6200 solver.cpp:237] Train net output #0: loss = 0.353781 (* 1 = 0.353781 loss)
I0412 13:43:02.535110 6200 sgd_solver.cpp:105] Iteration 7080, lr = 0.00245994
I0412 13:43:07.542201 6200 solver.cpp:218] Iteration 7092 (2.39666 iter/s, 5.00696s/12 iters), loss = 0.204016
I0412 13:43:07.542245 6200 solver.cpp:237] Train net output #0: loss = 0.204016 (* 1 = 0.204016 loss)
I0412 13:43:07.542256 6200 sgd_solver.cpp:105] Iteration 7092, lr = 0.0024541
I0412 13:43:12.440438 6200 solver.cpp:218] Iteration 7104 (2.44995 iter/s, 4.89805s/12 iters), loss = 0.265404
I0412 13:43:12.440491 6200 solver.cpp:237] Train net output #0: loss = 0.265404 (* 1 = 0.265404 loss)
I0412 13:43:12.440502 6200 sgd_solver.cpp:105] Iteration 7104, lr = 0.00244827
I0412 13:43:17.301645 6200 solver.cpp:218] Iteration 7116 (2.46862 iter/s, 4.86102s/12 iters), loss = 0.400173
I0412 13:43:17.301685 6200 solver.cpp:237] Train net output #0: loss = 0.400173 (* 1 = 0.400173 loss)
I0412 13:43:17.301695 6200 sgd_solver.cpp:105] Iteration 7116, lr = 0.00244246
I0412 13:43:22.137151 6200 solver.cpp:218] Iteration 7128 (2.48174 iter/s, 4.83532s/12 iters), loss = 0.319583
I0412 13:43:22.137260 6200 solver.cpp:237] Train net output #0: loss = 0.319583 (* 1 = 0.319583 loss)
I0412 13:43:22.137272 6200 sgd_solver.cpp:105] Iteration 7128, lr = 0.00243666
I0412 13:43:26.698815 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7140.caffemodel
I0412 13:43:30.180436 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7140.solverstate
I0412 13:43:33.728343 6200 solver.cpp:330] Iteration 7140, Testing net (#0)
I0412 13:43:33.728361 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:43:35.357626 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:43:38.205119 6200 solver.cpp:397] Test net output #0: accuracy = 0.458946
I0412 13:43:38.205149 6200 solver.cpp:397] Test net output #1: loss = 2.7832 (* 1 = 2.7832 loss)
I0412 13:43:38.290413 6200 solver.cpp:218] Iteration 7140 (0.742909 iter/s, 16.1527s/12 iters), loss = 0.30809
I0412 13:43:38.290472 6200 solver.cpp:237] Train net output #0: loss = 0.30809 (* 1 = 0.30809 loss)
I0412 13:43:38.290485 6200 sgd_solver.cpp:105] Iteration 7140, lr = 0.00243088
I0412 13:43:42.407320 6200 solver.cpp:218] Iteration 7152 (2.91494 iter/s, 4.11672s/12 iters), loss = 0.171298
I0412 13:43:42.407363 6200 solver.cpp:237] Train net output #0: loss = 0.171298 (* 1 = 0.171298 loss)
I0412 13:43:42.407371 6200 sgd_solver.cpp:105] Iteration 7152, lr = 0.00242511
I0412 13:43:47.461414 6200 solver.cpp:218] Iteration 7164 (2.3744 iter/s, 5.0539s/12 iters), loss = 0.20227
I0412 13:43:47.461469 6200 solver.cpp:237] Train net output #0: loss = 0.20227 (* 1 = 0.20227 loss)
I0412 13:43:47.461483 6200 sgd_solver.cpp:105] Iteration 7164, lr = 0.00241935
I0412 13:43:52.489045 6200 solver.cpp:218] Iteration 7176 (2.38691 iter/s, 5.02743s/12 iters), loss = 0.187555
I0412 13:43:52.489209 6200 solver.cpp:237] Train net output #0: loss = 0.187555 (* 1 = 0.187555 loss)
I0412 13:43:52.489224 6200 sgd_solver.cpp:105] Iteration 7176, lr = 0.0024136
I0412 13:43:54.561067 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:43:57.637560 6200 solver.cpp:218] Iteration 7188 (2.33091 iter/s, 5.14821s/12 iters), loss = 0.321615
I0412 13:43:57.637603 6200 solver.cpp:237] Train net output #0: loss = 0.321615 (* 1 = 0.321615 loss)
I0412 13:43:57.637611 6200 sgd_solver.cpp:105] Iteration 7188, lr = 0.00240787
I0412 13:44:02.527902 6200 solver.cpp:218] Iteration 7200 (2.45391 iter/s, 4.89015s/12 iters), loss = 0.210114
I0412 13:44:02.527945 6200 solver.cpp:237] Train net output #0: loss = 0.210114 (* 1 = 0.210114 loss)
I0412 13:44:02.527954 6200 sgd_solver.cpp:105] Iteration 7200, lr = 0.00240216
I0412 13:44:07.533170 6200 solver.cpp:218] Iteration 7212 (2.39757 iter/s, 5.00508s/12 iters), loss = 0.323355
I0412 13:44:07.533226 6200 solver.cpp:237] Train net output #0: loss = 0.323355 (* 1 = 0.323355 loss)
I0412 13:44:07.533237 6200 sgd_solver.cpp:105] Iteration 7212, lr = 0.00239645
I0412 13:44:12.478404 6200 solver.cpp:218] Iteration 7224 (2.42668 iter/s, 4.94504s/12 iters), loss = 0.181167
I0412 13:44:12.478458 6200 solver.cpp:237] Train net output #0: loss = 0.181167 (* 1 = 0.181167 loss)
I0412 13:44:12.478469 6200 sgd_solver.cpp:105] Iteration 7224, lr = 0.00239076
I0412 13:44:17.414755 6200 solver.cpp:218] Iteration 7236 (2.43104 iter/s, 4.93615s/12 iters), loss = 0.372568
I0412 13:44:17.414801 6200 solver.cpp:237] Train net output #0: loss = 0.372568 (* 1 = 0.372568 loss)
I0412 13:44:17.414813 6200 sgd_solver.cpp:105] Iteration 7236, lr = 0.00238509
I0412 13:44:19.462580 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7242.caffemodel
I0412 13:44:23.998561 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7242.solverstate
I0412 13:44:26.212899 6200 solver.cpp:330] Iteration 7242, Testing net (#0)
I0412 13:44:26.212929 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:44:27.833192 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:44:30.664038 6200 solver.cpp:397] Test net output #0: accuracy = 0.45098
I0412 13:44:30.664088 6200 solver.cpp:397] Test net output #1: loss = 2.79291 (* 1 = 2.79291 loss)
I0412 13:44:32.379467 6200 solver.cpp:218] Iteration 7248 (0.801911 iter/s, 14.9643s/12 iters), loss = 0.252138
I0412 13:44:32.379521 6200 solver.cpp:237] Train net output #0: loss = 0.252138 (* 1 = 0.252138 loss)
I0412 13:44:32.379534 6200 sgd_solver.cpp:105] Iteration 7248, lr = 0.00237942
I0412 13:44:37.485065 6200 solver.cpp:218] Iteration 7260 (2.35045 iter/s, 5.1054s/12 iters), loss = 0.165393
I0412 13:44:37.485110 6200 solver.cpp:237] Train net output #0: loss = 0.165393 (* 1 = 0.165393 loss)
I0412 13:44:37.485121 6200 sgd_solver.cpp:105] Iteration 7260, lr = 0.00237378
I0412 13:44:42.425141 6200 solver.cpp:218] Iteration 7272 (2.42921 iter/s, 4.93988s/12 iters), loss = 0.272774
I0412 13:44:42.425185 6200 solver.cpp:237] Train net output #0: loss = 0.272774 (* 1 = 0.272774 loss)
I0412 13:44:42.425194 6200 sgd_solver.cpp:105] Iteration 7272, lr = 0.00236814
I0412 13:44:46.743980 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:44:47.472733 6200 solver.cpp:218] Iteration 7284 (2.37746 iter/s, 5.0474s/12 iters), loss = 0.248227
I0412 13:44:47.472781 6200 solver.cpp:237] Train net output #0: loss = 0.248227 (* 1 = 0.248227 loss)
I0412 13:44:47.472793 6200 sgd_solver.cpp:105] Iteration 7284, lr = 0.00236252
I0412 13:44:52.419030 6200 solver.cpp:218] Iteration 7296 (2.42615 iter/s, 4.9461s/12 iters), loss = 0.195935
I0412 13:44:52.419081 6200 solver.cpp:237] Train net output #0: loss = 0.195935 (* 1 = 0.195935 loss)
I0412 13:44:52.419092 6200 sgd_solver.cpp:105] Iteration 7296, lr = 0.00235691
I0412 13:44:57.331250 6200 solver.cpp:218] Iteration 7308 (2.44298 iter/s, 4.91203s/12 iters), loss = 0.152834
I0412 13:44:57.331352 6200 solver.cpp:237] Train net output #0: loss = 0.152834 (* 1 = 0.152834 loss)
I0412 13:44:57.331362 6200 sgd_solver.cpp:105] Iteration 7308, lr = 0.00235131
I0412 13:45:02.197088 6200 solver.cpp:218] Iteration 7320 (2.4663 iter/s, 4.86559s/12 iters), loss = 0.49126
I0412 13:45:02.197142 6200 solver.cpp:237] Train net output #0: loss = 0.49126 (* 1 = 0.49126 loss)
I0412 13:45:02.197154 6200 sgd_solver.cpp:105] Iteration 7320, lr = 0.00234573
I0412 13:45:07.170513 6200 solver.cpp:218] Iteration 7332 (2.41292 iter/s, 4.97322s/12 iters), loss = 0.262614
I0412 13:45:07.170564 6200 solver.cpp:237] Train net output #0: loss = 0.262614 (* 1 = 0.262614 loss)
I0412 13:45:07.170576 6200 sgd_solver.cpp:105] Iteration 7332, lr = 0.00234016
I0412 13:45:11.644296 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7344.caffemodel
I0412 13:45:17.769529 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7344.solverstate
I0412 13:45:20.620314 6200 solver.cpp:330] Iteration 7344, Testing net (#0)
I0412 13:45:20.620342 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:45:22.204943 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:45:25.145289 6200 solver.cpp:397] Test net output #0: accuracy = 0.460784
I0412 13:45:25.145330 6200 solver.cpp:397] Test net output #1: loss = 2.78633 (* 1 = 2.78633 loss)
I0412 13:45:25.230650 6200 solver.cpp:218] Iteration 7344 (0.664467 iter/s, 18.0596s/12 iters), loss = 0.202888
I0412 13:45:25.230695 6200 solver.cpp:237] Train net output #0: loss = 0.202888 (* 1 = 0.202888 loss)
I0412 13:45:25.230705 6200 sgd_solver.cpp:105] Iteration 7344, lr = 0.0023346
I0412 13:45:29.365650 6200 solver.cpp:218] Iteration 7356 (2.90218 iter/s, 4.13483s/12 iters), loss = 0.356991
I0412 13:45:29.365790 6200 solver.cpp:237] Train net output #0: loss = 0.356991 (* 1 = 0.356991 loss)
I0412 13:45:29.365805 6200 sgd_solver.cpp:105] Iteration 7356, lr = 0.00232906
I0412 13:45:34.215829 6200 solver.cpp:218] Iteration 7368 (2.47428 iter/s, 4.8499s/12 iters), loss = 0.346195
I0412 13:45:34.215883 6200 solver.cpp:237] Train net output #0: loss = 0.346195 (* 1 = 0.346195 loss)
I0412 13:45:34.215893 6200 sgd_solver.cpp:105] Iteration 7368, lr = 0.00232353
I0412 13:45:39.124325 6200 solver.cpp:218] Iteration 7380 (2.44484 iter/s, 4.9083s/12 iters), loss = 0.180677
I0412 13:45:39.124372 6200 solver.cpp:237] Train net output #0: loss = 0.180677 (* 1 = 0.180677 loss)
I0412 13:45:39.124382 6200 sgd_solver.cpp:105] Iteration 7380, lr = 0.00231802
I0412 13:45:40.552371 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:45:44.372905 6200 solver.cpp:218] Iteration 7392 (2.28642 iter/s, 5.24838s/12 iters), loss = 0.289082
I0412 13:45:44.372948 6200 solver.cpp:237] Train net output #0: loss = 0.289082 (* 1 = 0.289082 loss)
I0412 13:45:44.372957 6200 sgd_solver.cpp:105] Iteration 7392, lr = 0.00231251
I0412 13:45:49.259479 6200 solver.cpp:218] Iteration 7404 (2.45581 iter/s, 4.88638s/12 iters), loss = 0.238782
I0412 13:45:49.259541 6200 solver.cpp:237] Train net output #0: loss = 0.238782 (* 1 = 0.238782 loss)
I0412 13:45:49.259552 6200 sgd_solver.cpp:105] Iteration 7404, lr = 0.00230702
I0412 13:45:54.392112 6200 solver.cpp:218] Iteration 7416 (2.33808 iter/s, 5.13242s/12 iters), loss = 0.252154
I0412 13:45:54.392168 6200 solver.cpp:237] Train net output #0: loss = 0.252154 (* 1 = 0.252154 loss)
I0412 13:45:54.392181 6200 sgd_solver.cpp:105] Iteration 7416, lr = 0.00230154
I0412 13:45:59.450090 6200 solver.cpp:218] Iteration 7428 (2.37258 iter/s, 5.05778s/12 iters), loss = 0.26709
I0412 13:45:59.450250 6200 solver.cpp:237] Train net output #0: loss = 0.26709 (* 1 = 0.26709 loss)
I0412 13:45:59.450265 6200 sgd_solver.cpp:105] Iteration 7428, lr = 0.00229608
I0412 13:46:04.509652 6200 solver.cpp:218] Iteration 7440 (2.37189 iter/s, 5.05926s/12 iters), loss = 0.170898
I0412 13:46:04.509693 6200 solver.cpp:237] Train net output #0: loss = 0.170898 (* 1 = 0.170898 loss)
I0412 13:46:04.509701 6200 sgd_solver.cpp:105] Iteration 7440, lr = 0.00229063
I0412 13:46:06.515853 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7446.caffemodel
I0412 13:46:08.557647 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7446.solverstate
I0412 13:46:10.619027 6200 solver.cpp:330] Iteration 7446, Testing net (#0)
I0412 13:46:10.619055 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:46:12.141824 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:46:15.106379 6200 solver.cpp:397] Test net output #0: accuracy = 0.457721
I0412 13:46:15.106431 6200 solver.cpp:397] Test net output #1: loss = 2.85658 (* 1 = 2.85658 loss)
I0412 13:46:17.070935 6200 solver.cpp:218] Iteration 7452 (0.955346 iter/s, 12.5609s/12 iters), loss = 0.137094
I0412 13:46:17.070983 6200 solver.cpp:237] Train net output #0: loss = 0.137094 (* 1 = 0.137094 loss)
I0412 13:46:17.070992 6200 sgd_solver.cpp:105] Iteration 7452, lr = 0.00228519
I0412 13:46:21.986207 6200 solver.cpp:218] Iteration 7464 (2.44147 iter/s, 4.91508s/12 iters), loss = 0.295747
I0412 13:46:21.986248 6200 solver.cpp:237] Train net output #0: loss = 0.295747 (* 1 = 0.295747 loss)
I0412 13:46:21.986258 6200 sgd_solver.cpp:105] Iteration 7464, lr = 0.00227976
I0412 13:46:26.894215 6200 solver.cpp:218] Iteration 7476 (2.44508 iter/s, 4.90782s/12 iters), loss = 0.230527
I0412 13:46:26.894260 6200 solver.cpp:237] Train net output #0: loss = 0.230527 (* 1 = 0.230527 loss)
I0412 13:46:26.894269 6200 sgd_solver.cpp:105] Iteration 7476, lr = 0.00227435
I0412 13:46:30.364069 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:46:31.795274 6200 solver.cpp:218] Iteration 7488 (2.44855 iter/s, 4.90087s/12 iters), loss = 0.209752
I0412 13:46:31.795318 6200 solver.cpp:237] Train net output #0: loss = 0.209752 (* 1 = 0.209752 loss)
I0412 13:46:31.795327 6200 sgd_solver.cpp:105] Iteration 7488, lr = 0.00226895
I0412 13:46:36.677467 6200 solver.cpp:218] Iteration 7500 (2.45801 iter/s, 4.882s/12 iters), loss = 0.276743
I0412 13:46:36.677525 6200 solver.cpp:237] Train net output #0: loss = 0.276743 (* 1 = 0.276743 loss)
I0412 13:46:36.677537 6200 sgd_solver.cpp:105] Iteration 7500, lr = 0.00226357
I0412 13:46:41.581094 6200 solver.cpp:218] Iteration 7512 (2.44727 iter/s, 4.90342s/12 iters), loss = 0.212113
I0412 13:46:41.581148 6200 solver.cpp:237] Train net output #0: loss = 0.212113 (* 1 = 0.212113 loss)
I0412 13:46:41.581161 6200 sgd_solver.cpp:105] Iteration 7512, lr = 0.00225819
I0412 13:46:46.537869 6200 solver.cpp:218] Iteration 7524 (2.42102 iter/s, 4.95658s/12 iters), loss = 0.321674
I0412 13:46:46.537912 6200 solver.cpp:237] Train net output #0: loss = 0.321674 (* 1 = 0.321674 loss)
I0412 13:46:46.537921 6200 sgd_solver.cpp:105] Iteration 7524, lr = 0.00225283
I0412 13:46:51.463158 6200 solver.cpp:218] Iteration 7536 (2.4365 iter/s, 4.9251s/12 iters), loss = 0.173347
I0412 13:46:51.463220 6200 solver.cpp:237] Train net output #0: loss = 0.173347 (* 1 = 0.173347 loss)
I0412 13:46:51.463234 6200 sgd_solver.cpp:105] Iteration 7536, lr = 0.00224748
I0412 13:46:55.937994 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7548.caffemodel
I0412 13:46:57.968709 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7548.solverstate
I0412 13:46:59.569470 6200 solver.cpp:330] Iteration 7548, Testing net (#0)
I0412 13:46:59.569494 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:47:01.059659 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:47:04.046296 6200 solver.cpp:397] Test net output #0: accuracy = 0.455882
I0412 13:47:04.046345 6200 solver.cpp:397] Test net output #1: loss = 3.01104 (* 1 = 3.01104 loss)
I0412 13:47:04.131989 6200 solver.cpp:218] Iteration 7548 (0.947237 iter/s, 12.6684s/12 iters), loss = 0.225271
I0412 13:47:04.132051 6200 solver.cpp:237] Train net output #0: loss = 0.225271 (* 1 = 0.225271 loss)
I0412 13:47:04.132064 6200 sgd_solver.cpp:105] Iteration 7548, lr = 0.00224215
I0412 13:47:08.260788 6200 solver.cpp:218] Iteration 7560 (2.90654 iter/s, 4.12861s/12 iters), loss = 0.146552
I0412 13:47:08.260844 6200 solver.cpp:237] Train net output #0: loss = 0.146552 (* 1 = 0.146552 loss)
I0412 13:47:08.260857 6200 sgd_solver.cpp:105] Iteration 7560, lr = 0.00223682
I0412 13:47:13.323251 6200 solver.cpp:218] Iteration 7572 (2.37048 iter/s, 5.06226s/12 iters), loss = 0.595146
I0412 13:47:13.323295 6200 solver.cpp:237] Train net output #0: loss = 0.595146 (* 1 = 0.595146 loss)
I0412 13:47:13.323303 6200 sgd_solver.cpp:105] Iteration 7572, lr = 0.00223151
I0412 13:47:18.275306 6200 solver.cpp:218] Iteration 7584 (2.42333 iter/s, 4.95187s/12 iters), loss = 0.0995237
I0412 13:47:18.275346 6200 solver.cpp:237] Train net output #0: loss = 0.0995237 (* 1 = 0.0995237 loss)
I0412 13:47:18.275357 6200 sgd_solver.cpp:105] Iteration 7584, lr = 0.00222621
I0412 13:47:18.927212 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:47:23.174170 6200 solver.cpp:218] Iteration 7596 (2.44964 iter/s, 4.89868s/12 iters), loss = 0.301989
I0412 13:47:23.174212 6200 solver.cpp:237] Train net output #0: loss = 0.301989 (* 1 = 0.301989 loss)
I0412 13:47:23.174221 6200 sgd_solver.cpp:105] Iteration 7596, lr = 0.00222093
I0412 13:47:28.107381 6200 solver.cpp:218] Iteration 7608 (2.43259 iter/s, 4.93302s/12 iters), loss = 0.322013
I0412 13:47:28.107429 6200 solver.cpp:237] Train net output #0: loss = 0.322013 (* 1 = 0.322013 loss)
I0412 13:47:28.107437 6200 sgd_solver.cpp:105] Iteration 7608, lr = 0.00221565
I0412 13:47:33.030733 6200 solver.cpp:218] Iteration 7620 (2.43746 iter/s, 4.92316s/12 iters), loss = 0.221556
I0412 13:47:33.030853 6200 solver.cpp:237] Train net output #0: loss = 0.221556 (* 1 = 0.221556 loss)
I0412 13:47:33.030862 6200 sgd_solver.cpp:105] Iteration 7620, lr = 0.00221039
I0412 13:47:34.954882 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:47:37.900907 6200 solver.cpp:218] Iteration 7632 (2.46411 iter/s, 4.86991s/12 iters), loss = 0.362159
I0412 13:47:37.900955 6200 solver.cpp:237] Train net output #0: loss = 0.362159 (* 1 = 0.362159 loss)
I0412 13:47:37.900964 6200 sgd_solver.cpp:105] Iteration 7632, lr = 0.00220515
I0412 13:47:42.777283 6200 solver.cpp:218] Iteration 7644 (2.46094 iter/s, 4.87618s/12 iters), loss = 0.220559
I0412 13:47:42.777334 6200 solver.cpp:237] Train net output #0: loss = 0.220559 (* 1 = 0.220559 loss)
I0412 13:47:42.777346 6200 sgd_solver.cpp:105] Iteration 7644, lr = 0.00219991
I0412 13:47:44.791615 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7650.caffemodel
I0412 13:47:49.036793 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7650.solverstate
I0412 13:47:50.601284 6200 solver.cpp:330] Iteration 7650, Testing net (#0)
I0412 13:47:50.601315 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:47:52.020043 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:47:55.010114 6200 solver.cpp:397] Test net output #0: accuracy = 0.460172
I0412 13:47:55.010162 6200 solver.cpp:397] Test net output #1: loss = 2.8786 (* 1 = 2.8786 loss)
I0412 13:47:56.751101 6200 solver.cpp:218] Iteration 7656 (0.858776 iter/s, 13.9734s/12 iters), loss = 0.231538
I0412 13:47:56.751150 6200 solver.cpp:237] Train net output #0: loss = 0.231538 (* 1 = 0.231538 loss)
I0412 13:47:56.751160 6200 sgd_solver.cpp:105] Iteration 7656, lr = 0.00219469
I0412 13:48:01.694589 6200 solver.cpp:218] Iteration 7668 (2.42753 iter/s, 4.94329s/12 iters), loss = 0.158646
I0412 13:48:01.694631 6200 solver.cpp:237] Train net output #0: loss = 0.158646 (* 1 = 0.158646 loss)
I0412 13:48:01.694640 6200 sgd_solver.cpp:105] Iteration 7668, lr = 0.00218948
I0412 13:48:07.043576 6200 solver.cpp:218] Iteration 7680 (2.2435 iter/s, 5.34879s/12 iters), loss = 0.154352
I0412 13:48:07.043709 6200 solver.cpp:237] Train net output #0: loss = 0.154352 (* 1 = 0.154352 loss)
I0412 13:48:07.043720 6200 sgd_solver.cpp:105] Iteration 7680, lr = 0.00218428
I0412 13:48:09.794864 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:48:11.940908 6200 solver.cpp:218] Iteration 7692 (2.45045 iter/s, 4.89705s/12 iters), loss = 0.218889
I0412 13:48:11.940960 6200 solver.cpp:237] Train net output #0: loss = 0.218889 (* 1 = 0.218889 loss)
I0412 13:48:11.940971 6200 sgd_solver.cpp:105] Iteration 7692, lr = 0.00217909
I0412 13:48:16.851734 6200 solver.cpp:218] Iteration 7704 (2.44368 iter/s, 4.91063s/12 iters), loss = 0.179278
I0412 13:48:16.851781 6200 solver.cpp:237] Train net output #0: loss = 0.179278 (* 1 = 0.179278 loss)
I0412 13:48:16.851792 6200 sgd_solver.cpp:105] Iteration 7704, lr = 0.00217392
I0412 13:48:21.771425 6200 solver.cpp:218] Iteration 7716 (2.43927 iter/s, 4.9195s/12 iters), loss = 0.27066
I0412 13:48:21.771478 6200 solver.cpp:237] Train net output #0: loss = 0.27066 (* 1 = 0.27066 loss)
I0412 13:48:21.771490 6200 sgd_solver.cpp:105] Iteration 7716, lr = 0.00216876
I0412 13:48:26.763336 6200 solver.cpp:218] Iteration 7728 (2.40398 iter/s, 4.99172s/12 iters), loss = 0.197488
I0412 13:48:26.763382 6200 solver.cpp:237] Train net output #0: loss = 0.197488 (* 1 = 0.197488 loss)
I0412 13:48:26.763391 6200 sgd_solver.cpp:105] Iteration 7728, lr = 0.00216361
I0412 13:48:31.779901 6200 solver.cpp:218] Iteration 7740 (2.39217 iter/s, 5.01637s/12 iters), loss = 0.152927
I0412 13:48:31.779953 6200 solver.cpp:237] Train net output #0: loss = 0.152927 (* 1 = 0.152927 loss)
I0412 13:48:31.779965 6200 sgd_solver.cpp:105] Iteration 7740, lr = 0.00215847
I0412 13:48:36.616200 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7752.caffemodel
I0412 13:48:39.863111 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7752.solverstate
I0412 13:48:41.819679 6200 solver.cpp:330] Iteration 7752, Testing net (#0)
I0412 13:48:41.819702 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:48:43.264609 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:48:46.289572 6200 solver.cpp:397] Test net output #0: accuracy = 0.469363
I0412 13:48:46.289615 6200 solver.cpp:397] Test net output #1: loss = 2.88272 (* 1 = 2.88272 loss)
I0412 13:48:46.375313 6200 solver.cpp:218] Iteration 7752 (0.822202 iter/s, 14.595s/12 iters), loss = 0.141767
I0412 13:48:46.375358 6200 solver.cpp:237] Train net output #0: loss = 0.141767 (* 1 = 0.141767 loss)
I0412 13:48:46.375367 6200 sgd_solver.cpp:105] Iteration 7752, lr = 0.00215335
I0412 13:48:50.447930 6200 solver.cpp:218] Iteration 7764 (2.94663 iter/s, 4.07244s/12 iters), loss = 0.109339
I0412 13:48:50.447980 6200 solver.cpp:237] Train net output #0: loss = 0.109339 (* 1 = 0.109339 loss)
I0412 13:48:50.447993 6200 sgd_solver.cpp:105] Iteration 7764, lr = 0.00214823
I0412 13:48:55.372450 6200 solver.cpp:218] Iteration 7776 (2.43688 iter/s, 4.92433s/12 iters), loss = 0.203422
I0412 13:48:55.372489 6200 solver.cpp:237] Train net output #0: loss = 0.203422 (* 1 = 0.203422 loss)
I0412 13:48:55.372499 6200 sgd_solver.cpp:105] Iteration 7776, lr = 0.00214313
I0412 13:49:00.249049 6200 solver.cpp:218] Iteration 7788 (2.46082 iter/s, 4.87641s/12 iters), loss = 0.26134
I0412 13:49:00.249106 6200 solver.cpp:237] Train net output #0: loss = 0.26134 (* 1 = 0.26134 loss)
I0412 13:49:00.249119 6200 sgd_solver.cpp:105] Iteration 7788, lr = 0.00213805
I0412 13:49:00.257138 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:49:05.178732 6200 solver.cpp:218] Iteration 7800 (2.43433 iter/s, 4.92948s/12 iters), loss = 0.282169
I0412 13:49:05.178778 6200 solver.cpp:237] Train net output #0: loss = 0.282169 (* 1 = 0.282169 loss)
I0412 13:49:05.178788 6200 sgd_solver.cpp:105] Iteration 7800, lr = 0.00213297
I0412 13:49:10.100069 6200 solver.cpp:218] Iteration 7812 (2.43846 iter/s, 4.92115s/12 iters), loss = 0.22183
I0412 13:49:10.100201 6200 solver.cpp:237] Train net output #0: loss = 0.22183 (* 1 = 0.22183 loss)
I0412 13:49:10.100211 6200 sgd_solver.cpp:105] Iteration 7812, lr = 0.00212791
I0412 13:49:15.275988 6200 solver.cpp:218] Iteration 7824 (2.31855 iter/s, 5.17564s/12 iters), loss = 0.155193
I0412 13:49:15.276034 6200 solver.cpp:237] Train net output #0: loss = 0.155193 (* 1 = 0.155193 loss)
I0412 13:49:15.276043 6200 sgd_solver.cpp:105] Iteration 7824, lr = 0.00212285
I0412 13:49:20.375213 6200 solver.cpp:218] Iteration 7836 (2.35339 iter/s, 5.09903s/12 iters), loss = 0.114667
I0412 13:49:20.375258 6200 solver.cpp:237] Train net output #0: loss = 0.114667 (* 1 = 0.114667 loss)
I0412 13:49:20.375267 6200 sgd_solver.cpp:105] Iteration 7836, lr = 0.00211781
I0412 13:49:25.465987 6200 solver.cpp:218] Iteration 7848 (2.3573 iter/s, 5.09056s/12 iters), loss = 0.243968
I0412 13:49:25.466033 6200 solver.cpp:237] Train net output #0: loss = 0.243968 (* 1 = 0.243968 loss)
I0412 13:49:25.466042 6200 sgd_solver.cpp:105] Iteration 7848, lr = 0.00211279
I0412 13:49:27.539240 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7854.caffemodel
I0412 13:49:30.070315 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7854.solverstate
I0412 13:49:33.790369 6200 solver.cpp:330] Iteration 7854, Testing net (#0)
I0412 13:49:33.790396 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:49:35.208036 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:49:38.285625 6200 solver.cpp:397] Test net output #0: accuracy = 0.463235
I0412 13:49:38.285655 6200 solver.cpp:397] Test net output #1: loss = 2.80629 (* 1 = 2.80629 loss)
I0412 13:49:40.051362 6200 solver.cpp:218] Iteration 7860 (0.822767 iter/s, 14.5849s/12 iters), loss = 0.183912
I0412 13:49:40.051409 6200 solver.cpp:237] Train net output #0: loss = 0.183912 (* 1 = 0.183912 loss)
I0412 13:49:40.051419 6200 sgd_solver.cpp:105] Iteration 7860, lr = 0.00210777
I0412 13:49:44.979513 6200 solver.cpp:218] Iteration 7872 (2.43509 iter/s, 4.92796s/12 iters), loss = 0.108757
I0412 13:49:44.979610 6200 solver.cpp:237] Train net output #0: loss = 0.108757 (* 1 = 0.108757 loss)
I0412 13:49:44.979620 6200 sgd_solver.cpp:105] Iteration 7872, lr = 0.00210277
I0412 13:49:50.025887 6200 solver.cpp:218] Iteration 7884 (2.37806 iter/s, 5.04613s/12 iters), loss = 0.234304
I0412 13:49:50.025930 6200 solver.cpp:237] Train net output #0: loss = 0.234304 (* 1 = 0.234304 loss)
I0412 13:49:50.025940 6200 sgd_solver.cpp:105] Iteration 7884, lr = 0.00209777
I0412 13:49:52.117276 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:49:54.983017 6200 solver.cpp:218] Iteration 7896 (2.42085 iter/s, 4.95694s/12 iters), loss = 0.211987
I0412 13:49:54.983060 6200 solver.cpp:237] Train net output #0: loss = 0.211987 (* 1 = 0.211987 loss)
I0412 13:49:54.983069 6200 sgd_solver.cpp:105] Iteration 7896, lr = 0.00209279
I0412 13:49:59.918545 6200 solver.cpp:218] Iteration 7908 (2.43144 iter/s, 4.93534s/12 iters), loss = 0.212199
I0412 13:49:59.918587 6200 solver.cpp:237] Train net output #0: loss = 0.212199 (* 1 = 0.212199 loss)
I0412 13:49:59.918597 6200 sgd_solver.cpp:105] Iteration 7908, lr = 0.00208782
I0412 13:50:04.816906 6200 solver.cpp:218] Iteration 7920 (2.44989 iter/s, 4.89818s/12 iters), loss = 0.083055
I0412 13:50:04.816951 6200 solver.cpp:237] Train net output #0: loss = 0.0830551 (* 1 = 0.0830551 loss)
I0412 13:50:04.816960 6200 sgd_solver.cpp:105] Iteration 7920, lr = 0.00208287
I0412 13:50:09.783187 6200 solver.cpp:218] Iteration 7932 (2.41639 iter/s, 4.96609s/12 iters), loss = 0.0977808
I0412 13:50:09.783242 6200 solver.cpp:237] Train net output #0: loss = 0.0977809 (* 1 = 0.0977809 loss)
I0412 13:50:09.783255 6200 sgd_solver.cpp:105] Iteration 7932, lr = 0.00207792
I0412 13:50:14.853849 6200 solver.cpp:218] Iteration 7944 (2.36665 iter/s, 5.07046s/12 iters), loss = 0.166565
I0412 13:50:14.853899 6200 solver.cpp:237] Train net output #0: loss = 0.166565 (* 1 = 0.166565 loss)
I0412 13:50:14.853909 6200 sgd_solver.cpp:105] Iteration 7944, lr = 0.00207299
I0412 13:50:19.327525 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7956.caffemodel
I0412 13:50:21.375170 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7956.solverstate
I0412 13:50:22.944919 6200 solver.cpp:330] Iteration 7956, Testing net (#0)
I0412 13:50:22.944945 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:50:24.294879 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:50:27.454900 6200 solver.cpp:397] Test net output #0: accuracy = 0.463235
I0412 13:50:27.454927 6200 solver.cpp:397] Test net output #1: loss = 2.87532 (* 1 = 2.87532 loss)
I0412 13:50:27.540417 6200 solver.cpp:218] Iteration 7956 (0.945912 iter/s, 12.6862s/12 iters), loss = 0.167017
I0412 13:50:27.540460 6200 solver.cpp:237] Train net output #0: loss = 0.167018 (* 1 = 0.167018 loss)
I0412 13:50:27.540470 6200 sgd_solver.cpp:105] Iteration 7956, lr = 0.00206807
I0412 13:50:31.882354 6200 solver.cpp:218] Iteration 7968 (2.76385 iter/s, 4.34176s/12 iters), loss = 0.181759
I0412 13:50:31.882401 6200 solver.cpp:237] Train net output #0: loss = 0.18176 (* 1 = 0.18176 loss)
I0412 13:50:31.882412 6200 sgd_solver.cpp:105] Iteration 7968, lr = 0.00206316
I0412 13:50:36.876062 6200 solver.cpp:218] Iteration 7980 (2.40312 iter/s, 4.99351s/12 iters), loss = 0.239589
I0412 13:50:36.876109 6200 solver.cpp:237] Train net output #0: loss = 0.239589 (* 1 = 0.239589 loss)
I0412 13:50:36.876119 6200 sgd_solver.cpp:105] Iteration 7980, lr = 0.00205826
I0412 13:50:41.159198 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:50:41.857234 6200 solver.cpp:218] Iteration 7992 (2.40917 iter/s, 4.98098s/12 iters), loss = 0.280082
I0412 13:50:41.857283 6200 solver.cpp:237] Train net output #0: loss = 0.280083 (* 1 = 0.280083 loss)
I0412 13:50:41.857293 6200 sgd_solver.cpp:105] Iteration 7992, lr = 0.00205337
I0412 13:50:46.803778 6200 solver.cpp:218] Iteration 8004 (2.42603 iter/s, 4.94635s/12 iters), loss = 0.124869
I0412 13:50:46.803828 6200 solver.cpp:237] Train net output #0: loss = 0.124869 (* 1 = 0.124869 loss)
I0412 13:50:46.803839 6200 sgd_solver.cpp:105] Iteration 8004, lr = 0.0020485
I0412 13:50:51.861415 6200 solver.cpp:218] Iteration 8016 (2.37274 iter/s, 5.05744s/12 iters), loss = 0.146786
I0412 13:50:51.861536 6200 solver.cpp:237] Train net output #0: loss = 0.146786 (* 1 = 0.146786 loss)
I0412 13:50:51.861548 6200 sgd_solver.cpp:105] Iteration 8016, lr = 0.00204363
I0412 13:50:56.939658 6200 solver.cpp:218] Iteration 8028 (2.36314 iter/s, 5.07798s/12 iters), loss = 0.257699
I0412 13:50:56.939703 6200 solver.cpp:237] Train net output #0: loss = 0.257699 (* 1 = 0.257699 loss)
I0412 13:50:56.939713 6200 sgd_solver.cpp:105] Iteration 8028, lr = 0.00203878
I0412 13:51:02.167912 6200 solver.cpp:218] Iteration 8040 (2.29531 iter/s, 5.22806s/12 iters), loss = 0.148337
I0412 13:51:02.167965 6200 solver.cpp:237] Train net output #0: loss = 0.148337 (* 1 = 0.148337 loss)
I0412 13:51:02.167979 6200 sgd_solver.cpp:105] Iteration 8040, lr = 0.00203394
I0412 13:51:07.075484 6200 solver.cpp:218] Iteration 8052 (2.4453 iter/s, 4.90737s/12 iters), loss = 0.22309
I0412 13:51:07.075538 6200 solver.cpp:237] Train net output #0: loss = 0.22309 (* 1 = 0.22309 loss)
I0412 13:51:07.075552 6200 sgd_solver.cpp:105] Iteration 8052, lr = 0.00202911
I0412 13:51:09.166867 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8058.caffemodel
I0412 13:51:14.785934 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8058.solverstate
I0412 13:51:20.151784 6200 solver.cpp:330] Iteration 8058, Testing net (#0)
I0412 13:51:20.151806 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:51:21.418644 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:51:24.564640 6200 solver.cpp:397] Test net output #0: accuracy = 0.467524
I0412 13:51:24.564803 6200 solver.cpp:397] Test net output #1: loss = 2.86942 (* 1 = 2.86942 loss)
I0412 13:51:26.527019 6200 solver.cpp:218] Iteration 8064 (0.616937 iter/s, 19.4509s/12 iters), loss = 0.202118
I0412 13:51:26.527062 6200 solver.cpp:237] Train net output #0: loss = 0.202118 (* 1 = 0.202118 loss)
I0412 13:51:26.527073 6200 sgd_solver.cpp:105] Iteration 8064, lr = 0.00202429
I0412 13:51:31.391510 6200 solver.cpp:218] Iteration 8076 (2.46695 iter/s, 4.8643s/12 iters), loss = 0.187215
I0412 13:51:31.391567 6200 solver.cpp:237] Train net output #0: loss = 0.187215 (* 1 = 0.187215 loss)
I0412 13:51:31.391579 6200 sgd_solver.cpp:105] Iteration 8076, lr = 0.00201949
I0412 13:51:36.415496 6200 solver.cpp:218] Iteration 8088 (2.38864 iter/s, 5.02378s/12 iters), loss = 0.179767
I0412 13:51:36.415549 6200 solver.cpp:237] Train net output #0: loss = 0.179767 (* 1 = 0.179767 loss)
I0412 13:51:36.415563 6200 sgd_solver.cpp:105] Iteration 8088, lr = 0.00201469
I0412 13:51:37.805426 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:51:41.313630 6200 solver.cpp:218] Iteration 8100 (2.45001 iter/s, 4.89794s/12 iters), loss = 0.211689
I0412 13:51:41.313679 6200 solver.cpp:237] Train net output #0: loss = 0.211689 (* 1 = 0.211689 loss)
I0412 13:51:41.313690 6200 sgd_solver.cpp:105] Iteration 8100, lr = 0.00200991
I0412 13:51:46.478773 6200 solver.cpp:218] Iteration 8112 (2.32336 iter/s, 5.16494s/12 iters), loss = 0.136899
I0412 13:51:46.478822 6200 solver.cpp:237] Train net output #0: loss = 0.136899 (* 1 = 0.136899 loss)
I0412 13:51:46.478833 6200 sgd_solver.cpp:105] Iteration 8112, lr = 0.00200514
I0412 13:51:51.396334 6200 solver.cpp:218] Iteration 8124 (2.44033 iter/s, 4.91736s/12 iters), loss = 0.0668092
I0412 13:51:51.396386 6200 solver.cpp:237] Train net output #0: loss = 0.0668093 (* 1 = 0.0668093 loss)
I0412 13:51:51.396397 6200 sgd_solver.cpp:105] Iteration 8124, lr = 0.00200038
I0412 13:51:56.249559 6200 solver.cpp:218] Iteration 8136 (2.47268 iter/s, 4.85303s/12 iters), loss = 0.262812
I0412 13:51:56.249680 6200 solver.cpp:237] Train net output #0: loss = 0.262812 (* 1 = 0.262812 loss)
I0412 13:51:56.249691 6200 sgd_solver.cpp:105] Iteration 8136, lr = 0.00199563
I0412 13:52:01.091861 6200 solver.cpp:218] Iteration 8148 (2.47829 iter/s, 4.84204s/12 iters), loss = 0.109504
I0412 13:52:01.091909 6200 solver.cpp:237] Train net output #0: loss = 0.109504 (* 1 = 0.109504 loss)
I0412 13:52:01.091920 6200 sgd_solver.cpp:105] Iteration 8148, lr = 0.00199089
I0412 13:52:05.698915 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8160.caffemodel
I0412 13:52:07.938417 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8160.solverstate
I0412 13:52:14.747248 6200 solver.cpp:330] Iteration 8160, Testing net (#0)
I0412 13:52:14.747272 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:52:15.958256 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:52:19.142263 6200 solver.cpp:397] Test net output #0: accuracy = 0.463235
I0412 13:52:19.142298 6200 solver.cpp:397] Test net output #1: loss = 2.91474 (* 1 = 2.91474 loss)
I0412 13:52:19.227720 6200 solver.cpp:218] Iteration 8160 (0.661693 iter/s, 18.1353s/12 iters), loss = 0.16154
I0412 13:52:19.227778 6200 solver.cpp:237] Train net output #0: loss = 0.16154 (* 1 = 0.16154 loss)
I0412 13:52:19.227788 6200 sgd_solver.cpp:105] Iteration 8160, lr = 0.00198616
I0412 13:52:23.365703 6200 solver.cpp:218] Iteration 8172 (2.9001 iter/s, 4.13779s/12 iters), loss = 0.153866
I0412 13:52:23.365754 6200 solver.cpp:237] Train net output #0: loss = 0.153866 (* 1 = 0.153866 loss)
I0412 13:52:23.365763 6200 sgd_solver.cpp:105] Iteration 8172, lr = 0.00198145
I0412 13:52:28.236863 6200 solver.cpp:218] Iteration 8184 (2.46358 iter/s, 4.87096s/12 iters), loss = 0.15105
I0412 13:52:28.236990 6200 solver.cpp:237] Train net output #0: loss = 0.15105 (* 1 = 0.15105 loss)
I0412 13:52:28.237001 6200 sgd_solver.cpp:105] Iteration 8184, lr = 0.00197674
I0412 13:52:31.693434 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:52:33.104884 6200 solver.cpp:218] Iteration 8196 (2.4652 iter/s, 4.86775s/12 iters), loss = 0.095241
I0412 13:52:33.104934 6200 solver.cpp:237] Train net output #0: loss = 0.0952411 (* 1 = 0.0952411 loss)
I0412 13:52:33.104946 6200 sgd_solver.cpp:105] Iteration 8196, lr = 0.00197205
I0412 13:52:38.186154 6200 solver.cpp:218] Iteration 8208 (2.36171 iter/s, 5.08107s/12 iters), loss = 0.144465
I0412 13:52:38.186206 6200 solver.cpp:237] Train net output #0: loss = 0.144465 (* 1 = 0.144465 loss)
I0412 13:52:38.186218 6200 sgd_solver.cpp:105] Iteration 8208, lr = 0.00196737
I0412 13:52:43.131162 6200 solver.cpp:218] Iteration 8220 (2.42679 iter/s, 4.94481s/12 iters), loss = 0.189257
I0412 13:52:43.131214 6200 solver.cpp:237] Train net output #0: loss = 0.189257 (* 1 = 0.189257 loss)
I0412 13:52:43.131225 6200 sgd_solver.cpp:105] Iteration 8220, lr = 0.0019627
I0412 13:52:48.014926 6200 solver.cpp:218] Iteration 8232 (2.45722 iter/s, 4.88357s/12 iters), loss = 0.0903612
I0412 13:52:48.014964 6200 solver.cpp:237] Train net output #0: loss = 0.0903613 (* 1 = 0.0903613 loss)
I0412 13:52:48.014972 6200 sgd_solver.cpp:105] Iteration 8232, lr = 0.00195804
I0412 13:52:53.327337 6200 solver.cpp:218] Iteration 8244 (2.25894 iter/s, 5.31222s/12 iters), loss = 0.220528
I0412 13:52:53.327383 6200 solver.cpp:237] Train net output #0: loss = 0.220528 (* 1 = 0.220528 loss)
I0412 13:52:53.327392 6200 sgd_solver.cpp:105] Iteration 8244, lr = 0.00195339
I0412 13:52:58.315213 6200 solver.cpp:218] Iteration 8256 (2.40593 iter/s, 4.98768s/12 iters), loss = 0.154314
I0412 13:52:58.315335 6200 solver.cpp:237] Train net output #0: loss = 0.154314 (* 1 = 0.154314 loss)
I0412 13:52:58.315346 6200 sgd_solver.cpp:105] Iteration 8256, lr = 0.00194875
I0412 13:53:00.364753 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8262.caffemodel
I0412 13:53:02.728911 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8262.solverstate
I0412 13:53:04.294126 6200 solver.cpp:330] Iteration 8262, Testing net (#0)
I0412 13:53:04.294147 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:53:05.463229 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:53:08.682098 6200 solver.cpp:397] Test net output #0: accuracy = 0.461397
I0412 13:53:08.682126 6200 solver.cpp:397] Test net output #1: loss = 2.92563 (* 1 = 2.92563 loss)
I0412 13:53:10.517055 6200 solver.cpp:218] Iteration 8268 (0.983495 iter/s, 12.2014s/12 iters), loss = 0.154013
I0412 13:53:10.517103 6200 solver.cpp:237] Train net output #0: loss = 0.154013 (* 1 = 0.154013 loss)
I0412 13:53:10.517113 6200 sgd_solver.cpp:105] Iteration 8268, lr = 0.00194412
I0412 13:53:15.627595 6200 solver.cpp:218] Iteration 8280 (2.34818 iter/s, 5.11034s/12 iters), loss = 0.178495
I0412 13:53:15.627640 6200 solver.cpp:237] Train net output #0: loss = 0.178495 (* 1 = 0.178495 loss)
I0412 13:53:15.627650 6200 sgd_solver.cpp:105] Iteration 8280, lr = 0.00193951
I0412 13:53:20.615996 6200 solver.cpp:218] Iteration 8292 (2.40567 iter/s, 4.98821s/12 iters), loss = 0.116818
I0412 13:53:20.616051 6200 solver.cpp:237] Train net output #0: loss = 0.116818 (* 1 = 0.116818 loss)
I0412 13:53:20.616065 6200 sgd_solver.cpp:105] Iteration 8292, lr = 0.0019349
I0412 13:53:21.303737 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:53:25.497807 6200 solver.cpp:218] Iteration 8304 (2.45821 iter/s, 4.88161s/12 iters), loss = 0.16976
I0412 13:53:25.497859 6200 solver.cpp:237] Train net output #0: loss = 0.169761 (* 1 = 0.169761 loss)
I0412 13:53:25.497870 6200 sgd_solver.cpp:105] Iteration 8304, lr = 0.00193031
I0412 13:53:28.072377 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:53:30.749372 6200 solver.cpp:218] Iteration 8316 (2.28512 iter/s, 5.25136s/12 iters), loss = 0.197541
I0412 13:53:30.749481 6200 solver.cpp:237] Train net output #0: loss = 0.197541 (* 1 = 0.197541 loss)
I0412 13:53:30.749491 6200 sgd_solver.cpp:105] Iteration 8316, lr = 0.00192573
I0412 13:53:35.679189 6200 solver.cpp:218] Iteration 8328 (2.43429 iter/s, 4.92957s/12 iters), loss = 0.183818
I0412 13:53:35.679229 6200 solver.cpp:237] Train net output #0: loss = 0.183818 (* 1 = 0.183818 loss)
I0412 13:53:35.679239 6200 sgd_solver.cpp:105] Iteration 8328, lr = 0.00192115
I0412 13:53:40.782354 6200 solver.cpp:218] Iteration 8340 (2.35157 iter/s, 5.10297s/12 iters), loss = 0.241882
I0412 13:53:40.782414 6200 solver.cpp:237] Train net output #0: loss = 0.241882 (* 1 = 0.241882 loss)
I0412 13:53:40.782429 6200 sgd_solver.cpp:105] Iteration 8340, lr = 0.00191659
I0412 13:53:45.762157 6200 solver.cpp:218] Iteration 8352 (2.40984 iter/s, 4.97959s/12 iters), loss = 0.115874
I0412 13:53:45.762230 6200 solver.cpp:237] Train net output #0: loss = 0.115874 (* 1 = 0.115874 loss)
I0412 13:53:45.762245 6200 sgd_solver.cpp:105] Iteration 8352, lr = 0.00191204
I0412 13:53:50.305017 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8364.caffemodel
I0412 13:53:53.590174 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8364.solverstate
I0412 13:53:57.514184 6200 solver.cpp:330] Iteration 8364, Testing net (#0)
I0412 13:53:57.514209 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:53:58.669018 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:54:01.974135 6200 solver.cpp:397] Test net output #0: accuracy = 0.458333
I0412 13:54:01.977752 6200 solver.cpp:397] Test net output #1: loss = 2.87092 (* 1 = 2.87092 loss)
I0412 13:54:02.063194 6200 solver.cpp:218] Iteration 8364 (0.736173 iter/s, 16.3005s/12 iters), loss = 0.162591
I0412 13:54:02.063246 6200 solver.cpp:237] Train net output #0: loss = 0.162591 (* 1 = 0.162591 loss)
I0412 13:54:02.063257 6200 sgd_solver.cpp:105] Iteration 8364, lr = 0.0019075
I0412 13:54:06.335096 6200 solver.cpp:218] Iteration 8376 (2.80917 iter/s, 4.27172s/12 iters), loss = 0.144347
I0412 13:54:06.335135 6200 solver.cpp:237] Train net output #0: loss = 0.144347 (* 1 = 0.144347 loss)
I0412 13:54:06.335144 6200 sgd_solver.cpp:105] Iteration 8376, lr = 0.00190297
I0412 13:54:11.421212 6200 solver.cpp:218] Iteration 8388 (2.35945 iter/s, 5.08592s/12 iters), loss = 0.151382
I0412 13:54:11.421269 6200 solver.cpp:237] Train net output #0: loss = 0.151382 (* 1 = 0.151382 loss)
I0412 13:54:11.421283 6200 sgd_solver.cpp:105] Iteration 8388, lr = 0.00189846
I0412 13:54:14.316701 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:54:16.455173 6200 solver.cpp:218] Iteration 8400 (2.3839 iter/s, 5.03376s/12 iters), loss = 0.328628
I0412 13:54:16.455215 6200 solver.cpp:237] Train net output #0: loss = 0.328628 (* 1 = 0.328628 loss)
I0412 13:54:16.455224 6200 sgd_solver.cpp:105] Iteration 8400, lr = 0.00189395
I0412 13:54:21.289860 6200 solver.cpp:218] Iteration 8412 (2.48216 iter/s, 4.83449s/12 iters), loss = 0.137005
I0412 13:54:21.289918 6200 solver.cpp:237] Train net output #0: loss = 0.137006 (* 1 = 0.137006 loss)
I0412 13:54:21.289932 6200 sgd_solver.cpp:105] Iteration 8412, lr = 0.00188945
I0412 13:54:26.134037 6200 solver.cpp:218] Iteration 8424 (2.4773 iter/s, 4.84398s/12 iters), loss = 0.171955
I0412 13:54:26.134085 6200 solver.cpp:237] Train net output #0: loss = 0.171955 (* 1 = 0.171955 loss)
I0412 13:54:26.134095 6200 sgd_solver.cpp:105] Iteration 8424, lr = 0.00188497
I0412 13:54:31.206293 6200 solver.cpp:218] Iteration 8436 (2.3659 iter/s, 5.07206s/12 iters), loss = 0.118214
I0412 13:54:31.206342 6200 solver.cpp:237] Train net output #0: loss = 0.118214 (* 1 = 0.118214 loss)
I0412 13:54:31.206353 6200 sgd_solver.cpp:105] Iteration 8436, lr = 0.00188049
I0412 13:54:36.169054 6200 solver.cpp:218] Iteration 8448 (2.4181 iter/s, 4.96257s/12 iters), loss = 0.191208
I0412 13:54:36.169194 6200 solver.cpp:237] Train net output #0: loss = 0.191208 (* 1 = 0.191208 loss)
I0412 13:54:36.169205 6200 sgd_solver.cpp:105] Iteration 8448, lr = 0.00187603
I0412 13:54:40.998014 6200 solver.cpp:218] Iteration 8460 (2.48516 iter/s, 4.82867s/12 iters), loss = 0.112014
I0412 13:54:40.998075 6200 solver.cpp:237] Train net output #0: loss = 0.112014 (* 1 = 0.112014 loss)
I0412 13:54:40.998090 6200 sgd_solver.cpp:105] Iteration 8460, lr = 0.00187157
I0412 13:54:43.026955 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8466.caffemodel
I0412 13:54:45.054622 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8466.solverstate
I0412 13:54:48.839723 6200 solver.cpp:330] Iteration 8466, Testing net (#0)
I0412 13:54:48.839749 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:54:49.999029 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:54:53.299753 6200 solver.cpp:397] Test net output #0: accuracy = 0.466299
I0412 13:54:53.299783 6200 solver.cpp:397] Test net output #1: loss = 2.98445 (* 1 = 2.98445 loss)
I0412 13:54:55.131546 6200 solver.cpp:218] Iteration 8472 (0.849072 iter/s, 14.1331s/12 iters), loss = 0.121387
I0412 13:54:55.131594 6200 solver.cpp:237] Train net output #0: loss = 0.121387 (* 1 = 0.121387 loss)
I0412 13:54:55.131604 6200 sgd_solver.cpp:105] Iteration 8472, lr = 0.00186713
I0412 13:55:00.036130 6200 solver.cpp:218] Iteration 8484 (2.44679 iter/s, 4.90438s/12 iters), loss = 0.136468
I0412 13:55:00.036187 6200 solver.cpp:237] Train net output #0: loss = 0.136468 (* 1 = 0.136468 loss)
I0412 13:55:00.036199 6200 sgd_solver.cpp:105] Iteration 8484, lr = 0.0018627
I0412 13:55:04.891861 6200 solver.cpp:218] Iteration 8496 (2.47141 iter/s, 4.85553s/12 iters), loss = 0.0914598
I0412 13:55:04.891912 6200 solver.cpp:237] Train net output #0: loss = 0.0914599 (* 1 = 0.0914599 loss)
I0412 13:55:04.891923 6200 sgd_solver.cpp:105] Iteration 8496, lr = 0.00185827
I0412 13:55:04.930945 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:55:09.950263 6200 solver.cpp:218] Iteration 8508 (2.37239 iter/s, 5.0582s/12 iters), loss = 0.192799
I0412 13:55:09.950412 6200 solver.cpp:237] Train net output #0: loss = 0.192799 (* 1 = 0.192799 loss)
I0412 13:55:09.950431 6200 sgd_solver.cpp:105] Iteration 8508, lr = 0.00185386
I0412 13:55:14.879101 6200 solver.cpp:218] Iteration 8520 (2.43479 iter/s, 4.92855s/12 iters), loss = 0.170772
I0412 13:55:14.879142 6200 solver.cpp:237] Train net output #0: loss = 0.170772 (* 1 = 0.170772 loss)
I0412 13:55:14.879151 6200 sgd_solver.cpp:105] Iteration 8520, lr = 0.00184946
I0412 13:55:19.765372 6200 solver.cpp:218] Iteration 8532 (2.45596 iter/s, 4.88608s/12 iters), loss = 0.121293
I0412 13:55:19.765413 6200 solver.cpp:237] Train net output #0: loss = 0.121293 (* 1 = 0.121293 loss)
I0412 13:55:19.765421 6200 sgd_solver.cpp:105] Iteration 8532, lr = 0.00184507
I0412 13:55:24.986044 6200 solver.cpp:218] Iteration 8544 (2.29864 iter/s, 5.22047s/12 iters), loss = 0.207015
I0412 13:55:24.986100 6200 solver.cpp:237] Train net output #0: loss = 0.207015 (* 1 = 0.207015 loss)
I0412 13:55:24.986112 6200 sgd_solver.cpp:105] Iteration 8544, lr = 0.00184069
I0412 13:55:30.043390 6200 solver.cpp:218] Iteration 8556 (2.37288 iter/s, 5.05714s/12 iters), loss = 0.116999
I0412 13:55:30.043444 6200 solver.cpp:237] Train net output #0: loss = 0.116999 (* 1 = 0.116999 loss)
I0412 13:55:30.043457 6200 sgd_solver.cpp:105] Iteration 8556, lr = 0.00183632
I0412 13:55:34.719589 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8568.caffemodel
I0412 13:55:36.708402 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8568.solverstate
I0412 13:55:38.263345 6200 solver.cpp:330] Iteration 8568, Testing net (#0)
I0412 13:55:38.263373 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:55:39.374655 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:55:42.754101 6200 solver.cpp:397] Test net output #0: accuracy = 0.462623
I0412 13:55:42.754271 6200 solver.cpp:397] Test net output #1: loss = 2.88691 (* 1 = 2.88691 loss)
I0412 13:55:42.838557 6200 solver.cpp:218] Iteration 8568 (0.937884 iter/s, 12.7948s/12 iters), loss = 0.176494
I0412 13:55:42.838598 6200 solver.cpp:237] Train net output #0: loss = 0.176494 (* 1 = 0.176494 loss)
I0412 13:55:42.838608 6200 sgd_solver.cpp:105] Iteration 8568, lr = 0.00183196
I0412 13:55:47.568104 6200 solver.cpp:218] Iteration 8580 (2.53734 iter/s, 4.72936s/12 iters), loss = 0.128095
I0412 13:55:47.568146 6200 solver.cpp:237] Train net output #0: loss = 0.128095 (* 1 = 0.128095 loss)
I0412 13:55:47.568154 6200 sgd_solver.cpp:105] Iteration 8580, lr = 0.00182761
I0412 13:55:52.347002 6200 solver.cpp:218] Iteration 8592 (2.51114 iter/s, 4.77871s/12 iters), loss = 0.202651
I0412 13:55:52.347054 6200 solver.cpp:237] Train net output #0: loss = 0.202651 (* 1 = 0.202651 loss)
I0412 13:55:52.347065 6200 sgd_solver.cpp:105] Iteration 8592, lr = 0.00182327
I0412 13:55:54.445206 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:55:57.231730 6200 solver.cpp:218] Iteration 8604 (2.45674 iter/s, 4.88453s/12 iters), loss = 0.0925016
I0412 13:55:57.231775 6200 solver.cpp:237] Train net output #0: loss = 0.0925017 (* 1 = 0.0925017 loss)
I0412 13:55:57.231784 6200 sgd_solver.cpp:105] Iteration 8604, lr = 0.00181894
I0412 13:56:02.225867 6200 solver.cpp:218] Iteration 8616 (2.40292 iter/s, 4.99393s/12 iters), loss = 0.0587197
I0412 13:56:02.225926 6200 solver.cpp:237] Train net output #0: loss = 0.0587198 (* 1 = 0.0587198 loss)
I0412 13:56:02.225942 6200 sgd_solver.cpp:105] Iteration 8616, lr = 0.00181462
I0412 13:56:07.165257 6200 solver.cpp:218] Iteration 8628 (2.42955 iter/s, 4.93919s/12 iters), loss = 0.145209
I0412 13:56:07.165297 6200 solver.cpp:237] Train net output #0: loss = 0.14521 (* 1 = 0.14521 loss)
I0412 13:56:07.165305 6200 sgd_solver.cpp:105] Iteration 8628, lr = 0.00181031
I0412 13:56:12.040211 6200 solver.cpp:218] Iteration 8640 (2.46166 iter/s, 4.87477s/12 iters), loss = 0.161031
I0412 13:56:12.040261 6200 solver.cpp:237] Train net output #0: loss = 0.161031 (* 1 = 0.161031 loss)
I0412 13:56:12.040271 6200 sgd_solver.cpp:105] Iteration 8640, lr = 0.00180602
I0412 13:56:16.926086 6200 solver.cpp:218] Iteration 8652 (2.45616 iter/s, 4.88568s/12 iters), loss = 0.0873033
I0412 13:56:16.926214 6200 solver.cpp:237] Train net output #0: loss = 0.0873034 (* 1 = 0.0873034 loss)
I0412 13:56:16.926229 6200 sgd_solver.cpp:105] Iteration 8652, lr = 0.00180173
I0412 13:56:21.863334 6200 solver.cpp:218] Iteration 8664 (2.43064 iter/s, 4.93698s/12 iters), loss = 0.146607
I0412 13:56:21.863396 6200 solver.cpp:237] Train net output #0: loss = 0.146607 (* 1 = 0.146607 loss)
I0412 13:56:21.863411 6200 sgd_solver.cpp:105] Iteration 8664, lr = 0.00179745
I0412 13:56:23.928200 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8670.caffemodel
I0412 13:56:25.985841 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8670.solverstate
I0412 13:56:27.534791 6200 solver.cpp:330] Iteration 8670, Testing net (#0)
I0412 13:56:27.534811 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:56:28.744359 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:56:32.206511 6200 solver.cpp:397] Test net output #0: accuracy = 0.46875
I0412 13:56:32.206554 6200 solver.cpp:397] Test net output #1: loss = 2.93678 (* 1 = 2.93678 loss)
I0412 13:56:34.069643 6200 solver.cpp:218] Iteration 8676 (0.98313 iter/s, 12.2059s/12 iters), loss = 0.183545
I0412 13:56:34.069701 6200 solver.cpp:237] Train net output #0: loss = 0.183545 (* 1 = 0.183545 loss)
I0412 13:56:34.069715 6200 sgd_solver.cpp:105] Iteration 8676, lr = 0.00179318
I0412 13:56:38.993149 6200 solver.cpp:218] Iteration 8688 (2.43739 iter/s, 4.9233s/12 iters), loss = 0.139988
I0412 13:56:38.993199 6200 solver.cpp:237] Train net output #0: loss = 0.139988 (* 1 = 0.139988 loss)
I0412 13:56:38.993208 6200 sgd_solver.cpp:105] Iteration 8688, lr = 0.00178893
I0412 13:56:43.317037 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:56:43.981591 6200 solver.cpp:218] Iteration 8700 (2.40565 iter/s, 4.98825s/12 iters), loss = 0.091007
I0412 13:56:43.981637 6200 solver.cpp:237] Train net output #0: loss = 0.0910071 (* 1 = 0.0910071 loss)
I0412 13:56:43.981647 6200 sgd_solver.cpp:105] Iteration 8700, lr = 0.00178468
I0412 13:56:48.926641 6200 solver.cpp:218] Iteration 8712 (2.42676 iter/s, 4.94486s/12 iters), loss = 0.169545
I0412 13:56:48.926811 6200 solver.cpp:237] Train net output #0: loss = 0.169545 (* 1 = 0.169545 loss)
I0412 13:56:48.926826 6200 sgd_solver.cpp:105] Iteration 8712, lr = 0.00178044
I0412 13:56:54.081487 6200 solver.cpp:218] Iteration 8724 (2.32805 iter/s, 5.15453s/12 iters), loss = 0.156619
I0412 13:56:54.081547 6200 solver.cpp:237] Train net output #0: loss = 0.156619 (* 1 = 0.156619 loss)
I0412 13:56:54.081562 6200 sgd_solver.cpp:105] Iteration 8724, lr = 0.00177621
I0412 13:56:59.008163 6200 solver.cpp:218] Iteration 8736 (2.43582 iter/s, 4.92648s/12 iters), loss = 0.0936191
I0412 13:56:59.008208 6200 solver.cpp:237] Train net output #0: loss = 0.0936192 (* 1 = 0.0936192 loss)
I0412 13:56:59.008217 6200 sgd_solver.cpp:105] Iteration 8736, lr = 0.001772
I0412 13:57:04.453600 6200 solver.cpp:218] Iteration 8748 (2.20376 iter/s, 5.44523s/12 iters), loss = 0.0457017
I0412 13:57:04.453653 6200 solver.cpp:237] Train net output #0: loss = 0.0457017 (* 1 = 0.0457017 loss)
I0412 13:57:04.453665 6200 sgd_solver.cpp:105] Iteration 8748, lr = 0.00176779
I0412 13:57:09.446317 6200 solver.cpp:218] Iteration 8760 (2.4036 iter/s, 4.99251s/12 iters), loss = 0.0922076
I0412 13:57:09.446374 6200 solver.cpp:237] Train net output #0: loss = 0.0922076 (* 1 = 0.0922076 loss)
I0412 13:57:09.446386 6200 sgd_solver.cpp:105] Iteration 8760, lr = 0.00176359
I0412 13:57:14.017510 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8772.caffemodel
I0412 13:57:16.107942 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8772.solverstate
I0412 13:57:17.698427 6200 solver.cpp:330] Iteration 8772, Testing net (#0)
I0412 13:57:17.698453 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:57:18.738191 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:57:22.419364 6200 solver.cpp:397] Test net output #0: accuracy = 0.481618
I0412 13:57:22.419497 6200 solver.cpp:397] Test net output #1: loss = 2.91643 (* 1 = 2.91643 loss)
I0412 13:57:22.504957 6200 solver.cpp:218] Iteration 8772 (0.918961 iter/s, 13.0582s/12 iters), loss = 0.267249
I0412 13:57:22.505007 6200 solver.cpp:237] Train net output #0: loss = 0.267249 (* 1 = 0.267249 loss)
I0412 13:57:22.505018 6200 sgd_solver.cpp:105] Iteration 8772, lr = 0.00175941
I0412 13:57:26.669816 6200 solver.cpp:218] Iteration 8784 (2.88137 iter/s, 4.16468s/12 iters), loss = 0.116418
I0412 13:57:26.669862 6200 solver.cpp:237] Train net output #0: loss = 0.116418 (* 1 = 0.116418 loss)
I0412 13:57:26.669872 6200 sgd_solver.cpp:105] Iteration 8784, lr = 0.00175523
I0412 13:57:31.669363 6200 solver.cpp:218] Iteration 8796 (2.40031 iter/s, 4.99935s/12 iters), loss = 0.104414
I0412 13:57:31.669425 6200 solver.cpp:237] Train net output #0: loss = 0.104414 (* 1 = 0.104414 loss)
I0412 13:57:31.669440 6200 sgd_solver.cpp:105] Iteration 8796, lr = 0.00175106
I0412 13:57:32.996120 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:57:36.562232 6200 solver.cpp:218] Iteration 8808 (2.45265 iter/s, 4.89267s/12 iters), loss = 0.199804
I0412 13:57:36.562284 6200 solver.cpp:237] Train net output #0: loss = 0.199804 (* 1 = 0.199804 loss)
I0412 13:57:36.562297 6200 sgd_solver.cpp:105] Iteration 8808, lr = 0.0017469
I0412 13:57:41.587129 6200 solver.cpp:218] Iteration 8820 (2.3882 iter/s, 5.02469s/12 iters), loss = 0.0568472
I0412 13:57:41.587182 6200 solver.cpp:237] Train net output #0: loss = 0.0568472 (* 1 = 0.0568472 loss)
I0412 13:57:41.587193 6200 sgd_solver.cpp:105] Iteration 8820, lr = 0.00174276
I0412 13:57:46.491768 6200 solver.cpp:218] Iteration 8832 (2.44676 iter/s, 4.90444s/12 iters), loss = 0.132366
I0412 13:57:46.491808 6200 solver.cpp:237] Train net output #0: loss = 0.132366 (* 1 = 0.132366 loss)
I0412 13:57:46.491817 6200 sgd_solver.cpp:105] Iteration 8832, lr = 0.00173862
I0412 13:57:51.532136 6200 solver.cpp:218] Iteration 8844 (2.38087 iter/s, 5.04018s/12 iters), loss = 0.148628
I0412 13:57:51.532188 6200 solver.cpp:237] Train net output #0: loss = 0.148628 (* 1 = 0.148628 loss)
I0412 13:57:51.532202 6200 sgd_solver.cpp:105] Iteration 8844, lr = 0.00173449
I0412 13:57:56.642936 6200 solver.cpp:218] Iteration 8856 (2.34806 iter/s, 5.1106s/12 iters), loss = 0.160307
I0412 13:57:56.643136 6200 solver.cpp:237] Train net output #0: loss = 0.160307 (* 1 = 0.160307 loss)
I0412 13:57:56.643155 6200 sgd_solver.cpp:105] Iteration 8856, lr = 0.00173037
I0412 13:58:01.482053 6200 solver.cpp:218] Iteration 8868 (2.47996 iter/s, 4.83878s/12 iters), loss = 0.102441
I0412 13:58:01.482098 6200 solver.cpp:237] Train net output #0: loss = 0.102441 (* 1 = 0.102441 loss)
I0412 13:58:01.482110 6200 sgd_solver.cpp:105] Iteration 8868, lr = 0.00172626
I0412 13:58:03.544953 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8874.caffemodel
I0412 13:58:05.526125 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8874.solverstate
I0412 13:58:07.069514 6200 solver.cpp:330] Iteration 8874, Testing net (#0)
I0412 13:58:07.069537 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:58:07.994858 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:58:11.596448 6200 solver.cpp:397] Test net output #0: accuracy = 0.476716
I0412 13:58:11.596496 6200 solver.cpp:397] Test net output #1: loss = 2.98271 (* 1 = 2.98271 loss)
I0412 13:58:13.562273 6200 solver.cpp:218] Iteration 8880 (0.993391 iter/s, 12.0798s/12 iters), loss = 0.0997641
I0412 13:58:13.562326 6200 solver.cpp:237] Train net output #0: loss = 0.0997642 (* 1 = 0.0997642 loss)
I0412 13:58:13.562338 6200 sgd_solver.cpp:105] Iteration 8880, lr = 0.00172217
I0412 13:58:18.726946 6200 solver.cpp:218] Iteration 8892 (2.32357 iter/s, 5.16447s/12 iters), loss = 0.110888
I0412 13:58:18.726997 6200 solver.cpp:237] Train net output #0: loss = 0.110888 (* 1 = 0.110888 loss)
I0412 13:58:18.727008 6200 sgd_solver.cpp:105] Iteration 8892, lr = 0.00171808
I0412 13:58:22.315085 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:58:23.718247 6200 solver.cpp:218] Iteration 8904 (2.40428 iter/s, 4.9911s/12 iters), loss = 0.039756
I0412 13:58:23.718294 6200 solver.cpp:237] Train net output #0: loss = 0.039756 (* 1 = 0.039756 loss)
I0412 13:58:23.718302 6200 sgd_solver.cpp:105] Iteration 8904, lr = 0.001714
I0412 13:58:28.663097 6200 solver.cpp:218] Iteration 8916 (2.42686 iter/s, 4.94465s/12 iters), loss = 0.198889
I0412 13:58:28.663204 6200 solver.cpp:237] Train net output #0: loss = 0.198889 (* 1 = 0.198889 loss)
I0412 13:58:28.663215 6200 sgd_solver.cpp:105] Iteration 8916, lr = 0.00170993
I0412 13:58:33.648797 6200 solver.cpp:218] Iteration 8928 (2.407 iter/s, 4.98545s/12 iters), loss = 0.161716
I0412 13:58:33.648844 6200 solver.cpp:237] Train net output #0: loss = 0.161716 (* 1 = 0.161716 loss)
I0412 13:58:33.648855 6200 sgd_solver.cpp:105] Iteration 8928, lr = 0.00170587
I0412 13:58:38.450860 6200 solver.cpp:218] Iteration 8940 (2.49902 iter/s, 4.80188s/12 iters), loss = 0.124547
I0412 13:58:38.450911 6200 solver.cpp:237] Train net output #0: loss = 0.124547 (* 1 = 0.124547 loss)
I0412 13:58:38.450922 6200 sgd_solver.cpp:105] Iteration 8940, lr = 0.00170182
I0412 13:58:43.352203 6200 solver.cpp:218] Iteration 8952 (2.44841 iter/s, 4.90115s/12 iters), loss = 0.0655529
I0412 13:58:43.352257 6200 solver.cpp:237] Train net output #0: loss = 0.065553 (* 1 = 0.065553 loss)
I0412 13:58:43.352268 6200 sgd_solver.cpp:105] Iteration 8952, lr = 0.00169778
I0412 13:58:48.249550 6200 solver.cpp:218] Iteration 8964 (2.45041 iter/s, 4.89715s/12 iters), loss = 0.14962
I0412 13:58:48.249594 6200 solver.cpp:237] Train net output #0: loss = 0.14962 (* 1 = 0.14962 loss)
I0412 13:58:48.249603 6200 sgd_solver.cpp:105] Iteration 8964, lr = 0.00169375
I0412 13:58:52.746673 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8976.caffemodel
I0412 13:58:54.782385 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8976.solverstate
I0412 13:58:56.352982 6200 solver.cpp:330] Iteration 8976, Testing net (#0)
I0412 13:58:56.353013 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:58:57.278549 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:59:00.834631 6200 solver.cpp:397] Test net output #0: accuracy = 0.479167
I0412 13:59:00.834810 6200 solver.cpp:397] Test net output #1: loss = 2.96803 (* 1 = 2.96803 loss)
I0412 13:59:00.920315 6200 solver.cpp:218] Iteration 8976 (0.947092 iter/s, 12.6704s/12 iters), loss = 0.0730589
I0412 13:59:00.920365 6200 solver.cpp:237] Train net output #0: loss = 0.0730589 (* 1 = 0.0730589 loss)
I0412 13:59:00.920378 6200 sgd_solver.cpp:105] Iteration 8976, lr = 0.00168973
I0412 13:59:05.123929 6200 solver.cpp:218] Iteration 8988 (2.85481 iter/s, 4.20343s/12 iters), loss = 0.0863362
I0412 13:59:05.123980 6200 solver.cpp:237] Train net output #0: loss = 0.0863363 (* 1 = 0.0863363 loss)
I0412 13:59:05.123994 6200 sgd_solver.cpp:105] Iteration 8988, lr = 0.00168571
I0412 13:59:07.973381 6200 blocking_queue.cpp:49] Waiting for data
I0412 13:59:10.154973 6200 solver.cpp:218] Iteration 9000 (2.38528 iter/s, 5.03085s/12 iters), loss = 0.233593
I0412 13:59:10.155025 6200 solver.cpp:237] Train net output #0: loss = 0.233593 (* 1 = 0.233593 loss)
I0412 13:59:10.155038 6200 sgd_solver.cpp:105] Iteration 9000, lr = 0.00168171
I0412 13:59:10.819480 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:59:15.084213 6200 solver.cpp:218] Iteration 9012 (2.43455 iter/s, 4.92904s/12 iters), loss = 0.0943106
I0412 13:59:15.084259 6200 solver.cpp:237] Train net output #0: loss = 0.0943106 (* 1 = 0.0943106 loss)
I0412 13:59:15.084270 6200 sgd_solver.cpp:105] Iteration 9012, lr = 0.00167772
I0412 13:59:20.153286 6200 solver.cpp:218] Iteration 9024 (2.36739 iter/s, 5.06888s/12 iters), loss = 0.135555
I0412 13:59:20.153342 6200 solver.cpp:237] Train net output #0: loss = 0.135555 (* 1 = 0.135555 loss)
I0412 13:59:20.153357 6200 sgd_solver.cpp:105] Iteration 9024, lr = 0.00167374
I0412 13:59:25.478475 6200 solver.cpp:218] Iteration 9036 (2.25353 iter/s, 5.32497s/12 iters), loss = 0.0338589
I0412 13:59:25.478533 6200 solver.cpp:237] Train net output #0: loss = 0.0338589 (* 1 = 0.0338589 loss)
I0412 13:59:25.478544 6200 sgd_solver.cpp:105] Iteration 9036, lr = 0.00166976
I0412 13:59:30.468502 6200 solver.cpp:218] Iteration 9048 (2.40489 iter/s, 4.98982s/12 iters), loss = 0.191311
I0412 13:59:30.468554 6200 solver.cpp:237] Train net output #0: loss = 0.191311 (* 1 = 0.191311 loss)
I0412 13:59:30.468565 6200 sgd_solver.cpp:105] Iteration 9048, lr = 0.0016658
I0412 13:59:35.338475 6200 solver.cpp:218] Iteration 9060 (2.46418 iter/s, 4.86978s/12 iters), loss = 0.121985
I0412 13:59:35.338604 6200 solver.cpp:237] Train net output #0: loss = 0.121985 (* 1 = 0.121985 loss)
I0412 13:59:35.338614 6200 sgd_solver.cpp:105] Iteration 9060, lr = 0.00166184
I0412 13:59:40.338882 6200 solver.cpp:218] Iteration 9072 (2.39994 iter/s, 5.00013s/12 iters), loss = 0.0970863
I0412 13:59:40.338932 6200 solver.cpp:237] Train net output #0: loss = 0.0970863 (* 1 = 0.0970863 loss)
I0412 13:59:40.338941 6200 sgd_solver.cpp:105] Iteration 9072, lr = 0.0016579
I0412 13:59:42.337932 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9078.caffemodel
I0412 13:59:45.137467 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9078.solverstate
I0412 13:59:48.836491 6200 solver.cpp:330] Iteration 9078, Testing net (#0)
I0412 13:59:48.836520 6200 net.cpp:676] Ignoring source layer train-data
I0412 13:59:49.751505 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 13:59:53.303969 6200 solver.cpp:397] Test net output #0: accuracy = 0.474877
I0412 13:59:53.304003 6200 solver.cpp:397] Test net output #1: loss = 2.96077 (* 1 = 2.96077 loss)
I0412 13:59:55.223994 6200 solver.cpp:218] Iteration 9084 (0.8062 iter/s, 14.8846s/12 iters), loss = 0.054615
I0412 13:59:55.224048 6200 solver.cpp:237] Train net output #0: loss = 0.054615 (* 1 = 0.054615 loss)
I0412 13:59:55.224059 6200 sgd_solver.cpp:105] Iteration 9084, lr = 0.00165396
I0412 14:00:00.120154 6200 solver.cpp:218] Iteration 9096 (2.451 iter/s, 4.89596s/12 iters), loss = 0.114491
I0412 14:00:00.120205 6200 solver.cpp:237] Train net output #0: loss = 0.114491 (* 1 = 0.114491 loss)
I0412 14:00:00.120216 6200 sgd_solver.cpp:105] Iteration 9096, lr = 0.00165003
I0412 14:00:03.059674 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:00:05.073081 6200 solver.cpp:218] Iteration 9108 (2.42291 iter/s, 4.95273s/12 iters), loss = 0.150351
I0412 14:00:05.073140 6200 solver.cpp:237] Train net output #0: loss = 0.150352 (* 1 = 0.150352 loss)
I0412 14:00:05.073156 6200 sgd_solver.cpp:105] Iteration 9108, lr = 0.00164612
I0412 14:00:10.245492 6200 solver.cpp:218] Iteration 9120 (2.3201 iter/s, 5.1722s/12 iters), loss = 0.180183
I0412 14:00:10.245570 6200 solver.cpp:237] Train net output #0: loss = 0.180183 (* 1 = 0.180183 loss)
I0412 14:00:10.245580 6200 sgd_solver.cpp:105] Iteration 9120, lr = 0.00164221
I0412 14:00:15.101985 6200 solver.cpp:218] Iteration 9132 (2.47103 iter/s, 4.85627s/12 iters), loss = 0.115556
I0412 14:00:15.102041 6200 solver.cpp:237] Train net output #0: loss = 0.115556 (* 1 = 0.115556 loss)
I0412 14:00:15.102051 6200 sgd_solver.cpp:105] Iteration 9132, lr = 0.00163831
I0412 14:00:20.401257 6200 solver.cpp:218] Iteration 9144 (2.26455 iter/s, 5.29906s/12 iters), loss = 0.0996909
I0412 14:00:20.401316 6200 solver.cpp:237] Train net output #0: loss = 0.099691 (* 1 = 0.099691 loss)
I0412 14:00:20.401332 6200 sgd_solver.cpp:105] Iteration 9144, lr = 0.00163442
I0412 14:00:25.350458 6200 solver.cpp:218] Iteration 9156 (2.42473 iter/s, 4.949s/12 iters), loss = 0.192472
I0412 14:00:25.350504 6200 solver.cpp:237] Train net output #0: loss = 0.192472 (* 1 = 0.192472 loss)
I0412 14:00:25.350514 6200 sgd_solver.cpp:105] Iteration 9156, lr = 0.00163054
I0412 14:00:30.319898 6200 solver.cpp:218] Iteration 9168 (2.41486 iter/s, 4.96924s/12 iters), loss = 0.123164
I0412 14:00:30.319949 6200 solver.cpp:237] Train net output #0: loss = 0.123164 (* 1 = 0.123164 loss)
I0412 14:00:30.319958 6200 sgd_solver.cpp:105] Iteration 9168, lr = 0.00162667
I0412 14:00:34.806044 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9180.caffemodel
I0412 14:00:36.815253 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9180.solverstate
I0412 14:00:38.381026 6200 solver.cpp:330] Iteration 9180, Testing net (#0)
I0412 14:00:38.381053 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:00:39.279006 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:00:43.003664 6200 solver.cpp:397] Test net output #0: accuracy = 0.46875
I0412 14:00:43.003784 6200 solver.cpp:397] Test net output #1: loss = 2.96353 (* 1 = 2.96353 loss)
I0412 14:00:43.088956 6200 solver.cpp:218] Iteration 9180 (0.939802 iter/s, 12.7686s/12 iters), loss = 0.0816885
I0412 14:00:43.089000 6200 solver.cpp:237] Train net output #0: loss = 0.0816885 (* 1 = 0.0816885 loss)
I0412 14:00:43.089010 6200 sgd_solver.cpp:105] Iteration 9180, lr = 0.00162281
I0412 14:00:47.547186 6200 solver.cpp:218] Iteration 9192 (2.69176 iter/s, 4.45805s/12 iters), loss = 0.243191
I0412 14:00:47.547231 6200 solver.cpp:237] Train net output #0: loss = 0.243191 (* 1 = 0.243191 loss)
I0412 14:00:47.547240 6200 sgd_solver.cpp:105] Iteration 9192, lr = 0.00161895
I0412 14:00:52.646140 6200 solver.cpp:218] Iteration 9204 (2.35352 iter/s, 5.09876s/12 iters), loss = 0.0906556
I0412 14:00:52.646190 6200 solver.cpp:237] Train net output #0: loss = 0.0906556 (* 1 = 0.0906556 loss)
I0412 14:00:52.646201 6200 sgd_solver.cpp:105] Iteration 9204, lr = 0.00161511
I0412 14:00:52.714296 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:00:57.755780 6200 solver.cpp:218] Iteration 9216 (2.34859 iter/s, 5.10944s/12 iters), loss = 0.105052
I0412 14:00:57.755822 6200 solver.cpp:237] Train net output #0: loss = 0.105052 (* 1 = 0.105052 loss)
I0412 14:00:57.755831 6200 sgd_solver.cpp:105] Iteration 9216, lr = 0.00161128
I0412 14:01:02.879961 6200 solver.cpp:218] Iteration 9228 (2.34193 iter/s, 5.12399s/12 iters), loss = 0.0914345
I0412 14:01:02.880008 6200 solver.cpp:237] Train net output #0: loss = 0.0914345 (* 1 = 0.0914345 loss)
I0412 14:01:02.880017 6200 sgd_solver.cpp:105] Iteration 9228, lr = 0.00160745
I0412 14:01:07.803668 6200 solver.cpp:218] Iteration 9240 (2.43728 iter/s, 4.92351s/12 iters), loss = 0.123521
I0412 14:01:07.803715 6200 solver.cpp:237] Train net output #0: loss = 0.123521 (* 1 = 0.123521 loss)
I0412 14:01:07.803725 6200 sgd_solver.cpp:105] Iteration 9240, lr = 0.00160363
I0412 14:01:12.735982 6200 solver.cpp:218] Iteration 9252 (2.43303 iter/s, 4.93212s/12 iters), loss = 0.101546
I0412 14:01:12.736042 6200 solver.cpp:237] Train net output #0: loss = 0.101546 (* 1 = 0.101546 loss)
I0412 14:01:12.736054 6200 sgd_solver.cpp:105] Iteration 9252, lr = 0.00159983
I0412 14:01:17.902525 6200 solver.cpp:218] Iteration 9264 (2.32273 iter/s, 5.16633s/12 iters), loss = 0.143709
I0412 14:01:17.902629 6200 solver.cpp:237] Train net output #0: loss = 0.143709 (* 1 = 0.143709 loss)
I0412 14:01:17.902640 6200 sgd_solver.cpp:105] Iteration 9264, lr = 0.00159603
I0412 14:01:22.784327 6200 solver.cpp:218] Iteration 9276 (2.45823 iter/s, 4.88155s/12 iters), loss = 0.0865597
I0412 14:01:22.784382 6200 solver.cpp:237] Train net output #0: loss = 0.0865597 (* 1 = 0.0865597 loss)
I0412 14:01:22.784394 6200 sgd_solver.cpp:105] Iteration 9276, lr = 0.00159224
I0412 14:01:24.758657 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9282.caffemodel
I0412 14:01:28.545696 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9282.solverstate
I0412 14:01:31.939251 6200 solver.cpp:330] Iteration 9282, Testing net (#0)
I0412 14:01:31.939280 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:01:32.754106 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:01:36.382506 6200 solver.cpp:397] Test net output #0: accuracy = 0.479779
I0412 14:01:36.382539 6200 solver.cpp:397] Test net output #1: loss = 2.9963 (* 1 = 2.9963 loss)
I0412 14:01:38.400977 6200 solver.cpp:218] Iteration 9288 (0.768435 iter/s, 15.6162s/12 iters), loss = 0.101105
I0412 14:01:38.401019 6200 solver.cpp:237] Train net output #0: loss = 0.101105 (* 1 = 0.101105 loss)
I0412 14:01:38.401029 6200 sgd_solver.cpp:105] Iteration 9288, lr = 0.00158846
I0412 14:01:43.433835 6200 solver.cpp:218] Iteration 9300 (2.38442 iter/s, 5.03267s/12 iters), loss = 0.187669
I0412 14:01:43.433881 6200 solver.cpp:237] Train net output #0: loss = 0.187669 (* 1 = 0.187669 loss)
I0412 14:01:43.433892 6200 sgd_solver.cpp:105] Iteration 9300, lr = 0.00158469
I0412 14:01:45.603724 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:01:48.300421 6200 solver.cpp:218] Iteration 9312 (2.46589 iter/s, 4.86639s/12 iters), loss = 0.0948704
I0412 14:01:48.300591 6200 solver.cpp:237] Train net output #0: loss = 0.0948704 (* 1 = 0.0948704 loss)
I0412 14:01:48.300608 6200 sgd_solver.cpp:105] Iteration 9312, lr = 0.00158092
I0412 14:01:53.230808 6200 solver.cpp:218] Iteration 9324 (2.43404 iter/s, 4.93008s/12 iters), loss = 0.0298726
I0412 14:01:53.230850 6200 solver.cpp:237] Train net output #0: loss = 0.0298726 (* 1 = 0.0298726 loss)
I0412 14:01:53.230859 6200 sgd_solver.cpp:105] Iteration 9324, lr = 0.00157717
I0412 14:01:58.213729 6200 solver.cpp:218] Iteration 9336 (2.40832 iter/s, 4.98273s/12 iters), loss = 0.151616
I0412 14:01:58.213783 6200 solver.cpp:237] Train net output #0: loss = 0.151616 (* 1 = 0.151616 loss)
I0412 14:01:58.213796 6200 sgd_solver.cpp:105] Iteration 9336, lr = 0.00157343
I0412 14:02:03.123507 6200 solver.cpp:218] Iteration 9348 (2.44421 iter/s, 4.90957s/12 iters), loss = 0.071443
I0412 14:02:03.123561 6200 solver.cpp:237] Train net output #0: loss = 0.071443 (* 1 = 0.071443 loss)
I0412 14:02:03.123574 6200 sgd_solver.cpp:105] Iteration 9348, lr = 0.00156969
I0412 14:02:08.040489 6200 solver.cpp:218] Iteration 9360 (2.44062 iter/s, 4.91678s/12 iters), loss = 0.0485276
I0412 14:02:08.040534 6200 solver.cpp:237] Train net output #0: loss = 0.0485276 (* 1 = 0.0485276 loss)
I0412 14:02:08.040541 6200 sgd_solver.cpp:105] Iteration 9360, lr = 0.00156596
I0412 14:02:12.982532 6200 solver.cpp:218] Iteration 9372 (2.42824 iter/s, 4.94185s/12 iters), loss = 0.0861642
I0412 14:02:12.982584 6200 solver.cpp:237] Train net output #0: loss = 0.0861642 (* 1 = 0.0861642 loss)
I0412 14:02:12.982597 6200 sgd_solver.cpp:105] Iteration 9372, lr = 0.00156225
I0412 14:02:17.448593 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9384.caffemodel
I0412 14:02:19.460750 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9384.solverstate
I0412 14:02:21.019969 6200 solver.cpp:330] Iteration 9384, Testing net (#0)
I0412 14:02:21.019997 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:02:21.695430 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:02:25.441102 6200 solver.cpp:397] Test net output #0: accuracy = 0.48652
I0412 14:02:25.441146 6200 solver.cpp:397] Test net output #1: loss = 2.92244 (* 1 = 2.92244 loss)
I0412 14:02:25.530293 6200 solver.cpp:218] Iteration 9384 (0.956376 iter/s, 12.5474s/12 iters), loss = 0.105993
I0412 14:02:25.530337 6200 solver.cpp:237] Train net output #0: loss = 0.105993 (* 1 = 0.105993 loss)
I0412 14:02:25.530346 6200 sgd_solver.cpp:105] Iteration 9384, lr = 0.00155854
I0412 14:02:29.871028 6200 solver.cpp:218] Iteration 9396 (2.76462 iter/s, 4.34056s/12 iters), loss = 0.0494277
I0412 14:02:29.871083 6200 solver.cpp:237] Train net output #0: loss = 0.0494277 (* 1 = 0.0494277 loss)
I0412 14:02:29.871095 6200 sgd_solver.cpp:105] Iteration 9396, lr = 0.00155484
I0412 14:02:34.179167 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:02:34.838534 6200 solver.cpp:218] Iteration 9408 (2.4158 iter/s, 4.96731s/12 iters), loss = 0.16571
I0412 14:02:34.838580 6200 solver.cpp:237] Train net output #0: loss = 0.16571 (* 1 = 0.16571 loss)
I0412 14:02:34.838588 6200 sgd_solver.cpp:105] Iteration 9408, lr = 0.00155114
I0412 14:02:39.801087 6200 solver.cpp:218] Iteration 9420 (2.41821 iter/s, 4.96236s/12 iters), loss = 0.0728537
I0412 14:02:39.801147 6200 solver.cpp:237] Train net output #0: loss = 0.0728537 (* 1 = 0.0728537 loss)
I0412 14:02:39.801159 6200 sgd_solver.cpp:105] Iteration 9420, lr = 0.00154746
I0412 14:02:44.747412 6200 solver.cpp:218] Iteration 9432 (2.42614 iter/s, 4.94612s/12 iters), loss = 0.0679881
I0412 14:02:44.747457 6200 solver.cpp:237] Train net output #0: loss = 0.0679882 (* 1 = 0.0679882 loss)
I0412 14:02:44.747467 6200 sgd_solver.cpp:105] Iteration 9432, lr = 0.00154379
I0412 14:02:49.756871 6200 solver.cpp:218] Iteration 9444 (2.39556 iter/s, 5.00926s/12 iters), loss = 0.0757769
I0412 14:02:49.756991 6200 solver.cpp:237] Train net output #0: loss = 0.0757769 (* 1 = 0.0757769 loss)
I0412 14:02:49.757000 6200 sgd_solver.cpp:105] Iteration 9444, lr = 0.00154012
I0412 14:02:54.703069 6200 solver.cpp:218] Iteration 9456 (2.42624 iter/s, 4.94593s/12 iters), loss = 0.0853102
I0412 14:02:54.703116 6200 solver.cpp:237] Train net output #0: loss = 0.0853102 (* 1 = 0.0853102 loss)
I0412 14:02:54.703125 6200 sgd_solver.cpp:105] Iteration 9456, lr = 0.00153647
I0412 14:02:59.730437 6200 solver.cpp:218] Iteration 9468 (2.38703 iter/s, 5.02717s/12 iters), loss = 0.0617597
I0412 14:02:59.730481 6200 solver.cpp:237] Train net output #0: loss = 0.0617598 (* 1 = 0.0617598 loss)
I0412 14:02:59.730490 6200 sgd_solver.cpp:105] Iteration 9468, lr = 0.00153282
I0412 14:03:04.864914 6200 solver.cpp:218] Iteration 9480 (2.33723 iter/s, 5.13428s/12 iters), loss = 0.0552073
I0412 14:03:04.864967 6200 solver.cpp:237] Train net output #0: loss = 0.0552073 (* 1 = 0.0552073 loss)
I0412 14:03:04.864979 6200 sgd_solver.cpp:105] Iteration 9480, lr = 0.00152918
I0412 14:03:07.009449 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9486.caffemodel
I0412 14:03:10.312669 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9486.solverstate
I0412 14:03:11.884354 6200 solver.cpp:330] Iteration 9486, Testing net (#0)
I0412 14:03:11.884387 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:03:12.625481 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:03:16.478204 6200 solver.cpp:397] Test net output #0: accuracy = 0.476103
I0412 14:03:16.478250 6200 solver.cpp:397] Test net output #1: loss = 3.00557 (* 1 = 3.00557 loss)
I0412 14:03:18.371071 6200 solver.cpp:218] Iteration 9492 (0.888512 iter/s, 13.5057s/12 iters), loss = 0.161726
I0412 14:03:18.371126 6200 solver.cpp:237] Train net output #0: loss = 0.161726 (* 1 = 0.161726 loss)
I0412 14:03:18.371138 6200 sgd_solver.cpp:105] Iteration 9492, lr = 0.00152555
I0412 14:03:23.485071 6200 solver.cpp:218] Iteration 9504 (2.34659 iter/s, 5.11379s/12 iters), loss = 0.0545316
I0412 14:03:23.485172 6200 solver.cpp:237] Train net output #0: loss = 0.0545316 (* 1 = 0.0545316 loss)
I0412 14:03:23.485183 6200 sgd_solver.cpp:105] Iteration 9504, lr = 0.00152193
I0412 14:03:24.936180 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:03:28.323932 6200 solver.cpp:218] Iteration 9516 (2.48005 iter/s, 4.83861s/12 iters), loss = 0.0838746
I0412 14:03:28.323985 6200 solver.cpp:237] Train net output #0: loss = 0.0838746 (* 1 = 0.0838746 loss)
I0412 14:03:28.323997 6200 sgd_solver.cpp:105] Iteration 9516, lr = 0.00151831
I0412 14:03:33.247916 6200 solver.cpp:218] Iteration 9528 (2.43715 iter/s, 4.92378s/12 iters), loss = 0.125358
I0412 14:03:33.247973 6200 solver.cpp:237] Train net output #0: loss = 0.125358 (* 1 = 0.125358 loss)
I0412 14:03:33.247987 6200 sgd_solver.cpp:105] Iteration 9528, lr = 0.00151471
I0412 14:03:38.061619 6200 solver.cpp:218] Iteration 9540 (2.49299 iter/s, 4.8135s/12 iters), loss = 0.0782754
I0412 14:03:38.061664 6200 solver.cpp:237] Train net output #0: loss = 0.0782754 (* 1 = 0.0782754 loss)
I0412 14:03:38.061673 6200 sgd_solver.cpp:105] Iteration 9540, lr = 0.00151111
I0412 14:03:42.955054 6200 solver.cpp:218] Iteration 9552 (2.45236 iter/s, 4.89324s/12 iters), loss = 0.150414
I0412 14:03:42.955097 6200 solver.cpp:237] Train net output #0: loss = 0.150414 (* 1 = 0.150414 loss)
I0412 14:03:42.955106 6200 sgd_solver.cpp:105] Iteration 9552, lr = 0.00150752
I0412 14:03:47.933257 6200 solver.cpp:218] Iteration 9564 (2.4106 iter/s, 4.97801s/12 iters), loss = 0.0316077
I0412 14:03:47.933315 6200 solver.cpp:237] Train net output #0: loss = 0.0316077 (* 1 = 0.0316077 loss)
I0412 14:03:47.933328 6200 sgd_solver.cpp:105] Iteration 9564, lr = 0.00150395
I0412 14:03:52.849117 6200 solver.cpp:218] Iteration 9576 (2.44118 iter/s, 4.91566s/12 iters), loss = 0.0813755
I0412 14:03:52.849171 6200 solver.cpp:237] Train net output #0: loss = 0.0813756 (* 1 = 0.0813756 loss)
I0412 14:03:52.849185 6200 sgd_solver.cpp:105] Iteration 9576, lr = 0.00150037
I0412 14:03:57.350427 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9588.caffemodel
I0412 14:04:00.844030 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9588.solverstate
I0412 14:04:04.453341 6200 solver.cpp:330] Iteration 9588, Testing net (#0)
I0412 14:04:04.453368 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:04:05.147195 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:04:09.093430 6200 solver.cpp:397] Test net output #0: accuracy = 0.477941
I0412 14:04:09.093480 6200 solver.cpp:397] Test net output #1: loss = 3.04604 (* 1 = 3.04604 loss)
I0412 14:04:09.178539 6200 solver.cpp:218] Iteration 9588 (0.734893 iter/s, 16.3289s/12 iters), loss = 0.0462628
I0412 14:04:09.178592 6200 solver.cpp:237] Train net output #0: loss = 0.0462628 (* 1 = 0.0462628 loss)
I0412 14:04:09.178603 6200 sgd_solver.cpp:105] Iteration 9588, lr = 0.00149681
I0412 14:04:13.481812 6200 solver.cpp:218] Iteration 9600 (2.78869 iter/s, 4.30309s/12 iters), loss = 0.188555
I0412 14:04:13.481858 6200 solver.cpp:237] Train net output #0: loss = 0.188555 (* 1 = 0.188555 loss)
I0412 14:04:13.481868 6200 sgd_solver.cpp:105] Iteration 9600, lr = 0.00149326
I0412 14:04:17.209558 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:04:18.601554 6200 solver.cpp:218] Iteration 9612 (2.34396 iter/s, 5.11954s/12 iters), loss = 0.133946
I0412 14:04:18.601604 6200 solver.cpp:237] Train net output #0: loss = 0.133946 (* 1 = 0.133946 loss)
I0412 14:04:18.601614 6200 sgd_solver.cpp:105] Iteration 9612, lr = 0.00148971
I0412 14:04:23.548002 6200 solver.cpp:218] Iteration 9624 (2.42608 iter/s, 4.94625s/12 iters), loss = 0.0845998
I0412 14:04:23.548058 6200 solver.cpp:237] Train net output #0: loss = 0.0845998 (* 1 = 0.0845998 loss)
I0412 14:04:23.548070 6200 sgd_solver.cpp:105] Iteration 9624, lr = 0.00148618
I0412 14:04:28.399299 6200 solver.cpp:218] Iteration 9636 (2.47367 iter/s, 4.8511s/12 iters), loss = 0.104874
I0412 14:04:28.399381 6200 solver.cpp:237] Train net output #0: loss = 0.104874 (* 1 = 0.104874 loss)
I0412 14:04:28.399394 6200 sgd_solver.cpp:105] Iteration 9636, lr = 0.00148265
I0412 14:04:33.411813 6200 solver.cpp:218] Iteration 9648 (2.39412 iter/s, 5.01228s/12 iters), loss = 0.106706
I0412 14:04:33.411865 6200 solver.cpp:237] Train net output #0: loss = 0.106706 (* 1 = 0.106706 loss)
I0412 14:04:33.411877 6200 sgd_solver.cpp:105] Iteration 9648, lr = 0.00147913
I0412 14:04:38.277127 6200 solver.cpp:218] Iteration 9660 (2.46654 iter/s, 4.86512s/12 iters), loss = 0.221794
I0412 14:04:38.277171 6200 solver.cpp:237] Train net output #0: loss = 0.221794 (* 1 = 0.221794 loss)
I0412 14:04:38.277179 6200 sgd_solver.cpp:105] Iteration 9660, lr = 0.00147562
I0412 14:04:43.152527 6200 solver.cpp:218] Iteration 9672 (2.46143 iter/s, 4.87521s/12 iters), loss = 0.0944538
I0412 14:04:43.152571 6200 solver.cpp:237] Train net output #0: loss = 0.0944538 (* 1 = 0.0944538 loss)
I0412 14:04:43.152580 6200 sgd_solver.cpp:105] Iteration 9672, lr = 0.00147211
I0412 14:04:48.087098 6200 solver.cpp:218] Iteration 9684 (2.43192 iter/s, 4.93438s/12 iters), loss = 0.140151
I0412 14:04:48.087144 6200 solver.cpp:237] Train net output #0: loss = 0.140151 (* 1 = 0.140151 loss)
I0412 14:04:48.087153 6200 sgd_solver.cpp:105] Iteration 9684, lr = 0.00146862
I0412 14:04:50.216233 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9690.caffemodel
I0412 14:04:53.215499 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9690.solverstate
I0412 14:04:55.529354 6200 solver.cpp:330] Iteration 9690, Testing net (#0)
I0412 14:04:55.529377 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:04:56.173768 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:04:58.896999 6200 blocking_queue.cpp:49] Waiting for data
I0412 14:04:59.969681 6200 solver.cpp:397] Test net output #0: accuracy = 0.471201
I0412 14:04:59.969746 6200 solver.cpp:397] Test net output #1: loss = 3.04484 (* 1 = 3.04484 loss)
I0412 14:05:01.782438 6200 solver.cpp:218] Iteration 9696 (0.876238 iter/s, 13.6949s/12 iters), loss = 0.11049
I0412 14:05:01.782490 6200 solver.cpp:237] Train net output #0: loss = 0.11049 (* 1 = 0.11049 loss)
I0412 14:05:01.782500 6200 sgd_solver.cpp:105] Iteration 9696, lr = 0.00146513
I0412 14:05:06.758424 6200 solver.cpp:218] Iteration 9708 (2.41168 iter/s, 4.97578s/12 iters), loss = 0.195294
I0412 14:05:06.758472 6200 solver.cpp:237] Train net output #0: loss = 0.195294 (* 1 = 0.195294 loss)
I0412 14:05:06.758484 6200 sgd_solver.cpp:105] Iteration 9708, lr = 0.00146165
I0412 14:05:07.526515 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:05:12.180308 6200 solver.cpp:218] Iteration 9720 (2.21334 iter/s, 5.42168s/12 iters), loss = 0.126734
I0412 14:05:12.180353 6200 solver.cpp:237] Train net output #0: loss = 0.126734 (* 1 = 0.126734 loss)
I0412 14:05:12.180363 6200 sgd_solver.cpp:105] Iteration 9720, lr = 0.00145818
I0412 14:05:17.333657 6200 solver.cpp:218] Iteration 9732 (2.32867 iter/s, 5.15315s/12 iters), loss = 0.144519
I0412 14:05:17.333709 6200 solver.cpp:237] Train net output #0: loss = 0.144519 (* 1 = 0.144519 loss)
I0412 14:05:17.333719 6200 sgd_solver.cpp:105] Iteration 9732, lr = 0.00145472
I0412 14:05:22.275635 6200 solver.cpp:218] Iteration 9744 (2.42827 iter/s, 4.94178s/12 iters), loss = 0.173497
I0412 14:05:22.275678 6200 solver.cpp:237] Train net output #0: loss = 0.173497 (* 1 = 0.173497 loss)
I0412 14:05:22.275687 6200 sgd_solver.cpp:105] Iteration 9744, lr = 0.00145127
I0412 14:05:27.272111 6200 solver.cpp:218] Iteration 9756 (2.40179 iter/s, 4.99628s/12 iters), loss = 0.180932
I0412 14:05:27.272167 6200 solver.cpp:237] Train net output #0: loss = 0.180932 (* 1 = 0.180932 loss)
I0412 14:05:27.272181 6200 sgd_solver.cpp:105] Iteration 9756, lr = 0.00144782
I0412 14:05:32.157132 6200 solver.cpp:218] Iteration 9768 (2.45659 iter/s, 4.88482s/12 iters), loss = 0.102248
I0412 14:05:32.157243 6200 solver.cpp:237] Train net output #0: loss = 0.102248 (* 1 = 0.102248 loss)
I0412 14:05:32.157255 6200 sgd_solver.cpp:105] Iteration 9768, lr = 0.00144438
I0412 14:05:37.113306 6200 solver.cpp:218] Iteration 9780 (2.42135 iter/s, 4.95592s/12 iters), loss = 0.0943117
I0412 14:05:37.113353 6200 solver.cpp:237] Train net output #0: loss = 0.0943117 (* 1 = 0.0943117 loss)
I0412 14:05:37.113363 6200 sgd_solver.cpp:105] Iteration 9780, lr = 0.00144095
I0412 14:05:41.704293 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9792.caffemodel
I0412 14:05:43.760645 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9792.solverstate
I0412 14:05:45.341925 6200 solver.cpp:330] Iteration 9792, Testing net (#0)
I0412 14:05:45.341980 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:05:45.947774 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:05:49.986027 6200 solver.cpp:397] Test net output #0: accuracy = 0.489583
I0412 14:05:49.986063 6200 solver.cpp:397] Test net output #1: loss = 2.90537 (* 1 = 2.90537 loss)
I0412 14:05:50.071131 6200 solver.cpp:218] Iteration 9792 (0.926111 iter/s, 12.9574s/12 iters), loss = 0.0672732
I0412 14:05:50.071188 6200 solver.cpp:237] Train net output #0: loss = 0.0672732 (* 1 = 0.0672732 loss)
I0412 14:05:50.071199 6200 sgd_solver.cpp:105] Iteration 9792, lr = 0.00143753
I0412 14:05:54.197448 6200 solver.cpp:218] Iteration 9804 (2.90829 iter/s, 4.12614s/12 iters), loss = 0.0824661
I0412 14:05:54.197489 6200 solver.cpp:237] Train net output #0: loss = 0.0824661 (* 1 = 0.0824661 loss)
I0412 14:05:54.197499 6200 sgd_solver.cpp:105] Iteration 9804, lr = 0.00143412
I0412 14:05:57.114765 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:05:59.296716 6200 solver.cpp:218] Iteration 9816 (2.35337 iter/s, 5.09908s/12 iters), loss = 0.0937282
I0412 14:05:59.296761 6200 solver.cpp:237] Train net output #0: loss = 0.0937282 (* 1 = 0.0937282 loss)
I0412 14:05:59.296770 6200 sgd_solver.cpp:105] Iteration 9816, lr = 0.00143072
I0412 14:06:04.274044 6200 solver.cpp:218] Iteration 9828 (2.41103 iter/s, 4.97713s/12 iters), loss = 0.115459
I0412 14:06:04.274174 6200 solver.cpp:237] Train net output #0: loss = 0.115459 (* 1 = 0.115459 loss)
I0412 14:06:04.274184 6200 sgd_solver.cpp:105] Iteration 9828, lr = 0.00142732
I0412 14:06:09.494117 6200 solver.cpp:218] Iteration 9840 (2.29894 iter/s, 5.21979s/12 iters), loss = 0.138441
I0412 14:06:09.494168 6200 solver.cpp:237] Train net output #0: loss = 0.138441 (* 1 = 0.138441 loss)
I0412 14:06:09.494179 6200 sgd_solver.cpp:105] Iteration 9840, lr = 0.00142393
I0412 14:06:14.544116 6200 solver.cpp:218] Iteration 9852 (2.37633 iter/s, 5.0498s/12 iters), loss = 0.143268
I0412 14:06:14.544173 6200 solver.cpp:237] Train net output #0: loss = 0.143268 (* 1 = 0.143268 loss)
I0412 14:06:14.544184 6200 sgd_solver.cpp:105] Iteration 9852, lr = 0.00142055
I0412 14:06:19.449378 6200 solver.cpp:218] Iteration 9864 (2.44645 iter/s, 4.90506s/12 iters), loss = 0.0678009
I0412 14:06:19.449422 6200 solver.cpp:237] Train net output #0: loss = 0.0678009 (* 1 = 0.0678009 loss)
I0412 14:06:19.449432 6200 sgd_solver.cpp:105] Iteration 9864, lr = 0.00141718
I0412 14:06:24.596001 6200 solver.cpp:218] Iteration 9876 (2.33172 iter/s, 5.14642s/12 iters), loss = 0.0538963
I0412 14:06:24.596062 6200 solver.cpp:237] Train net output #0: loss = 0.0538963 (* 1 = 0.0538963 loss)
I0412 14:06:24.596076 6200 sgd_solver.cpp:105] Iteration 9876, lr = 0.00141381
I0412 14:06:29.506835 6200 solver.cpp:218] Iteration 9888 (2.44368 iter/s, 4.91063s/12 iters), loss = 0.113328
I0412 14:06:29.506886 6200 solver.cpp:237] Train net output #0: loss = 0.113328 (* 1 = 0.113328 loss)
I0412 14:06:29.506894 6200 sgd_solver.cpp:105] Iteration 9888, lr = 0.00141045
I0412 14:06:31.530609 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9894.caffemodel
I0412 14:06:35.000237 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9894.solverstate
I0412 14:06:37.777382 6200 solver.cpp:330] Iteration 9894, Testing net (#0)
I0412 14:06:37.777407 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:06:38.307510 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:06:42.227119 6200 solver.cpp:397] Test net output #0: accuracy = 0.488358
I0412 14:06:42.227161 6200 solver.cpp:397] Test net output #1: loss = 2.87353 (* 1 = 2.87353 loss)
I0412 14:06:44.116001 6200 solver.cpp:218] Iteration 9900 (0.821428 iter/s, 14.6087s/12 iters), loss = 0.176527
I0412 14:06:44.116053 6200 solver.cpp:237] Train net output #0: loss = 0.176527 (* 1 = 0.176527 loss)
I0412 14:06:44.116065 6200 sgd_solver.cpp:105] Iteration 9900, lr = 0.00140711
I0412 14:06:49.020817 6200 solver.cpp:218] Iteration 9912 (2.44668 iter/s, 4.90461s/12 iters), loss = 0.116025
I0412 14:06:49.020870 6200 solver.cpp:237] Train net output #0: loss = 0.116025 (* 1 = 0.116025 loss)
I0412 14:06:49.020886 6200 sgd_solver.cpp:105] Iteration 9912, lr = 0.00140377
I0412 14:06:49.130546 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:06:54.014367 6200 solver.cpp:218] Iteration 9924 (2.40319 iter/s, 4.99335s/12 iters), loss = 0.0758666
I0412 14:06:54.014413 6200 solver.cpp:237] Train net output #0: loss = 0.0758666 (* 1 = 0.0758666 loss)
I0412 14:06:54.014422 6200 sgd_solver.cpp:105] Iteration 9924, lr = 0.00140043
I0412 14:06:59.020177 6200 solver.cpp:218] Iteration 9936 (2.39731 iter/s, 5.00561s/12 iters), loss = 0.123978
I0412 14:06:59.020222 6200 solver.cpp:237] Train net output #0: loss = 0.123978 (* 1 = 0.123978 loss)
I0412 14:06:59.020231 6200 sgd_solver.cpp:105] Iteration 9936, lr = 0.00139711
I0412 14:07:03.873716 6200 solver.cpp:218] Iteration 9948 (2.47252 iter/s, 4.85335s/12 iters), loss = 0.269892
I0412 14:07:03.873775 6200 solver.cpp:237] Train net output #0: loss = 0.269892 (* 1 = 0.269892 loss)
I0412 14:07:03.873791 6200 sgd_solver.cpp:105] Iteration 9948, lr = 0.00139379
I0412 14:07:08.842154 6200 solver.cpp:218] Iteration 9960 (2.41535 iter/s, 4.96822s/12 iters), loss = 0.0854635
I0412 14:07:08.845082 6200 solver.cpp:237] Train net output #0: loss = 0.0854636 (* 1 = 0.0854636 loss)
I0412 14:07:08.845095 6200 sgd_solver.cpp:105] Iteration 9960, lr = 0.00139048
I0412 14:07:13.763723 6200 solver.cpp:218] Iteration 9972 (2.43976 iter/s, 4.91851s/12 iters), loss = 0.113302
I0412 14:07:13.763762 6200 solver.cpp:237] Train net output #0: loss = 0.113302 (* 1 = 0.113302 loss)
I0412 14:07:13.763772 6200 sgd_solver.cpp:105] Iteration 9972, lr = 0.00138718
I0412 14:07:18.700031 6200 solver.cpp:218] Iteration 9984 (2.43106 iter/s, 4.93612s/12 iters), loss = 0.120875
I0412 14:07:18.700073 6200 solver.cpp:237] Train net output #0: loss = 0.120875 (* 1 = 0.120875 loss)
I0412 14:07:18.700081 6200 sgd_solver.cpp:105] Iteration 9984, lr = 0.00138389
I0412 14:07:23.124397 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9996.caffemodel
I0412 14:07:25.189535 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9996.solverstate
I0412 14:07:26.742547 6200 solver.cpp:330] Iteration 9996, Testing net (#0)
I0412 14:07:26.742570 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:07:27.254513 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:07:31.181373 6200 solver.cpp:397] Test net output #0: accuracy = 0.487745
I0412 14:07:31.181418 6200 solver.cpp:397] Test net output #1: loss = 2.94881 (* 1 = 2.94881 loss)
I0412 14:07:31.266638 6200 solver.cpp:218] Iteration 9996 (0.954942 iter/s, 12.5662s/12 iters), loss = 0.0505945
I0412 14:07:31.266695 6200 solver.cpp:237] Train net output #0: loss = 0.0505945 (* 1 = 0.0505945 loss)
I0412 14:07:31.266707 6200 sgd_solver.cpp:105] Iteration 9996, lr = 0.0013806
I0412 14:07:35.657061 6200 solver.cpp:218] Iteration 10008 (2.73334 iter/s, 4.39023s/12 iters), loss = 0.100613
I0412 14:07:35.657119 6200 solver.cpp:237] Train net output #0: loss = 0.100613 (* 1 = 0.100613 loss)
I0412 14:07:35.657131 6200 sgd_solver.cpp:105] Iteration 10008, lr = 0.00137732
I0412 14:07:37.845711 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:07:40.512691 6200 solver.cpp:218] Iteration 10020 (2.47146 iter/s, 4.85543s/12 iters), loss = 0.0330346
I0412 14:07:40.512784 6200 solver.cpp:237] Train net output #0: loss = 0.0330347 (* 1 = 0.0330347 loss)
I0412 14:07:40.512795 6200 sgd_solver.cpp:105] Iteration 10020, lr = 0.00137405
I0412 14:07:45.447053 6200 solver.cpp:218] Iteration 10032 (2.43204 iter/s, 4.93412s/12 iters), loss = 0.0248874
I0412 14:07:45.447100 6200 solver.cpp:237] Train net output #0: loss = 0.0248875 (* 1 = 0.0248875 loss)
I0412 14:07:45.447113 6200 sgd_solver.cpp:105] Iteration 10032, lr = 0.00137079
I0412 14:07:50.426726 6200 solver.cpp:218] Iteration 10044 (2.40989 iter/s, 4.97948s/12 iters), loss = 0.0370085
I0412 14:07:50.426775 6200 solver.cpp:237] Train net output #0: loss = 0.0370085 (* 1 = 0.0370085 loss)
I0412 14:07:50.426784 6200 sgd_solver.cpp:105] Iteration 10044, lr = 0.00136754
I0412 14:07:55.309130 6200 solver.cpp:218] Iteration 10056 (2.45791 iter/s, 4.8822s/12 iters), loss = 0.113688
I0412 14:07:55.309181 6200 solver.cpp:237] Train net output #0: loss = 0.113688 (* 1 = 0.113688 loss)
I0412 14:07:55.309192 6200 sgd_solver.cpp:105] Iteration 10056, lr = 0.00136429
I0412 14:08:00.193807 6200 solver.cpp:218] Iteration 10068 (2.45676 iter/s, 4.88448s/12 iters), loss = 0.0487831
I0412 14:08:00.193854 6200 solver.cpp:237] Train net output #0: loss = 0.0487831 (* 1 = 0.0487831 loss)
I0412 14:08:00.193863 6200 sgd_solver.cpp:105] Iteration 10068, lr = 0.00136105
I0412 14:08:05.247815 6200 solver.cpp:218] Iteration 10080 (2.37445 iter/s, 5.05381s/12 iters), loss = 0.089714
I0412 14:08:05.247864 6200 solver.cpp:237] Train net output #0: loss = 0.0897141 (* 1 = 0.0897141 loss)
I0412 14:08:05.247875 6200 sgd_solver.cpp:105] Iteration 10080, lr = 0.00135782
I0412 14:08:10.495405 6200 solver.cpp:218] Iteration 10092 (2.28685 iter/s, 5.24738s/12 iters), loss = 0.0215895
I0412 14:08:10.495457 6200 solver.cpp:237] Train net output #0: loss = 0.0215896 (* 1 = 0.0215896 loss)
I0412 14:08:10.495469 6200 sgd_solver.cpp:105] Iteration 10092, lr = 0.0013546
I0412 14:08:12.533725 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_10098.caffemodel
I0412 14:08:14.640017 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_10098.solverstate
I0412 14:08:16.191380 6200 solver.cpp:330] Iteration 10098, Testing net (#0)
I0412 14:08:16.191402 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:08:16.622699 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:08:20.632416 6200 solver.cpp:397] Test net output #0: accuracy = 0.489583
I0412 14:08:20.632444 6200 solver.cpp:397] Test net output #1: loss = 2.98303 (* 1 = 2.98303 loss)
I0412 14:08:22.607854 6200 solver.cpp:218] Iteration 10104 (0.990749 iter/s, 12.1121s/12 iters), loss = 0.0579605
I0412 14:08:22.607904 6200 solver.cpp:237] Train net output #0: loss = 0.0579605 (* 1 = 0.0579605 loss)
I0412 14:08:22.607914 6200 sgd_solver.cpp:105] Iteration 10104, lr = 0.00135138
I0412 14:08:27.044301 6204 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:08:27.669260 6200 solver.cpp:218] Iteration 10116 (2.37098 iter/s, 5.0612s/12 iters), loss = 0.0714781
I0412 14:08:27.669308 6200 solver.cpp:237] Train net output #0: loss = 0.0714782 (* 1 = 0.0714782 loss)
I0412 14:08:27.669319 6200 sgd_solver.cpp:105] Iteration 10116, lr = 0.00134817
I0412 14:08:32.683054 6200 solver.cpp:218] Iteration 10128 (2.39349 iter/s, 5.0136s/12 iters), loss = 0.111822
I0412 14:08:32.683096 6200 solver.cpp:237] Train net output #0: loss = 0.111822 (* 1 = 0.111822 loss)
I0412 14:08:32.683105 6200 sgd_solver.cpp:105] Iteration 10128, lr = 0.00134497
I0412 14:08:37.656711 6200 solver.cpp:218] Iteration 10140 (2.4128 iter/s, 4.97347s/12 iters), loss = 0.0330273
I0412 14:08:37.656766 6200 solver.cpp:237] Train net output #0: loss = 0.0330274 (* 1 = 0.0330274 loss)
I0412 14:08:37.656778 6200 sgd_solver.cpp:105] Iteration 10140, lr = 0.00134178
I0412 14:08:42.658411 6200 solver.cpp:218] Iteration 10152 (2.39928 iter/s, 5.00149s/12 iters), loss = 0.0435296
I0412 14:08:42.658540 6200 solver.cpp:237] Train net output #0: loss = 0.0435297 (* 1 = 0.0435297 loss)
I0412 14:08:42.658553 6200 sgd_solver.cpp:105] Iteration 10152, lr = 0.00133859
I0412 14:08:47.503547 6200 solver.cpp:218] Iteration 10164 (2.47685 iter/s, 4.84486s/12 iters), loss = 0.0480556
I0412 14:08:47.503607 6200 solver.cpp:237] Train net output #0: loss = 0.0480556 (* 1 = 0.0480556 loss)
I0412 14:08:47.503619 6200 sgd_solver.cpp:105] Iteration 10164, lr = 0.00133541
I0412 14:08:52.440933 6200 solver.cpp:218] Iteration 10176 (2.43054 iter/s, 4.93718s/12 iters), loss = 0.040205
I0412 14:08:52.440977 6200 solver.cpp:237] Train net output #0: loss = 0.0402051 (* 1 = 0.0402051 loss)
I0412 14:08:52.440986 6200 sgd_solver.cpp:105] Iteration 10176, lr = 0.00133224
I0412 14:08:57.755086 6200 solver.cpp:218] Iteration 10188 (2.25821 iter/s, 5.31395s/12 iters), loss = 0.0797417
I0412 14:08:57.755136 6200 solver.cpp:237] Train net output #0: loss = 0.0797418 (* 1 = 0.0797418 loss)
I0412 14:08:57.755146 6200 sgd_solver.cpp:105] Iteration 10188, lr = 0.00132908
I0412 14:09:02.273619 6200 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_10200.caffemodel
I0412 14:09:04.874008 6200 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_10200.solverstate
I0412 14:09:09.789876 6200 solver.cpp:310] Iteration 10200, loss = 0.0827337
I0412 14:09:09.789903 6200 solver.cpp:330] Iteration 10200, Testing net (#0)
I0412 14:09:09.789909 6200 net.cpp:676] Ignoring source layer train-data
I0412 14:09:10.161761 6205 data_layer.cpp:73] Restarting data prefetching from start.
I0412 14:09:14.192184 6200 solver.cpp:397] Test net output #0: accuracy = 0.492034
I0412 14:09:14.193526 6200 solver.cpp:397] Test net output #1: loss = 2.99996 (* 1 = 2.99996 loss)
I0412 14:09:14.193539 6200 solver.cpp:315] Optimization Done.
I0412 14:09:14.193547 6200 caffe.cpp:259] Optimization Done.
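
# Note (not part of the original solver output): the "lr = ..." values printed by
# sgd_solver.cpp:105 above follow Caffe's exponential lr_policy, lr(iter) = base_lr * gamma^iter.
# The snippet below is a minimal sketch that recovers gamma and base_lr from two
# (iteration, lr) pairs copied from the log; the specific pairs and the recovered
# constants are read off the lines above, everything else is illustrative.

import math

# (iteration, lr) pairs taken from the logged sgd_solver lines above
it_a, lr_a = 9408, 0.00155114
it_b, lr_b = 9420, 0.00154746

gamma = (lr_b / lr_a) ** (1.0 / (it_b - it_a))   # per-iteration decay factor
base_lr = lr_a / gamma ** it_a                    # implied learning rate at iteration 0

print(f"gamma   ~ {gamma:.8f}")   # comes out close to 0.9998
print(f"base_lr ~ {base_lr:.5f}") # comes out close to 0.01

# Sanity check: extrapolate to a later iteration and compare with the value the
# solver actually logged there (small differences are expected because the logged
# lr values are rounded to six significant figures).
print(base_lr * gamma ** 10188)   # should roughly match the lr logged at iteration 10188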