DIGITS-CNN/cars/lr-investigations/step-down/1e-2/33_0.33/caffe_output.log

I0428 20:24:36.731207 12706 upgrade_proto.cpp:1082] Attempting to upgrade input file specified using deprecated 'solver_type' field (enum)': /mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210428-200748-c36d/solver.prototxt
I0428 20:24:36.731359 12706 upgrade_proto.cpp:1089] Successfully upgraded file specified using deprecated 'solver_type' field (enum) to 'type' field (string).
W0428 20:24:36.731364 12706 upgrade_proto.cpp:1091] Note that future Caffe releases will only support 'type' field (string) for a solver's type.
I0428 20:24:36.731420 12706 caffe.cpp:218] Using GPUs 3
I0428 20:24:36.797277 12706 caffe.cpp:223] GPU 3: GeForce GTX 1080 Ti
I0428 20:24:37.040671 12706 solver.cpp:44] Initializing solver from parameters:
test_iter: 51
test_interval: 102
base_lr: 0.01
display: 12
max_iter: 10200
lr_policy: "step"
gamma: 0.33
momentum: 0.9
weight_decay: 0.0001
stepsize: 3366
snapshot: 102
snapshot_prefix: "snapshot"
solver_mode: GPU
device_id: 3
net: "train_val.prototxt"
train_state {
level: 0
stage: ""
}
type: "SGD"
I0428 20:24:37.041440 12706 solver.cpp:87] Creating training net from net file: train_val.prototxt
I0428 20:24:37.041994 12706 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer val-data
I0428 20:24:37.042009 12706 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0428 20:24:37.042138 12706 net.cpp:51] Initializing net from parameters:
state {
phase: TRAIN
level: 0
stage: ""
}
layer {
name: "train-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/train_db"
batch_size: 128
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
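(For reference: the per-layer "Top shape" lines that follow come directly from the layer definitions above. A minimal sketch of the spatial-size arithmetic, assuming the usual Caffe output formula floor((in + 2*pad - kernel)/stride) + 1; the function name out_size is ours, and Caffe's pooling layers actually round up rather than down, though for these sizes the two agree:

    # Sketch: recompute the spatial sizes reported in the "Top shape" lines below
    # from the convolution/pooling parameters in the prototxt dump above.
    def out_size(inp, kernel, stride=1, pad=0):
        return (inp + 2 * pad - kernel) // stride + 1

    s = 227                          # crop_size from the data layer
    s = out_size(s, 11, stride=4)    # conv1 -> 55
    s = out_size(s, 3, stride=2)     # pool1 -> 27
    s = out_size(s, 5, pad=2)        # conv2 -> 27
    s = out_size(s, 3, stride=2)     # pool2 -> 13
    s = out_size(s, 3, pad=1)        # conv3 -> 13 (conv4 and conv5 likewise)
    s = out_size(s, 3, stride=2)     # pool5 -> 6
    print(s)                         # 6, so fc6 sees 256*6*6 = 9216 inputs per image

These values match the 55/27/13/6 spatial dimensions logged during net setup below.)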
I0428 20:24:37.042223 12706 layer_factory.hpp:77] Creating layer train-data
I0428 20:24:37.050719 12706 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/train_db
I0428 20:24:37.050891 12706 net.cpp:84] Creating Layer train-data
I0428 20:24:37.050902 12706 net.cpp:380] train-data -> data
I0428 20:24:37.050920 12706 net.cpp:380] train-data -> label
I0428 20:24:37.050931 12706 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/mean.binaryproto
I0428 20:24:37.055986 12706 data_layer.cpp:45] output data size: 128,3,227,227
I0428 20:24:37.197850 12706 net.cpp:122] Setting up train-data
I0428 20:24:37.197875 12706 net.cpp:129] Top shape: 128 3 227 227 (19787136)
I0428 20:24:37.197881 12706 net.cpp:129] Top shape: 128 (128)
I0428 20:24:37.197886 12706 net.cpp:137] Memory required for data: 79149056
I0428 20:24:37.197896 12706 layer_factory.hpp:77] Creating layer conv1
I0428 20:24:37.197918 12706 net.cpp:84] Creating Layer conv1
I0428 20:24:37.197924 12706 net.cpp:406] conv1 <- data
I0428 20:24:37.197937 12706 net.cpp:380] conv1 -> conv1
I0428 20:24:37.791007 12706 net.cpp:122] Setting up conv1
I0428 20:24:37.791029 12706 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0428 20:24:37.791035 12706 net.cpp:137] Memory required for data: 227833856
I0428 20:24:37.791055 12706 layer_factory.hpp:77] Creating layer relu1
I0428 20:24:37.791065 12706 net.cpp:84] Creating Layer relu1
I0428 20:24:37.791069 12706 net.cpp:406] relu1 <- conv1
I0428 20:24:37.791075 12706 net.cpp:367] relu1 -> conv1 (in-place)
I0428 20:24:37.791368 12706 net.cpp:122] Setting up relu1
I0428 20:24:37.791376 12706 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0428 20:24:37.791379 12706 net.cpp:137] Memory required for data: 376518656
I0428 20:24:37.791383 12706 layer_factory.hpp:77] Creating layer norm1
I0428 20:24:37.791393 12706 net.cpp:84] Creating Layer norm1
I0428 20:24:37.791416 12706 net.cpp:406] norm1 <- conv1
I0428 20:24:37.791422 12706 net.cpp:380] norm1 -> norm1
I0428 20:24:37.791867 12706 net.cpp:122] Setting up norm1
I0428 20:24:37.791877 12706 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0428 20:24:37.791882 12706 net.cpp:137] Memory required for data: 525203456
I0428 20:24:37.791887 12706 layer_factory.hpp:77] Creating layer pool1
I0428 20:24:37.791896 12706 net.cpp:84] Creating Layer pool1
I0428 20:24:37.791900 12706 net.cpp:406] pool1 <- norm1
I0428 20:24:37.791906 12706 net.cpp:380] pool1 -> pool1
I0428 20:24:37.791944 12706 net.cpp:122] Setting up pool1
I0428 20:24:37.791950 12706 net.cpp:129] Top shape: 128 96 27 27 (8957952)
I0428 20:24:37.791954 12706 net.cpp:137] Memory required for data: 561035264
I0428 20:24:37.791958 12706 layer_factory.hpp:77] Creating layer conv2
I0428 20:24:37.791970 12706 net.cpp:84] Creating Layer conv2
I0428 20:24:37.791973 12706 net.cpp:406] conv2 <- pool1
I0428 20:24:37.791980 12706 net.cpp:380] conv2 -> conv2
I0428 20:24:37.799386 12706 net.cpp:122] Setting up conv2
I0428 20:24:37.799403 12706 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0428 20:24:37.799408 12706 net.cpp:137] Memory required for data: 656586752
I0428 20:24:37.799422 12706 layer_factory.hpp:77] Creating layer relu2
I0428 20:24:37.799430 12706 net.cpp:84] Creating Layer relu2
I0428 20:24:37.799435 12706 net.cpp:406] relu2 <- conv2
I0428 20:24:37.799443 12706 net.cpp:367] relu2 -> conv2 (in-place)
I0428 20:24:37.799870 12706 net.cpp:122] Setting up relu2
I0428 20:24:37.799880 12706 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0428 20:24:37.799885 12706 net.cpp:137] Memory required for data: 752138240
I0428 20:24:37.799891 12706 layer_factory.hpp:77] Creating layer norm2
I0428 20:24:37.799901 12706 net.cpp:84] Creating Layer norm2
I0428 20:24:37.799904 12706 net.cpp:406] norm2 <- conv2
I0428 20:24:37.799912 12706 net.cpp:380] norm2 -> norm2
I0428 20:24:37.800202 12706 net.cpp:122] Setting up norm2
I0428 20:24:37.800212 12706 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0428 20:24:37.800216 12706 net.cpp:137] Memory required for data: 847689728
I0428 20:24:37.800222 12706 layer_factory.hpp:77] Creating layer pool2
I0428 20:24:37.800231 12706 net.cpp:84] Creating Layer pool2
I0428 20:24:37.800235 12706 net.cpp:406] pool2 <- norm2
I0428 20:24:37.800243 12706 net.cpp:380] pool2 -> pool2
I0428 20:24:37.800271 12706 net.cpp:122] Setting up pool2
I0428 20:24:37.800277 12706 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0428 20:24:37.800282 12706 net.cpp:137] Memory required for data: 869840896
I0428 20:24:37.800287 12706 layer_factory.hpp:77] Creating layer conv3
I0428 20:24:37.800297 12706 net.cpp:84] Creating Layer conv3
I0428 20:24:37.800300 12706 net.cpp:406] conv3 <- pool2
I0428 20:24:37.800307 12706 net.cpp:380] conv3 -> conv3
I0428 20:24:37.810350 12706 net.cpp:122] Setting up conv3
I0428 20:24:37.810369 12706 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 20:24:37.810372 12706 net.cpp:137] Memory required for data: 903067648
I0428 20:24:37.810385 12706 layer_factory.hpp:77] Creating layer relu3
I0428 20:24:37.810393 12706 net.cpp:84] Creating Layer relu3
I0428 20:24:37.810397 12706 net.cpp:406] relu3 <- conv3
I0428 20:24:37.810405 12706 net.cpp:367] relu3 -> conv3 (in-place)
I0428 20:24:37.810899 12706 net.cpp:122] Setting up relu3
I0428 20:24:37.810909 12706 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 20:24:37.810914 12706 net.cpp:137] Memory required for data: 936294400
I0428 20:24:37.810920 12706 layer_factory.hpp:77] Creating layer conv4
I0428 20:24:37.810932 12706 net.cpp:84] Creating Layer conv4
I0428 20:24:37.810936 12706 net.cpp:406] conv4 <- conv3
I0428 20:24:37.810945 12706 net.cpp:380] conv4 -> conv4
I0428 20:24:37.833113 12706 net.cpp:122] Setting up conv4
I0428 20:24:37.833133 12706 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 20:24:37.833137 12706 net.cpp:137] Memory required for data: 969521152
I0428 20:24:37.833148 12706 layer_factory.hpp:77] Creating layer relu4
I0428 20:24:37.833156 12706 net.cpp:84] Creating Layer relu4
I0428 20:24:37.833180 12706 net.cpp:406] relu4 <- conv4
I0428 20:24:37.833189 12706 net.cpp:367] relu4 -> conv4 (in-place)
I0428 20:24:37.833544 12706 net.cpp:122] Setting up relu4
I0428 20:24:37.833554 12706 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 20:24:37.833559 12706 net.cpp:137] Memory required for data: 1002747904
I0428 20:24:37.833564 12706 layer_factory.hpp:77] Creating layer conv5
I0428 20:24:37.833575 12706 net.cpp:84] Creating Layer conv5
I0428 20:24:37.833580 12706 net.cpp:406] conv5 <- conv4
I0428 20:24:37.833586 12706 net.cpp:380] conv5 -> conv5
I0428 20:24:37.854825 12706 net.cpp:122] Setting up conv5
I0428 20:24:37.854847 12706 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0428 20:24:37.854853 12706 net.cpp:137] Memory required for data: 1024899072
I0428 20:24:37.854869 12706 layer_factory.hpp:77] Creating layer relu5
I0428 20:24:37.854879 12706 net.cpp:84] Creating Layer relu5
I0428 20:24:37.854887 12706 net.cpp:406] relu5 <- conv5
I0428 20:24:37.854897 12706 net.cpp:367] relu5 -> conv5 (in-place)
I0428 20:24:37.855437 12706 net.cpp:122] Setting up relu5
I0428 20:24:37.855448 12706 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0428 20:24:37.855455 12706 net.cpp:137] Memory required for data: 1047050240
I0428 20:24:37.855461 12706 layer_factory.hpp:77] Creating layer pool5
I0428 20:24:37.855469 12706 net.cpp:84] Creating Layer pool5
I0428 20:24:37.855474 12706 net.cpp:406] pool5 <- conv5
I0428 20:24:37.855480 12706 net.cpp:380] pool5 -> pool5
I0428 20:24:37.855521 12706 net.cpp:122] Setting up pool5
I0428 20:24:37.855528 12706 net.cpp:129] Top shape: 128 256 6 6 (1179648)
I0428 20:24:37.855533 12706 net.cpp:137] Memory required for data: 1051768832
I0428 20:24:37.855538 12706 layer_factory.hpp:77] Creating layer fc6
I0428 20:24:37.855548 12706 net.cpp:84] Creating Layer fc6
I0428 20:24:37.855552 12706 net.cpp:406] fc6 <- pool5
I0428 20:24:37.855559 12706 net.cpp:380] fc6 -> fc6
I0428 20:24:38.210518 12706 net.cpp:122] Setting up fc6
I0428 20:24:38.210541 12706 net.cpp:129] Top shape: 128 4096 (524288)
I0428 20:24:38.210544 12706 net.cpp:137] Memory required for data: 1053865984
I0428 20:24:38.210554 12706 layer_factory.hpp:77] Creating layer relu6
I0428 20:24:38.210563 12706 net.cpp:84] Creating Layer relu6
I0428 20:24:38.210568 12706 net.cpp:406] relu6 <- fc6
I0428 20:24:38.210577 12706 net.cpp:367] relu6 -> fc6 (in-place)
I0428 20:24:38.211217 12706 net.cpp:122] Setting up relu6
I0428 20:24:38.211228 12706 net.cpp:129] Top shape: 128 4096 (524288)
I0428 20:24:38.211232 12706 net.cpp:137] Memory required for data: 1055963136
I0428 20:24:38.211237 12706 layer_factory.hpp:77] Creating layer drop6
I0428 20:24:38.211243 12706 net.cpp:84] Creating Layer drop6
I0428 20:24:38.211248 12706 net.cpp:406] drop6 <- fc6
I0428 20:24:38.211256 12706 net.cpp:367] drop6 -> fc6 (in-place)
I0428 20:24:38.211284 12706 net.cpp:122] Setting up drop6
I0428 20:24:38.211292 12706 net.cpp:129] Top shape: 128 4096 (524288)
I0428 20:24:38.211295 12706 net.cpp:137] Memory required for data: 1058060288
I0428 20:24:38.211298 12706 layer_factory.hpp:77] Creating layer fc7
I0428 20:24:38.211306 12706 net.cpp:84] Creating Layer fc7
I0428 20:24:38.211309 12706 net.cpp:406] fc7 <- fc6
I0428 20:24:38.211316 12706 net.cpp:380] fc7 -> fc7
I0428 20:24:38.369305 12706 net.cpp:122] Setting up fc7
I0428 20:24:38.369328 12706 net.cpp:129] Top shape: 128 4096 (524288)
I0428 20:24:38.369331 12706 net.cpp:137] Memory required for data: 1060157440
I0428 20:24:38.369340 12706 layer_factory.hpp:77] Creating layer relu7
I0428 20:24:38.369349 12706 net.cpp:84] Creating Layer relu7
I0428 20:24:38.369354 12706 net.cpp:406] relu7 <- fc7
I0428 20:24:38.369362 12706 net.cpp:367] relu7 -> fc7 (in-place)
I0428 20:24:38.375500 12706 net.cpp:122] Setting up relu7
I0428 20:24:38.375514 12706 net.cpp:129] Top shape: 128 4096 (524288)
I0428 20:24:38.375517 12706 net.cpp:137] Memory required for data: 1062254592
I0428 20:24:38.375522 12706 layer_factory.hpp:77] Creating layer drop7
I0428 20:24:38.375530 12706 net.cpp:84] Creating Layer drop7
I0428 20:24:38.375552 12706 net.cpp:406] drop7 <- fc7
I0428 20:24:38.375560 12706 net.cpp:367] drop7 -> fc7 (in-place)
I0428 20:24:38.375586 12706 net.cpp:122] Setting up drop7
I0428 20:24:38.375592 12706 net.cpp:129] Top shape: 128 4096 (524288)
I0428 20:24:38.375595 12706 net.cpp:137] Memory required for data: 1064351744
I0428 20:24:38.375599 12706 layer_factory.hpp:77] Creating layer fc8
I0428 20:24:38.375608 12706 net.cpp:84] Creating Layer fc8
I0428 20:24:38.375612 12706 net.cpp:406] fc8 <- fc7
I0428 20:24:38.375617 12706 net.cpp:380] fc8 -> fc8
I0428 20:24:38.383708 12706 net.cpp:122] Setting up fc8
I0428 20:24:38.383725 12706 net.cpp:129] Top shape: 128 196 (25088)
I0428 20:24:38.383729 12706 net.cpp:137] Memory required for data: 1064452096
I0428 20:24:38.383738 12706 layer_factory.hpp:77] Creating layer loss
I0428 20:24:38.383746 12706 net.cpp:84] Creating Layer loss
I0428 20:24:38.383751 12706 net.cpp:406] loss <- fc8
I0428 20:24:38.383757 12706 net.cpp:406] loss <- label
I0428 20:24:38.383765 12706 net.cpp:380] loss -> loss
I0428 20:24:38.383776 12706 layer_factory.hpp:77] Creating layer loss
I0428 20:24:38.384476 12706 net.cpp:122] Setting up loss
I0428 20:24:38.384508 12706 net.cpp:129] Top shape: (1)
I0428 20:24:38.384516 12706 net.cpp:132] with loss weight 1
I0428 20:24:38.384534 12706 net.cpp:137] Memory required for data: 1064452100
I0428 20:24:38.384538 12706 net.cpp:198] loss needs backward computation.
I0428 20:24:38.384546 12706 net.cpp:198] fc8 needs backward computation.
I0428 20:24:38.384550 12706 net.cpp:198] drop7 needs backward computation.
I0428 20:24:38.384554 12706 net.cpp:198] relu7 needs backward computation.
I0428 20:24:38.384558 12706 net.cpp:198] fc7 needs backward computation.
I0428 20:24:38.384563 12706 net.cpp:198] drop6 needs backward computation.
I0428 20:24:38.384567 12706 net.cpp:198] relu6 needs backward computation.
I0428 20:24:38.384572 12706 net.cpp:198] fc6 needs backward computation.
I0428 20:24:38.384577 12706 net.cpp:198] pool5 needs backward computation.
I0428 20:24:38.384580 12706 net.cpp:198] relu5 needs backward computation.
I0428 20:24:38.384585 12706 net.cpp:198] conv5 needs backward computation.
I0428 20:24:38.384589 12706 net.cpp:198] relu4 needs backward computation.
I0428 20:24:38.384593 12706 net.cpp:198] conv4 needs backward computation.
I0428 20:24:38.384598 12706 net.cpp:198] relu3 needs backward computation.
I0428 20:24:38.384603 12706 net.cpp:198] conv3 needs backward computation.
I0428 20:24:38.384606 12706 net.cpp:198] pool2 needs backward computation.
I0428 20:24:38.384610 12706 net.cpp:198] norm2 needs backward computation.
I0428 20:24:38.384614 12706 net.cpp:198] relu2 needs backward computation.
I0428 20:24:38.384619 12706 net.cpp:198] conv2 needs backward computation.
I0428 20:24:38.384624 12706 net.cpp:198] pool1 needs backward computation.
I0428 20:24:38.384627 12706 net.cpp:198] norm1 needs backward computation.
I0428 20:24:38.384631 12706 net.cpp:198] relu1 needs backward computation.
I0428 20:24:38.384636 12706 net.cpp:198] conv1 needs backward computation.
I0428 20:24:38.384641 12706 net.cpp:200] train-data does not need backward computation.
I0428 20:24:38.384645 12706 net.cpp:242] This network produces output loss
I0428 20:24:38.384660 12706 net.cpp:255] Network initialization done.
I0428 20:24:38.385149 12706 solver.cpp:172] Creating test net (#0) specified by net file: train_val.prototxt
I0428 20:24:38.385179 12706 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer train-data
I0428 20:24:38.385319 12706 net.cpp:51] Initializing net from parameters:
state {
phase: TEST
}
layer {
name: "val-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/val_db"
batch_size: 32
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
I0428 20:24:38.385423 12706 layer_factory.hpp:77] Creating layer val-data
I0428 20:24:38.403707 12706 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/val_db
I0428 20:24:38.403878 12706 net.cpp:84] Creating Layer val-data
I0428 20:24:38.403892 12706 net.cpp:380] val-data -> data
I0428 20:24:38.403901 12706 net.cpp:380] val-data -> label
I0428 20:24:38.403909 12706 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-MAN-3/digits/jobs/20210421-230320-902c/mean.binaryproto
I0428 20:24:38.407388 12706 data_layer.cpp:45] output data size: 32,3,227,227
I0428 20:24:38.440773 12706 net.cpp:122] Setting up val-data
I0428 20:24:38.440791 12706 net.cpp:129] Top shape: 32 3 227 227 (4946784)
I0428 20:24:38.440796 12706 net.cpp:129] Top shape: 32 (32)
I0428 20:24:38.440800 12706 net.cpp:137] Memory required for data: 19787264
I0428 20:24:38.440806 12706 layer_factory.hpp:77] Creating layer label_val-data_1_split
I0428 20:24:38.440819 12706 net.cpp:84] Creating Layer label_val-data_1_split
I0428 20:24:38.440824 12706 net.cpp:406] label_val-data_1_split <- label
I0428 20:24:38.440830 12706 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_0
I0428 20:24:38.440840 12706 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_1
I0428 20:24:38.440891 12706 net.cpp:122] Setting up label_val-data_1_split
I0428 20:24:38.440896 12706 net.cpp:129] Top shape: 32 (32)
I0428 20:24:38.440901 12706 net.cpp:129] Top shape: 32 (32)
I0428 20:24:38.440903 12706 net.cpp:137] Memory required for data: 19787520
I0428 20:24:38.440907 12706 layer_factory.hpp:77] Creating layer conv1
I0428 20:24:38.440918 12706 net.cpp:84] Creating Layer conv1
I0428 20:24:38.440922 12706 net.cpp:406] conv1 <- data
I0428 20:24:38.440928 12706 net.cpp:380] conv1 -> conv1
I0428 20:24:38.442782 12706 net.cpp:122] Setting up conv1
I0428 20:24:38.442795 12706 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0428 20:24:38.442798 12706 net.cpp:137] Memory required for data: 56958720
I0428 20:24:38.442809 12706 layer_factory.hpp:77] Creating layer relu1
I0428 20:24:38.442816 12706 net.cpp:84] Creating Layer relu1
I0428 20:24:38.442819 12706 net.cpp:406] relu1 <- conv1
I0428 20:24:38.442824 12706 net.cpp:367] relu1 -> conv1 (in-place)
I0428 20:24:38.443112 12706 net.cpp:122] Setting up relu1
I0428 20:24:38.443121 12706 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0428 20:24:38.443126 12706 net.cpp:137] Memory required for data: 94129920
I0428 20:24:38.443130 12706 layer_factory.hpp:77] Creating layer norm1
I0428 20:24:38.443140 12706 net.cpp:84] Creating Layer norm1
I0428 20:24:38.443145 12706 net.cpp:406] norm1 <- conv1
I0428 20:24:38.443150 12706 net.cpp:380] norm1 -> norm1
I0428 20:24:38.443612 12706 net.cpp:122] Setting up norm1
I0428 20:24:38.443622 12706 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0428 20:24:38.443626 12706 net.cpp:137] Memory required for data: 131301120
I0428 20:24:38.443631 12706 layer_factory.hpp:77] Creating layer pool1
I0428 20:24:38.443639 12706 net.cpp:84] Creating Layer pool1
I0428 20:24:38.443642 12706 net.cpp:406] pool1 <- norm1
I0428 20:24:38.443648 12706 net.cpp:380] pool1 -> pool1
I0428 20:24:38.443677 12706 net.cpp:122] Setting up pool1
I0428 20:24:38.443683 12706 net.cpp:129] Top shape: 32 96 27 27 (2239488)
I0428 20:24:38.443686 12706 net.cpp:137] Memory required for data: 140259072
I0428 20:24:38.443691 12706 layer_factory.hpp:77] Creating layer conv2
I0428 20:24:38.443699 12706 net.cpp:84] Creating Layer conv2
I0428 20:24:38.443704 12706 net.cpp:406] conv2 <- pool1
I0428 20:24:38.443730 12706 net.cpp:380] conv2 -> conv2
I0428 20:24:38.452816 12706 net.cpp:122] Setting up conv2
I0428 20:24:38.452836 12706 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0428 20:24:38.452839 12706 net.cpp:137] Memory required for data: 164146944
I0428 20:24:38.452852 12706 layer_factory.hpp:77] Creating layer relu2
I0428 20:24:38.452860 12706 net.cpp:84] Creating Layer relu2
I0428 20:24:38.452865 12706 net.cpp:406] relu2 <- conv2
I0428 20:24:38.452874 12706 net.cpp:367] relu2 -> conv2 (in-place)
I0428 20:24:38.453382 12706 net.cpp:122] Setting up relu2
I0428 20:24:38.453392 12706 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0428 20:24:38.453397 12706 net.cpp:137] Memory required for data: 188034816
I0428 20:24:38.453400 12706 layer_factory.hpp:77] Creating layer norm2
I0428 20:24:38.453413 12706 net.cpp:84] Creating Layer norm2
I0428 20:24:38.453418 12706 net.cpp:406] norm2 <- conv2
I0428 20:24:38.453423 12706 net.cpp:380] norm2 -> norm2
I0428 20:24:38.453948 12706 net.cpp:122] Setting up norm2
I0428 20:24:38.453958 12706 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0428 20:24:38.453961 12706 net.cpp:137] Memory required for data: 211922688
I0428 20:24:38.453966 12706 layer_factory.hpp:77] Creating layer pool2
I0428 20:24:38.453974 12706 net.cpp:84] Creating Layer pool2
I0428 20:24:38.453979 12706 net.cpp:406] pool2 <- norm2
I0428 20:24:38.453985 12706 net.cpp:380] pool2 -> pool2
I0428 20:24:38.454017 12706 net.cpp:122] Setting up pool2
I0428 20:24:38.454022 12706 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0428 20:24:38.454026 12706 net.cpp:137] Memory required for data: 217460480
I0428 20:24:38.454031 12706 layer_factory.hpp:77] Creating layer conv3
I0428 20:24:38.454041 12706 net.cpp:84] Creating Layer conv3
I0428 20:24:38.454046 12706 net.cpp:406] conv3 <- pool2
I0428 20:24:38.454052 12706 net.cpp:380] conv3 -> conv3
I0428 20:24:38.466078 12706 net.cpp:122] Setting up conv3
I0428 20:24:38.466099 12706 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 20:24:38.466104 12706 net.cpp:137] Memory required for data: 225767168
I0428 20:24:38.466118 12706 layer_factory.hpp:77] Creating layer relu3
I0428 20:24:38.466127 12706 net.cpp:84] Creating Layer relu3
I0428 20:24:38.466132 12706 net.cpp:406] relu3 <- conv3
I0428 20:24:38.466140 12706 net.cpp:367] relu3 -> conv3 (in-place)
I0428 20:24:38.466658 12706 net.cpp:122] Setting up relu3
I0428 20:24:38.466668 12706 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 20:24:38.466672 12706 net.cpp:137] Memory required for data: 234073856
I0428 20:24:38.466677 12706 layer_factory.hpp:77] Creating layer conv4
I0428 20:24:38.466691 12706 net.cpp:84] Creating Layer conv4
I0428 20:24:38.466694 12706 net.cpp:406] conv4 <- conv3
I0428 20:24:38.466703 12706 net.cpp:380] conv4 -> conv4
I0428 20:24:38.476464 12706 net.cpp:122] Setting up conv4
I0428 20:24:38.476482 12706 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 20:24:38.476517 12706 net.cpp:137] Memory required for data: 242380544
I0428 20:24:38.476527 12706 layer_factory.hpp:77] Creating layer relu4
I0428 20:24:38.476538 12706 net.cpp:84] Creating Layer relu4
I0428 20:24:38.476544 12706 net.cpp:406] relu4 <- conv4
I0428 20:24:38.476557 12706 net.cpp:367] relu4 -> conv4 (in-place)
I0428 20:24:38.476909 12706 net.cpp:122] Setting up relu4
I0428 20:24:38.476917 12706 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 20:24:38.476922 12706 net.cpp:137] Memory required for data: 250687232
I0428 20:24:38.476927 12706 layer_factory.hpp:77] Creating layer conv5
I0428 20:24:38.476940 12706 net.cpp:84] Creating Layer conv5
I0428 20:24:38.476946 12706 net.cpp:406] conv5 <- conv4
I0428 20:24:38.476954 12706 net.cpp:380] conv5 -> conv5
I0428 20:24:38.486075 12706 net.cpp:122] Setting up conv5
I0428 20:24:38.486097 12706 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0428 20:24:38.486101 12706 net.cpp:137] Memory required for data: 256225024
I0428 20:24:38.486114 12706 layer_factory.hpp:77] Creating layer relu5
I0428 20:24:38.486124 12706 net.cpp:84] Creating Layer relu5
I0428 20:24:38.486147 12706 net.cpp:406] relu5 <- conv5
I0428 20:24:38.486155 12706 net.cpp:367] relu5 -> conv5 (in-place)
I0428 20:24:38.486660 12706 net.cpp:122] Setting up relu5
I0428 20:24:38.486670 12706 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0428 20:24:38.486673 12706 net.cpp:137] Memory required for data: 261762816
I0428 20:24:38.486678 12706 layer_factory.hpp:77] Creating layer pool5
I0428 20:24:38.486690 12706 net.cpp:84] Creating Layer pool5
I0428 20:24:38.486696 12706 net.cpp:406] pool5 <- conv5
I0428 20:24:38.486702 12706 net.cpp:380] pool5 -> pool5
I0428 20:24:38.486742 12706 net.cpp:122] Setting up pool5
I0428 20:24:38.486748 12706 net.cpp:129] Top shape: 32 256 6 6 (294912)
I0428 20:24:38.486752 12706 net.cpp:137] Memory required for data: 262942464
I0428 20:24:38.486755 12706 layer_factory.hpp:77] Creating layer fc6
I0428 20:24:38.486763 12706 net.cpp:84] Creating Layer fc6
I0428 20:24:38.486768 12706 net.cpp:406] fc6 <- pool5
I0428 20:24:38.486773 12706 net.cpp:380] fc6 -> fc6
I0428 20:24:38.848675 12706 net.cpp:122] Setting up fc6
I0428 20:24:38.848695 12706 net.cpp:129] Top shape: 32 4096 (131072)
I0428 20:24:38.848699 12706 net.cpp:137] Memory required for data: 263466752
I0428 20:24:38.848709 12706 layer_factory.hpp:77] Creating layer relu6
I0428 20:24:38.848717 12706 net.cpp:84] Creating Layer relu6
I0428 20:24:38.848722 12706 net.cpp:406] relu6 <- fc6
I0428 20:24:38.848731 12706 net.cpp:367] relu6 -> fc6 (in-place)
I0428 20:24:38.849573 12706 net.cpp:122] Setting up relu6
I0428 20:24:38.849583 12706 net.cpp:129] Top shape: 32 4096 (131072)
I0428 20:24:38.849587 12706 net.cpp:137] Memory required for data: 263991040
I0428 20:24:38.849592 12706 layer_factory.hpp:77] Creating layer drop6
I0428 20:24:38.849602 12706 net.cpp:84] Creating Layer drop6
I0428 20:24:38.849606 12706 net.cpp:406] drop6 <- fc6
I0428 20:24:38.849611 12706 net.cpp:367] drop6 -> fc6 (in-place)
I0428 20:24:38.849642 12706 net.cpp:122] Setting up drop6
I0428 20:24:38.849647 12706 net.cpp:129] Top shape: 32 4096 (131072)
I0428 20:24:38.849651 12706 net.cpp:137] Memory required for data: 264515328
I0428 20:24:38.849655 12706 layer_factory.hpp:77] Creating layer fc7
I0428 20:24:38.849663 12706 net.cpp:84] Creating Layer fc7
I0428 20:24:38.849668 12706 net.cpp:406] fc7 <- fc6
I0428 20:24:38.849675 12706 net.cpp:380] fc7 -> fc7
I0428 20:24:39.017043 12706 net.cpp:122] Setting up fc7
I0428 20:24:39.017064 12706 net.cpp:129] Top shape: 32 4096 (131072)
I0428 20:24:39.017069 12706 net.cpp:137] Memory required for data: 265039616
I0428 20:24:39.017081 12706 layer_factory.hpp:77] Creating layer relu7
I0428 20:24:39.017091 12706 net.cpp:84] Creating Layer relu7
I0428 20:24:39.017096 12706 net.cpp:406] relu7 <- fc7
I0428 20:24:39.017103 12706 net.cpp:367] relu7 -> fc7 (in-place)
I0428 20:24:39.017534 12706 net.cpp:122] Setting up relu7
I0428 20:24:39.017544 12706 net.cpp:129] Top shape: 32 4096 (131072)
I0428 20:24:39.017547 12706 net.cpp:137] Memory required for data: 265563904
I0428 20:24:39.017552 12706 layer_factory.hpp:77] Creating layer drop7
I0428 20:24:39.017560 12706 net.cpp:84] Creating Layer drop7
I0428 20:24:39.017563 12706 net.cpp:406] drop7 <- fc7
I0428 20:24:39.017571 12706 net.cpp:367] drop7 -> fc7 (in-place)
I0428 20:24:39.017596 12706 net.cpp:122] Setting up drop7
I0428 20:24:39.017601 12706 net.cpp:129] Top shape: 32 4096 (131072)
I0428 20:24:39.017606 12706 net.cpp:137] Memory required for data: 266088192
I0428 20:24:39.017609 12706 layer_factory.hpp:77] Creating layer fc8
I0428 20:24:39.017618 12706 net.cpp:84] Creating Layer fc8
I0428 20:24:39.017623 12706 net.cpp:406] fc8 <- fc7
I0428 20:24:39.017629 12706 net.cpp:380] fc8 -> fc8
I0428 20:24:39.025547 12706 net.cpp:122] Setting up fc8
I0428 20:24:39.025560 12706 net.cpp:129] Top shape: 32 196 (6272)
I0428 20:24:39.025564 12706 net.cpp:137] Memory required for data: 266113280
I0428 20:24:39.025571 12706 layer_factory.hpp:77] Creating layer fc8_fc8_0_split
I0428 20:24:39.025579 12706 net.cpp:84] Creating Layer fc8_fc8_0_split
I0428 20:24:39.025583 12706 net.cpp:406] fc8_fc8_0_split <- fc8
I0428 20:24:39.025609 12706 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_0
I0428 20:24:39.025617 12706 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_1
I0428 20:24:39.025652 12706 net.cpp:122] Setting up fc8_fc8_0_split
I0428 20:24:39.025657 12706 net.cpp:129] Top shape: 32 196 (6272)
I0428 20:24:39.025661 12706 net.cpp:129] Top shape: 32 196 (6272)
I0428 20:24:39.025665 12706 net.cpp:137] Memory required for data: 266163456
I0428 20:24:39.025668 12706 layer_factory.hpp:77] Creating layer accuracy
I0428 20:24:39.025676 12706 net.cpp:84] Creating Layer accuracy
I0428 20:24:39.025681 12706 net.cpp:406] accuracy <- fc8_fc8_0_split_0
I0428 20:24:39.025686 12706 net.cpp:406] accuracy <- label_val-data_1_split_0
I0428 20:24:39.025691 12706 net.cpp:380] accuracy -> accuracy
I0428 20:24:39.025699 12706 net.cpp:122] Setting up accuracy
I0428 20:24:39.025703 12706 net.cpp:129] Top shape: (1)
I0428 20:24:39.025707 12706 net.cpp:137] Memory required for data: 266163460
I0428 20:24:39.025710 12706 layer_factory.hpp:77] Creating layer loss
I0428 20:24:39.025717 12706 net.cpp:84] Creating Layer loss
I0428 20:24:39.025720 12706 net.cpp:406] loss <- fc8_fc8_0_split_1
I0428 20:24:39.025724 12706 net.cpp:406] loss <- label_val-data_1_split_1
I0428 20:24:39.025729 12706 net.cpp:380] loss -> loss
I0428 20:24:39.025736 12706 layer_factory.hpp:77] Creating layer loss
I0428 20:24:39.026389 12706 net.cpp:122] Setting up loss
I0428 20:24:39.026398 12706 net.cpp:129] Top shape: (1)
I0428 20:24:39.026402 12706 net.cpp:132] with loss weight 1
I0428 20:24:39.026410 12706 net.cpp:137] Memory required for data: 266163464
I0428 20:24:39.026414 12706 net.cpp:198] loss needs backward computation.
I0428 20:24:39.026420 12706 net.cpp:200] accuracy does not need backward computation.
I0428 20:24:39.026424 12706 net.cpp:198] fc8_fc8_0_split needs backward computation.
I0428 20:24:39.026428 12706 net.cpp:198] fc8 needs backward computation.
I0428 20:24:39.026432 12706 net.cpp:198] drop7 needs backward computation.
I0428 20:24:39.026435 12706 net.cpp:198] relu7 needs backward computation.
I0428 20:24:39.026438 12706 net.cpp:198] fc7 needs backward computation.
I0428 20:24:39.026441 12706 net.cpp:198] drop6 needs backward computation.
I0428 20:24:39.026445 12706 net.cpp:198] relu6 needs backward computation.
I0428 20:24:39.026448 12706 net.cpp:198] fc6 needs backward computation.
I0428 20:24:39.026453 12706 net.cpp:198] pool5 needs backward computation.
I0428 20:24:39.026455 12706 net.cpp:198] relu5 needs backward computation.
I0428 20:24:39.026459 12706 net.cpp:198] conv5 needs backward computation.
I0428 20:24:39.026463 12706 net.cpp:198] relu4 needs backward computation.
I0428 20:24:39.026466 12706 net.cpp:198] conv4 needs backward computation.
I0428 20:24:39.026470 12706 net.cpp:198] relu3 needs backward computation.
I0428 20:24:39.026473 12706 net.cpp:198] conv3 needs backward computation.
I0428 20:24:39.026477 12706 net.cpp:198] pool2 needs backward computation.
I0428 20:24:39.026480 12706 net.cpp:198] norm2 needs backward computation.
I0428 20:24:39.026484 12706 net.cpp:198] relu2 needs backward computation.
I0428 20:24:39.026487 12706 net.cpp:198] conv2 needs backward computation.
I0428 20:24:39.026491 12706 net.cpp:198] pool1 needs backward computation.
I0428 20:24:39.026494 12706 net.cpp:198] norm1 needs backward computation.
I0428 20:24:39.026499 12706 net.cpp:198] relu1 needs backward computation.
I0428 20:24:39.026501 12706 net.cpp:198] conv1 needs backward computation.
I0428 20:24:39.026505 12706 net.cpp:200] label_val-data_1_split does not need backward computation.
I0428 20:24:39.026510 12706 net.cpp:200] val-data does not need backward computation.
I0428 20:24:39.026513 12706 net.cpp:242] This network produces output accuracy
I0428 20:24:39.026516 12706 net.cpp:242] This network produces output loss
I0428 20:24:39.026533 12706 net.cpp:255] Network initialization done.
I0428 20:24:39.026602 12706 solver.cpp:56] Solver scaffolding done.
I0428 20:24:39.027030 12706 caffe.cpp:248] Starting Optimization
I0428 20:24:39.027040 12706 solver.cpp:272] Solving
I0428 20:24:39.027052 12706 solver.cpp:273] Learning Rate Policy: step
I0428 20:24:39.028324 12706 solver.cpp:330] Iteration 0, Testing net (#0)
I0428 20:24:39.028334 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:24:39.172694 12706 blocking_queue.cpp:49] Waiting for data
I0428 20:24:43.425897 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:24:43.471487 12706 solver.cpp:397] Test net output #0: accuracy = 0.00735294
I0428 20:24:43.471521 12706 solver.cpp:397] Test net output #1: loss = 5.27761 (* 1 = 5.27761 loss)
I0428 20:24:43.563624 12706 solver.cpp:218] Iteration 0 (1.2016e+37 iter/s, 4.53635s/12 iters), loss = 5.29143
I0428 20:24:43.563663 12706 solver.cpp:237] Train net output #0: loss = 5.29143 (* 1 = 5.29143 loss)
I0428 20:24:43.563683 12706 sgd_solver.cpp:105] Iteration 0, lr = 0.01
I0428 20:24:47.506299 12706 solver.cpp:218] Iteration 12 (3.04379 iter/s, 3.94246s/12 iters), loss = 5.28218
I0428 20:24:47.506345 12706 solver.cpp:237] Train net output #0: loss = 5.28218 (* 1 = 5.28218 loss)
I0428 20:24:47.506356 12706 sgd_solver.cpp:105] Iteration 12, lr = 0.01
I0428 20:24:52.480757 12706 solver.cpp:218] Iteration 24 (2.41245 iter/s, 4.9742s/12 iters), loss = 5.28606
I0428 20:24:52.480801 12706 solver.cpp:237] Train net output #0: loss = 5.28606 (* 1 = 5.28606 loss)
I0428 20:24:52.480811 12706 sgd_solver.cpp:105] Iteration 24, lr = 0.01
I0428 20:24:57.515647 12706 solver.cpp:218] Iteration 36 (2.38349 iter/s, 5.03463s/12 iters), loss = 5.30909
I0428 20:24:57.515697 12706 solver.cpp:237] Train net output #0: loss = 5.30909 (* 1 = 5.30909 loss)
I0428 20:24:57.515707 12706 sgd_solver.cpp:105] Iteration 36, lr = 0.01
I0428 20:25:02.636133 12706 solver.cpp:218] Iteration 48 (2.34365 iter/s, 5.12022s/12 iters), loss = 5.27138
I0428 20:25:02.636174 12706 solver.cpp:237] Train net output #0: loss = 5.27138 (* 1 = 5.27138 loss)
I0428 20:25:02.636183 12706 sgd_solver.cpp:105] Iteration 48, lr = 0.01
I0428 20:25:07.701527 12706 solver.cpp:218] Iteration 60 (2.36914 iter/s, 5.06513s/12 iters), loss = 5.30027
I0428 20:25:07.701723 12706 solver.cpp:237] Train net output #0: loss = 5.30027 (* 1 = 5.30027 loss)
I0428 20:25:07.701738 12706 sgd_solver.cpp:105] Iteration 60, lr = 0.01
I0428 20:25:12.718003 12706 solver.cpp:218] Iteration 72 (2.39231 iter/s, 5.01607s/12 iters), loss = 5.28024
I0428 20:25:12.718039 12706 solver.cpp:237] Train net output #0: loss = 5.28024 (* 1 = 5.28024 loss)
I0428 20:25:12.718046 12706 sgd_solver.cpp:105] Iteration 72, lr = 0.01
I0428 20:25:17.712162 12706 solver.cpp:218] Iteration 84 (2.40293 iter/s, 4.99391s/12 iters), loss = 5.30474
I0428 20:25:17.712213 12706 solver.cpp:237] Train net output #0: loss = 5.30474 (* 1 = 5.30474 loss)
I0428 20:25:17.712224 12706 sgd_solver.cpp:105] Iteration 84, lr = 0.01
I0428 20:25:22.833319 12706 solver.cpp:218] Iteration 96 (2.34334 iter/s, 5.12089s/12 iters), loss = 5.28947
I0428 20:25:22.833369 12706 solver.cpp:237] Train net output #0: loss = 5.28947 (* 1 = 5.28947 loss)
I0428 20:25:22.833380 12706 sgd_solver.cpp:105] Iteration 96, lr = 0.01
I0428 20:25:24.573843 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:25:24.885397 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_102.caffemodel
I0428 20:25:27.909919 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_102.solverstate
I0428 20:25:30.310729 12706 solver.cpp:330] Iteration 102, Testing net (#0)
I0428 20:25:30.310747 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:25:34.713126 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:25:34.801100 12706 solver.cpp:397] Test net output #0: accuracy = 0.00245098
I0428 20:25:34.801136 12706 solver.cpp:397] Test net output #1: loss = 5.28996 (* 1 = 5.28996 loss)
I0428 20:25:36.679585 12706 solver.cpp:218] Iteration 108 (0.866699 iter/s, 13.8456s/12 iters), loss = 5.27092
I0428 20:25:36.679620 12706 solver.cpp:237] Train net output #0: loss = 5.27092 (* 1 = 5.27092 loss)
I0428 20:25:36.679627 12706 sgd_solver.cpp:105] Iteration 108, lr = 0.01
I0428 20:25:41.862619 12706 solver.cpp:218] Iteration 120 (2.31537 iter/s, 5.18277s/12 iters), loss = 5.27569
I0428 20:25:41.862805 12706 solver.cpp:237] Train net output #0: loss = 5.27569 (* 1 = 5.27569 loss)
I0428 20:25:41.862820 12706 sgd_solver.cpp:105] Iteration 120, lr = 0.01
I0428 20:25:46.853380 12706 solver.cpp:218] Iteration 132 (2.40463 iter/s, 4.99036s/12 iters), loss = 5.26697
I0428 20:25:46.853420 12706 solver.cpp:237] Train net output #0: loss = 5.26697 (* 1 = 5.26697 loss)
I0428 20:25:46.853427 12706 sgd_solver.cpp:105] Iteration 132, lr = 0.01
I0428 20:25:51.938024 12706 solver.cpp:218] Iteration 144 (2.36017 iter/s, 5.08438s/12 iters), loss = 5.29419
I0428 20:25:51.938062 12706 solver.cpp:237] Train net output #0: loss = 5.29419 (* 1 = 5.29419 loss)
I0428 20:25:51.938069 12706 sgd_solver.cpp:105] Iteration 144, lr = 0.01
I0428 20:25:57.268515 12706 solver.cpp:218] Iteration 156 (2.25133 iter/s, 5.33019s/12 iters), loss = 5.2991
I0428 20:25:57.268563 12706 solver.cpp:237] Train net output #0: loss = 5.2991 (* 1 = 5.2991 loss)
I0428 20:25:57.268573 12706 sgd_solver.cpp:105] Iteration 156, lr = 0.01
I0428 20:26:02.264807 12706 solver.cpp:218] Iteration 168 (2.40191 iter/s, 4.99603s/12 iters), loss = 5.28162
I0428 20:26:02.264842 12706 solver.cpp:237] Train net output #0: loss = 5.28162 (* 1 = 5.28162 loss)
I0428 20:26:02.264851 12706 sgd_solver.cpp:105] Iteration 168, lr = 0.01
I0428 20:26:07.359362 12706 solver.cpp:218] Iteration 180 (2.35558 iter/s, 5.0943s/12 iters), loss = 5.28621
I0428 20:26:07.359401 12706 solver.cpp:237] Train net output #0: loss = 5.28621 (* 1 = 5.28621 loss)
I0428 20:26:07.359408 12706 sgd_solver.cpp:105] Iteration 180, lr = 0.01
I0428 20:26:12.392686 12706 solver.cpp:218] Iteration 192 (2.38424 iter/s, 5.03306s/12 iters), loss = 5.25477
I0428 20:26:12.392802 12706 solver.cpp:237] Train net output #0: loss = 5.25477 (* 1 = 5.25477 loss)
I0428 20:26:12.392813 12706 sgd_solver.cpp:105] Iteration 192, lr = 0.01
I0428 20:26:16.249233 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:26:16.919828 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_204.caffemodel
I0428 20:26:21.124182 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_204.solverstate
I0428 20:26:23.422494 12706 solver.cpp:330] Iteration 204, Testing net (#0)
I0428 20:26:23.422516 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:26:27.652863 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:26:27.779374 12706 solver.cpp:397] Test net output #0: accuracy = 0.00735294
I0428 20:26:27.779402 12706 solver.cpp:397] Test net output #1: loss = 5.24657 (* 1 = 5.24657 loss)
I0428 20:26:27.870724 12706 solver.cpp:218] Iteration 204 (0.77533 iter/s, 15.4773s/12 iters), loss = 5.19937
I0428 20:26:27.870766 12706 solver.cpp:237] Train net output #0: loss = 5.19937 (* 1 = 5.19937 loss)
I0428 20:26:27.870774 12706 sgd_solver.cpp:105] Iteration 204, lr = 0.01
I0428 20:26:31.980767 12706 solver.cpp:218] Iteration 216 (2.91984 iter/s, 4.10982s/12 iters), loss = 5.22281
I0428 20:26:31.980803 12706 solver.cpp:237] Train net output #0: loss = 5.22281 (* 1 = 5.22281 loss)
I0428 20:26:31.980810 12706 sgd_solver.cpp:105] Iteration 216, lr = 0.01
I0428 20:26:37.062791 12706 solver.cpp:218] Iteration 228 (2.36139 iter/s, 5.08176s/12 iters), loss = 5.1616
I0428 20:26:37.062831 12706 solver.cpp:237] Train net output #0: loss = 5.1616 (* 1 = 5.1616 loss)
I0428 20:26:37.062839 12706 sgd_solver.cpp:105] Iteration 228, lr = 0.01
I0428 20:26:42.092401 12706 solver.cpp:218] Iteration 240 (2.38599 iter/s, 5.02935s/12 iters), loss = 5.2643
I0428 20:26:42.092442 12706 solver.cpp:237] Train net output #0: loss = 5.2643 (* 1 = 5.2643 loss)
I0428 20:26:42.092449 12706 sgd_solver.cpp:105] Iteration 240, lr = 0.01
I0428 20:26:47.232293 12706 solver.cpp:218] Iteration 252 (2.3348 iter/s, 5.13962s/12 iters), loss = 5.18511
I0428 20:26:47.232461 12706 solver.cpp:237] Train net output #0: loss = 5.18511 (* 1 = 5.18511 loss)
I0428 20:26:47.232470 12706 sgd_solver.cpp:105] Iteration 252, lr = 0.01
I0428 20:26:52.256480 12706 solver.cpp:218] Iteration 264 (2.38863 iter/s, 5.02379s/12 iters), loss = 5.17006
I0428 20:26:52.256559 12706 solver.cpp:237] Train net output #0: loss = 5.17006 (* 1 = 5.17006 loss)
I0428 20:26:52.256572 12706 sgd_solver.cpp:105] Iteration 264, lr = 0.01
I0428 20:26:57.224998 12706 solver.cpp:218] Iteration 276 (2.41535 iter/s, 4.96823s/12 iters), loss = 5.19768
I0428 20:26:57.225039 12706 solver.cpp:237] Train net output #0: loss = 5.19768 (* 1 = 5.19768 loss)
I0428 20:26:57.225045 12706 sgd_solver.cpp:105] Iteration 276, lr = 0.01
I0428 20:27:02.215555 12706 solver.cpp:218] Iteration 288 (2.40467 iter/s, 4.9903s/12 iters), loss = 5.24911
I0428 20:27:02.215596 12706 solver.cpp:237] Train net output #0: loss = 5.24911 (* 1 = 5.24911 loss)
I0428 20:27:02.215605 12706 sgd_solver.cpp:105] Iteration 288, lr = 0.01
I0428 20:27:07.228657 12706 solver.cpp:218] Iteration 300 (2.39385 iter/s, 5.01284s/12 iters), loss = 5.26008
I0428 20:27:07.228695 12706 solver.cpp:237] Train net output #0: loss = 5.26008 (* 1 = 5.26008 loss)
I0428 20:27:07.228703 12706 sgd_solver.cpp:105] Iteration 300, lr = 0.01
I0428 20:27:08.340306 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:27:09.450397 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_306.caffemodel
I0428 20:27:12.481426 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_306.solverstate
I0428 20:27:14.887609 12706 solver.cpp:330] Iteration 306, Testing net (#0)
I0428 20:27:14.887629 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:27:19.088402 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:27:19.373683 12706 solver.cpp:397] Test net output #0: accuracy = 0.00428922
I0428 20:27:19.373718 12706 solver.cpp:397] Test net output #1: loss = 5.16288 (* 1 = 5.16288 loss)
I0428 20:27:21.276240 12706 solver.cpp:218] Iteration 312 (0.854278 iter/s, 14.0469s/12 iters), loss = 5.21281
I0428 20:27:21.276280 12706 solver.cpp:237] Train net output #0: loss = 5.21281 (* 1 = 5.21281 loss)
I0428 20:27:21.276288 12706 sgd_solver.cpp:105] Iteration 312, lr = 0.01
I0428 20:27:26.330848 12706 solver.cpp:218] Iteration 324 (2.3742 iter/s, 5.05434s/12 iters), loss = 5.16757
I0428 20:27:26.330886 12706 solver.cpp:237] Train net output #0: loss = 5.16757 (* 1 = 5.16757 loss)
I0428 20:27:26.330894 12706 sgd_solver.cpp:105] Iteration 324, lr = 0.01
I0428 20:27:31.394845 12706 solver.cpp:218] Iteration 336 (2.36979 iter/s, 5.06374s/12 iters), loss = 5.20487
I0428 20:27:31.394884 12706 solver.cpp:237] Train net output #0: loss = 5.20487 (* 1 = 5.20487 loss)
I0428 20:27:31.394892 12706 sgd_solver.cpp:105] Iteration 336, lr = 0.01
I0428 20:27:36.374569 12706 solver.cpp:218] Iteration 348 (2.4099 iter/s, 4.97945s/12 iters), loss = 5.13828
I0428 20:27:36.374619 12706 solver.cpp:237] Train net output #0: loss = 5.13828 (* 1 = 5.13828 loss)
I0428 20:27:36.374629 12706 sgd_solver.cpp:105] Iteration 348, lr = 0.01
I0428 20:27:41.390556 12706 solver.cpp:218] Iteration 360 (2.39248 iter/s, 5.01571s/12 iters), loss = 5.13826
I0428 20:27:41.390605 12706 solver.cpp:237] Train net output #0: loss = 5.13826 (* 1 = 5.13826 loss)
I0428 20:27:41.390614 12706 sgd_solver.cpp:105] Iteration 360, lr = 0.01
I0428 20:27:46.346837 12706 solver.cpp:218] Iteration 372 (2.4213 iter/s, 4.95601s/12 iters), loss = 5.18795
I0428 20:27:46.346879 12706 solver.cpp:237] Train net output #0: loss = 5.18795 (* 1 = 5.18795 loss)
I0428 20:27:46.346887 12706 sgd_solver.cpp:105] Iteration 372, lr = 0.01
I0428 20:27:51.564446 12706 solver.cpp:218] Iteration 384 (2.30002 iter/s, 5.21734s/12 iters), loss = 5.16921
I0428 20:27:51.564589 12706 solver.cpp:237] Train net output #0: loss = 5.16921 (* 1 = 5.16921 loss)
I0428 20:27:51.564604 12706 sgd_solver.cpp:105] Iteration 384, lr = 0.01
I0428 20:27:56.503535 12706 solver.cpp:218] Iteration 396 (2.42978 iter/s, 4.93872s/12 iters), loss = 5.09856
I0428 20:27:56.503579 12706 solver.cpp:237] Train net output #0: loss = 5.09856 (* 1 = 5.09856 loss)
I0428 20:27:56.503590 12706 sgd_solver.cpp:105] Iteration 396, lr = 0.01
I0428 20:27:59.631443 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:28:01.055797 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_408.caffemodel
I0428 20:28:05.336104 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_408.solverstate
I0428 20:28:08.797045 12706 solver.cpp:330] Iteration 408, Testing net (#0)
I0428 20:28:08.797070 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:28:13.182163 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:28:13.389407 12706 solver.cpp:397] Test net output #0: accuracy = 0.00980392
I0428 20:28:13.389436 12706 solver.cpp:397] Test net output #1: loss = 5.12893 (* 1 = 5.12893 loss)
I0428 20:28:13.477572 12706 solver.cpp:218] Iteration 408 (0.706994 iter/s, 16.9733s/12 iters), loss = 5.2037
I0428 20:28:13.477612 12706 solver.cpp:237] Train net output #0: loss = 5.2037 (* 1 = 5.2037 loss)
I0428 20:28:13.477619 12706 sgd_solver.cpp:105] Iteration 408, lr = 0.01
I0428 20:28:17.713482 12706 solver.cpp:218] Iteration 420 (2.83307 iter/s, 4.23568s/12 iters), loss = 5.01731
I0428 20:28:17.713519 12706 solver.cpp:237] Train net output #0: loss = 5.01731 (* 1 = 5.01731 loss)
I0428 20:28:17.713527 12706 sgd_solver.cpp:105] Iteration 420, lr = 0.01
I0428 20:28:22.804672 12706 solver.cpp:218] Iteration 432 (2.35713 iter/s, 5.09093s/12 iters), loss = 4.99204
I0428 20:28:22.804774 12706 solver.cpp:237] Train net output #0: loss = 4.99204 (* 1 = 4.99204 loss)
I0428 20:28:22.804783 12706 sgd_solver.cpp:105] Iteration 432, lr = 0.01
I0428 20:28:27.718452 12706 solver.cpp:218] Iteration 444 (2.44227 iter/s, 4.91346s/12 iters), loss = 5.15477
I0428 20:28:27.718497 12706 solver.cpp:237] Train net output #0: loss = 5.15477 (* 1 = 5.15477 loss)
I0428 20:28:27.718504 12706 sgd_solver.cpp:105] Iteration 444, lr = 0.01
I0428 20:28:32.704577 12706 solver.cpp:218] Iteration 456 (2.40681 iter/s, 4.98586s/12 iters), loss = 5.1028
I0428 20:28:32.704617 12706 solver.cpp:237] Train net output #0: loss = 5.1028 (* 1 = 5.1028 loss)
I0428 20:28:32.704624 12706 sgd_solver.cpp:105] Iteration 456, lr = 0.01
I0428 20:28:37.824165 12706 solver.cpp:218] Iteration 468 (2.34406 iter/s, 5.11932s/12 iters), loss = 5.08886
I0428 20:28:37.824198 12706 solver.cpp:237] Train net output #0: loss = 5.08886 (* 1 = 5.08886 loss)
I0428 20:28:37.824205 12706 sgd_solver.cpp:105] Iteration 468, lr = 0.01
I0428 20:28:42.842960 12706 solver.cpp:218] Iteration 480 (2.39114 iter/s, 5.01853s/12 iters), loss = 4.99492
I0428 20:28:42.843012 12706 solver.cpp:237] Train net output #0: loss = 4.99492 (* 1 = 4.99492 loss)
I0428 20:28:42.843024 12706 sgd_solver.cpp:105] Iteration 480, lr = 0.01
I0428 20:28:47.788138 12706 solver.cpp:218] Iteration 492 (2.42674 iter/s, 4.9449s/12 iters), loss = 5.12262
I0428 20:28:47.788185 12706 solver.cpp:237] Train net output #0: loss = 5.12262 (* 1 = 5.12262 loss)
I0428 20:28:47.788194 12706 sgd_solver.cpp:105] Iteration 492, lr = 0.01
I0428 20:28:52.835749 12706 solver.cpp:218] Iteration 504 (2.37749 iter/s, 5.04734s/12 iters), loss = 5.08483
I0428 20:28:52.835867 12706 solver.cpp:237] Train net output #0: loss = 5.08483 (* 1 = 5.08483 loss)
I0428 20:28:52.835880 12706 sgd_solver.cpp:105] Iteration 504, lr = 0.01
I0428 20:28:53.087641 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:28:54.850297 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_510.caffemodel
I0428 20:29:03.658319 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_510.solverstate
I0428 20:29:06.009297 12706 solver.cpp:330] Iteration 510, Testing net (#0)
I0428 20:29:06.009322 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:29:10.149941 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:29:10.394140 12706 solver.cpp:397] Test net output #0: accuracy = 0.0153186
I0428 20:29:10.394173 12706 solver.cpp:397] Test net output #1: loss = 5.05344 (* 1 = 5.05344 loss)
I0428 20:29:12.191030 12706 solver.cpp:218] Iteration 516 (0.620016 iter/s, 19.3543s/12 iters), loss = 4.94848
I0428 20:29:12.191066 12706 solver.cpp:237] Train net output #0: loss = 4.94848 (* 1 = 4.94848 loss)
I0428 20:29:12.191074 12706 sgd_solver.cpp:105] Iteration 516, lr = 0.01
I0428 20:29:17.293308 12706 solver.cpp:218] Iteration 528 (2.35201 iter/s, 5.10202s/12 iters), loss = 5.04661
I0428 20:29:17.293344 12706 solver.cpp:237] Train net output #0: loss = 5.04661 (* 1 = 5.04661 loss)
I0428 20:29:17.293352 12706 sgd_solver.cpp:105] Iteration 528, lr = 0.01
I0428 20:29:22.481540 12706 solver.cpp:218] Iteration 540 (2.31304 iter/s, 5.18797s/12 iters), loss = 5.03488
I0428 20:29:22.481575 12706 solver.cpp:237] Train net output #0: loss = 5.03488 (* 1 = 5.03488 loss)
I0428 20:29:22.481581 12706 sgd_solver.cpp:105] Iteration 540, lr = 0.01
I0428 20:29:27.648321 12706 solver.cpp:218] Iteration 552 (2.32265 iter/s, 5.16652s/12 iters), loss = 5.10945
I0428 20:29:27.648454 12706 solver.cpp:237] Train net output #0: loss = 5.10945 (* 1 = 5.10945 loss)
I0428 20:29:27.648464 12706 sgd_solver.cpp:105] Iteration 552, lr = 0.01
I0428 20:29:32.661311 12706 solver.cpp:218] Iteration 564 (2.39395 iter/s, 5.01264s/12 iters), loss = 5.04825
I0428 20:29:32.661350 12706 solver.cpp:237] Train net output #0: loss = 5.04825 (* 1 = 5.04825 loss)
I0428 20:29:32.661356 12706 sgd_solver.cpp:105] Iteration 564, lr = 0.01
I0428 20:29:37.539175 12706 solver.cpp:218] Iteration 576 (2.46022 iter/s, 4.8776s/12 iters), loss = 4.97921
I0428 20:29:37.539223 12706 solver.cpp:237] Train net output #0: loss = 4.97921 (* 1 = 4.97921 loss)
I0428 20:29:37.539233 12706 sgd_solver.cpp:105] Iteration 576, lr = 0.01
I0428 20:29:42.607089 12706 solver.cpp:218] Iteration 588 (2.36797 iter/s, 5.06764s/12 iters), loss = 4.97785
I0428 20:29:42.607133 12706 solver.cpp:237] Train net output #0: loss = 4.97785 (* 1 = 4.97785 loss)
I0428 20:29:42.607142 12706 sgd_solver.cpp:105] Iteration 588, lr = 0.01
I0428 20:29:47.757767 12706 solver.cpp:218] Iteration 600 (2.32991 iter/s, 5.1504s/12 iters), loss = 5.06965
I0428 20:29:47.757820 12706 solver.cpp:237] Train net output #0: loss = 5.06965 (* 1 = 5.06965 loss)
I0428 20:29:47.757831 12706 sgd_solver.cpp:105] Iteration 600, lr = 0.01
I0428 20:29:50.119458 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:29:52.283453 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_612.caffemodel
I0428 20:29:58.030861 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_612.solverstate
I0428 20:30:02.123643 12706 solver.cpp:330] Iteration 612, Testing net (#0)
I0428 20:30:02.123662 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:30:06.255966 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:30:06.544785 12706 solver.cpp:397] Test net output #0: accuracy = 0.0226716
I0428 20:30:06.544812 12706 solver.cpp:397] Test net output #1: loss = 5.02599 (* 1 = 5.02599 loss)
I0428 20:30:06.635952 12706 solver.cpp:218] Iteration 612 (0.635683 iter/s, 18.8773s/12 iters), loss = 5.0629
I0428 20:30:06.635991 12706 solver.cpp:237] Train net output #0: loss = 5.0629 (* 1 = 5.0629 loss)
I0428 20:30:06.635999 12706 sgd_solver.cpp:105] Iteration 612, lr = 0.01
I0428 20:30:10.833750 12706 solver.cpp:218] Iteration 624 (2.8588 iter/s, 4.19756s/12 iters), loss = 5.12492
I0428 20:30:10.833796 12706 solver.cpp:237] Train net output #0: loss = 5.12492 (* 1 = 5.12492 loss)
I0428 20:30:10.833806 12706 sgd_solver.cpp:105] Iteration 624, lr = 0.01
I0428 20:30:16.073029 12706 solver.cpp:218] Iteration 636 (2.29052 iter/s, 5.239s/12 iters), loss = 5.08846
I0428 20:30:16.073072 12706 solver.cpp:237] Train net output #0: loss = 5.08846 (* 1 = 5.08846 loss)
I0428 20:30:16.073082 12706 sgd_solver.cpp:105] Iteration 636, lr = 0.01
I0428 20:30:21.380877 12706 solver.cpp:218] Iteration 648 (2.26092 iter/s, 5.30757s/12 iters), loss = 5.01197
I0428 20:30:21.380913 12706 solver.cpp:237] Train net output #0: loss = 5.01197 (* 1 = 5.01197 loss)
I0428 20:30:21.380919 12706 sgd_solver.cpp:105] Iteration 648, lr = 0.01
I0428 20:30:26.564358 12706 solver.cpp:218] Iteration 660 (2.31517 iter/s, 5.18321s/12 iters), loss = 5.00125
I0428 20:30:26.564406 12706 solver.cpp:237] Train net output #0: loss = 5.00125 (* 1 = 5.00125 loss)
I0428 20:30:26.564416 12706 sgd_solver.cpp:105] Iteration 660, lr = 0.01
I0428 20:30:31.599032 12706 solver.cpp:218] Iteration 672 (2.3836 iter/s, 5.03441s/12 iters), loss = 4.94989
I0428 20:30:31.599155 12706 solver.cpp:237] Train net output #0: loss = 4.94989 (* 1 = 4.94989 loss)
I0428 20:30:31.599164 12706 sgd_solver.cpp:105] Iteration 672, lr = 0.01
I0428 20:30:36.648258 12706 solver.cpp:218] Iteration 684 (2.37677 iter/s, 5.04888s/12 iters), loss = 4.86938
I0428 20:30:36.648301 12706 solver.cpp:237] Train net output #0: loss = 4.86938 (* 1 = 4.86938 loss)
I0428 20:30:36.648310 12706 sgd_solver.cpp:105] Iteration 684, lr = 0.01
I0428 20:30:37.421749 12706 blocking_queue.cpp:49] Waiting for data
I0428 20:30:41.595868 12706 solver.cpp:218] Iteration 696 (2.42554 iter/s, 4.94735s/12 iters), loss = 4.94811
I0428 20:30:41.595907 12706 solver.cpp:237] Train net output #0: loss = 4.94811 (* 1 = 4.94811 loss)
I0428 20:30:41.595916 12706 sgd_solver.cpp:105] Iteration 696, lr = 0.01
I0428 20:30:46.295594 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:30:46.673637 12706 solver.cpp:218] Iteration 708 (2.36337 iter/s, 5.0775s/12 iters), loss = 4.98244
I0428 20:30:46.673676 12706 solver.cpp:237] Train net output #0: loss = 4.98244 (* 1 = 4.98244 loss)
I0428 20:30:46.673683 12706 sgd_solver.cpp:105] Iteration 708, lr = 0.01
I0428 20:30:48.692042 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_714.caffemodel
I0428 20:30:53.717756 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_714.solverstate
I0428 20:30:57.728801 12706 solver.cpp:330] Iteration 714, Testing net (#0)
I0428 20:30:57.728821 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:31:01.829661 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:31:02.151881 12706 solver.cpp:397] Test net output #0: accuracy = 0.0330882
I0428 20:31:02.151917 12706 solver.cpp:397] Test net output #1: loss = 4.93847 (* 1 = 4.93847 loss)
I0428 20:31:04.152532 12706 solver.cpp:218] Iteration 720 (0.686573 iter/s, 17.4781s/12 iters), loss = 4.92116
I0428 20:31:04.152570 12706 solver.cpp:237] Train net output #0: loss = 4.92116 (* 1 = 4.92116 loss)
I0428 20:31:04.152577 12706 sgd_solver.cpp:105] Iteration 720, lr = 0.01
I0428 20:31:09.293656 12706 solver.cpp:218] Iteration 732 (2.33424 iter/s, 5.14085s/12 iters), loss = 4.90308
I0428 20:31:09.293697 12706 solver.cpp:237] Train net output #0: loss = 4.90308 (* 1 = 4.90308 loss)
I0428 20:31:09.293704 12706 sgd_solver.cpp:105] Iteration 732, lr = 0.01
I0428 20:31:14.268214 12706 solver.cpp:218] Iteration 744 (2.4124 iter/s, 4.97429s/12 iters), loss = 4.91538
I0428 20:31:14.268251 12706 solver.cpp:237] Train net output #0: loss = 4.91538 (* 1 = 4.91538 loss)
I0428 20:31:14.268260 12706 sgd_solver.cpp:105] Iteration 744, lr = 0.01
I0428 20:31:19.345777 12706 solver.cpp:218] Iteration 756 (2.36346 iter/s, 5.0773s/12 iters), loss = 4.84513
I0428 20:31:19.345813 12706 solver.cpp:237] Train net output #0: loss = 4.84513 (* 1 = 4.84513 loss)
I0428 20:31:19.345819 12706 sgd_solver.cpp:105] Iteration 756, lr = 0.01
I0428 20:31:24.304118 12706 solver.cpp:218] Iteration 768 (2.42029 iter/s, 4.95808s/12 iters), loss = 4.84234
I0428 20:31:24.304158 12706 solver.cpp:237] Train net output #0: loss = 4.84234 (* 1 = 4.84234 loss)
I0428 20:31:24.304167 12706 sgd_solver.cpp:105] Iteration 768, lr = 0.01
I0428 20:31:29.300989 12706 solver.cpp:218] Iteration 780 (2.40163 iter/s, 4.99661s/12 iters), loss = 4.83922
I0428 20:31:29.301028 12706 solver.cpp:237] Train net output #0: loss = 4.83922 (* 1 = 4.83922 loss)
I0428 20:31:29.301035 12706 sgd_solver.cpp:105] Iteration 780, lr = 0.01
I0428 20:31:34.263373 12706 solver.cpp:218] Iteration 792 (2.41832 iter/s, 4.96212s/12 iters), loss = 4.77975
I0428 20:31:34.263510 12706 solver.cpp:237] Train net output #0: loss = 4.77975 (* 1 = 4.77975 loss)
I0428 20:31:34.263520 12706 sgd_solver.cpp:105] Iteration 792, lr = 0.01
I0428 20:31:39.263100 12706 solver.cpp:218] Iteration 804 (2.4003 iter/s, 4.99937s/12 iters), loss = 4.8872
I0428 20:31:39.263135 12706 solver.cpp:237] Train net output #0: loss = 4.8872 (* 1 = 4.8872 loss)
I0428 20:31:39.263142 12706 sgd_solver.cpp:105] Iteration 804, lr = 0.01
I0428 20:31:41.002997 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:31:43.772437 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_816.caffemodel
I0428 20:31:46.826117 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_816.solverstate
I0428 20:31:49.767383 12706 solver.cpp:330] Iteration 816, Testing net (#0)
I0428 20:31:49.767405 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:31:54.111266 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:31:54.501572 12706 solver.cpp:397] Test net output #0: accuracy = 0.0410539
I0428 20:31:54.501601 12706 solver.cpp:397] Test net output #1: loss = 4.81085 (* 1 = 4.81085 loss)
I0428 20:31:54.592685 12706 solver.cpp:218] Iteration 816 (0.782835 iter/s, 15.3289s/12 iters), loss = 4.74097
I0428 20:31:54.592726 12706 solver.cpp:237] Train net output #0: loss = 4.74097 (* 1 = 4.74097 loss)
I0428 20:31:54.592734 12706 sgd_solver.cpp:105] Iteration 816, lr = 0.01
I0428 20:31:58.697346 12706 solver.cpp:218] Iteration 828 (2.92367 iter/s, 4.10443s/12 iters), loss = 4.77832
I0428 20:31:58.697384 12706 solver.cpp:237] Train net output #0: loss = 4.77832 (* 1 = 4.77832 loss)
I0428 20:31:58.697392 12706 sgd_solver.cpp:105] Iteration 828, lr = 0.01
I0428 20:32:03.707401 12706 solver.cpp:218] Iteration 840 (2.39531 iter/s, 5.00979s/12 iters), loss = 4.70594
I0428 20:32:03.707440 12706 solver.cpp:237] Train net output #0: loss = 4.70594 (* 1 = 4.70594 loss)
I0428 20:32:03.707448 12706 sgd_solver.cpp:105] Iteration 840, lr = 0.01
I0428 20:32:08.775341 12706 solver.cpp:218] Iteration 852 (2.36795 iter/s, 5.06767s/12 iters), loss = 4.68
I0428 20:32:08.776988 12706 solver.cpp:237] Train net output #0: loss = 4.68 (* 1 = 4.68 loss)
I0428 20:32:08.776998 12706 sgd_solver.cpp:105] Iteration 852, lr = 0.01
I0428 20:32:13.873708 12706 solver.cpp:218] Iteration 864 (2.35456 iter/s, 5.09649s/12 iters), loss = 4.6501
I0428 20:32:13.873747 12706 solver.cpp:237] Train net output #0: loss = 4.6501 (* 1 = 4.6501 loss)
I0428 20:32:13.873755 12706 sgd_solver.cpp:105] Iteration 864, lr = 0.01
I0428 20:32:18.861845 12706 solver.cpp:218] Iteration 876 (2.40584 iter/s, 4.98787s/12 iters), loss = 4.61561
I0428 20:32:18.861893 12706 solver.cpp:237] Train net output #0: loss = 4.61561 (* 1 = 4.61561 loss)
I0428 20:32:18.861903 12706 sgd_solver.cpp:105] Iteration 876, lr = 0.01
I0428 20:32:23.829841 12706 solver.cpp:218] Iteration 888 (2.41559 iter/s, 4.96773s/12 iters), loss = 4.59019
I0428 20:32:23.829879 12706 solver.cpp:237] Train net output #0: loss = 4.59019 (* 1 = 4.59019 loss)
I0428 20:32:23.829886 12706 sgd_solver.cpp:105] Iteration 888, lr = 0.01
I0428 20:32:28.859341 12706 solver.cpp:218] Iteration 900 (2.38605 iter/s, 5.02923s/12 iters), loss = 4.60799
I0428 20:32:28.859396 12706 solver.cpp:237] Train net output #0: loss = 4.60799 (* 1 = 4.60799 loss)
I0428 20:32:28.859411 12706 sgd_solver.cpp:105] Iteration 900, lr = 0.01
I0428 20:32:32.720971 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:32:33.822727 12706 solver.cpp:218] Iteration 912 (2.41784 iter/s, 4.96311s/12 iters), loss = 4.63912
I0428 20:32:33.822782 12706 solver.cpp:237] Train net output #0: loss = 4.63912 (* 1 = 4.63912 loss)
I0428 20:32:33.822793 12706 sgd_solver.cpp:105] Iteration 912, lr = 0.01
I0428 20:32:35.833228 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_918.caffemodel
I0428 20:32:39.528564 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_918.solverstate
I0428 20:32:42.715392 12706 solver.cpp:330] Iteration 918, Testing net (#0)
I0428 20:32:42.715416 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:32:46.785113 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:32:47.221607 12706 solver.cpp:397] Test net output #0: accuracy = 0.0422794
I0428 20:32:47.221637 12706 solver.cpp:397] Test net output #1: loss = 4.71384 (* 1 = 4.71384 loss)
I0428 20:32:49.005980 12706 solver.cpp:218] Iteration 924 (0.790381 iter/s, 15.1826s/12 iters), loss = 4.53074
I0428 20:32:49.006018 12706 solver.cpp:237] Train net output #0: loss = 4.53074 (* 1 = 4.53074 loss)
I0428 20:32:49.006026 12706 sgd_solver.cpp:105] Iteration 924, lr = 0.01
I0428 20:32:54.150028 12706 solver.cpp:218] Iteration 936 (2.33292 iter/s, 5.14377s/12 iters), loss = 4.5496
I0428 20:32:54.150076 12706 solver.cpp:237] Train net output #0: loss = 4.5496 (* 1 = 4.5496 loss)
I0428 20:32:54.150086 12706 sgd_solver.cpp:105] Iteration 936, lr = 0.01
I0428 20:32:59.184612 12706 solver.cpp:218] Iteration 948 (2.38364 iter/s, 5.03431s/12 iters), loss = 4.69094
I0428 20:32:59.184649 12706 solver.cpp:237] Train net output #0: loss = 4.69094 (* 1 = 4.69094 loss)
I0428 20:32:59.184657 12706 sgd_solver.cpp:105] Iteration 948, lr = 0.01
I0428 20:33:04.265327 12706 solver.cpp:218] Iteration 960 (2.36199 iter/s, 5.08045s/12 iters), loss = 4.7689
I0428 20:33:04.265364 12706 solver.cpp:237] Train net output #0: loss = 4.7689 (* 1 = 4.7689 loss)
I0428 20:33:04.265372 12706 sgd_solver.cpp:105] Iteration 960, lr = 0.01
I0428 20:33:09.706800 12706 solver.cpp:218] Iteration 972 (2.2054 iter/s, 5.4412s/12 iters), loss = 4.50204
I0428 20:33:09.706898 12706 solver.cpp:237] Train net output #0: loss = 4.50204 (* 1 = 4.50204 loss)
I0428 20:33:09.706907 12706 sgd_solver.cpp:105] Iteration 972, lr = 0.01
I0428 20:33:14.773289 12706 solver.cpp:218] Iteration 984 (2.36865 iter/s, 5.06617s/12 iters), loss = 4.49709
I0428 20:33:14.773326 12706 solver.cpp:237] Train net output #0: loss = 4.49709 (* 1 = 4.49709 loss)
I0428 20:33:14.773334 12706 sgd_solver.cpp:105] Iteration 984, lr = 0.01
I0428 20:33:19.802474 12706 solver.cpp:218] Iteration 996 (2.38619 iter/s, 5.02893s/12 iters), loss = 4.60646
I0428 20:33:19.802507 12706 solver.cpp:237] Train net output #0: loss = 4.60646 (* 1 = 4.60646 loss)
I0428 20:33:19.802515 12706 sgd_solver.cpp:105] Iteration 996, lr = 0.01
I0428 20:33:24.755105 12706 solver.cpp:218] Iteration 1008 (2.42308 iter/s, 4.95237s/12 iters), loss = 4.60218
I0428 20:33:24.755144 12706 solver.cpp:237] Train net output #0: loss = 4.60218 (* 1 = 4.60218 loss)
I0428 20:33:24.755152 12706 sgd_solver.cpp:105] Iteration 1008, lr = 0.01
I0428 20:33:25.799695 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:33:29.311260 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1020.caffemodel
I0428 20:33:32.661221 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1020.solverstate
I0428 20:33:34.978119 12706 solver.cpp:330] Iteration 1020, Testing net (#0)
I0428 20:33:34.978142 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:33:39.084388 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:33:39.521507 12706 solver.cpp:397] Test net output #0: accuracy = 0.0563725
I0428 20:33:39.521538 12706 solver.cpp:397] Test net output #1: loss = 4.52937 (* 1 = 4.52937 loss)
I0428 20:33:39.612741 12706 solver.cpp:218] Iteration 1020 (0.807703 iter/s, 14.857s/12 iters), loss = 4.61199
I0428 20:33:39.612784 12706 solver.cpp:237] Train net output #0: loss = 4.61199 (* 1 = 4.61199 loss)
I0428 20:33:39.612792 12706 sgd_solver.cpp:105] Iteration 1020, lr = 0.01
I0428 20:33:43.794617 12706 solver.cpp:218] Iteration 1032 (2.86969 iter/s, 4.18164s/12 iters), loss = 4.76365
I0428 20:33:43.794735 12706 solver.cpp:237] Train net output #0: loss = 4.76365 (* 1 = 4.76365 loss)
I0428 20:33:43.794744 12706 sgd_solver.cpp:105] Iteration 1032, lr = 0.01
I0428 20:33:48.904567 12706 solver.cpp:218] Iteration 1044 (2.34852 iter/s, 5.1096s/12 iters), loss = 4.5452
I0428 20:33:48.904614 12706 solver.cpp:237] Train net output #0: loss = 4.5452 (* 1 = 4.5452 loss)
I0428 20:33:48.904624 12706 sgd_solver.cpp:105] Iteration 1044, lr = 0.01
I0428 20:33:54.215354 12706 solver.cpp:218] Iteration 1056 (2.25967 iter/s, 5.3105s/12 iters), loss = 4.41323
I0428 20:33:54.215404 12706 solver.cpp:237] Train net output #0: loss = 4.41323 (* 1 = 4.41323 loss)
I0428 20:33:54.215412 12706 sgd_solver.cpp:105] Iteration 1056, lr = 0.01
I0428 20:33:59.304033 12706 solver.cpp:218] Iteration 1068 (2.3583 iter/s, 5.08841s/12 iters), loss = 4.35845
I0428 20:33:59.304072 12706 solver.cpp:237] Train net output #0: loss = 4.35845 (* 1 = 4.35845 loss)
I0428 20:33:59.304080 12706 sgd_solver.cpp:105] Iteration 1068, lr = 0.01
I0428 20:34:04.378875 12706 solver.cpp:218] Iteration 1080 (2.36473 iter/s, 5.07458s/12 iters), loss = 4.42927
I0428 20:34:04.378926 12706 solver.cpp:237] Train net output #0: loss = 4.42927 (* 1 = 4.42927 loss)
I0428 20:34:04.378937 12706 sgd_solver.cpp:105] Iteration 1080, lr = 0.01
I0428 20:34:09.323598 12706 solver.cpp:218] Iteration 1092 (2.42696 iter/s, 4.94445s/12 iters), loss = 4.48657
I0428 20:34:09.323654 12706 solver.cpp:237] Train net output #0: loss = 4.48657 (* 1 = 4.48657 loss)
I0428 20:34:09.323665 12706 sgd_solver.cpp:105] Iteration 1092, lr = 0.01
I0428 20:34:14.274886 12706 solver.cpp:218] Iteration 1104 (2.42375 iter/s, 4.95101s/12 iters), loss = 4.30966
I0428 20:34:14.279783 12706 solver.cpp:237] Train net output #0: loss = 4.30966 (* 1 = 4.30966 loss)
I0428 20:34:14.279793 12706 sgd_solver.cpp:105] Iteration 1104, lr = 0.01
I0428 20:34:17.388648 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:34:19.223731 12706 solver.cpp:218] Iteration 1116 (2.42732 iter/s, 4.94373s/12 iters), loss = 4.70939
I0428 20:34:19.223779 12706 solver.cpp:237] Train net output #0: loss = 4.70939 (* 1 = 4.70939 loss)
I0428 20:34:19.223790 12706 sgd_solver.cpp:105] Iteration 1116, lr = 0.01
I0428 20:34:21.219949 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1122.caffemodel
I0428 20:34:24.484541 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1122.solverstate
I0428 20:34:26.807232 12706 solver.cpp:330] Iteration 1122, Testing net (#0)
I0428 20:34:26.807257 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:34:30.822052 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:34:31.304190 12706 solver.cpp:397] Test net output #0: accuracy = 0.0631127
I0428 20:34:31.304219 12706 solver.cpp:397] Test net output #1: loss = 4.49538 (* 1 = 4.49538 loss)
I0428 20:34:33.309934 12706 solver.cpp:218] Iteration 1128 (0.851937 iter/s, 14.0855s/12 iters), loss = 4.27545
I0428 20:34:33.309985 12706 solver.cpp:237] Train net output #0: loss = 4.27545 (* 1 = 4.27545 loss)
I0428 20:34:33.309995 12706 sgd_solver.cpp:105] Iteration 1128, lr = 0.01
I0428 20:34:38.314110 12706 solver.cpp:218] Iteration 1140 (2.39813 iter/s, 5.00391s/12 iters), loss = 4.19452
I0428 20:34:38.314146 12706 solver.cpp:237] Train net output #0: loss = 4.19452 (* 1 = 4.19452 loss)
I0428 20:34:38.314152 12706 sgd_solver.cpp:105] Iteration 1140, lr = 0.01
I0428 20:34:43.267060 12706 solver.cpp:218] Iteration 1152 (2.42293 iter/s, 4.95269s/12 iters), loss = 4.39678
I0428 20:34:43.267102 12706 solver.cpp:237] Train net output #0: loss = 4.39678 (* 1 = 4.39678 loss)
I0428 20:34:43.267112 12706 sgd_solver.cpp:105] Iteration 1152, lr = 0.01
I0428 20:34:48.223526 12706 solver.cpp:218] Iteration 1164 (2.42121 iter/s, 4.9562s/12 iters), loss = 4.34781
I0428 20:34:48.223641 12706 solver.cpp:237] Train net output #0: loss = 4.34781 (* 1 = 4.34781 loss)
I0428 20:34:48.223649 12706 sgd_solver.cpp:105] Iteration 1164, lr = 0.01
I0428 20:34:53.278055 12706 solver.cpp:218] Iteration 1176 (2.37427 iter/s, 5.05419s/12 iters), loss = 4.25122
I0428 20:34:53.278102 12706 solver.cpp:237] Train net output #0: loss = 4.25122 (* 1 = 4.25122 loss)
I0428 20:34:53.278113 12706 sgd_solver.cpp:105] Iteration 1176, lr = 0.01
I0428 20:34:58.395002 12706 solver.cpp:218] Iteration 1188 (2.34528 iter/s, 5.11667s/12 iters), loss = 4.24722
I0428 20:34:58.395051 12706 solver.cpp:237] Train net output #0: loss = 4.24722 (* 1 = 4.24722 loss)
I0428 20:34:58.395062 12706 sgd_solver.cpp:105] Iteration 1188, lr = 0.01
I0428 20:35:03.357470 12706 solver.cpp:218] Iteration 1200 (2.41828 iter/s, 4.9622s/12 iters), loss = 4.35389
I0428 20:35:03.357522 12706 solver.cpp:237] Train net output #0: loss = 4.35389 (* 1 = 4.35389 loss)
I0428 20:35:03.357532 12706 sgd_solver.cpp:105] Iteration 1200, lr = 0.01
I0428 20:35:08.479555 12706 solver.cpp:218] Iteration 1212 (2.34292 iter/s, 5.12181s/12 iters), loss = 4.18935
I0428 20:35:08.479593 12706 solver.cpp:237] Train net output #0: loss = 4.18935 (* 1 = 4.18935 loss)
I0428 20:35:08.479599 12706 sgd_solver.cpp:105] Iteration 1212, lr = 0.01
I0428 20:35:08.767515 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:35:13.061235 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1224.caffemodel
I0428 20:35:16.066633 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1224.solverstate
I0428 20:35:18.411463 12706 solver.cpp:330] Iteration 1224, Testing net (#0)
I0428 20:35:18.412057 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:35:22.346189 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:35:22.862835 12706 solver.cpp:397] Test net output #0: accuracy = 0.0925245
I0428 20:35:22.862884 12706 solver.cpp:397] Test net output #1: loss = 4.31139 (* 1 = 4.31139 loss)
I0428 20:35:22.954320 12706 solver.cpp:218] Iteration 1224 (0.829067 iter/s, 14.4741s/12 iters), loss = 4.02489
I0428 20:35:22.954391 12706 solver.cpp:237] Train net output #0: loss = 4.02489 (* 1 = 4.02489 loss)
I0428 20:35:22.954407 12706 sgd_solver.cpp:105] Iteration 1224, lr = 0.01
I0428 20:35:27.228957 12706 solver.cpp:218] Iteration 1236 (2.80742 iter/s, 4.27438s/12 iters), loss = 4.24379
I0428 20:35:27.228999 12706 solver.cpp:237] Train net output #0: loss = 4.24379 (* 1 = 4.24379 loss)
I0428 20:35:27.229007 12706 sgd_solver.cpp:105] Iteration 1236, lr = 0.01
I0428 20:35:32.255141 12706 solver.cpp:218] Iteration 1248 (2.38762 iter/s, 5.02592s/12 iters), loss = 4.46118
I0428 20:35:32.255180 12706 solver.cpp:237] Train net output #0: loss = 4.46118 (* 1 = 4.46118 loss)
I0428 20:35:32.255187 12706 sgd_solver.cpp:105] Iteration 1248, lr = 0.01
I0428 20:35:37.272578 12706 solver.cpp:218] Iteration 1260 (2.39178 iter/s, 5.01718s/12 iters), loss = 4.38455
I0428 20:35:37.272614 12706 solver.cpp:237] Train net output #0: loss = 4.38455 (* 1 = 4.38455 loss)
I0428 20:35:37.272620 12706 sgd_solver.cpp:105] Iteration 1260, lr = 0.01
I0428 20:35:42.323762 12706 solver.cpp:218] Iteration 1272 (2.3758 iter/s, 5.05092s/12 iters), loss = 4.08905
I0428 20:35:42.323803 12706 solver.cpp:237] Train net output #0: loss = 4.08905 (* 1 = 4.08905 loss)
I0428 20:35:42.323812 12706 sgd_solver.cpp:105] Iteration 1272, lr = 0.01
I0428 20:35:47.385121 12706 solver.cpp:218] Iteration 1284 (2.37103 iter/s, 5.06109s/12 iters), loss = 4.15413
I0428 20:35:47.385169 12706 solver.cpp:237] Train net output #0: loss = 4.15413 (* 1 = 4.15413 loss)
I0428 20:35:47.385179 12706 sgd_solver.cpp:105] Iteration 1284, lr = 0.01
I0428 20:35:52.328986 12706 solver.cpp:218] Iteration 1296 (2.42738 iter/s, 4.9436s/12 iters), loss = 3.97416
I0428 20:35:52.329277 12706 solver.cpp:237] Train net output #0: loss = 3.97416 (* 1 = 3.97416 loss)
I0428 20:35:52.329285 12706 sgd_solver.cpp:105] Iteration 1296, lr = 0.01
I0428 20:35:57.390738 12706 solver.cpp:218] Iteration 1308 (2.37096 iter/s, 5.06123s/12 iters), loss = 4.1531
I0428 20:35:57.390789 12706 solver.cpp:237] Train net output #0: loss = 4.1531 (* 1 = 4.1531 loss)
I0428 20:35:57.390801 12706 sgd_solver.cpp:105] Iteration 1308, lr = 0.01
I0428 20:35:59.888847 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:36:02.364836 12706 solver.cpp:218] Iteration 1320 (2.41263 iter/s, 4.97383s/12 iters), loss = 4.2542
I0428 20:36:02.364876 12706 solver.cpp:237] Train net output #0: loss = 4.2542 (* 1 = 4.2542 loss)
I0428 20:36:02.364886 12706 sgd_solver.cpp:105] Iteration 1320, lr = 0.01
I0428 20:36:04.379997 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1326.caffemodel
I0428 20:36:07.432463 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1326.solverstate
I0428 20:36:10.193336 12706 solver.cpp:330] Iteration 1326, Testing net (#0)
I0428 20:36:10.193356 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:36:14.007611 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:36:14.641646 12706 solver.cpp:397] Test net output #0: accuracy = 0.0955882
I0428 20:36:14.641674 12706 solver.cpp:397] Test net output #1: loss = 4.21935 (* 1 = 4.21935 loss)
I0428 20:36:16.456501 12706 solver.cpp:218] Iteration 1332 (0.851607 iter/s, 14.091s/12 iters), loss = 4.19503
I0428 20:36:16.456540 12706 solver.cpp:237] Train net output #0: loss = 4.19503 (* 1 = 4.19503 loss)
I0428 20:36:16.456547 12706 sgd_solver.cpp:105] Iteration 1332, lr = 0.01
I0428 20:36:21.523468 12706 solver.cpp:218] Iteration 1344 (2.36841 iter/s, 5.0667s/12 iters), loss = 4.1235
I0428 20:36:21.523510 12706 solver.cpp:237] Train net output #0: loss = 4.1235 (* 1 = 4.1235 loss)
I0428 20:36:21.523519 12706 sgd_solver.cpp:105] Iteration 1344, lr = 0.01
I0428 20:36:26.595331 12706 solver.cpp:218] Iteration 1356 (2.36612 iter/s, 5.07159s/12 iters), loss = 4.04281
I0428 20:36:26.595453 12706 solver.cpp:237] Train net output #0: loss = 4.04281 (* 1 = 4.04281 loss)
I0428 20:36:26.595464 12706 sgd_solver.cpp:105] Iteration 1356, lr = 0.01
I0428 20:36:31.605119 12706 solver.cpp:218] Iteration 1368 (2.39548 iter/s, 5.00944s/12 iters), loss = 3.97023
I0428 20:36:31.605177 12706 solver.cpp:237] Train net output #0: loss = 3.97023 (* 1 = 3.97023 loss)
I0428 20:36:31.605188 12706 sgd_solver.cpp:105] Iteration 1368, lr = 0.01
I0428 20:36:32.783677 12706 blocking_queue.cpp:49] Waiting for data
I0428 20:36:36.535785 12706 solver.cpp:218] Iteration 1380 (2.43388 iter/s, 4.93039s/12 iters), loss = 3.91745
I0428 20:36:36.535843 12706 solver.cpp:237] Train net output #0: loss = 3.91745 (* 1 = 3.91745 loss)
I0428 20:36:36.535856 12706 sgd_solver.cpp:105] Iteration 1380, lr = 0.01
I0428 20:36:41.486912 12706 solver.cpp:218] Iteration 1392 (2.42383 iter/s, 4.95085s/12 iters), loss = 3.94834
I0428 20:36:41.486964 12706 solver.cpp:237] Train net output #0: loss = 3.94834 (* 1 = 3.94834 loss)
I0428 20:36:41.486975 12706 sgd_solver.cpp:105] Iteration 1392, lr = 0.01
I0428 20:36:46.433576 12706 solver.cpp:218] Iteration 1404 (2.42601 iter/s, 4.94639s/12 iters), loss = 3.82025
I0428 20:36:46.433626 12706 solver.cpp:237] Train net output #0: loss = 3.82025 (* 1 = 3.82025 loss)
I0428 20:36:46.433638 12706 sgd_solver.cpp:105] Iteration 1404, lr = 0.01
I0428 20:36:51.039319 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:36:51.389660 12706 solver.cpp:218] Iteration 1416 (2.4214 iter/s, 4.95581s/12 iters), loss = 3.78813
I0428 20:36:51.389708 12706 solver.cpp:237] Train net output #0: loss = 3.78813 (* 1 = 3.78813 loss)
I0428 20:36:51.389721 12706 sgd_solver.cpp:105] Iteration 1416, lr = 0.01
I0428 20:36:55.872941 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1428.caffemodel
I0428 20:37:00.821403 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1428.solverstate
I0428 20:37:06.803229 12706 solver.cpp:330] Iteration 1428, Testing net (#0)
I0428 20:37:06.803251 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:37:10.597601 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:37:11.194607 12706 solver.cpp:397] Test net output #0: accuracy = 0.120098
I0428 20:37:11.194638 12706 solver.cpp:397] Test net output #1: loss = 4.02362 (* 1 = 4.02362 loss)
I0428 20:37:11.285707 12706 solver.cpp:218] Iteration 1428 (0.603162 iter/s, 19.8952s/12 iters), loss = 3.99853
I0428 20:37:11.285748 12706 solver.cpp:237] Train net output #0: loss = 3.99853 (* 1 = 3.99853 loss)
I0428 20:37:11.285758 12706 sgd_solver.cpp:105] Iteration 1428, lr = 0.01
I0428 20:37:15.453117 12706 solver.cpp:218] Iteration 1440 (2.87964 iter/s, 4.16718s/12 iters), loss = 3.92101
I0428 20:37:15.453155 12706 solver.cpp:237] Train net output #0: loss = 3.92101 (* 1 = 3.92101 loss)
I0428 20:37:15.453163 12706 sgd_solver.cpp:105] Iteration 1440, lr = 0.01
I0428 20:37:20.545992 12706 solver.cpp:218] Iteration 1452 (2.35636 iter/s, 5.09261s/12 iters), loss = 3.82631
I0428 20:37:20.546033 12706 solver.cpp:237] Train net output #0: loss = 3.82631 (* 1 = 3.82631 loss)
I0428 20:37:20.546041 12706 sgd_solver.cpp:105] Iteration 1452, lr = 0.01
I0428 20:37:25.594096 12706 solver.cpp:218] Iteration 1464 (2.37726 iter/s, 5.04784s/12 iters), loss = 3.82258
I0428 20:37:25.594134 12706 solver.cpp:237] Train net output #0: loss = 3.82258 (* 1 = 3.82258 loss)
I0428 20:37:25.594142 12706 sgd_solver.cpp:105] Iteration 1464, lr = 0.01
I0428 20:37:30.692282 12706 solver.cpp:218] Iteration 1476 (2.3539 iter/s, 5.09791s/12 iters), loss = 4.01379
I0428 20:37:30.692329 12706 solver.cpp:237] Train net output #0: loss = 4.01379 (* 1 = 4.01379 loss)
I0428 20:37:30.692338 12706 sgd_solver.cpp:105] Iteration 1476, lr = 0.01
I0428 20:37:35.662298 12706 solver.cpp:218] Iteration 1488 (2.41461 iter/s, 4.96974s/12 iters), loss = 3.44661
I0428 20:37:35.662531 12706 solver.cpp:237] Train net output #0: loss = 3.44661 (* 1 = 3.44661 loss)
I0428 20:37:35.662544 12706 sgd_solver.cpp:105] Iteration 1488, lr = 0.01
I0428 20:37:40.700284 12706 solver.cpp:218] Iteration 1500 (2.38212 iter/s, 5.03753s/12 iters), loss = 3.91312
I0428 20:37:40.700326 12706 solver.cpp:237] Train net output #0: loss = 3.91312 (* 1 = 3.91312 loss)
I0428 20:37:40.700335 12706 sgd_solver.cpp:105] Iteration 1500, lr = 0.01
I0428 20:37:45.690135 12706 solver.cpp:218] Iteration 1512 (2.40501 iter/s, 4.98959s/12 iters), loss = 3.92722
I0428 20:37:45.690173 12706 solver.cpp:237] Train net output #0: loss = 3.92722 (* 1 = 3.92722 loss)
I0428 20:37:45.690181 12706 sgd_solver.cpp:105] Iteration 1512, lr = 0.01
I0428 20:37:47.475831 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:37:50.734140 12706 solver.cpp:218] Iteration 1524 (2.37918 iter/s, 5.04375s/12 iters), loss = 3.55199
I0428 20:37:50.734176 12706 solver.cpp:237] Train net output #0: loss = 3.55199 (* 1 = 3.55199 loss)
I0428 20:37:50.734187 12706 sgd_solver.cpp:105] Iteration 1524, lr = 0.01
I0428 20:37:52.837091 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1530.caffemodel
I0428 20:37:55.953896 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1530.solverstate
I0428 20:37:58.274802 12706 solver.cpp:330] Iteration 1530, Testing net (#0)
I0428 20:37:58.274824 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:38:02.015964 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:38:02.706414 12706 solver.cpp:397] Test net output #0: accuracy = 0.137255
I0428 20:38:02.706449 12706 solver.cpp:397] Test net output #1: loss = 3.85761 (* 1 = 3.85761 loss)
I0428 20:38:04.484869 12706 solver.cpp:218] Iteration 1536 (0.872721 iter/s, 13.7501s/12 iters), loss = 3.57175
I0428 20:38:04.484915 12706 solver.cpp:237] Train net output #0: loss = 3.57175 (* 1 = 3.57175 loss)
I0428 20:38:04.484925 12706 sgd_solver.cpp:105] Iteration 1536, lr = 0.01
I0428 20:38:09.467715 12706 solver.cpp:218] Iteration 1548 (2.40839 iter/s, 4.98258s/12 iters), loss = 3.82188
I0428 20:38:09.467844 12706 solver.cpp:237] Train net output #0: loss = 3.82188 (* 1 = 3.82188 loss)
I0428 20:38:09.467854 12706 sgd_solver.cpp:105] Iteration 1548, lr = 0.01
I0428 20:38:14.407845 12706 solver.cpp:218] Iteration 1560 (2.42926 iter/s, 4.93978s/12 iters), loss = 3.27598
I0428 20:38:14.407891 12706 solver.cpp:237] Train net output #0: loss = 3.27598 (* 1 = 3.27598 loss)
I0428 20:38:14.407900 12706 sgd_solver.cpp:105] Iteration 1560, lr = 0.01
I0428 20:38:19.426761 12706 solver.cpp:218] Iteration 1572 (2.39108 iter/s, 5.01865s/12 iters), loss = 3.54382
I0428 20:38:19.426805 12706 solver.cpp:237] Train net output #0: loss = 3.54382 (* 1 = 3.54382 loss)
I0428 20:38:19.426812 12706 sgd_solver.cpp:105] Iteration 1572, lr = 0.01
I0428 20:38:24.500711 12706 solver.cpp:218] Iteration 1584 (2.36514 iter/s, 5.07369s/12 iters), loss = 3.55506
I0428 20:38:24.500747 12706 solver.cpp:237] Train net output #0: loss = 3.55506 (* 1 = 3.55506 loss)
I0428 20:38:24.500754 12706 sgd_solver.cpp:105] Iteration 1584, lr = 0.01
I0428 20:38:29.407055 12706 solver.cpp:218] Iteration 1596 (2.44594 iter/s, 4.90608s/12 iters), loss = 3.59018
I0428 20:38:29.407106 12706 solver.cpp:237] Train net output #0: loss = 3.59018 (* 1 = 3.59018 loss)
I0428 20:38:29.407119 12706 sgd_solver.cpp:105] Iteration 1596, lr = 0.01
I0428 20:38:34.420662 12706 solver.cpp:218] Iteration 1608 (2.39362 iter/s, 5.01334s/12 iters), loss = 3.37526
I0428 20:38:34.420701 12706 solver.cpp:237] Train net output #0: loss = 3.37526 (* 1 = 3.37526 loss)
I0428 20:38:34.420708 12706 sgd_solver.cpp:105] Iteration 1608, lr = 0.01
I0428 20:38:38.462146 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:38:39.532464 12706 solver.cpp:218] Iteration 1620 (2.34763 iter/s, 5.11154s/12 iters), loss = 3.3656
I0428 20:38:39.532585 12706 solver.cpp:237] Train net output #0: loss = 3.3656 (* 1 = 3.3656 loss)
I0428 20:38:39.532594 12706 sgd_solver.cpp:105] Iteration 1620, lr = 0.01
I0428 20:38:44.189360 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1632.caffemodel
I0428 20:38:47.135680 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1632.solverstate
I0428 20:38:49.461428 12706 solver.cpp:330] Iteration 1632, Testing net (#0)
I0428 20:38:49.461452 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:38:53.239271 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:38:53.956323 12706 solver.cpp:397] Test net output #0: accuracy = 0.150123
I0428 20:38:53.956353 12706 solver.cpp:397] Test net output #1: loss = 3.71273 (* 1 = 3.71273 loss)
I0428 20:38:54.047484 12706 solver.cpp:218] Iteration 1632 (0.826766 iter/s, 14.5144s/12 iters), loss = 3.49307
I0428 20:38:54.047525 12706 solver.cpp:237] Train net output #0: loss = 3.49307 (* 1 = 3.49307 loss)
I0428 20:38:54.047534 12706 sgd_solver.cpp:105] Iteration 1632, lr = 0.01
I0428 20:38:58.166568 12706 solver.cpp:218] Iteration 1644 (2.91341 iter/s, 4.11889s/12 iters), loss = 3.4581
I0428 20:38:58.166606 12706 solver.cpp:237] Train net output #0: loss = 3.4581 (* 1 = 3.4581 loss)
I0428 20:38:58.166615 12706 sgd_solver.cpp:105] Iteration 1644, lr = 0.01
I0428 20:39:03.357648 12706 solver.cpp:218] Iteration 1656 (2.31176 iter/s, 5.19085s/12 iters), loss = 3.48018
I0428 20:39:03.357683 12706 solver.cpp:237] Train net output #0: loss = 3.48018 (* 1 = 3.48018 loss)
I0428 20:39:03.357690 12706 sgd_solver.cpp:105] Iteration 1656, lr = 0.01
I0428 20:39:08.376855 12706 solver.cpp:218] Iteration 1668 (2.39092 iter/s, 5.01898s/12 iters), loss = 3.59488
I0428 20:39:08.376899 12706 solver.cpp:237] Train net output #0: loss = 3.59488 (* 1 = 3.59488 loss)
I0428 20:39:08.376909 12706 sgd_solver.cpp:105] Iteration 1668, lr = 0.01
I0428 20:39:13.392880 12706 solver.cpp:218] Iteration 1680 (2.39245 iter/s, 5.01579s/12 iters), loss = 3.23111
I0428 20:39:13.393077 12706 solver.cpp:237] Train net output #0: loss = 3.23111 (* 1 = 3.23111 loss)
I0428 20:39:13.393088 12706 sgd_solver.cpp:105] Iteration 1680, lr = 0.01
I0428 20:39:18.430446 12706 solver.cpp:218] Iteration 1692 (2.38229 iter/s, 5.03718s/12 iters), loss = 3.29203
I0428 20:39:18.430497 12706 solver.cpp:237] Train net output #0: loss = 3.29203 (* 1 = 3.29203 loss)
I0428 20:39:18.430507 12706 sgd_solver.cpp:105] Iteration 1692, lr = 0.01
I0428 20:39:23.503196 12706 solver.cpp:218] Iteration 1704 (2.36569 iter/s, 5.07251s/12 iters), loss = 3.65669
I0428 20:39:23.503233 12706 solver.cpp:237] Train net output #0: loss = 3.65669 (* 1 = 3.65669 loss)
I0428 20:39:23.503242 12706 sgd_solver.cpp:105] Iteration 1704, lr = 0.01
I0428 20:39:28.593595 12706 solver.cpp:218] Iteration 1716 (2.35748 iter/s, 5.09017s/12 iters), loss = 3.60689
I0428 20:39:28.593632 12706 solver.cpp:237] Train net output #0: loss = 3.60689 (* 1 = 3.60689 loss)
I0428 20:39:28.593641 12706 sgd_solver.cpp:105] Iteration 1716, lr = 0.01
I0428 20:39:29.697966 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:39:33.686535 12706 solver.cpp:218] Iteration 1728 (2.35631 iter/s, 5.09272s/12 iters), loss = 3.49156
I0428 20:39:33.686575 12706 solver.cpp:237] Train net output #0: loss = 3.49156 (* 1 = 3.49156 loss)
I0428 20:39:33.686583 12706 sgd_solver.cpp:105] Iteration 1728, lr = 0.01
I0428 20:39:35.774569 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1734.caffemodel
I0428 20:39:38.777168 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1734.solverstate
I0428 20:39:41.077116 12706 solver.cpp:330] Iteration 1734, Testing net (#0)
I0428 20:39:41.077136 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:39:44.928548 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:39:45.641106 12706 solver.cpp:397] Test net output #0: accuracy = 0.170956
I0428 20:39:45.641135 12706 solver.cpp:397] Test net output #1: loss = 3.60358 (* 1 = 3.60358 loss)
I0428 20:39:47.387009 12706 solver.cpp:218] Iteration 1740 (0.875915 iter/s, 13.7s/12 iters), loss = 3.43291
I0428 20:39:47.387044 12706 solver.cpp:237] Train net output #0: loss = 3.43291 (* 1 = 3.43291 loss)
I0428 20:39:47.387053 12706 sgd_solver.cpp:105] Iteration 1740, lr = 0.01
I0428 20:39:52.407384 12706 solver.cpp:218] Iteration 1752 (2.39037 iter/s, 5.02015s/12 iters), loss = 3.24684
I0428 20:39:52.407428 12706 solver.cpp:237] Train net output #0: loss = 3.24684 (* 1 = 3.24684 loss)
I0428 20:39:52.407438 12706 sgd_solver.cpp:105] Iteration 1752, lr = 0.01
I0428 20:39:57.379331 12706 solver.cpp:218] Iteration 1764 (2.41365 iter/s, 4.97171s/12 iters), loss = 3.35967
I0428 20:39:57.379379 12706 solver.cpp:237] Train net output #0: loss = 3.35967 (* 1 = 3.35967 loss)
I0428 20:39:57.379387 12706 sgd_solver.cpp:105] Iteration 1764, lr = 0.01
I0428 20:40:02.544178 12706 solver.cpp:218] Iteration 1776 (2.3235 iter/s, 5.16461s/12 iters), loss = 3.3758
I0428 20:40:02.544220 12706 solver.cpp:237] Train net output #0: loss = 3.3758 (* 1 = 3.3758 loss)
I0428 20:40:02.544229 12706 sgd_solver.cpp:105] Iteration 1776, lr = 0.01
I0428 20:40:07.512003 12706 solver.cpp:218] Iteration 1788 (2.41566 iter/s, 4.96759s/12 iters), loss = 3.46005
I0428 20:40:07.512053 12706 solver.cpp:237] Train net output #0: loss = 3.46005 (* 1 = 3.46005 loss)
I0428 20:40:07.512065 12706 sgd_solver.cpp:105] Iteration 1788, lr = 0.01
I0428 20:40:12.612275 12706 solver.cpp:218] Iteration 1800 (2.35293 iter/s, 5.10003s/12 iters), loss = 3.49763
I0428 20:40:12.612332 12706 solver.cpp:237] Train net output #0: loss = 3.49763 (* 1 = 3.49763 loss)
I0428 20:40:12.612344 12706 sgd_solver.cpp:105] Iteration 1800, lr = 0.01
I0428 20:40:17.796118 12706 solver.cpp:218] Iteration 1812 (2.315 iter/s, 5.18359s/12 iters), loss = 3.28699
I0428 20:40:17.796252 12706 solver.cpp:237] Train net output #0: loss = 3.28699 (* 1 = 3.28699 loss)
I0428 20:40:17.796262 12706 sgd_solver.cpp:105] Iteration 1812, lr = 0.01
I0428 20:40:20.952436 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:40:22.825789 12706 solver.cpp:218] Iteration 1824 (2.38599 iter/s, 5.02935s/12 iters), loss = 3.43894
I0428 20:40:22.825829 12706 solver.cpp:237] Train net output #0: loss = 3.43894 (* 1 = 3.43894 loss)
I0428 20:40:22.825835 12706 sgd_solver.cpp:105] Iteration 1824, lr = 0.01
I0428 20:40:27.348453 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1836.caffemodel
I0428 20:40:30.345894 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1836.solverstate
I0428 20:40:32.968322 12706 solver.cpp:330] Iteration 1836, Testing net (#0)
I0428 20:40:32.968343 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:40:36.931080 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:40:37.690289 12706 solver.cpp:397] Test net output #0: accuracy = 0.185049
I0428 20:40:37.690322 12706 solver.cpp:397] Test net output #1: loss = 3.51183 (* 1 = 3.51183 loss)
I0428 20:40:37.781682 12706 solver.cpp:218] Iteration 1836 (0.80239 iter/s, 14.9553s/12 iters), loss = 2.99265
I0428 20:40:37.781721 12706 solver.cpp:237] Train net output #0: loss = 2.99265 (* 1 = 2.99265 loss)
I0428 20:40:37.781729 12706 sgd_solver.cpp:105] Iteration 1836, lr = 0.01
I0428 20:40:41.866192 12706 solver.cpp:218] Iteration 1848 (2.93807 iter/s, 4.08431s/12 iters), loss = 3.14189
I0428 20:40:41.866230 12706 solver.cpp:237] Train net output #0: loss = 3.14189 (* 1 = 3.14189 loss)
I0428 20:40:41.866237 12706 sgd_solver.cpp:105] Iteration 1848, lr = 0.01
I0428 20:40:46.852799 12706 solver.cpp:218] Iteration 1860 (2.40656 iter/s, 4.98638s/12 iters), loss = 3.0646
I0428 20:40:46.852847 12706 solver.cpp:237] Train net output #0: loss = 3.0646 (* 1 = 3.0646 loss)
I0428 20:40:46.852857 12706 sgd_solver.cpp:105] Iteration 1860, lr = 0.01
I0428 20:40:51.860129 12706 solver.cpp:218] Iteration 1872 (2.3966 iter/s, 5.00709s/12 iters), loss = 3.02053
I0428 20:40:51.860236 12706 solver.cpp:237] Train net output #0: loss = 3.02053 (* 1 = 3.02053 loss)
I0428 20:40:51.860246 12706 sgd_solver.cpp:105] Iteration 1872, lr = 0.01
I0428 20:40:56.941072 12706 solver.cpp:218] Iteration 1884 (2.36191 iter/s, 5.08064s/12 iters), loss = 3.30862
I0428 20:40:56.941125 12706 solver.cpp:237] Train net output #0: loss = 3.30862 (* 1 = 3.30862 loss)
I0428 20:40:56.941138 12706 sgd_solver.cpp:105] Iteration 1884, lr = 0.01
I0428 20:41:01.933554 12706 solver.cpp:218] Iteration 1896 (2.40373 iter/s, 4.99224s/12 iters), loss = 2.9241
I0428 20:41:01.933604 12706 solver.cpp:237] Train net output #0: loss = 2.9241 (* 1 = 2.9241 loss)
I0428 20:41:01.933615 12706 sgd_solver.cpp:105] Iteration 1896, lr = 0.01
I0428 20:41:06.889567 12706 solver.cpp:218] Iteration 1908 (2.42142 iter/s, 4.95577s/12 iters), loss = 3.40054
I0428 20:41:06.889618 12706 solver.cpp:237] Train net output #0: loss = 3.40054 (* 1 = 3.40054 loss)
I0428 20:41:06.889629 12706 sgd_solver.cpp:105] Iteration 1908, lr = 0.01
I0428 20:41:11.953619 12706 solver.cpp:218] Iteration 1920 (2.36976 iter/s, 5.06381s/12 iters), loss = 3.14541
I0428 20:41:11.953661 12706 solver.cpp:237] Train net output #0: loss = 3.14541 (* 1 = 3.14541 loss)
I0428 20:41:11.953670 12706 sgd_solver.cpp:105] Iteration 1920, lr = 0.01
I0428 20:41:12.267980 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:41:16.888649 12706 solver.cpp:218] Iteration 1932 (2.43171 iter/s, 4.93479s/12 iters), loss = 2.93064
I0428 20:41:16.888695 12706 solver.cpp:237] Train net output #0: loss = 2.93064 (* 1 = 2.93064 loss)
I0428 20:41:16.888705 12706 sgd_solver.cpp:105] Iteration 1932, lr = 0.01
I0428 20:41:18.936112 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1938.caffemodel
I0428 20:41:21.936772 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1938.solverstate
I0428 20:41:24.437863 12706 solver.cpp:330] Iteration 1938, Testing net (#0)
I0428 20:41:24.437884 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:41:28.141242 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:41:28.950474 12706 solver.cpp:397] Test net output #0: accuracy = 0.20527
I0428 20:41:28.950505 12706 solver.cpp:397] Test net output #1: loss = 3.38478 (* 1 = 3.38478 loss)
I0428 20:41:30.758297 12706 solver.cpp:218] Iteration 1944 (0.865233 iter/s, 13.8691s/12 iters), loss = 3.075
I0428 20:41:30.758335 12706 solver.cpp:237] Train net output #0: loss = 3.075 (* 1 = 3.075 loss)
I0428 20:41:30.758343 12706 sgd_solver.cpp:105] Iteration 1944, lr = 0.01
I0428 20:41:35.785126 12706 solver.cpp:218] Iteration 1956 (2.3873 iter/s, 5.02659s/12 iters), loss = 3.45374
I0428 20:41:35.785173 12706 solver.cpp:237] Train net output #0: loss = 3.45374 (* 1 = 3.45374 loss)
I0428 20:41:35.785183 12706 sgd_solver.cpp:105] Iteration 1956, lr = 0.01
I0428 20:41:40.812579 12706 solver.cpp:218] Iteration 1968 (2.38701 iter/s, 5.02721s/12 iters), loss = 3.00946
I0428 20:41:40.812624 12706 solver.cpp:237] Train net output #0: loss = 3.00946 (* 1 = 3.00946 loss)
I0428 20:41:40.812634 12706 sgd_solver.cpp:105] Iteration 1968, lr = 0.01
I0428 20:41:45.824275 12706 solver.cpp:218] Iteration 1980 (2.39451 iter/s, 5.01146s/12 iters), loss = 3.15819
I0428 20:41:45.824322 12706 solver.cpp:237] Train net output #0: loss = 3.15819 (* 1 = 3.15819 loss)
I0428 20:41:45.824332 12706 sgd_solver.cpp:105] Iteration 1980, lr = 0.01
I0428 20:41:50.887097 12706 solver.cpp:218] Iteration 1992 (2.37033 iter/s, 5.06258s/12 iters), loss = 3.15182
I0428 20:41:50.887140 12706 solver.cpp:237] Train net output #0: loss = 3.15182 (* 1 = 3.15182 loss)
I0428 20:41:50.887147 12706 sgd_solver.cpp:105] Iteration 1992, lr = 0.01
I0428 20:41:56.378733 12706 solver.cpp:218] Iteration 2004 (2.18524 iter/s, 5.49138s/12 iters), loss = 2.8793
I0428 20:41:56.378836 12706 solver.cpp:237] Train net output #0: loss = 2.8793 (* 1 = 2.8793 loss)
I0428 20:41:56.378844 12706 sgd_solver.cpp:105] Iteration 2004, lr = 0.01
I0428 20:42:01.446063 12706 solver.cpp:218] Iteration 2016 (2.36825 iter/s, 5.06703s/12 iters), loss = 3.1086
I0428 20:42:01.446101 12706 solver.cpp:237] Train net output #0: loss = 3.1086 (* 1 = 3.1086 loss)
I0428 20:42:01.446110 12706 sgd_solver.cpp:105] Iteration 2016, lr = 0.01
I0428 20:42:03.996948 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:42:06.458040 12706 solver.cpp:218] Iteration 2028 (2.39438 iter/s, 5.01174s/12 iters), loss = 2.82859
I0428 20:42:06.458079 12706 solver.cpp:237] Train net output #0: loss = 2.82859 (* 1 = 2.82859 loss)
I0428 20:42:06.458087 12706 sgd_solver.cpp:105] Iteration 2028, lr = 0.01
I0428 20:42:10.993083 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2040.caffemodel
I0428 20:42:16.825078 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2040.solverstate
I0428 20:42:19.133291 12706 solver.cpp:330] Iteration 2040, Testing net (#0)
I0428 20:42:19.133311 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:42:22.728410 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:42:23.619459 12706 solver.cpp:397] Test net output #0: accuracy = 0.230392
I0428 20:42:23.619488 12706 solver.cpp:397] Test net output #1: loss = 3.30805 (* 1 = 3.30805 loss)
I0428 20:42:23.708389 12706 solver.cpp:218] Iteration 2040 (0.695665 iter/s, 17.2497s/12 iters), loss = 3.2005
I0428 20:42:23.708428 12706 solver.cpp:237] Train net output #0: loss = 3.2005 (* 1 = 3.2005 loss)
I0428 20:42:23.708436 12706 sgd_solver.cpp:105] Iteration 2040, lr = 0.01
I0428 20:42:27.883394 12706 solver.cpp:218] Iteration 2052 (2.87439 iter/s, 4.1748s/12 iters), loss = 3.04241
I0428 20:42:27.883649 12706 solver.cpp:237] Train net output #0: loss = 3.04241 (* 1 = 3.04241 loss)
I0428 20:42:27.883658 12706 sgd_solver.cpp:105] Iteration 2052, lr = 0.01
I0428 20:42:29.466573 12706 blocking_queue.cpp:49] Waiting for data
I0428 20:42:33.052690 12706 solver.cpp:218] Iteration 2064 (2.3216 iter/s, 5.16884s/12 iters), loss = 3.01188
I0428 20:42:33.052731 12706 solver.cpp:237] Train net output #0: loss = 3.01188 (* 1 = 3.01188 loss)
I0428 20:42:33.052739 12706 sgd_solver.cpp:105] Iteration 2064, lr = 0.01
I0428 20:42:38.146715 12706 solver.cpp:218] Iteration 2076 (2.35581 iter/s, 5.09379s/12 iters), loss = 3.1041
I0428 20:42:38.146754 12706 solver.cpp:237] Train net output #0: loss = 3.1041 (* 1 = 3.1041 loss)
I0428 20:42:38.146762 12706 sgd_solver.cpp:105] Iteration 2076, lr = 0.01
I0428 20:42:43.317369 12706 solver.cpp:218] Iteration 2088 (2.3209 iter/s, 5.17042s/12 iters), loss = 2.79014
I0428 20:42:43.317404 12706 solver.cpp:237] Train net output #0: loss = 2.79014 (* 1 = 2.79014 loss)
I0428 20:42:43.317412 12706 sgd_solver.cpp:105] Iteration 2088, lr = 0.01
I0428 20:42:48.401496 12706 solver.cpp:218] Iteration 2100 (2.3604 iter/s, 5.08389s/12 iters), loss = 2.91555
I0428 20:42:48.401544 12706 solver.cpp:237] Train net output #0: loss = 2.91555 (* 1 = 2.91555 loss)
I0428 20:42:48.401554 12706 sgd_solver.cpp:105] Iteration 2100, lr = 0.01
I0428 20:42:53.398000 12706 solver.cpp:218] Iteration 2112 (2.4018 iter/s, 4.99626s/12 iters), loss = 2.92328
I0428 20:42:53.398049 12706 solver.cpp:237] Train net output #0: loss = 2.92328 (* 1 = 2.92328 loss)
I0428 20:42:53.398058 12706 sgd_solver.cpp:105] Iteration 2112, lr = 0.01
I0428 20:42:58.119292 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:42:58.436779 12706 solver.cpp:218] Iteration 2124 (2.38165 iter/s, 5.03853s/12 iters), loss = 2.86594
I0428 20:42:58.436815 12706 solver.cpp:237] Train net output #0: loss = 2.86594 (* 1 = 2.86594 loss)
I0428 20:42:58.436822 12706 sgd_solver.cpp:105] Iteration 2124, lr = 0.01
I0428 20:43:03.514747 12706 solver.cpp:218] Iteration 2136 (2.36326 iter/s, 5.07774s/12 iters), loss = 2.93485
I0428 20:43:03.514786 12706 solver.cpp:237] Train net output #0: loss = 2.93485 (* 1 = 2.93485 loss)
I0428 20:43:03.514793 12706 sgd_solver.cpp:105] Iteration 2136, lr = 0.01
I0428 20:43:05.525000 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2142.caffemodel
I0428 20:43:10.791358 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2142.solverstate
I0428 20:43:13.572592 12706 solver.cpp:330] Iteration 2142, Testing net (#0)
I0428 20:43:13.572613 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:43:17.083783 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:43:18.029374 12706 solver.cpp:397] Test net output #0: accuracy = 0.211397
I0428 20:43:18.029403 12706 solver.cpp:397] Test net output #1: loss = 3.48227 (* 1 = 3.48227 loss)
I0428 20:43:19.729470 12706 solver.cpp:218] Iteration 2148 (0.740097 iter/s, 16.2141s/12 iters), loss = 2.95704
I0428 20:43:19.729506 12706 solver.cpp:237] Train net output #0: loss = 2.95704 (* 1 = 2.95704 loss)
I0428 20:43:19.729516 12706 sgd_solver.cpp:105] Iteration 2148, lr = 0.01
I0428 20:43:24.828826 12706 solver.cpp:218] Iteration 2160 (2.35335 iter/s, 5.09912s/12 iters), loss = 2.83196
I0428 20:43:24.828866 12706 solver.cpp:237] Train net output #0: loss = 2.83196 (* 1 = 2.83196 loss)
I0428 20:43:24.828876 12706 sgd_solver.cpp:105] Iteration 2160, lr = 0.01
I0428 20:43:29.998533 12706 solver.cpp:218] Iteration 2172 (2.32133 iter/s, 5.16946s/12 iters), loss = 2.8809
I0428 20:43:29.998620 12706 solver.cpp:237] Train net output #0: loss = 2.8809 (* 1 = 2.8809 loss)
I0428 20:43:29.998627 12706 sgd_solver.cpp:105] Iteration 2172, lr = 0.01
I0428 20:43:34.968972 12706 solver.cpp:218] Iteration 2184 (2.41441 iter/s, 4.97016s/12 iters), loss = 2.87307
I0428 20:43:34.969010 12706 solver.cpp:237] Train net output #0: loss = 2.87307 (* 1 = 2.87307 loss)
I0428 20:43:34.969019 12706 sgd_solver.cpp:105] Iteration 2184, lr = 0.01
I0428 20:43:40.041208 12706 solver.cpp:218] Iteration 2196 (2.36593 iter/s, 5.072s/12 iters), loss = 2.69641
I0428 20:43:40.041247 12706 solver.cpp:237] Train net output #0: loss = 2.69641 (* 1 = 2.69641 loss)
I0428 20:43:40.041255 12706 sgd_solver.cpp:105] Iteration 2196, lr = 0.01
I0428 20:43:45.235476 12706 solver.cpp:218] Iteration 2208 (2.31035 iter/s, 5.19402s/12 iters), loss = 2.68959
I0428 20:43:45.235524 12706 solver.cpp:237] Train net output #0: loss = 2.68959 (* 1 = 2.68959 loss)
I0428 20:43:45.235534 12706 sgd_solver.cpp:105] Iteration 2208, lr = 0.01
I0428 20:43:50.735162 12706 solver.cpp:218] Iteration 2220 (2.18205 iter/s, 5.49942s/12 iters), loss = 2.78591
I0428 20:43:50.735216 12706 solver.cpp:237] Train net output #0: loss = 2.78591 (* 1 = 2.78591 loss)
I0428 20:43:50.735227 12706 sgd_solver.cpp:105] Iteration 2220, lr = 0.01
I0428 20:43:53.239483 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:43:57.488571 12706 solver.cpp:218] Iteration 2232 (1.77697 iter/s, 6.75309s/12 iters), loss = 2.59611
I0428 20:43:57.488627 12706 solver.cpp:237] Train net output #0: loss = 2.59611 (* 1 = 2.59611 loss)
I0428 20:43:57.488646 12706 sgd_solver.cpp:105] Iteration 2232, lr = 0.01
I0428 20:44:03.550252 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2244.caffemodel
I0428 20:44:09.260677 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2244.solverstate
I0428 20:44:11.767500 12706 solver.cpp:330] Iteration 2244, Testing net (#0)
I0428 20:44:11.767521 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:44:16.700424 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:44:18.032629 12706 solver.cpp:397] Test net output #0: accuracy = 0.211397
I0428 20:44:18.032670 12706 solver.cpp:397] Test net output #1: loss = 3.39472 (* 1 = 3.39472 loss)
I0428 20:44:18.117780 12706 solver.cpp:218] Iteration 2244 (0.581723 iter/s, 20.6284s/12 iters), loss = 2.77058
I0428 20:44:18.132561 12706 solver.cpp:237] Train net output #0: loss = 2.77058 (* 1 = 2.77058 loss)
I0428 20:44:18.132586 12706 sgd_solver.cpp:105] Iteration 2244, lr = 0.01
I0428 20:44:23.820724 12706 solver.cpp:218] Iteration 2256 (2.111 iter/s, 5.68452s/12 iters), loss = 3.10505
I0428 20:44:23.820775 12706 solver.cpp:237] Train net output #0: loss = 3.10505 (* 1 = 3.10505 loss)
I0428 20:44:23.820786 12706 sgd_solver.cpp:105] Iteration 2256, lr = 0.01
I0428 20:44:30.443315 12706 solver.cpp:218] Iteration 2268 (1.81206 iter/s, 6.62228s/12 iters), loss = 2.38753
I0428 20:44:30.443364 12706 solver.cpp:237] Train net output #0: loss = 2.38753 (* 1 = 2.38753 loss)
I0428 20:44:30.443375 12706 sgd_solver.cpp:105] Iteration 2268, lr = 0.01
I0428 20:44:36.852728 12706 solver.cpp:218] Iteration 2280 (1.87233 iter/s, 6.40912s/12 iters), loss = 2.63901
I0428 20:44:36.855849 12706 solver.cpp:237] Train net output #0: loss = 2.63901 (* 1 = 2.63901 loss)
I0428 20:44:36.855859 12706 sgd_solver.cpp:105] Iteration 2280, lr = 0.01
I0428 20:44:42.425768 12706 solver.cpp:218] Iteration 2292 (2.15451 iter/s, 5.5697s/12 iters), loss = 2.79669
I0428 20:44:42.425808 12706 solver.cpp:237] Train net output #0: loss = 2.79669 (* 1 = 2.79669 loss)
I0428 20:44:42.425817 12706 sgd_solver.cpp:105] Iteration 2292, lr = 0.01
I0428 20:44:47.631628 12706 solver.cpp:218] Iteration 2304 (2.3052 iter/s, 5.20561s/12 iters), loss = 2.55852
I0428 20:44:47.631665 12706 solver.cpp:237] Train net output #0: loss = 2.55852 (* 1 = 2.55852 loss)
I0428 20:44:47.631672 12706 sgd_solver.cpp:105] Iteration 2304, lr = 0.01
I0428 20:44:52.744473 12706 solver.cpp:218] Iteration 2316 (2.34714 iter/s, 5.1126s/12 iters), loss = 2.87352
I0428 20:44:52.744537 12706 solver.cpp:237] Train net output #0: loss = 2.87352 (* 1 = 2.87352 loss)
I0428 20:44:52.744544 12706 sgd_solver.cpp:105] Iteration 2316, lr = 0.01
I0428 20:44:56.667726 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:44:57.715829 12706 solver.cpp:218] Iteration 2328 (2.41396 iter/s, 4.97109s/12 iters), loss = 2.66561
I0428 20:44:57.715868 12706 solver.cpp:237] Train net output #0: loss = 2.66561 (* 1 = 2.66561 loss)
I0428 20:44:57.715876 12706 sgd_solver.cpp:105] Iteration 2328, lr = 0.01
I0428 20:45:02.714087 12706 solver.cpp:218] Iteration 2340 (2.40095 iter/s, 4.99801s/12 iters), loss = 2.78128
I0428 20:45:02.714135 12706 solver.cpp:237] Train net output #0: loss = 2.78128 (* 1 = 2.78128 loss)
I0428 20:45:02.714145 12706 sgd_solver.cpp:105] Iteration 2340, lr = 0.01
I0428 20:45:04.778841 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2346.caffemodel
I0428 20:45:12.557958 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2346.solverstate
I0428 20:45:14.880270 12706 solver.cpp:330] Iteration 2346, Testing net (#0)
I0428 20:45:14.880295 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:45:18.298051 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:45:19.248839 12706 solver.cpp:397] Test net output #0: accuracy = 0.245711
I0428 20:45:19.248873 12706 solver.cpp:397] Test net output #1: loss = 3.2256 (* 1 = 3.2256 loss)
I0428 20:45:21.056221 12706 solver.cpp:218] Iteration 2352 (0.654258 iter/s, 18.3414s/12 iters), loss = 2.384
I0428 20:45:21.056259 12706 solver.cpp:237] Train net output #0: loss = 2.384 (* 1 = 2.384 loss)
I0428 20:45:21.056267 12706 sgd_solver.cpp:105] Iteration 2352, lr = 0.01
I0428 20:45:26.277117 12706 solver.cpp:218] Iteration 2364 (2.29857 iter/s, 5.22064s/12 iters), loss = 2.68053
I0428 20:45:26.277158 12706 solver.cpp:237] Train net output #0: loss = 2.68053 (* 1 = 2.68053 loss)
I0428 20:45:26.277166 12706 sgd_solver.cpp:105] Iteration 2364, lr = 0.01
I0428 20:45:31.268474 12706 solver.cpp:218] Iteration 2376 (2.40427 iter/s, 4.99112s/12 iters), loss = 2.69871
I0428 20:45:31.268532 12706 solver.cpp:237] Train net output #0: loss = 2.69871 (* 1 = 2.69871 loss)
I0428 20:45:31.268543 12706 sgd_solver.cpp:105] Iteration 2376, lr = 0.01
I0428 20:45:36.328157 12706 solver.cpp:218] Iteration 2388 (2.37181 iter/s, 5.05942s/12 iters), loss = 2.37741
I0428 20:45:36.328208 12706 solver.cpp:237] Train net output #0: loss = 2.37741 (* 1 = 2.37741 loss)
I0428 20:45:36.328220 12706 sgd_solver.cpp:105] Iteration 2388, lr = 0.01
I0428 20:45:41.274997 12706 solver.cpp:218] Iteration 2400 (2.42592 iter/s, 4.94658s/12 iters), loss = 2.45368
I0428 20:45:41.275050 12706 solver.cpp:237] Train net output #0: loss = 2.45368 (* 1 = 2.45368 loss)
I0428 20:45:41.275061 12706 sgd_solver.cpp:105] Iteration 2400, lr = 0.01
I0428 20:45:46.214088 12706 solver.cpp:218] Iteration 2412 (2.42972 iter/s, 4.93885s/12 iters), loss = 2.72791
I0428 20:45:46.214195 12706 solver.cpp:237] Train net output #0: loss = 2.72791 (* 1 = 2.72791 loss)
I0428 20:45:46.214202 12706 sgd_solver.cpp:105] Iteration 2412, lr = 0.01
I0428 20:45:51.202477 12706 solver.cpp:218] Iteration 2424 (2.40573 iter/s, 4.98808s/12 iters), loss = 2.44264
I0428 20:45:51.202528 12706 solver.cpp:237] Train net output #0: loss = 2.44264 (* 1 = 2.44264 loss)
I0428 20:45:51.202539 12706 sgd_solver.cpp:105] Iteration 2424, lr = 0.01
I0428 20:45:52.391219 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:45:56.255407 12706 solver.cpp:218] Iteration 2436 (2.37498 iter/s, 5.05268s/12 iters), loss = 2.53433
I0428 20:45:56.255457 12706 solver.cpp:237] Train net output #0: loss = 2.53433 (* 1 = 2.53433 loss)
I0428 20:45:56.255470 12706 sgd_solver.cpp:105] Iteration 2436, lr = 0.01
I0428 20:46:00.742626 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2448.caffemodel
I0428 20:46:03.783686 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2448.solverstate
I0428 20:46:06.089864 12706 solver.cpp:330] Iteration 2448, Testing net (#0)
I0428 20:46:06.089884 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:46:09.664258 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:46:10.664822 12706 solver.cpp:397] Test net output #0: accuracy = 0.254902
I0428 20:46:10.664850 12706 solver.cpp:397] Test net output #1: loss = 3.18394 (* 1 = 3.18394 loss)
I0428 20:46:10.755898 12706 solver.cpp:218] Iteration 2448 (0.827593 iter/s, 14.4999s/12 iters), loss = 2.65679
I0428 20:46:10.755939 12706 solver.cpp:237] Train net output #0: loss = 2.65679 (* 1 = 2.65679 loss)
I0428 20:46:10.755945 12706 sgd_solver.cpp:105] Iteration 2448, lr = 0.01
I0428 20:46:14.957677 12706 solver.cpp:218] Iteration 2460 (2.85608 iter/s, 4.20156s/12 iters), loss = 2.24742
I0428 20:46:14.957728 12706 solver.cpp:237] Train net output #0: loss = 2.24742 (* 1 = 2.24742 loss)
I0428 20:46:14.957742 12706 sgd_solver.cpp:105] Iteration 2460, lr = 0.01
I0428 20:46:19.954325 12706 solver.cpp:218] Iteration 2472 (2.40173 iter/s, 4.9964s/12 iters), loss = 2.58129
I0428 20:46:19.954488 12706 solver.cpp:237] Train net output #0: loss = 2.58129 (* 1 = 2.58129 loss)
I0428 20:46:19.954501 12706 sgd_solver.cpp:105] Iteration 2472, lr = 0.01
I0428 20:46:24.929839 12706 solver.cpp:218] Iteration 2484 (2.41198 iter/s, 4.97516s/12 iters), loss = 2.46823
I0428 20:46:24.929878 12706 solver.cpp:237] Train net output #0: loss = 2.46823 (* 1 = 2.46823 loss)
I0428 20:46:24.929885 12706 sgd_solver.cpp:105] Iteration 2484, lr = 0.01
I0428 20:46:29.982542 12706 solver.cpp:218] Iteration 2496 (2.37508 iter/s, 5.05245s/12 iters), loss = 2.59552
I0428 20:46:29.982591 12706 solver.cpp:237] Train net output #0: loss = 2.59552 (* 1 = 2.59552 loss)
I0428 20:46:29.982604 12706 sgd_solver.cpp:105] Iteration 2496, lr = 0.01
I0428 20:46:34.975821 12706 solver.cpp:218] Iteration 2508 (2.40335 iter/s, 4.99303s/12 iters), loss = 2.63574
I0428 20:46:34.975860 12706 solver.cpp:237] Train net output #0: loss = 2.63574 (* 1 = 2.63574 loss)
I0428 20:46:34.975868 12706 sgd_solver.cpp:105] Iteration 2508, lr = 0.01
I0428 20:46:39.957420 12706 solver.cpp:218] Iteration 2520 (2.40898 iter/s, 4.98136s/12 iters), loss = 2.18156
I0428 20:46:39.957458 12706 solver.cpp:237] Train net output #0: loss = 2.18156 (* 1 = 2.18156 loss)
I0428 20:46:39.957465 12706 sgd_solver.cpp:105] Iteration 2520, lr = 0.01
I0428 20:46:43.218472 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:46:45.032543 12706 solver.cpp:218] Iteration 2532 (2.36459 iter/s, 5.07487s/12 iters), loss = 2.11369
I0428 20:46:45.032591 12706 solver.cpp:237] Train net output #0: loss = 2.11369 (* 1 = 2.11369 loss)
I0428 20:46:45.032601 12706 sgd_solver.cpp:105] Iteration 2532, lr = 0.01
I0428 20:46:50.357136 12706 solver.cpp:218] Iteration 2544 (2.2538 iter/s, 5.32433s/12 iters), loss = 2.64379
I0428 20:46:50.357234 12706 solver.cpp:237] Train net output #0: loss = 2.64379 (* 1 = 2.64379 loss)
I0428 20:46:50.357244 12706 sgd_solver.cpp:105] Iteration 2544, lr = 0.01
I0428 20:46:52.468232 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2550.caffemodel
I0428 20:46:55.865000 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2550.solverstate
I0428 20:46:59.623210 12706 solver.cpp:330] Iteration 2550, Testing net (#0)
I0428 20:46:59.623236 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:47:02.991276 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:47:04.021020 12706 solver.cpp:397] Test net output #0: accuracy = 0.270833
I0428 20:47:04.021064 12706 solver.cpp:397] Test net output #1: loss = 3.12867 (* 1 = 3.12867 loss)
I0428 20:47:05.740790 12706 solver.cpp:218] Iteration 2556 (0.780084 iter/s, 15.383s/12 iters), loss = 1.98696
I0428 20:47:05.740841 12706 solver.cpp:237] Train net output #0: loss = 1.98696 (* 1 = 1.98696 loss)
I0428 20:47:05.740852 12706 sgd_solver.cpp:105] Iteration 2556, lr = 0.01
I0428 20:47:10.712374 12706 solver.cpp:218] Iteration 2568 (2.41384 iter/s, 4.97133s/12 iters), loss = 2.32276
I0428 20:47:10.712424 12706 solver.cpp:237] Train net output #0: loss = 2.32276 (* 1 = 2.32276 loss)
I0428 20:47:10.712433 12706 sgd_solver.cpp:105] Iteration 2568, lr = 0.01
I0428 20:47:15.732774 12706 solver.cpp:218] Iteration 2580 (2.39037 iter/s, 5.02015s/12 iters), loss = 2.39038
I0428 20:47:15.732811 12706 solver.cpp:237] Train net output #0: loss = 2.39038 (* 1 = 2.39038 loss)
I0428 20:47:15.732818 12706 sgd_solver.cpp:105] Iteration 2580, lr = 0.01
I0428 20:47:20.720162 12706 solver.cpp:218] Iteration 2592 (2.40618 iter/s, 4.98715s/12 iters), loss = 2.14876
I0428 20:47:20.720288 12706 solver.cpp:237] Train net output #0: loss = 2.14876 (* 1 = 2.14876 loss)
I0428 20:47:20.720296 12706 sgd_solver.cpp:105] Iteration 2592, lr = 0.01
I0428 20:47:25.755939 12706 solver.cpp:218] Iteration 2604 (2.38311 iter/s, 5.03545s/12 iters), loss = 2.39977
I0428 20:47:25.755985 12706 solver.cpp:237] Train net output #0: loss = 2.39977 (* 1 = 2.39977 loss)
I0428 20:47:25.755995 12706 sgd_solver.cpp:105] Iteration 2604, lr = 0.01
I0428 20:47:30.723649 12706 solver.cpp:218] Iteration 2616 (2.41572 iter/s, 4.96746s/12 iters), loss = 2.61069
I0428 20:47:30.723695 12706 solver.cpp:237] Train net output #0: loss = 2.61069 (* 1 = 2.61069 loss)
I0428 20:47:30.723707 12706 sgd_solver.cpp:105] Iteration 2616, lr = 0.01
I0428 20:47:35.768278 12706 solver.cpp:218] Iteration 2628 (2.37889 iter/s, 5.04438s/12 iters), loss = 2.39432
I0428 20:47:35.768329 12706 solver.cpp:237] Train net output #0: loss = 2.39432 (* 1 = 2.39432 loss)
I0428 20:47:35.768342 12706 sgd_solver.cpp:105] Iteration 2628, lr = 0.01
I0428 20:47:36.194916 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:47:40.797978 12706 solver.cpp:218] Iteration 2640 (2.38595 iter/s, 5.02944s/12 iters), loss = 2.0814
I0428 20:47:40.798028 12706 solver.cpp:237] Train net output #0: loss = 2.0814 (* 1 = 2.0814 loss)
I0428 20:47:40.798039 12706 sgd_solver.cpp:105] Iteration 2640, lr = 0.01
I0428 20:47:45.332439 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2652.caffemodel
I0428 20:47:48.468888 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2652.solverstate
I0428 20:47:53.198165 12706 solver.cpp:330] Iteration 2652, Testing net (#0)
I0428 20:47:53.198292 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:47:56.498179 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:47:57.714661 12706 solver.cpp:397] Test net output #0: accuracy = 0.290441
I0428 20:47:57.714690 12706 solver.cpp:397] Test net output #1: loss = 3.06892 (* 1 = 3.06892 loss)
I0428 20:47:57.805779 12706 solver.cpp:218] Iteration 2652 (0.705588 iter/s, 17.0071s/12 iters), loss = 2.20767
I0428 20:47:57.805830 12706 solver.cpp:237] Train net output #0: loss = 2.20767 (* 1 = 2.20767 loss)
I0428 20:47:57.805841 12706 sgd_solver.cpp:105] Iteration 2652, lr = 0.01
I0428 20:48:01.942234 12706 solver.cpp:218] Iteration 2664 (2.90119 iter/s, 4.13623s/12 iters), loss = 2.54731
I0428 20:48:01.942272 12706 solver.cpp:237] Train net output #0: loss = 2.54731 (* 1 = 2.54731 loss)
I0428 20:48:01.942281 12706 sgd_solver.cpp:105] Iteration 2664, lr = 0.01
I0428 20:48:06.934162 12706 solver.cpp:218] Iteration 2676 (2.404 iter/s, 4.99169s/12 iters), loss = 2.29276
I0428 20:48:06.934197 12706 solver.cpp:237] Train net output #0: loss = 2.29276 (* 1 = 2.29276 loss)
I0428 20:48:06.934204 12706 sgd_solver.cpp:105] Iteration 2676, lr = 0.01
I0428 20:48:11.890655 12706 solver.cpp:218] Iteration 2688 (2.42118 iter/s, 4.95625s/12 iters), loss = 2.22611
I0428 20:48:11.890691 12706 solver.cpp:237] Train net output #0: loss = 2.22611 (* 1 = 2.22611 loss)
I0428 20:48:11.890697 12706 sgd_solver.cpp:105] Iteration 2688, lr = 0.01
I0428 20:48:16.875277 12706 solver.cpp:218] Iteration 2700 (2.40752 iter/s, 4.98438s/12 iters), loss = 2.02233
I0428 20:48:16.875329 12706 solver.cpp:237] Train net output #0: loss = 2.02233 (* 1 = 2.02233 loss)
I0428 20:48:16.875340 12706 sgd_solver.cpp:105] Iteration 2700, lr = 0.01
I0428 20:48:21.816076 12706 solver.cpp:218] Iteration 2712 (2.42888 iter/s, 4.94054s/12 iters), loss = 2.08866
I0428 20:48:21.816134 12706 solver.cpp:237] Train net output #0: loss = 2.08866 (* 1 = 2.08866 loss)
I0428 20:48:21.816145 12706 sgd_solver.cpp:105] Iteration 2712, lr = 0.01
I0428 20:48:26.796190 12706 solver.cpp:218] Iteration 2724 (2.40971 iter/s, 4.97986s/12 iters), loss = 2.1254
I0428 20:48:26.796324 12706 solver.cpp:237] Train net output #0: loss = 2.1254 (* 1 = 2.1254 loss)
I0428 20:48:26.796334 12706 sgd_solver.cpp:105] Iteration 2724, lr = 0.01
I0428 20:48:29.360389 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:48:31.778570 12706 solver.cpp:218] Iteration 2736 (2.40865 iter/s, 4.98204s/12 iters), loss = 2.47892
I0428 20:48:31.778609 12706 solver.cpp:237] Train net output #0: loss = 2.47892 (* 1 = 2.47892 loss)
I0428 20:48:31.778616 12706 sgd_solver.cpp:105] Iteration 2736, lr = 0.01
I0428 20:48:36.812960 12706 solver.cpp:218] Iteration 2748 (2.38372 iter/s, 5.03414s/12 iters), loss = 2.42429
I0428 20:48:36.813001 12706 solver.cpp:237] Train net output #0: loss = 2.42429 (* 1 = 2.42429 loss)
I0428 20:48:36.813009 12706 sgd_solver.cpp:105] Iteration 2748, lr = 0.01
I0428 20:48:38.841418 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2754.caffemodel
I0428 20:48:43.535764 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2754.solverstate
I0428 20:48:45.861268 12706 solver.cpp:330] Iteration 2754, Testing net (#0)
I0428 20:48:45.861287 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:48:48.926609 12706 blocking_queue.cpp:49] Waiting for data
I0428 20:48:49.167778 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:48:50.278692 12706 solver.cpp:397] Test net output #0: accuracy = 0.278186
I0428 20:48:50.278728 12706 solver.cpp:397] Test net output #1: loss = 3.12764 (* 1 = 3.12764 loss)
I0428 20:48:52.149085 12706 solver.cpp:218] Iteration 2760 (0.782499 iter/s, 15.3355s/12 iters), loss = 2.29391
I0428 20:48:52.149125 12706 solver.cpp:237] Train net output #0: loss = 2.29391 (* 1 = 2.29391 loss)
I0428 20:48:52.149132 12706 sgd_solver.cpp:105] Iteration 2760, lr = 0.01
I0428 20:48:57.217475 12706 solver.cpp:218] Iteration 2772 (2.36773 iter/s, 5.06814s/12 iters), loss = 2.06439
I0428 20:48:57.217674 12706 solver.cpp:237] Train net output #0: loss = 2.06439 (* 1 = 2.06439 loss)
I0428 20:48:57.217687 12706 sgd_solver.cpp:105] Iteration 2772, lr = 0.01
I0428 20:49:02.223785 12706 solver.cpp:218] Iteration 2784 (2.39716 iter/s, 5.00592s/12 iters), loss = 2.14633
I0428 20:49:02.223819 12706 solver.cpp:237] Train net output #0: loss = 2.14633 (* 1 = 2.14633 loss)
I0428 20:49:02.223825 12706 sgd_solver.cpp:105] Iteration 2784, lr = 0.01
I0428 20:49:07.207787 12706 solver.cpp:218] Iteration 2796 (2.40782 iter/s, 4.98376s/12 iters), loss = 2.03855
I0428 20:49:07.207835 12706 solver.cpp:237] Train net output #0: loss = 2.03855 (* 1 = 2.03855 loss)
I0428 20:49:07.207845 12706 sgd_solver.cpp:105] Iteration 2796, lr = 0.01
I0428 20:49:12.404155 12706 solver.cpp:218] Iteration 2808 (2.30942 iter/s, 5.19611s/12 iters), loss = 2.1553
I0428 20:49:12.404193 12706 solver.cpp:237] Train net output #0: loss = 2.1553 (* 1 = 2.1553 loss)
I0428 20:49:12.404201 12706 sgd_solver.cpp:105] Iteration 2808, lr = 0.01
I0428 20:49:17.546159 12706 solver.cpp:218] Iteration 2820 (2.33383 iter/s, 5.14176s/12 iters), loss = 2.03766
I0428 20:49:17.546196 12706 solver.cpp:237] Train net output #0: loss = 2.03766 (* 1 = 2.03766 loss)
I0428 20:49:17.546203 12706 sgd_solver.cpp:105] Iteration 2820, lr = 0.01
I0428 20:49:22.346823 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:49:22.642498 12706 solver.cpp:218] Iteration 2832 (2.35475 iter/s, 5.09609s/12 iters), loss = 2.05044
I0428 20:49:22.642552 12706 solver.cpp:237] Train net output #0: loss = 2.05044 (* 1 = 2.05044 loss)
I0428 20:49:22.642563 12706 sgd_solver.cpp:105] Iteration 2832, lr = 0.01
I0428 20:49:27.739348 12706 solver.cpp:218] Iteration 2844 (2.35452 iter/s, 5.09658s/12 iters), loss = 2.14735
I0428 20:49:27.739459 12706 solver.cpp:237] Train net output #0: loss = 2.14735 (* 1 = 2.14735 loss)
I0428 20:49:27.739467 12706 sgd_solver.cpp:105] Iteration 2844, lr = 0.01
I0428 20:49:32.370990 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2856.caffemodel
I0428 20:49:35.417445 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2856.solverstate
I0428 20:49:37.715641 12706 solver.cpp:330] Iteration 2856, Testing net (#0)
I0428 20:49:37.715662 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:49:41.146922 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:49:42.430007 12706 solver.cpp:397] Test net output #0: accuracy = 0.302696
I0428 20:49:42.430054 12706 solver.cpp:397] Test net output #1: loss = 2.99282 (* 1 = 2.99282 loss)
I0428 20:49:42.521324 12706 solver.cpp:218] Iteration 2856 (0.811838 iter/s, 14.7813s/12 iters), loss = 2.2249
I0428 20:49:42.521366 12706 solver.cpp:237] Train net output #0: loss = 2.2249 (* 1 = 2.2249 loss)
I0428 20:49:42.521374 12706 sgd_solver.cpp:105] Iteration 2856, lr = 0.01
I0428 20:49:46.698514 12706 solver.cpp:218] Iteration 2868 (2.8729 iter/s, 4.17697s/12 iters), loss = 2.08411
I0428 20:49:46.698552 12706 solver.cpp:237] Train net output #0: loss = 2.08411 (* 1 = 2.08411 loss)
I0428 20:49:46.698560 12706 sgd_solver.cpp:105] Iteration 2868, lr = 0.01
I0428 20:49:51.704013 12706 solver.cpp:218] Iteration 2880 (2.39748 iter/s, 5.00526s/12 iters), loss = 2.24992
I0428 20:49:51.704046 12706 solver.cpp:237] Train net output #0: loss = 2.24992 (* 1 = 2.24992 loss)
I0428 20:49:51.704053 12706 sgd_solver.cpp:105] Iteration 2880, lr = 0.01
I0428 20:49:56.685253 12706 solver.cpp:218] Iteration 2892 (2.40915 iter/s, 4.981s/12 iters), loss = 2.28704
I0428 20:49:56.685293 12706 solver.cpp:237] Train net output #0: loss = 2.28704 (* 1 = 2.28704 loss)
I0428 20:49:56.685300 12706 sgd_solver.cpp:105] Iteration 2892, lr = 0.01
I0428 20:50:01.609107 12706 solver.cpp:218] Iteration 2904 (2.43724 iter/s, 4.92361s/12 iters), loss = 1.96764
I0428 20:50:01.609241 12706 solver.cpp:237] Train net output #0: loss = 1.96764 (* 1 = 1.96764 loss)
I0428 20:50:01.609249 12706 sgd_solver.cpp:105] Iteration 2904, lr = 0.01
I0428 20:50:06.597084 12706 solver.cpp:218] Iteration 2916 (2.40595 iter/s, 4.98764s/12 iters), loss = 2.03431
I0428 20:50:06.597131 12706 solver.cpp:237] Train net output #0: loss = 2.03431 (* 1 = 2.03431 loss)
I0428 20:50:06.597141 12706 sgd_solver.cpp:105] Iteration 2916, lr = 0.01
I0428 20:50:11.708240 12706 solver.cpp:218] Iteration 2928 (2.34792 iter/s, 5.1109s/12 iters), loss = 2.00585
I0428 20:50:11.708277 12706 solver.cpp:237] Train net output #0: loss = 2.00585 (* 1 = 2.00585 loss)
I0428 20:50:11.708285 12706 sgd_solver.cpp:105] Iteration 2928, lr = 0.01
I0428 20:50:13.520169 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:50:16.635074 12706 solver.cpp:218] Iteration 2940 (2.43576 iter/s, 4.9266s/12 iters), loss = 1.9404
I0428 20:50:16.635111 12706 solver.cpp:237] Train net output #0: loss = 1.9404 (* 1 = 1.9404 loss)
I0428 20:50:16.635118 12706 sgd_solver.cpp:105] Iteration 2940, lr = 0.01
I0428 20:50:21.721176 12706 solver.cpp:218] Iteration 2952 (2.35949 iter/s, 5.08585s/12 iters), loss = 1.95424
I0428 20:50:21.721218 12706 solver.cpp:237] Train net output #0: loss = 1.95424 (* 1 = 1.95424 loss)
I0428 20:50:21.721225 12706 sgd_solver.cpp:105] Iteration 2952, lr = 0.01
I0428 20:50:23.909996 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2958.caffemodel
I0428 20:50:26.884239 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2958.solverstate
I0428 20:50:29.184878 12706 solver.cpp:330] Iteration 2958, Testing net (#0)
I0428 20:50:29.184898 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:50:32.523823 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:50:33.714962 12706 solver.cpp:397] Test net output #0: accuracy = 0.311887
I0428 20:50:33.714996 12706 solver.cpp:397] Test net output #1: loss = 2.97709 (* 1 = 2.97709 loss)
I0428 20:50:35.496628 12706 solver.cpp:218] Iteration 2964 (0.871152 iter/s, 13.7749s/12 iters), loss = 1.9419
I0428 20:50:35.496670 12706 solver.cpp:237] Train net output #0: loss = 1.9419 (* 1 = 1.9419 loss)
I0428 20:50:35.496677 12706 sgd_solver.cpp:105] Iteration 2964, lr = 0.01
I0428 20:50:40.459736 12706 solver.cpp:218] Iteration 2976 (2.41796 iter/s, 4.96286s/12 iters), loss = 1.68951
I0428 20:50:40.459784 12706 solver.cpp:237] Train net output #0: loss = 1.68951 (* 1 = 1.68951 loss)
I0428 20:50:40.459794 12706 sgd_solver.cpp:105] Iteration 2976, lr = 0.01
I0428 20:50:45.666301 12706 solver.cpp:218] Iteration 2988 (2.3049 iter/s, 5.2063s/12 iters), loss = 2.00573
I0428 20:50:45.666337 12706 solver.cpp:237] Train net output #0: loss = 2.00573 (* 1 = 2.00573 loss)
I0428 20:50:45.666343 12706 sgd_solver.cpp:105] Iteration 2988, lr = 0.01
I0428 20:50:50.656255 12706 solver.cpp:218] Iteration 3000 (2.40495 iter/s, 4.98971s/12 iters), loss = 2.0621
I0428 20:50:50.656292 12706 solver.cpp:237] Train net output #0: loss = 2.0621 (* 1 = 2.0621 loss)
I0428 20:50:50.656301 12706 sgd_solver.cpp:105] Iteration 3000, lr = 0.01
I0428 20:50:55.707283 12706 solver.cpp:218] Iteration 3012 (2.37587 iter/s, 5.05078s/12 iters), loss = 1.72
I0428 20:50:55.707321 12706 solver.cpp:237] Train net output #0: loss = 1.72 (* 1 = 1.72 loss)
I0428 20:50:55.707329 12706 sgd_solver.cpp:105] Iteration 3012, lr = 0.01
I0428 20:51:00.702328 12706 solver.cpp:218] Iteration 3024 (2.4025 iter/s, 4.9948s/12 iters), loss = 1.75739
I0428 20:51:00.702366 12706 solver.cpp:237] Train net output #0: loss = 1.75739 (* 1 = 1.75739 loss)
I0428 20:51:00.702374 12706 sgd_solver.cpp:105] Iteration 3024, lr = 0.01
I0428 20:51:04.705543 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:51:05.723644 12706 solver.cpp:218] Iteration 3036 (2.38993 iter/s, 5.02107s/12 iters), loss = 1.75452
I0428 20:51:05.723681 12706 solver.cpp:237] Train net output #0: loss = 1.75452 (* 1 = 1.75452 loss)
I0428 20:51:05.723690 12706 sgd_solver.cpp:105] Iteration 3036, lr = 0.01
I0428 20:51:10.722301 12706 solver.cpp:218] Iteration 3048 (2.40076 iter/s, 4.99841s/12 iters), loss = 2.07051
I0428 20:51:10.722350 12706 solver.cpp:237] Train net output #0: loss = 2.07051 (* 1 = 2.07051 loss)
I0428 20:51:10.722359 12706 sgd_solver.cpp:105] Iteration 3048, lr = 0.01
I0428 20:51:15.303076 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3060.caffemodel
I0428 20:51:18.286619 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3060.solverstate
I0428 20:51:20.609342 12706 solver.cpp:330] Iteration 3060, Testing net (#0)
I0428 20:51:20.609369 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:51:23.827495 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:51:25.216012 12706 solver.cpp:397] Test net output #0: accuracy = 0.315564
I0428 20:51:25.216040 12706 solver.cpp:397] Test net output #1: loss = 2.98497 (* 1 = 2.98497 loss)
I0428 20:51:25.307257 12706 solver.cpp:218] Iteration 3060 (0.822801 iter/s, 14.5843s/12 iters), loss = 1.94554
I0428 20:51:25.307294 12706 solver.cpp:237] Train net output #0: loss = 1.94554 (* 1 = 1.94554 loss)
I0428 20:51:25.307303 12706 sgd_solver.cpp:105] Iteration 3060, lr = 0.01
I0428 20:51:29.376282 12706 solver.cpp:218] Iteration 3072 (2.94926 iter/s, 4.06882s/12 iters), loss = 2.19763
I0428 20:51:29.376317 12706 solver.cpp:237] Train net output #0: loss = 2.19763 (* 1 = 2.19763 loss)
I0428 20:51:29.376324 12706 sgd_solver.cpp:105] Iteration 3072, lr = 0.01
I0428 20:51:34.534584 12706 solver.cpp:218] Iteration 3084 (2.32646 iter/s, 5.15804s/12 iters), loss = 2.27134
I0428 20:51:34.534633 12706 solver.cpp:237] Train net output #0: loss = 2.27134 (* 1 = 2.27134 loss)
I0428 20:51:34.534646 12706 sgd_solver.cpp:105] Iteration 3084, lr = 0.01
I0428 20:51:39.574637 12706 solver.cpp:218] Iteration 3096 (2.38105 iter/s, 5.03979s/12 iters), loss = 1.8903
I0428 20:51:39.574790 12706 solver.cpp:237] Train net output #0: loss = 1.8903 (* 1 = 1.8903 loss)
I0428 20:51:39.574800 12706 sgd_solver.cpp:105] Iteration 3096, lr = 0.01
I0428 20:51:44.587414 12706 solver.cpp:218] Iteration 3108 (2.39405 iter/s, 5.01242s/12 iters), loss = 1.84893
I0428 20:51:44.587451 12706 solver.cpp:237] Train net output #0: loss = 1.84893 (* 1 = 1.84893 loss)
I0428 20:51:44.587460 12706 sgd_solver.cpp:105] Iteration 3108, lr = 0.01
I0428 20:51:49.560051 12706 solver.cpp:218] Iteration 3120 (2.41332 iter/s, 4.97239s/12 iters), loss = 1.8405
I0428 20:51:49.560087 12706 solver.cpp:237] Train net output #0: loss = 1.8405 (* 1 = 1.8405 loss)
I0428 20:51:49.560094 12706 sgd_solver.cpp:105] Iteration 3120, lr = 0.01
I0428 20:51:54.731068 12706 solver.cpp:218] Iteration 3132 (2.32074 iter/s, 5.17076s/12 iters), loss = 1.65026
I0428 20:51:54.731109 12706 solver.cpp:237] Train net output #0: loss = 1.65026 (* 1 = 1.65026 loss)
I0428 20:51:54.731117 12706 sgd_solver.cpp:105] Iteration 3132, lr = 0.01
I0428 20:51:55.822222 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:51:59.690349 12706 solver.cpp:218] Iteration 3144 (2.41983 iter/s, 4.95903s/12 iters), loss = 2.11418
I0428 20:51:59.690400 12706 solver.cpp:237] Train net output #0: loss = 2.11418 (* 1 = 2.11418 loss)
I0428 20:51:59.690412 12706 sgd_solver.cpp:105] Iteration 3144, lr = 0.01
I0428 20:52:04.670251 12706 solver.cpp:218] Iteration 3156 (2.40981 iter/s, 4.97965s/12 iters), loss = 1.93569
I0428 20:52:04.670289 12706 solver.cpp:237] Train net output #0: loss = 1.93569 (* 1 = 1.93569 loss)
I0428 20:52:04.670296 12706 sgd_solver.cpp:105] Iteration 3156, lr = 0.01
I0428 20:52:06.695538 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3162.caffemodel
I0428 20:52:09.669000 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3162.solverstate
I0428 20:52:11.976763 12706 solver.cpp:330] Iteration 3162, Testing net (#0)
I0428 20:52:11.976788 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:52:15.072616 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:52:16.345961 12706 solver.cpp:397] Test net output #0: accuracy = 0.305147
I0428 20:52:16.346010 12706 solver.cpp:397] Test net output #1: loss = 3.03807 (* 1 = 3.03807 loss)
I0428 20:52:18.114677 12706 solver.cpp:218] Iteration 3168 (0.892602 iter/s, 13.4438s/12 iters), loss = 1.90288
I0428 20:52:18.114732 12706 solver.cpp:237] Train net output #0: loss = 1.90288 (* 1 = 1.90288 loss)
I0428 20:52:18.114742 12706 sgd_solver.cpp:105] Iteration 3168, lr = 0.01
I0428 20:52:23.127926 12706 solver.cpp:218] Iteration 3180 (2.39378 iter/s, 5.01299s/12 iters), loss = 1.84002
I0428 20:52:23.127965 12706 solver.cpp:237] Train net output #0: loss = 1.84002 (* 1 = 1.84002 loss)
I0428 20:52:23.127974 12706 sgd_solver.cpp:105] Iteration 3180, lr = 0.01
I0428 20:52:28.124243 12706 solver.cpp:218] Iteration 3192 (2.40189 iter/s, 4.99606s/12 iters), loss = 1.82067
I0428 20:52:28.124282 12706 solver.cpp:237] Train net output #0: loss = 1.82067 (* 1 = 1.82067 loss)
I0428 20:52:28.124290 12706 sgd_solver.cpp:105] Iteration 3192, lr = 0.01
I0428 20:52:33.136332 12706 solver.cpp:218] Iteration 3204 (2.39433 iter/s, 5.01184s/12 iters), loss = 1.63259
I0428 20:52:33.136371 12706 solver.cpp:237] Train net output #0: loss = 1.63259 (* 1 = 1.63259 loss)
I0428 20:52:33.136379 12706 sgd_solver.cpp:105] Iteration 3204, lr = 0.01
I0428 20:52:38.139871 12706 solver.cpp:218] Iteration 3216 (2.39842 iter/s, 5.00329s/12 iters), loss = 1.52256
I0428 20:52:38.139911 12706 solver.cpp:237] Train net output #0: loss = 1.52256 (* 1 = 1.52256 loss)
I0428 20:52:38.139919 12706 sgd_solver.cpp:105] Iteration 3216, lr = 0.01
I0428 20:52:43.133862 12706 solver.cpp:218] Iteration 3228 (2.40301 iter/s, 4.99374s/12 iters), loss = 2.01588
I0428 20:52:43.133965 12706 solver.cpp:237] Train net output #0: loss = 2.01588 (* 1 = 2.01588 loss)
I0428 20:52:43.133973 12706 sgd_solver.cpp:105] Iteration 3228, lr = 0.01
I0428 20:52:46.372629 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:52:48.112821 12706 solver.cpp:218] Iteration 3240 (2.41029 iter/s, 4.97865s/12 iters), loss = 1.87571
I0428 20:52:48.112859 12706 solver.cpp:237] Train net output #0: loss = 1.87571 (* 1 = 1.87571 loss)
I0428 20:52:48.112867 12706 sgd_solver.cpp:105] Iteration 3240, lr = 0.01
I0428 20:52:53.086712 12706 solver.cpp:218] Iteration 3252 (2.41272 iter/s, 4.97364s/12 iters), loss = 1.85415
I0428 20:52:53.086758 12706 solver.cpp:237] Train net output #0: loss = 1.85415 (* 1 = 1.85415 loss)
I0428 20:52:53.086771 12706 sgd_solver.cpp:105] Iteration 3252, lr = 0.01
I0428 20:52:57.568791 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3264.caffemodel
I0428 20:53:00.717784 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3264.solverstate
I0428 20:53:08.918879 12706 solver.cpp:330] Iteration 3264, Testing net (#0)
I0428 20:53:08.918902 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:53:12.295682 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:53:13.635906 12706 solver.cpp:397] Test net output #0: accuracy = 0.301471
I0428 20:53:13.636119 12706 solver.cpp:397] Test net output #1: loss = 3.08197 (* 1 = 3.08197 loss)
I0428 20:53:13.727219 12706 solver.cpp:218] Iteration 3264 (0.581406 iter/s, 20.6396s/12 iters), loss = 2.03543
I0428 20:53:13.727277 12706 solver.cpp:237] Train net output #0: loss = 2.03543 (* 1 = 2.03543 loss)
I0428 20:53:13.727289 12706 sgd_solver.cpp:105] Iteration 3264, lr = 0.01
I0428 20:53:17.830021 12706 solver.cpp:218] Iteration 3276 (2.92499 iter/s, 4.10257s/12 iters), loss = 1.75056
I0428 20:53:17.830058 12706 solver.cpp:237] Train net output #0: loss = 1.75056 (* 1 = 1.75056 loss)
I0428 20:53:17.830066 12706 sgd_solver.cpp:105] Iteration 3276, lr = 0.01
I0428 20:53:22.782373 12706 solver.cpp:218] Iteration 3288 (2.42321 iter/s, 4.9521s/12 iters), loss = 2.00635
I0428 20:53:22.782421 12706 solver.cpp:237] Train net output #0: loss = 2.00635 (* 1 = 2.00635 loss)
I0428 20:53:22.782430 12706 sgd_solver.cpp:105] Iteration 3288, lr = 0.01
I0428 20:53:27.974404 12706 solver.cpp:218] Iteration 3300 (2.31135 iter/s, 5.19177s/12 iters), loss = 1.90967
I0428 20:53:27.974442 12706 solver.cpp:237] Train net output #0: loss = 1.90967 (* 1 = 1.90967 loss)
I0428 20:53:27.974452 12706 sgd_solver.cpp:105] Iteration 3300, lr = 0.01
I0428 20:53:32.954717 12706 solver.cpp:218] Iteration 3312 (2.40961 iter/s, 4.98006s/12 iters), loss = 1.57949
I0428 20:53:32.954756 12706 solver.cpp:237] Train net output #0: loss = 1.57949 (* 1 = 1.57949 loss)
I0428 20:53:32.954763 12706 sgd_solver.cpp:105] Iteration 3312, lr = 0.01
I0428 20:53:38.050678 12706 solver.cpp:218] Iteration 3324 (2.35492 iter/s, 5.09571s/12 iters), loss = 2.15909
I0428 20:53:38.050715 12706 solver.cpp:237] Train net output #0: loss = 2.15909 (* 1 = 2.15909 loss)
I0428 20:53:38.050722 12706 sgd_solver.cpp:105] Iteration 3324, lr = 0.01
I0428 20:53:43.044235 12706 solver.cpp:218] Iteration 3336 (2.40322 iter/s, 4.99331s/12 iters), loss = 1.94987
I0428 20:53:43.044273 12706 solver.cpp:237] Train net output #0: loss = 1.94987 (* 1 = 1.94987 loss)
I0428 20:53:43.044281 12706 sgd_solver.cpp:105] Iteration 3336, lr = 0.01
I0428 20:53:43.524535 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:53:48.129276 12706 solver.cpp:218] Iteration 3348 (2.35998 iter/s, 5.08479s/12 iters), loss = 1.68725
I0428 20:53:48.129370 12706 solver.cpp:237] Train net output #0: loss = 1.68725 (* 1 = 1.68725 loss)
I0428 20:53:48.129379 12706 sgd_solver.cpp:105] Iteration 3348, lr = 0.01
I0428 20:53:53.193868 12706 solver.cpp:218] Iteration 3360 (2.36953 iter/s, 5.06429s/12 iters), loss = 1.89194
I0428 20:53:53.193912 12706 solver.cpp:237] Train net output #0: loss = 1.89194 (* 1 = 1.89194 loss)
I0428 20:53:53.193922 12706 sgd_solver.cpp:105] Iteration 3360, lr = 0.01
I0428 20:53:55.285984 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3366.caffemodel
I0428 20:53:59.893292 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3366.solverstate
I0428 20:54:04.841800 12706 solver.cpp:330] Iteration 3366, Testing net (#0)
I0428 20:54:04.841821 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:54:07.898425 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:54:09.241488 12706 solver.cpp:397] Test net output #0: accuracy = 0.313726
I0428 20:54:09.241524 12706 solver.cpp:397] Test net output #1: loss = 3.03921 (* 1 = 3.03921 loss)
I0428 20:54:11.064164 12706 solver.cpp:218] Iteration 3372 (0.671534 iter/s, 17.8695s/12 iters), loss = 2.32568
I0428 20:54:11.064204 12706 solver.cpp:237] Train net output #0: loss = 2.32568 (* 1 = 2.32568 loss)
I0428 20:54:11.064213 12706 sgd_solver.cpp:105] Iteration 3372, lr = 0.0033
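The lr reported from this iteration onward drops from 0.01 to 0.0033, a factor of 0.33, with the change landing between iterations 3360 and 3372. That is consistent with Caffe's "step" lr_policy, lr = base_lr * gamma^floor(iter / stepsize). A minimal sketch of that schedule, assuming base_lr = 0.01, gamma = 0.33 and stepsize = 3366 (values inferred from the lr values and snapshot_iter_3366 entries in this log, not a quote of the Caffe implementation):

    # Sketch of the "step" learning-rate schedule; the constants are assumptions
    # inferred from the lr values printed in this log, not read from the solver.
    def step_lr(base_lr, gamma, stepsize, it):
        return base_lr * gamma ** (it // stepsize)

    print(step_lr(0.01, 0.33, 3366, 3360))  # 0.01    -> last display interval before the step
    print(step_lr(0.01, 0.33, 3366, 3372))  # ~0.0033 -> the value logged from here on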
I0428 20:54:16.064224 12706 solver.cpp:218] Iteration 3384 (2.40009 iter/s, 4.99981s/12 iters), loss = 1.76731
I0428 20:54:16.064265 12706 solver.cpp:237] Train net output #0: loss = 1.76731 (* 1 = 1.76731 loss)
I0428 20:54:16.064273 12706 sgd_solver.cpp:105] Iteration 3384, lr = 0.0033
I0428 20:54:21.083987 12706 solver.cpp:218] Iteration 3396 (2.39067 iter/s, 5.01951s/12 iters), loss = 1.44014
I0428 20:54:21.084168 12706 solver.cpp:237] Train net output #0: loss = 1.44014 (* 1 = 1.44014 loss)
I0428 20:54:21.084177 12706 sgd_solver.cpp:105] Iteration 3396, lr = 0.0033
I0428 20:54:25.965238 12706 solver.cpp:218] Iteration 3408 (2.45858 iter/s, 4.88087s/12 iters), loss = 1.31226
I0428 20:54:25.965277 12706 solver.cpp:237] Train net output #0: loss = 1.31226 (* 1 = 1.31226 loss)
I0428 20:54:25.965286 12706 sgd_solver.cpp:105] Iteration 3408, lr = 0.0033
I0428 20:54:30.980239 12706 solver.cpp:218] Iteration 3420 (2.39294 iter/s, 5.01474s/12 iters), loss = 1.28755
I0428 20:54:30.980288 12706 solver.cpp:237] Train net output #0: loss = 1.28755 (* 1 = 1.28755 loss)
I0428 20:54:30.980299 12706 sgd_solver.cpp:105] Iteration 3420, lr = 0.0033
I0428 20:54:36.008121 12706 solver.cpp:218] Iteration 3432 (2.38682 iter/s, 5.02762s/12 iters), loss = 1.34279
I0428 20:54:36.008169 12706 solver.cpp:237] Train net output #0: loss = 1.34279 (* 1 = 1.34279 loss)
I0428 20:54:36.008179 12706 sgd_solver.cpp:105] Iteration 3432, lr = 0.0033
I0428 20:54:38.736656 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:54:41.140547 12706 solver.cpp:218] Iteration 3444 (2.33819 iter/s, 5.13217s/12 iters), loss = 1.21308
I0428 20:54:41.140584 12706 solver.cpp:237] Train net output #0: loss = 1.21308 (* 1 = 1.21308 loss)
I0428 20:54:41.140591 12706 sgd_solver.cpp:105] Iteration 3444, lr = 0.0033
I0428 20:54:46.192816 12706 solver.cpp:218] Iteration 3456 (2.37529 iter/s, 5.05202s/12 iters), loss = 1.00225
I0428 20:54:46.192847 12706 solver.cpp:237] Train net output #0: loss = 1.00225 (* 1 = 1.00225 loss)
I0428 20:54:46.192854 12706 sgd_solver.cpp:105] Iteration 3456, lr = 0.0033
I0428 20:54:50.786427 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3468.caffemodel
I0428 20:54:53.769554 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3468.solverstate
I0428 20:54:56.078912 12706 solver.cpp:330] Iteration 3468, Testing net (#0)
I0428 20:54:56.078934 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:54:56.530164 12706 blocking_queue.cpp:49] Waiting for data
I0428 20:54:59.196298 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:55:00.581743 12706 solver.cpp:397] Test net output #0: accuracy = 0.382966
I0428 20:55:00.581781 12706 solver.cpp:397] Test net output #1: loss = 2.75536 (* 1 = 2.75536 loss)
I0428 20:55:00.672997 12706 solver.cpp:218] Iteration 3468 (0.828754 iter/s, 14.4796s/12 iters), loss = 1.37412
I0428 20:55:00.673048 12706 solver.cpp:237] Train net output #0: loss = 1.37412 (* 1 = 1.37412 loss)
I0428 20:55:00.673058 12706 sgd_solver.cpp:105] Iteration 3468, lr = 0.0033
I0428 20:55:04.801815 12706 solver.cpp:218] Iteration 3480 (2.90655 iter/s, 4.1286s/12 iters), loss = 1.3067
I0428 20:55:04.801842 12706 solver.cpp:237] Train net output #0: loss = 1.3067 (* 1 = 1.3067 loss)
I0428 20:55:04.801851 12706 sgd_solver.cpp:105] Iteration 3480, lr = 0.0033
I0428 20:55:09.840723 12706 solver.cpp:218] Iteration 3492 (2.38158 iter/s, 5.03866s/12 iters), loss = 1.10601
I0428 20:55:09.840762 12706 solver.cpp:237] Train net output #0: loss = 1.10601 (* 1 = 1.10601 loss)
I0428 20:55:09.840770 12706 sgd_solver.cpp:105] Iteration 3492, lr = 0.0033
I0428 20:55:14.824713 12706 solver.cpp:218] Iteration 3504 (2.40783 iter/s, 4.98374s/12 iters), loss = 1.0471
I0428 20:55:14.824750 12706 solver.cpp:237] Train net output #0: loss = 1.0471 (* 1 = 1.0471 loss)
I0428 20:55:14.824757 12706 sgd_solver.cpp:105] Iteration 3504, lr = 0.0033
I0428 20:55:19.804764 12706 solver.cpp:218] Iteration 3516 (2.40973 iter/s, 4.9798s/12 iters), loss = 1.08384
I0428 20:55:19.804813 12706 solver.cpp:237] Train net output #0: loss = 1.08384 (* 1 = 1.08384 loss)
I0428 20:55:19.804824 12706 sgd_solver.cpp:105] Iteration 3516, lr = 0.0033
I0428 20:55:24.751008 12706 solver.cpp:218] Iteration 3528 (2.42621 iter/s, 4.94599s/12 iters), loss = 0.94875
I0428 20:55:24.751168 12706 solver.cpp:237] Train net output #0: loss = 0.94875 (* 1 = 0.94875 loss)
I0428 20:55:24.751178 12706 sgd_solver.cpp:105] Iteration 3528, lr = 0.0033
I0428 20:55:29.435154 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:55:29.699347 12706 solver.cpp:218] Iteration 3540 (2.42524 iter/s, 4.94797s/12 iters), loss = 1.02435
I0428 20:55:29.699395 12706 solver.cpp:237] Train net output #0: loss = 1.02435 (* 1 = 1.02435 loss)
I0428 20:55:29.699405 12706 sgd_solver.cpp:105] Iteration 3540, lr = 0.0033
I0428 20:55:34.814465 12706 solver.cpp:218] Iteration 3552 (2.34611 iter/s, 5.11484s/12 iters), loss = 0.917648
I0428 20:55:34.814512 12706 solver.cpp:237] Train net output #0: loss = 0.917648 (* 1 = 0.917648 loss)
I0428 20:55:34.814524 12706 sgd_solver.cpp:105] Iteration 3552, lr = 0.0033
I0428 20:55:39.812384 12706 solver.cpp:218] Iteration 3564 (2.40112 iter/s, 4.99766s/12 iters), loss = 1.05389
I0428 20:55:39.812422 12706 solver.cpp:237] Train net output #0: loss = 1.05389 (* 1 = 1.05389 loss)
I0428 20:55:39.812428 12706 sgd_solver.cpp:105] Iteration 3564, lr = 0.0033
I0428 20:55:41.853960 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3570.caffemodel
I0428 20:55:47.546382 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3570.solverstate
I0428 20:55:50.868700 12706 solver.cpp:330] Iteration 3570, Testing net (#0)
I0428 20:55:50.868727 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:55:53.863324 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:55:55.315462 12706 solver.cpp:397] Test net output #0: accuracy = 0.38848
I0428 20:55:55.315551 12706 solver.cpp:397] Test net output #1: loss = 2.68436 (* 1 = 2.68436 loss)
I0428 20:55:57.128533 12706 solver.cpp:218] Iteration 3576 (0.693025 iter/s, 17.3154s/12 iters), loss = 0.741467
I0428 20:55:57.128592 12706 solver.cpp:237] Train net output #0: loss = 0.741467 (* 1 = 0.741467 loss)
I0428 20:55:57.128602 12706 sgd_solver.cpp:105] Iteration 3576, lr = 0.0033
I0428 20:56:02.155580 12706 solver.cpp:218] Iteration 3588 (2.38722 iter/s, 5.02677s/12 iters), loss = 1.11298
I0428 20:56:02.155624 12706 solver.cpp:237] Train net output #0: loss = 1.11298 (* 1 = 1.11298 loss)
I0428 20:56:02.155632 12706 sgd_solver.cpp:105] Iteration 3588, lr = 0.0033
I0428 20:56:07.177623 12706 solver.cpp:218] Iteration 3600 (2.38959 iter/s, 5.02179s/12 iters), loss = 0.742997
I0428 20:56:07.177660 12706 solver.cpp:237] Train net output #0: loss = 0.742997 (* 1 = 0.742997 loss)
I0428 20:56:07.177668 12706 sgd_solver.cpp:105] Iteration 3600, lr = 0.0033
I0428 20:56:12.064121 12706 solver.cpp:218] Iteration 3612 (2.45588 iter/s, 4.88624s/12 iters), loss = 0.723775
I0428 20:56:12.064172 12706 solver.cpp:237] Train net output #0: loss = 0.723775 (* 1 = 0.723775 loss)
I0428 20:56:12.064183 12706 sgd_solver.cpp:105] Iteration 3612, lr = 0.0033
I0428 20:56:17.089453 12706 solver.cpp:218] Iteration 3624 (2.38803 iter/s, 5.02506s/12 iters), loss = 0.833997
I0428 20:56:17.089501 12706 solver.cpp:237] Train net output #0: loss = 0.833997 (* 1 = 0.833997 loss)
I0428 20:56:17.089511 12706 sgd_solver.cpp:105] Iteration 3624, lr = 0.0033
I0428 20:56:22.115094 12706 solver.cpp:218] Iteration 3636 (2.38788 iter/s, 5.02538s/12 iters), loss = 0.90391
I0428 20:56:22.115141 12706 solver.cpp:237] Train net output #0: loss = 0.90391 (* 1 = 0.90391 loss)
I0428 20:56:22.115150 12706 sgd_solver.cpp:105] Iteration 3636, lr = 0.0033
I0428 20:56:23.965716 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:56:27.134109 12706 solver.cpp:218] Iteration 3648 (2.39103 iter/s, 5.01876s/12 iters), loss = 0.64161
I0428 20:56:27.134397 12706 solver.cpp:237] Train net output #0: loss = 0.64161 (* 1 = 0.64161 loss)
I0428 20:56:27.134407 12706 sgd_solver.cpp:105] Iteration 3648, lr = 0.0033
I0428 20:56:32.328914 12706 solver.cpp:218] Iteration 3660 (2.31023 iter/s, 5.1943s/12 iters), loss = 0.743666
I0428 20:56:32.328953 12706 solver.cpp:237] Train net output #0: loss = 0.743666 (* 1 = 0.743666 loss)
I0428 20:56:32.328960 12706 sgd_solver.cpp:105] Iteration 3660, lr = 0.0033
I0428 20:56:37.001492 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3672.caffemodel
I0428 20:56:40.358367 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3672.solverstate
I0428 20:56:43.158608 12706 solver.cpp:330] Iteration 3672, Testing net (#0)
I0428 20:56:43.158635 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:56:46.179953 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:56:47.654619 12706 solver.cpp:397] Test net output #0: accuracy = 0.39951
I0428 20:56:47.654649 12706 solver.cpp:397] Test net output #1: loss = 2.68745 (* 1 = 2.68745 loss)
I0428 20:56:47.745784 12706 solver.cpp:218] Iteration 3672 (0.778402 iter/s, 15.4162s/12 iters), loss = 1.12168
I0428 20:56:47.745828 12706 solver.cpp:237] Train net output #0: loss = 1.12168 (* 1 = 1.12168 loss)
I0428 20:56:47.745836 12706 sgd_solver.cpp:105] Iteration 3672, lr = 0.0033
I0428 20:56:51.828270 12706 solver.cpp:218] Iteration 3684 (2.93954 iter/s, 4.08227s/12 iters), loss = 0.974605
I0428 20:56:51.828306 12706 solver.cpp:237] Train net output #0: loss = 0.974605 (* 1 = 0.974605 loss)
I0428 20:56:51.828313 12706 sgd_solver.cpp:105] Iteration 3684, lr = 0.0033
I0428 20:56:56.810272 12706 solver.cpp:218] Iteration 3696 (2.4088 iter/s, 4.98174s/12 iters), loss = 1.07041
I0428 20:56:56.810320 12706 solver.cpp:237] Train net output #0: loss = 1.07041 (* 1 = 1.07041 loss)
I0428 20:56:56.810330 12706 sgd_solver.cpp:105] Iteration 3696, lr = 0.0033
I0428 20:57:02.353178 12706 solver.cpp:218] Iteration 3708 (2.16504 iter/s, 5.54263s/12 iters), loss = 0.899311
I0428 20:57:02.353754 12706 solver.cpp:237] Train net output #0: loss = 0.899311 (* 1 = 0.899311 loss)
I0428 20:57:02.353763 12706 sgd_solver.cpp:105] Iteration 3708, lr = 0.0033
I0428 20:57:07.391300 12706 solver.cpp:218] Iteration 3720 (2.38221 iter/s, 5.03734s/12 iters), loss = 0.846305
I0428 20:57:07.391347 12706 solver.cpp:237] Train net output #0: loss = 0.846305 (* 1 = 0.846305 loss)
I0428 20:57:07.391357 12706 sgd_solver.cpp:105] Iteration 3720, lr = 0.0033
I0428 20:57:12.371568 12706 solver.cpp:218] Iteration 3732 (2.40963 iter/s, 4.98002s/12 iters), loss = 0.931289
I0428 20:57:12.371606 12706 solver.cpp:237] Train net output #0: loss = 0.931289 (* 1 = 0.931289 loss)
I0428 20:57:12.371614 12706 sgd_solver.cpp:105] Iteration 3732, lr = 0.0033
I0428 20:57:16.403931 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:57:17.396724 12706 solver.cpp:218] Iteration 3744 (2.38811 iter/s, 5.0249s/12 iters), loss = 0.516166
I0428 20:57:17.396780 12706 solver.cpp:237] Train net output #0: loss = 0.516166 (* 1 = 0.516166 loss)
I0428 20:57:17.396793 12706 sgd_solver.cpp:105] Iteration 3744, lr = 0.0033
I0428 20:57:22.419921 12706 solver.cpp:218] Iteration 3756 (2.38904 iter/s, 5.02293s/12 iters), loss = 0.62962
I0428 20:57:22.419962 12706 solver.cpp:237] Train net output #0: loss = 0.62962 (* 1 = 0.62962 loss)
I0428 20:57:22.419970 12706 sgd_solver.cpp:105] Iteration 3756, lr = 0.0033
I0428 20:57:27.502810 12706 solver.cpp:218] Iteration 3768 (2.36098 iter/s, 5.08263s/12 iters), loss = 0.65621
I0428 20:57:27.502847 12706 solver.cpp:237] Train net output #0: loss = 0.65621 (* 1 = 0.65621 loss)
I0428 20:57:27.502854 12706 sgd_solver.cpp:105] Iteration 3768, lr = 0.0033
I0428 20:57:29.661043 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3774.caffemodel
I0428 20:57:33.293535 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3774.solverstate
I0428 20:57:36.911020 12706 solver.cpp:330] Iteration 3774, Testing net (#0)
I0428 20:57:36.911041 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:57:39.981338 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:57:41.737421 12706 solver.cpp:397] Test net output #0: accuracy = 0.393995
I0428 20:57:41.737452 12706 solver.cpp:397] Test net output #1: loss = 2.73793 (* 1 = 2.73793 loss)
I0428 20:57:43.655933 12706 solver.cpp:218] Iteration 3780 (0.742922 iter/s, 16.1524s/12 iters), loss = 0.934682
I0428 20:57:43.655977 12706 solver.cpp:237] Train net output #0: loss = 0.934682 (* 1 = 0.934682 loss)
I0428 20:57:43.655985 12706 sgd_solver.cpp:105] Iteration 3780, lr = 0.0033
I0428 20:57:48.635679 12706 solver.cpp:218] Iteration 3792 (2.40989 iter/s, 4.97949s/12 iters), loss = 0.67972
I0428 20:57:48.635720 12706 solver.cpp:237] Train net output #0: loss = 0.67972 (* 1 = 0.67972 loss)
I0428 20:57:48.635728 12706 sgd_solver.cpp:105] Iteration 3792, lr = 0.0033
I0428 20:57:53.666201 12706 solver.cpp:218] Iteration 3804 (2.38556 iter/s, 5.03026s/12 iters), loss = 0.914784
I0428 20:57:53.666241 12706 solver.cpp:237] Train net output #0: loss = 0.914784 (* 1 = 0.914784 loss)
I0428 20:57:53.666250 12706 sgd_solver.cpp:105] Iteration 3804, lr = 0.0033
I0428 20:57:58.803123 12706 solver.cpp:218] Iteration 3816 (2.33615 iter/s, 5.13666s/12 iters), loss = 0.748039
I0428 20:57:58.803164 12706 solver.cpp:237] Train net output #0: loss = 0.748039 (* 1 = 0.748039 loss)
I0428 20:57:58.803170 12706 sgd_solver.cpp:105] Iteration 3816, lr = 0.0033
I0428 20:58:03.877418 12706 solver.cpp:218] Iteration 3828 (2.36498 iter/s, 5.07404s/12 iters), loss = 0.708493
I0428 20:58:03.877521 12706 solver.cpp:237] Train net output #0: loss = 0.708493 (* 1 = 0.708493 loss)
I0428 20:58:03.877530 12706 sgd_solver.cpp:105] Iteration 3828, lr = 0.0033
I0428 20:58:08.875787 12706 solver.cpp:218] Iteration 3840 (2.40093 iter/s, 4.99806s/12 iters), loss = 0.709723
I0428 20:58:08.875821 12706 solver.cpp:237] Train net output #0: loss = 0.709723 (* 1 = 0.709723 loss)
I0428 20:58:08.875829 12706 sgd_solver.cpp:105] Iteration 3840, lr = 0.0033
I0428 20:58:10.011015 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:58:14.065366 12706 solver.cpp:218] Iteration 3852 (2.31244 iter/s, 5.18932s/12 iters), loss = 0.757948
I0428 20:58:14.065405 12706 solver.cpp:237] Train net output #0: loss = 0.757948 (* 1 = 0.757948 loss)
I0428 20:58:14.065413 12706 sgd_solver.cpp:105] Iteration 3852, lr = 0.0033
I0428 20:58:19.348914 12706 solver.cpp:218] Iteration 3864 (2.27131 iter/s, 5.28329s/12 iters), loss = 0.552474
I0428 20:58:19.348953 12706 solver.cpp:237] Train net output #0: loss = 0.552474 (* 1 = 0.552474 loss)
I0428 20:58:19.348961 12706 sgd_solver.cpp:105] Iteration 3864, lr = 0.0033
I0428 20:58:23.941633 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3876.caffemodel
I0428 20:58:26.949254 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3876.solverstate
I0428 20:58:34.410141 12706 solver.cpp:330] Iteration 3876, Testing net (#0)
I0428 20:58:34.410266 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:58:37.297827 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:58:38.844796 12706 solver.cpp:397] Test net output #0: accuracy = 0.414216
I0428 20:58:38.844832 12706 solver.cpp:397] Test net output #1: loss = 2.75932 (* 1 = 2.75932 loss)
I0428 20:58:38.934376 12706 solver.cpp:218] Iteration 3876 (0.612725 iter/s, 19.5846s/12 iters), loss = 0.803232
I0428 20:58:38.934417 12706 solver.cpp:237] Train net output #0: loss = 0.803232 (* 1 = 0.803232 loss)
I0428 20:58:38.934425 12706 sgd_solver.cpp:105] Iteration 3876, lr = 0.0033
I0428 20:58:43.267928 12706 solver.cpp:218] Iteration 3888 (2.76923 iter/s, 4.33333s/12 iters), loss = 0.751456
I0428 20:58:43.267968 12706 solver.cpp:237] Train net output #0: loss = 0.751456 (* 1 = 0.751456 loss)
I0428 20:58:43.267976 12706 sgd_solver.cpp:105] Iteration 3888, lr = 0.0033
I0428 20:58:48.277220 12706 solver.cpp:218] Iteration 3900 (2.39567 iter/s, 5.00903s/12 iters), loss = 0.597172
I0428 20:58:48.277268 12706 solver.cpp:237] Train net output #0: loss = 0.597172 (* 1 = 0.597172 loss)
I0428 20:58:48.277279 12706 sgd_solver.cpp:105] Iteration 3900, lr = 0.0033
I0428 20:58:53.256743 12706 solver.cpp:218] Iteration 3912 (2.41 iter/s, 4.97926s/12 iters), loss = 0.743271
I0428 20:58:53.256796 12706 solver.cpp:237] Train net output #0: loss = 0.743271 (* 1 = 0.743271 loss)
I0428 20:58:53.256809 12706 sgd_solver.cpp:105] Iteration 3912, lr = 0.0033
I0428 20:58:58.247506 12706 solver.cpp:218] Iteration 3924 (2.40457 iter/s, 4.9905s/12 iters), loss = 0.674305
I0428 20:58:58.247545 12706 solver.cpp:237] Train net output #0: loss = 0.674305 (* 1 = 0.674305 loss)
I0428 20:58:58.247551 12706 sgd_solver.cpp:105] Iteration 3924, lr = 0.0033
I0428 20:59:03.360623 12706 solver.cpp:218] Iteration 3936 (2.34702 iter/s, 5.11286s/12 iters), loss = 0.828942
I0428 20:59:03.360662 12706 solver.cpp:237] Train net output #0: loss = 0.828942 (* 1 = 0.828942 loss)
I0428 20:59:03.360669 12706 sgd_solver.cpp:105] Iteration 3936, lr = 0.0033
I0428 20:59:06.705875 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:59:08.321884 12706 solver.cpp:218] Iteration 3948 (2.41886 iter/s, 4.96101s/12 iters), loss = 0.803366
I0428 20:59:08.321925 12706 solver.cpp:237] Train net output #0: loss = 0.803366 (* 1 = 0.803366 loss)
I0428 20:59:08.321933 12706 sgd_solver.cpp:105] Iteration 3948, lr = 0.0033
I0428 20:59:13.274102 12706 solver.cpp:218] Iteration 3960 (2.42328 iter/s, 4.95197s/12 iters), loss = 0.570523
I0428 20:59:13.274139 12706 solver.cpp:237] Train net output #0: loss = 0.570523 (* 1 = 0.570523 loss)
I0428 20:59:13.274147 12706 sgd_solver.cpp:105] Iteration 3960, lr = 0.0033
I0428 20:59:18.277719 12706 solver.cpp:218] Iteration 3972 (2.39839 iter/s, 5.00336s/12 iters), loss = 0.704497
I0428 20:59:18.277772 12706 solver.cpp:237] Train net output #0: loss = 0.704497 (* 1 = 0.704497 loss)
I0428 20:59:18.277782 12706 sgd_solver.cpp:105] Iteration 3972, lr = 0.0033
I0428 20:59:20.351130 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3978.caffemodel
I0428 20:59:23.504894 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3978.solverstate
I0428 20:59:26.329298 12706 solver.cpp:330] Iteration 3978, Testing net (#0)
I0428 20:59:26.329317 12706 net.cpp:676] Ignoring source layer train-data
I0428 20:59:29.116284 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 20:59:30.699587 12706 solver.cpp:397] Test net output #0: accuracy = 0.409926
I0428 20:59:30.699621 12706 solver.cpp:397] Test net output #1: loss = 2.73833 (* 1 = 2.73833 loss)
I0428 20:59:32.626076 12706 solver.cpp:218] Iteration 3984 (0.83637 iter/s, 14.3477s/12 iters), loss = 0.631313
I0428 20:59:32.626122 12706 solver.cpp:237] Train net output #0: loss = 0.631313 (* 1 = 0.631313 loss)
I0428 20:59:32.626133 12706 sgd_solver.cpp:105] Iteration 3984, lr = 0.0033
I0428 20:59:37.708612 12706 solver.cpp:218] Iteration 3996 (2.36115 iter/s, 5.08227s/12 iters), loss = 0.60415
I0428 20:59:37.708781 12706 solver.cpp:237] Train net output #0: loss = 0.60415 (* 1 = 0.60415 loss)
I0428 20:59:37.708791 12706 sgd_solver.cpp:105] Iteration 3996, lr = 0.0033
I0428 20:59:42.779599 12706 solver.cpp:218] Iteration 4008 (2.36658 iter/s, 5.0706s/12 iters), loss = 0.654009
I0428 20:59:42.779637 12706 solver.cpp:237] Train net output #0: loss = 0.654009 (* 1 = 0.654009 loss)
I0428 20:59:42.779646 12706 sgd_solver.cpp:105] Iteration 4008, lr = 0.0033
I0428 20:59:47.939836 12706 solver.cpp:218] Iteration 4020 (2.32559 iter/s, 5.15998s/12 iters), loss = 0.783342
I0428 20:59:47.939875 12706 solver.cpp:237] Train net output #0: loss = 0.783342 (* 1 = 0.783342 loss)
I0428 20:59:47.939882 12706 sgd_solver.cpp:105] Iteration 4020, lr = 0.0033
I0428 20:59:52.963829 12706 solver.cpp:218] Iteration 4032 (2.38866 iter/s, 5.02374s/12 iters), loss = 0.724368
I0428 20:59:52.963868 12706 solver.cpp:237] Train net output #0: loss = 0.724368 (* 1 = 0.724368 loss)
I0428 20:59:52.963876 12706 sgd_solver.cpp:105] Iteration 4032, lr = 0.0033
I0428 20:59:57.953364 12706 solver.cpp:218] Iteration 4044 (2.40516 iter/s, 4.98928s/12 iters), loss = 0.601831
I0428 20:59:57.953414 12706 solver.cpp:237] Train net output #0: loss = 0.601831 (* 1 = 0.601831 loss)
I0428 20:59:57.953423 12706 sgd_solver.cpp:105] Iteration 4044, lr = 0.0033
I0428 20:59:58.450372 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:00:02.976436 12706 solver.cpp:218] Iteration 4056 (2.3891 iter/s, 5.02281s/12 iters), loss = 0.582865
I0428 21:00:02.976473 12706 solver.cpp:237] Train net output #0: loss = 0.582865 (* 1 = 0.582865 loss)
I0428 21:00:02.976481 12706 sgd_solver.cpp:105] Iteration 4056, lr = 0.0033
I0428 21:00:08.006969 12706 solver.cpp:218] Iteration 4068 (2.38555 iter/s, 5.03029s/12 iters), loss = 0.576084
I0428 21:00:08.007037 12706 solver.cpp:237] Train net output #0: loss = 0.576084 (* 1 = 0.576084 loss)
I0428 21:00:08.007045 12706 sgd_solver.cpp:105] Iteration 4068, lr = 0.0033
I0428 21:00:12.680670 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4080.caffemodel
I0428 21:00:15.668421 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4080.solverstate
I0428 21:00:18.776067 12706 solver.cpp:330] Iteration 4080, Testing net (#0)
I0428 21:00:18.776089 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:00:21.531339 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:00:23.151340 12706 solver.cpp:397] Test net output #0: accuracy = 0.418505
I0428 21:00:23.151368 12706 solver.cpp:397] Test net output #1: loss = 2.75162 (* 1 = 2.75162 loss)
I0428 21:00:23.242347 12706 solver.cpp:218] Iteration 4080 (0.787676 iter/s, 15.2347s/12 iters), loss = 0.610506
I0428 21:00:23.242383 12706 solver.cpp:237] Train net output #0: loss = 0.610506 (* 1 = 0.610506 loss)
I0428 21:00:23.242393 12706 sgd_solver.cpp:105] Iteration 4080, lr = 0.0033
I0428 21:00:27.428232 12706 solver.cpp:218] Iteration 4092 (2.86693 iter/s, 4.18566s/12 iters), loss = 0.548254
I0428 21:00:27.428282 12706 solver.cpp:237] Train net output #0: loss = 0.548254 (* 1 = 0.548254 loss)
I0428 21:00:27.428290 12706 sgd_solver.cpp:105] Iteration 4092, lr = 0.0033
I0428 21:00:32.467375 12706 solver.cpp:218] Iteration 4104 (2.38148 iter/s, 5.03888s/12 iters), loss = 0.582824
I0428 21:00:32.467412 12706 solver.cpp:237] Train net output #0: loss = 0.582824 (* 1 = 0.582824 loss)
I0428 21:00:32.467420 12706 sgd_solver.cpp:105] Iteration 4104, lr = 0.0033
I0428 21:00:37.523690 12706 solver.cpp:218] Iteration 4116 (2.37339 iter/s, 5.05606s/12 iters), loss = 0.466728
I0428 21:00:37.523728 12706 solver.cpp:237] Train net output #0: loss = 0.466728 (* 1 = 0.466728 loss)
I0428 21:00:37.523736 12706 sgd_solver.cpp:105] Iteration 4116, lr = 0.0033
I0428 21:00:42.441450 12706 solver.cpp:218] Iteration 4128 (2.44026 iter/s, 4.91751s/12 iters), loss = 0.56833
I0428 21:00:42.441583 12706 solver.cpp:237] Train net output #0: loss = 0.56833 (* 1 = 0.56833 loss)
I0428 21:00:42.441592 12706 sgd_solver.cpp:105] Iteration 4128, lr = 0.0033
I0428 21:00:47.429478 12706 solver.cpp:218] Iteration 4140 (2.40593 iter/s, 4.98769s/12 iters), loss = 0.442506
I0428 21:00:47.429518 12706 solver.cpp:237] Train net output #0: loss = 0.442506 (* 1 = 0.442506 loss)
I0428 21:00:47.429527 12706 sgd_solver.cpp:105] Iteration 4140, lr = 0.0033
I0428 21:00:50.059468 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:00:52.417567 12706 solver.cpp:218] Iteration 4152 (2.40585 iter/s, 4.98783s/12 iters), loss = 0.58212
I0428 21:00:52.417618 12706 solver.cpp:237] Train net output #0: loss = 0.58212 (* 1 = 0.58212 loss)
I0428 21:00:52.417630 12706 sgd_solver.cpp:105] Iteration 4152, lr = 0.0033
I0428 21:00:53.996716 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:00:57.379143 12706 solver.cpp:218] Iteration 4164 (2.41871 iter/s, 4.96132s/12 iters), loss = 0.535365
I0428 21:00:57.379184 12706 solver.cpp:237] Train net output #0: loss = 0.535365 (* 1 = 0.535365 loss)
I0428 21:00:57.379190 12706 sgd_solver.cpp:105] Iteration 4164, lr = 0.0033
I0428 21:01:02.374049 12706 solver.cpp:218] Iteration 4176 (2.40257 iter/s, 4.99465s/12 iters), loss = 0.604523
I0428 21:01:02.374089 12706 solver.cpp:237] Train net output #0: loss = 0.604523 (* 1 = 0.604523 loss)
I0428 21:01:02.374097 12706 sgd_solver.cpp:105] Iteration 4176, lr = 0.0033
I0428 21:01:04.400128 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4182.caffemodel
I0428 21:01:07.499331 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4182.solverstate
I0428 21:01:13.234377 12706 solver.cpp:330] Iteration 4182, Testing net (#0)
I0428 21:01:13.234458 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:01:16.056891 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:01:17.744329 12706 solver.cpp:397] Test net output #0: accuracy = 0.419118
I0428 21:01:17.744364 12706 solver.cpp:397] Test net output #1: loss = 2.76492 (* 1 = 2.76492 loss)
I0428 21:01:19.524567 12706 solver.cpp:218] Iteration 4188 (0.699717 iter/s, 17.1498s/12 iters), loss = 0.375116
I0428 21:01:19.524608 12706 solver.cpp:237] Train net output #0: loss = 0.375116 (* 1 = 0.375116 loss)
I0428 21:01:19.524616 12706 sgd_solver.cpp:105] Iteration 4188, lr = 0.0033
I0428 21:01:24.517283 12706 solver.cpp:218] Iteration 4200 (2.40363 iter/s, 4.99246s/12 iters), loss = 0.673718
I0428 21:01:24.517334 12706 solver.cpp:237] Train net output #0: loss = 0.673718 (* 1 = 0.673718 loss)
I0428 21:01:24.517346 12706 sgd_solver.cpp:105] Iteration 4200, lr = 0.0033
I0428 21:01:29.795167 12706 solver.cpp:218] Iteration 4212 (2.27376 iter/s, 5.27761s/12 iters), loss = 0.485913
I0428 21:01:29.795219 12706 solver.cpp:237] Train net output #0: loss = 0.485913 (* 1 = 0.485913 loss)
I0428 21:01:29.795233 12706 sgd_solver.cpp:105] Iteration 4212, lr = 0.0033
I0428 21:01:34.827198 12706 solver.cpp:218] Iteration 4224 (2.38485 iter/s, 5.03176s/12 iters), loss = 0.571254
I0428 21:01:34.827250 12706 solver.cpp:237] Train net output #0: loss = 0.571254 (* 1 = 0.571254 loss)
I0428 21:01:34.827260 12706 sgd_solver.cpp:105] Iteration 4224, lr = 0.0033
I0428 21:01:40.015648 12706 solver.cpp:218] Iteration 4236 (2.31295 iter/s, 5.18818s/12 iters), loss = 0.480476
I0428 21:01:40.015683 12706 solver.cpp:237] Train net output #0: loss = 0.480476 (* 1 = 0.480476 loss)
I0428 21:01:40.015691 12706 sgd_solver.cpp:105] Iteration 4236, lr = 0.0033
I0428 21:01:44.734218 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:01:44.962615 12706 solver.cpp:218] Iteration 4248 (2.42585 iter/s, 4.94671s/12 iters), loss = 0.497084
I0428 21:01:44.962668 12706 solver.cpp:237] Train net output #0: loss = 0.497084 (* 1 = 0.497084 loss)
I0428 21:01:44.962679 12706 sgd_solver.cpp:105] Iteration 4248, lr = 0.0033
I0428 21:01:50.178326 12706 solver.cpp:218] Iteration 4260 (2.30086 iter/s, 5.21544s/12 iters), loss = 0.547842
I0428 21:01:50.178360 12706 solver.cpp:237] Train net output #0: loss = 0.547842 (* 1 = 0.547842 loss)
I0428 21:01:50.178367 12706 sgd_solver.cpp:105] Iteration 4260, lr = 0.0033
I0428 21:01:55.168821 12706 solver.cpp:218] Iteration 4272 (2.40469 iter/s, 4.99025s/12 iters), loss = 0.481587
I0428 21:01:55.168859 12706 solver.cpp:237] Train net output #0: loss = 0.481587 (* 1 = 0.481587 loss)
I0428 21:01:55.168866 12706 sgd_solver.cpp:105] Iteration 4272, lr = 0.0033
I0428 21:01:59.658695 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4284.caffemodel
I0428 21:02:02.658404 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4284.solverstate
I0428 21:02:04.979710 12706 solver.cpp:330] Iteration 4284, Testing net (#0)
I0428 21:02:04.979732 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:02:07.729063 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:02:09.441305 12706 solver.cpp:397] Test net output #0: accuracy = 0.422181
I0428 21:02:09.441341 12706 solver.cpp:397] Test net output #1: loss = 2.77777 (* 1 = 2.77777 loss)
I0428 21:02:09.532570 12706 solver.cpp:218] Iteration 4284 (0.835473 iter/s, 14.3631s/12 iters), loss = 0.444594
I0428 21:02:09.532619 12706 solver.cpp:237] Train net output #0: loss = 0.444594 (* 1 = 0.444594 loss)
I0428 21:02:09.532630 12706 sgd_solver.cpp:105] Iteration 4284, lr = 0.0033
I0428 21:02:13.669237 12706 solver.cpp:218] Iteration 4296 (2.90104 iter/s, 4.13644s/12 iters), loss = 0.510126
I0428 21:02:13.669278 12706 solver.cpp:237] Train net output #0: loss = 0.510126 (* 1 = 0.510126 loss)
I0428 21:02:13.669286 12706 sgd_solver.cpp:105] Iteration 4296, lr = 0.0033
I0428 21:02:18.848417 12706 solver.cpp:218] Iteration 4308 (2.31709 iter/s, 5.17892s/12 iters), loss = 0.39473
I0428 21:02:18.849205 12706 solver.cpp:237] Train net output #0: loss = 0.39473 (* 1 = 0.39473 loss)
I0428 21:02:18.849218 12706 sgd_solver.cpp:105] Iteration 4308, lr = 0.0033
I0428 21:02:23.852972 12706 solver.cpp:218] Iteration 4320 (2.3983 iter/s, 5.00355s/12 iters), loss = 0.536244
I0428 21:02:23.853025 12706 solver.cpp:237] Train net output #0: loss = 0.536244 (* 1 = 0.536244 loss)
I0428 21:02:23.853036 12706 sgd_solver.cpp:105] Iteration 4320, lr = 0.0033
I0428 21:02:28.835093 12706 solver.cpp:218] Iteration 4332 (2.40874 iter/s, 4.98186s/12 iters), loss = 0.5096
I0428 21:02:28.835137 12706 solver.cpp:237] Train net output #0: loss = 0.5096 (* 1 = 0.5096 loss)
I0428 21:02:28.835145 12706 sgd_solver.cpp:105] Iteration 4332, lr = 0.0033
I0428 21:02:33.919028 12706 solver.cpp:218] Iteration 4344 (2.3605 iter/s, 5.08367s/12 iters), loss = 0.411007
I0428 21:02:33.919081 12706 solver.cpp:237] Train net output #0: loss = 0.411007 (* 1 = 0.411007 loss)
I0428 21:02:33.919093 12706 sgd_solver.cpp:105] Iteration 4344, lr = 0.0033
I0428 21:02:35.778841 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:02:38.987841 12706 solver.cpp:218] Iteration 4356 (2.36754 iter/s, 5.06855s/12 iters), loss = 0.452706
I0428 21:02:38.987880 12706 solver.cpp:237] Train net output #0: loss = 0.452706 (* 1 = 0.452706 loss)
I0428 21:02:38.987887 12706 sgd_solver.cpp:105] Iteration 4356, lr = 0.0033
I0428 21:02:44.426163 12706 solver.cpp:218] Iteration 4368 (2.20667 iter/s, 5.43805s/12 iters), loss = 0.410315
I0428 21:02:44.426204 12706 solver.cpp:237] Train net output #0: loss = 0.410315 (* 1 = 0.410315 loss)
I0428 21:02:44.426211 12706 sgd_solver.cpp:105] Iteration 4368, lr = 0.0033
I0428 21:02:49.396032 12706 solver.cpp:218] Iteration 4380 (2.41467 iter/s, 4.96961s/12 iters), loss = 0.497885
I0428 21:02:49.396168 12706 solver.cpp:237] Train net output #0: loss = 0.497885 (* 1 = 0.497885 loss)
I0428 21:02:49.396178 12706 sgd_solver.cpp:105] Iteration 4380, lr = 0.0033
I0428 21:02:51.393647 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4386.caffemodel
I0428 21:02:55.893327 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4386.solverstate
I0428 21:02:58.281678 12706 solver.cpp:330] Iteration 4386, Testing net (#0)
I0428 21:02:58.281698 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:03:00.976323 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:03:02.722172 12706 solver.cpp:397] Test net output #0: accuracy = 0.426471
I0428 21:03:02.722206 12706 solver.cpp:397] Test net output #1: loss = 2.80813 (* 1 = 2.80813 loss)
I0428 21:03:04.579584 12706 solver.cpp:218] Iteration 4392 (0.790368 iter/s, 15.1828s/12 iters), loss = 0.535563
I0428 21:03:04.579625 12706 solver.cpp:237] Train net output #0: loss = 0.535563 (* 1 = 0.535563 loss)
I0428 21:03:04.579633 12706 sgd_solver.cpp:105] Iteration 4392, lr = 0.0033
I0428 21:03:09.628880 12706 solver.cpp:218] Iteration 4404 (2.37669 iter/s, 5.04904s/12 iters), loss = 0.482013
I0428 21:03:09.628923 12706 solver.cpp:237] Train net output #0: loss = 0.482013 (* 1 = 0.482013 loss)
I0428 21:03:09.628931 12706 sgd_solver.cpp:105] Iteration 4404, lr = 0.0033
I0428 21:03:14.588946 12706 solver.cpp:218] Iteration 4416 (2.41945 iter/s, 4.95981s/12 iters), loss = 0.359374
I0428 21:03:14.588986 12706 solver.cpp:237] Train net output #0: loss = 0.359374 (* 1 = 0.359374 loss)
I0428 21:03:14.588994 12706 sgd_solver.cpp:105] Iteration 4416, lr = 0.0033
I0428 21:03:19.608654 12706 solver.cpp:218] Iteration 4428 (2.3907 iter/s, 5.01945s/12 iters), loss = 0.309125
I0428 21:03:19.608783 12706 solver.cpp:237] Train net output #0: loss = 0.309125 (* 1 = 0.309125 loss)
I0428 21:03:19.608790 12706 sgd_solver.cpp:105] Iteration 4428, lr = 0.0033
I0428 21:03:24.619685 12706 solver.cpp:218] Iteration 4440 (2.39488 iter/s, 5.01069s/12 iters), loss = 0.529462
I0428 21:03:24.619730 12706 solver.cpp:237] Train net output #0: loss = 0.529462 (* 1 = 0.529462 loss)
I0428 21:03:24.619740 12706 sgd_solver.cpp:105] Iteration 4440, lr = 0.0033
I0428 21:03:28.706094 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:03:29.659024 12706 solver.cpp:218] Iteration 4452 (2.38139 iter/s, 5.03907s/12 iters), loss = 0.558297
I0428 21:03:29.659088 12706 solver.cpp:237] Train net output #0: loss = 0.558297 (* 1 = 0.558297 loss)
I0428 21:03:29.659103 12706 sgd_solver.cpp:105] Iteration 4452, lr = 0.0033
I0428 21:03:34.741684 12706 solver.cpp:218] Iteration 4464 (2.3611 iter/s, 5.08238s/12 iters), loss = 0.701994
I0428 21:03:34.741727 12706 solver.cpp:237] Train net output #0: loss = 0.701994 (* 1 = 0.701994 loss)
I0428 21:03:34.741734 12706 sgd_solver.cpp:105] Iteration 4464, lr = 0.0033
I0428 21:03:39.726780 12706 solver.cpp:218] Iteration 4476 (2.4073 iter/s, 4.98484s/12 iters), loss = 0.349113
I0428 21:03:39.726830 12706 solver.cpp:237] Train net output #0: loss = 0.349113 (* 1 = 0.349113 loss)
I0428 21:03:39.726840 12706 sgd_solver.cpp:105] Iteration 4476, lr = 0.0033
I0428 21:03:44.291810 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4488.caffemodel
I0428 21:03:51.687142 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4488.solverstate
I0428 21:03:55.986362 12706 solver.cpp:330] Iteration 4488, Testing net (#0)
I0428 21:03:55.986382 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:03:58.910629 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:04:00.932394 12706 solver.cpp:397] Test net output #0: accuracy = 0.438113
I0428 21:04:00.932423 12706 solver.cpp:397] Test net output #1: loss = 2.72651 (* 1 = 2.72651 loss)
I0428 21:04:01.023607 12706 solver.cpp:218] Iteration 4488 (0.563489 iter/s, 21.2959s/12 iters), loss = 0.419861
I0428 21:04:01.023664 12706 solver.cpp:237] Train net output #0: loss = 0.419861 (* 1 = 0.419861 loss)
I0428 21:04:01.023675 12706 sgd_solver.cpp:105] Iteration 4488, lr = 0.0033
I0428 21:04:06.435261 12706 solver.cpp:218] Iteration 4500 (2.21756 iter/s, 5.41136s/12 iters), loss = 0.522972
I0428 21:04:06.435338 12706 solver.cpp:237] Train net output #0: loss = 0.522972 (* 1 = 0.522972 loss)
I0428 21:04:06.435351 12706 sgd_solver.cpp:105] Iteration 4500, lr = 0.0033
I0428 21:04:13.196135 12706 solver.cpp:218] Iteration 4512 (1.77501 iter/s, 6.76052s/12 iters), loss = 0.561742
I0428 21:04:13.196189 12706 solver.cpp:237] Train net output #0: loss = 0.561742 (* 1 = 0.561742 loss)
I0428 21:04:13.196202 12706 sgd_solver.cpp:105] Iteration 4512, lr = 0.0033
I0428 21:04:19.988554 12706 solver.cpp:218] Iteration 4524 (1.7684 iter/s, 6.7858s/12 iters), loss = 0.363397
I0428 21:04:19.988602 12706 solver.cpp:237] Train net output #0: loss = 0.363397 (* 1 = 0.363397 loss)
I0428 21:04:19.988611 12706 sgd_solver.cpp:105] Iteration 4524, lr = 0.0033
I0428 21:04:26.636559 12706 solver.cpp:218] Iteration 4536 (1.80844 iter/s, 6.63554s/12 iters), loss = 0.287594
I0428 21:04:26.637001 12706 solver.cpp:237] Train net output #0: loss = 0.287594 (* 1 = 0.287594 loss)
I0428 21:04:26.637014 12706 sgd_solver.cpp:105] Iteration 4536, lr = 0.0033
I0428 21:04:33.184572 12706 solver.cpp:218] Iteration 4548 (1.83282 iter/s, 6.54729s/12 iters), loss = 0.633085
I0428 21:04:33.184633 12706 solver.cpp:237] Train net output #0: loss = 0.633085 (* 1 = 0.633085 loss)
I0428 21:04:33.184646 12706 sgd_solver.cpp:105] Iteration 4548, lr = 0.0033
I0428 21:04:34.857131 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:04:39.676385 12706 solver.cpp:218] Iteration 4560 (1.84858 iter/s, 6.49146s/12 iters), loss = 0.595596
I0428 21:04:39.676441 12706 solver.cpp:237] Train net output #0: loss = 0.595596 (* 1 = 0.595596 loss)
I0428 21:04:39.676451 12706 sgd_solver.cpp:105] Iteration 4560, lr = 0.0033
I0428 21:04:46.187669 12706 solver.cpp:218] Iteration 4572 (1.84305 iter/s, 6.51095s/12 iters), loss = 0.441785
I0428 21:04:46.193951 12706 solver.cpp:237] Train net output #0: loss = 0.441785 (* 1 = 0.441785 loss)
I0428 21:04:46.193967 12706 sgd_solver.cpp:105] Iteration 4572, lr = 0.0033
I0428 21:04:52.602020 12706 solver.cpp:218] Iteration 4584 (1.87271 iter/s, 6.40781s/12 iters), loss = 0.472134
I0428 21:04:52.602058 12706 solver.cpp:237] Train net output #0: loss = 0.472134 (* 1 = 0.472134 loss)
I0428 21:04:52.602066 12706 sgd_solver.cpp:105] Iteration 4584, lr = 0.0033
I0428 21:04:54.679634 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4590.caffemodel
I0428 21:05:01.766712 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4590.solverstate
I0428 21:05:05.709771 12706 solver.cpp:330] Iteration 4590, Testing net (#0)
I0428 21:05:05.709791 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:05:08.251658 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:05:10.167789 12706 solver.cpp:397] Test net output #0: accuracy = 0.431985
I0428 21:05:10.167824 12706 solver.cpp:397] Test net output #1: loss = 2.77782 (* 1 = 2.77782 loss)
I0428 21:05:12.028223 12706 solver.cpp:218] Iteration 4596 (0.617749 iter/s, 19.4254s/12 iters), loss = 0.35204
I0428 21:05:12.028280 12706 solver.cpp:237] Train net output #0: loss = 0.35204 (* 1 = 0.35204 loss)
I0428 21:05:12.028291 12706 sgd_solver.cpp:105] Iteration 4596, lr = 0.0033
I0428 21:05:17.076135 12706 solver.cpp:218] Iteration 4608 (2.37735 iter/s, 5.04764s/12 iters), loss = 0.460288
I0428 21:05:17.076193 12706 solver.cpp:237] Train net output #0: loss = 0.460288 (* 1 = 0.460288 loss)
I0428 21:05:17.076206 12706 sgd_solver.cpp:105] Iteration 4608, lr = 0.0033
I0428 21:05:22.134289 12706 solver.cpp:218] Iteration 4620 (2.37253 iter/s, 5.05788s/12 iters), loss = 0.336956
I0428 21:05:22.134328 12706 solver.cpp:237] Train net output #0: loss = 0.336956 (* 1 = 0.336956 loss)
I0428 21:05:22.134337 12706 sgd_solver.cpp:105] Iteration 4620, lr = 0.0033
I0428 21:05:27.158193 12706 solver.cpp:218] Iteration 4632 (2.3887 iter/s, 5.02365s/12 iters), loss = 0.442198
I0428 21:05:27.158236 12706 solver.cpp:237] Train net output #0: loss = 0.442198 (* 1 = 0.442198 loss)
I0428 21:05:27.158243 12706 sgd_solver.cpp:105] Iteration 4632, lr = 0.0033
I0428 21:05:32.123378 12706 solver.cpp:218] Iteration 4644 (2.41695 iter/s, 4.96493s/12 iters), loss = 0.341833
I0428 21:05:32.124291 12706 solver.cpp:237] Train net output #0: loss = 0.341833 (* 1 = 0.341833 loss)
I0428 21:05:32.124300 12706 sgd_solver.cpp:105] Iteration 4644, lr = 0.0033
I0428 21:05:35.515708 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:05:37.187249 12706 solver.cpp:218] Iteration 4656 (2.37026 iter/s, 5.06274s/12 iters), loss = 0.486179
I0428 21:05:37.187288 12706 solver.cpp:237] Train net output #0: loss = 0.486179 (* 1 = 0.486179 loss)
I0428 21:05:37.187295 12706 sgd_solver.cpp:105] Iteration 4656, lr = 0.0033
I0428 21:05:42.265005 12706 solver.cpp:218] Iteration 4668 (2.36337 iter/s, 5.07749s/12 iters), loss = 0.330437
I0428 21:05:42.265055 12706 solver.cpp:237] Train net output #0: loss = 0.330437 (* 1 = 0.330437 loss)
I0428 21:05:42.265067 12706 sgd_solver.cpp:105] Iteration 4668, lr = 0.0033
I0428 21:05:47.318521 12706 solver.cpp:218] Iteration 4680 (2.37471 iter/s, 5.05325s/12 iters), loss = 0.493968
I0428 21:05:47.318559 12706 solver.cpp:237] Train net output #0: loss = 0.493968 (* 1 = 0.493968 loss)
I0428 21:05:47.318567 12706 sgd_solver.cpp:105] Iteration 4680, lr = 0.0033
I0428 21:05:51.892719 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4692.caffemodel
I0428 21:05:56.913825 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4692.solverstate
I0428 21:05:59.230207 12706 solver.cpp:330] Iteration 4692, Testing net (#0)
I0428 21:05:59.230227 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:06:01.734526 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:06:03.600232 12706 solver.cpp:397] Test net output #0: accuracy = 0.438726
I0428 21:06:03.601271 12706 solver.cpp:397] Test net output #1: loss = 2.76814 (* 1 = 2.76814 loss)
I0428 21:06:03.689260 12706 solver.cpp:218] Iteration 4692 (0.733047 iter/s, 16.37s/12 iters), loss = 0.264952
I0428 21:06:03.689307 12706 solver.cpp:237] Train net output #0: loss = 0.264952 (* 1 = 0.264952 loss)
I0428 21:06:03.689316 12706 sgd_solver.cpp:105] Iteration 4692, lr = 0.0033
I0428 21:06:07.862632 12706 solver.cpp:218] Iteration 4704 (2.87553 iter/s, 4.17314s/12 iters), loss = 0.426525
I0428 21:06:07.862669 12706 solver.cpp:237] Train net output #0: loss = 0.426525 (* 1 = 0.426525 loss)
I0428 21:06:07.862677 12706 sgd_solver.cpp:105] Iteration 4704, lr = 0.0033
I0428 21:06:12.862131 12706 solver.cpp:218] Iteration 4716 (2.40036 iter/s, 4.99924s/12 iters), loss = 0.409188
I0428 21:06:12.862172 12706 solver.cpp:237] Train net output #0: loss = 0.409188 (* 1 = 0.409188 loss)
I0428 21:06:12.862179 12706 sgd_solver.cpp:105] Iteration 4716, lr = 0.0033
I0428 21:06:17.872170 12706 solver.cpp:218] Iteration 4728 (2.39532 iter/s, 5.00978s/12 iters), loss = 0.368475
I0428 21:06:17.872217 12706 solver.cpp:237] Train net output #0: loss = 0.368475 (* 1 = 0.368475 loss)
I0428 21:06:17.872226 12706 sgd_solver.cpp:105] Iteration 4728, lr = 0.0033
I0428 21:06:22.893159 12706 solver.cpp:218] Iteration 4740 (2.39009 iter/s, 5.02072s/12 iters), loss = 0.386467
I0428 21:06:22.893200 12706 solver.cpp:237] Train net output #0: loss = 0.386467 (* 1 = 0.386467 loss)
I0428 21:06:22.893208 12706 sgd_solver.cpp:105] Iteration 4740, lr = 0.0033
I0428 21:06:27.939088 12706 solver.cpp:218] Iteration 4752 (2.37828 iter/s, 5.04567s/12 iters), loss = 0.385102
I0428 21:06:27.939127 12706 solver.cpp:237] Train net output #0: loss = 0.385102 (* 1 = 0.385102 loss)
I0428 21:06:27.939134 12706 sgd_solver.cpp:105] Iteration 4752, lr = 0.0033
I0428 21:06:28.469666 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:06:33.231767 12706 solver.cpp:218] Iteration 4764 (2.2674 iter/s, 5.29241s/12 iters), loss = 0.405964
I0428 21:06:33.231809 12706 solver.cpp:237] Train net output #0: loss = 0.405964 (* 1 = 0.405964 loss)
I0428 21:06:33.231817 12706 sgd_solver.cpp:105] Iteration 4764, lr = 0.0033
I0428 21:06:38.210481 12706 solver.cpp:218] Iteration 4776 (2.41038 iter/s, 4.97846s/12 iters), loss = 0.327694
I0428 21:06:38.210603 12706 solver.cpp:237] Train net output #0: loss = 0.327694 (* 1 = 0.327694 loss)
I0428 21:06:38.210613 12706 sgd_solver.cpp:105] Iteration 4776, lr = 0.0033
I0428 21:06:43.216531 12706 solver.cpp:218] Iteration 4788 (2.39727 iter/s, 5.00569s/12 iters), loss = 0.261156
I0428 21:06:43.216572 12706 solver.cpp:237] Train net output #0: loss = 0.261156 (* 1 = 0.261156 loss)
I0428 21:06:43.216580 12706 sgd_solver.cpp:105] Iteration 4788, lr = 0.0033
I0428 21:06:45.251616 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4794.caffemodel
I0428 21:06:50.145084 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4794.solverstate
I0428 21:06:52.649878 12706 solver.cpp:330] Iteration 4794, Testing net (#0)
I0428 21:06:52.649899 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:06:55.114401 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:06:57.020614 12706 solver.cpp:397] Test net output #0: accuracy = 0.44424
I0428 21:06:57.020653 12706 solver.cpp:397] Test net output #1: loss = 2.80667 (* 1 = 2.80667 loss)
I0428 21:06:58.809676 12706 solver.cpp:218] Iteration 4800 (0.769603 iter/s, 15.5925s/12 iters), loss = 0.247915
I0428 21:06:58.809729 12706 solver.cpp:237] Train net output #0: loss = 0.247915 (* 1 = 0.247915 loss)
I0428 21:06:58.809741 12706 sgd_solver.cpp:105] Iteration 4800, lr = 0.0033
I0428 21:07:03.752374 12706 solver.cpp:218] Iteration 4812 (2.42795 iter/s, 4.94244s/12 iters), loss = 0.396264
I0428 21:07:03.752411 12706 solver.cpp:237] Train net output #0: loss = 0.396264 (* 1 = 0.396264 loss)
I0428 21:07:03.752419 12706 sgd_solver.cpp:105] Iteration 4812, lr = 0.0033
I0428 21:07:08.731305 12706 solver.cpp:218] Iteration 4824 (2.41028 iter/s, 4.97868s/12 iters), loss = 0.402967
I0428 21:07:08.731395 12706 solver.cpp:237] Train net output #0: loss = 0.402967 (* 1 = 0.402967 loss)
I0428 21:07:08.731403 12706 sgd_solver.cpp:105] Iteration 4824, lr = 0.0033
I0428 21:07:13.704604 12706 solver.cpp:218] Iteration 4836 (2.41303 iter/s, 4.97299s/12 iters), loss = 0.471328
I0428 21:07:13.704653 12706 solver.cpp:237] Train net output #0: loss = 0.471328 (* 1 = 0.471328 loss)
I0428 21:07:13.704665 12706 sgd_solver.cpp:105] Iteration 4836, lr = 0.0033
I0428 21:07:15.749792 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:07:18.810003 12706 solver.cpp:218] Iteration 4848 (2.35058 iter/s, 5.10513s/12 iters), loss = 0.460617
I0428 21:07:18.810056 12706 solver.cpp:237] Train net output #0: loss = 0.460617 (* 1 = 0.460617 loss)
I0428 21:07:18.810066 12706 sgd_solver.cpp:105] Iteration 4848, lr = 0.0033
I0428 21:07:21.566778 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:07:23.954252 12706 solver.cpp:218] Iteration 4860 (2.33283 iter/s, 5.14398s/12 iters), loss = 0.483028
I0428 21:07:23.954288 12706 solver.cpp:237] Train net output #0: loss = 0.483028 (* 1 = 0.483028 loss)
I0428 21:07:23.954295 12706 sgd_solver.cpp:105] Iteration 4860, lr = 0.0033
I0428 21:07:28.979605 12706 solver.cpp:218] Iteration 4872 (2.38801 iter/s, 5.0251s/12 iters), loss = 0.438406
I0428 21:07:28.979647 12706 solver.cpp:237] Train net output #0: loss = 0.438406 (* 1 = 0.438406 loss)
I0428 21:07:28.979657 12706 sgd_solver.cpp:105] Iteration 4872, lr = 0.0033
I0428 21:07:34.054240 12706 solver.cpp:218] Iteration 4884 (2.36482 iter/s, 5.07437s/12 iters), loss = 0.326908
I0428 21:07:34.054278 12706 solver.cpp:237] Train net output #0: loss = 0.326908 (* 1 = 0.326908 loss)
I0428 21:07:34.054286 12706 sgd_solver.cpp:105] Iteration 4884, lr = 0.0033
I0428 21:07:38.616856 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4896.caffemodel
I0428 21:07:43.548121 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4896.solverstate
I0428 21:07:45.861982 12706 solver.cpp:330] Iteration 4896, Testing net (#0)
I0428 21:07:45.862002 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:07:48.413508 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:07:50.542310 12706 solver.cpp:397] Test net output #0: accuracy = 0.429534
I0428 21:07:50.542343 12706 solver.cpp:397] Test net output #1: loss = 2.85585 (* 1 = 2.85585 loss)
I0428 21:07:50.633435 12706 solver.cpp:218] Iteration 4896 (0.72383 iter/s, 16.5785s/12 iters), loss = 0.349147
I0428 21:07:50.633474 12706 solver.cpp:237] Train net output #0: loss = 0.349147 (* 1 = 0.349147 loss)
I0428 21:07:50.633482 12706 sgd_solver.cpp:105] Iteration 4896, lr = 0.0033
I0428 21:07:54.765046 12706 solver.cpp:218] Iteration 4908 (2.90459 iter/s, 4.13139s/12 iters), loss = 0.3666
I0428 21:07:54.765095 12706 solver.cpp:237] Train net output #0: loss = 0.3666 (* 1 = 0.3666 loss)
I0428 21:07:54.765106 12706 sgd_solver.cpp:105] Iteration 4908, lr = 0.0033
I0428 21:07:59.763568 12706 solver.cpp:218] Iteration 4920 (2.40084 iter/s, 4.99826s/12 iters), loss = 0.312915
I0428 21:07:59.763620 12706 solver.cpp:237] Train net output #0: loss = 0.312915 (* 1 = 0.312915 loss)
I0428 21:07:59.763633 12706 sgd_solver.cpp:105] Iteration 4920, lr = 0.0033
I0428 21:08:04.700623 12706 solver.cpp:218] Iteration 4932 (2.43073 iter/s, 4.93679s/12 iters), loss = 0.174883
I0428 21:08:04.700661 12706 solver.cpp:237] Train net output #0: loss = 0.174883 (* 1 = 0.174883 loss)
I0428 21:08:04.700670 12706 sgd_solver.cpp:105] Iteration 4932, lr = 0.0033
I0428 21:08:09.765694 12706 solver.cpp:218] Iteration 4944 (2.36929 iter/s, 5.06481s/12 iters), loss = 0.371562
I0428 21:08:09.765731 12706 solver.cpp:237] Train net output #0: loss = 0.371562 (* 1 = 0.371562 loss)
I0428 21:08:09.765738 12706 sgd_solver.cpp:105] Iteration 4944, lr = 0.0033
I0428 21:08:14.811188 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:08:15.011786 12706 solver.cpp:218] Iteration 4956 (2.28753 iter/s, 5.24582s/12 iters), loss = 0.359019
I0428 21:08:15.011838 12706 solver.cpp:237] Train net output #0: loss = 0.359019 (* 1 = 0.359019 loss)
I0428 21:08:15.011852 12706 sgd_solver.cpp:105] Iteration 4956, lr = 0.0033
I0428 21:08:19.979899 12706 solver.cpp:218] Iteration 4968 (2.41553 iter/s, 4.96785s/12 iters), loss = 0.332583
I0428 21:08:19.979941 12706 solver.cpp:237] Train net output #0: loss = 0.332583 (* 1 = 0.332583 loss)
I0428 21:08:19.979948 12706 sgd_solver.cpp:105] Iteration 4968, lr = 0.0033
I0428 21:08:24.931396 12706 solver.cpp:218] Iteration 4980 (2.42363 iter/s, 4.95124s/12 iters), loss = 0.302794
I0428 21:08:24.931437 12706 solver.cpp:237] Train net output #0: loss = 0.302794 (* 1 = 0.302794 loss)
I0428 21:08:24.931445 12706 sgd_solver.cpp:105] Iteration 4980, lr = 0.0033
I0428 21:08:29.987200 12706 solver.cpp:218] Iteration 4992 (2.37363 iter/s, 5.05554s/12 iters), loss = 0.354204
I0428 21:08:29.987246 12706 solver.cpp:237] Train net output #0: loss = 0.354204 (* 1 = 0.354204 loss)
I0428 21:08:29.987257 12706 sgd_solver.cpp:105] Iteration 4992, lr = 0.0033
I0428 21:08:32.000840 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4998.caffemodel
I0428 21:08:36.671130 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4998.solverstate
I0428 21:08:39.048705 12706 solver.cpp:330] Iteration 4998, Testing net (#0)
I0428 21:08:39.048723 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:08:41.446276 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:08:43.527789 12706 solver.cpp:397] Test net output #0: accuracy = 0.431373
I0428 21:08:43.527819 12706 solver.cpp:397] Test net output #1: loss = 2.92469 (* 1 = 2.92469 loss)
I0428 21:08:45.350821 12706 solver.cpp:218] Iteration 5004 (0.7811 iter/s, 15.3629s/12 iters), loss = 0.381204
I0428 21:08:45.351393 12706 solver.cpp:237] Train net output #0: loss = 0.381204 (* 1 = 0.381204 loss)
I0428 21:08:45.351405 12706 sgd_solver.cpp:105] Iteration 5004, lr = 0.0033
I0428 21:08:50.394773 12706 solver.cpp:218] Iteration 5016 (2.37946 iter/s, 5.04317s/12 iters), loss = 0.404826
I0428 21:08:50.394814 12706 solver.cpp:237] Train net output #0: loss = 0.404826 (* 1 = 0.404826 loss)
I0428 21:08:50.394822 12706 sgd_solver.cpp:105] Iteration 5016, lr = 0.0033
I0428 21:08:55.548277 12706 solver.cpp:218] Iteration 5028 (2.32863 iter/s, 5.15324s/12 iters), loss = 0.449339
I0428 21:08:55.548317 12706 solver.cpp:237] Train net output #0: loss = 0.449339 (* 1 = 0.449339 loss)
I0428 21:08:55.548326 12706 sgd_solver.cpp:105] Iteration 5028, lr = 0.0033
I0428 21:09:00.783301 12706 solver.cpp:218] Iteration 5040 (2.29237 iter/s, 5.23476s/12 iters), loss = 0.23924
I0428 21:09:00.783341 12706 solver.cpp:237] Train net output #0: loss = 0.23924 (* 1 = 0.23924 loss)
I0428 21:09:00.783349 12706 sgd_solver.cpp:105] Iteration 5040, lr = 0.0033
I0428 21:09:05.776791 12706 solver.cpp:218] Iteration 5052 (2.40325 iter/s, 4.99323s/12 iters), loss = 0.481111
I0428 21:09:05.776844 12706 solver.cpp:237] Train net output #0: loss = 0.481111 (* 1 = 0.481111 loss)
I0428 21:09:05.776854 12706 sgd_solver.cpp:105] Iteration 5052, lr = 0.0033
I0428 21:09:07.653136 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:09:10.705020 12706 solver.cpp:218] Iteration 5064 (2.43508 iter/s, 4.92796s/12 iters), loss = 0.196735
I0428 21:09:10.705076 12706 solver.cpp:237] Train net output #0: loss = 0.196735 (* 1 = 0.196735 loss)
I0428 21:09:10.705087 12706 sgd_solver.cpp:105] Iteration 5064, lr = 0.0033
I0428 21:09:15.656080 12706 solver.cpp:218] Iteration 5076 (2.42386 iter/s, 4.95079s/12 iters), loss = 0.379436
I0428 21:09:15.656208 12706 solver.cpp:237] Train net output #0: loss = 0.379436 (* 1 = 0.379436 loss)
I0428 21:09:15.656219 12706 sgd_solver.cpp:105] Iteration 5076, lr = 0.0033
I0428 21:09:20.728102 12706 solver.cpp:218] Iteration 5088 (2.36608 iter/s, 5.07167s/12 iters), loss = 0.371385
I0428 21:09:20.728149 12706 solver.cpp:237] Train net output #0: loss = 0.371385 (* 1 = 0.371385 loss)
I0428 21:09:20.728159 12706 sgd_solver.cpp:105] Iteration 5088, lr = 0.0033
I0428 21:09:25.255103 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5100.caffemodel
I0428 21:09:31.655558 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5100.solverstate
I0428 21:09:35.833298 12706 solver.cpp:330] Iteration 5100, Testing net (#0)
I0428 21:09:35.833317 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:09:38.207561 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:09:40.277238 12706 solver.cpp:397] Test net output #0: accuracy = 0.433824
I0428 21:09:40.277267 12706 solver.cpp:397] Test net output #1: loss = 2.82484 (* 1 = 2.82484 loss)
I0428 21:09:40.368433 12706 solver.cpp:218] Iteration 5100 (0.611014 iter/s, 19.6395s/12 iters), loss = 0.21364
I0428 21:09:40.368474 12706 solver.cpp:237] Train net output #0: loss = 0.21364 (* 1 = 0.21364 loss)
I0428 21:09:40.368482 12706 sgd_solver.cpp:105] Iteration 5100, lr = 0.0033
I0428 21:09:44.542076 12706 solver.cpp:218] Iteration 5112 (2.87534 iter/s, 4.17342s/12 iters), loss = 0.336575
I0428 21:09:44.542114 12706 solver.cpp:237] Train net output #0: loss = 0.336575 (* 1 = 0.336575 loss)
I0428 21:09:44.542124 12706 sgd_solver.cpp:105] Iteration 5112, lr = 0.0033
I0428 21:09:49.717072 12706 solver.cpp:218] Iteration 5124 (2.31896 iter/s, 5.17473s/12 iters), loss = 0.111621
I0428 21:09:49.717911 12706 solver.cpp:237] Train net output #0: loss = 0.111621 (* 1 = 0.111621 loss)
I0428 21:09:49.717921 12706 sgd_solver.cpp:105] Iteration 5124, lr = 0.0033
I0428 21:09:54.727998 12706 solver.cpp:218] Iteration 5136 (2.39527 iter/s, 5.00987s/12 iters), loss = 0.310025
I0428 21:09:54.728037 12706 solver.cpp:237] Train net output #0: loss = 0.310025 (* 1 = 0.310025 loss)
I0428 21:09:54.728044 12706 sgd_solver.cpp:105] Iteration 5136, lr = 0.0033
I0428 21:09:59.743273 12706 solver.cpp:218] Iteration 5148 (2.39281 iter/s, 5.01502s/12 iters), loss = 0.343134
I0428 21:09:59.743311 12706 solver.cpp:237] Train net output #0: loss = 0.343134 (* 1 = 0.343134 loss)
I0428 21:09:59.743319 12706 sgd_solver.cpp:105] Iteration 5148, lr = 0.0033
I0428 21:10:03.995862 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:10:05.055874 12706 solver.cpp:218] Iteration 5160 (2.25889 iter/s, 5.31233s/12 iters), loss = 0.334161
I0428 21:10:05.055912 12706 solver.cpp:237] Train net output #0: loss = 0.334161 (* 1 = 0.334161 loss)
I0428 21:10:05.055919 12706 sgd_solver.cpp:105] Iteration 5160, lr = 0.0033
I0428 21:10:10.155489 12706 solver.cpp:218] Iteration 5172 (2.35324 iter/s, 5.09936s/12 iters), loss = 0.341002
I0428 21:10:10.155529 12706 solver.cpp:237] Train net output #0: loss = 0.341002 (* 1 = 0.341002 loss)
I0428 21:10:10.155537 12706 sgd_solver.cpp:105] Iteration 5172, lr = 0.0033
I0428 21:10:15.222647 12706 solver.cpp:218] Iteration 5184 (2.36831 iter/s, 5.0669s/12 iters), loss = 0.243585
I0428 21:10:15.222687 12706 solver.cpp:237] Train net output #0: loss = 0.243585 (* 1 = 0.243585 loss)
I0428 21:10:15.222694 12706 sgd_solver.cpp:105] Iteration 5184, lr = 0.0033
I0428 21:10:20.204566 12706 solver.cpp:218] Iteration 5196 (2.40883 iter/s, 4.98166s/12 iters), loss = 0.304079
I0428 21:10:20.204717 12706 solver.cpp:237] Train net output #0: loss = 0.304079 (* 1 = 0.304079 loss)
I0428 21:10:20.204726 12706 sgd_solver.cpp:105] Iteration 5196, lr = 0.0033
I0428 21:10:22.255371 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5202.caffemodel
I0428 21:10:28.544049 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5202.solverstate
I0428 21:10:33.522436 12706 solver.cpp:330] Iteration 5202, Testing net (#0)
I0428 21:10:33.522462 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:10:35.830726 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:10:37.904873 12706 solver.cpp:397] Test net output #0: accuracy = 0.43076
I0428 21:10:37.904909 12706 solver.cpp:397] Test net output #1: loss = 2.92024 (* 1 = 2.92024 loss)
I0428 21:10:39.721457 12706 solver.cpp:218] Iteration 5208 (0.614882 iter/s, 19.5159s/12 iters), loss = 0.273461
I0428 21:10:39.721514 12706 solver.cpp:237] Train net output #0: loss = 0.273461 (* 1 = 0.273461 loss)
I0428 21:10:39.721525 12706 sgd_solver.cpp:105] Iteration 5208, lr = 0.0033
I0428 21:10:44.663946 12706 solver.cpp:218] Iteration 5220 (2.42806 iter/s, 4.94222s/12 iters), loss = 0.469452
I0428 21:10:44.663995 12706 solver.cpp:237] Train net output #0: loss = 0.469452 (* 1 = 0.469452 loss)
I0428 21:10:44.664007 12706 sgd_solver.cpp:105] Iteration 5220, lr = 0.0033
I0428 21:10:49.623631 12706 solver.cpp:218] Iteration 5232 (2.41964 iter/s, 4.95942s/12 iters), loss = 0.319197
I0428 21:10:49.623673 12706 solver.cpp:237] Train net output #0: loss = 0.319197 (* 1 = 0.319197 loss)
I0428 21:10:49.623682 12706 sgd_solver.cpp:105] Iteration 5232, lr = 0.0033
I0428 21:10:54.600883 12706 solver.cpp:218] Iteration 5244 (2.4111 iter/s, 4.97699s/12 iters), loss = 0.269854
I0428 21:10:54.601008 12706 solver.cpp:237] Train net output #0: loss = 0.269854 (* 1 = 0.269854 loss)
I0428 21:10:54.601018 12706 sgd_solver.cpp:105] Iteration 5244, lr = 0.0033
I0428 21:10:59.781889 12706 solver.cpp:218] Iteration 5256 (2.31631 iter/s, 5.18066s/12 iters), loss = 0.323339
I0428 21:10:59.781941 12706 solver.cpp:237] Train net output #0: loss = 0.323339 (* 1 = 0.323339 loss)
I0428 21:10:59.781951 12706 sgd_solver.cpp:105] Iteration 5256, lr = 0.0033
I0428 21:11:01.077137 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:11:04.902228 12706 solver.cpp:218] Iteration 5268 (2.34372 iter/s, 5.12007s/12 iters), loss = 0.211216
I0428 21:11:04.902267 12706 solver.cpp:237] Train net output #0: loss = 0.211216 (* 1 = 0.211216 loss)
I0428 21:11:04.902276 12706 sgd_solver.cpp:105] Iteration 5268, lr = 0.0033
I0428 21:11:09.877760 12706 solver.cpp:218] Iteration 5280 (2.41193 iter/s, 4.97528s/12 iters), loss = 0.408039
I0428 21:11:09.877799 12706 solver.cpp:237] Train net output #0: loss = 0.408039 (* 1 = 0.408039 loss)
I0428 21:11:09.877807 12706 sgd_solver.cpp:105] Iteration 5280, lr = 0.0033
I0428 21:11:14.923030 12706 solver.cpp:218] Iteration 5292 (2.37859 iter/s, 5.04501s/12 iters), loss = 0.402282
I0428 21:11:14.923069 12706 solver.cpp:237] Train net output #0: loss = 0.402282 (* 1 = 0.402282 loss)
I0428 21:11:14.923077 12706 sgd_solver.cpp:105] Iteration 5292, lr = 0.0033
I0428 21:11:19.483853 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5304.caffemodel
I0428 21:11:22.919971 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5304.solverstate
I0428 21:11:25.522742 12706 solver.cpp:330] Iteration 5304, Testing net (#0)
I0428 21:11:25.522856 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:11:27.779378 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:11:29.953378 12706 solver.cpp:397] Test net output #0: accuracy = 0.427696
I0428 21:11:29.953413 12706 solver.cpp:397] Test net output #1: loss = 2.91632 (* 1 = 2.91632 loss)
I0428 21:11:30.044477 12706 solver.cpp:218] Iteration 5304 (0.79361 iter/s, 15.1208s/12 iters), loss = 0.381061
I0428 21:11:30.044556 12706 solver.cpp:237] Train net output #0: loss = 0.381061 (* 1 = 0.381061 loss)
I0428 21:11:30.044564 12706 sgd_solver.cpp:105] Iteration 5304, lr = 0.0033
I0428 21:11:34.200131 12706 solver.cpp:218] Iteration 5316 (2.88781 iter/s, 4.15539s/12 iters), loss = 0.323973
I0428 21:11:34.200172 12706 solver.cpp:237] Train net output #0: loss = 0.323973 (* 1 = 0.323973 loss)
I0428 21:11:34.200179 12706 sgd_solver.cpp:105] Iteration 5316, lr = 0.0033
I0428 21:11:39.192939 12706 solver.cpp:218] Iteration 5328 (2.40358 iter/s, 4.99255s/12 iters), loss = 0.159596
I0428 21:11:39.192979 12706 solver.cpp:237] Train net output #0: loss = 0.159596 (* 1 = 0.159596 loss)
I0428 21:11:39.192987 12706 sgd_solver.cpp:105] Iteration 5328, lr = 0.0033
I0428 21:11:44.238482 12706 solver.cpp:218] Iteration 5340 (2.37846 iter/s, 5.04528s/12 iters), loss = 0.245687
I0428 21:11:44.238519 12706 solver.cpp:237] Train net output #0: loss = 0.245687 (* 1 = 0.245687 loss)
I0428 21:11:44.238526 12706 sgd_solver.cpp:105] Iteration 5340, lr = 0.0033
I0428 21:11:49.159981 12706 solver.cpp:218] Iteration 5352 (2.43841 iter/s, 4.92124s/12 iters), loss = 0.316982
I0428 21:11:49.160019 12706 solver.cpp:237] Train net output #0: loss = 0.316982 (* 1 = 0.316982 loss)
I0428 21:11:49.160027 12706 sgd_solver.cpp:105] Iteration 5352, lr = 0.0033
I0428 21:11:52.589423 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:11:54.157619 12706 solver.cpp:218] Iteration 5364 (2.40126 iter/s, 4.99738s/12 iters), loss = 0.43139
I0428 21:11:54.157660 12706 solver.cpp:237] Train net output #0: loss = 0.43139 (* 1 = 0.43139 loss)
I0428 21:11:54.157666 12706 sgd_solver.cpp:105] Iteration 5364, lr = 0.0033
I0428 21:11:59.268456 12706 solver.cpp:218] Iteration 5376 (2.34807 iter/s, 5.11058s/12 iters), loss = 0.275529
I0428 21:11:59.268589 12706 solver.cpp:237] Train net output #0: loss = 0.275529 (* 1 = 0.275529 loss)
I0428 21:11:59.268599 12706 sgd_solver.cpp:105] Iteration 5376, lr = 0.0033
I0428 21:12:04.312788 12706 solver.cpp:218] Iteration 5388 (2.37907 iter/s, 5.04398s/12 iters), loss = 0.394183
I0428 21:12:04.312827 12706 solver.cpp:237] Train net output #0: loss = 0.394183 (* 1 = 0.394183 loss)
I0428 21:12:04.312836 12706 sgd_solver.cpp:105] Iteration 5388, lr = 0.0033
I0428 21:12:09.270185 12706 solver.cpp:218] Iteration 5400 (2.42075 iter/s, 4.95714s/12 iters), loss = 0.370258
I0428 21:12:09.270229 12706 solver.cpp:237] Train net output #0: loss = 0.370258 (* 1 = 0.370258 loss)
I0428 21:12:09.270238 12706 sgd_solver.cpp:105] Iteration 5400, lr = 0.0033
I0428 21:12:11.347826 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5406.caffemodel
I0428 21:12:20.470715 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5406.solverstate
I0428 21:12:22.788542 12706 solver.cpp:330] Iteration 5406, Testing net (#0)
I0428 21:12:22.788568 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:12:25.014417 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:12:27.155175 12706 solver.cpp:397] Test net output #0: accuracy = 0.445466
I0428 21:12:27.155205 12706 solver.cpp:397] Test net output #1: loss = 2.82147 (* 1 = 2.82147 loss)
I0428 21:12:28.862206 12706 solver.cpp:218] Iteration 5412 (0.612521 iter/s, 19.5912s/12 iters), loss = 0.288305
I0428 21:12:28.862246 12706 solver.cpp:237] Train net output #0: loss = 0.288305 (* 1 = 0.288305 loss)
I0428 21:12:28.862253 12706 sgd_solver.cpp:105] Iteration 5412, lr = 0.0033
I0428 21:12:33.829100 12706 solver.cpp:218] Iteration 5424 (2.41612 iter/s, 4.96664s/12 iters), loss = 0.295073
I0428 21:12:33.829582 12706 solver.cpp:237] Train net output #0: loss = 0.295073 (* 1 = 0.295073 loss)
I0428 21:12:33.829591 12706 sgd_solver.cpp:105] Iteration 5424, lr = 0.0033
I0428 21:12:38.809810 12706 solver.cpp:218] Iteration 5436 (2.40963 iter/s, 4.98001s/12 iters), loss = 0.254635
I0428 21:12:38.809859 12706 solver.cpp:237] Train net output #0: loss = 0.254635 (* 1 = 0.254635 loss)
I0428 21:12:38.809871 12706 sgd_solver.cpp:105] Iteration 5436, lr = 0.0033
I0428 21:12:43.719589 12706 solver.cpp:218] Iteration 5448 (2.44423 iter/s, 4.90952s/12 iters), loss = 0.349421
I0428 21:12:43.719632 12706 solver.cpp:237] Train net output #0: loss = 0.349421 (* 1 = 0.349421 loss)
I0428 21:12:43.719641 12706 sgd_solver.cpp:105] Iteration 5448, lr = 0.0033
I0428 21:12:48.761430 12706 solver.cpp:218] Iteration 5460 (2.38021 iter/s, 5.04158s/12 iters), loss = 0.188558
I0428 21:12:48.761469 12706 solver.cpp:237] Train net output #0: loss = 0.188558 (* 1 = 0.188558 loss)
I0428 21:12:48.761476 12706 sgd_solver.cpp:105] Iteration 5460, lr = 0.0033
I0428 21:12:49.317417 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:12:53.713593 12706 solver.cpp:218] Iteration 5472 (2.42331 iter/s, 4.9519s/12 iters), loss = 0.20274
I0428 21:12:53.713634 12706 solver.cpp:237] Train net output #0: loss = 0.20274 (* 1 = 0.20274 loss)
I0428 21:12:53.713641 12706 sgd_solver.cpp:105] Iteration 5472, lr = 0.0033
I0428 21:12:58.713387 12706 solver.cpp:218] Iteration 5484 (2.40022 iter/s, 4.99954s/12 iters), loss = 0.261499
I0428 21:12:58.713424 12706 solver.cpp:237] Train net output #0: loss = 0.261499 (* 1 = 0.261499 loss)
I0428 21:12:58.713431 12706 sgd_solver.cpp:105] Iteration 5484, lr = 0.0033
I0428 21:13:03.709909 12706 solver.cpp:218] Iteration 5496 (2.40179 iter/s, 4.99626s/12 iters), loss = 0.411727
I0428 21:13:03.709946 12706 solver.cpp:237] Train net output #0: loss = 0.411727 (* 1 = 0.411727 loss)
I0428 21:13:03.709954 12706 sgd_solver.cpp:105] Iteration 5496, lr = 0.0033
I0428 21:13:08.315176 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5508.caffemodel
I0428 21:13:16.532522 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5508.solverstate
I0428 21:13:19.808828 12706 solver.cpp:330] Iteration 5508, Testing net (#0)
I0428 21:13:19.808849 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:13:22.267159 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:13:24.520623 12706 solver.cpp:397] Test net output #0: accuracy = 0.447304
I0428 21:13:24.520651 12706 solver.cpp:397] Test net output #1: loss = 2.84811 (* 1 = 2.84811 loss)
I0428 21:13:24.611626 12706 solver.cpp:218] Iteration 5508 (0.574141 iter/s, 20.9008s/12 iters), loss = 0.347159
I0428 21:13:24.611678 12706 solver.cpp:237] Train net output #0: loss = 0.347158 (* 1 = 0.347158 loss)
I0428 21:13:24.611690 12706 sgd_solver.cpp:105] Iteration 5508, lr = 0.0033
I0428 21:13:28.950397 12706 solver.cpp:218] Iteration 5520 (2.76591 iter/s, 4.33853s/12 iters), loss = 0.179511
I0428 21:13:28.950438 12706 solver.cpp:237] Train net output #0: loss = 0.179511 (* 1 = 0.179511 loss)
I0428 21:13:28.950446 12706 sgd_solver.cpp:105] Iteration 5520, lr = 0.0033
I0428 21:13:31.329874 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:13:33.857486 12706 solver.cpp:218] Iteration 5532 (2.44557 iter/s, 4.90683s/12 iters), loss = 0.350366
I0428 21:13:33.857523 12706 solver.cpp:237] Train net output #0: loss = 0.350366 (* 1 = 0.350366 loss)
I0428 21:13:33.857530 12706 sgd_solver.cpp:105] Iteration 5532, lr = 0.0033
I0428 21:13:38.846004 12706 solver.cpp:218] Iteration 5544 (2.40565 iter/s, 4.98826s/12 iters), loss = 0.265833
I0428 21:13:38.846314 12706 solver.cpp:237] Train net output #0: loss = 0.265833 (* 1 = 0.265833 loss)
I0428 21:13:38.846323 12706 sgd_solver.cpp:105] Iteration 5544, lr = 0.0033
I0428 21:13:43.918467 12706 solver.cpp:218] Iteration 5556 (2.36596 iter/s, 5.07193s/12 iters), loss = 0.198844
I0428 21:13:43.918510 12706 solver.cpp:237] Train net output #0: loss = 0.198844 (* 1 = 0.198844 loss)
I0428 21:13:43.918517 12706 sgd_solver.cpp:105] Iteration 5556, lr = 0.0033
I0428 21:13:46.591426 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:13:48.880270 12706 solver.cpp:218] Iteration 5568 (2.4186 iter/s, 4.96155s/12 iters), loss = 0.243543
I0428 21:13:48.880311 12706 solver.cpp:237] Train net output #0: loss = 0.243543 (* 1 = 0.243543 loss)
I0428 21:13:48.880319 12706 sgd_solver.cpp:105] Iteration 5568, lr = 0.0033
I0428 21:13:53.852320 12706 solver.cpp:218] Iteration 5580 (2.41362 iter/s, 4.97179s/12 iters), loss = 0.229847
I0428 21:13:53.852357 12706 solver.cpp:237] Train net output #0: loss = 0.229847 (* 1 = 0.229847 loss)
I0428 21:13:53.852365 12706 sgd_solver.cpp:105] Iteration 5580, lr = 0.0033
I0428 21:13:58.919535 12706 solver.cpp:218] Iteration 5592 (2.36829 iter/s, 5.06696s/12 iters), loss = 0.240517
I0428 21:13:58.919574 12706 solver.cpp:237] Train net output #0: loss = 0.240517 (* 1 = 0.240517 loss)
I0428 21:13:58.919580 12706 sgd_solver.cpp:105] Iteration 5592, lr = 0.0033
I0428 21:14:03.930975 12706 solver.cpp:218] Iteration 5604 (2.39464 iter/s, 5.01118s/12 iters), loss = 0.355097
I0428 21:14:03.931013 12706 solver.cpp:237] Train net output #0: loss = 0.355097 (* 1 = 0.355097 loss)
I0428 21:14:03.931021 12706 sgd_solver.cpp:105] Iteration 5604, lr = 0.0033
I0428 21:14:06.001837 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5610.caffemodel
I0428 21:14:09.452814 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5610.solverstate
I0428 21:14:14.952239 12706 solver.cpp:330] Iteration 5610, Testing net (#0)
I0428 21:14:14.952258 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:14:17.089843 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:14:19.317842 12706 solver.cpp:397] Test net output #0: accuracy = 0.431985
I0428 21:14:19.317868 12706 solver.cpp:397] Test net output #1: loss = 2.9265 (* 1 = 2.9265 loss)
I0428 21:14:21.218400 12706 solver.cpp:218] Iteration 5616 (0.694177 iter/s, 17.2867s/12 iters), loss = 0.299313
I0428 21:14:21.218442 12706 solver.cpp:237] Train net output #0: loss = 0.299313 (* 1 = 0.299313 loss)
I0428 21:14:21.218451 12706 sgd_solver.cpp:105] Iteration 5616, lr = 0.0033
I0428 21:14:26.188148 12706 solver.cpp:218] Iteration 5628 (2.41474 iter/s, 4.96949s/12 iters), loss = 0.221031
I0428 21:14:26.188197 12706 solver.cpp:237] Train net output #0: loss = 0.221031 (* 1 = 0.221031 loss)
I0428 21:14:26.188207 12706 sgd_solver.cpp:105] Iteration 5628, lr = 0.0033
I0428 21:14:31.264444 12706 solver.cpp:218] Iteration 5640 (2.36405 iter/s, 5.07603s/12 iters), loss = 0.185933
I0428 21:14:31.264510 12706 solver.cpp:237] Train net output #0: loss = 0.185933 (* 1 = 0.185933 loss)
I0428 21:14:31.264520 12706 sgd_solver.cpp:105] Iteration 5640, lr = 0.0033
I0428 21:14:36.277259 12706 solver.cpp:218] Iteration 5652 (2.39399 iter/s, 5.01255s/12 iters), loss = 0.28865
I0428 21:14:36.277312 12706 solver.cpp:237] Train net output #0: loss = 0.28865 (* 1 = 0.28865 loss)
I0428 21:14:36.277324 12706 sgd_solver.cpp:105] Iteration 5652, lr = 0.0033
I0428 21:14:41.407605 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:14:41.577381 12706 solver.cpp:218] Iteration 5664 (2.26422 iter/s, 5.29985s/12 iters), loss = 0.287587
I0428 21:14:41.577416 12706 solver.cpp:237] Train net output #0: loss = 0.287587 (* 1 = 0.287587 loss)
I0428 21:14:41.577425 12706 sgd_solver.cpp:105] Iteration 5664, lr = 0.0033
I0428 21:14:46.761126 12706 solver.cpp:218] Iteration 5676 (2.31505 iter/s, 5.18348s/12 iters), loss = 0.326491
I0428 21:14:46.761169 12706 solver.cpp:237] Train net output #0: loss = 0.326491 (* 1 = 0.326491 loss)
I0428 21:14:46.761178 12706 sgd_solver.cpp:105] Iteration 5676, lr = 0.0033
I0428 21:14:51.786442 12706 solver.cpp:218] Iteration 5688 (2.38804 iter/s, 5.02505s/12 iters), loss = 0.213295
I0428 21:14:51.786489 12706 solver.cpp:237] Train net output #0: loss = 0.213295 (* 1 = 0.213295 loss)
I0428 21:14:51.786499 12706 sgd_solver.cpp:105] Iteration 5688, lr = 0.0033
I0428 21:14:56.857029 12706 solver.cpp:218] Iteration 5700 (2.36671 iter/s, 5.07032s/12 iters), loss = 0.204233
I0428 21:14:56.857069 12706 solver.cpp:237] Train net output #0: loss = 0.204233 (* 1 = 0.204233 loss)
I0428 21:14:56.857075 12706 sgd_solver.cpp:105] Iteration 5700, lr = 0.0033
I0428 21:15:01.379631 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5712.caffemodel
I0428 21:15:05.117522 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5712.solverstate
I0428 21:15:09.293097 12706 solver.cpp:330] Iteration 5712, Testing net (#0)
I0428 21:15:09.293118 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:15:11.400288 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:15:13.671192 12706 solver.cpp:397] Test net output #0: accuracy = 0.436887
I0428 21:15:13.671316 12706 solver.cpp:397] Test net output #1: loss = 3.00527 (* 1 = 3.00527 loss)
I0428 21:15:13.762639 12706 solver.cpp:218] Iteration 5712 (0.709855 iter/s, 16.9049s/12 iters), loss = 0.321613
I0428 21:15:13.762676 12706 solver.cpp:237] Train net output #0: loss = 0.321613 (* 1 = 0.321613 loss)
I0428 21:15:13.762682 12706 sgd_solver.cpp:105] Iteration 5712, lr = 0.0033
I0428 21:15:17.886137 12706 solver.cpp:218] Iteration 5724 (2.91031 iter/s, 4.12327s/12 iters), loss = 0.36981
I0428 21:15:17.886189 12706 solver.cpp:237] Train net output #0: loss = 0.36981 (* 1 = 0.36981 loss)
I0428 21:15:17.886199 12706 sgd_solver.cpp:105] Iteration 5724, lr = 0.0033
I0428 21:15:22.880530 12706 solver.cpp:218] Iteration 5736 (2.40283 iter/s, 4.99411s/12 iters), loss = 0.258614
I0428 21:15:22.880573 12706 solver.cpp:237] Train net output #0: loss = 0.258614 (* 1 = 0.258614 loss)
I0428 21:15:22.880579 12706 sgd_solver.cpp:105] Iteration 5736, lr = 0.0033
I0428 21:15:27.923768 12706 solver.cpp:218] Iteration 5748 (2.37955 iter/s, 5.04297s/12 iters), loss = 0.232987
I0428 21:15:27.923820 12706 solver.cpp:237] Train net output #0: loss = 0.232987 (* 1 = 0.232987 loss)
I0428 21:15:27.923832 12706 sgd_solver.cpp:105] Iteration 5748, lr = 0.0033
I0428 21:15:33.040040 12706 solver.cpp:218] Iteration 5760 (2.34558 iter/s, 5.116s/12 iters), loss = 0.251065
I0428 21:15:33.040091 12706 solver.cpp:237] Train net output #0: loss = 0.251065 (* 1 = 0.251065 loss)
I0428 21:15:33.040099 12706 sgd_solver.cpp:105] Iteration 5760, lr = 0.0033
I0428 21:15:35.063468 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:15:38.191030 12706 solver.cpp:218] Iteration 5772 (2.32977 iter/s, 5.15072s/12 iters), loss = 0.231317
I0428 21:15:38.191068 12706 solver.cpp:237] Train net output #0: loss = 0.231317 (* 1 = 0.231317 loss)
I0428 21:15:38.191076 12706 sgd_solver.cpp:105] Iteration 5772, lr = 0.0033
I0428 21:15:43.458853 12706 solver.cpp:218] Iteration 5784 (2.2781 iter/s, 5.26756s/12 iters), loss = 0.295089
I0428 21:15:43.458889 12706 solver.cpp:237] Train net output #0: loss = 0.295089 (* 1 = 0.295089 loss)
I0428 21:15:43.458895 12706 sgd_solver.cpp:105] Iteration 5784, lr = 0.0033
I0428 21:15:48.557463 12706 solver.cpp:218] Iteration 5796 (2.3537 iter/s, 5.09835s/12 iters), loss = 0.193089
I0428 21:15:48.557601 12706 solver.cpp:237] Train net output #0: loss = 0.193089 (* 1 = 0.193089 loss)
I0428 21:15:48.557610 12706 sgd_solver.cpp:105] Iteration 5796, lr = 0.0033
I0428 21:15:53.635798 12706 solver.cpp:218] Iteration 5808 (2.36315 iter/s, 5.07798s/12 iters), loss = 0.214345
I0428 21:15:53.635838 12706 solver.cpp:237] Train net output #0: loss = 0.214345 (* 1 = 0.214345 loss)
I0428 21:15:53.635846 12706 sgd_solver.cpp:105] Iteration 5808, lr = 0.0033
I0428 21:15:55.672925 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5814.caffemodel
I0428 21:15:58.626700 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5814.solverstate
I0428 21:16:00.951581 12706 solver.cpp:330] Iteration 5814, Testing net (#0)
I0428 21:16:00.951608 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:16:03.153848 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:16:05.461941 12706 solver.cpp:397] Test net output #0: accuracy = 0.44424
I0428 21:16:05.461977 12706 solver.cpp:397] Test net output #1: loss = 2.8615 (* 1 = 2.8615 loss)
I0428 21:16:07.266494 12706 solver.cpp:218] Iteration 5820 (0.880405 iter/s, 13.6301s/12 iters), loss = 0.141211
I0428 21:16:07.266535 12706 solver.cpp:237] Train net output #0: loss = 0.141211 (* 1 = 0.141211 loss)
I0428 21:16:07.266542 12706 sgd_solver.cpp:105] Iteration 5820, lr = 0.0033
I0428 21:16:12.295357 12706 solver.cpp:218] Iteration 5832 (2.38635 iter/s, 5.02861s/12 iters), loss = 0.347434
I0428 21:16:12.295394 12706 solver.cpp:237] Train net output #0: loss = 0.347434 (* 1 = 0.347434 loss)
I0428 21:16:12.295403 12706 sgd_solver.cpp:105] Iteration 5832, lr = 0.0033
I0428 21:16:17.425215 12706 solver.cpp:218] Iteration 5844 (2.33937 iter/s, 5.12959s/12 iters), loss = 0.341905
I0428 21:16:17.425256 12706 solver.cpp:237] Train net output #0: loss = 0.341905 (* 1 = 0.341905 loss)
I0428 21:16:17.425262 12706 sgd_solver.cpp:105] Iteration 5844, lr = 0.0033
I0428 21:16:22.355746 12706 solver.cpp:218] Iteration 5856 (2.43394 iter/s, 4.93027s/12 iters), loss = 0.314005
I0428 21:16:22.355845 12706 solver.cpp:237] Train net output #0: loss = 0.314005 (* 1 = 0.314005 loss)
I0428 21:16:22.355854 12706 sgd_solver.cpp:105] Iteration 5856, lr = 0.0033
I0428 21:16:26.531855 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:16:27.338483 12706 solver.cpp:218] Iteration 5868 (2.40847 iter/s, 4.98242s/12 iters), loss = 0.24056
I0428 21:16:27.338531 12706 solver.cpp:237] Train net output #0: loss = 0.24056 (* 1 = 0.24056 loss)
I0428 21:16:27.338541 12706 sgd_solver.cpp:105] Iteration 5868, lr = 0.0033
I0428 21:16:32.343705 12706 solver.cpp:218] Iteration 5880 (2.39762 iter/s, 5.00496s/12 iters), loss = 0.141765
I0428 21:16:32.343745 12706 solver.cpp:237] Train net output #0: loss = 0.141765 (* 1 = 0.141765 loss)
I0428 21:16:32.343753 12706 sgd_solver.cpp:105] Iteration 5880, lr = 0.0033
I0428 21:16:37.347124 12706 solver.cpp:218] Iteration 5892 (2.39849 iter/s, 5.00316s/12 iters), loss = 0.256125
I0428 21:16:37.347177 12706 solver.cpp:237] Train net output #0: loss = 0.256125 (* 1 = 0.256125 loss)
I0428 21:16:37.347188 12706 sgd_solver.cpp:105] Iteration 5892, lr = 0.0033
I0428 21:16:42.329835 12706 solver.cpp:218] Iteration 5904 (2.40846 iter/s, 4.98244s/12 iters), loss = 0.292822
I0428 21:16:42.329888 12706 solver.cpp:237] Train net output #0: loss = 0.292821 (* 1 = 0.292821 loss)
I0428 21:16:42.329897 12706 sgd_solver.cpp:105] Iteration 5904, lr = 0.0033
I0428 21:16:46.938171 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5916.caffemodel
I0428 21:16:49.999119 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5916.solverstate
I0428 21:16:52.311547 12706 solver.cpp:330] Iteration 5916, Testing net (#0)
I0428 21:16:52.311566 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:16:54.549556 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:16:57.057765 12706 solver.cpp:397] Test net output #0: accuracy = 0.447917
I0428 21:16:57.057802 12706 solver.cpp:397] Test net output #1: loss = 2.87957 (* 1 = 2.87957 loss)
I0428 21:16:57.148761 12706 solver.cpp:218] Iteration 5916 (0.809812 iter/s, 14.8182s/12 iters), loss = 0.200553
I0428 21:16:57.148823 12706 solver.cpp:237] Train net output #0: loss = 0.200553 (* 1 = 0.200553 loss)
I0428 21:16:57.148833 12706 sgd_solver.cpp:105] Iteration 5916, lr = 0.0033
I0428 21:17:01.313271 12706 solver.cpp:218] Iteration 5928 (2.88166 iter/s, 4.16427s/12 iters), loss = 0.256961
I0428 21:17:01.313310 12706 solver.cpp:237] Train net output #0: loss = 0.256961 (* 1 = 0.256961 loss)
I0428 21:17:01.313318 12706 sgd_solver.cpp:105] Iteration 5928, lr = 0.0033
I0428 21:17:06.372511 12706 solver.cpp:218] Iteration 5940 (2.37203 iter/s, 5.05895s/12 iters), loss = 0.33652
I0428 21:17:06.372563 12706 solver.cpp:237] Train net output #0: loss = 0.33652 (* 1 = 0.33652 loss)
I0428 21:17:06.372575 12706 sgd_solver.cpp:105] Iteration 5940, lr = 0.0033
I0428 21:17:11.397296 12706 solver.cpp:218] Iteration 5952 (2.38829 iter/s, 5.02452s/12 iters), loss = 0.230513
I0428 21:17:11.397336 12706 solver.cpp:237] Train net output #0: loss = 0.230513 (* 1 = 0.230513 loss)
I0428 21:17:11.397342 12706 sgd_solver.cpp:105] Iteration 5952, lr = 0.0033
I0428 21:17:16.395586 12706 solver.cpp:218] Iteration 5964 (2.40095 iter/s, 4.99803s/12 iters), loss = 0.283109
I0428 21:17:16.395625 12706 solver.cpp:237] Train net output #0: loss = 0.283109 (* 1 = 0.283109 loss)
I0428 21:17:16.395634 12706 sgd_solver.cpp:105] Iteration 5964, lr = 0.0033
I0428 21:17:17.696370 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:17:21.324944 12706 solver.cpp:218] Iteration 5976 (2.43452 iter/s, 4.92911s/12 iters), loss = 0.22751
I0428 21:17:21.324981 12706 solver.cpp:237] Train net output #0: loss = 0.227509 (* 1 = 0.227509 loss)
I0428 21:17:21.324988 12706 sgd_solver.cpp:105] Iteration 5976, lr = 0.0033
I0428 21:17:26.341182 12706 solver.cpp:218] Iteration 5988 (2.39235 iter/s, 5.01598s/12 iters), loss = 0.147982
I0428 21:17:26.341310 12706 solver.cpp:237] Train net output #0: loss = 0.147982 (* 1 = 0.147982 loss)
I0428 21:17:26.341318 12706 sgd_solver.cpp:105] Iteration 5988, lr = 0.0033
I0428 21:17:31.318143 12706 solver.cpp:218] Iteration 6000 (2.41128 iter/s, 4.97661s/12 iters), loss = 0.148961
I0428 21:17:31.318193 12706 solver.cpp:237] Train net output #0: loss = 0.148961 (* 1 = 0.148961 loss)
I0428 21:17:31.318203 12706 sgd_solver.cpp:105] Iteration 6000, lr = 0.0033
I0428 21:17:36.305603 12706 solver.cpp:218] Iteration 6012 (2.40616 iter/s, 4.98719s/12 iters), loss = 0.212099
I0428 21:17:36.305660 12706 solver.cpp:237] Train net output #0: loss = 0.212099 (* 1 = 0.212099 loss)
I0428 21:17:36.305672 12706 sgd_solver.cpp:105] Iteration 6012, lr = 0.0033
I0428 21:17:38.326282 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6018.caffemodel
I0428 21:17:41.285583 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6018.solverstate
I0428 21:17:44.648840 12706 solver.cpp:330] Iteration 6018, Testing net (#0)
I0428 21:17:44.648861 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:17:46.676000 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:17:49.152734 12706 solver.cpp:397] Test net output #0: accuracy = 0.460172
I0428 21:17:49.152762 12706 solver.cpp:397] Test net output #1: loss = 2.82432 (* 1 = 2.82432 loss)
I0428 21:17:50.941113 12706 solver.cpp:218] Iteration 6024 (0.819961 iter/s, 14.6348s/12 iters), loss = 0.204117
I0428 21:17:50.941151 12706 solver.cpp:237] Train net output #0: loss = 0.204117 (* 1 = 0.204117 loss)
I0428 21:17:50.941159 12706 sgd_solver.cpp:105] Iteration 6024, lr = 0.0033
I0428 21:17:55.966558 12706 solver.cpp:218] Iteration 6036 (2.38797 iter/s, 5.02519s/12 iters), loss = 0.27167
I0428 21:17:55.966598 12706 solver.cpp:237] Train net output #0: loss = 0.27167 (* 1 = 0.27167 loss)
I0428 21:17:55.966605 12706 sgd_solver.cpp:105] Iteration 6036, lr = 0.0033
I0428 21:18:00.971177 12706 solver.cpp:218] Iteration 6048 (2.39791 iter/s, 5.00436s/12 iters), loss = 0.141974
I0428 21:18:00.971313 12706 solver.cpp:237] Train net output #0: loss = 0.141974 (* 1 = 0.141974 loss)
I0428 21:18:00.971323 12706 sgd_solver.cpp:105] Iteration 6048, lr = 0.0033
I0428 21:18:05.947316 12706 solver.cpp:218] Iteration 6060 (2.41168 iter/s, 4.97579s/12 iters), loss = 0.212944
I0428 21:18:05.947360 12706 solver.cpp:237] Train net output #0: loss = 0.212944 (* 1 = 0.212944 loss)
I0428 21:18:05.947367 12706 sgd_solver.cpp:105] Iteration 6060, lr = 0.0033
I0428 21:18:09.426573 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:18:10.964736 12706 solver.cpp:218] Iteration 6072 (2.39179 iter/s, 5.01716s/12 iters), loss = 0.174907
I0428 21:18:10.964776 12706 solver.cpp:237] Train net output #0: loss = 0.174907 (* 1 = 0.174907 loss)
I0428 21:18:10.964784 12706 sgd_solver.cpp:105] Iteration 6072, lr = 0.0033
I0428 21:18:15.934703 12706 solver.cpp:218] Iteration 6084 (2.41463 iter/s, 4.96971s/12 iters), loss = 0.199965
I0428 21:18:15.934744 12706 solver.cpp:237] Train net output #0: loss = 0.199965 (* 1 = 0.199965 loss)
I0428 21:18:15.934752 12706 sgd_solver.cpp:105] Iteration 6084, lr = 0.0033
I0428 21:18:20.887331 12706 solver.cpp:218] Iteration 6096 (2.42308 iter/s, 4.95236s/12 iters), loss = 0.213151
I0428 21:18:20.887377 12706 solver.cpp:237] Train net output #0: loss = 0.213151 (* 1 = 0.213151 loss)
I0428 21:18:20.887387 12706 sgd_solver.cpp:105] Iteration 6096, lr = 0.0033
I0428 21:18:25.848997 12706 solver.cpp:218] Iteration 6108 (2.41867 iter/s, 4.96141s/12 iters), loss = 0.143447
I0428 21:18:25.849041 12706 solver.cpp:237] Train net output #0: loss = 0.143447 (* 1 = 0.143447 loss)
I0428 21:18:25.849048 12706 sgd_solver.cpp:105] Iteration 6108, lr = 0.0033
I0428 21:18:30.388665 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6120.caffemodel
I0428 21:18:35.737898 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6120.solverstate
I0428 21:18:38.052934 12706 solver.cpp:330] Iteration 6120, Testing net (#0)
I0428 21:18:38.052955 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:18:40.108263 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:18:42.779873 12706 solver.cpp:397] Test net output #0: accuracy = 0.446691
I0428 21:18:42.779918 12706 solver.cpp:397] Test net output #1: loss = 2.86967 (* 1 = 2.86967 loss)
I0428 21:18:42.871421 12706 solver.cpp:218] Iteration 6120 (0.704984 iter/s, 17.0217s/12 iters), loss = 0.165654
I0428 21:18:42.871484 12706 solver.cpp:237] Train net output #0: loss = 0.165654 (* 1 = 0.165654 loss)
I0428 21:18:42.871497 12706 sgd_solver.cpp:105] Iteration 6120, lr = 0.0033
I0428 21:18:47.067060 12706 solver.cpp:218] Iteration 6132 (2.86028 iter/s, 4.19539s/12 iters), loss = 0.105726
I0428 21:18:47.067116 12706 solver.cpp:237] Train net output #0: loss = 0.105726 (* 1 = 0.105726 loss)
I0428 21:18:47.067126 12706 sgd_solver.cpp:105] Iteration 6132, lr = 0.0033
I0428 21:18:52.013875 12706 solver.cpp:218] Iteration 6144 (2.42594 iter/s, 4.94654s/12 iters), loss = 0.273345
I0428 21:18:52.013926 12706 solver.cpp:237] Train net output #0: loss = 0.273345 (* 1 = 0.273345 loss)
I0428 21:18:52.013936 12706 sgd_solver.cpp:105] Iteration 6144, lr = 0.0033
I0428 21:18:56.969365 12706 solver.cpp:218] Iteration 6156 (2.42169 iter/s, 4.95522s/12 iters), loss = 0.232665
I0428 21:18:56.969408 12706 solver.cpp:237] Train net output #0: loss = 0.232665 (* 1 = 0.232665 loss)
I0428 21:18:56.969415 12706 sgd_solver.cpp:105] Iteration 6156, lr = 0.0033
I0428 21:19:01.945871 12706 solver.cpp:218] Iteration 6168 (2.41146 iter/s, 4.97624s/12 iters), loss = 0.223553
I0428 21:19:01.945924 12706 solver.cpp:237] Train net output #0: loss = 0.223553 (* 1 = 0.223553 loss)
I0428 21:19:01.945935 12706 sgd_solver.cpp:105] Iteration 6168, lr = 0.0033
I0428 21:19:02.546965 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:19:06.989848 12706 solver.cpp:218] Iteration 6180 (2.3792 iter/s, 5.04371s/12 iters), loss = 0.187733
I0428 21:19:06.991765 12706 solver.cpp:237] Train net output #0: loss = 0.187733 (* 1 = 0.187733 loss)
I0428 21:19:06.991775 12706 sgd_solver.cpp:105] Iteration 6180, lr = 0.0033
I0428 21:19:11.986459 12706 solver.cpp:218] Iteration 6192 (2.40265 iter/s, 4.99448s/12 iters), loss = 0.153974
I0428 21:19:11.986500 12706 solver.cpp:237] Train net output #0: loss = 0.153974 (* 1 = 0.153974 loss)
I0428 21:19:11.986508 12706 sgd_solver.cpp:105] Iteration 6192, lr = 0.0033
I0428 21:19:17.113467 12706 solver.cpp:218] Iteration 6204 (2.34067 iter/s, 5.12675s/12 iters), loss = 0.296913
I0428 21:19:17.113507 12706 solver.cpp:237] Train net output #0: loss = 0.296913 (* 1 = 0.296913 loss)
I0428 21:19:17.113515 12706 sgd_solver.cpp:105] Iteration 6204, lr = 0.0033
I0428 21:19:22.163889 12706 solver.cpp:218] Iteration 6216 (2.37616 iter/s, 5.05016s/12 iters), loss = 0.17147
I0428 21:19:22.163942 12706 solver.cpp:237] Train net output #0: loss = 0.17147 (* 1 = 0.17147 loss)
I0428 21:19:22.163954 12706 sgd_solver.cpp:105] Iteration 6216, lr = 0.0033
I0428 21:19:24.257460 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6222.caffemodel
I0428 21:19:27.348740 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6222.solverstate
I0428 21:19:29.727298 12706 solver.cpp:330] Iteration 6222, Testing net (#0)
I0428 21:19:29.727321 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:19:31.670027 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:19:32.952039 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:19:34.135499 12706 solver.cpp:397] Test net output #0: accuracy = 0.459559
I0428 21:19:34.135553 12706 solver.cpp:397] Test net output #1: loss = 2.93974 (* 1 = 2.93974 loss)
I0428 21:19:35.815304 12706 solver.cpp:218] Iteration 6228 (0.87907 iter/s, 13.6508s/12 iters), loss = 0.274425
I0428 21:19:35.815346 12706 solver.cpp:237] Train net output #0: loss = 0.274425 (* 1 = 0.274425 loss)
I0428 21:19:35.815353 12706 sgd_solver.cpp:105] Iteration 6228, lr = 0.0033
I0428 21:19:40.873488 12706 solver.cpp:218] Iteration 6240 (2.37251 iter/s, 5.05792s/12 iters), loss = 0.142005
I0428 21:19:40.873579 12706 solver.cpp:237] Train net output #0: loss = 0.142005 (* 1 = 0.142005 loss)
I0428 21:19:40.873587 12706 sgd_solver.cpp:105] Iteration 6240, lr = 0.0033
I0428 21:19:45.877990 12706 solver.cpp:218] Iteration 6252 (2.39799 iter/s, 5.00419s/12 iters), loss = 0.11542
I0428 21:19:45.878029 12706 solver.cpp:237] Train net output #0: loss = 0.11542 (* 1 = 0.11542 loss)
I0428 21:19:45.878038 12706 sgd_solver.cpp:105] Iteration 6252, lr = 0.0033
I0428 21:19:51.009603 12706 solver.cpp:218] Iteration 6264 (2.33857 iter/s, 5.13135s/12 iters), loss = 0.264816
I0428 21:19:51.009641 12706 solver.cpp:237] Train net output #0: loss = 0.264816 (* 1 = 0.264816 loss)
I0428 21:19:51.009649 12706 sgd_solver.cpp:105] Iteration 6264, lr = 0.0033
I0428 21:19:53.718880 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:19:55.992821 12706 solver.cpp:218] Iteration 6276 (2.40821 iter/s, 4.98296s/12 iters), loss = 0.06701
I0428 21:19:55.992871 12706 solver.cpp:237] Train net output #0: loss = 0.06701 (* 1 = 0.06701 loss)
I0428 21:19:55.992882 12706 sgd_solver.cpp:105] Iteration 6276, lr = 0.0033
I0428 21:20:00.964030 12706 solver.cpp:218] Iteration 6288 (2.41403 iter/s, 4.97094s/12 iters), loss = 0.204674
I0428 21:20:00.964066 12706 solver.cpp:237] Train net output #0: loss = 0.204674 (* 1 = 0.204674 loss)
I0428 21:20:00.964076 12706 sgd_solver.cpp:105] Iteration 6288, lr = 0.0033
I0428 21:20:06.252972 12706 solver.cpp:218] Iteration 6300 (2.269 iter/s, 5.28868s/12 iters), loss = 0.145467
I0428 21:20:06.253012 12706 solver.cpp:237] Train net output #0: loss = 0.145467 (* 1 = 0.145467 loss)
I0428 21:20:06.253021 12706 sgd_solver.cpp:105] Iteration 6300, lr = 0.0033
I0428 21:20:11.183816 12706 solver.cpp:218] Iteration 6312 (2.43378 iter/s, 4.93059s/12 iters), loss = 0.135477
I0428 21:20:11.183938 12706 solver.cpp:237] Train net output #0: loss = 0.135477 (* 1 = 0.135477 loss)
I0428 21:20:11.183948 12706 sgd_solver.cpp:105] Iteration 6312, lr = 0.0033
I0428 21:20:15.817842 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6324.caffemodel
I0428 21:20:18.857158 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6324.solverstate
I0428 21:20:21.993453 12706 solver.cpp:330] Iteration 6324, Testing net (#0)
I0428 21:20:21.993477 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:20:23.983837 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:20:26.853768 12706 solver.cpp:397] Test net output #0: accuracy = 0.446078
I0428 21:20:26.853801 12706 solver.cpp:397] Test net output #1: loss = 2.97308 (* 1 = 2.97308 loss)
I0428 21:20:26.944844 12706 solver.cpp:218] Iteration 6324 (0.761409 iter/s, 15.7603s/12 iters), loss = 0.124652
I0428 21:20:26.944882 12706 solver.cpp:237] Train net output #0: loss = 0.124652 (* 1 = 0.124652 loss)
I0428 21:20:26.944890 12706 sgd_solver.cpp:105] Iteration 6324, lr = 0.0033
I0428 21:20:31.066226 12706 solver.cpp:218] Iteration 6336 (2.9118 iter/s, 4.12116s/12 iters), loss = 0.146827
I0428 21:20:31.066280 12706 solver.cpp:237] Train net output #0: loss = 0.146827 (* 1 = 0.146827 loss)
I0428 21:20:31.066293 12706 sgd_solver.cpp:105] Iteration 6336, lr = 0.0033
I0428 21:20:36.027798 12706 solver.cpp:218] Iteration 6348 (2.41872 iter/s, 4.9613s/12 iters), loss = 0.169666
I0428 21:20:36.027853 12706 solver.cpp:237] Train net output #0: loss = 0.169666 (* 1 = 0.169666 loss)
I0428 21:20:36.027864 12706 sgd_solver.cpp:105] Iteration 6348, lr = 0.0033
I0428 21:20:41.055276 12706 solver.cpp:218] Iteration 6360 (2.38701 iter/s, 5.0272s/12 iters), loss = 0.152908
I0428 21:20:41.055322 12706 solver.cpp:237] Train net output #0: loss = 0.152908 (* 1 = 0.152908 loss)
I0428 21:20:41.055332 12706 sgd_solver.cpp:105] Iteration 6360, lr = 0.0033
I0428 21:20:45.948781 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:20:46.090754 12706 solver.cpp:218] Iteration 6372 (2.38322 iter/s, 5.03521s/12 iters), loss = 0.165967
I0428 21:20:46.090795 12706 solver.cpp:237] Train net output #0: loss = 0.165967 (* 1 = 0.165967 loss)
I0428 21:20:46.090804 12706 sgd_solver.cpp:105] Iteration 6372, lr = 0.0033
I0428 21:20:51.043412 12706 solver.cpp:218] Iteration 6384 (2.42307 iter/s, 4.9524s/12 iters), loss = 0.159763
I0428 21:20:51.043453 12706 solver.cpp:237] Train net output #0: loss = 0.159763 (* 1 = 0.159763 loss)
I0428 21:20:51.043462 12706 sgd_solver.cpp:105] Iteration 6384, lr = 0.0033
I0428 21:20:55.991799 12706 solver.cpp:218] Iteration 6396 (2.42516 iter/s, 4.94813s/12 iters), loss = 0.224759
I0428 21:20:55.991837 12706 solver.cpp:237] Train net output #0: loss = 0.224759 (* 1 = 0.224759 loss)
I0428 21:20:55.991844 12706 sgd_solver.cpp:105] Iteration 6396, lr = 0.0033
I0428 21:21:00.962515 12706 solver.cpp:218] Iteration 6408 (2.41426 iter/s, 4.97046s/12 iters), loss = 0.20822
I0428 21:21:00.962555 12706 solver.cpp:237] Train net output #0: loss = 0.20822 (* 1 = 0.20822 loss)
I0428 21:21:00.962564 12706 sgd_solver.cpp:105] Iteration 6408, lr = 0.0033
I0428 21:21:05.945454 12706 solver.cpp:218] Iteration 6420 (2.40834 iter/s, 4.98269s/12 iters), loss = 0.138466
I0428 21:21:05.945490 12706 solver.cpp:237] Train net output #0: loss = 0.138466 (* 1 = 0.138466 loss)
I0428 21:21:05.945497 12706 sgd_solver.cpp:105] Iteration 6420, lr = 0.0033
I0428 21:21:07.946269 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6426.caffemodel
I0428 21:21:13.157615 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6426.solverstate
I0428 21:21:21.165716 12706 solver.cpp:330] Iteration 6426, Testing net (#0)
I0428 21:21:21.165787 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:21:23.323930 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:21:26.114276 12706 solver.cpp:397] Test net output #0: accuracy = 0.454044
I0428 21:21:26.114306 12706 solver.cpp:397] Test net output #1: loss = 2.90021 (* 1 = 2.90021 loss)
I0428 21:21:27.906230 12706 solver.cpp:218] Iteration 6432 (0.546452 iter/s, 21.9598s/12 iters), loss = 0.148677
I0428 21:21:27.906282 12706 solver.cpp:237] Train net output #0: loss = 0.148677 (* 1 = 0.148677 loss)
I0428 21:21:27.906293 12706 sgd_solver.cpp:105] Iteration 6432, lr = 0.0033
I0428 21:21:32.902437 12706 solver.cpp:218] Iteration 6444 (2.40195 iter/s, 4.99594s/12 iters), loss = 0.411168
I0428 21:21:32.902478 12706 solver.cpp:237] Train net output #0: loss = 0.411168 (* 1 = 0.411168 loss)
I0428 21:21:32.902487 12706 sgd_solver.cpp:105] Iteration 6444, lr = 0.0033
I0428 21:21:37.896343 12706 solver.cpp:218] Iteration 6456 (2.40306 iter/s, 4.99364s/12 iters), loss = 0.165605
I0428 21:21:37.896411 12706 solver.cpp:237] Train net output #0: loss = 0.165605 (* 1 = 0.165605 loss)
I0428 21:21:37.896438 12706 sgd_solver.cpp:105] Iteration 6456, lr = 0.0033
I0428 21:21:43.075453 12706 solver.cpp:218] Iteration 6468 (2.31713 iter/s, 5.17882s/12 iters), loss = 0.176867
I0428 21:21:43.075493 12706 solver.cpp:237] Train net output #0: loss = 0.176867 (* 1 = 0.176867 loss)
I0428 21:21:43.075500 12706 sgd_solver.cpp:105] Iteration 6468, lr = 0.0033
I0428 21:21:45.063951 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:21:48.131263 12706 solver.cpp:218] Iteration 6480 (2.37363 iter/s, 5.05554s/12 iters), loss = 0.196547
I0428 21:21:48.131306 12706 solver.cpp:237] Train net output #0: loss = 0.196547 (* 1 = 0.196547 loss)
I0428 21:21:48.131317 12706 sgd_solver.cpp:105] Iteration 6480, lr = 0.0033
I0428 21:21:53.261080 12706 solver.cpp:218] Iteration 6492 (2.33938 iter/s, 5.12956s/12 iters), loss = 0.179797
I0428 21:21:53.263530 12706 solver.cpp:237] Train net output #0: loss = 0.179797 (* 1 = 0.179797 loss)
I0428 21:21:53.263538 12706 sgd_solver.cpp:105] Iteration 6492, lr = 0.0033
I0428 21:21:58.271822 12706 solver.cpp:218] Iteration 6504 (2.39613 iter/s, 5.00808s/12 iters), loss = 0.208338
I0428 21:21:58.271862 12706 solver.cpp:237] Train net output #0: loss = 0.208338 (* 1 = 0.208338 loss)
I0428 21:21:58.271868 12706 sgd_solver.cpp:105] Iteration 6504, lr = 0.0033
I0428 21:22:03.236922 12706 solver.cpp:218] Iteration 6516 (2.41699 iter/s, 4.96485s/12 iters), loss = 0.127007
I0428 21:22:03.236960 12706 solver.cpp:237] Train net output #0: loss = 0.127007 (* 1 = 0.127007 loss)
I0428 21:22:03.236968 12706 sgd_solver.cpp:105] Iteration 6516, lr = 0.0033
I0428 21:22:07.781323 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6528.caffemodel
I0428 21:22:13.961561 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6528.solverstate
I0428 21:22:16.659251 12706 solver.cpp:330] Iteration 6528, Testing net (#0)
I0428 21:22:16.659271 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:22:18.468941 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:22:21.054086 12706 solver.cpp:397] Test net output #0: accuracy = 0.454657
I0428 21:22:21.054122 12706 solver.cpp:397] Test net output #1: loss = 2.85505 (* 1 = 2.85505 loss)
I0428 21:22:21.145613 12706 solver.cpp:218] Iteration 6528 (0.670095 iter/s, 17.9079s/12 iters), loss = 0.168046
I0428 21:22:21.145655 12706 solver.cpp:237] Train net output #0: loss = 0.168046 (* 1 = 0.168046 loss)
I0428 21:22:21.145664 12706 sgd_solver.cpp:105] Iteration 6528, lr = 0.0033
I0428 21:22:25.235383 12706 solver.cpp:218] Iteration 6540 (2.93431 iter/s, 4.08955s/12 iters), loss = 0.1898
I0428 21:22:25.236599 12706 solver.cpp:237] Train net output #0: loss = 0.1898 (* 1 = 0.1898 loss)
I0428 21:22:25.236608 12706 sgd_solver.cpp:105] Iteration 6540, lr = 0.0033
I0428 21:22:30.259176 12706 solver.cpp:218] Iteration 6552 (2.38931 iter/s, 5.02236s/12 iters), loss = 0.326923
I0428 21:22:30.259213 12706 solver.cpp:237] Train net output #0: loss = 0.326923 (* 1 = 0.326923 loss)
I0428 21:22:30.259222 12706 sgd_solver.cpp:105] Iteration 6552, lr = 0.0033
I0428 21:22:35.245834 12706 solver.cpp:218] Iteration 6564 (2.40654 iter/s, 4.9864s/12 iters), loss = 0.0819587
I0428 21:22:35.245873 12706 solver.cpp:237] Train net output #0: loss = 0.0819587 (* 1 = 0.0819587 loss)
I0428 21:22:35.245882 12706 sgd_solver.cpp:105] Iteration 6564, lr = 0.0033
I0428 21:22:39.403880 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:22:40.186054 12706 solver.cpp:218] Iteration 6576 (2.42917 iter/s, 4.93996s/12 iters), loss = 0.15258
I0428 21:22:40.186092 12706 solver.cpp:237] Train net output #0: loss = 0.15258 (* 1 = 0.15258 loss)
I0428 21:22:40.186100 12706 sgd_solver.cpp:105] Iteration 6576, lr = 0.0033
I0428 21:22:45.257149 12706 solver.cpp:218] Iteration 6588 (2.36647 iter/s, 5.07083s/12 iters), loss = 0.134167
I0428 21:22:45.257191 12706 solver.cpp:237] Train net output #0: loss = 0.134167 (* 1 = 0.134167 loss)
I0428 21:22:45.257198 12706 sgd_solver.cpp:105] Iteration 6588, lr = 0.0033
I0428 21:22:50.279367 12706 solver.cpp:218] Iteration 6600 (2.38951 iter/s, 5.02196s/12 iters), loss = 0.122801
I0428 21:22:50.279407 12706 solver.cpp:237] Train net output #0: loss = 0.122801 (* 1 = 0.122801 loss)
I0428 21:22:50.279414 12706 sgd_solver.cpp:105] Iteration 6600, lr = 0.0033
I0428 21:22:55.266422 12706 solver.cpp:218] Iteration 6612 (2.40636 iter/s, 4.98679s/12 iters), loss = 0.191176
I0428 21:22:55.266582 12706 solver.cpp:237] Train net output #0: loss = 0.191176 (* 1 = 0.191176 loss)
I0428 21:22:55.266594 12706 sgd_solver.cpp:105] Iteration 6612, lr = 0.0033
I0428 21:23:00.421974 12706 solver.cpp:218] Iteration 6624 (2.32776 iter/s, 5.15518s/12 iters), loss = 0.239542
I0428 21:23:00.422011 12706 solver.cpp:237] Train net output #0: loss = 0.239542 (* 1 = 0.239542 loss)
I0428 21:23:00.422020 12706 sgd_solver.cpp:105] Iteration 6624, lr = 0.0033
I0428 21:23:02.440032 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6630.caffemodel
I0428 21:23:08.666702 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6630.solverstate
I0428 21:23:10.988960 12706 solver.cpp:330] Iteration 6630, Testing net (#0)
I0428 21:23:10.988979 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:23:12.826414 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:23:15.538409 12706 solver.cpp:397] Test net output #0: accuracy = 0.438726
I0428 21:23:15.538441 12706 solver.cpp:397] Test net output #1: loss = 2.92445 (* 1 = 2.92445 loss)
I0428 21:23:17.305989 12706 solver.cpp:218] Iteration 6636 (0.710762 iter/s, 16.8833s/12 iters), loss = 0.332828
I0428 21:23:17.306025 12706 solver.cpp:237] Train net output #0: loss = 0.332828 (* 1 = 0.332828 loss)
I0428 21:23:17.306032 12706 sgd_solver.cpp:105] Iteration 6636, lr = 0.0033
I0428 21:23:22.264781 12706 solver.cpp:218] Iteration 6648 (2.42007 iter/s, 4.95854s/12 iters), loss = 0.155939
I0428 21:23:22.264822 12706 solver.cpp:237] Train net output #0: loss = 0.155939 (* 1 = 0.155939 loss)
I0428 21:23:22.264828 12706 sgd_solver.cpp:105] Iteration 6648, lr = 0.0033
I0428 21:23:27.211072 12706 solver.cpp:218] Iteration 6660 (2.42619 iter/s, 4.94604s/12 iters), loss = 0.206227
I0428 21:23:27.211177 12706 solver.cpp:237] Train net output #0: loss = 0.206227 (* 1 = 0.206227 loss)
I0428 21:23:27.211186 12706 sgd_solver.cpp:105] Iteration 6660, lr = 0.0033
I0428 21:23:32.241943 12706 solver.cpp:218] Iteration 6672 (2.38543 iter/s, 5.03054s/12 iters), loss = 0.0905216
I0428 21:23:32.241997 12706 solver.cpp:237] Train net output #0: loss = 0.0905216 (* 1 = 0.0905216 loss)
I0428 21:23:32.242008 12706 sgd_solver.cpp:105] Iteration 6672, lr = 0.0033
I0428 21:23:33.572222 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:23:37.138092 12706 solver.cpp:218] Iteration 6684 (2.45104 iter/s, 4.89589s/12 iters), loss = 0.195958
I0428 21:23:37.138131 12706 solver.cpp:237] Train net output #0: loss = 0.195958 (* 1 = 0.195958 loss)
I0428 21:23:37.138139 12706 sgd_solver.cpp:105] Iteration 6684, lr = 0.0033
I0428 21:23:42.131364 12706 solver.cpp:218] Iteration 6696 (2.40336 iter/s, 4.99301s/12 iters), loss = 0.168336
I0428 21:23:42.131403 12706 solver.cpp:237] Train net output #0: loss = 0.168336 (* 1 = 0.168336 loss)
I0428 21:23:42.131412 12706 sgd_solver.cpp:105] Iteration 6696, lr = 0.0033
I0428 21:23:47.111541 12706 solver.cpp:218] Iteration 6708 (2.40968 iter/s, 4.97992s/12 iters), loss = 0.327894
I0428 21:23:47.111590 12706 solver.cpp:237] Train net output #0: loss = 0.327894 (* 1 = 0.327894 loss)
I0428 21:23:47.111601 12706 sgd_solver.cpp:105] Iteration 6708, lr = 0.0033
I0428 21:23:52.144258 12706 solver.cpp:218] Iteration 6720 (2.38453 iter/s, 5.03245s/12 iters), loss = 0.142411
I0428 21:23:52.144299 12706 solver.cpp:237] Train net output #0: loss = 0.142411 (* 1 = 0.142411 loss)
I0428 21:23:52.144307 12706 sgd_solver.cpp:105] Iteration 6720, lr = 0.0033
I0428 21:23:56.685684 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6732.caffemodel
I0428 21:24:01.098367 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6732.solverstate
I0428 21:24:05.027812 12706 solver.cpp:330] Iteration 6732, Testing net (#0)
I0428 21:24:05.027837 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:24:06.932188 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:24:09.627202 12706 solver.cpp:397] Test net output #0: accuracy = 0.459559
I0428 21:24:09.627233 12706 solver.cpp:397] Test net output #1: loss = 2.9489 (* 1 = 2.9489 loss)
I0428 21:24:09.718204 12706 solver.cpp:218] Iteration 6732 (0.682859 iter/s, 17.5732s/12 iters), loss = 0.133949
I0428 21:24:09.718250 12706 solver.cpp:237] Train net output #0: loss = 0.133949 (* 1 = 0.133949 loss)
I0428 21:24:09.718258 12706 sgd_solver.cpp:105] Iteration 6732, lr = 0.001089
I0428 21:24:13.908012 12706 solver.cpp:218] Iteration 6744 (2.86425 iter/s, 4.18958s/12 iters), loss = 0.169843
I0428 21:24:13.908051 12706 solver.cpp:237] Train net output #0: loss = 0.169843 (* 1 = 0.169843 loss)
I0428 21:24:13.908058 12706 sgd_solver.cpp:105] Iteration 6744, lr = 0.001089
I0428 21:24:18.942559 12706 solver.cpp:218] Iteration 6756 (2.38365 iter/s, 5.03429s/12 iters), loss = 0.132038
I0428 21:24:18.942598 12706 solver.cpp:237] Train net output #0: loss = 0.132038 (* 1 = 0.132038 loss)
I0428 21:24:18.942607 12706 sgd_solver.cpp:105] Iteration 6756, lr = 0.001089
I0428 21:24:24.049487 12706 solver.cpp:218] Iteration 6768 (2.34987 iter/s, 5.10667s/12 iters), loss = 0.264296
I0428 21:24:24.049521 12706 solver.cpp:237] Train net output #0: loss = 0.264296 (* 1 = 0.264296 loss)
I0428 21:24:24.049530 12706 sgd_solver.cpp:105] Iteration 6768, lr = 0.001089
I0428 21:24:27.533114 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:24:29.051950 12706 solver.cpp:218] Iteration 6780 (2.39894 iter/s, 5.00221s/12 iters), loss = 0.167798
I0428 21:24:29.052001 12706 solver.cpp:237] Train net output #0: loss = 0.167798 (* 1 = 0.167798 loss)
I0428 21:24:29.052011 12706 sgd_solver.cpp:105] Iteration 6780, lr = 0.001089
I0428 21:24:33.984815 12706 solver.cpp:218] Iteration 6792 (2.43279 iter/s, 4.9326s/12 iters), loss = 0.162229
I0428 21:24:33.984915 12706 solver.cpp:237] Train net output #0: loss = 0.162229 (* 1 = 0.162229 loss)
I0428 21:24:33.984922 12706 sgd_solver.cpp:105] Iteration 6792, lr = 0.001089
I0428 21:24:39.016852 12706 solver.cpp:218] Iteration 6804 (2.38487 iter/s, 5.03172s/12 iters), loss = 0.177937
I0428 21:24:39.016893 12706 solver.cpp:237] Train net output #0: loss = 0.177937 (* 1 = 0.177937 loss)
I0428 21:24:39.016901 12706 sgd_solver.cpp:105] Iteration 6804, lr = 0.001089
I0428 21:24:44.040823 12706 solver.cpp:218] Iteration 6816 (2.38867 iter/s, 5.02371s/12 iters), loss = 0.119158
I0428 21:24:44.040868 12706 solver.cpp:237] Train net output #0: loss = 0.119158 (* 1 = 0.119158 loss)
I0428 21:24:44.040877 12706 sgd_solver.cpp:105] Iteration 6816, lr = 0.001089
I0428 21:24:49.066766 12706 solver.cpp:218] Iteration 6828 (2.38774 iter/s, 5.02568s/12 iters), loss = 0.102073
I0428 21:24:49.066807 12706 solver.cpp:237] Train net output #0: loss = 0.102073 (* 1 = 0.102073 loss)
I0428 21:24:49.066814 12706 sgd_solver.cpp:105] Iteration 6828, lr = 0.001089
I0428 21:24:51.135586 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6834.caffemodel
I0428 21:24:54.453385 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6834.solverstate
I0428 21:24:56.815773 12706 solver.cpp:330] Iteration 6834, Testing net (#0)
I0428 21:24:56.815793 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:24:58.680418 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:25:01.568814 12706 solver.cpp:397] Test net output #0: accuracy = 0.471814
I0428 21:25:01.568842 12706 solver.cpp:397] Test net output #1: loss = 2.81353 (* 1 = 2.81353 loss)
I0428 21:25:03.320609 12706 solver.cpp:218] Iteration 6840 (0.841916 iter/s, 14.2532s/12 iters), loss = 0.0810977
I0428 21:25:03.320650 12706 solver.cpp:237] Train net output #0: loss = 0.0810977 (* 1 = 0.0810977 loss)
I0428 21:25:03.320657 12706 sgd_solver.cpp:105] Iteration 6840, lr = 0.001089
I0428 21:25:08.294407 12706 solver.cpp:218] Iteration 6852 (2.41277 iter/s, 4.97354s/12 iters), loss = 0.124055
I0428 21:25:08.294529 12706 solver.cpp:237] Train net output #0: loss = 0.124055 (* 1 = 0.124055 loss)
I0428 21:25:08.294538 12706 sgd_solver.cpp:105] Iteration 6852, lr = 0.001089
I0428 21:25:13.286010 12706 solver.cpp:218] Iteration 6864 (2.4042 iter/s, 4.99127s/12 iters), loss = 0.087376
I0428 21:25:13.286047 12706 solver.cpp:237] Train net output #0: loss = 0.087376 (* 1 = 0.087376 loss)
I0428 21:25:13.286056 12706 sgd_solver.cpp:105] Iteration 6864, lr = 0.001089
I0428 21:25:18.452479 12706 solver.cpp:218] Iteration 6876 (2.32279 iter/s, 5.16619s/12 iters), loss = 0.161578
I0428 21:25:18.452553 12706 solver.cpp:237] Train net output #0: loss = 0.161578 (* 1 = 0.161578 loss)
I0428 21:25:18.452560 12706 sgd_solver.cpp:105] Iteration 6876, lr = 0.001089
I0428 21:25:19.103672 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:25:23.660854 12706 solver.cpp:218] Iteration 6888 (2.30411 iter/s, 5.20808s/12 iters), loss = 0.0896417
I0428 21:25:23.660897 12706 solver.cpp:237] Train net output #0: loss = 0.0896417 (* 1 = 0.0896417 loss)
I0428 21:25:23.660903 12706 sgd_solver.cpp:105] Iteration 6888, lr = 0.001089
I0428 21:25:28.818325 12706 solver.cpp:218] Iteration 6900 (2.32684 iter/s, 5.1572s/12 iters), loss = 0.0661293
I0428 21:25:28.818365 12706 solver.cpp:237] Train net output #0: loss = 0.0661293 (* 1 = 0.0661293 loss)
I0428 21:25:28.818374 12706 sgd_solver.cpp:105] Iteration 6900, lr = 0.001089
I0428 21:25:33.748229 12706 solver.cpp:218] Iteration 6912 (2.43425 iter/s, 4.92965s/12 iters), loss = 0.0661208
I0428 21:25:33.748268 12706 solver.cpp:237] Train net output #0: loss = 0.0661208 (* 1 = 0.0661208 loss)
I0428 21:25:33.748276 12706 sgd_solver.cpp:105] Iteration 6912, lr = 0.001089
I0428 21:25:38.894615 12706 solver.cpp:218] Iteration 6924 (2.33185 iter/s, 5.14613s/12 iters), loss = 0.148333
I0428 21:25:38.894706 12706 solver.cpp:237] Train net output #0: loss = 0.148333 (* 1 = 0.148333 loss)
I0428 21:25:38.894716 12706 sgd_solver.cpp:105] Iteration 6924, lr = 0.001089
I0428 21:25:43.331717 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6936.caffemodel
I0428 21:25:46.333750 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6936.solverstate
I0428 21:25:48.631532 12706 solver.cpp:330] Iteration 6936, Testing net (#0)
I0428 21:25:48.631552 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:25:49.200278 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:25:50.255888 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:25:53.005287 12706 solver.cpp:397] Test net output #0: accuracy = 0.469976
I0428 21:25:53.005319 12706 solver.cpp:397] Test net output #1: loss = 2.84868 (* 1 = 2.84868 loss)
I0428 21:25:53.096313 12706 solver.cpp:218] Iteration 6936 (0.84501 iter/s, 14.201s/12 iters), loss = 0.176958
I0428 21:25:53.096345 12706 solver.cpp:237] Train net output #0: loss = 0.176958 (* 1 = 0.176958 loss)
I0428 21:25:53.096354 12706 sgd_solver.cpp:105] Iteration 6936, lr = 0.001089
I0428 21:25:57.207904 12706 solver.cpp:218] Iteration 6948 (2.91873 iter/s, 4.11138s/12 iters), loss = 0.0850389
I0428 21:25:57.207943 12706 solver.cpp:237] Train net output #0: loss = 0.0850389 (* 1 = 0.0850389 loss)
I0428 21:25:57.207950 12706 sgd_solver.cpp:105] Iteration 6948, lr = 0.001089
I0428 21:26:02.285666 12706 solver.cpp:218] Iteration 6960 (2.36337 iter/s, 5.0775s/12 iters), loss = 0.0311
I0428 21:26:02.285715 12706 solver.cpp:237] Train net output #0: loss = 0.0311 (* 1 = 0.0311 loss)
I0428 21:26:02.285724 12706 sgd_solver.cpp:105] Iteration 6960, lr = 0.001089
I0428 21:26:07.245126 12706 solver.cpp:218] Iteration 6972 (2.41975 iter/s, 4.9592s/12 iters), loss = 0.0465024
I0428 21:26:07.245167 12706 solver.cpp:237] Train net output #0: loss = 0.0465024 (* 1 = 0.0465024 loss)
I0428 21:26:07.245173 12706 sgd_solver.cpp:105] Iteration 6972, lr = 0.001089
I0428 21:26:09.995080 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:26:12.253108 12706 solver.cpp:218] Iteration 6984 (2.3963 iter/s, 5.00773s/12 iters), loss = 0.0488531
I0428 21:26:12.253149 12706 solver.cpp:237] Train net output #0: loss = 0.0488531 (* 1 = 0.0488531 loss)
I0428 21:26:12.253157 12706 sgd_solver.cpp:105] Iteration 6984, lr = 0.001089
I0428 21:26:17.340847 12706 solver.cpp:218] Iteration 6996 (2.35874 iter/s, 5.08747s/12 iters), loss = 0.147202
I0428 21:26:17.340893 12706 solver.cpp:237] Train net output #0: loss = 0.147202 (* 1 = 0.147202 loss)
I0428 21:26:17.340904 12706 sgd_solver.cpp:105] Iteration 6996, lr = 0.001089
I0428 21:26:22.343328 12706 solver.cpp:218] Iteration 7008 (2.39893 iter/s, 5.00222s/12 iters), loss = 0.096167
I0428 21:26:22.343367 12706 solver.cpp:237] Train net output #0: loss = 0.096167 (* 1 = 0.096167 loss)
I0428 21:26:22.343375 12706 sgd_solver.cpp:105] Iteration 7008, lr = 0.001089
I0428 21:26:27.361549 12706 solver.cpp:218] Iteration 7020 (2.39141 iter/s, 5.01796s/12 iters), loss = 0.145965
I0428 21:26:27.361599 12706 solver.cpp:237] Train net output #0: loss = 0.145965 (* 1 = 0.145965 loss)
I0428 21:26:27.361610 12706 sgd_solver.cpp:105] Iteration 7020, lr = 0.001089
I0428 21:26:32.353802 12706 solver.cpp:218] Iteration 7032 (2.40385 iter/s, 4.99199s/12 iters), loss = 0.0655391
I0428 21:26:32.353842 12706 solver.cpp:237] Train net output #0: loss = 0.0655391 (* 1 = 0.0655391 loss)
I0428 21:26:32.353849 12706 sgd_solver.cpp:105] Iteration 7032, lr = 0.001089
I0428 21:26:34.363929 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7038.caffemodel
I0428 21:26:39.656391 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7038.solverstate
I0428 21:26:41.965142 12706 solver.cpp:330] Iteration 7038, Testing net (#0)
I0428 21:26:41.965248 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:26:43.549845 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:26:46.329190 12706 solver.cpp:397] Test net output #0: accuracy = 0.478554
I0428 21:26:46.329231 12706 solver.cpp:397] Test net output #1: loss = 2.8101 (* 1 = 2.8101 loss)
I0428 21:26:48.133229 12706 solver.cpp:218] Iteration 7044 (0.760518 iter/s, 15.7787s/12 iters), loss = 0.0865784
I0428 21:26:48.133270 12706 solver.cpp:237] Train net output #0: loss = 0.0865785 (* 1 = 0.0865785 loss)
I0428 21:26:48.133278 12706 sgd_solver.cpp:105] Iteration 7044, lr = 0.001089
I0428 21:26:53.434082 12706 solver.cpp:218] Iteration 7056 (2.2639 iter/s, 5.30058s/12 iters), loss = 0.160413
I0428 21:26:53.434120 12706 solver.cpp:237] Train net output #0: loss = 0.160413 (* 1 = 0.160413 loss)
I0428 21:26:53.434128 12706 sgd_solver.cpp:105] Iteration 7056, lr = 0.001089
I0428 21:26:58.466985 12706 solver.cpp:218] Iteration 7068 (2.38443 iter/s, 5.03264s/12 iters), loss = 0.118695
I0428 21:26:58.467028 12706 solver.cpp:237] Train net output #0: loss = 0.118695 (* 1 = 0.118695 loss)
I0428 21:26:58.467038 12706 sgd_solver.cpp:105] Iteration 7068, lr = 0.001089
I0428 21:27:03.534669 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:27:03.643559 12706 solver.cpp:218] Iteration 7080 (2.31826 iter/s, 5.17631s/12 iters), loss = 0.176484
I0428 21:27:03.643600 12706 solver.cpp:237] Train net output #0: loss = 0.176484 (* 1 = 0.176484 loss)
I0428 21:27:03.643607 12706 sgd_solver.cpp:105] Iteration 7080, lr = 0.001089
I0428 21:27:08.602705 12706 solver.cpp:218] Iteration 7092 (2.4199 iter/s, 4.95889s/12 iters), loss = 0.0995807
I0428 21:27:08.602748 12706 solver.cpp:237] Train net output #0: loss = 0.0995807 (* 1 = 0.0995807 loss)
I0428 21:27:08.602756 12706 sgd_solver.cpp:105] Iteration 7092, lr = 0.001089
I0428 21:27:13.666699 12706 solver.cpp:218] Iteration 7104 (2.3698 iter/s, 5.06373s/12 iters), loss = 0.133243
I0428 21:27:13.666829 12706 solver.cpp:237] Train net output #0: loss = 0.133243 (* 1 = 0.133243 loss)
I0428 21:27:13.666838 12706 sgd_solver.cpp:105] Iteration 7104, lr = 0.001089
I0428 21:27:18.729176 12706 solver.cpp:218] Iteration 7116 (2.37054 iter/s, 5.06213s/12 iters), loss = 0.18423
I0428 21:27:18.729215 12706 solver.cpp:237] Train net output #0: loss = 0.18423 (* 1 = 0.18423 loss)
I0428 21:27:18.729223 12706 sgd_solver.cpp:105] Iteration 7116, lr = 0.001089
I0428 21:27:23.779968 12706 solver.cpp:218] Iteration 7128 (2.37599 iter/s, 5.05053s/12 iters), loss = 0.140864
I0428 21:27:23.780019 12706 solver.cpp:237] Train net output #0: loss = 0.140864 (* 1 = 0.140864 loss)
I0428 21:27:23.780030 12706 sgd_solver.cpp:105] Iteration 7128, lr = 0.001089
I0428 21:27:28.372160 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7140.caffemodel
I0428 21:27:31.746001 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7140.solverstate
I0428 21:27:34.074465 12706 solver.cpp:330] Iteration 7140, Testing net (#0)
I0428 21:27:34.074494 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:27:35.617231 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:27:38.447572 12706 solver.cpp:397] Test net output #0: accuracy = 0.473039
I0428 21:27:38.447602 12706 solver.cpp:397] Test net output #1: loss = 2.87396 (* 1 = 2.87396 loss)
I0428 21:27:38.538790 12706 solver.cpp:218] Iteration 7140 (0.813109 iter/s, 14.7582s/12 iters), loss = 0.107736
I0428 21:27:38.538831 12706 solver.cpp:237] Train net output #0: loss = 0.107736 (* 1 = 0.107736 loss)
I0428 21:27:38.538839 12706 sgd_solver.cpp:105] Iteration 7140, lr = 0.001089
I0428 21:27:42.717031 12706 solver.cpp:218] Iteration 7152 (2.87218 iter/s, 4.17801s/12 iters), loss = 0.115206
I0428 21:27:42.717077 12706 solver.cpp:237] Train net output #0: loss = 0.115206 (* 1 = 0.115206 loss)
I0428 21:27:42.717087 12706 sgd_solver.cpp:105] Iteration 7152, lr = 0.001089
I0428 21:27:47.848961 12706 solver.cpp:218] Iteration 7164 (2.33842 iter/s, 5.13166s/12 iters), loss = 0.24577
I0428 21:27:47.849090 12706 solver.cpp:237] Train net output #0: loss = 0.24577 (* 1 = 0.24577 loss)
I0428 21:27:47.849099 12706 sgd_solver.cpp:105] Iteration 7164, lr = 0.001089
I0428 21:27:52.886638 12706 solver.cpp:218] Iteration 7176 (2.38221 iter/s, 5.03733s/12 iters), loss = 0.0694418
I0428 21:27:52.886677 12706 solver.cpp:237] Train net output #0: loss = 0.0694418 (* 1 = 0.0694418 loss)
I0428 21:27:52.886685 12706 sgd_solver.cpp:105] Iteration 7176, lr = 0.001089
I0428 21:27:54.986681 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:27:57.968674 12706 solver.cpp:218] Iteration 7188 (2.36138 iter/s, 5.08178s/12 iters), loss = 0.106903
I0428 21:27:57.968711 12706 solver.cpp:237] Train net output #0: loss = 0.106903 (* 1 = 0.106903 loss)
I0428 21:27:57.968720 12706 sgd_solver.cpp:105] Iteration 7188, lr = 0.001089
I0428 21:28:03.068342 12706 solver.cpp:218] Iteration 7200 (2.35321 iter/s, 5.09941s/12 iters), loss = 0.0513866
I0428 21:28:03.068383 12706 solver.cpp:237] Train net output #0: loss = 0.0513866 (* 1 = 0.0513866 loss)
I0428 21:28:03.068392 12706 sgd_solver.cpp:105] Iteration 7200, lr = 0.001089
I0428 21:28:08.107687 12706 solver.cpp:218] Iteration 7212 (2.38139 iter/s, 5.03908s/12 iters), loss = 0.148308
I0428 21:28:08.107728 12706 solver.cpp:237] Train net output #0: loss = 0.148308 (* 1 = 0.148308 loss)
I0428 21:28:08.107738 12706 sgd_solver.cpp:105] Iteration 7212, lr = 0.001089
I0428 21:28:13.172910 12706 solver.cpp:218] Iteration 7224 (2.36922 iter/s, 5.06496s/12 iters), loss = 0.139596
I0428 21:28:13.172950 12706 solver.cpp:237] Train net output #0: loss = 0.139596 (* 1 = 0.139596 loss)
I0428 21:28:13.172956 12706 sgd_solver.cpp:105] Iteration 7224, lr = 0.001089
I0428 21:28:18.232926 12706 solver.cpp:218] Iteration 7236 (2.37166 iter/s, 5.05975s/12 iters), loss = 0.11452
I0428 21:28:18.233098 12706 solver.cpp:237] Train net output #0: loss = 0.11452 (* 1 = 0.11452 loss)
I0428 21:28:18.233111 12706 sgd_solver.cpp:105] Iteration 7236, lr = 0.001089
I0428 21:28:20.230170 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7242.caffemodel
I0428 21:28:23.203284 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7242.solverstate
I0428 21:28:25.502740 12706 solver.cpp:330] Iteration 7242, Testing net (#0)
I0428 21:28:25.502760 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:28:27.072733 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:28:29.934335 12706 solver.cpp:397] Test net output #0: accuracy = 0.469976
I0428 21:28:29.934371 12706 solver.cpp:397] Test net output #1: loss = 2.85504 (* 1 = 2.85504 loss)
I0428 21:28:31.724980 12706 solver.cpp:218] Iteration 7248 (0.88946 iter/s, 13.4913s/12 iters), loss = 0.0856156
I0428 21:28:31.725019 12706 solver.cpp:237] Train net output #0: loss = 0.0856156 (* 1 = 0.0856156 loss)
I0428 21:28:31.725028 12706 sgd_solver.cpp:105] Iteration 7248, lr = 0.001089
I0428 21:28:36.791332 12706 solver.cpp:218] Iteration 7260 (2.36869 iter/s, 5.06609s/12 iters), loss = 0.0613333
I0428 21:28:36.791383 12706 solver.cpp:237] Train net output #0: loss = 0.0613333 (* 1 = 0.0613333 loss)
I0428 21:28:36.791393 12706 sgd_solver.cpp:105] Iteration 7260, lr = 0.001089
I0428 21:28:41.793550 12706 solver.cpp:218] Iteration 7272 (2.39906 iter/s, 5.00195s/12 iters), loss = 0.22111
I0428 21:28:41.793591 12706 solver.cpp:237] Train net output #0: loss = 0.22111 (* 1 = 0.22111 loss)
I0428 21:28:41.793601 12706 sgd_solver.cpp:105] Iteration 7272, lr = 0.001089
I0428 21:28:46.080910 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:28:46.828384 12706 solver.cpp:218] Iteration 7284 (2.38352 iter/s, 5.03457s/12 iters), loss = 0.0577585
I0428 21:28:46.828431 12706 solver.cpp:237] Train net output #0: loss = 0.0577585 (* 1 = 0.0577585 loss)
I0428 21:28:46.828440 12706 sgd_solver.cpp:105] Iteration 7284, lr = 0.001089
I0428 21:28:51.811204 12706 solver.cpp:218] Iteration 7296 (2.4084 iter/s, 4.98256s/12 iters), loss = 0.086106
I0428 21:28:51.811311 12706 solver.cpp:237] Train net output #0: loss = 0.086106 (* 1 = 0.086106 loss)
I0428 21:28:51.811321 12706 sgd_solver.cpp:105] Iteration 7296, lr = 0.001089
I0428 21:28:56.844272 12706 solver.cpp:218] Iteration 7308 (2.38438 iter/s, 5.03275s/12 iters), loss = 0.152071
I0428 21:28:56.844310 12706 solver.cpp:237] Train net output #0: loss = 0.152071 (* 1 = 0.152071 loss)
I0428 21:28:56.844316 12706 sgd_solver.cpp:105] Iteration 7308, lr = 0.001089
I0428 21:29:01.786348 12706 solver.cpp:218] Iteration 7320 (2.42825 iter/s, 4.94183s/12 iters), loss = 0.0734508
I0428 21:29:01.786386 12706 solver.cpp:237] Train net output #0: loss = 0.0734508 (* 1 = 0.0734508 loss)
I0428 21:29:01.786394 12706 sgd_solver.cpp:105] Iteration 7320, lr = 0.001089
I0428 21:29:06.792673 12706 solver.cpp:218] Iteration 7332 (2.39709 iter/s, 5.00607s/12 iters), loss = 0.16602
I0428 21:29:06.792712 12706 solver.cpp:237] Train net output #0: loss = 0.16602 (* 1 = 0.16602 loss)
I0428 21:29:06.792721 12706 sgd_solver.cpp:105] Iteration 7332, lr = 0.001089
I0428 21:29:11.343580 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7344.caffemodel
I0428 21:29:18.613646 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7344.solverstate
I0428 21:29:20.932360 12706 solver.cpp:330] Iteration 7344, Testing net (#0)
I0428 21:29:20.932385 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:29:22.420846 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:29:25.511711 12706 solver.cpp:397] Test net output #0: accuracy = 0.482843
I0428 21:29:25.511737 12706 solver.cpp:397] Test net output #1: loss = 2.85495 (* 1 = 2.85495 loss)
I0428 21:29:25.602736 12706 solver.cpp:218] Iteration 7344 (0.637984 iter/s, 18.8092s/12 iters), loss = 0.132574
I0428 21:29:25.602778 12706 solver.cpp:237] Train net output #0: loss = 0.132574 (* 1 = 0.132574 loss)
I0428 21:29:25.602787 12706 sgd_solver.cpp:105] Iteration 7344, lr = 0.001089
I0428 21:29:29.788211 12706 solver.cpp:218] Iteration 7356 (2.86721 iter/s, 4.18525s/12 iters), loss = 0.0938312
I0428 21:29:29.788249 12706 solver.cpp:237] Train net output #0: loss = 0.0938312 (* 1 = 0.0938312 loss)
I0428 21:29:29.788256 12706 sgd_solver.cpp:105] Iteration 7356, lr = 0.001089
I0428 21:29:34.810633 12706 solver.cpp:218] Iteration 7368 (2.38941 iter/s, 5.02216s/12 iters), loss = 0.0962281
I0428 21:29:34.810674 12706 solver.cpp:237] Train net output #0: loss = 0.0962281 (* 1 = 0.0962281 loss)
I0428 21:29:34.810683 12706 sgd_solver.cpp:105] Iteration 7368, lr = 0.001089
I0428 21:29:39.796314 12706 solver.cpp:218] Iteration 7380 (2.40702 iter/s, 4.98542s/12 iters), loss = 0.10614
I0428 21:29:39.796351 12706 solver.cpp:237] Train net output #0: loss = 0.10614 (* 1 = 0.10614 loss)
I0428 21:29:39.796358 12706 sgd_solver.cpp:105] Iteration 7380, lr = 0.001089
I0428 21:29:41.195143 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:29:44.807767 12706 solver.cpp:218] Iteration 7392 (2.39464 iter/s, 5.0112s/12 iters), loss = 0.132766
I0428 21:29:44.807806 12706 solver.cpp:237] Train net output #0: loss = 0.132766 (* 1 = 0.132766 loss)
I0428 21:29:44.807814 12706 sgd_solver.cpp:105] Iteration 7392, lr = 0.001089
I0428 21:29:49.783152 12706 solver.cpp:218] Iteration 7404 (2.412 iter/s, 4.97513s/12 iters), loss = 0.09385
I0428 21:29:49.783198 12706 solver.cpp:237] Train net output #0: loss = 0.09385 (* 1 = 0.09385 loss)
I0428 21:29:49.783208 12706 sgd_solver.cpp:105] Iteration 7404, lr = 0.001089
I0428 21:29:54.890656 12706 solver.cpp:218] Iteration 7416 (2.34961 iter/s, 5.10724s/12 iters), loss = 0.0466667
I0428 21:29:54.890765 12706 solver.cpp:237] Train net output #0: loss = 0.0466667 (* 1 = 0.0466667 loss)
I0428 21:29:54.890776 12706 sgd_solver.cpp:105] Iteration 7416, lr = 0.001089
I0428 21:29:59.922477 12706 solver.cpp:218] Iteration 7428 (2.38498 iter/s, 5.03149s/12 iters), loss = 0.131002
I0428 21:29:59.922528 12706 solver.cpp:237] Train net output #0: loss = 0.131002 (* 1 = 0.131002 loss)
I0428 21:29:59.922539 12706 sgd_solver.cpp:105] Iteration 7428, lr = 0.001089
I0428 21:30:05.213625 12706 solver.cpp:218] Iteration 7440 (2.26806 iter/s, 5.29087s/12 iters), loss = 0.0889192
I0428 21:30:05.213673 12706 solver.cpp:237] Train net output #0: loss = 0.0889192 (* 1 = 0.0889192 loss)
I0428 21:30:05.213683 12706 sgd_solver.cpp:105] Iteration 7440, lr = 0.001089
I0428 21:30:07.215687 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7446.caffemodel
I0428 21:30:10.255283 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7446.solverstate
I0428 21:30:15.473829 12706 solver.cpp:330] Iteration 7446, Testing net (#0)
I0428 21:30:15.473851 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:30:16.967814 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:30:19.906641 12706 solver.cpp:397] Test net output #0: accuracy = 0.481005
I0428 21:30:19.906673 12706 solver.cpp:397] Test net output #1: loss = 2.86335 (* 1 = 2.86335 loss)
I0428 21:30:21.700925 12706 solver.cpp:218] Iteration 7452 (0.727865 iter/s, 16.4866s/12 iters), loss = 0.149104
I0428 21:30:21.700973 12706 solver.cpp:237] Train net output #0: loss = 0.149104 (* 1 = 0.149104 loss)
I0428 21:30:21.700984 12706 sgd_solver.cpp:105] Iteration 7452, lr = 0.001089
I0428 21:30:26.711850 12706 solver.cpp:218] Iteration 7464 (2.39489 iter/s, 5.01066s/12 iters), loss = 0.0895666
I0428 21:30:26.711977 12706 solver.cpp:237] Train net output #0: loss = 0.0895666 (* 1 = 0.0895666 loss)
I0428 21:30:26.711987 12706 sgd_solver.cpp:105] Iteration 7464, lr = 0.001089
I0428 21:30:31.714150 12706 solver.cpp:218] Iteration 7476 (2.39906 iter/s, 5.00196s/12 iters), loss = 0.085595
I0428 21:30:31.714195 12706 solver.cpp:237] Train net output #0: loss = 0.085595 (* 1 = 0.085595 loss)
I0428 21:30:31.714205 12706 sgd_solver.cpp:105] Iteration 7476, lr = 0.001089
I0428 21:30:35.218294 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:30:36.722391 12706 solver.cpp:218] Iteration 7488 (2.39618 iter/s, 5.00798s/12 iters), loss = 0.0908585
I0428 21:30:36.722442 12706 solver.cpp:237] Train net output #0: loss = 0.0908585 (* 1 = 0.0908585 loss)
I0428 21:30:36.722455 12706 sgd_solver.cpp:105] Iteration 7488, lr = 0.001089
I0428 21:30:41.690516 12706 solver.cpp:218] Iteration 7500 (2.41553 iter/s, 4.96786s/12 iters), loss = 0.13297
I0428 21:30:41.690554 12706 solver.cpp:237] Train net output #0: loss = 0.13297 (* 1 = 0.13297 loss)
I0428 21:30:41.690563 12706 sgd_solver.cpp:105] Iteration 7500, lr = 0.001089
I0428 21:30:46.684372 12706 solver.cpp:218] Iteration 7512 (2.40308 iter/s, 4.9936s/12 iters), loss = 0.0576721
I0428 21:30:46.684412 12706 solver.cpp:237] Train net output #0: loss = 0.0576721 (* 1 = 0.0576721 loss)
I0428 21:30:46.684420 12706 sgd_solver.cpp:105] Iteration 7512, lr = 0.001089
I0428 21:30:51.634842 12706 solver.cpp:218] Iteration 7524 (2.42414 iter/s, 4.95021s/12 iters), loss = 0.140544
I0428 21:30:51.634881 12706 solver.cpp:237] Train net output #0: loss = 0.140544 (* 1 = 0.140544 loss)
I0428 21:30:51.634889 12706 sgd_solver.cpp:105] Iteration 7524, lr = 0.001089
I0428 21:30:56.779167 12706 solver.cpp:218] Iteration 7536 (2.33279 iter/s, 5.14406s/12 iters), loss = 0.157699
I0428 21:30:56.779274 12706 solver.cpp:237] Train net output #0: loss = 0.157699 (* 1 = 0.157699 loss)
I0428 21:30:56.779284 12706 sgd_solver.cpp:105] Iteration 7536, lr = 0.001089
I0428 21:31:01.387565 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7548.caffemodel
I0428 21:31:06.210667 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7548.solverstate
I0428 21:31:09.824832 12706 solver.cpp:330] Iteration 7548, Testing net (#0)
I0428 21:31:09.824851 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:31:11.245721 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:31:14.232126 12706 solver.cpp:397] Test net output #0: accuracy = 0.494485
I0428 21:31:14.232168 12706 solver.cpp:397] Test net output #1: loss = 2.86721 (* 1 = 2.86721 loss)
I0428 21:31:14.323555 12706 solver.cpp:218] Iteration 7548 (0.684012 iter/s, 17.5436s/12 iters), loss = 0.0546887
I0428 21:31:14.323596 12706 solver.cpp:237] Train net output #0: loss = 0.0546887 (* 1 = 0.0546887 loss)
I0428 21:31:14.323604 12706 sgd_solver.cpp:105] Iteration 7548, lr = 0.001089
I0428 21:31:18.479076 12706 solver.cpp:218] Iteration 7560 (2.88788 iter/s, 4.1553s/12 iters), loss = 0.111065
I0428 21:31:18.479122 12706 solver.cpp:237] Train net output #0: loss = 0.111065 (* 1 = 0.111065 loss)
I0428 21:31:18.479131 12706 sgd_solver.cpp:105] Iteration 7560, lr = 0.001089
I0428 21:31:23.614048 12706 solver.cpp:218] Iteration 7572 (2.33704 iter/s, 5.1347s/12 iters), loss = 0.0667364
I0428 21:31:23.614089 12706 solver.cpp:237] Train net output #0: loss = 0.0667364 (* 1 = 0.0667364 loss)
I0428 21:31:23.614096 12706 sgd_solver.cpp:105] Iteration 7572, lr = 0.001089
I0428 21:31:28.870750 12706 solver.cpp:218] Iteration 7584 (2.28292 iter/s, 5.25643s/12 iters), loss = 0.140243
I0428 21:31:28.871150 12706 solver.cpp:237] Train net output #0: loss = 0.140243 (* 1 = 0.140243 loss)
I0428 21:31:28.871161 12706 sgd_solver.cpp:105] Iteration 7584, lr = 0.001089
I0428 21:31:29.524019 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:31:33.890461 12706 solver.cpp:218] Iteration 7596 (2.39087 iter/s, 5.0191s/12 iters), loss = 0.0515186
I0428 21:31:33.890502 12706 solver.cpp:237] Train net output #0: loss = 0.0515186 (* 1 = 0.0515186 loss)
I0428 21:31:33.890511 12706 sgd_solver.cpp:105] Iteration 7596, lr = 0.001089
I0428 21:31:38.980368 12706 solver.cpp:218] Iteration 7608 (2.35773 iter/s, 5.08965s/12 iters), loss = 0.052067
I0428 21:31:38.980407 12706 solver.cpp:237] Train net output #0: loss = 0.052067 (* 1 = 0.052067 loss)
I0428 21:31:38.980415 12706 sgd_solver.cpp:105] Iteration 7608, lr = 0.001089
I0428 21:31:44.655496 12706 solver.cpp:218] Iteration 7620 (2.1146 iter/s, 5.67485s/12 iters), loss = 0.0677752
I0428 21:31:44.655539 12706 solver.cpp:237] Train net output #0: loss = 0.0677752 (* 1 = 0.0677752 loss)
I0428 21:31:44.655548 12706 sgd_solver.cpp:105] Iteration 7620, lr = 0.001089
I0428 21:31:47.257087 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:31:49.863749 12706 solver.cpp:218] Iteration 7632 (2.30416 iter/s, 5.20798s/12 iters), loss = 0.0437375
I0428 21:31:49.863787 12706 solver.cpp:237] Train net output #0: loss = 0.0437375 (* 1 = 0.0437375 loss)
I0428 21:31:49.863795 12706 sgd_solver.cpp:105] Iteration 7632, lr = 0.001089
I0428 21:31:54.918133 12706 solver.cpp:218] Iteration 7644 (2.3743 iter/s, 5.05413s/12 iters), loss = 0.0732981
I0428 21:31:54.918165 12706 solver.cpp:237] Train net output #0: loss = 0.0732981 (* 1 = 0.0732981 loss)
I0428 21:31:54.918172 12706 sgd_solver.cpp:105] Iteration 7644, lr = 0.001089
I0428 21:31:56.975415 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7650.caffemodel
I0428 21:32:00.020902 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7650.solverstate
I0428 21:32:02.411976 12706 solver.cpp:330] Iteration 7650, Testing net (#0)
I0428 21:32:02.411996 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:32:03.790983 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:32:06.842041 12706 solver.cpp:397] Test net output #0: accuracy = 0.477328
I0428 21:32:06.842069 12706 solver.cpp:397] Test net output #1: loss = 2.8937 (* 1 = 2.8937 loss)
I0428 21:32:08.645573 12706 solver.cpp:218] Iteration 7656 (0.8742 iter/s, 13.7268s/12 iters), loss = 0.135288
I0428 21:32:08.645617 12706 solver.cpp:237] Train net output #0: loss = 0.135288 (* 1 = 0.135288 loss)
I0428 21:32:08.645625 12706 sgd_solver.cpp:105] Iteration 7656, lr = 0.001089
I0428 21:32:13.545297 12706 solver.cpp:218] Iteration 7668 (2.44924 iter/s, 4.89947s/12 iters), loss = 0.0351948
I0428 21:32:13.545336 12706 solver.cpp:237] Train net output #0: loss = 0.0351948 (* 1 = 0.0351948 loss)
I0428 21:32:13.545346 12706 sgd_solver.cpp:105] Iteration 7668, lr = 0.001089
I0428 21:32:18.597990 12706 solver.cpp:218] Iteration 7680 (2.37509 iter/s, 5.05243s/12 iters), loss = 0.0391968
I0428 21:32:18.598031 12706 solver.cpp:237] Train net output #0: loss = 0.0391968 (* 1 = 0.0391968 loss)
I0428 21:32:18.598037 12706 sgd_solver.cpp:105] Iteration 7680, lr = 0.001089
I0428 21:32:21.386317 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:32:23.592999 12706 solver.cpp:218] Iteration 7692 (2.40253 iter/s, 4.99474s/12 iters), loss = 0.125753
I0428 21:32:23.593047 12706 solver.cpp:237] Train net output #0: loss = 0.125753 (* 1 = 0.125753 loss)
I0428 21:32:23.593060 12706 sgd_solver.cpp:105] Iteration 7692, lr = 0.001089
I0428 21:32:28.509491 12706 solver.cpp:218] Iteration 7704 (2.44089 iter/s, 4.91624s/12 iters), loss = 0.113133
I0428 21:32:28.509531 12706 solver.cpp:237] Train net output #0: loss = 0.113133 (* 1 = 0.113133 loss)
I0428 21:32:28.509541 12706 sgd_solver.cpp:105] Iteration 7704, lr = 0.001089
I0428 21:32:33.521869 12706 solver.cpp:218] Iteration 7716 (2.3942 iter/s, 5.01212s/12 iters), loss = 0.102394
I0428 21:32:33.522044 12706 solver.cpp:237] Train net output #0: loss = 0.102394 (* 1 = 0.102394 loss)
I0428 21:32:33.522055 12706 sgd_solver.cpp:105] Iteration 7716, lr = 0.001089
I0428 21:32:38.514322 12706 solver.cpp:218] Iteration 7728 (2.40381 iter/s, 4.99207s/12 iters), loss = 0.0698483
I0428 21:32:38.514360 12706 solver.cpp:237] Train net output #0: loss = 0.0698483 (* 1 = 0.0698483 loss)
I0428 21:32:38.514369 12706 sgd_solver.cpp:105] Iteration 7728, lr = 0.001089
I0428 21:32:43.521979 12706 solver.cpp:218] Iteration 7740 (2.39645 iter/s, 5.0074s/12 iters), loss = 0.051849
I0428 21:32:43.522019 12706 solver.cpp:237] Train net output #0: loss = 0.051849 (* 1 = 0.051849 loss)
I0428 21:32:43.522027 12706 sgd_solver.cpp:105] Iteration 7740, lr = 0.001089
I0428 21:32:48.066179 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7752.caffemodel
I0428 21:32:52.380852 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7752.solverstate
I0428 21:32:54.681468 12706 solver.cpp:330] Iteration 7752, Testing net (#0)
I0428 21:32:54.681490 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:32:55.982801 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:32:59.052183 12706 solver.cpp:397] Test net output #0: accuracy = 0.481005
I0428 21:32:59.052219 12706 solver.cpp:397] Test net output #1: loss = 2.89554 (* 1 = 2.89554 loss)
I0428 21:32:59.143420 12706 solver.cpp:218] Iteration 7752 (0.768209 iter/s, 15.6207s/12 iters), loss = 0.133933
I0428 21:32:59.143476 12706 solver.cpp:237] Train net output #0: loss = 0.133933 (* 1 = 0.133933 loss)
I0428 21:32:59.143488 12706 sgd_solver.cpp:105] Iteration 7752, lr = 0.001089
I0428 21:33:03.323211 12706 solver.cpp:218] Iteration 7764 (2.87112 iter/s, 4.17955s/12 iters), loss = 0.0378148
I0428 21:33:03.323261 12706 solver.cpp:237] Train net output #0: loss = 0.0378148 (* 1 = 0.0378148 loss)
I0428 21:33:03.323271 12706 sgd_solver.cpp:105] Iteration 7764, lr = 0.001089
I0428 21:33:08.286967 12706 solver.cpp:218] Iteration 7776 (2.41765 iter/s, 4.96349s/12 iters), loss = 0.117968
I0428 21:33:08.287078 12706 solver.cpp:237] Train net output #0: loss = 0.117968 (* 1 = 0.117968 loss)
I0428 21:33:08.287087 12706 sgd_solver.cpp:105] Iteration 7776, lr = 0.001089
I0428 21:33:13.288412 12706 solver.cpp:218] Iteration 7788 (2.39946 iter/s, 5.00112s/12 iters), loss = 0.0739104
I0428 21:33:13.288453 12706 solver.cpp:237] Train net output #0: loss = 0.0739104 (* 1 = 0.0739104 loss)
I0428 21:33:13.288460 12706 sgd_solver.cpp:105] Iteration 7788, lr = 0.001089
I0428 21:33:13.295821 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:33:18.334368 12706 solver.cpp:218] Iteration 7800 (2.37826 iter/s, 5.0457s/12 iters), loss = 0.0571482
I0428 21:33:18.334401 12706 solver.cpp:237] Train net output #0: loss = 0.0571482 (* 1 = 0.0571482 loss)
I0428 21:33:18.334409 12706 sgd_solver.cpp:105] Iteration 7800, lr = 0.001089
I0428 21:33:23.381500 12706 solver.cpp:218] Iteration 7812 (2.37771 iter/s, 5.04688s/12 iters), loss = 0.0559218
I0428 21:33:23.381541 12706 solver.cpp:237] Train net output #0: loss = 0.0559218 (* 1 = 0.0559218 loss)
I0428 21:33:23.381548 12706 sgd_solver.cpp:105] Iteration 7812, lr = 0.001089
I0428 21:33:28.490041 12706 solver.cpp:218] Iteration 7824 (2.34913 iter/s, 5.10828s/12 iters), loss = 0.0723264
I0428 21:33:28.490078 12706 solver.cpp:237] Train net output #0: loss = 0.0723264 (* 1 = 0.0723264 loss)
I0428 21:33:28.490085 12706 sgd_solver.cpp:105] Iteration 7824, lr = 0.001089
I0428 21:33:33.549487 12706 solver.cpp:218] Iteration 7836 (2.37192 iter/s, 5.05919s/12 iters), loss = 0.0962648
I0428 21:33:33.549536 12706 solver.cpp:237] Train net output #0: loss = 0.0962648 (* 1 = 0.0962648 loss)
I0428 21:33:33.549547 12706 sgd_solver.cpp:105] Iteration 7836, lr = 0.001089
I0428 21:33:38.739768 12706 solver.cpp:218] Iteration 7848 (2.31214 iter/s, 5.19001s/12 iters), loss = 0.113258
I0428 21:33:38.739909 12706 solver.cpp:237] Train net output #0: loss = 0.113258 (* 1 = 0.113258 loss)
I0428 21:33:38.739918 12706 sgd_solver.cpp:105] Iteration 7848, lr = 0.001089
I0428 21:33:40.926877 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7854.caffemodel
I0428 21:33:43.970643 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7854.solverstate
I0428 21:33:46.317646 12706 solver.cpp:330] Iteration 7854, Testing net (#0)
I0428 21:33:46.317668 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:33:47.681813 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:33:50.940349 12706 solver.cpp:397] Test net output #0: accuracy = 0.479779
I0428 21:33:50.940380 12706 solver.cpp:397] Test net output #1: loss = 2.88506 (* 1 = 2.88506 loss)
I0428 21:33:52.897267 12706 solver.cpp:218] Iteration 7860 (0.847651 iter/s, 14.1568s/12 iters), loss = 0.139455
I0428 21:33:52.897316 12706 solver.cpp:237] Train net output #0: loss = 0.139455 (* 1 = 0.139455 loss)
I0428 21:33:52.897327 12706 sgd_solver.cpp:105] Iteration 7860, lr = 0.001089
I0428 21:33:58.013765 12706 solver.cpp:218] Iteration 7872 (2.34548 iter/s, 5.11623s/12 iters), loss = 0.217478
I0428 21:33:58.013804 12706 solver.cpp:237] Train net output #0: loss = 0.217478 (* 1 = 0.217478 loss)
I0428 21:33:58.013813 12706 sgd_solver.cpp:105] Iteration 7872, lr = 0.001089
I0428 21:34:03.049480 12706 solver.cpp:218] Iteration 7884 (2.3831 iter/s, 5.03545s/12 iters), loss = 0.117509
I0428 21:34:03.049528 12706 solver.cpp:237] Train net output #0: loss = 0.117509 (* 1 = 0.117509 loss)
I0428 21:34:03.049537 12706 sgd_solver.cpp:105] Iteration 7884, lr = 0.001089
I0428 21:34:05.229287 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:34:08.071012 12706 solver.cpp:218] Iteration 7896 (2.38984 iter/s, 5.02126s/12 iters), loss = 0.101155
I0428 21:34:08.071064 12706 solver.cpp:237] Train net output #0: loss = 0.101155 (* 1 = 0.101155 loss)
I0428 21:34:08.071075 12706 sgd_solver.cpp:105] Iteration 7896, lr = 0.001089
I0428 21:34:13.080735 12706 solver.cpp:218] Iteration 7908 (2.39547 iter/s, 5.00946s/12 iters), loss = 0.0543697
I0428 21:34:13.081182 12706 solver.cpp:237] Train net output #0: loss = 0.0543697 (* 1 = 0.0543697 loss)
I0428 21:34:13.081214 12706 sgd_solver.cpp:105] Iteration 7908, lr = 0.001089
I0428 21:34:18.305749 12706 solver.cpp:218] Iteration 7920 (2.29694 iter/s, 5.22435s/12 iters), loss = 0.0758368
I0428 21:34:18.305783 12706 solver.cpp:237] Train net output #0: loss = 0.0758368 (* 1 = 0.0758368 loss)
I0428 21:34:18.305791 12706 sgd_solver.cpp:105] Iteration 7920, lr = 0.001089
I0428 21:34:23.441826 12706 solver.cpp:218] Iteration 7932 (2.33653 iter/s, 5.13582s/12 iters), loss = 0.0761899
I0428 21:34:23.441867 12706 solver.cpp:237] Train net output #0: loss = 0.0761899 (* 1 = 0.0761899 loss)
I0428 21:34:23.441874 12706 sgd_solver.cpp:105] Iteration 7932, lr = 0.001089
I0428 21:34:28.503142 12706 solver.cpp:218] Iteration 7944 (2.37105 iter/s, 5.06106s/12 iters), loss = 0.0948766
I0428 21:34:28.503183 12706 solver.cpp:237] Train net output #0: loss = 0.0948766 (* 1 = 0.0948766 loss)
I0428 21:34:28.503191 12706 sgd_solver.cpp:105] Iteration 7944, lr = 0.001089
I0428 21:34:33.031806 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7956.caffemodel
I0428 21:34:36.180577 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7956.solverstate
I0428 21:34:38.537869 12706 solver.cpp:330] Iteration 7956, Testing net (#0)
I0428 21:34:38.537889 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:34:39.844776 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:34:43.180193 12706 solver.cpp:397] Test net output #0: accuracy = 0.487745
I0428 21:34:43.180344 12706 solver.cpp:397] Test net output #1: loss = 2.91863 (* 1 = 2.91863 loss)
I0428 21:34:43.271239 12706 solver.cpp:218] Iteration 7956 (0.812598 iter/s, 14.7674s/12 iters), loss = 0.0400579
I0428 21:34:43.271276 12706 solver.cpp:237] Train net output #0: loss = 0.0400579 (* 1 = 0.0400579 loss)
I0428 21:34:43.271284 12706 sgd_solver.cpp:105] Iteration 7956, lr = 0.001089
I0428 21:34:47.453526 12706 solver.cpp:218] Iteration 7968 (2.8694 iter/s, 4.18207s/12 iters), loss = 0.0737687
I0428 21:34:47.453568 12706 solver.cpp:237] Train net output #0: loss = 0.0737687 (* 1 = 0.0737687 loss)
I0428 21:34:47.453577 12706 sgd_solver.cpp:105] Iteration 7968, lr = 0.001089
I0428 21:34:52.482437 12706 solver.cpp:218] Iteration 7980 (2.38633 iter/s, 5.02865s/12 iters), loss = 0.0306791
I0428 21:34:52.482481 12706 solver.cpp:237] Train net output #0: loss = 0.0306792 (* 1 = 0.0306792 loss)
I0428 21:34:52.482491 12706 sgd_solver.cpp:105] Iteration 7980, lr = 0.001089
I0428 21:34:56.828351 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:34:57.538671 12706 solver.cpp:218] Iteration 7992 (2.37343 iter/s, 5.05597s/12 iters), loss = 0.0892637
I0428 21:34:57.538712 12706 solver.cpp:237] Train net output #0: loss = 0.0892637 (* 1 = 0.0892637 loss)
I0428 21:34:57.538720 12706 sgd_solver.cpp:105] Iteration 7992, lr = 0.001089
I0428 21:35:02.653101 12706 solver.cpp:218] Iteration 8004 (2.34642 iter/s, 5.11417s/12 iters), loss = 0.0766483
I0428 21:35:02.653148 12706 solver.cpp:237] Train net output #0: loss = 0.0766483 (* 1 = 0.0766483 loss)
I0428 21:35:02.653159 12706 sgd_solver.cpp:105] Iteration 8004, lr = 0.001089
I0428 21:35:08.036345 12706 solver.cpp:218] Iteration 8016 (2.22925 iter/s, 5.38297s/12 iters), loss = 0.115489
I0428 21:35:08.036386 12706 solver.cpp:237] Train net output #0: loss = 0.115489 (* 1 = 0.115489 loss)
I0428 21:35:08.036393 12706 sgd_solver.cpp:105] Iteration 8016, lr = 0.001089
I0428 21:35:13.136243 12706 solver.cpp:218] Iteration 8028 (2.35311 iter/s, 5.09964s/12 iters), loss = 0.085488
I0428 21:35:13.136284 12706 solver.cpp:237] Train net output #0: loss = 0.085488 (* 1 = 0.085488 loss)
I0428 21:35:13.136292 12706 sgd_solver.cpp:105] Iteration 8028, lr = 0.001089
I0428 21:35:18.312842 12706 solver.cpp:218] Iteration 8040 (2.31824 iter/s, 5.17633s/12 iters), loss = 0.0983743
I0428 21:35:18.312937 12706 solver.cpp:237] Train net output #0: loss = 0.0983743 (* 1 = 0.0983743 loss)
I0428 21:35:18.312945 12706 sgd_solver.cpp:105] Iteration 8040, lr = 0.001089
I0428 21:35:23.303890 12706 solver.cpp:218] Iteration 8052 (2.40445 iter/s, 4.99074s/12 iters), loss = 0.106418
I0428 21:35:23.303930 12706 solver.cpp:237] Train net output #0: loss = 0.106418 (* 1 = 0.106418 loss)
I0428 21:35:23.303937 12706 sgd_solver.cpp:105] Iteration 8052, lr = 0.001089
I0428 21:35:25.324975 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8058.caffemodel
I0428 21:35:28.356228 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8058.solverstate
I0428 21:35:30.688308 12706 solver.cpp:330] Iteration 8058, Testing net (#0)
I0428 21:35:30.688333 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:35:31.863014 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:35:35.045408 12706 solver.cpp:397] Test net output #0: accuracy = 0.474877
I0428 21:35:35.045442 12706 solver.cpp:397] Test net output #1: loss = 2.89403 (* 1 = 2.89403 loss)
I0428 21:35:36.775730 12706 solver.cpp:218] Iteration 8064 (0.890787 iter/s, 13.4712s/12 iters), loss = 0.0833512
I0428 21:35:36.775770 12706 solver.cpp:237] Train net output #0: loss = 0.0833512 (* 1 = 0.0833512 loss)
I0428 21:35:36.775779 12706 sgd_solver.cpp:105] Iteration 8064, lr = 0.001089
I0428 21:35:41.754101 12706 solver.cpp:218] Iteration 8076 (2.41055 iter/s, 4.97812s/12 iters), loss = 0.017895
I0428 21:35:41.754139 12706 solver.cpp:237] Train net output #0: loss = 0.017895 (* 1 = 0.017895 loss)
I0428 21:35:41.754148 12706 sgd_solver.cpp:105] Iteration 8076, lr = 0.001089
I0428 21:35:47.096132 12706 solver.cpp:218] Iteration 8088 (2.24645 iter/s, 5.34176s/12 iters), loss = 0.0784133
I0428 21:35:47.096170 12706 solver.cpp:237] Train net output #0: loss = 0.0784133 (* 1 = 0.0784133 loss)
I0428 21:35:47.096177 12706 sgd_solver.cpp:105] Iteration 8088, lr = 0.001089
I0428 21:35:48.591395 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:35:52.364657 12706 solver.cpp:218] Iteration 8100 (2.27779 iter/s, 5.26826s/12 iters), loss = 0.100199
I0428 21:35:52.364696 12706 solver.cpp:237] Train net output #0: loss = 0.100199 (* 1 = 0.100199 loss)
I0428 21:35:52.364703 12706 sgd_solver.cpp:105] Iteration 8100, lr = 0.001089
I0428 21:35:57.304720 12706 solver.cpp:218] Iteration 8112 (2.42924 iter/s, 4.93981s/12 iters), loss = 0.0630071
I0428 21:35:57.304764 12706 solver.cpp:237] Train net output #0: loss = 0.0630071 (* 1 = 0.0630071 loss)
I0428 21:35:57.304774 12706 sgd_solver.cpp:105] Iteration 8112, lr = 0.001089
I0428 21:36:02.266367 12706 solver.cpp:218] Iteration 8124 (2.41868 iter/s, 4.96139s/12 iters), loss = 0.0970289
I0428 21:36:02.266409 12706 solver.cpp:237] Train net output #0: loss = 0.0970289 (* 1 = 0.0970289 loss)
I0428 21:36:02.266417 12706 sgd_solver.cpp:105] Iteration 8124, lr = 0.001089
I0428 21:36:07.305996 12706 solver.cpp:218] Iteration 8136 (2.38125 iter/s, 5.03937s/12 iters), loss = 0.0622873
I0428 21:36:07.306038 12706 solver.cpp:237] Train net output #0: loss = 0.0622872 (* 1 = 0.0622872 loss)
I0428 21:36:07.306046 12706 sgd_solver.cpp:105] Iteration 8136, lr = 0.001089
I0428 21:36:12.322782 12706 solver.cpp:218] Iteration 8148 (2.39209 iter/s, 5.01653s/12 iters), loss = 0.11639
I0428 21:36:12.322822 12706 solver.cpp:237] Train net output #0: loss = 0.11639 (* 1 = 0.11639 loss)
I0428 21:36:12.322830 12706 sgd_solver.cpp:105] Iteration 8148, lr = 0.001089
I0428 21:36:16.967128 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8160.caffemodel
I0428 21:36:19.963340 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8160.solverstate
I0428 21:36:22.294793 12706 solver.cpp:330] Iteration 8160, Testing net (#0)
I0428 21:36:22.294817 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:36:23.525765 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:36:26.741935 12706 solver.cpp:397] Test net output #0: accuracy = 0.481618
I0428 21:36:26.741972 12706 solver.cpp:397] Test net output #1: loss = 2.93585 (* 1 = 2.93585 loss)
I0428 21:36:26.833093 12706 solver.cpp:218] Iteration 8160 (0.827035 iter/s, 14.5097s/12 iters), loss = 0.0645081
I0428 21:36:26.833149 12706 solver.cpp:237] Train net output #0: loss = 0.0645081 (* 1 = 0.0645081 loss)
I0428 21:36:26.833163 12706 sgd_solver.cpp:105] Iteration 8160, lr = 0.001089
I0428 21:36:30.909142 12706 solver.cpp:218] Iteration 8172 (2.9442 iter/s, 4.07581s/12 iters), loss = 0.0243885
I0428 21:36:30.909183 12706 solver.cpp:237] Train net output #0: loss = 0.0243884 (* 1 = 0.0243884 loss)
I0428 21:36:30.909189 12706 sgd_solver.cpp:105] Iteration 8172, lr = 0.001089
I0428 21:36:35.961424 12706 solver.cpp:218] Iteration 8184 (2.37528 iter/s, 5.05203s/12 iters), loss = 0.0486615
I0428 21:36:35.961462 12706 solver.cpp:237] Train net output #0: loss = 0.0486615 (* 1 = 0.0486615 loss)
I0428 21:36:35.961470 12706 sgd_solver.cpp:105] Iteration 8184, lr = 0.001089
I0428 21:36:39.569178 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:36:41.085023 12706 solver.cpp:218] Iteration 8196 (2.34222 iter/s, 5.12334s/12 iters), loss = 0.0918965
I0428 21:36:41.085062 12706 solver.cpp:237] Train net output #0: loss = 0.0918964 (* 1 = 0.0918964 loss)
I0428 21:36:41.085069 12706 sgd_solver.cpp:105] Iteration 8196, lr = 0.001089
I0428 21:36:45.992815 12706 solver.cpp:218] Iteration 8208 (2.44522 iter/s, 4.90754s/12 iters), loss = 0.0600031
I0428 21:36:45.992871 12706 solver.cpp:237] Train net output #0: loss = 0.0600031 (* 1 = 0.0600031 loss)
I0428 21:36:45.992882 12706 sgd_solver.cpp:105] Iteration 8208, lr = 0.001089
I0428 21:36:50.944218 12706 solver.cpp:218] Iteration 8220 (2.42369 iter/s, 4.95113s/12 iters), loss = 0.155839
I0428 21:36:50.944388 12706 solver.cpp:237] Train net output #0: loss = 0.155839 (* 1 = 0.155839 loss)
I0428 21:36:50.944399 12706 sgd_solver.cpp:105] Iteration 8220, lr = 0.001089
I0428 21:36:55.956267 12706 solver.cpp:218] Iteration 8232 (2.39441 iter/s, 5.01167s/12 iters), loss = 0.0611514
I0428 21:36:55.956306 12706 solver.cpp:237] Train net output #0: loss = 0.0611513 (* 1 = 0.0611513 loss)
I0428 21:36:55.956315 12706 sgd_solver.cpp:105] Iteration 8232, lr = 0.001089
I0428 21:37:00.955263 12706 solver.cpp:218] Iteration 8244 (2.40061 iter/s, 4.99874s/12 iters), loss = 0.0548256
I0428 21:37:00.955317 12706 solver.cpp:237] Train net output #0: loss = 0.0548255 (* 1 = 0.0548255 loss)
I0428 21:37:00.955329 12706 sgd_solver.cpp:105] Iteration 8244, lr = 0.001089
I0428 21:37:06.003584 12706 solver.cpp:218] Iteration 8256 (2.37716 iter/s, 5.04805s/12 iters), loss = 0.0184278
I0428 21:37:06.003639 12706 solver.cpp:237] Train net output #0: loss = 0.0184278 (* 1 = 0.0184278 loss)
I0428 21:37:06.003650 12706 sgd_solver.cpp:105] Iteration 8256, lr = 0.001089
I0428 21:37:08.034849 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8262.caffemodel
I0428 21:37:12.668656 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8262.solverstate
I0428 21:37:16.144788 12706 solver.cpp:330] Iteration 8262, Testing net (#0)
I0428 21:37:16.144809 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:37:17.276976 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:37:20.561616 12706 solver.cpp:397] Test net output #0: accuracy = 0.477328
I0428 21:37:20.561648 12706 solver.cpp:397] Test net output #1: loss = 2.94581 (* 1 = 2.94581 loss)
I0428 21:37:22.387259 12706 solver.cpp:218] Iteration 8268 (0.732469 iter/s, 16.3829s/12 iters), loss = 0.0967479
I0428 21:37:22.387358 12706 solver.cpp:237] Train net output #0: loss = 0.0967479 (* 1 = 0.0967479 loss)
I0428 21:37:22.387367 12706 sgd_solver.cpp:105] Iteration 8268, lr = 0.001089
I0428 21:37:27.416010 12706 solver.cpp:218] Iteration 8280 (2.38643 iter/s, 5.02844s/12 iters), loss = 0.0206434
I0428 21:37:27.416050 12706 solver.cpp:237] Train net output #0: loss = 0.0206433 (* 1 = 0.0206433 loss)
I0428 21:37:27.416057 12706 sgd_solver.cpp:105] Iteration 8280, lr = 0.001089
I0428 21:37:32.466193 12706 solver.cpp:218] Iteration 8292 (2.37627 iter/s, 5.04992s/12 iters), loss = 0.0430265
I0428 21:37:32.466238 12706 solver.cpp:237] Train net output #0: loss = 0.0430265 (* 1 = 0.0430265 loss)
I0428 21:37:32.466248 12706 sgd_solver.cpp:105] Iteration 8292, lr = 0.001089
I0428 21:37:33.160873 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:37:37.587707 12706 solver.cpp:218] Iteration 8304 (2.34318 iter/s, 5.12125s/12 iters), loss = 0.116292
I0428 21:37:37.587744 12706 solver.cpp:237] Train net output #0: loss = 0.116292 (* 1 = 0.116292 loss)
I0428 21:37:37.587752 12706 sgd_solver.cpp:105] Iteration 8304, lr = 0.001089
I0428 21:37:40.535035 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:37:42.668365 12706 solver.cpp:218] Iteration 8316 (2.36202 iter/s, 5.0804s/12 iters), loss = 0.0539016
I0428 21:37:42.668404 12706 solver.cpp:237] Train net output #0: loss = 0.0539015 (* 1 = 0.0539015 loss)
I0428 21:37:42.668411 12706 sgd_solver.cpp:105] Iteration 8316, lr = 0.001089
I0428 21:37:47.797329 12706 solver.cpp:218] Iteration 8328 (2.33977 iter/s, 5.1287s/12 iters), loss = 0.0829287
I0428 21:37:47.797379 12706 solver.cpp:237] Train net output #0: loss = 0.0829286 (* 1 = 0.0829286 loss)
I0428 21:37:47.797394 12706 sgd_solver.cpp:105] Iteration 8328, lr = 0.001089
I0428 21:37:52.881130 12706 solver.cpp:218] Iteration 8340 (2.36056 iter/s, 5.08353s/12 iters), loss = 0.112547
I0428 21:37:52.881928 12706 solver.cpp:237] Train net output #0: loss = 0.112547 (* 1 = 0.112547 loss)
I0428 21:37:52.881942 12706 sgd_solver.cpp:105] Iteration 8340, lr = 0.001089
I0428 21:37:57.862795 12706 solver.cpp:218] Iteration 8352 (2.40932 iter/s, 4.98065s/12 iters), loss = 0.115838
I0428 21:37:57.862844 12706 solver.cpp:237] Train net output #0: loss = 0.115838 (* 1 = 0.115838 loss)
I0428 21:37:57.862856 12706 sgd_solver.cpp:105] Iteration 8352, lr = 0.001089
I0428 21:38:02.431892 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8364.caffemodel
I0428 21:38:06.154392 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8364.solverstate
I0428 21:38:10.450094 12706 solver.cpp:330] Iteration 8364, Testing net (#0)
I0428 21:38:10.450115 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:38:11.509554 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:38:14.803468 12706 solver.cpp:397] Test net output #0: accuracy = 0.481005
I0428 21:38:14.803498 12706 solver.cpp:397] Test net output #1: loss = 2.90626 (* 1 = 2.90626 loss)
I0428 21:38:14.894549 12706 solver.cpp:218] Iteration 8364 (0.704597 iter/s, 17.031s/12 iters), loss = 0.0824622
I0428 21:38:14.894590 12706 solver.cpp:237] Train net output #0: loss = 0.0824622 (* 1 = 0.0824622 loss)
I0428 21:38:14.894598 12706 sgd_solver.cpp:105] Iteration 8364, lr = 0.001089
I0428 21:38:19.086202 12706 solver.cpp:218] Iteration 8376 (2.86299 iter/s, 4.19142s/12 iters), loss = 0.083121
I0428 21:38:19.086256 12706 solver.cpp:237] Train net output #0: loss = 0.083121 (* 1 = 0.083121 loss)
I0428 21:38:19.086266 12706 sgd_solver.cpp:105] Iteration 8376, lr = 0.001089
I0428 21:38:24.235291 12706 solver.cpp:218] Iteration 8388 (2.33063 iter/s, 5.14882s/12 iters), loss = 0.0883393
I0428 21:38:24.235401 12706 solver.cpp:237] Train net output #0: loss = 0.0883392 (* 1 = 0.0883392 loss)
I0428 21:38:24.235409 12706 sgd_solver.cpp:105] Iteration 8388, lr = 0.001089
I0428 21:38:27.160789 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:38:29.377120 12706 solver.cpp:218] Iteration 8400 (2.33395 iter/s, 5.1415s/12 iters), loss = 0.191851
I0428 21:38:29.377156 12706 solver.cpp:237] Train net output #0: loss = 0.191851 (* 1 = 0.191851 loss)
I0428 21:38:29.377163 12706 sgd_solver.cpp:105] Iteration 8400, lr = 0.001089
I0428 21:38:34.277798 12706 solver.cpp:218] Iteration 8412 (2.44877 iter/s, 4.90043s/12 iters), loss = 0.0675648
I0428 21:38:34.277846 12706 solver.cpp:237] Train net output #0: loss = 0.0675647 (* 1 = 0.0675647 loss)
I0428 21:38:34.277855 12706 sgd_solver.cpp:105] Iteration 8412, lr = 0.001089
I0428 21:38:39.304800 12706 solver.cpp:218] Iteration 8424 (2.38724 iter/s, 5.02674s/12 iters), loss = 0.0190015
I0428 21:38:39.304843 12706 solver.cpp:237] Train net output #0: loss = 0.0190014 (* 1 = 0.0190014 loss)
I0428 21:38:39.304850 12706 sgd_solver.cpp:105] Iteration 8424, lr = 0.001089
I0428 21:38:44.280829 12706 solver.cpp:218] Iteration 8436 (2.41169 iter/s, 4.97577s/12 iters), loss = 0.0312313
I0428 21:38:44.280880 12706 solver.cpp:237] Train net output #0: loss = 0.0312312 (* 1 = 0.0312312 loss)
I0428 21:38:44.280894 12706 sgd_solver.cpp:105] Iteration 8436, lr = 0.001089
I0428 21:38:49.499276 12706 solver.cpp:218] Iteration 8448 (2.29966 iter/s, 5.21817s/12 iters), loss = 0.0932917
I0428 21:38:49.499326 12706 solver.cpp:237] Train net output #0: loss = 0.0932916 (* 1 = 0.0932916 loss)
I0428 21:38:49.499338 12706 sgd_solver.cpp:105] Iteration 8448, lr = 0.001089
I0428 21:38:54.430341 12706 solver.cpp:218] Iteration 8460 (2.43368 iter/s, 4.9308s/12 iters), loss = 0.0306705
I0428 21:38:54.443569 12706 solver.cpp:237] Train net output #0: loss = 0.0306704 (* 1 = 0.0306704 loss)
I0428 21:38:54.443580 12706 sgd_solver.cpp:105] Iteration 8460, lr = 0.001089
I0428 21:38:56.557744 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8466.caffemodel
I0428 21:39:02.711725 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8466.solverstate
I0428 21:39:05.811851 12706 solver.cpp:330] Iteration 8466, Testing net (#0)
I0428 21:39:05.811873 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:39:06.839169 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:39:10.185276 12706 solver.cpp:397] Test net output #0: accuracy = 0.480392
I0428 21:39:10.185303 12706 solver.cpp:397] Test net output #1: loss = 2.85781 (* 1 = 2.85781 loss)
I0428 21:39:12.031563 12706 solver.cpp:218] Iteration 8472 (0.682312 iter/s, 17.5873s/12 iters), loss = 0.0762337
I0428 21:39:12.031617 12706 solver.cpp:237] Train net output #0: loss = 0.0762336 (* 1 = 0.0762336 loss)
I0428 21:39:12.031628 12706 sgd_solver.cpp:105] Iteration 8472, lr = 0.001089
I0428 21:39:16.988425 12706 solver.cpp:218] Iteration 8484 (2.42102 iter/s, 4.9566s/12 iters), loss = 0.0718966
I0428 21:39:16.988478 12706 solver.cpp:237] Train net output #0: loss = 0.0718965 (* 1 = 0.0718965 loss)
I0428 21:39:16.988523 12706 sgd_solver.cpp:105] Iteration 8484, lr = 0.001089
I0428 21:39:21.943430 12706 solver.cpp:218] Iteration 8496 (2.42192 iter/s, 4.95474s/12 iters), loss = 0.0543083
I0428 21:39:21.943485 12706 solver.cpp:237] Train net output #0: loss = 0.0543082 (* 1 = 0.0543082 loss)
I0428 21:39:21.943496 12706 sgd_solver.cpp:105] Iteration 8496, lr = 0.001089
I0428 21:39:21.981277 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:39:26.888469 12706 solver.cpp:218] Iteration 8508 (2.42681 iter/s, 4.94477s/12 iters), loss = 0.0548894
I0428 21:39:26.888641 12706 solver.cpp:237] Train net output #0: loss = 0.0548893 (* 1 = 0.0548893 loss)
I0428 21:39:26.888653 12706 sgd_solver.cpp:105] Iteration 8508, lr = 0.001089
I0428 21:39:31.891803 12706 solver.cpp:218] Iteration 8520 (2.39859 iter/s, 5.00295s/12 iters), loss = 0.0946366
I0428 21:39:31.891842 12706 solver.cpp:237] Train net output #0: loss = 0.0946365 (* 1 = 0.0946365 loss)
I0428 21:39:31.891849 12706 sgd_solver.cpp:105] Iteration 8520, lr = 0.001089
I0428 21:39:36.895030 12706 solver.cpp:218] Iteration 8532 (2.39858 iter/s, 5.00297s/12 iters), loss = 0.109737
I0428 21:39:36.895077 12706 solver.cpp:237] Train net output #0: loss = 0.109737 (* 1 = 0.109737 loss)
I0428 21:39:36.895090 12706 sgd_solver.cpp:105] Iteration 8532, lr = 0.001089
I0428 21:39:41.902976 12706 solver.cpp:218] Iteration 8544 (2.39632 iter/s, 5.00768s/12 iters), loss = 0.10099
I0428 21:39:41.903019 12706 solver.cpp:237] Train net output #0: loss = 0.100989 (* 1 = 0.100989 loss)
I0428 21:39:41.903028 12706 sgd_solver.cpp:105] Iteration 8544, lr = 0.001089
I0428 21:39:46.866788 12706 solver.cpp:218] Iteration 8556 (2.41762 iter/s, 4.96356s/12 iters), loss = 0.0254716
I0428 21:39:46.866827 12706 solver.cpp:237] Train net output #0: loss = 0.0254715 (* 1 = 0.0254715 loss)
I0428 21:39:46.866833 12706 sgd_solver.cpp:105] Iteration 8556, lr = 0.001089
I0428 21:39:51.720480 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8568.caffemodel
I0428 21:39:57.705138 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8568.solverstate
I0428 21:40:00.011369 12706 solver.cpp:330] Iteration 8568, Testing net (#0)
I0428 21:40:00.011391 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:40:00.989476 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:40:04.367013 12706 solver.cpp:397] Test net output #0: accuracy = 0.474877
I0428 21:40:04.367039 12706 solver.cpp:397] Test net output #1: loss = 2.91377 (* 1 = 2.91377 loss)
I0428 21:40:04.458186 12706 solver.cpp:218] Iteration 8568 (0.682181 iter/s, 17.5906s/12 iters), loss = 0.0829402
I0428 21:40:04.458250 12706 solver.cpp:237] Train net output #0: loss = 0.0829401 (* 1 = 0.0829401 loss)
I0428 21:40:04.458262 12706 sgd_solver.cpp:105] Iteration 8568, lr = 0.001089
I0428 21:40:08.584940 12706 solver.cpp:218] Iteration 8580 (2.90802 iter/s, 4.12652s/12 iters), loss = 0.0293
I0428 21:40:08.584980 12706 solver.cpp:237] Train net output #0: loss = 0.0292999 (* 1 = 0.0292999 loss)
I0428 21:40:08.584987 12706 sgd_solver.cpp:105] Iteration 8580, lr = 0.001089
I0428 21:40:13.563423 12706 solver.cpp:218] Iteration 8592 (2.41049 iter/s, 4.97823s/12 iters), loss = 0.141591
I0428 21:40:13.563463 12706 solver.cpp:237] Train net output #0: loss = 0.141591 (* 1 = 0.141591 loss)
I0428 21:40:13.563472 12706 sgd_solver.cpp:105] Iteration 8592, lr = 0.001089
I0428 21:40:15.719264 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:40:18.531616 12706 solver.cpp:218] Iteration 8604 (2.41549 iter/s, 4.96794s/12 iters), loss = 0.0986137
I0428 21:40:18.531657 12706 solver.cpp:237] Train net output #0: loss = 0.0986136 (* 1 = 0.0986136 loss)
I0428 21:40:18.531666 12706 sgd_solver.cpp:105] Iteration 8604, lr = 0.001089
I0428 21:40:23.613870 12706 solver.cpp:218] Iteration 8616 (2.36128 iter/s, 5.08199s/12 iters), loss = 0.0688999
I0428 21:40:23.613919 12706 solver.cpp:237] Train net output #0: loss = 0.0688998 (* 1 = 0.0688998 loss)
I0428 21:40:23.613931 12706 sgd_solver.cpp:105] Iteration 8616, lr = 0.001089
I0428 21:40:28.627494 12706 solver.cpp:218] Iteration 8628 (2.3936 iter/s, 5.01336s/12 iters), loss = 0.0714297
I0428 21:40:28.627625 12706 solver.cpp:237] Train net output #0: loss = 0.0714296 (* 1 = 0.0714296 loss)
I0428 21:40:28.627635 12706 sgd_solver.cpp:105] Iteration 8628, lr = 0.001089
I0428 21:40:33.642177 12706 solver.cpp:218] Iteration 8640 (2.39314 iter/s, 5.01433s/12 iters), loss = 0.0318604
I0428 21:40:33.642227 12706 solver.cpp:237] Train net output #0: loss = 0.0318602 (* 1 = 0.0318602 loss)
I0428 21:40:33.642238 12706 sgd_solver.cpp:105] Iteration 8640, lr = 0.001089
I0428 21:40:38.623919 12706 solver.cpp:218] Iteration 8652 (2.40893 iter/s, 4.98147s/12 iters), loss = 0.0516579
I0428 21:40:38.623973 12706 solver.cpp:237] Train net output #0: loss = 0.0516578 (* 1 = 0.0516578 loss)
I0428 21:40:38.623986 12706 sgd_solver.cpp:105] Iteration 8652, lr = 0.001089
I0428 21:40:43.559500 12706 solver.cpp:218] Iteration 8664 (2.43146 iter/s, 4.93531s/12 iters), loss = 0.0464016
I0428 21:40:43.559559 12706 solver.cpp:237] Train net output #0: loss = 0.0464015 (* 1 = 0.0464015 loss)
I0428 21:40:43.559571 12706 sgd_solver.cpp:105] Iteration 8664, lr = 0.001089
I0428 21:40:45.570437 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8670.caffemodel
I0428 21:40:51.342519 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8670.solverstate
I0428 21:40:57.215279 12706 solver.cpp:330] Iteration 8670, Testing net (#0)
I0428 21:40:57.215301 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:40:58.199633 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:41:01.625700 12706 solver.cpp:397] Test net output #0: accuracy = 0.481618
I0428 21:41:01.625792 12706 solver.cpp:397] Test net output #1: loss = 2.91658 (* 1 = 2.91658 loss)
I0428 21:41:03.470157 12706 solver.cpp:218] Iteration 8676 (0.602719 iter/s, 19.9098s/12 iters), loss = 0.0815001
I0428 21:41:03.470198 12706 solver.cpp:237] Train net output #0: loss = 0.0815 (* 1 = 0.0815 loss)
I0428 21:41:03.470206 12706 sgd_solver.cpp:105] Iteration 8676, lr = 0.001089
I0428 21:41:08.478142 12706 solver.cpp:218] Iteration 8688 (2.3963 iter/s, 5.00772s/12 iters), loss = 0.0784149
I0428 21:41:08.478195 12706 solver.cpp:237] Train net output #0: loss = 0.0784148 (* 1 = 0.0784148 loss)
I0428 21:41:08.478207 12706 sgd_solver.cpp:105] Iteration 8688, lr = 0.001089
I0428 21:41:12.814393 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:41:13.491876 12706 solver.cpp:218] Iteration 8700 (2.39355 iter/s, 5.01346s/12 iters), loss = 0.0416996
I0428 21:41:13.491926 12706 solver.cpp:237] Train net output #0: loss = 0.0416995 (* 1 = 0.0416995 loss)
I0428 21:41:13.491936 12706 sgd_solver.cpp:105] Iteration 8700, lr = 0.001089
I0428 21:41:18.501735 12706 solver.cpp:218] Iteration 8712 (2.3954 iter/s, 5.00959s/12 iters), loss = 0.0855449
I0428 21:41:18.501775 12706 solver.cpp:237] Train net output #0: loss = 0.0855448 (* 1 = 0.0855448 loss)
I0428 21:41:18.501781 12706 sgd_solver.cpp:105] Iteration 8712, lr = 0.001089
I0428 21:41:23.594110 12706 solver.cpp:218] Iteration 8724 (2.35658 iter/s, 5.09212s/12 iters), loss = 0.0394227
I0428 21:41:23.594151 12706 solver.cpp:237] Train net output #0: loss = 0.0394226 (* 1 = 0.0394226 loss)
I0428 21:41:23.594158 12706 sgd_solver.cpp:105] Iteration 8724, lr = 0.001089
I0428 21:41:28.694984 12706 solver.cpp:218] Iteration 8736 (2.35266 iter/s, 5.10061s/12 iters), loss = 0.0315748
I0428 21:41:28.695025 12706 solver.cpp:237] Train net output #0: loss = 0.0315746 (* 1 = 0.0315746 loss)
I0428 21:41:28.695034 12706 sgd_solver.cpp:105] Iteration 8736, lr = 0.001089
I0428 21:41:34.132985 12706 solver.cpp:218] Iteration 8748 (2.20681 iter/s, 5.43772s/12 iters), loss = 0.0462158
I0428 21:41:34.133117 12706 solver.cpp:237] Train net output #0: loss = 0.0462157 (* 1 = 0.0462157 loss)
I0428 21:41:34.133127 12706 sgd_solver.cpp:105] Iteration 8748, lr = 0.001089
I0428 21:41:39.122800 12706 solver.cpp:218] Iteration 8760 (2.40507 iter/s, 4.98947s/12 iters), loss = 0.0994687
I0428 21:41:39.122846 12706 solver.cpp:237] Train net output #0: loss = 0.0994686 (* 1 = 0.0994686 loss)
I0428 21:41:39.122856 12706 sgd_solver.cpp:105] Iteration 8760, lr = 0.001089
I0428 21:41:43.668280 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8772.caffemodel
I0428 21:41:47.142143 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8772.solverstate
I0428 21:41:49.446815 12706 solver.cpp:330] Iteration 8772, Testing net (#0)
I0428 21:41:49.446836 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:41:50.413043 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:41:54.079578 12706 solver.cpp:397] Test net output #0: accuracy = 0.473039
I0428 21:41:54.079629 12706 solver.cpp:397] Test net output #1: loss = 2.94066 (* 1 = 2.94066 loss)
I0428 21:41:54.170997 12706 solver.cpp:218] Iteration 8772 (0.797473 iter/s, 15.0475s/12 iters), loss = 0.0737871
I0428 21:41:54.171046 12706 solver.cpp:237] Train net output #0: loss = 0.073787 (* 1 = 0.073787 loss)
I0428 21:41:54.171056 12706 sgd_solver.cpp:105] Iteration 8772, lr = 0.001089
I0428 21:41:58.378623 12706 solver.cpp:218] Iteration 8784 (2.85213 iter/s, 4.20739s/12 iters), loss = 0.0869201
I0428 21:41:58.378670 12706 solver.cpp:237] Train net output #0: loss = 0.08692 (* 1 = 0.08692 loss)
I0428 21:41:58.378680 12706 sgd_solver.cpp:105] Iteration 8784, lr = 0.001089
I0428 21:42:03.403388 12706 solver.cpp:218] Iteration 8796 (2.3883 iter/s, 5.0245s/12 iters), loss = 0.0895353
I0428 21:42:03.403437 12706 solver.cpp:237] Train net output #0: loss = 0.0895352 (* 1 = 0.0895352 loss)
I0428 21:42:03.403446 12706 sgd_solver.cpp:105] Iteration 8796, lr = 0.001089
I0428 21:42:04.867924 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:42:08.502468 12706 solver.cpp:218] Iteration 8808 (2.35349 iter/s, 5.09881s/12 iters), loss = 0.0938769
I0428 21:42:08.502506 12706 solver.cpp:237] Train net output #0: loss = 0.0938768 (* 1 = 0.0938768 loss)
I0428 21:42:08.502514 12706 sgd_solver.cpp:105] Iteration 8808, lr = 0.001089
I0428 21:42:13.482764 12706 solver.cpp:218] Iteration 8820 (2.40962 iter/s, 4.98004s/12 iters), loss = 0.104076
I0428 21:42:13.482808 12706 solver.cpp:237] Train net output #0: loss = 0.104076 (* 1 = 0.104076 loss)
I0428 21:42:13.482815 12706 sgd_solver.cpp:105] Iteration 8820, lr = 0.001089
I0428 21:42:18.505862 12706 solver.cpp:218] Iteration 8832 (2.38909 iter/s, 5.02284s/12 iters), loss = 0.0878844
I0428 21:42:18.505903 12706 solver.cpp:237] Train net output #0: loss = 0.0878843 (* 1 = 0.0878843 loss)
I0428 21:42:18.505911 12706 sgd_solver.cpp:105] Iteration 8832, lr = 0.001089
I0428 21:42:23.442610 12706 solver.cpp:218] Iteration 8844 (2.43087 iter/s, 4.9365s/12 iters), loss = 0.0900452
I0428 21:42:23.442649 12706 solver.cpp:237] Train net output #0: loss = 0.0900451 (* 1 = 0.0900451 loss)
I0428 21:42:23.442658 12706 sgd_solver.cpp:105] Iteration 8844, lr = 0.001089
I0428 21:42:28.386523 12706 solver.cpp:218] Iteration 8856 (2.42735 iter/s, 4.94366s/12 iters), loss = 0.0561041
I0428 21:42:28.386564 12706 solver.cpp:237] Train net output #0: loss = 0.056104 (* 1 = 0.056104 loss)
I0428 21:42:28.386571 12706 sgd_solver.cpp:105] Iteration 8856, lr = 0.001089
I0428 21:42:33.438671 12706 solver.cpp:218] Iteration 8868 (2.37535 iter/s, 5.05189s/12 iters), loss = 0.0361497
I0428 21:42:33.438710 12706 solver.cpp:237] Train net output #0: loss = 0.0361496 (* 1 = 0.0361496 loss)
I0428 21:42:33.438719 12706 sgd_solver.cpp:105] Iteration 8868, lr = 0.001089
I0428 21:42:35.468405 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8874.caffemodel
I0428 21:42:39.183676 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8874.solverstate
I0428 21:42:41.513319 12706 solver.cpp:330] Iteration 8874, Testing net (#0)
I0428 21:42:41.513339 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:42:42.473796 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:42:45.983048 12706 solver.cpp:397] Test net output #0: accuracy = 0.490196
I0428 21:42:45.983075 12706 solver.cpp:397] Test net output #1: loss = 2.86671 (* 1 = 2.86671 loss)
I0428 21:42:47.807337 12706 solver.cpp:218] Iteration 8880 (0.835187 iter/s, 14.368s/12 iters), loss = 0.0715229
I0428 21:42:47.807379 12706 solver.cpp:237] Train net output #0: loss = 0.0715228 (* 1 = 0.0715228 loss)
I0428 21:42:47.807387 12706 sgd_solver.cpp:105] Iteration 8880, lr = 0.001089
I0428 21:42:52.813644 12706 solver.cpp:218] Iteration 8892 (2.39711 iter/s, 5.00603s/12 iters), loss = 0.0393073
I0428 21:42:52.813709 12706 solver.cpp:237] Train net output #0: loss = 0.0393072 (* 1 = 0.0393072 loss)
I0428 21:42:52.813732 12706 sgd_solver.cpp:105] Iteration 8892, lr = 0.001089
I0428 21:42:56.403892 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:42:57.872519 12706 solver.cpp:218] Iteration 8904 (2.3722 iter/s, 5.0586s/12 iters), loss = 0.161895
I0428 21:42:57.872555 12706 solver.cpp:237] Train net output #0: loss = 0.161895 (* 1 = 0.161895 loss)
I0428 21:42:57.872561 12706 sgd_solver.cpp:105] Iteration 8904, lr = 0.001089
I0428 21:43:02.854984 12706 solver.cpp:218] Iteration 8916 (2.40857 iter/s, 4.98221s/12 iters), loss = 0.0714387
I0428 21:43:02.855021 12706 solver.cpp:237] Train net output #0: loss = 0.0714386 (* 1 = 0.0714386 loss)
I0428 21:43:02.855028 12706 sgd_solver.cpp:105] Iteration 8916, lr = 0.001089
I0428 21:43:07.854655 12706 solver.cpp:218] Iteration 8928 (2.40028 iter/s, 4.99941s/12 iters), loss = 0.022057
I0428 21:43:07.854781 12706 solver.cpp:237] Train net output #0: loss = 0.0220569 (* 1 = 0.0220569 loss)
I0428 21:43:07.854795 12706 sgd_solver.cpp:105] Iteration 8928, lr = 0.001089
I0428 21:43:12.822928 12706 solver.cpp:218] Iteration 8940 (2.41549 iter/s, 4.96793s/12 iters), loss = 0.117636
I0428 21:43:12.822965 12706 solver.cpp:237] Train net output #0: loss = 0.117636 (* 1 = 0.117636 loss)
I0428 21:43:12.822973 12706 sgd_solver.cpp:105] Iteration 8940, lr = 0.001089
I0428 21:43:17.840673 12706 solver.cpp:218] Iteration 8952 (2.39164 iter/s, 5.01749s/12 iters), loss = 0.0567784
I0428 21:43:17.840725 12706 solver.cpp:237] Train net output #0: loss = 0.0567783 (* 1 = 0.0567783 loss)
I0428 21:43:17.840737 12706 sgd_solver.cpp:105] Iteration 8952, lr = 0.001089
I0428 21:43:22.993480 12706 solver.cpp:218] Iteration 8964 (2.32895 iter/s, 5.15253s/12 iters), loss = 0.0658838
I0428 21:43:22.993522 12706 solver.cpp:237] Train net output #0: loss = 0.0658837 (* 1 = 0.0658837 loss)
I0428 21:43:22.993532 12706 sgd_solver.cpp:105] Iteration 8964, lr = 0.001089
I0428 21:43:27.577152 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8976.caffemodel
I0428 21:43:30.720345 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8976.solverstate
I0428 21:43:33.025365 12706 solver.cpp:330] Iteration 8976, Testing net (#0)
I0428 21:43:33.025386 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:43:33.935111 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:43:37.819545 12706 solver.cpp:397] Test net output #0: accuracy = 0.474877
I0428 21:43:37.819584 12706 solver.cpp:397] Test net output #1: loss = 2.95017 (* 1 = 2.95017 loss)
I0428 21:43:37.910836 12706 solver.cpp:218] Iteration 8976 (0.804468 iter/s, 14.9167s/12 iters), loss = 0.0874956
I0428 21:43:37.910969 12706 solver.cpp:237] Train net output #0: loss = 0.0874955 (* 1 = 0.0874955 loss)
I0428 21:43:37.910979 12706 sgd_solver.cpp:105] Iteration 8976, lr = 0.001089
I0428 21:43:42.042771 12706 solver.cpp:218] Iteration 8988 (2.90443 iter/s, 4.13162s/12 iters), loss = 0.104078
I0428 21:43:42.042805 12706 solver.cpp:237] Train net output #0: loss = 0.104078 (* 1 = 0.104078 loss)
I0428 21:43:42.042812 12706 sgd_solver.cpp:105] Iteration 8988, lr = 0.001089
I0428 21:43:45.332700 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:43:47.119168 12706 solver.cpp:218] Iteration 9000 (2.364 iter/s, 5.07614s/12 iters), loss = 0.145233
I0428 21:43:47.119210 12706 solver.cpp:237] Train net output #0: loss = 0.145233 (* 1 = 0.145233 loss)
I0428 21:43:47.119217 12706 sgd_solver.cpp:105] Iteration 9000, lr = 0.001089
I0428 21:43:47.836531 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:43:52.267238 12706 solver.cpp:218] Iteration 9012 (2.33109 iter/s, 5.1478s/12 iters), loss = 0.0476139
I0428 21:43:52.267287 12706 solver.cpp:237] Train net output #0: loss = 0.0476138 (* 1 = 0.0476138 loss)
I0428 21:43:52.267298 12706 sgd_solver.cpp:105] Iteration 9012, lr = 0.001089
I0428 21:43:57.233620 12706 solver.cpp:218] Iteration 9024 (2.41637 iter/s, 4.96612s/12 iters), loss = 0.0438171
I0428 21:43:57.233660 12706 solver.cpp:237] Train net output #0: loss = 0.043817 (* 1 = 0.043817 loss)
I0428 21:43:57.233669 12706 sgd_solver.cpp:105] Iteration 9024, lr = 0.001089
I0428 21:44:02.367707 12706 solver.cpp:218] Iteration 9036 (2.33744 iter/s, 5.13382s/12 iters), loss = 0.1148
I0428 21:44:02.367755 12706 solver.cpp:237] Train net output #0: loss = 0.1148 (* 1 = 0.1148 loss)
I0428 21:44:02.367765 12706 sgd_solver.cpp:105] Iteration 9036, lr = 0.001089
I0428 21:44:07.657310 12706 solver.cpp:218] Iteration 9048 (2.26872 iter/s, 5.28932s/12 iters), loss = 0.0742189
I0428 21:44:07.657351 12706 solver.cpp:237] Train net output #0: loss = 0.0742188 (* 1 = 0.0742188 loss)
I0428 21:44:07.657358 12706 sgd_solver.cpp:105] Iteration 9048, lr = 0.001089
I0428 21:44:12.748831 12706 solver.cpp:218] Iteration 9060 (2.35698 iter/s, 5.09125s/12 iters), loss = 0.0831241
I0428 21:44:12.748952 12706 solver.cpp:237] Train net output #0: loss = 0.083124 (* 1 = 0.083124 loss)
I0428 21:44:12.748962 12706 sgd_solver.cpp:105] Iteration 9060, lr = 0.001089
I0428 21:44:17.811287 12706 solver.cpp:218] Iteration 9072 (2.37055 iter/s, 5.06212s/12 iters), loss = 0.0636416
I0428 21:44:17.811324 12706 solver.cpp:237] Train net output #0: loss = 0.0636415 (* 1 = 0.0636415 loss)
I0428 21:44:17.811332 12706 sgd_solver.cpp:105] Iteration 9072, lr = 0.001089
I0428 21:44:19.855341 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9078.caffemodel
I0428 21:44:22.852250 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9078.solverstate
I0428 21:44:25.243160 12706 solver.cpp:330] Iteration 9078, Testing net (#0)
I0428 21:44:25.243180 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:44:26.109232 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:44:29.996821 12706 solver.cpp:397] Test net output #0: accuracy = 0.48652
I0428 21:44:29.996858 12706 solver.cpp:397] Test net output #1: loss = 2.9221 (* 1 = 2.9221 loss)
I0428 21:44:31.781213 12706 solver.cpp:218] Iteration 9084 (0.859026 iter/s, 13.9693s/12 iters), loss = 0.0977359
I0428 21:44:31.781255 12706 solver.cpp:237] Train net output #0: loss = 0.0977358 (* 1 = 0.0977358 loss)
I0428 21:44:31.781263 12706 sgd_solver.cpp:105] Iteration 9084, lr = 0.001089
I0428 21:44:36.760890 12706 solver.cpp:218] Iteration 9096 (2.40992 iter/s, 4.97942s/12 iters), loss = 0.0630977
I0428 21:44:36.760928 12706 solver.cpp:237] Train net output #0: loss = 0.0630976 (* 1 = 0.0630976 loss)
I0428 21:44:36.760936 12706 sgd_solver.cpp:105] Iteration 9096, lr = 0.001089
I0428 21:44:39.743588 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:44:41.807906 12706 solver.cpp:218] Iteration 9108 (2.37776 iter/s, 5.04676s/12 iters), loss = 0.125787
I0428 21:44:41.807950 12706 solver.cpp:237] Train net output #0: loss = 0.125787 (* 1 = 0.125787 loss)
I0428 21:44:41.807960 12706 sgd_solver.cpp:105] Iteration 9108, lr = 0.001089
I0428 21:44:46.876829 12706 solver.cpp:218] Iteration 9120 (2.36749 iter/s, 5.06866s/12 iters), loss = 0.0851174
I0428 21:44:46.877002 12706 solver.cpp:237] Train net output #0: loss = 0.0851172 (* 1 = 0.0851172 loss)
I0428 21:44:46.877013 12706 sgd_solver.cpp:105] Iteration 9120, lr = 0.001089
I0428 21:44:52.043862 12706 solver.cpp:218] Iteration 9132 (2.32259 iter/s, 5.16664s/12 iters), loss = 0.0892425
I0428 21:44:52.043905 12706 solver.cpp:237] Train net output #0: loss = 0.0892424 (* 1 = 0.0892424 loss)
I0428 21:44:52.043913 12706 sgd_solver.cpp:105] Iteration 9132, lr = 0.001089
I0428 21:44:57.037920 12706 solver.cpp:218] Iteration 9144 (2.40298 iter/s, 4.9938s/12 iters), loss = 0.094213
I0428 21:44:57.037961 12706 solver.cpp:237] Train net output #0: loss = 0.0942129 (* 1 = 0.0942129 loss)
I0428 21:44:57.037968 12706 sgd_solver.cpp:105] Iteration 9144, lr = 0.001089
I0428 21:45:02.101480 12706 solver.cpp:218] Iteration 9156 (2.37 iter/s, 5.06329s/12 iters), loss = 0.0901643
I0428 21:45:02.101531 12706 solver.cpp:237] Train net output #0: loss = 0.0901642 (* 1 = 0.0901642 loss)
I0428 21:45:02.101541 12706 sgd_solver.cpp:105] Iteration 9156, lr = 0.001089
I0428 21:45:07.189669 12706 solver.cpp:218] Iteration 9168 (2.35853 iter/s, 5.08792s/12 iters), loss = 0.0713
I0428 21:45:07.189723 12706 solver.cpp:237] Train net output #0: loss = 0.0712999 (* 1 = 0.0712999 loss)
I0428 21:45:07.189735 12706 sgd_solver.cpp:105] Iteration 9168, lr = 0.001089
I0428 21:45:11.712379 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9180.caffemodel
I0428 21:45:16.785578 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9180.solverstate
I0428 21:45:19.112735 12706 solver.cpp:330] Iteration 9180, Testing net (#0)
I0428 21:45:19.112829 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:45:19.860169 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:45:23.526439 12706 solver.cpp:397] Test net output #0: accuracy = 0.48223
I0428 21:45:23.526469 12706 solver.cpp:397] Test net output #1: loss = 2.94429 (* 1 = 2.94429 loss)
I0428 21:45:23.617470 12706 solver.cpp:218] Iteration 9180 (0.730502 iter/s, 16.4271s/12 iters), loss = 0.0382103
I0428 21:45:23.617511 12706 solver.cpp:237] Train net output #0: loss = 0.0382102 (* 1 = 0.0382102 loss)
I0428 21:45:23.617518 12706 sgd_solver.cpp:105] Iteration 9180, lr = 0.001089
I0428 21:45:27.776849 12706 solver.cpp:218] Iteration 9192 (2.8852 iter/s, 4.15915s/12 iters), loss = 0.0914765
I0428 21:45:27.776896 12706 solver.cpp:237] Train net output #0: loss = 0.0914764 (* 1 = 0.0914764 loss)
I0428 21:45:27.776906 12706 sgd_solver.cpp:105] Iteration 9192, lr = 0.001089
I0428 21:45:32.852344 12706 solver.cpp:218] Iteration 9204 (2.36443 iter/s, 5.07523s/12 iters), loss = 0.0216979
I0428 21:45:32.852385 12706 solver.cpp:237] Train net output #0: loss = 0.0216978 (* 1 = 0.0216978 loss)
I0428 21:45:32.852392 12706 sgd_solver.cpp:105] Iteration 9204, lr = 0.001089
I0428 21:45:32.921448 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:45:37.844228 12706 solver.cpp:218] Iteration 9216 (2.40403 iter/s, 4.99163s/12 iters), loss = 0.0427616
I0428 21:45:37.844267 12706 solver.cpp:237] Train net output #0: loss = 0.0427615 (* 1 = 0.0427615 loss)
I0428 21:45:37.844275 12706 sgd_solver.cpp:105] Iteration 9216, lr = 0.001089
I0428 21:45:42.852548 12706 solver.cpp:218] Iteration 9228 (2.39614 iter/s, 5.00806s/12 iters), loss = 0.0905139
I0428 21:45:42.852591 12706 solver.cpp:237] Train net output #0: loss = 0.0905138 (* 1 = 0.0905138 loss)
I0428 21:45:42.852598 12706 sgd_solver.cpp:105] Iteration 9228, lr = 0.001089
I0428 21:45:48.031406 12706 solver.cpp:218] Iteration 9240 (2.31724 iter/s, 5.17858s/12 iters), loss = 0.0777099
I0428 21:45:48.031461 12706 solver.cpp:237] Train net output #0: loss = 0.0777098 (* 1 = 0.0777098 loss)
I0428 21:45:48.031473 12706 sgd_solver.cpp:105] Iteration 9240, lr = 0.001089
I0428 21:45:53.056763 12706 solver.cpp:218] Iteration 9252 (2.38802 iter/s, 5.02509s/12 iters), loss = 0.08198
I0428 21:45:53.056893 12706 solver.cpp:237] Train net output #0: loss = 0.0819799 (* 1 = 0.0819799 loss)
I0428 21:45:53.056902 12706 sgd_solver.cpp:105] Iteration 9252, lr = 0.001089
I0428 21:45:58.003823 12706 solver.cpp:218] Iteration 9264 (2.42585 iter/s, 4.94671s/12 iters), loss = 0.04695
I0428 21:45:58.003871 12706 solver.cpp:237] Train net output #0: loss = 0.0469499 (* 1 = 0.0469499 loss)
I0428 21:45:58.003881 12706 sgd_solver.cpp:105] Iteration 9264, lr = 0.001089
I0428 21:46:03.129240 12706 solver.cpp:218] Iteration 9276 (2.34139 iter/s, 5.12515s/12 iters), loss = 0.0514634
I0428 21:46:03.129285 12706 solver.cpp:237] Train net output #0: loss = 0.0514633 (* 1 = 0.0514633 loss)
I0428 21:46:03.129294 12706 sgd_solver.cpp:105] Iteration 9276, lr = 0.001089
I0428 21:46:05.176520 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9282.caffemodel
I0428 21:46:08.183416 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9282.solverstate
I0428 21:46:10.478821 12706 solver.cpp:330] Iteration 9282, Testing net (#0)
I0428 21:46:10.478842 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:46:11.205271 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:46:14.967948 12706 solver.cpp:397] Test net output #0: accuracy = 0.488971
I0428 21:46:14.967978 12706 solver.cpp:397] Test net output #1: loss = 2.90682 (* 1 = 2.90682 loss)
I0428 21:46:16.679965 12706 solver.cpp:218] Iteration 9288 (0.885601 iter/s, 13.5501s/12 iters), loss = 0.12703
I0428 21:46:16.680004 12706 solver.cpp:237] Train net output #0: loss = 0.12703 (* 1 = 0.12703 loss)
I0428 21:46:16.680012 12706 sgd_solver.cpp:105] Iteration 9288, lr = 0.001089
I0428 21:46:21.621961 12706 solver.cpp:218] Iteration 9300 (2.42829 iter/s, 4.94174s/12 iters), loss = 0.119266
I0428 21:46:21.622010 12706 solver.cpp:237] Train net output #0: loss = 0.119265 (* 1 = 0.119265 loss)
I0428 21:46:21.622023 12706 sgd_solver.cpp:105] Iteration 9300, lr = 0.001089
I0428 21:46:23.824609 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:46:26.636612 12706 solver.cpp:218] Iteration 9312 (2.39311 iter/s, 5.01439s/12 iters), loss = 0.0670263
I0428 21:46:26.636653 12706 solver.cpp:237] Train net output #0: loss = 0.0670262 (* 1 = 0.0670262 loss)
I0428 21:46:26.636660 12706 sgd_solver.cpp:105] Iteration 9312, lr = 0.001089
I0428 21:46:31.727085 12706 solver.cpp:218] Iteration 9324 (2.35747 iter/s, 5.09021s/12 iters), loss = 0.0517768
I0428 21:46:31.727131 12706 solver.cpp:237] Train net output #0: loss = 0.0517767 (* 1 = 0.0517767 loss)
I0428 21:46:31.727140 12706 sgd_solver.cpp:105] Iteration 9324, lr = 0.001089
I0428 21:46:36.735148 12706 solver.cpp:218] Iteration 9336 (2.39626 iter/s, 5.0078s/12 iters), loss = 0.0405403
I0428 21:46:36.735188 12706 solver.cpp:237] Train net output #0: loss = 0.0405402 (* 1 = 0.0405402 loss)
I0428 21:46:36.735194 12706 sgd_solver.cpp:105] Iteration 9336, lr = 0.001089
I0428 21:46:41.731433 12706 solver.cpp:218] Iteration 9348 (2.40191 iter/s, 4.99603s/12 iters), loss = 0.0227913
I0428 21:46:41.731472 12706 solver.cpp:237] Train net output #0: loss = 0.0227913 (* 1 = 0.0227913 loss)
I0428 21:46:41.731480 12706 sgd_solver.cpp:105] Iteration 9348, lr = 0.001089
I0428 21:46:46.750283 12706 solver.cpp:218] Iteration 9360 (2.39111 iter/s, 5.01859s/12 iters), loss = 0.0919691
I0428 21:46:46.750321 12706 solver.cpp:237] Train net output #0: loss = 0.091969 (* 1 = 0.091969 loss)
I0428 21:46:46.750329 12706 sgd_solver.cpp:105] Iteration 9360, lr = 0.001089
I0428 21:46:51.747483 12706 solver.cpp:218] Iteration 9372 (2.40147 iter/s, 4.99695s/12 iters), loss = 0.0286256
I0428 21:46:51.747521 12706 solver.cpp:237] Train net output #0: loss = 0.0286255 (* 1 = 0.0286255 loss)
I0428 21:46:51.747529 12706 sgd_solver.cpp:105] Iteration 9372, lr = 0.001089
I0428 21:46:56.243990 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9384.caffemodel
I0428 21:46:59.267041 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9384.solverstate
I0428 21:47:01.589957 12706 solver.cpp:330] Iteration 9384, Testing net (#0)
I0428 21:47:01.589977 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:47:02.295737 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:47:06.017108 12706 solver.cpp:397] Test net output #0: accuracy = 0.482843
I0428 21:47:06.017138 12706 solver.cpp:397] Test net output #1: loss = 2.92875 (* 1 = 2.92875 loss)
I0428 21:47:06.108383 12706 solver.cpp:218] Iteration 9384 (0.835638 iter/s, 14.3603s/12 iters), loss = 0.0803457
I0428 21:47:06.108422 12706 solver.cpp:237] Train net output #0: loss = 0.0803456 (* 1 = 0.0803456 loss)
I0428 21:47:06.108429 12706 sgd_solver.cpp:105] Iteration 9384, lr = 0.001089
I0428 21:47:10.437887 12706 solver.cpp:218] Iteration 9396 (2.77182 iter/s, 4.32929s/12 iters), loss = 0.0384287
I0428 21:47:10.437932 12706 solver.cpp:237] Train net output #0: loss = 0.0384286 (* 1 = 0.0384286 loss)
I0428 21:47:10.437942 12706 sgd_solver.cpp:105] Iteration 9396, lr = 0.001089
I0428 21:47:14.738718 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:47:15.389184 12706 solver.cpp:218] Iteration 9408 (2.42373 iter/s, 4.95105s/12 iters), loss = 0.0458629
I0428 21:47:15.389236 12706 solver.cpp:237] Train net output #0: loss = 0.0458628 (* 1 = 0.0458628 loss)
I0428 21:47:15.389248 12706 sgd_solver.cpp:105] Iteration 9408, lr = 0.001089
I0428 21:47:20.367246 12706 solver.cpp:218] Iteration 9420 (2.4107 iter/s, 4.97781s/12 iters), loss = 0.0384818
I0428 21:47:20.367287 12706 solver.cpp:237] Train net output #0: loss = 0.0384817 (* 1 = 0.0384817 loss)
I0428 21:47:20.367295 12706 sgd_solver.cpp:105] Iteration 9420, lr = 0.001089
I0428 21:47:25.357236 12706 solver.cpp:218] Iteration 9432 (2.40493 iter/s, 4.98975s/12 iters), loss = 0.0302065
I0428 21:47:25.357272 12706 solver.cpp:237] Train net output #0: loss = 0.0302064 (* 1 = 0.0302064 loss)
I0428 21:47:25.357280 12706 sgd_solver.cpp:105] Iteration 9432, lr = 0.001089
I0428 21:47:30.354285 12706 solver.cpp:218] Iteration 9444 (2.40153 iter/s, 4.99681s/12 iters), loss = 0.0685632
I0428 21:47:30.354391 12706 solver.cpp:237] Train net output #0: loss = 0.0685631 (* 1 = 0.0685631 loss)
I0428 21:47:30.354401 12706 sgd_solver.cpp:105] Iteration 9444, lr = 0.001089
I0428 21:47:35.342316 12706 solver.cpp:218] Iteration 9456 (2.40591 iter/s, 4.98773s/12 iters), loss = 0.0835415
I0428 21:47:35.342353 12706 solver.cpp:237] Train net output #0: loss = 0.0835414 (* 1 = 0.0835414 loss)
I0428 21:47:35.342361 12706 sgd_solver.cpp:105] Iteration 9456, lr = 0.001089
I0428 21:47:40.352705 12706 solver.cpp:218] Iteration 9468 (2.39514 iter/s, 5.01014s/12 iters), loss = 0.0702096
I0428 21:47:40.352747 12706 solver.cpp:237] Train net output #0: loss = 0.0702095 (* 1 = 0.0702095 loss)
I0428 21:47:40.352756 12706 sgd_solver.cpp:105] Iteration 9468, lr = 0.001089
I0428 21:47:45.387475 12706 solver.cpp:218] Iteration 9480 (2.38354 iter/s, 5.03452s/12 iters), loss = 0.0588635
I0428 21:47:45.387518 12706 solver.cpp:237] Train net output #0: loss = 0.0588634 (* 1 = 0.0588634 loss)
I0428 21:47:45.387526 12706 sgd_solver.cpp:105] Iteration 9480, lr = 0.001089
I0428 21:47:47.422688 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9486.caffemodel
I0428 21:47:50.386886 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9486.solverstate
I0428 21:47:53.169914 12706 solver.cpp:330] Iteration 9486, Testing net (#0)
I0428 21:47:53.169935 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:47:53.846169 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:47:57.609078 12706 solver.cpp:397] Test net output #0: accuracy = 0.492034
I0428 21:47:57.609107 12706 solver.cpp:397] Test net output #1: loss = 2.91531 (* 1 = 2.91531 loss)
I0428 21:47:59.437903 12706 solver.cpp:218] Iteration 9492 (0.854102 iter/s, 14.0498s/12 iters), loss = 0.0297134
I0428 21:47:59.437945 12706 solver.cpp:237] Train net output #0: loss = 0.0297133 (* 1 = 0.0297133 loss)
I0428 21:47:59.437953 12706 sgd_solver.cpp:105] Iteration 9492, lr = 0.001089
I0428 21:48:04.430806 12706 solver.cpp:218] Iteration 9504 (2.40353 iter/s, 4.99266s/12 iters), loss = 0.0524265
I0428 21:48:04.430934 12706 solver.cpp:237] Train net output #0: loss = 0.0524264 (* 1 = 0.0524264 loss)
I0428 21:48:04.430943 12706 sgd_solver.cpp:105] Iteration 9504, lr = 0.001089
I0428 21:48:05.897439 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:48:09.415364 12706 solver.cpp:218] Iteration 9516 (2.40759 iter/s, 4.98423s/12 iters), loss = 0.0689336
I0428 21:48:09.415402 12706 solver.cpp:237] Train net output #0: loss = 0.0689335 (* 1 = 0.0689335 loss)
I0428 21:48:09.415410 12706 sgd_solver.cpp:105] Iteration 9516, lr = 0.001089
I0428 21:48:14.395716 12706 solver.cpp:218] Iteration 9528 (2.40958 iter/s, 4.98011s/12 iters), loss = 0.050126
I0428 21:48:14.395759 12706 solver.cpp:237] Train net output #0: loss = 0.0501259 (* 1 = 0.0501259 loss)
I0428 21:48:14.395767 12706 sgd_solver.cpp:105] Iteration 9528, lr = 0.001089
I0428 21:48:19.425254 12706 solver.cpp:218] Iteration 9540 (2.38602 iter/s, 5.02929s/12 iters), loss = 0.02097
I0428 21:48:19.425295 12706 solver.cpp:237] Train net output #0: loss = 0.0209699 (* 1 = 0.0209699 loss)
I0428 21:48:19.425303 12706 sgd_solver.cpp:105] Iteration 9540, lr = 0.001089
I0428 21:48:24.603173 12706 solver.cpp:218] Iteration 9552 (2.31765 iter/s, 5.17767s/12 iters), loss = 0.0290458
I0428 21:48:24.603211 12706 solver.cpp:237] Train net output #0: loss = 0.0290457 (* 1 = 0.0290457 loss)
I0428 21:48:24.603219 12706 sgd_solver.cpp:105] Iteration 9552, lr = 0.001089
I0428 21:48:29.785813 12706 solver.cpp:218] Iteration 9564 (2.31553 iter/s, 5.18239s/12 iters), loss = 0.0102034
I0428 21:48:29.785866 12706 solver.cpp:237] Train net output #0: loss = 0.0102033 (* 1 = 0.0102033 loss)
I0428 21:48:29.785877 12706 sgd_solver.cpp:105] Iteration 9564, lr = 0.001089
I0428 21:48:34.912555 12706 solver.cpp:218] Iteration 9576 (2.34079 iter/s, 5.12648s/12 iters), loss = 0.0230891
I0428 21:48:34.912693 12706 solver.cpp:237] Train net output #0: loss = 0.023089 (* 1 = 0.023089 loss)
I0428 21:48:34.912703 12706 sgd_solver.cpp:105] Iteration 9576, lr = 0.001089
I0428 21:48:39.427470 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9588.caffemodel
I0428 21:48:42.457842 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9588.solverstate
I0428 21:48:44.773703 12706 solver.cpp:330] Iteration 9588, Testing net (#0)
I0428 21:48:44.773730 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:48:45.437321 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:48:49.351016 12706 solver.cpp:397] Test net output #0: accuracy = 0.481005
I0428 21:48:49.351042 12706 solver.cpp:397] Test net output #1: loss = 2.91021 (* 1 = 2.91021 loss)
I0428 21:48:49.440973 12706 solver.cpp:218] Iteration 9588 (0.826007 iter/s, 14.5277s/12 iters), loss = 0.0581321
I0428 21:48:49.441012 12706 solver.cpp:237] Train net output #0: loss = 0.0581321 (* 1 = 0.0581321 loss)
I0428 21:48:49.441020 12706 sgd_solver.cpp:105] Iteration 9588, lr = 0.001089
I0428 21:48:53.580667 12706 solver.cpp:218] Iteration 9600 (2.89892 iter/s, 4.13948s/12 iters), loss = 0.0624055
I0428 21:48:53.580725 12706 solver.cpp:237] Train net output #0: loss = 0.0624054 (* 1 = 0.0624054 loss)
I0428 21:48:53.580737 12706 sgd_solver.cpp:105] Iteration 9600, lr = 0.001089
I0428 21:48:57.200829 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:48:58.576787 12706 solver.cpp:218] Iteration 9612 (2.40199 iter/s, 4.99586s/12 iters), loss = 0.0632656
I0428 21:48:58.576829 12706 solver.cpp:237] Train net output #0: loss = 0.0632655 (* 1 = 0.0632655 loss)
I0428 21:48:58.576838 12706 sgd_solver.cpp:105] Iteration 9612, lr = 0.001089
I0428 21:49:03.786444 12706 solver.cpp:218] Iteration 9624 (2.30353 iter/s, 5.20941s/12 iters), loss = 0.0320644
I0428 21:49:03.786482 12706 solver.cpp:237] Train net output #0: loss = 0.0320643 (* 1 = 0.0320643 loss)
I0428 21:49:03.786491 12706 sgd_solver.cpp:105] Iteration 9624, lr = 0.001089
I0428 21:49:09.095270 12706 solver.cpp:218] Iteration 9636 (2.2605 iter/s, 5.30857s/12 iters), loss = 0.0524799
I0428 21:49:09.095398 12706 solver.cpp:237] Train net output #0: loss = 0.0524798 (* 1 = 0.0524798 loss)
I0428 21:49:09.095408 12706 sgd_solver.cpp:105] Iteration 9636, lr = 0.001089
I0428 21:49:14.168061 12706 solver.cpp:218] Iteration 9648 (2.36572 iter/s, 5.07246s/12 iters), loss = 0.0610613
I0428 21:49:14.168099 12706 solver.cpp:237] Train net output #0: loss = 0.0610612 (* 1 = 0.0610612 loss)
I0428 21:49:14.168107 12706 sgd_solver.cpp:105] Iteration 9648, lr = 0.001089
I0428 21:49:19.145118 12706 solver.cpp:218] Iteration 9660 (2.41118 iter/s, 4.97681s/12 iters), loss = 0.0656969
I0428 21:49:19.145160 12706 solver.cpp:237] Train net output #0: loss = 0.0656968 (* 1 = 0.0656968 loss)
I0428 21:49:19.145169 12706 sgd_solver.cpp:105] Iteration 9660, lr = 0.001089
I0428 21:49:24.126785 12706 solver.cpp:218] Iteration 9672 (2.40895 iter/s, 4.98142s/12 iters), loss = 0.0312568
I0428 21:49:24.126837 12706 solver.cpp:237] Train net output #0: loss = 0.0312567 (* 1 = 0.0312567 loss)
I0428 21:49:24.126849 12706 sgd_solver.cpp:105] Iteration 9672, lr = 0.001089
I0428 21:49:29.086292 12706 solver.cpp:218] Iteration 9684 (2.41972 iter/s, 4.95926s/12 iters), loss = 0.0471117
I0428 21:49:29.086330 12706 solver.cpp:237] Train net output #0: loss = 0.0471116 (* 1 = 0.0471116 loss)
I0428 21:49:29.086338 12706 sgd_solver.cpp:105] Iteration 9684, lr = 0.001089
I0428 21:49:31.106071 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9690.caffemodel
I0428 21:49:34.122129 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9690.solverstate
I0428 21:49:36.458976 12706 solver.cpp:330] Iteration 9690, Testing net (#0)
I0428 21:49:36.459002 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:49:37.163360 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:49:40.176146 12706 blocking_queue.cpp:49] Waiting for data
I0428 21:49:41.176254 12706 solver.cpp:397] Test net output #0: accuracy = 0.495098
I0428 21:49:41.176290 12706 solver.cpp:397] Test net output #1: loss = 2.92746 (* 1 = 2.92746 loss)
I0428 21:49:43.010423 12706 solver.cpp:218] Iteration 9696 (0.86185 iter/s, 13.9235s/12 iters), loss = 0.0998214
I0428 21:49:43.010474 12706 solver.cpp:237] Train net output #0: loss = 0.0998213 (* 1 = 0.0998213 loss)
I0428 21:49:43.010486 12706 sgd_solver.cpp:105] Iteration 9696, lr = 0.001089
I0428 21:49:47.996263 12706 solver.cpp:218] Iteration 9708 (2.40694 iter/s, 4.98558s/12 iters), loss = 0.0508432
I0428 21:49:47.996315 12706 solver.cpp:237] Train net output #0: loss = 0.0508431 (* 1 = 0.0508431 loss)
I0428 21:49:47.996323 12706 sgd_solver.cpp:105] Iteration 9708, lr = 0.001089
I0428 21:49:48.747584 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:49:53.012087 12706 solver.cpp:218] Iteration 9720 (2.39255 iter/s, 5.01557s/12 iters), loss = 0.0383375
I0428 21:49:53.012126 12706 solver.cpp:237] Train net output #0: loss = 0.0383374 (* 1 = 0.0383374 loss)
I0428 21:49:53.012135 12706 sgd_solver.cpp:105] Iteration 9720, lr = 0.001089
I0428 21:49:57.929997 12706 solver.cpp:218] Iteration 9732 (2.44018 iter/s, 4.91767s/12 iters), loss = 0.087425
I0428 21:49:57.930040 12706 solver.cpp:237] Train net output #0: loss = 0.0874249 (* 1 = 0.0874249 loss)
I0428 21:49:57.930047 12706 sgd_solver.cpp:105] Iteration 9732, lr = 0.001089
I0428 21:50:02.901943 12706 solver.cpp:218] Iteration 9744 (2.41366 iter/s, 4.9717s/12 iters), loss = 0.109718
I0428 21:50:02.901983 12706 solver.cpp:237] Train net output #0: loss = 0.109718 (* 1 = 0.109718 loss)
I0428 21:50:02.901990 12706 sgd_solver.cpp:105] Iteration 9744, lr = 0.001089
I0428 21:50:07.934415 12706 solver.cpp:218] Iteration 9756 (2.38463 iter/s, 5.03222s/12 iters), loss = 0.139124
I0428 21:50:07.934465 12706 solver.cpp:237] Train net output #0: loss = 0.139123 (* 1 = 0.139123 loss)
I0428 21:50:07.934478 12706 sgd_solver.cpp:105] Iteration 9756, lr = 0.001089
I0428 21:50:13.069272 12706 solver.cpp:218] Iteration 9768 (2.33709 iter/s, 5.1346s/12 iters), loss = 0.0479156
I0428 21:50:13.069404 12706 solver.cpp:237] Train net output #0: loss = 0.0479155 (* 1 = 0.0479155 loss)
I0428 21:50:13.069413 12706 sgd_solver.cpp:105] Iteration 9768, lr = 0.001089
I0428 21:50:18.296741 12706 solver.cpp:218] Iteration 9780 (2.29572 iter/s, 5.22713s/12 iters), loss = 0.0368152
I0428 21:50:18.296782 12706 solver.cpp:237] Train net output #0: loss = 0.0368151 (* 1 = 0.0368151 loss)
I0428 21:50:18.296789 12706 sgd_solver.cpp:105] Iteration 9780, lr = 0.001089
I0428 21:50:22.897003 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9792.caffemodel
I0428 21:50:25.896963 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9792.solverstate
I0428 21:50:28.205152 12706 solver.cpp:330] Iteration 9792, Testing net (#0)
I0428 21:50:28.205173 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:50:28.716235 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:50:32.636200 12706 solver.cpp:397] Test net output #0: accuracy = 0.482843
I0428 21:50:32.636229 12706 solver.cpp:397] Test net output #1: loss = 2.98763 (* 1 = 2.98763 loss)
I0428 21:50:32.725363 12706 solver.cpp:218] Iteration 9792 (0.831715 iter/s, 14.428s/12 iters), loss = 0.0248426
I0428 21:50:32.725399 12706 solver.cpp:237] Train net output #0: loss = 0.0248425 (* 1 = 0.0248425 loss)
I0428 21:50:32.725407 12706 sgd_solver.cpp:105] Iteration 9792, lr = 0.001089
I0428 21:50:36.885408 12706 solver.cpp:218] Iteration 9804 (2.88473 iter/s, 4.15983s/12 iters), loss = 0.0318532
I0428 21:50:36.885452 12706 solver.cpp:237] Train net output #0: loss = 0.0318531 (* 1 = 0.0318531 loss)
I0428 21:50:36.885462 12706 sgd_solver.cpp:105] Iteration 9804, lr = 0.001089
I0428 21:50:40.025935 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:50:42.149384 12706 solver.cpp:218] Iteration 9816 (2.27976 iter/s, 5.26372s/12 iters), loss = 0.0145586
I0428 21:50:42.149422 12706 solver.cpp:237] Train net output #0: loss = 0.0145585 (* 1 = 0.0145585 loss)
I0428 21:50:42.149430 12706 sgd_solver.cpp:105] Iteration 9816, lr = 0.001089
I0428 21:50:47.288328 12706 solver.cpp:218] Iteration 9828 (2.33522 iter/s, 5.13869s/12 iters), loss = 0.0916999
I0428 21:50:47.288431 12706 solver.cpp:237] Train net output #0: loss = 0.0916998 (* 1 = 0.0916998 loss)
I0428 21:50:47.288441 12706 sgd_solver.cpp:105] Iteration 9828, lr = 0.001089
I0428 21:50:52.221863 12706 solver.cpp:218] Iteration 9840 (2.43248 iter/s, 4.93323s/12 iters), loss = 0.0902294
I0428 21:50:52.221904 12706 solver.cpp:237] Train net output #0: loss = 0.0902293 (* 1 = 0.0902293 loss)
I0428 21:50:52.221912 12706 sgd_solver.cpp:105] Iteration 9840, lr = 0.001089
I0428 21:50:57.211051 12706 solver.cpp:218] Iteration 9852 (2.40532 iter/s, 4.98894s/12 iters), loss = 0.0165531
I0428 21:50:57.211103 12706 solver.cpp:237] Train net output #0: loss = 0.016553 (* 1 = 0.016553 loss)
I0428 21:50:57.211113 12706 sgd_solver.cpp:105] Iteration 9852, lr = 0.001089
I0428 21:51:02.215992 12706 solver.cpp:218] Iteration 9864 (2.39775 iter/s, 5.00468s/12 iters), loss = 0.0456974
I0428 21:51:02.216032 12706 solver.cpp:237] Train net output #0: loss = 0.0456973 (* 1 = 0.0456973 loss)
I0428 21:51:02.216039 12706 sgd_solver.cpp:105] Iteration 9864, lr = 0.001089
I0428 21:51:07.181787 12706 solver.cpp:218] Iteration 9876 (2.41665 iter/s, 4.96555s/12 iters), loss = 0.0518208
I0428 21:51:07.181825 12706 solver.cpp:237] Train net output #0: loss = 0.0518207 (* 1 = 0.0518207 loss)
I0428 21:51:07.181833 12706 sgd_solver.cpp:105] Iteration 9876, lr = 0.001089
I0428 21:51:12.184078 12706 solver.cpp:218] Iteration 9888 (2.39902 iter/s, 5.00204s/12 iters), loss = 0.0563308
I0428 21:51:12.184130 12706 solver.cpp:237] Train net output #0: loss = 0.0563307 (* 1 = 0.0563307 loss)
I0428 21:51:12.184142 12706 sgd_solver.cpp:105] Iteration 9888, lr = 0.001089
I0428 21:51:14.233505 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9894.caffemodel
I0428 21:51:17.314540 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9894.solverstate
I0428 21:51:19.620817 12706 solver.cpp:330] Iteration 9894, Testing net (#0)
I0428 21:51:19.620836 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:51:20.114207 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:51:24.005076 12706 solver.cpp:397] Test net output #0: accuracy = 0.490196
I0428 21:51:24.005113 12706 solver.cpp:397] Test net output #1: loss = 2.96899 (* 1 = 2.96899 loss)
I0428 21:51:25.839130 12706 solver.cpp:218] Iteration 9900 (0.878834 iter/s, 13.6545s/12 iters), loss = 0.0635881
I0428 21:51:25.839169 12706 solver.cpp:237] Train net output #0: loss = 0.063588 (* 1 = 0.063588 loss)
I0428 21:51:25.839177 12706 sgd_solver.cpp:105] Iteration 9900, lr = 0.001089
I0428 21:51:30.827603 12706 solver.cpp:218] Iteration 9912 (2.40566 iter/s, 4.98823s/12 iters), loss = 0.0406142
I0428 21:51:30.827646 12706 solver.cpp:237] Train net output #0: loss = 0.0406141 (* 1 = 0.0406141 loss)
I0428 21:51:30.827652 12706 sgd_solver.cpp:105] Iteration 9912, lr = 0.001089
I0428 21:51:30.928310 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:51:35.964102 12706 solver.cpp:218] Iteration 9924 (2.33634 iter/s, 5.13625s/12 iters), loss = 0.0634765
I0428 21:51:35.964139 12706 solver.cpp:237] Train net output #0: loss = 0.0634764 (* 1 = 0.0634764 loss)
I0428 21:51:35.964146 12706 sgd_solver.cpp:105] Iteration 9924, lr = 0.001089
I0428 21:51:41.099325 12706 solver.cpp:218] Iteration 9936 (2.33692 iter/s, 5.13497s/12 iters), loss = 0.0821398
I0428 21:51:41.099364 12706 solver.cpp:237] Train net output #0: loss = 0.0821397 (* 1 = 0.0821397 loss)
I0428 21:51:41.099370 12706 sgd_solver.cpp:105] Iteration 9936, lr = 0.001089
I0428 21:51:46.058533 12706 solver.cpp:218] Iteration 9948 (2.41986 iter/s, 4.95896s/12 iters), loss = 0.0425372
I0428 21:51:46.058589 12706 solver.cpp:237] Train net output #0: loss = 0.042537 (* 1 = 0.042537 loss)
I0428 21:51:46.058601 12706 sgd_solver.cpp:105] Iteration 9948, lr = 0.001089
I0428 21:51:51.336932 12706 solver.cpp:218] Iteration 9960 (2.27353 iter/s, 5.27813s/12 iters), loss = 0.032539
I0428 21:51:51.337077 12706 solver.cpp:237] Train net output #0: loss = 0.0325389 (* 1 = 0.0325389 loss)
I0428 21:51:51.337091 12706 sgd_solver.cpp:105] Iteration 9960, lr = 0.001089
I0428 21:51:56.305279 12706 solver.cpp:218] Iteration 9972 (2.41546 iter/s, 4.968s/12 iters), loss = 0.0230884
I0428 21:51:56.305320 12706 solver.cpp:237] Train net output #0: loss = 0.0230883 (* 1 = 0.0230883 loss)
I0428 21:51:56.305327 12706 sgd_solver.cpp:105] Iteration 9972, lr = 0.001089
I0428 21:52:01.311707 12706 solver.cpp:218] Iteration 9984 (2.39704 iter/s, 5.00618s/12 iters), loss = 0.0607333
I0428 21:52:01.311749 12706 solver.cpp:237] Train net output #0: loss = 0.0607332 (* 1 = 0.0607332 loss)
I0428 21:52:01.311758 12706 sgd_solver.cpp:105] Iteration 9984, lr = 0.001089
I0428 21:52:06.067561 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9996.caffemodel
I0428 21:52:11.695420 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9996.solverstate
I0428 21:52:14.854516 12706 solver.cpp:330] Iteration 9996, Testing net (#0)
I0428 21:52:14.854534 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:52:15.350662 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:52:19.341770 12706 solver.cpp:397] Test net output #0: accuracy = 0.496324
I0428 21:52:19.341804 12706 solver.cpp:397] Test net output #1: loss = 2.9877 (* 1 = 2.9877 loss)
I0428 21:52:19.430188 12706 solver.cpp:218] Iteration 9996 (0.662335 iter/s, 18.1177s/12 iters), loss = 0.0511511
I0428 21:52:19.430229 12706 solver.cpp:237] Train net output #0: loss = 0.051151 (* 1 = 0.051151 loss)
I0428 21:52:19.430238 12706 sgd_solver.cpp:105] Iteration 9996, lr = 0.001089
I0428 21:52:23.555281 12706 solver.cpp:218] Iteration 10008 (2.90918 iter/s, 4.12488s/12 iters), loss = 0.0785383
I0428 21:52:23.555450 12706 solver.cpp:237] Train net output #0: loss = 0.0785382 (* 1 = 0.0785382 loss)
I0428 21:52:23.555460 12706 sgd_solver.cpp:105] Iteration 10008, lr = 0.001089
I0428 21:52:25.853806 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:52:28.673980 12706 solver.cpp:218] Iteration 10020 (2.34452 iter/s, 5.11831s/12 iters), loss = 0.0461769
I0428 21:52:28.674041 12706 solver.cpp:237] Train net output #0: loss = 0.0461768 (* 1 = 0.0461768 loss)
I0428 21:52:28.674054 12706 sgd_solver.cpp:105] Iteration 10020, lr = 0.001089
I0428 21:52:33.761346 12706 solver.cpp:218] Iteration 10032 (2.35891 iter/s, 5.0871s/12 iters), loss = 0.149061
I0428 21:52:33.761389 12706 solver.cpp:237] Train net output #0: loss = 0.149061 (* 1 = 0.149061 loss)
I0428 21:52:33.761396 12706 sgd_solver.cpp:105] Iteration 10032, lr = 0.001089
I0428 21:52:38.782472 12706 solver.cpp:218] Iteration 10044 (2.39002 iter/s, 5.02087s/12 iters), loss = 0.0145358
I0428 21:52:38.782514 12706 solver.cpp:237] Train net output #0: loss = 0.0145357 (* 1 = 0.0145357 loss)
I0428 21:52:38.782522 12706 sgd_solver.cpp:105] Iteration 10044, lr = 0.001089
I0428 21:52:43.839375 12706 solver.cpp:218] Iteration 10056 (2.37311 iter/s, 5.05665s/12 iters), loss = 0.0536143
I0428 21:52:43.839416 12706 solver.cpp:237] Train net output #0: loss = 0.0536142 (* 1 = 0.0536142 loss)
I0428 21:52:43.839424 12706 sgd_solver.cpp:105] Iteration 10056, lr = 0.001089
I0428 21:52:49.144996 12706 solver.cpp:218] Iteration 10068 (2.26186 iter/s, 5.30536s/12 iters), loss = 0.168502
I0428 21:52:49.145037 12706 solver.cpp:237] Train net output #0: loss = 0.168502 (* 1 = 0.168502 loss)
I0428 21:52:49.145045 12706 sgd_solver.cpp:105] Iteration 10068, lr = 0.001089
I0428 21:52:54.112646 12706 solver.cpp:218] Iteration 10080 (2.41575 iter/s, 4.9674s/12 iters), loss = 0.0555607
I0428 21:52:54.112771 12706 solver.cpp:237] Train net output #0: loss = 0.0555606 (* 1 = 0.0555606 loss)
I0428 21:52:54.112783 12706 sgd_solver.cpp:105] Iteration 10080, lr = 0.001089
I0428 21:52:58.962725 12706 solver.cpp:218] Iteration 10092 (2.47435 iter/s, 4.84975s/12 iters), loss = 0.0208502
I0428 21:52:58.962779 12706 solver.cpp:237] Train net output #0: loss = 0.0208501 (* 1 = 0.0208501 loss)
I0428 21:52:58.962790 12706 sgd_solver.cpp:105] Iteration 10092, lr = 0.001089
I0428 21:53:01.009920 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_10098.caffemodel
I0428 21:53:04.190343 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_10098.solverstate
I0428 21:53:07.049883 12706 solver.cpp:330] Iteration 10098, Testing net (#0)
I0428 21:53:07.049904 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:53:07.450299 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:53:11.444013 12706 solver.cpp:397] Test net output #0: accuracy = 0.493873
I0428 21:53:11.444049 12706 solver.cpp:397] Test net output #1: loss = 2.9998 (* 1 = 2.9998 loss)
I0428 21:53:13.290215 12706 solver.cpp:218] Iteration 10104 (0.837587 iter/s, 14.3269s/12 iters), loss = 0.0402978
I0428 21:53:13.290268 12706 solver.cpp:237] Train net output #0: loss = 0.0402977 (* 1 = 0.0402977 loss)
I0428 21:53:13.290280 12706 sgd_solver.cpp:105] Iteration 10104, lr = 0.00035937
I0428 21:53:17.699613 12726 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:53:18.319775 12706 solver.cpp:218] Iteration 10116 (2.38602 iter/s, 5.0293s/12 iters), loss = 0.0312306
I0428 21:53:18.319823 12706 solver.cpp:237] Train net output #0: loss = 0.0312305 (* 1 = 0.0312305 loss)
I0428 21:53:18.319833 12706 sgd_solver.cpp:105] Iteration 10116, lr = 0.00035937
I0428 21:53:23.419812 12706 solver.cpp:218] Iteration 10128 (2.35305 iter/s, 5.09977s/12 iters), loss = 0.0695461
I0428 21:53:23.419865 12706 solver.cpp:237] Train net output #0: loss = 0.069546 (* 1 = 0.069546 loss)
I0428 21:53:23.419878 12706 sgd_solver.cpp:105] Iteration 10128, lr = 0.00035937
I0428 21:53:28.723917 12706 solver.cpp:218] Iteration 10140 (2.26251 iter/s, 5.30383s/12 iters), loss = 0.091385
I0428 21:53:28.724045 12706 solver.cpp:237] Train net output #0: loss = 0.0913848 (* 1 = 0.0913848 loss)
I0428 21:53:28.724056 12706 sgd_solver.cpp:105] Iteration 10140, lr = 0.00035937
I0428 21:53:33.905113 12706 solver.cpp:218] Iteration 10152 (2.31622 iter/s, 5.18085s/12 iters), loss = 0.0550383
I0428 21:53:33.905159 12706 solver.cpp:237] Train net output #0: loss = 0.0550382 (* 1 = 0.0550382 loss)
I0428 21:53:33.905167 12706 sgd_solver.cpp:105] Iteration 10152, lr = 0.00035937
I0428 21:53:38.877424 12706 solver.cpp:218] Iteration 10164 (2.41349 iter/s, 4.97206s/12 iters), loss = 0.0885418
I0428 21:53:38.877465 12706 solver.cpp:237] Train net output #0: loss = 0.0885416 (* 1 = 0.0885416 loss)
I0428 21:53:38.877475 12706 sgd_solver.cpp:105] Iteration 10164, lr = 0.00035937
I0428 21:53:44.008286 12706 solver.cpp:218] Iteration 10176 (2.3389 iter/s, 5.13061s/12 iters), loss = 0.0196274
I0428 21:53:44.008337 12706 solver.cpp:237] Train net output #0: loss = 0.0196272 (* 1 = 0.0196272 loss)
I0428 21:53:44.008347 12706 sgd_solver.cpp:105] Iteration 10176, lr = 0.00035937
I0428 21:53:49.021462 12706 solver.cpp:218] Iteration 10188 (2.39382 iter/s, 5.01292s/12 iters), loss = 0.0768356
I0428 21:53:49.021517 12706 solver.cpp:237] Train net output #0: loss = 0.0768354 (* 1 = 0.0768354 loss)
I0428 21:53:49.021528 12706 sgd_solver.cpp:105] Iteration 10188, lr = 0.00035937
I0428 21:53:53.567714 12706 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_10200.caffemodel
I0428 21:53:57.495208 12706 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_10200.solverstate
I0428 21:54:00.321566 12706 solver.cpp:310] Iteration 10200, loss = 0.0376387
I0428 21:54:00.321669 12706 solver.cpp:330] Iteration 10200, Testing net (#0)
I0428 21:54:00.321678 12706 net.cpp:676] Ignoring source layer train-data
I0428 21:54:00.724592 12748 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:54:04.904820 12706 solver.cpp:397] Test net output #0: accuracy = 0.492034
I0428 21:54:04.904858 12706 solver.cpp:397] Test net output #1: loss = 3.01253 (* 1 = 3.01253 loss)
I0428 21:54:04.904866 12706 solver.cpp:315] Optimization Done.
I0428 21:54:04.904871 12706 caffe.cpp:259] Optimization Done.
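
Editor's note (not part of the original log): the learning rate printed above drops from 0.001089 to 0.00035937 between iterations 10092 and 10104, a factor of 0.33, which is consistent with Caffe's "step" lr_policy, lr = base_lr * gamma ^ floor(iter / stepsize). The short Python sketch below reproduces that schedule; base_lr=0.01, gamma=0.33 and stepsize=3366 are assumptions inferred from the logged values, not taken from the log lines themselves.

# Minimal sketch, assuming Caffe's "step" policy with base_lr=0.01,
# gamma=0.33, stepsize=3366 (inferred; treat as assumptions).
def step_lr(iteration, base_lr=0.01, gamma=0.33, stepsize=3366):
    """Caffe 'step' policy: lr = base_lr * gamma ** floor(iteration / stepsize)."""
    return base_lr * gamma ** (iteration // stepsize)

if __name__ == "__main__":
    for it in (10092, 10104, 10200):
        print(it, step_lr(it))
    # 10092 -> 0.01 * 0.33**2 = 0.001089    (matches lines up to iteration 10092)
    # 10104 -> 0.01 * 0.33**3 = 0.00035937  (matches the step-down at iteration 10104)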