DIGITS-CNN/cars/lr-investigations/sigmoid/1e-2/50_0.025/caffe_output.log

I0428 21:19:30.683805 25678 upgrade_proto.cpp:1082] Attempting to upgrade input file specified using deprecated 'solver_type' field (enum)': /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210428-201811-d8e6/solver.prototxt
I0428 21:19:30.683943 25678 upgrade_proto.cpp:1089] Successfully upgraded file specified using deprecated 'solver_type' field (enum) to 'type' field (string).
W0428 21:19:30.683948 25678 upgrade_proto.cpp:1091] Note that future Caffe releases will only support 'type' field (string) for a solver's type.
I0428 21:19:30.684008 25678 caffe.cpp:218] Using GPUs 1
I0428 21:19:30.723389 25678 caffe.cpp:223] GPU 1: GeForce RTX 2080
I0428 21:19:31.065616 25678 solver.cpp:44] Initializing solver from parameters:
test_iter: 51
test_interval: 102
base_lr: 0.01
display: 12
max_iter: 10200
lr_policy: "sigmoid"
gamma: -0.00024509805
momentum: 0.9
weight_decay: 0.0001
stepsize: 5100
snapshot: 102
snapshot_prefix: "snapshot"
solver_mode: GPU
device_id: 1
net: "train_val.prototxt"
train_state {
level: 0
stage: ""
}
type: "SGD"
I0428 21:19:31.066479 25678 solver.cpp:87] Creating training net from net file: train_val.prototxt
I0428 21:19:31.067139 25678 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer val-data
I0428 21:19:31.067154 25678 net.cpp:294] The NetState phase (0) differed from the phase (1) specified by a rule in layer accuracy
I0428 21:19:31.067292 25678 net.cpp:51] Initializing net from parameters:
state {
phase: TRAIN
level: 0
stage: ""
}
layer {
name: "train-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/train_db"
batch_size: 128
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
I0428 21:19:31.067379 25678 layer_factory.hpp:77] Creating layer train-data
I0428 21:19:31.101217 25678 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/train_db
I0428 21:19:31.126984 25678 net.cpp:84] Creating Layer train-data
I0428 21:19:31.127030 25678 net.cpp:380] train-data -> data
I0428 21:19:31.127068 25678 net.cpp:380] train-data -> label
I0428 21:19:31.127094 25678 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/mean.binaryproto
I0428 21:19:31.136636 25678 data_layer.cpp:45] output data size: 128,3,227,227
I0428 21:19:31.273425 25678 net.cpp:122] Setting up train-data
I0428 21:19:31.273447 25678 net.cpp:129] Top shape: 128 3 227 227 (19787136)
I0428 21:19:31.273452 25678 net.cpp:129] Top shape: 128 (128)
I0428 21:19:31.273453 25678 net.cpp:137] Memory required for data: 79149056
I0428 21:19:31.273463 25678 layer_factory.hpp:77] Creating layer conv1
I0428 21:19:31.273488 25678 net.cpp:84] Creating Layer conv1
I0428 21:19:31.273494 25678 net.cpp:406] conv1 <- data
I0428 21:19:31.273504 25678 net.cpp:380] conv1 -> conv1
I0428 21:19:32.169718 25678 net.cpp:122] Setting up conv1
I0428 21:19:32.169741 25678 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0428 21:19:32.169744 25678 net.cpp:137] Memory required for data: 227833856
I0428 21:19:32.169764 25678 layer_factory.hpp:77] Creating layer relu1
I0428 21:19:32.169773 25678 net.cpp:84] Creating Layer relu1
I0428 21:19:32.169777 25678 net.cpp:406] relu1 <- conv1
I0428 21:19:32.169782 25678 net.cpp:367] relu1 -> conv1 (in-place)
I0428 21:19:32.170110 25678 net.cpp:122] Setting up relu1
I0428 21:19:32.170123 25678 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0428 21:19:32.170126 25678 net.cpp:137] Memory required for data: 376518656
I0428 21:19:32.170130 25678 layer_factory.hpp:77] Creating layer norm1
I0428 21:19:32.170138 25678 net.cpp:84] Creating Layer norm1
I0428 21:19:32.170161 25678 net.cpp:406] norm1 <- conv1
I0428 21:19:32.170167 25678 net.cpp:380] norm1 -> norm1
I0428 21:19:32.170691 25678 net.cpp:122] Setting up norm1
I0428 21:19:32.170701 25678 net.cpp:129] Top shape: 128 96 55 55 (37171200)
I0428 21:19:32.170704 25678 net.cpp:137] Memory required for data: 525203456
I0428 21:19:32.170708 25678 layer_factory.hpp:77] Creating layer pool1
I0428 21:19:32.170715 25678 net.cpp:84] Creating Layer pool1
I0428 21:19:32.170718 25678 net.cpp:406] pool1 <- norm1
I0428 21:19:32.170722 25678 net.cpp:380] pool1 -> pool1
I0428 21:19:32.170753 25678 net.cpp:122] Setting up pool1
I0428 21:19:32.170758 25678 net.cpp:129] Top shape: 128 96 27 27 (8957952)
I0428 21:19:32.170761 25678 net.cpp:137] Memory required for data: 561035264
I0428 21:19:32.170764 25678 layer_factory.hpp:77] Creating layer conv2
I0428 21:19:32.170773 25678 net.cpp:84] Creating Layer conv2
I0428 21:19:32.170776 25678 net.cpp:406] conv2 <- pool1
I0428 21:19:32.170780 25678 net.cpp:380] conv2 -> conv2
I0428 21:19:32.178409 25678 net.cpp:122] Setting up conv2
I0428 21:19:32.178427 25678 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0428 21:19:32.178431 25678 net.cpp:137] Memory required for data: 656586752
I0428 21:19:32.178439 25678 layer_factory.hpp:77] Creating layer relu2
I0428 21:19:32.178447 25678 net.cpp:84] Creating Layer relu2
I0428 21:19:32.178450 25678 net.cpp:406] relu2 <- conv2
I0428 21:19:32.178455 25678 net.cpp:367] relu2 -> conv2 (in-place)
I0428 21:19:32.178954 25678 net.cpp:122] Setting up relu2
I0428 21:19:32.178964 25678 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0428 21:19:32.178967 25678 net.cpp:137] Memory required for data: 752138240
I0428 21:19:32.178970 25678 layer_factory.hpp:77] Creating layer norm2
I0428 21:19:32.178977 25678 net.cpp:84] Creating Layer norm2
I0428 21:19:32.178979 25678 net.cpp:406] norm2 <- conv2
I0428 21:19:32.178984 25678 net.cpp:380] norm2 -> norm2
I0428 21:19:32.179311 25678 net.cpp:122] Setting up norm2
I0428 21:19:32.179320 25678 net.cpp:129] Top shape: 128 256 27 27 (23887872)
I0428 21:19:32.179323 25678 net.cpp:137] Memory required for data: 847689728
I0428 21:19:32.179327 25678 layer_factory.hpp:77] Creating layer pool2
I0428 21:19:32.179332 25678 net.cpp:84] Creating Layer pool2
I0428 21:19:32.179335 25678 net.cpp:406] pool2 <- norm2
I0428 21:19:32.179340 25678 net.cpp:380] pool2 -> pool2
I0428 21:19:32.179364 25678 net.cpp:122] Setting up pool2
I0428 21:19:32.179369 25678 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0428 21:19:32.179371 25678 net.cpp:137] Memory required for data: 869840896
I0428 21:19:32.179374 25678 layer_factory.hpp:77] Creating layer conv3
I0428 21:19:32.179383 25678 net.cpp:84] Creating Layer conv3
I0428 21:19:32.179385 25678 net.cpp:406] conv3 <- pool2
I0428 21:19:32.179389 25678 net.cpp:380] conv3 -> conv3
I0428 21:19:32.190026 25678 net.cpp:122] Setting up conv3
I0428 21:19:32.190042 25678 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 21:19:32.190045 25678 net.cpp:137] Memory required for data: 903067648
I0428 21:19:32.190057 25678 layer_factory.hpp:77] Creating layer relu3
I0428 21:19:32.190064 25678 net.cpp:84] Creating Layer relu3
I0428 21:19:32.190068 25678 net.cpp:406] relu3 <- conv3
I0428 21:19:32.190075 25678 net.cpp:367] relu3 -> conv3 (in-place)
I0428 21:19:32.190678 25678 net.cpp:122] Setting up relu3
I0428 21:19:32.190688 25678 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 21:19:32.190691 25678 net.cpp:137] Memory required for data: 936294400
I0428 21:19:32.190694 25678 layer_factory.hpp:77] Creating layer conv4
I0428 21:19:32.190704 25678 net.cpp:84] Creating Layer conv4
I0428 21:19:32.190708 25678 net.cpp:406] conv4 <- conv3
I0428 21:19:32.190714 25678 net.cpp:380] conv4 -> conv4
I0428 21:19:32.202229 25678 net.cpp:122] Setting up conv4
I0428 21:19:32.202247 25678 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 21:19:32.202250 25678 net.cpp:137] Memory required for data: 969521152
I0428 21:19:32.202260 25678 layer_factory.hpp:77] Creating layer relu4
I0428 21:19:32.202268 25678 net.cpp:84] Creating Layer relu4
I0428 21:19:32.202289 25678 net.cpp:406] relu4 <- conv4
I0428 21:19:32.202297 25678 net.cpp:367] relu4 -> conv4 (in-place)
I0428 21:19:32.202857 25678 net.cpp:122] Setting up relu4
I0428 21:19:32.202865 25678 net.cpp:129] Top shape: 128 384 13 13 (8306688)
I0428 21:19:32.202868 25678 net.cpp:137] Memory required for data: 1002747904
I0428 21:19:32.202872 25678 layer_factory.hpp:77] Creating layer conv5
I0428 21:19:32.202883 25678 net.cpp:84] Creating Layer conv5
I0428 21:19:32.202885 25678 net.cpp:406] conv5 <- conv4
I0428 21:19:32.202893 25678 net.cpp:380] conv5 -> conv5
I0428 21:19:32.212374 25678 net.cpp:122] Setting up conv5
I0428 21:19:32.212393 25678 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0428 21:19:32.212396 25678 net.cpp:137] Memory required for data: 1024899072
I0428 21:19:32.212409 25678 layer_factory.hpp:77] Creating layer relu5
I0428 21:19:32.212417 25678 net.cpp:84] Creating Layer relu5
I0428 21:19:32.212421 25678 net.cpp:406] relu5 <- conv5
I0428 21:19:32.212430 25678 net.cpp:367] relu5 -> conv5 (in-place)
I0428 21:19:32.212977 25678 net.cpp:122] Setting up relu5
I0428 21:19:32.212990 25678 net.cpp:129] Top shape: 128 256 13 13 (5537792)
I0428 21:19:32.212992 25678 net.cpp:137] Memory required for data: 1047050240
I0428 21:19:32.212996 25678 layer_factory.hpp:77] Creating layer pool5
I0428 21:19:32.213002 25678 net.cpp:84] Creating Layer pool5
I0428 21:19:32.213006 25678 net.cpp:406] pool5 <- conv5
I0428 21:19:32.213011 25678 net.cpp:380] pool5 -> pool5
I0428 21:19:32.213047 25678 net.cpp:122] Setting up pool5
I0428 21:19:32.213052 25678 net.cpp:129] Top shape: 128 256 6 6 (1179648)
I0428 21:19:32.213055 25678 net.cpp:137] Memory required for data: 1051768832
I0428 21:19:32.213057 25678 layer_factory.hpp:77] Creating layer fc6
I0428 21:19:32.213068 25678 net.cpp:84] Creating Layer fc6
I0428 21:19:32.213070 25678 net.cpp:406] fc6 <- pool5
I0428 21:19:32.213075 25678 net.cpp:380] fc6 -> fc6
I0428 21:19:32.572074 25678 net.cpp:122] Setting up fc6
I0428 21:19:32.572093 25678 net.cpp:129] Top shape: 128 4096 (524288)
I0428 21:19:32.572098 25678 net.cpp:137] Memory required for data: 1053865984
I0428 21:19:32.572105 25678 layer_factory.hpp:77] Creating layer relu6
I0428 21:19:32.572113 25678 net.cpp:84] Creating Layer relu6
I0428 21:19:32.572118 25678 net.cpp:406] relu6 <- fc6
I0428 21:19:32.572124 25678 net.cpp:367] relu6 -> fc6 (in-place)
I0428 21:19:32.572878 25678 net.cpp:122] Setting up relu6
I0428 21:19:32.572890 25678 net.cpp:129] Top shape: 128 4096 (524288)
I0428 21:19:32.572892 25678 net.cpp:137] Memory required for data: 1055963136
I0428 21:19:32.572896 25678 layer_factory.hpp:77] Creating layer drop6
I0428 21:19:32.572902 25678 net.cpp:84] Creating Layer drop6
I0428 21:19:32.572906 25678 net.cpp:406] drop6 <- fc6
I0428 21:19:32.572909 25678 net.cpp:367] drop6 -> fc6 (in-place)
I0428 21:19:32.572938 25678 net.cpp:122] Setting up drop6
I0428 21:19:32.572943 25678 net.cpp:129] Top shape: 128 4096 (524288)
I0428 21:19:32.572945 25678 net.cpp:137] Memory required for data: 1058060288
I0428 21:19:32.572948 25678 layer_factory.hpp:77] Creating layer fc7
I0428 21:19:32.572955 25678 net.cpp:84] Creating Layer fc7
I0428 21:19:32.572958 25678 net.cpp:406] fc7 <- fc6
I0428 21:19:32.572962 25678 net.cpp:380] fc7 -> fc7
I0428 21:19:32.732419 25678 net.cpp:122] Setting up fc7
I0428 21:19:32.732440 25678 net.cpp:129] Top shape: 128 4096 (524288)
I0428 21:19:32.732443 25678 net.cpp:137] Memory required for data: 1060157440
I0428 21:19:32.732452 25678 layer_factory.hpp:77] Creating layer relu7
I0428 21:19:32.732460 25678 net.cpp:84] Creating Layer relu7
I0428 21:19:32.732462 25678 net.cpp:406] relu7 <- fc7
I0428 21:19:32.732470 25678 net.cpp:367] relu7 -> fc7 (in-place)
I0428 21:19:32.732975 25678 net.cpp:122] Setting up relu7
I0428 21:19:32.732990 25678 net.cpp:129] Top shape: 128 4096 (524288)
I0428 21:19:32.732993 25678 net.cpp:137] Memory required for data: 1062254592
I0428 21:19:32.732996 25678 layer_factory.hpp:77] Creating layer drop7
I0428 21:19:32.733001 25678 net.cpp:84] Creating Layer drop7
I0428 21:19:32.733027 25678 net.cpp:406] drop7 <- fc7
I0428 21:19:32.733034 25678 net.cpp:367] drop7 -> fc7 (in-place)
I0428 21:19:32.733058 25678 net.cpp:122] Setting up drop7
I0428 21:19:32.733063 25678 net.cpp:129] Top shape: 128 4096 (524288)
I0428 21:19:32.733067 25678 net.cpp:137] Memory required for data: 1064351744
I0428 21:19:32.733068 25678 layer_factory.hpp:77] Creating layer fc8
I0428 21:19:32.733076 25678 net.cpp:84] Creating Layer fc8
I0428 21:19:32.733079 25678 net.cpp:406] fc8 <- fc7
I0428 21:19:32.733084 25678 net.cpp:380] fc8 -> fc8
I0428 21:19:32.741024 25678 net.cpp:122] Setting up fc8
I0428 21:19:32.741042 25678 net.cpp:129] Top shape: 128 196 (25088)
I0428 21:19:32.741045 25678 net.cpp:137] Memory required for data: 1064452096
I0428 21:19:32.741055 25678 layer_factory.hpp:77] Creating layer loss
I0428 21:19:32.741063 25678 net.cpp:84] Creating Layer loss
I0428 21:19:32.741066 25678 net.cpp:406] loss <- fc8
I0428 21:19:32.741071 25678 net.cpp:406] loss <- label
I0428 21:19:32.741080 25678 net.cpp:380] loss -> loss
I0428 21:19:32.741089 25678 layer_factory.hpp:77] Creating layer loss
I0428 21:19:32.741950 25678 net.cpp:122] Setting up loss
I0428 21:19:32.741958 25678 net.cpp:129] Top shape: (1)
I0428 21:19:32.741961 25678 net.cpp:132] with loss weight 1
I0428 21:19:32.741978 25678 net.cpp:137] Memory required for data: 1064452100
I0428 21:19:32.741982 25678 net.cpp:198] loss needs backward computation.
I0428 21:19:32.741988 25678 net.cpp:198] fc8 needs backward computation.
I0428 21:19:32.741991 25678 net.cpp:198] drop7 needs backward computation.
I0428 21:19:32.741994 25678 net.cpp:198] relu7 needs backward computation.
I0428 21:19:32.741997 25678 net.cpp:198] fc7 needs backward computation.
I0428 21:19:32.742000 25678 net.cpp:198] drop6 needs backward computation.
I0428 21:19:32.742002 25678 net.cpp:198] relu6 needs backward computation.
I0428 21:19:32.742005 25678 net.cpp:198] fc6 needs backward computation.
I0428 21:19:32.742008 25678 net.cpp:198] pool5 needs backward computation.
I0428 21:19:32.742012 25678 net.cpp:198] relu5 needs backward computation.
I0428 21:19:32.742014 25678 net.cpp:198] conv5 needs backward computation.
I0428 21:19:32.742017 25678 net.cpp:198] relu4 needs backward computation.
I0428 21:19:32.742019 25678 net.cpp:198] conv4 needs backward computation.
I0428 21:19:32.742022 25678 net.cpp:198] relu3 needs backward computation.
I0428 21:19:32.742025 25678 net.cpp:198] conv3 needs backward computation.
I0428 21:19:32.742028 25678 net.cpp:198] pool2 needs backward computation.
I0428 21:19:32.742031 25678 net.cpp:198] norm2 needs backward computation.
I0428 21:19:32.742033 25678 net.cpp:198] relu2 needs backward computation.
I0428 21:19:32.742036 25678 net.cpp:198] conv2 needs backward computation.
I0428 21:19:32.742039 25678 net.cpp:198] pool1 needs backward computation.
I0428 21:19:32.742043 25678 net.cpp:198] norm1 needs backward computation.
I0428 21:19:32.742045 25678 net.cpp:198] relu1 needs backward computation.
I0428 21:19:32.742048 25678 net.cpp:198] conv1 needs backward computation.
I0428 21:19:32.742050 25678 net.cpp:200] train-data does not need backward computation.
I0428 21:19:32.742053 25678 net.cpp:242] This network produces output loss
I0428 21:19:32.742070 25678 net.cpp:255] Network initialization done.
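
The "Top shape" and "Memory required for data" figures above follow from simple arithmetic: each Convolution output side is (input + 2*pad - kernel) / stride + 1 (Pooling rounds the division up instead, though it is exact for every layer here), and the running memory counter is the cumulative element count of all top blobs times 4 bytes (float32). A small sketch, assuming those standard Caffe shape rules:

def out_side(in_side, kernel, stride=1, pad=0):
    # spatial output side for a Convolution layer (floor division)
    return (in_side + 2 * pad - kernel) // stride + 1

print(out_side(227, kernel=11, stride=4))   # 55 -> "Top shape: 128 96 55 55" (conv1)
print(out_side(55, kernel=3, stride=2))     # 27 -> "Top shape: 128 96 27 27" (pool1)

# memory after the data layer: (data + label) elements * 4 bytes per float
print((128 * 3 * 227 * 227 + 128) * 4)      # 79149056, as logged for train-data
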
I0428 21:19:32.742575 25678 solver.cpp:172] Creating test net (#0) specified by net file: train_val.prototxt
I0428 21:19:32.742614 25678 net.cpp:294] The NetState phase (1) differed from the phase (0) specified by a rule in layer train-data
I0428 21:19:32.742750 25678 net.cpp:51] Initializing net from parameters:
state {
phase: TEST
}
layer {
name: "val-data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 227
mean_file: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/mean.binaryproto"
}
data_param {
source: "/mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/val_db"
batch_size: 32
backend: LMDB
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "norm1"
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool1"
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "norm2"
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "pool2"
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
}
layer {
name: "fc6"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value: 0.1
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 196
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value: 0
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
}
I0428 21:19:32.742861 25678 layer_factory.hpp:77] Creating layer val-data
I0428 21:19:32.765134 25678 db_lmdb.cpp:35] Opened lmdb /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/val_db
I0428 21:19:32.959810 25678 net.cpp:84] Creating Layer val-data
I0428 21:19:32.959839 25678 net.cpp:380] val-data -> data
I0428 21:19:32.959851 25678 net.cpp:380] val-data -> label
I0428 21:19:32.959859 25678 data_transformer.cpp:25] Loading mean file from: /mnt/bigdisk/DIGITS-AMB-2/digits/jobs/20210419-113214-d311/mean.binaryproto
I0428 21:19:33.041352 25678 data_layer.cpp:45] output data size: 32,3,227,227
I0428 21:19:33.086179 25678 net.cpp:122] Setting up val-data
I0428 21:19:33.086199 25678 net.cpp:129] Top shape: 32 3 227 227 (4946784)
I0428 21:19:33.086205 25678 net.cpp:129] Top shape: 32 (32)
I0428 21:19:33.086207 25678 net.cpp:137] Memory required for data: 19787264
I0428 21:19:33.086212 25678 layer_factory.hpp:77] Creating layer label_val-data_1_split
I0428 21:19:33.086225 25678 net.cpp:84] Creating Layer label_val-data_1_split
I0428 21:19:33.086228 25678 net.cpp:406] label_val-data_1_split <- label
I0428 21:19:33.086236 25678 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_0
I0428 21:19:33.086246 25678 net.cpp:380] label_val-data_1_split -> label_val-data_1_split_1
I0428 21:19:33.086338 25678 net.cpp:122] Setting up label_val-data_1_split
I0428 21:19:33.086345 25678 net.cpp:129] Top shape: 32 (32)
I0428 21:19:33.086349 25678 net.cpp:129] Top shape: 32 (32)
I0428 21:19:33.086351 25678 net.cpp:137] Memory required for data: 19787520
I0428 21:19:33.086354 25678 layer_factory.hpp:77] Creating layer conv1
I0428 21:19:33.086365 25678 net.cpp:84] Creating Layer conv1
I0428 21:19:33.086369 25678 net.cpp:406] conv1 <- data
I0428 21:19:33.086374 25678 net.cpp:380] conv1 -> conv1
I0428 21:19:33.089615 25678 net.cpp:122] Setting up conv1
I0428 21:19:33.089628 25678 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0428 21:19:33.089632 25678 net.cpp:137] Memory required for data: 56958720
I0428 21:19:33.089641 25678 layer_factory.hpp:77] Creating layer relu1
I0428 21:19:33.089649 25678 net.cpp:84] Creating Layer relu1
I0428 21:19:33.089655 25678 net.cpp:406] relu1 <- conv1
I0428 21:19:33.089660 25678 net.cpp:367] relu1 -> conv1 (in-place)
I0428 21:19:33.090018 25678 net.cpp:122] Setting up relu1
I0428 21:19:33.090030 25678 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0428 21:19:33.090034 25678 net.cpp:137] Memory required for data: 94129920
I0428 21:19:33.090036 25678 layer_factory.hpp:77] Creating layer norm1
I0428 21:19:33.090045 25678 net.cpp:84] Creating Layer norm1
I0428 21:19:33.090049 25678 net.cpp:406] norm1 <- conv1
I0428 21:19:33.090054 25678 net.cpp:380] norm1 -> norm1
I0428 21:19:33.090622 25678 net.cpp:122] Setting up norm1
I0428 21:19:33.090633 25678 net.cpp:129] Top shape: 32 96 55 55 (9292800)
I0428 21:19:33.090636 25678 net.cpp:137] Memory required for data: 131301120
I0428 21:19:33.090641 25678 layer_factory.hpp:77] Creating layer pool1
I0428 21:19:33.090647 25678 net.cpp:84] Creating Layer pool1
I0428 21:19:33.090651 25678 net.cpp:406] pool1 <- norm1
I0428 21:19:33.090656 25678 net.cpp:380] pool1 -> pool1
I0428 21:19:33.090682 25678 net.cpp:122] Setting up pool1
I0428 21:19:33.090687 25678 net.cpp:129] Top shape: 32 96 27 27 (2239488)
I0428 21:19:33.090690 25678 net.cpp:137] Memory required for data: 140259072
I0428 21:19:33.090693 25678 layer_factory.hpp:77] Creating layer conv2
I0428 21:19:33.090701 25678 net.cpp:84] Creating Layer conv2
I0428 21:19:33.090704 25678 net.cpp:406] conv2 <- pool1
I0428 21:19:33.090730 25678 net.cpp:380] conv2 -> conv2
I0428 21:19:33.099594 25678 net.cpp:122] Setting up conv2
I0428 21:19:33.099611 25678 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0428 21:19:33.099614 25678 net.cpp:137] Memory required for data: 164146944
I0428 21:19:33.099624 25678 layer_factory.hpp:77] Creating layer relu2
I0428 21:19:33.099632 25678 net.cpp:84] Creating Layer relu2
I0428 21:19:33.099635 25678 net.cpp:406] relu2 <- conv2
I0428 21:19:33.099642 25678 net.cpp:367] relu2 -> conv2 (in-place)
I0428 21:19:33.100261 25678 net.cpp:122] Setting up relu2
I0428 21:19:33.100272 25678 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0428 21:19:33.100275 25678 net.cpp:137] Memory required for data: 188034816
I0428 21:19:33.100278 25678 layer_factory.hpp:77] Creating layer norm2
I0428 21:19:33.100289 25678 net.cpp:84] Creating Layer norm2
I0428 21:19:33.100292 25678 net.cpp:406] norm2 <- conv2
I0428 21:19:33.100298 25678 net.cpp:380] norm2 -> norm2
I0428 21:19:33.101125 25678 net.cpp:122] Setting up norm2
I0428 21:19:33.101136 25678 net.cpp:129] Top shape: 32 256 27 27 (5971968)
I0428 21:19:33.101140 25678 net.cpp:137] Memory required for data: 211922688
I0428 21:19:33.101142 25678 layer_factory.hpp:77] Creating layer pool2
I0428 21:19:33.101150 25678 net.cpp:84] Creating Layer pool2
I0428 21:19:33.101153 25678 net.cpp:406] pool2 <- norm2
I0428 21:19:33.101161 25678 net.cpp:380] pool2 -> pool2
I0428 21:19:33.101191 25678 net.cpp:122] Setting up pool2
I0428 21:19:33.101195 25678 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0428 21:19:33.101198 25678 net.cpp:137] Memory required for data: 217460480
I0428 21:19:33.101202 25678 layer_factory.hpp:77] Creating layer conv3
I0428 21:19:33.101212 25678 net.cpp:84] Creating Layer conv3
I0428 21:19:33.101214 25678 net.cpp:406] conv3 <- pool2
I0428 21:19:33.101222 25678 net.cpp:380] conv3 -> conv3
I0428 21:19:33.114195 25678 net.cpp:122] Setting up conv3
I0428 21:19:33.114212 25678 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 21:19:33.114215 25678 net.cpp:137] Memory required for data: 225767168
I0428 21:19:33.114226 25678 layer_factory.hpp:77] Creating layer relu3
I0428 21:19:33.114235 25678 net.cpp:84] Creating Layer relu3
I0428 21:19:33.114239 25678 net.cpp:406] relu3 <- conv3
I0428 21:19:33.114245 25678 net.cpp:367] relu3 -> conv3 (in-place)
I0428 21:19:33.114897 25678 net.cpp:122] Setting up relu3
I0428 21:19:33.114908 25678 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 21:19:33.114912 25678 net.cpp:137] Memory required for data: 234073856
I0428 21:19:33.114914 25678 layer_factory.hpp:77] Creating layer conv4
I0428 21:19:33.114926 25678 net.cpp:84] Creating Layer conv4
I0428 21:19:33.114929 25678 net.cpp:406] conv4 <- conv3
I0428 21:19:33.114938 25678 net.cpp:380] conv4 -> conv4
I0428 21:19:33.126451 25678 net.cpp:122] Setting up conv4
I0428 21:19:33.126468 25678 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 21:19:33.126472 25678 net.cpp:137] Memory required for data: 242380544
I0428 21:19:33.126480 25678 layer_factory.hpp:77] Creating layer relu4
I0428 21:19:33.126492 25678 net.cpp:84] Creating Layer relu4
I0428 21:19:33.126497 25678 net.cpp:406] relu4 <- conv4
I0428 21:19:33.126502 25678 net.cpp:367] relu4 -> conv4 (in-place)
I0428 21:19:33.126940 25678 net.cpp:122] Setting up relu4
I0428 21:19:33.126950 25678 net.cpp:129] Top shape: 32 384 13 13 (2076672)
I0428 21:19:33.126952 25678 net.cpp:137] Memory required for data: 250687232
I0428 21:19:33.126955 25678 layer_factory.hpp:77] Creating layer conv5
I0428 21:19:33.126966 25678 net.cpp:84] Creating Layer conv5
I0428 21:19:33.126971 25678 net.cpp:406] conv5 <- conv4
I0428 21:19:33.126977 25678 net.cpp:380] conv5 -> conv5
I0428 21:19:33.138010 25678 net.cpp:122] Setting up conv5
I0428 21:19:33.138029 25678 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0428 21:19:33.138032 25678 net.cpp:137] Memory required for data: 256225024
I0428 21:19:33.138047 25678 layer_factory.hpp:77] Creating layer relu5
I0428 21:19:33.138056 25678 net.cpp:84] Creating Layer relu5
I0428 21:19:33.138079 25678 net.cpp:406] relu5 <- conv5
I0428 21:19:33.138087 25678 net.cpp:367] relu5 -> conv5 (in-place)
I0428 21:19:33.138716 25678 net.cpp:122] Setting up relu5
I0428 21:19:33.138727 25678 net.cpp:129] Top shape: 32 256 13 13 (1384448)
I0428 21:19:33.138731 25678 net.cpp:137] Memory required for data: 261762816
I0428 21:19:33.138733 25678 layer_factory.hpp:77] Creating layer pool5
I0428 21:19:33.138746 25678 net.cpp:84] Creating Layer pool5
I0428 21:19:33.138749 25678 net.cpp:406] pool5 <- conv5
I0428 21:19:33.138754 25678 net.cpp:380] pool5 -> pool5
I0428 21:19:33.138794 25678 net.cpp:122] Setting up pool5
I0428 21:19:33.138800 25678 net.cpp:129] Top shape: 32 256 6 6 (294912)
I0428 21:19:33.138803 25678 net.cpp:137] Memory required for data: 262942464
I0428 21:19:33.138805 25678 layer_factory.hpp:77] Creating layer fc6
I0428 21:19:33.138813 25678 net.cpp:84] Creating Layer fc6
I0428 21:19:33.138816 25678 net.cpp:406] fc6 <- pool5
I0428 21:19:33.138823 25678 net.cpp:380] fc6 -> fc6
I0428 21:19:33.548929 25678 net.cpp:122] Setting up fc6
I0428 21:19:33.548949 25678 net.cpp:129] Top shape: 32 4096 (131072)
I0428 21:19:33.548952 25678 net.cpp:137] Memory required for data: 263466752
I0428 21:19:33.548961 25678 layer_factory.hpp:77] Creating layer relu6
I0428 21:19:33.548969 25678 net.cpp:84] Creating Layer relu6
I0428 21:19:33.548974 25678 net.cpp:406] relu6 <- fc6
I0428 21:19:33.548979 25678 net.cpp:367] relu6 -> fc6 (in-place)
I0428 21:19:33.549772 25678 net.cpp:122] Setting up relu6
I0428 21:19:33.549782 25678 net.cpp:129] Top shape: 32 4096 (131072)
I0428 21:19:33.549784 25678 net.cpp:137] Memory required for data: 263991040
I0428 21:19:33.549788 25678 layer_factory.hpp:77] Creating layer drop6
I0428 21:19:33.549796 25678 net.cpp:84] Creating Layer drop6
I0428 21:19:33.549799 25678 net.cpp:406] drop6 <- fc6
I0428 21:19:33.549803 25678 net.cpp:367] drop6 -> fc6 (in-place)
I0428 21:19:33.549829 25678 net.cpp:122] Setting up drop6
I0428 21:19:33.549832 25678 net.cpp:129] Top shape: 32 4096 (131072)
I0428 21:19:33.549835 25678 net.cpp:137] Memory required for data: 264515328
I0428 21:19:33.549839 25678 layer_factory.hpp:77] Creating layer fc7
I0428 21:19:33.549844 25678 net.cpp:84] Creating Layer fc7
I0428 21:19:33.549847 25678 net.cpp:406] fc7 <- fc6
I0428 21:19:33.549852 25678 net.cpp:380] fc7 -> fc7
I0428 21:19:33.709386 25678 net.cpp:122] Setting up fc7
I0428 21:19:33.709408 25678 net.cpp:129] Top shape: 32 4096 (131072)
I0428 21:19:33.709410 25678 net.cpp:137] Memory required for data: 265039616
I0428 21:19:33.709419 25678 layer_factory.hpp:77] Creating layer relu7
I0428 21:19:33.709427 25678 net.cpp:84] Creating Layer relu7
I0428 21:19:33.709431 25678 net.cpp:406] relu7 <- fc7
I0428 21:19:33.709436 25678 net.cpp:367] relu7 -> fc7 (in-place)
I0428 21:19:33.709937 25678 net.cpp:122] Setting up relu7
I0428 21:19:33.709946 25678 net.cpp:129] Top shape: 32 4096 (131072)
I0428 21:19:33.709949 25678 net.cpp:137] Memory required for data: 265563904
I0428 21:19:33.709952 25678 layer_factory.hpp:77] Creating layer drop7
I0428 21:19:33.709959 25678 net.cpp:84] Creating Layer drop7
I0428 21:19:33.709961 25678 net.cpp:406] drop7 <- fc7
I0428 21:19:33.709969 25678 net.cpp:367] drop7 -> fc7 (in-place)
I0428 21:19:33.709991 25678 net.cpp:122] Setting up drop7
I0428 21:19:33.709998 25678 net.cpp:129] Top shape: 32 4096 (131072)
I0428 21:19:33.710000 25678 net.cpp:137] Memory required for data: 266088192
I0428 21:19:33.710003 25678 layer_factory.hpp:77] Creating layer fc8
I0428 21:19:33.710009 25678 net.cpp:84] Creating Layer fc8
I0428 21:19:33.710012 25678 net.cpp:406] fc8 <- fc7
I0428 21:19:33.710019 25678 net.cpp:380] fc8 -> fc8
I0428 21:19:33.717861 25678 net.cpp:122] Setting up fc8
I0428 21:19:33.717876 25678 net.cpp:129] Top shape: 32 196 (6272)
I0428 21:19:33.717880 25678 net.cpp:137] Memory required for data: 266113280
I0428 21:19:33.717886 25678 layer_factory.hpp:77] Creating layer fc8_fc8_0_split
I0428 21:19:33.717893 25678 net.cpp:84] Creating Layer fc8_fc8_0_split
I0428 21:19:33.717897 25678 net.cpp:406] fc8_fc8_0_split <- fc8
I0428 21:19:33.717924 25678 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_0
I0428 21:19:33.717931 25678 net.cpp:380] fc8_fc8_0_split -> fc8_fc8_0_split_1
I0428 21:19:33.717964 25678 net.cpp:122] Setting up fc8_fc8_0_split
I0428 21:19:33.717969 25678 net.cpp:129] Top shape: 32 196 (6272)
I0428 21:19:33.717972 25678 net.cpp:129] Top shape: 32 196 (6272)
I0428 21:19:33.717974 25678 net.cpp:137] Memory required for data: 266163456
I0428 21:19:33.717978 25678 layer_factory.hpp:77] Creating layer accuracy
I0428 21:19:33.717983 25678 net.cpp:84] Creating Layer accuracy
I0428 21:19:33.717986 25678 net.cpp:406] accuracy <- fc8_fc8_0_split_0
I0428 21:19:33.717990 25678 net.cpp:406] accuracy <- label_val-data_1_split_0
I0428 21:19:33.717994 25678 net.cpp:380] accuracy -> accuracy
I0428 21:19:33.718001 25678 net.cpp:122] Setting up accuracy
I0428 21:19:33.718004 25678 net.cpp:129] Top shape: (1)
I0428 21:19:33.718008 25678 net.cpp:137] Memory required for data: 266163460
I0428 21:19:33.718009 25678 layer_factory.hpp:77] Creating layer loss
I0428 21:19:33.718014 25678 net.cpp:84] Creating Layer loss
I0428 21:19:33.718016 25678 net.cpp:406] loss <- fc8_fc8_0_split_1
I0428 21:19:33.718019 25678 net.cpp:406] loss <- label_val-data_1_split_1
I0428 21:19:33.718025 25678 net.cpp:380] loss -> loss
I0428 21:19:33.718031 25678 layer_factory.hpp:77] Creating layer loss
I0428 21:19:33.719230 25678 net.cpp:122] Setting up loss
I0428 21:19:33.719239 25678 net.cpp:129] Top shape: (1)
I0428 21:19:33.719242 25678 net.cpp:132] with loss weight 1
I0428 21:19:33.719251 25678 net.cpp:137] Memory required for data: 266163464
I0428 21:19:33.719254 25678 net.cpp:198] loss needs backward computation.
I0428 21:19:33.719259 25678 net.cpp:200] accuracy does not need backward computation.
I0428 21:19:33.719262 25678 net.cpp:198] fc8_fc8_0_split needs backward computation.
I0428 21:19:33.719265 25678 net.cpp:198] fc8 needs backward computation.
I0428 21:19:33.719269 25678 net.cpp:198] drop7 needs backward computation.
I0428 21:19:33.719271 25678 net.cpp:198] relu7 needs backward computation.
I0428 21:19:33.719274 25678 net.cpp:198] fc7 needs backward computation.
I0428 21:19:33.719276 25678 net.cpp:198] drop6 needs backward computation.
I0428 21:19:33.719278 25678 net.cpp:198] relu6 needs backward computation.
I0428 21:19:33.719281 25678 net.cpp:198] fc6 needs backward computation.
I0428 21:19:33.719285 25678 net.cpp:198] pool5 needs backward computation.
I0428 21:19:33.719287 25678 net.cpp:198] relu5 needs backward computation.
I0428 21:19:33.719290 25678 net.cpp:198] conv5 needs backward computation.
I0428 21:19:33.719293 25678 net.cpp:198] relu4 needs backward computation.
I0428 21:19:33.719296 25678 net.cpp:198] conv4 needs backward computation.
I0428 21:19:33.719300 25678 net.cpp:198] relu3 needs backward computation.
I0428 21:19:33.719301 25678 net.cpp:198] conv3 needs backward computation.
I0428 21:19:33.719305 25678 net.cpp:198] pool2 needs backward computation.
I0428 21:19:33.719307 25678 net.cpp:198] norm2 needs backward computation.
I0428 21:19:33.719310 25678 net.cpp:198] relu2 needs backward computation.
I0428 21:19:33.719313 25678 net.cpp:198] conv2 needs backward computation.
I0428 21:19:33.719316 25678 net.cpp:198] pool1 needs backward computation.
I0428 21:19:33.719318 25678 net.cpp:198] norm1 needs backward computation.
I0428 21:19:33.719321 25678 net.cpp:198] relu1 needs backward computation.
I0428 21:19:33.719324 25678 net.cpp:198] conv1 needs backward computation.
I0428 21:19:33.719327 25678 net.cpp:200] label_val-data_1_split does not need backward computation.
I0428 21:19:33.719331 25678 net.cpp:200] val-data does not need backward computation.
I0428 21:19:33.719333 25678 net.cpp:242] This network produces output accuracy
I0428 21:19:33.719337 25678 net.cpp:242] This network produces output loss
I0428 21:19:33.719353 25678 net.cpp:255] Network initialization done.
I0428 21:19:33.719421 25678 solver.cpp:56] Solver scaffolding done.
I0428 21:19:33.719763 25678 caffe.cpp:248] Starting Optimization
I0428 21:19:33.719784 25678 solver.cpp:272] Solving
I0428 21:19:33.719787 25678 solver.cpp:273] Learning Rate Policy: sigmoid
I0428 21:19:33.721407 25678 solver.cpp:330] Iteration 0, Testing net (#0)
I0428 21:19:33.721417 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:19:33.799715 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:19:38.411484 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:19:38.458668 25678 solver.cpp:397] Test net output #0: accuracy = 0.00612745
I0428 21:19:38.458719 25678 solver.cpp:397] Test net output #1: loss = 5.2815 (* 1 = 5.2815 loss)
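
Both iteration-0 test numbers are consistent with an untrained 196-class classifier: the SoftmaxWithLoss value starts near ln(196) ≈ 5.278, and an accuracy of 0.00612745 corresponds to 10 correct predictions over the 51 * 32 = 1632 validation images scored per test pass (chance level would be 1/196 ≈ 0.0051). A quick check, assuming Caffe averages per-batch accuracy over test_iter batches of equal size:

import math
print(math.log(196))   # 5.2781... ~ expected initial softmax loss for 196 classes
print(10 / (51 * 32))  # 0.0061274... = the logged iteration-0 accuracy
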
I0428 21:19:38.556989 25678 solver.cpp:218] Iteration 0 (0 iter/s, 4.83716s/12 iters), loss = 5.26237
I0428 21:19:38.558512 25678 solver.cpp:237] Train net output #0: loss = 5.26237 (* 1 = 5.26237 loss)
I0428 21:19:38.558548 25678 sgd_solver.cpp:105] Iteration 0, lr = 0.007773
I0428 21:19:42.704954 25678 solver.cpp:218] Iteration 12 (2.89405 iter/s, 4.14644s/12 iters), loss = 5.29615
I0428 21:19:42.704996 25678 solver.cpp:237] Train net output #0: loss = 5.29615 (* 1 = 5.29615 loss)
I0428 21:19:42.705004 25678 sgd_solver.cpp:105] Iteration 12, lr = 0.0077679
I0428 21:19:47.834722 25678 solver.cpp:218] Iteration 24 (2.33931 iter/s, 5.12971s/12 iters), loss = 5.28565
I0428 21:19:47.834765 25678 solver.cpp:237] Train net output #0: loss = 5.28565 (* 1 = 5.28565 loss)
I0428 21:19:47.834774 25678 sgd_solver.cpp:105] Iteration 24, lr = 0.0077628
I0428 21:19:52.813623 25678 solver.cpp:218] Iteration 36 (2.41019 iter/s, 4.97885s/12 iters), loss = 5.29923
I0428 21:19:52.813665 25678 solver.cpp:237] Train net output #0: loss = 5.29923 (* 1 = 5.29923 loss)
I0428 21:19:52.813674 25678 sgd_solver.cpp:105] Iteration 36, lr = 0.00775769
I0428 21:19:58.076273 25678 solver.cpp:218] Iteration 48 (2.28024 iter/s, 5.2626s/12 iters), loss = 5.30939
I0428 21:19:58.076318 25678 solver.cpp:237] Train net output #0: loss = 5.30939 (* 1 = 5.30939 loss)
I0428 21:19:58.076326 25678 sgd_solver.cpp:105] Iteration 48, lr = 0.00775257
I0428 21:20:03.308701 25678 solver.cpp:218] Iteration 60 (2.29342 iter/s, 5.23237s/12 iters), loss = 5.2791
I0428 21:20:03.308815 25678 solver.cpp:237] Train net output #0: loss = 5.2791 (* 1 = 5.2791 loss)
I0428 21:20:03.308830 25678 sgd_solver.cpp:105] Iteration 60, lr = 0.00774744
I0428 21:20:08.288162 25678 solver.cpp:218] Iteration 72 (2.40995 iter/s, 4.97935s/12 iters), loss = 5.28277
I0428 21:20:08.288206 25678 solver.cpp:237] Train net output #0: loss = 5.28277 (* 1 = 5.28277 loss)
I0428 21:20:08.288213 25678 sgd_solver.cpp:105] Iteration 72, lr = 0.0077423
I0428 21:20:13.753893 25678 solver.cpp:218] Iteration 84 (2.19552 iter/s, 5.46568s/12 iters), loss = 5.28053
I0428 21:20:13.753937 25678 solver.cpp:237] Train net output #0: loss = 5.28053 (* 1 = 5.28053 loss)
I0428 21:20:13.753945 25678 sgd_solver.cpp:105] Iteration 84, lr = 0.00773716
I0428 21:20:18.930649 25678 solver.cpp:218] Iteration 96 (2.31807 iter/s, 5.17671s/12 iters), loss = 5.29096
I0428 21:20:18.930688 25678 solver.cpp:237] Train net output #0: loss = 5.29096 (* 1 = 5.29096 loss)
I0428 21:20:18.930696 25678 sgd_solver.cpp:105] Iteration 96, lr = 0.007732
I0428 21:20:21.148102 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:20:21.497334 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_102.caffemodel
I0428 21:20:24.683889 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_102.solverstate
I0428 21:20:27.119033 25678 solver.cpp:330] Iteration 102, Testing net (#0)
I0428 21:20:27.119060 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:20:31.820196 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:20:31.908972 25678 solver.cpp:397] Test net output #0: accuracy = 0.00551471
I0428 21:20:31.909010 25678 solver.cpp:397] Test net output #1: loss = 5.28691 (* 1 = 5.28691 loss)
I0428 21:20:33.658936 25678 solver.cpp:218] Iteration 108 (0.81476 iter/s, 14.7283s/12 iters), loss = 5.29406
I0428 21:20:33.659092 25678 solver.cpp:237] Train net output #0: loss = 5.29406 (* 1 = 5.29406 loss)
I0428 21:20:33.659102 25678 sgd_solver.cpp:105] Iteration 108, lr = 0.00772684
I0428 21:20:38.659301 25678 solver.cpp:218] Iteration 120 (2.3999 iter/s, 5.0002s/12 iters), loss = 5.28229
I0428 21:20:38.659343 25678 solver.cpp:237] Train net output #0: loss = 5.28229 (* 1 = 5.28229 loss)
I0428 21:20:38.659349 25678 sgd_solver.cpp:105] Iteration 120, lr = 0.00772167
I0428 21:20:43.631043 25678 solver.cpp:218] Iteration 132 (2.41367 iter/s, 4.97169s/12 iters), loss = 5.28393
I0428 21:20:43.631085 25678 solver.cpp:237] Train net output #0: loss = 5.28393 (* 1 = 5.28393 loss)
I0428 21:20:43.631093 25678 sgd_solver.cpp:105] Iteration 132, lr = 0.00771649
I0428 21:20:48.568228 25678 solver.cpp:218] Iteration 144 (2.43056 iter/s, 4.93713s/12 iters), loss = 5.2964
I0428 21:20:48.568271 25678 solver.cpp:237] Train net output #0: loss = 5.2964 (* 1 = 5.2964 loss)
I0428 21:20:48.568279 25678 sgd_solver.cpp:105] Iteration 144, lr = 0.00771131
I0428 21:20:53.467175 25678 solver.cpp:218] Iteration 156 (2.44953 iter/s, 4.89889s/12 iters), loss = 5.2851
I0428 21:20:53.467214 25678 solver.cpp:237] Train net output #0: loss = 5.2851 (* 1 = 5.2851 loss)
I0428 21:20:53.467222 25678 sgd_solver.cpp:105] Iteration 156, lr = 0.00770611
I0428 21:20:58.437062 25678 solver.cpp:218] Iteration 168 (2.41457 iter/s, 4.96983s/12 iters), loss = 5.28689
I0428 21:20:58.437108 25678 solver.cpp:237] Train net output #0: loss = 5.28689 (* 1 = 5.28689 loss)
I0428 21:20:58.437115 25678 sgd_solver.cpp:105] Iteration 168, lr = 0.00770091
I0428 21:21:03.416064 25678 solver.cpp:218] Iteration 180 (2.41015 iter/s, 4.97894s/12 iters), loss = 5.30201
I0428 21:21:03.416131 25678 solver.cpp:237] Train net output #0: loss = 5.30201 (* 1 = 5.30201 loss)
I0428 21:21:03.416144 25678 sgd_solver.cpp:105] Iteration 180, lr = 0.0076957
I0428 21:21:08.386520 25678 solver.cpp:218] Iteration 192 (2.4143 iter/s, 4.97039s/12 iters), loss = 5.2695
I0428 21:21:08.386622 25678 solver.cpp:237] Train net output #0: loss = 5.2695 (* 1 = 5.2695 loss)
I0428 21:21:08.386632 25678 sgd_solver.cpp:105] Iteration 192, lr = 0.00769048
I0428 21:21:12.288822 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:21:12.969208 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_204.caffemodel
I0428 21:21:17.607077 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_204.solverstate
I0428 21:21:21.013933 25678 solver.cpp:330] Iteration 204, Testing net (#0)
I0428 21:21:21.013952 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:21:25.670838 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:21:25.810997 25678 solver.cpp:397] Test net output #0: accuracy = 0.00857843
I0428 21:21:25.811049 25678 solver.cpp:397] Test net output #1: loss = 5.21659 (* 1 = 5.21659 loss)
I0428 21:21:25.908457 25678 solver.cpp:218] Iteration 204 (0.684859 iter/s, 17.5219s/12 iters), loss = 5.26392
I0428 21:21:25.908501 25678 solver.cpp:237] Train net output #0: loss = 5.26392 (* 1 = 5.26392 loss)
I0428 21:21:25.908509 25678 sgd_solver.cpp:105] Iteration 204, lr = 0.00768525
I0428 21:21:30.090422 25678 solver.cpp:218] Iteration 216 (2.8695 iter/s, 4.18191s/12 iters), loss = 5.22504
I0428 21:21:30.090466 25678 solver.cpp:237] Train net output #0: loss = 5.22504 (* 1 = 5.22504 loss)
I0428 21:21:30.090474 25678 sgd_solver.cpp:105] Iteration 216, lr = 0.00768001
I0428 21:21:35.006945 25678 solver.cpp:218] Iteration 228 (2.44077 iter/s, 4.91647s/12 iters), loss = 5.26057
I0428 21:21:35.006989 25678 solver.cpp:237] Train net output #0: loss = 5.26057 (* 1 = 5.26057 loss)
I0428 21:21:35.006996 25678 sgd_solver.cpp:105] Iteration 228, lr = 0.00767477
I0428 21:21:40.005973 25678 solver.cpp:218] Iteration 240 (2.40049 iter/s, 4.99898s/12 iters), loss = 5.12104
I0428 21:21:40.006094 25678 solver.cpp:237] Train net output #0: loss = 5.12104 (* 1 = 5.12104 loss)
I0428 21:21:40.006104 25678 sgd_solver.cpp:105] Iteration 240, lr = 0.00766951
I0428 21:21:45.067353 25678 solver.cpp:218] Iteration 252 (2.37095 iter/s, 5.06125s/12 iters), loss = 5.17375
I0428 21:21:45.067394 25678 solver.cpp:237] Train net output #0: loss = 5.17375 (* 1 = 5.17375 loss)
I0428 21:21:45.067402 25678 sgd_solver.cpp:105] Iteration 252, lr = 0.00766425
I0428 21:21:50.057668 25678 solver.cpp:218] Iteration 264 (2.40468 iter/s, 4.99026s/12 iters), loss = 5.16057
I0428 21:21:50.057716 25678 solver.cpp:237] Train net output #0: loss = 5.16057 (* 1 = 5.16057 loss)
I0428 21:21:50.057725 25678 sgd_solver.cpp:105] Iteration 264, lr = 0.00765898
I0428 21:21:55.047379 25678 solver.cpp:218] Iteration 276 (2.40497 iter/s, 4.98966s/12 iters), loss = 5.23742
I0428 21:21:55.047417 25678 solver.cpp:237] Train net output #0: loss = 5.23742 (* 1 = 5.23742 loss)
I0428 21:21:55.047425 25678 sgd_solver.cpp:105] Iteration 276, lr = 0.00765371
I0428 21:22:00.033577 25678 solver.cpp:218] Iteration 288 (2.40666 iter/s, 4.98615s/12 iters), loss = 5.19424
I0428 21:22:00.033617 25678 solver.cpp:237] Train net output #0: loss = 5.19424 (* 1 = 5.19424 loss)
I0428 21:22:00.033625 25678 sgd_solver.cpp:105] Iteration 288, lr = 0.00764842
I0428 21:22:04.973552 25678 solver.cpp:218] Iteration 300 (2.42918 iter/s, 4.93993s/12 iters), loss = 5.12123
I0428 21:22:04.973593 25678 solver.cpp:237] Train net output #0: loss = 5.12123 (* 1 = 5.12123 loss)
I0428 21:22:04.973603 25678 sgd_solver.cpp:105] Iteration 300, lr = 0.00764313
I0428 21:22:05.957769 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:22:07.001407 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_306.caffemodel
I0428 21:22:11.638350 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_306.solverstate
I0428 21:22:14.723425 25678 solver.cpp:330] Iteration 306, Testing net (#0)
I0428 21:22:14.723443 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:22:19.063474 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:22:19.229009 25678 solver.cpp:397] Test net output #0: accuracy = 0.0110294
I0428 21:22:19.229044 25678 solver.cpp:397] Test net output #1: loss = 5.1525 (* 1 = 5.1525 loss)
I0428 21:22:21.149619 25678 solver.cpp:218] Iteration 312 (0.741837 iter/s, 16.1761s/12 iters), loss = 5.17279
I0428 21:22:21.149662 25678 solver.cpp:237] Train net output #0: loss = 5.17279 (* 1 = 5.17279 loss)
I0428 21:22:21.149670 25678 sgd_solver.cpp:105] Iteration 312, lr = 0.00763782
I0428 21:22:26.149955 25678 solver.cpp:218] Iteration 324 (2.39986 iter/s, 5.00028s/12 iters), loss = 5.11593
I0428 21:22:26.149996 25678 solver.cpp:237] Train net output #0: loss = 5.11593 (* 1 = 5.11593 loss)
I0428 21:22:26.150004 25678 sgd_solver.cpp:105] Iteration 324, lr = 0.00763251
I0428 21:22:31.152563 25678 solver.cpp:218] Iteration 336 (2.39877 iter/s, 5.00256s/12 iters), loss = 5.12173
I0428 21:22:31.152612 25678 solver.cpp:237] Train net output #0: loss = 5.12173 (* 1 = 5.12173 loss)
I0428 21:22:31.152623 25678 sgd_solver.cpp:105] Iteration 336, lr = 0.00762719
I0428 21:22:36.124389 25678 solver.cpp:218] Iteration 348 (2.41363 iter/s, 4.97177s/12 iters), loss = 5.13588
I0428 21:22:36.124434 25678 solver.cpp:237] Train net output #0: loss = 5.13588 (* 1 = 5.13588 loss)
I0428 21:22:36.124444 25678 sgd_solver.cpp:105] Iteration 348, lr = 0.00762187
I0428 21:22:41.098584 25678 solver.cpp:218] Iteration 360 (2.41248 iter/s, 4.97414s/12 iters), loss = 5.10006
I0428 21:22:41.098639 25678 solver.cpp:237] Train net output #0: loss = 5.10006 (* 1 = 5.10006 loss)
I0428 21:22:41.098649 25678 sgd_solver.cpp:105] Iteration 360, lr = 0.00761653
I0428 21:22:46.090620 25678 solver.cpp:218] Iteration 372 (2.40386 iter/s, 4.99197s/12 iters), loss = 5.05192
I0428 21:22:46.090732 25678 solver.cpp:237] Train net output #0: loss = 5.05192 (* 1 = 5.05192 loss)
I0428 21:22:46.090741 25678 sgd_solver.cpp:105] Iteration 372, lr = 0.00761119
I0428 21:22:50.984438 25678 solver.cpp:218] Iteration 384 (2.45213 iter/s, 4.8937s/12 iters), loss = 5.09393
I0428 21:22:50.984479 25678 solver.cpp:237] Train net output #0: loss = 5.09393 (* 1 = 5.09393 loss)
I0428 21:22:50.984488 25678 sgd_solver.cpp:105] Iteration 384, lr = 0.00760584
I0428 21:22:55.979244 25678 solver.cpp:218] Iteration 396 (2.40252 iter/s, 4.99476s/12 iters), loss = 5.14519
I0428 21:22:55.979287 25678 solver.cpp:237] Train net output #0: loss = 5.14519 (* 1 = 5.14519 loss)
I0428 21:22:55.979295 25678 sgd_solver.cpp:105] Iteration 396, lr = 0.00760048
I0428 21:22:59.090941 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:23:00.498906 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_408.caffemodel
I0428 21:23:03.609530 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_408.solverstate
I0428 21:23:06.548506 25678 solver.cpp:330] Iteration 408, Testing net (#0)
I0428 21:23:06.548529 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:23:11.136025 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:23:11.366497 25678 solver.cpp:397] Test net output #0: accuracy = 0.0128676
I0428 21:23:11.366533 25678 solver.cpp:397] Test net output #1: loss = 5.08549 (* 1 = 5.08549 loss)
I0428 21:23:11.463479 25678 solver.cpp:218] Iteration 408 (0.774983 iter/s, 15.4842s/12 iters), loss = 5.00455
I0428 21:23:11.463523 25678 solver.cpp:237] Train net output #0: loss = 5.00455 (* 1 = 5.00455 loss)
I0428 21:23:11.463531 25678 sgd_solver.cpp:105] Iteration 408, lr = 0.00759511
I0428 21:23:15.665180 25678 solver.cpp:218] Iteration 420 (2.85602 iter/s, 4.20165s/12 iters), loss = 5.04889
I0428 21:23:15.665220 25678 solver.cpp:237] Train net output #0: loss = 5.04889 (* 1 = 5.04889 loss)
I0428 21:23:15.665227 25678 sgd_solver.cpp:105] Iteration 420, lr = 0.00758973
I0428 21:23:20.622265 25678 solver.cpp:218] Iteration 432 (2.4208 iter/s, 4.95704s/12 iters), loss = 5.07372
I0428 21:23:20.622352 25678 solver.cpp:237] Train net output #0: loss = 5.07372 (* 1 = 5.07372 loss)
I0428 21:23:20.622361 25678 sgd_solver.cpp:105] Iteration 432, lr = 0.00758435
I0428 21:23:25.609344 25678 solver.cpp:218] Iteration 444 (2.40626 iter/s, 4.98699s/12 iters), loss = 5.10313
I0428 21:23:25.609387 25678 solver.cpp:237] Train net output #0: loss = 5.10313 (* 1 = 5.10313 loss)
I0428 21:23:25.609395 25678 sgd_solver.cpp:105] Iteration 444, lr = 0.00757896
I0428 21:23:30.590241 25678 solver.cpp:218] Iteration 456 (2.40923 iter/s, 4.98085s/12 iters), loss = 5.0591
I0428 21:23:30.590284 25678 solver.cpp:237] Train net output #0: loss = 5.0591 (* 1 = 5.0591 loss)
I0428 21:23:30.590292 25678 sgd_solver.cpp:105] Iteration 456, lr = 0.00757355
I0428 21:23:35.524721 25678 solver.cpp:218] Iteration 468 (2.43189 iter/s, 4.93443s/12 iters), loss = 5.08018
I0428 21:23:35.524765 25678 solver.cpp:237] Train net output #0: loss = 5.08018 (* 1 = 5.08018 loss)
I0428 21:23:35.524773 25678 sgd_solver.cpp:105] Iteration 468, lr = 0.00756815
I0428 21:23:40.491209 25678 solver.cpp:218] Iteration 480 (2.41622 iter/s, 4.96643s/12 iters), loss = 4.97432
I0428 21:23:40.491271 25678 solver.cpp:237] Train net output #0: loss = 4.97432 (* 1 = 4.97432 loss)
I0428 21:23:40.491284 25678 sgd_solver.cpp:105] Iteration 480, lr = 0.00756273
I0428 21:23:45.470165 25678 solver.cpp:218] Iteration 492 (2.41018 iter/s, 4.97889s/12 iters), loss = 5.03099
I0428 21:23:45.470207 25678 solver.cpp:237] Train net output #0: loss = 5.03099 (* 1 = 5.03099 loss)
I0428 21:23:45.470216 25678 sgd_solver.cpp:105] Iteration 492, lr = 0.0075573
I0428 21:23:50.449944 25678 solver.cpp:218] Iteration 504 (2.40977 iter/s, 4.97973s/12 iters), loss = 5.08953
I0428 21:23:50.449987 25678 solver.cpp:237] Train net output #0: loss = 5.08953 (* 1 = 5.08953 loss)
I0428 21:23:50.449995 25678 sgd_solver.cpp:105] Iteration 504, lr = 0.00755187
I0428 21:23:50.695816 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:23:52.486944 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_510.caffemodel
I0428 21:23:55.593178 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_510.solverstate
I0428 21:23:58.158638 25678 solver.cpp:330] Iteration 510, Testing net (#0)
I0428 21:23:58.158670 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:24:02.667300 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:24:02.933302 25678 solver.cpp:397] Test net output #0: accuracy = 0.0196078
I0428 21:24:02.933348 25678 solver.cpp:397] Test net output #1: loss = 5.04636 (* 1 = 5.04636 loss)
I0428 21:24:04.829771 25678 solver.cpp:218] Iteration 516 (0.834504 iter/s, 14.3798s/12 iters), loss = 5.0316
I0428 21:24:04.829813 25678 solver.cpp:237] Train net output #0: loss = 5.0316 (* 1 = 5.0316 loss)
I0428 21:24:04.829820 25678 sgd_solver.cpp:105] Iteration 516, lr = 0.00754643
I0428 21:24:09.788139 25678 solver.cpp:218] Iteration 528 (2.42018 iter/s, 4.95832s/12 iters), loss = 5.05365
I0428 21:24:09.788178 25678 solver.cpp:237] Train net output #0: loss = 5.05365 (* 1 = 5.05365 loss)
I0428 21:24:09.788187 25678 sgd_solver.cpp:105] Iteration 528, lr = 0.00754098
I0428 21:24:14.774227 25678 solver.cpp:218] Iteration 540 (2.40672 iter/s, 4.98604s/12 iters), loss = 5.09461
I0428 21:24:14.774269 25678 solver.cpp:237] Train net output #0: loss = 5.09461 (* 1 = 5.09461 loss)
I0428 21:24:14.774276 25678 sgd_solver.cpp:105] Iteration 540, lr = 0.00753552
I0428 21:24:19.796352 25678 solver.cpp:218] Iteration 552 (2.38945 iter/s, 5.02207s/12 iters), loss = 4.91775
I0428 21:24:19.796396 25678 solver.cpp:237] Train net output #0: loss = 4.91775 (* 1 = 4.91775 loss)
I0428 21:24:19.796406 25678 sgd_solver.cpp:105] Iteration 552, lr = 0.00753005
I0428 21:24:24.793119 25678 solver.cpp:218] Iteration 564 (2.40158 iter/s, 4.99672s/12 iters), loss = 4.96759
I0428 21:24:24.793210 25678 solver.cpp:237] Train net output #0: loss = 4.96759 (* 1 = 4.96759 loss)
I0428 21:24:24.793218 25678 sgd_solver.cpp:105] Iteration 564, lr = 0.00752458
I0428 21:24:29.798395 25678 solver.cpp:218] Iteration 576 (2.39751 iter/s, 5.00518s/12 iters), loss = 5.07755
I0428 21:24:29.798437 25678 solver.cpp:237] Train net output #0: loss = 5.07755 (* 1 = 5.07755 loss)
I0428 21:24:29.798445 25678 sgd_solver.cpp:105] Iteration 576, lr = 0.0075191
I0428 21:24:34.728363 25678 solver.cpp:218] Iteration 588 (2.43412 iter/s, 4.92992s/12 iters), loss = 5.00455
I0428 21:24:34.728417 25678 solver.cpp:237] Train net output #0: loss = 5.00455 (* 1 = 5.00455 loss)
I0428 21:24:34.728427 25678 sgd_solver.cpp:105] Iteration 588, lr = 0.00751361
I0428 21:24:39.718497 25678 solver.cpp:218] Iteration 600 (2.40477 iter/s, 4.99007s/12 iters), loss = 5.01536
I0428 21:24:39.718536 25678 solver.cpp:237] Train net output #0: loss = 5.01536 (* 1 = 5.01536 loss)
I0428 21:24:39.718544 25678 sgd_solver.cpp:105] Iteration 600, lr = 0.00750811
I0428 21:24:42.096312 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:24:44.254314 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_612.caffemodel
I0428 21:24:47.347823 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_612.solverstate
I0428 21:24:49.888868 25678 solver.cpp:330] Iteration 612, Testing net (#0)
I0428 21:24:49.888890 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:24:54.084177 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:24:54.378872 25678 solver.cpp:397] Test net output #0: accuracy = 0.0220588
I0428 21:24:54.378908 25678 solver.cpp:397] Test net output #1: loss = 4.97545 (* 1 = 4.97545 loss)
I0428 21:24:54.475785 25678 solver.cpp:218] Iteration 612 (0.813159 iter/s, 14.7573s/12 iters), loss = 4.92939
I0428 21:24:54.475834 25678 solver.cpp:237] Train net output #0: loss = 4.92939 (* 1 = 4.92939 loss)
I0428 21:24:54.475844 25678 sgd_solver.cpp:105] Iteration 612, lr = 0.0075026
I0428 21:24:58.892971 25678 solver.cpp:218] Iteration 624 (2.7167 iter/s, 4.41713s/12 iters), loss = 5.01716
I0428 21:24:58.893115 25678 solver.cpp:237] Train net output #0: loss = 5.01716 (* 1 = 5.01716 loss)
I0428 21:24:58.893123 25678 sgd_solver.cpp:105] Iteration 624, lr = 0.00749709
I0428 21:25:04.051805 25678 solver.cpp:218] Iteration 636 (2.32617 iter/s, 5.15868s/12 iters), loss = 4.9462
I0428 21:25:04.051851 25678 solver.cpp:237] Train net output #0: loss = 4.9462 (* 1 = 4.9462 loss)
I0428 21:25:04.051859 25678 sgd_solver.cpp:105] Iteration 636, lr = 0.00749156
I0428 21:25:09.066753 25678 solver.cpp:218] Iteration 648 (2.39287 iter/s, 5.01489s/12 iters), loss = 4.94684
I0428 21:25:09.066799 25678 solver.cpp:237] Train net output #0: loss = 4.94684 (* 1 = 4.94684 loss)
I0428 21:25:09.066808 25678 sgd_solver.cpp:105] Iteration 648, lr = 0.00748603
I0428 21:25:14.065037 25678 solver.cpp:218] Iteration 660 (2.40085 iter/s, 4.99823s/12 iters), loss = 4.97647
I0428 21:25:14.065079 25678 solver.cpp:237] Train net output #0: loss = 4.97647 (* 1 = 4.97647 loss)
I0428 21:25:14.065088 25678 sgd_solver.cpp:105] Iteration 660, lr = 0.00748049
I0428 21:25:19.086989 25678 solver.cpp:218] Iteration 672 (2.38953 iter/s, 5.0219s/12 iters), loss = 4.82095
I0428 21:25:19.087033 25678 solver.cpp:237] Train net output #0: loss = 4.82095 (* 1 = 4.82095 loss)
I0428 21:25:19.087042 25678 sgd_solver.cpp:105] Iteration 672, lr = 0.00747495
I0428 21:25:24.119990 25678 solver.cpp:218] Iteration 684 (2.38429 iter/s, 5.03295s/12 iters), loss = 4.95757
I0428 21:25:24.120031 25678 solver.cpp:237] Train net output #0: loss = 4.95757 (* 1 = 4.95757 loss)
I0428 21:25:24.120039 25678 sgd_solver.cpp:105] Iteration 684, lr = 0.00746939
I0428 21:25:24.481426 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:25:29.170748 25678 solver.cpp:218] Iteration 696 (2.3759 iter/s, 5.05071s/12 iters), loss = 4.83143
I0428 21:25:29.170837 25678 solver.cpp:237] Train net output #0: loss = 4.83143 (* 1 = 4.83143 loss)
I0428 21:25:29.170850 25678 sgd_solver.cpp:105] Iteration 696, lr = 0.00746383
I0428 21:25:33.829030 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:25:34.218097 25678 solver.cpp:218] Iteration 708 (2.37753 iter/s, 5.04725s/12 iters), loss = 4.84624
I0428 21:25:34.218144 25678 solver.cpp:237] Train net output #0: loss = 4.84624 (* 1 = 4.84624 loss)
I0428 21:25:34.218152 25678 sgd_solver.cpp:105] Iteration 708, lr = 0.00745825
I0428 21:25:36.277667 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_714.caffemodel
I0428 21:25:40.276362 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_714.solverstate
I0428 21:25:42.655437 25678 solver.cpp:330] Iteration 714, Testing net (#0)
I0428 21:25:42.655457 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:25:47.077750 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:25:47.408881 25678 solver.cpp:397] Test net output #0: accuracy = 0.0269608
I0428 21:25:47.408931 25678 solver.cpp:397] Test net output #1: loss = 4.91498 (* 1 = 4.91498 loss)
I0428 21:25:49.248090 25678 solver.cpp:218] Iteration 720 (0.798405 iter/s, 15.03s/12 iters), loss = 4.79377
I0428 21:25:49.248136 25678 solver.cpp:237] Train net output #0: loss = 4.79377 (* 1 = 4.79377 loss)
I0428 21:25:49.248145 25678 sgd_solver.cpp:105] Iteration 720, lr = 0.00745268
I0428 21:25:54.221334 25678 solver.cpp:218] Iteration 732 (2.41294 iter/s, 4.97319s/12 iters), loss = 4.86498
I0428 21:25:54.221380 25678 solver.cpp:237] Train net output #0: loss = 4.86498 (* 1 = 4.86498 loss)
I0428 21:25:54.221388 25678 sgd_solver.cpp:105] Iteration 732, lr = 0.00744709
I0428 21:25:59.155922 25678 solver.cpp:218] Iteration 744 (2.43184 iter/s, 4.93453s/12 iters), loss = 4.74293
I0428 21:25:59.155962 25678 solver.cpp:237] Train net output #0: loss = 4.74293 (* 1 = 4.74293 loss)
I0428 21:25:59.155970 25678 sgd_solver.cpp:105] Iteration 744, lr = 0.00744149
I0428 21:26:04.206827 25678 solver.cpp:218] Iteration 756 (2.37583 iter/s, 5.05086s/12 iters), loss = 4.85969
I0428 21:26:04.206974 25678 solver.cpp:237] Train net output #0: loss = 4.85969 (* 1 = 4.85969 loss)
I0428 21:26:04.206984 25678 sgd_solver.cpp:105] Iteration 756, lr = 0.00743589
I0428 21:26:09.204241 25678 solver.cpp:218] Iteration 768 (2.40132 iter/s, 4.99726s/12 iters), loss = 4.7992
I0428 21:26:09.204289 25678 solver.cpp:237] Train net output #0: loss = 4.7992 (* 1 = 4.7992 loss)
I0428 21:26:09.204298 25678 sgd_solver.cpp:105] Iteration 768, lr = 0.00743028
I0428 21:26:14.155660 25678 solver.cpp:218] Iteration 780 (2.42357 iter/s, 4.95137s/12 iters), loss = 4.80152
I0428 21:26:14.155701 25678 solver.cpp:237] Train net output #0: loss = 4.80152 (* 1 = 4.80152 loss)
I0428 21:26:14.155710 25678 sgd_solver.cpp:105] Iteration 780, lr = 0.00742466
I0428 21:26:19.148640 25678 solver.cpp:218] Iteration 792 (2.4034 iter/s, 4.99293s/12 iters), loss = 4.80467
I0428 21:26:19.148695 25678 solver.cpp:237] Train net output #0: loss = 4.80467 (* 1 = 4.80467 loss)
I0428 21:26:19.148707 25678 sgd_solver.cpp:105] Iteration 792, lr = 0.00741903
I0428 21:26:24.148645 25678 solver.cpp:218] Iteration 804 (2.40002 iter/s, 4.99995s/12 iters), loss = 4.94273
I0428 21:26:24.148689 25678 solver.cpp:237] Train net output #0: loss = 4.94273 (* 1 = 4.94273 loss)
I0428 21:26:24.148697 25678 sgd_solver.cpp:105] Iteration 804, lr = 0.00741339
I0428 21:26:25.891165 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:26:28.689550 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_816.caffemodel
I0428 21:26:32.996932 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_816.solverstate
I0428 21:26:36.779186 25678 solver.cpp:330] Iteration 816, Testing net (#0)
I0428 21:26:36.779242 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:26:40.972683 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:26:41.337575 25678 solver.cpp:397] Test net output #0: accuracy = 0.0416667
I0428 21:26:41.337610 25678 solver.cpp:397] Test net output #1: loss = 4.82587 (* 1 = 4.82587 loss)
I0428 21:26:41.435238 25678 solver.cpp:218] Iteration 816 (0.69418 iter/s, 17.2866s/12 iters), loss = 4.85455
I0428 21:26:41.435290 25678 solver.cpp:237] Train net output #0: loss = 4.85455 (* 1 = 4.85455 loss)
I0428 21:26:41.435300 25678 sgd_solver.cpp:105] Iteration 816, lr = 0.00740775
I0428 21:26:45.612639 25678 solver.cpp:218] Iteration 828 (2.87264 iter/s, 4.17734s/12 iters), loss = 4.84276
I0428 21:26:45.612681 25678 solver.cpp:237] Train net output #0: loss = 4.84276 (* 1 = 4.84276 loss)
I0428 21:26:45.612690 25678 sgd_solver.cpp:105] Iteration 828, lr = 0.0074021
I0428 21:26:50.509768 25678 solver.cpp:218] Iteration 840 (2.45044 iter/s, 4.89707s/12 iters), loss = 4.84859
I0428 21:26:50.509824 25678 solver.cpp:237] Train net output #0: loss = 4.84859 (* 1 = 4.84859 loss)
I0428 21:26:50.509837 25678 sgd_solver.cpp:105] Iteration 840, lr = 0.00739644
I0428 21:26:55.515744 25678 solver.cpp:218] Iteration 852 (2.39716 iter/s, 5.00592s/12 iters), loss = 4.92197
I0428 21:26:55.515784 25678 solver.cpp:237] Train net output #0: loss = 4.92197 (* 1 = 4.92197 loss)
I0428 21:26:55.515791 25678 sgd_solver.cpp:105] Iteration 852, lr = 0.00739077
I0428 21:27:00.515283 25678 solver.cpp:218] Iteration 864 (2.40024 iter/s, 4.99949s/12 iters), loss = 4.79391
I0428 21:27:00.515324 25678 solver.cpp:237] Train net output #0: loss = 4.79391 (* 1 = 4.79391 loss)
I0428 21:27:00.515331 25678 sgd_solver.cpp:105] Iteration 864, lr = 0.00738509
I0428 21:27:05.460707 25678 solver.cpp:218] Iteration 876 (2.42651 iter/s, 4.94538s/12 iters), loss = 4.96927
I0428 21:27:05.460750 25678 solver.cpp:237] Train net output #0: loss = 4.96927 (* 1 = 4.96927 loss)
I0428 21:27:05.460757 25678 sgd_solver.cpp:105] Iteration 876, lr = 0.00737941
I0428 21:27:10.461503 25678 solver.cpp:218] Iteration 888 (2.39964 iter/s, 5.00075s/12 iters), loss = 4.86616
I0428 21:27:10.461632 25678 solver.cpp:237] Train net output #0: loss = 4.86616 (* 1 = 4.86616 loss)
I0428 21:27:10.461640 25678 sgd_solver.cpp:105] Iteration 888, lr = 0.00737372
I0428 21:27:15.447310 25678 solver.cpp:218] Iteration 900 (2.40689 iter/s, 4.98568s/12 iters), loss = 4.74726
I0428 21:27:15.447355 25678 solver.cpp:237] Train net output #0: loss = 4.74726 (* 1 = 4.74726 loss)
I0428 21:27:15.447363 25678 sgd_solver.cpp:105] Iteration 900, lr = 0.00736802
I0428 21:27:19.323832 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:27:20.443816 25678 solver.cpp:218] Iteration 912 (2.4017 iter/s, 4.99645s/12 iters), loss = 4.76072
I0428 21:27:20.443858 25678 solver.cpp:237] Train net output #0: loss = 4.76072 (* 1 = 4.76072 loss)
I0428 21:27:20.443866 25678 sgd_solver.cpp:105] Iteration 912, lr = 0.00736231
I0428 21:27:22.465332 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_918.caffemodel
I0428 21:27:25.593797 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_918.solverstate
I0428 21:27:27.962738 25678 solver.cpp:330] Iteration 918, Testing net (#0)
I0428 21:27:27.962765 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:27:32.404142 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:27:32.848834 25678 solver.cpp:397] Test net output #0: accuracy = 0.0435049
I0428 21:27:32.848865 25678 solver.cpp:397] Test net output #1: loss = 4.71447 (* 1 = 4.71447 loss)
I0428 21:27:34.682183 25678 solver.cpp:218] Iteration 924 (0.842795 iter/s, 14.2383s/12 iters), loss = 4.58855
I0428 21:27:34.682230 25678 solver.cpp:237] Train net output #0: loss = 4.58855 (* 1 = 4.58855 loss)
I0428 21:27:34.682238 25678 sgd_solver.cpp:105] Iteration 924, lr = 0.0073566
I0428 21:27:39.727458 25678 solver.cpp:218] Iteration 936 (2.37849 iter/s, 5.04522s/12 iters), loss = 4.81106
I0428 21:27:39.727497 25678 solver.cpp:237] Train net output #0: loss = 4.81106 (* 1 = 4.81106 loss)
I0428 21:27:39.727505 25678 sgd_solver.cpp:105] Iteration 936, lr = 0.00735087
I0428 21:27:44.692629 25678 solver.cpp:218] Iteration 948 (2.41686 iter/s, 4.96513s/12 iters), loss = 4.5468
I0428 21:27:44.692698 25678 solver.cpp:237] Train net output #0: loss = 4.5468 (* 1 = 4.5468 loss)
I0428 21:27:44.692706 25678 sgd_solver.cpp:105] Iteration 948, lr = 0.00734514
I0428 21:27:49.688022 25678 solver.cpp:218] Iteration 960 (2.40225 iter/s, 4.99532s/12 iters), loss = 4.66395
I0428 21:27:49.688069 25678 solver.cpp:237] Train net output #0: loss = 4.66395 (* 1 = 4.66395 loss)
I0428 21:27:49.688077 25678 sgd_solver.cpp:105] Iteration 960, lr = 0.0073394
I0428 21:27:54.670202 25678 solver.cpp:218] Iteration 972 (2.40861 iter/s, 4.98213s/12 iters), loss = 4.45701
I0428 21:27:54.670243 25678 solver.cpp:237] Train net output #0: loss = 4.45701 (* 1 = 4.45701 loss)
I0428 21:27:54.670251 25678 sgd_solver.cpp:105] Iteration 972, lr = 0.00733365
I0428 21:27:59.644603 25678 solver.cpp:218] Iteration 984 (2.41237 iter/s, 4.97435s/12 iters), loss = 4.50237
I0428 21:27:59.644644 25678 solver.cpp:237] Train net output #0: loss = 4.50237 (* 1 = 4.50237 loss)
I0428 21:27:59.644651 25678 sgd_solver.cpp:105] Iteration 984, lr = 0.0073279
I0428 21:28:04.603775 25678 solver.cpp:218] Iteration 996 (2.41978 iter/s, 4.95912s/12 iters), loss = 4.62041
I0428 21:28:04.603821 25678 solver.cpp:237] Train net output #0: loss = 4.62041 (* 1 = 4.62041 loss)
I0428 21:28:04.603829 25678 sgd_solver.cpp:105] Iteration 996, lr = 0.00732214
I0428 21:28:09.617130 25678 solver.cpp:218] Iteration 1008 (2.39363 iter/s, 5.0133s/12 iters), loss = 4.65476
I0428 21:28:09.617172 25678 solver.cpp:237] Train net output #0: loss = 4.65476 (* 1 = 4.65476 loss)
I0428 21:28:09.617179 25678 sgd_solver.cpp:105] Iteration 1008, lr = 0.00731636
I0428 21:28:10.631601 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:28:14.154036 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1020.caffemodel
I0428 21:28:17.439692 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1020.solverstate
I0428 21:28:19.967902 25678 solver.cpp:330] Iteration 1020, Testing net (#0)
I0428 21:28:19.967922 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:28:24.547700 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:28:25.036103 25678 solver.cpp:397] Test net output #0: accuracy = 0.0496324
I0428 21:28:25.036141 25678 solver.cpp:397] Test net output #1: loss = 4.58823 (* 1 = 4.58823 loss)
I0428 21:28:25.132179 25678 solver.cpp:218] Iteration 1020 (0.773444 iter/s, 15.515s/12 iters), loss = 4.65017
I0428 21:28:25.132230 25678 solver.cpp:237] Train net output #0: loss = 4.65017 (* 1 = 4.65017 loss)
I0428 21:28:25.132237 25678 sgd_solver.cpp:105] Iteration 1020, lr = 0.00731059
I0428 21:28:29.341383 25678 solver.cpp:218] Iteration 1032 (2.85093 iter/s, 4.20915s/12 iters), loss = 4.47918
I0428 21:28:29.341423 25678 solver.cpp:237] Train net output #0: loss = 4.47918 (* 1 = 4.47918 loss)
I0428 21:28:29.341430 25678 sgd_solver.cpp:105] Iteration 1032, lr = 0.0073048
I0428 21:28:34.267078 25678 solver.cpp:218] Iteration 1044 (2.43623 iter/s, 4.92565s/12 iters), loss = 4.44722
I0428 21:28:34.267119 25678 solver.cpp:237] Train net output #0: loss = 4.44722 (* 1 = 4.44722 loss)
I0428 21:28:34.267127 25678 sgd_solver.cpp:105] Iteration 1044, lr = 0.007299
I0428 21:28:39.239471 25678 solver.cpp:218] Iteration 1056 (2.41335 iter/s, 4.97234s/12 iters), loss = 4.53646
I0428 21:28:39.239514 25678 solver.cpp:237] Train net output #0: loss = 4.53646 (* 1 = 4.53646 loss)
I0428 21:28:39.239522 25678 sgd_solver.cpp:105] Iteration 1056, lr = 0.0072932
I0428 21:28:44.253227 25678 solver.cpp:218] Iteration 1068 (2.39344 iter/s, 5.0137s/12 iters), loss = 4.5749
I0428 21:28:44.253295 25678 solver.cpp:237] Train net output #0: loss = 4.5749 (* 1 = 4.5749 loss)
I0428 21:28:44.253309 25678 sgd_solver.cpp:105] Iteration 1068, lr = 0.00728739
I0428 21:28:49.243396 25678 solver.cpp:218] Iteration 1080 (2.40476 iter/s, 4.9901s/12 iters), loss = 4.46919
I0428 21:28:49.243508 25678 solver.cpp:237] Train net output #0: loss = 4.46919 (* 1 = 4.46919 loss)
I0428 21:28:49.243517 25678 sgd_solver.cpp:105] Iteration 1080, lr = 0.00728157
I0428 21:28:54.233705 25678 solver.cpp:218] Iteration 1092 (2.40472 iter/s, 4.9902s/12 iters), loss = 4.39394
I0428 21:28:54.233742 25678 solver.cpp:237] Train net output #0: loss = 4.39394 (* 1 = 4.39394 loss)
I0428 21:28:54.233752 25678 sgd_solver.cpp:105] Iteration 1092, lr = 0.00727575
I0428 21:28:59.221866 25678 solver.cpp:218] Iteration 1104 (2.40571 iter/s, 4.98812s/12 iters), loss = 4.67674
I0428 21:28:59.221907 25678 solver.cpp:237] Train net output #0: loss = 4.67674 (* 1 = 4.67674 loss)
I0428 21:28:59.221915 25678 sgd_solver.cpp:105] Iteration 1104, lr = 0.00726991
I0428 21:29:02.376062 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:29:04.228386 25678 solver.cpp:218] Iteration 1116 (2.3969 iter/s, 5.00647s/12 iters), loss = 4.16902
I0428 21:29:04.228431 25678 solver.cpp:237] Train net output #0: loss = 4.16902 (* 1 = 4.16902 loss)
I0428 21:29:04.228440 25678 sgd_solver.cpp:105] Iteration 1116, lr = 0.00726407
I0428 21:29:06.299962 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1122.caffemodel
I0428 21:29:09.374454 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1122.solverstate
I0428 21:29:11.768739 25678 solver.cpp:330] Iteration 1122, Testing net (#0)
I0428 21:29:11.768759 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:29:16.098100 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:29:16.638420 25678 solver.cpp:397] Test net output #0: accuracy = 0.0716912
I0428 21:29:16.638458 25678 solver.cpp:397] Test net output #1: loss = 4.45828 (* 1 = 4.45828 loss)
I0428 21:29:18.427124 25678 solver.cpp:218] Iteration 1128 (0.845148 iter/s, 14.1987s/12 iters), loss = 4.34723
I0428 21:29:18.427191 25678 solver.cpp:237] Train net output #0: loss = 4.34723 (* 1 = 4.34723 loss)
I0428 21:29:18.427204 25678 sgd_solver.cpp:105] Iteration 1128, lr = 0.00725822
I0428 21:29:23.417183 25678 solver.cpp:218] Iteration 1140 (2.40481 iter/s, 4.99s/12 iters), loss = 4.50787
I0428 21:29:23.417340 25678 solver.cpp:237] Train net output #0: loss = 4.50787 (* 1 = 4.50787 loss)
I0428 21:29:23.417349 25678 sgd_solver.cpp:105] Iteration 1140, lr = 0.00725237
I0428 21:29:28.403894 25678 solver.cpp:218] Iteration 1152 (2.40647 iter/s, 4.98655s/12 iters), loss = 4.40894
I0428 21:29:28.403936 25678 solver.cpp:237] Train net output #0: loss = 4.40894 (* 1 = 4.40894 loss)
I0428 21:29:28.403944 25678 sgd_solver.cpp:105] Iteration 1152, lr = 0.0072465
I0428 21:29:33.369415 25678 solver.cpp:218] Iteration 1164 (2.41669 iter/s, 4.96547s/12 iters), loss = 4.21559
I0428 21:29:33.369457 25678 solver.cpp:237] Train net output #0: loss = 4.21559 (* 1 = 4.21559 loss)
I0428 21:29:33.369464 25678 sgd_solver.cpp:105] Iteration 1164, lr = 0.00724063
I0428 21:29:38.358964 25678 solver.cpp:218] Iteration 1176 (2.40505 iter/s, 4.9895s/12 iters), loss = 4.53548
I0428 21:29:38.359005 25678 solver.cpp:237] Train net output #0: loss = 4.53548 (* 1 = 4.53548 loss)
I0428 21:29:38.359014 25678 sgd_solver.cpp:105] Iteration 1176, lr = 0.00723475
I0428 21:29:43.390981 25678 solver.cpp:218] Iteration 1188 (2.38475 iter/s, 5.03197s/12 iters), loss = 4.28383
I0428 21:29:43.391022 25678 solver.cpp:237] Train net output #0: loss = 4.28383 (* 1 = 4.28383 loss)
I0428 21:29:43.391031 25678 sgd_solver.cpp:105] Iteration 1188, lr = 0.00722886
I0428 21:29:48.394291 25678 solver.cpp:218] Iteration 1200 (2.39843 iter/s, 5.00326s/12 iters), loss = 4.26031
I0428 21:29:48.394331 25678 solver.cpp:237] Train net output #0: loss = 4.26031 (* 1 = 4.26031 loss)
I0428 21:29:48.394340 25678 sgd_solver.cpp:105] Iteration 1200, lr = 0.00722297
I0428 21:29:53.327134 25678 solver.cpp:218] Iteration 1212 (2.4327 iter/s, 4.9328s/12 iters), loss = 4.28538
I0428 21:29:53.327174 25678 solver.cpp:237] Train net output #0: loss = 4.28538 (* 1 = 4.28538 loss)
I0428 21:29:53.327183 25678 sgd_solver.cpp:105] Iteration 1212, lr = 0.00721706
I0428 21:29:53.603611 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:29:57.887022 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1224.caffemodel
I0428 21:30:00.973563 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1224.solverstate
I0428 21:30:03.338650 25678 solver.cpp:330] Iteration 1224, Testing net (#0)
I0428 21:30:03.338670 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:30:07.309785 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:30:07.841495 25678 solver.cpp:397] Test net output #0: accuracy = 0.0698529
I0428 21:30:07.841543 25678 solver.cpp:397] Test net output #1: loss = 4.43342 (* 1 = 4.43342 loss)
I0428 21:30:07.938674 25678 solver.cpp:218] Iteration 1224 (0.82127 iter/s, 14.6115s/12 iters), loss = 4.26021
I0428 21:30:07.938722 25678 solver.cpp:237] Train net output #0: loss = 4.26021 (* 1 = 4.26021 loss)
I0428 21:30:07.938730 25678 sgd_solver.cpp:105] Iteration 1224, lr = 0.00721115
I0428 21:30:12.087469 25678 solver.cpp:218] Iteration 1236 (2.89244 iter/s, 4.14874s/12 iters), loss = 4.2432
I0428 21:30:12.087510 25678 solver.cpp:237] Train net output #0: loss = 4.2432 (* 1 = 4.2432 loss)
I0428 21:30:12.087518 25678 sgd_solver.cpp:105] Iteration 1236, lr = 0.00720523
I0428 21:30:17.045192 25678 solver.cpp:218] Iteration 1248 (2.42049 iter/s, 4.95767s/12 iters), loss = 4.3011
I0428 21:30:17.045233 25678 solver.cpp:237] Train net output #0: loss = 4.3011 (* 1 = 4.3011 loss)
I0428 21:30:17.045240 25678 sgd_solver.cpp:105] Iteration 1248, lr = 0.00719931
I0428 21:30:22.050760 25678 solver.cpp:218] Iteration 1260 (2.39735 iter/s, 5.00552s/12 iters), loss = 4.30884
I0428 21:30:22.050801 25678 solver.cpp:237] Train net output #0: loss = 4.30884 (* 1 = 4.30884 loss)
I0428 21:30:22.050808 25678 sgd_solver.cpp:105] Iteration 1260, lr = 0.00719337
I0428 21:30:27.027586 25678 solver.cpp:218] Iteration 1272 (2.4112 iter/s, 4.97678s/12 iters), loss = 4.0446
I0428 21:30:27.027704 25678 solver.cpp:237] Train net output #0: loss = 4.0446 (* 1 = 4.0446 loss)
I0428 21:30:27.027714 25678 sgd_solver.cpp:105] Iteration 1272, lr = 0.00718743
I0428 21:30:31.980806 25678 solver.cpp:218] Iteration 1284 (2.42272 iter/s, 4.9531s/12 iters), loss = 4.24073
I0428 21:30:31.980847 25678 solver.cpp:237] Train net output #0: loss = 4.24073 (* 1 = 4.24073 loss)
I0428 21:30:31.980855 25678 sgd_solver.cpp:105] Iteration 1284, lr = 0.00718148
I0428 21:30:36.953611 25678 solver.cpp:218] Iteration 1296 (2.41315 iter/s, 4.97276s/12 iters), loss = 4.35081
I0428 21:30:36.953652 25678 solver.cpp:237] Train net output #0: loss = 4.35081 (* 1 = 4.35081 loss)
I0428 21:30:36.953660 25678 sgd_solver.cpp:105] Iteration 1296, lr = 0.00717552
I0428 21:30:41.909576 25678 solver.cpp:218] Iteration 1308 (2.42134 iter/s, 4.95593s/12 iters), loss = 3.89
I0428 21:30:41.909615 25678 solver.cpp:237] Train net output #0: loss = 3.89 (* 1 = 3.89 loss)
I0428 21:30:41.909622 25678 sgd_solver.cpp:105] Iteration 1308, lr = 0.00716956
I0428 21:30:44.418989 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:30:46.938589 25678 solver.cpp:218] Iteration 1320 (2.38617 iter/s, 5.02897s/12 iters), loss = 4.0681
I0428 21:30:46.938634 25678 solver.cpp:237] Train net output #0: loss = 4.0681 (* 1 = 4.0681 loss)
I0428 21:30:46.938642 25678 sgd_solver.cpp:105] Iteration 1320, lr = 0.00716359
I0428 21:30:48.958933 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1326.caffemodel
I0428 21:30:52.157627 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1326.solverstate
I0428 21:30:54.885759 25678 solver.cpp:330] Iteration 1326, Testing net (#0)
I0428 21:30:54.885782 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:30:59.056653 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:30:59.680058 25678 solver.cpp:397] Test net output #0: accuracy = 0.0845588
I0428 21:30:59.680100 25678 solver.cpp:397] Test net output #1: loss = 4.31475 (* 1 = 4.31475 loss)
I0428 21:31:01.555008 25678 solver.cpp:218] Iteration 1332 (0.820996 iter/s, 14.6164s/12 iters), loss = 4.28735
I0428 21:31:01.555074 25678 solver.cpp:237] Train net output #0: loss = 4.28735 (* 1 = 4.28735 loss)
I0428 21:31:01.555086 25678 sgd_solver.cpp:105] Iteration 1332, lr = 0.00715761
I0428 21:31:06.528250 25678 solver.cpp:218] Iteration 1344 (2.41295 iter/s, 4.97317s/12 iters), loss = 3.8445
I0428 21:31:06.528292 25678 solver.cpp:237] Train net output #0: loss = 3.8445 (* 1 = 3.8445 loss)
I0428 21:31:06.528301 25678 sgd_solver.cpp:105] Iteration 1344, lr = 0.00715162
I0428 21:31:11.497253 25678 solver.cpp:218] Iteration 1356 (2.415 iter/s, 4.96895s/12 iters), loss = 4.18792
I0428 21:31:11.497296 25678 solver.cpp:237] Train net output #0: loss = 4.18792 (* 1 = 4.18792 loss)
I0428 21:31:11.497303 25678 sgd_solver.cpp:105] Iteration 1356, lr = 0.00714562
I0428 21:31:16.391461 25678 solver.cpp:218] Iteration 1368 (2.45191 iter/s, 4.89415s/12 iters), loss = 4.15152
I0428 21:31:16.391505 25678 solver.cpp:237] Train net output #0: loss = 4.15152 (* 1 = 4.15152 loss)
I0428 21:31:16.391513 25678 sgd_solver.cpp:105] Iteration 1368, lr = 0.00713962
I0428 21:31:17.167193 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:31:21.357545 25678 solver.cpp:218] Iteration 1380 (2.41642 iter/s, 4.96603s/12 iters), loss = 3.82707
I0428 21:31:21.357585 25678 solver.cpp:237] Train net output #0: loss = 3.82707 (* 1 = 3.82707 loss)
I0428 21:31:21.357594 25678 sgd_solver.cpp:105] Iteration 1380, lr = 0.00713361
I0428 21:31:26.205281 25678 solver.cpp:218] Iteration 1392 (2.47541 iter/s, 4.84769s/12 iters), loss = 4.00745
I0428 21:31:26.205323 25678 solver.cpp:237] Train net output #0: loss = 4.00745 (* 1 = 4.00745 loss)
I0428 21:31:26.205332 25678 sgd_solver.cpp:105] Iteration 1392, lr = 0.00712759
I0428 21:31:31.261678 25678 solver.cpp:218] Iteration 1404 (2.37326 iter/s, 5.05634s/12 iters), loss = 3.90219
I0428 21:31:31.261809 25678 solver.cpp:237] Train net output #0: loss = 3.90219 (* 1 = 3.90219 loss)
I0428 21:31:31.261819 25678 sgd_solver.cpp:105] Iteration 1404, lr = 0.00712157
I0428 21:31:35.911412 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:31:36.263398 25678 solver.cpp:218] Iteration 1416 (2.39924 iter/s, 5.00159s/12 iters), loss = 3.91913
I0428 21:31:36.263442 25678 solver.cpp:237] Train net output #0: loss = 3.91913 (* 1 = 3.91913 loss)
I0428 21:31:36.263450 25678 sgd_solver.cpp:105] Iteration 1416, lr = 0.00711554
I0428 21:31:40.795331 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1428.caffemodel
I0428 21:31:44.553558 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1428.solverstate
I0428 21:31:46.907135 25678 solver.cpp:330] Iteration 1428, Testing net (#0)
I0428 21:31:46.907155 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:31:50.767143 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:31:51.380282 25678 solver.cpp:397] Test net output #0: accuracy = 0.0974265
I0428 21:31:51.380324 25678 solver.cpp:397] Test net output #1: loss = 4.13718 (* 1 = 4.13718 loss)
I0428 21:31:51.478042 25678 solver.cpp:218] Iteration 1428 (0.788715 iter/s, 15.2146s/12 iters), loss = 3.913
I0428 21:31:51.478085 25678 solver.cpp:237] Train net output #0: loss = 3.913 (* 1 = 3.913 loss)
I0428 21:31:51.478093 25678 sgd_solver.cpp:105] Iteration 1428, lr = 0.00710949
I0428 21:31:55.641513 25678 solver.cpp:218] Iteration 1440 (2.88225 iter/s, 4.16342s/12 iters), loss = 3.94044
I0428 21:31:55.641557 25678 solver.cpp:237] Train net output #0: loss = 3.94044 (* 1 = 3.94044 loss)
I0428 21:31:55.641566 25678 sgd_solver.cpp:105] Iteration 1440, lr = 0.00710345
I0428 21:32:00.561628 25678 solver.cpp:218] Iteration 1452 (2.43899 iter/s, 4.92007s/12 iters), loss = 3.71327
I0428 21:32:00.561669 25678 solver.cpp:237] Train net output #0: loss = 3.71327 (* 1 = 3.71327 loss)
I0428 21:32:00.561677 25678 sgd_solver.cpp:105] Iteration 1452, lr = 0.00709739
I0428 21:32:05.548866 25678 solver.cpp:218] Iteration 1464 (2.40617 iter/s, 4.98719s/12 iters), loss = 4.01036
I0428 21:32:05.548957 25678 solver.cpp:237] Train net output #0: loss = 4.01036 (* 1 = 4.01036 loss)
I0428 21:32:05.548966 25678 sgd_solver.cpp:105] Iteration 1464, lr = 0.00709133
I0428 21:32:10.535822 25678 solver.cpp:218] Iteration 1476 (2.40632 iter/s, 4.98686s/12 iters), loss = 3.81154
I0428 21:32:10.535864 25678 solver.cpp:237] Train net output #0: loss = 3.81154 (* 1 = 3.81154 loss)
I0428 21:32:10.535872 25678 sgd_solver.cpp:105] Iteration 1476, lr = 0.00708526
I0428 21:32:15.525352 25678 solver.cpp:218] Iteration 1488 (2.40506 iter/s, 4.98948s/12 iters), loss = 3.92125
I0428 21:32:15.525393 25678 solver.cpp:237] Train net output #0: loss = 3.92125 (* 1 = 3.92125 loss)
I0428 21:32:15.525400 25678 sgd_solver.cpp:105] Iteration 1488, lr = 0.00707918
I0428 21:32:20.424845 25678 solver.cpp:218] Iteration 1500 (2.44926 iter/s, 4.89945s/12 iters), loss = 3.7924
I0428 21:32:20.424888 25678 solver.cpp:237] Train net output #0: loss = 3.7924 (* 1 = 3.7924 loss)
I0428 21:32:20.424898 25678 sgd_solver.cpp:105] Iteration 1500, lr = 0.0070731
I0428 21:32:25.424065 25678 solver.cpp:218] Iteration 1512 (2.4004 iter/s, 4.99918s/12 iters), loss = 3.76194
I0428 21:32:25.424105 25678 solver.cpp:237] Train net output #0: loss = 3.76194 (* 1 = 3.76194 loss)
I0428 21:32:25.424113 25678 sgd_solver.cpp:105] Iteration 1512, lr = 0.007067
I0428 21:32:27.198592 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:32:30.431047 25678 solver.cpp:218] Iteration 1524 (2.39668 iter/s, 5.00694s/12 iters), loss = 3.8173
I0428 21:32:30.431088 25678 solver.cpp:237] Train net output #0: loss = 3.8173 (* 1 = 3.8173 loss)
I0428 21:32:30.431097 25678 sgd_solver.cpp:105] Iteration 1524, lr = 0.0070609
I0428 21:32:32.480401 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1530.caffemodel
I0428 21:32:35.570999 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1530.solverstate
I0428 21:32:37.946055 25678 solver.cpp:330] Iteration 1530, Testing net (#0)
I0428 21:32:37.946074 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:32:42.044476 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:32:42.751271 25678 solver.cpp:397] Test net output #0: accuracy = 0.112745
I0428 21:32:42.751302 25678 solver.cpp:397] Test net output #1: loss = 3.9928 (* 1 = 3.9928 loss)
I0428 21:32:44.574898 25678 solver.cpp:218] Iteration 1536 (0.848427 iter/s, 14.1438s/12 iters), loss = 3.72664
I0428 21:32:44.574951 25678 solver.cpp:237] Train net output #0: loss = 3.72664 (* 1 = 3.72664 loss)
I0428 21:32:44.574960 25678 sgd_solver.cpp:105] Iteration 1536, lr = 0.0070548
I0428 21:32:49.590009 25678 solver.cpp:218] Iteration 1548 (2.3928 iter/s, 5.01505s/12 iters), loss = 3.74798
I0428 21:32:49.590049 25678 solver.cpp:237] Train net output #0: loss = 3.74798 (* 1 = 3.74798 loss)
I0428 21:32:49.590057 25678 sgd_solver.cpp:105] Iteration 1548, lr = 0.00704868
I0428 21:32:54.635948 25678 solver.cpp:218] Iteration 1560 (2.37817 iter/s, 5.04589s/12 iters), loss = 3.82097
I0428 21:32:54.635990 25678 solver.cpp:237] Train net output #0: loss = 3.82097 (* 1 = 3.82097 loss)
I0428 21:32:54.635999 25678 sgd_solver.cpp:105] Iteration 1560, lr = 0.00704256
I0428 21:32:59.630671 25678 solver.cpp:218] Iteration 1572 (2.40256 iter/s, 4.99467s/12 iters), loss = 3.52865
I0428 21:32:59.630715 25678 solver.cpp:237] Train net output #0: loss = 3.52865 (* 1 = 3.52865 loss)
I0428 21:32:59.630723 25678 sgd_solver.cpp:105] Iteration 1572, lr = 0.00703643
I0428 21:33:04.593961 25678 solver.cpp:218] Iteration 1584 (2.41778 iter/s, 4.96324s/12 iters), loss = 3.94547
I0428 21:33:04.594000 25678 solver.cpp:237] Train net output #0: loss = 3.94547 (* 1 = 3.94547 loss)
I0428 21:33:04.594008 25678 sgd_solver.cpp:105] Iteration 1584, lr = 0.00703029
I0428 21:33:09.510648 25678 solver.cpp:218] Iteration 1596 (2.44069 iter/s, 4.91663s/12 iters), loss = 3.84274
I0428 21:33:09.510758 25678 solver.cpp:237] Train net output #0: loss = 3.84274 (* 1 = 3.84274 loss)
I0428 21:33:09.510767 25678 sgd_solver.cpp:105] Iteration 1596, lr = 0.00702415
I0428 21:33:14.486866 25678 solver.cpp:218] Iteration 1608 (2.41153 iter/s, 4.9761s/12 iters), loss = 3.46996
I0428 21:33:14.486909 25678 solver.cpp:237] Train net output #0: loss = 3.46996 (* 1 = 3.46996 loss)
I0428 21:33:14.486917 25678 sgd_solver.cpp:105] Iteration 1608, lr = 0.007018
I0428 21:33:18.386554 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:33:19.484793 25678 solver.cpp:218] Iteration 1620 (2.40102 iter/s, 4.99788s/12 iters), loss = 3.51152
I0428 21:33:19.484835 25678 solver.cpp:237] Train net output #0: loss = 3.51152 (* 1 = 3.51152 loss)
I0428 21:33:19.484843 25678 sgd_solver.cpp:105] Iteration 1620, lr = 0.00701184
I0428 21:33:24.035156 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1632.caffemodel
I0428 21:33:27.515434 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1632.solverstate
I0428 21:33:31.437981 25678 solver.cpp:330] Iteration 1632, Testing net (#0)
I0428 21:33:31.438005 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:33:35.535662 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:33:36.290232 25678 solver.cpp:397] Test net output #0: accuracy = 0.145833
I0428 21:33:36.290269 25678 solver.cpp:397] Test net output #1: loss = 3.89195 (* 1 = 3.89195 loss)
I0428 21:33:36.387450 25678 solver.cpp:218] Iteration 1632 (0.709949 iter/s, 16.9026s/12 iters), loss = 3.69071
I0428 21:33:36.387495 25678 solver.cpp:237] Train net output #0: loss = 3.69071 (* 1 = 3.69071 loss)
I0428 21:33:36.387503 25678 sgd_solver.cpp:105] Iteration 1632, lr = 0.00700567
I0428 21:33:40.600823 25678 solver.cpp:218] Iteration 1644 (2.84811 iter/s, 4.21332s/12 iters), loss = 3.70553
I0428 21:33:40.600998 25678 solver.cpp:237] Train net output #0: loss = 3.70553 (* 1 = 3.70553 loss)
I0428 21:33:40.601008 25678 sgd_solver.cpp:105] Iteration 1644, lr = 0.0069995
I0428 21:33:45.556383 25678 solver.cpp:218] Iteration 1656 (2.42161 iter/s, 4.95537s/12 iters), loss = 3.63831
I0428 21:33:45.556437 25678 solver.cpp:237] Train net output #0: loss = 3.63831 (* 1 = 3.63831 loss)
I0428 21:33:45.556450 25678 sgd_solver.cpp:105] Iteration 1656, lr = 0.00699332
I0428 21:33:50.554198 25678 solver.cpp:218] Iteration 1668 (2.40108 iter/s, 4.99775s/12 iters), loss = 3.68308
I0428 21:33:50.554244 25678 solver.cpp:237] Train net output #0: loss = 3.68308 (* 1 = 3.68308 loss)
I0428 21:33:50.554251 25678 sgd_solver.cpp:105] Iteration 1668, lr = 0.00698713
I0428 21:33:55.545601 25678 solver.cpp:218] Iteration 1680 (2.40416 iter/s, 4.99135s/12 iters), loss = 3.12527
I0428 21:33:55.545641 25678 solver.cpp:237] Train net output #0: loss = 3.12527 (* 1 = 3.12527 loss)
I0428 21:33:55.545650 25678 sgd_solver.cpp:105] Iteration 1680, lr = 0.00698093
I0428 21:34:00.532800 25678 solver.cpp:218] Iteration 1692 (2.40618 iter/s, 4.98715s/12 iters), loss = 3.46765
I0428 21:34:00.532840 25678 solver.cpp:237] Train net output #0: loss = 3.46765 (* 1 = 3.46765 loss)
I0428 21:34:00.532847 25678 sgd_solver.cpp:105] Iteration 1692, lr = 0.00697473
I0428 21:34:05.494617 25678 solver.cpp:218] Iteration 1704 (2.41849 iter/s, 4.96177s/12 iters), loss = 3.5676
I0428 21:34:05.494660 25678 solver.cpp:237] Train net output #0: loss = 3.5676 (* 1 = 3.5676 loss)
I0428 21:34:05.494669 25678 sgd_solver.cpp:105] Iteration 1704, lr = 0.00696852
I0428 21:34:10.721024 25678 solver.cpp:218] Iteration 1716 (2.29606 iter/s, 5.22635s/12 iters), loss = 3.73369
I0428 21:34:10.721120 25678 solver.cpp:237] Train net output #0: loss = 3.73369 (* 1 = 3.73369 loss)
I0428 21:34:10.721129 25678 sgd_solver.cpp:105] Iteration 1716, lr = 0.00696231
I0428 21:34:11.829767 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:34:15.797822 25678 solver.cpp:218] Iteration 1728 (2.36374 iter/s, 5.0767s/12 iters), loss = 3.69665
I0428 21:34:15.797864 25678 solver.cpp:237] Train net output #0: loss = 3.69665 (* 1 = 3.69665 loss)
I0428 21:34:15.797873 25678 sgd_solver.cpp:105] Iteration 1728, lr = 0.00695608
I0428 21:34:17.800220 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1734.caffemodel
I0428 21:34:21.174240 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1734.solverstate
I0428 21:34:23.540341 25678 solver.cpp:330] Iteration 1734, Testing net (#0)
I0428 21:34:23.540360 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:34:27.575884 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:34:28.366160 25678 solver.cpp:397] Test net output #0: accuracy = 0.167892
I0428 21:34:28.366209 25678 solver.cpp:397] Test net output #1: loss = 3.70775 (* 1 = 3.70775 loss)
I0428 21:34:30.224403 25678 solver.cpp:218] Iteration 1740 (0.831799 iter/s, 14.4266s/12 iters), loss = 3.31333
I0428 21:34:30.224444 25678 solver.cpp:237] Train net output #0: loss = 3.31333 (* 1 = 3.31333 loss)
I0428 21:34:30.224452 25678 sgd_solver.cpp:105] Iteration 1740, lr = 0.00694985
I0428 21:34:35.217278 25678 solver.cpp:218] Iteration 1752 (2.40345 iter/s, 4.99283s/12 iters), loss = 3.47045
I0428 21:34:35.217319 25678 solver.cpp:237] Train net output #0: loss = 3.47045 (* 1 = 3.47045 loss)
I0428 21:34:35.217327 25678 sgd_solver.cpp:105] Iteration 1752, lr = 0.00694361
I0428 21:34:40.203238 25678 solver.cpp:218] Iteration 1764 (2.40678 iter/s, 4.98591s/12 iters), loss = 3.32374
I0428 21:34:40.203284 25678 solver.cpp:237] Train net output #0: loss = 3.32374 (* 1 = 3.32374 loss)
I0428 21:34:40.203292 25678 sgd_solver.cpp:105] Iteration 1764, lr = 0.00693737
I0428 21:34:45.166743 25678 solver.cpp:218] Iteration 1776 (2.41768 iter/s, 4.96344s/12 iters), loss = 3.33823
I0428 21:34:45.166944 25678 solver.cpp:237] Train net output #0: loss = 3.33823 (* 1 = 3.33823 loss)
I0428 21:34:45.166958 25678 sgd_solver.cpp:105] Iteration 1776, lr = 0.00693111
I0428 21:34:50.186591 25678 solver.cpp:218] Iteration 1788 (2.39061 iter/s, 5.01965s/12 iters), loss = 3.12758
I0428 21:34:50.186632 25678 solver.cpp:237] Train net output #0: loss = 3.12758 (* 1 = 3.12758 loss)
I0428 21:34:50.186640 25678 sgd_solver.cpp:105] Iteration 1788, lr = 0.00692485
I0428 21:34:55.147976 25678 solver.cpp:218] Iteration 1800 (2.41871 iter/s, 4.96133s/12 iters), loss = 3.39192
I0428 21:34:55.148030 25678 solver.cpp:237] Train net output #0: loss = 3.39192 (* 1 = 3.39192 loss)
I0428 21:34:55.148042 25678 sgd_solver.cpp:105] Iteration 1800, lr = 0.00691859
I0428 21:35:00.126893 25678 solver.cpp:218] Iteration 1812 (2.41019 iter/s, 4.97886s/12 iters), loss = 3.25981
I0428 21:35:00.126936 25678 solver.cpp:237] Train net output #0: loss = 3.25981 (* 1 = 3.25981 loss)
I0428 21:35:00.126946 25678 sgd_solver.cpp:105] Iteration 1812, lr = 0.00691231
I0428 21:35:03.311338 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:35:05.139591 25678 solver.cpp:218] Iteration 1824 (2.39394 iter/s, 5.01265s/12 iters), loss = 3.07462
I0428 21:35:05.139636 25678 solver.cpp:237] Train net output #0: loss = 3.07462 (* 1 = 3.07462 loss)
I0428 21:35:05.139644 25678 sgd_solver.cpp:105] Iteration 1824, lr = 0.00690603
I0428 21:35:09.717871 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1836.caffemodel
I0428 21:35:12.805938 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1836.solverstate
I0428 21:35:16.513490 25678 solver.cpp:330] Iteration 1836, Testing net (#0)
I0428 21:35:16.513547 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:35:20.501281 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:35:21.348832 25678 solver.cpp:397] Test net output #0: accuracy = 0.169118
I0428 21:35:21.348876 25678 solver.cpp:397] Test net output #1: loss = 3.71795 (* 1 = 3.71795 loss)
I0428 21:35:21.445601 25678 solver.cpp:218] Iteration 1836 (0.735926 iter/s, 16.306s/12 iters), loss = 3.46043
I0428 21:35:21.445643 25678 solver.cpp:237] Train net output #0: loss = 3.46043 (* 1 = 3.46043 loss)
I0428 21:35:21.445652 25678 sgd_solver.cpp:105] Iteration 1836, lr = 0.00689974
I0428 21:35:25.637305 25678 solver.cpp:218] Iteration 1848 (2.86283 iter/s, 4.19165s/12 iters), loss = 3.36741
I0428 21:35:25.637348 25678 solver.cpp:237] Train net output #0: loss = 3.36741 (* 1 = 3.36741 loss)
I0428 21:35:25.637356 25678 sgd_solver.cpp:105] Iteration 1848, lr = 0.00689345
I0428 21:35:30.673801 25678 solver.cpp:218] Iteration 1860 (2.38263 iter/s, 5.03645s/12 iters), loss = 3.16747
I0428 21:35:30.673841 25678 solver.cpp:237] Train net output #0: loss = 3.16747 (* 1 = 3.16747 loss)
I0428 21:35:30.673849 25678 sgd_solver.cpp:105] Iteration 1860, lr = 0.00688715
I0428 21:35:35.719664 25678 solver.cpp:218] Iteration 1872 (2.37821 iter/s, 5.04581s/12 iters), loss = 3.36751
I0428 21:35:35.719705 25678 solver.cpp:237] Train net output #0: loss = 3.36751 (* 1 = 3.36751 loss)
I0428 21:35:35.719713 25678 sgd_solver.cpp:105] Iteration 1872, lr = 0.00688084
I0428 21:35:40.745175 25678 solver.cpp:218] Iteration 1884 (2.38784 iter/s, 5.02546s/12 iters), loss = 3.16837
I0428 21:35:40.745219 25678 solver.cpp:237] Train net output #0: loss = 3.16837 (* 1 = 3.16837 loss)
I0428 21:35:40.745226 25678 sgd_solver.cpp:105] Iteration 1884, lr = 0.00687452
I0428 21:35:45.802778 25678 solver.cpp:218] Iteration 1896 (2.37269 iter/s, 5.05755s/12 iters), loss = 3.018
I0428 21:35:45.802821 25678 solver.cpp:237] Train net output #0: loss = 3.018 (* 1 = 3.018 loss)
I0428 21:35:45.802829 25678 sgd_solver.cpp:105] Iteration 1896, lr = 0.0068682
I0428 21:35:50.800529 25678 solver.cpp:218] Iteration 1908 (2.4011 iter/s, 4.9977s/12 iters), loss = 3.2822
I0428 21:35:50.800673 25678 solver.cpp:237] Train net output #0: loss = 3.2822 (* 1 = 3.2822 loss)
I0428 21:35:50.800683 25678 sgd_solver.cpp:105] Iteration 1908, lr = 0.00686187
I0428 21:35:55.763454 25678 solver.cpp:218] Iteration 1920 (2.418 iter/s, 4.96278s/12 iters), loss = 2.98648
I0428 21:35:55.763494 25678 solver.cpp:237] Train net output #0: loss = 2.98648 (* 1 = 2.98648 loss)
I0428 21:35:55.763501 25678 sgd_solver.cpp:105] Iteration 1920, lr = 0.00685553
I0428 21:35:56.099231 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:36:01.037837 25678 solver.cpp:218] Iteration 1932 (2.27517 iter/s, 5.27434s/12 iters), loss = 3.25734
I0428 21:36:01.037878 25678 solver.cpp:237] Train net output #0: loss = 3.25734 (* 1 = 3.25734 loss)
I0428 21:36:01.037884 25678 sgd_solver.cpp:105] Iteration 1932, lr = 0.00684919
I0428 21:36:03.039587 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_1938.caffemodel
I0428 21:36:06.143776 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_1938.solverstate
I0428 21:36:08.502878 25678 solver.cpp:330] Iteration 1938, Testing net (#0)
I0428 21:36:08.502908 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:36:12.437525 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:36:13.316900 25678 solver.cpp:397] Test net output #0: accuracy = 0.19424
I0428 21:36:13.316949 25678 solver.cpp:397] Test net output #1: loss = 3.52876 (* 1 = 3.52876 loss)
I0428 21:36:15.156378 25678 solver.cpp:218] Iteration 1944 (0.849948 iter/s, 14.1185s/12 iters), loss = 3.1938
I0428 21:36:15.156438 25678 solver.cpp:237] Train net output #0: loss = 3.1938 (* 1 = 3.1938 loss)
I0428 21:36:15.156452 25678 sgd_solver.cpp:105] Iteration 1944, lr = 0.00684284
I0428 21:36:20.157691 25678 solver.cpp:218] Iteration 1956 (2.3994 iter/s, 5.00125s/12 iters), loss = 3.03452
I0428 21:36:20.157730 25678 solver.cpp:237] Train net output #0: loss = 3.03452 (* 1 = 3.03452 loss)
I0428 21:36:20.157739 25678 sgd_solver.cpp:105] Iteration 1956, lr = 0.00683648
I0428 21:36:25.164475 25678 solver.cpp:218] Iteration 1968 (2.39677 iter/s, 5.00674s/12 iters), loss = 3.01593
I0428 21:36:25.164569 25678 solver.cpp:237] Train net output #0: loss = 3.01593 (* 1 = 3.01593 loss)
I0428 21:36:25.164578 25678 sgd_solver.cpp:105] Iteration 1968, lr = 0.00683012
I0428 21:36:30.124346 25678 solver.cpp:218] Iteration 1980 (2.41947 iter/s, 4.95977s/12 iters), loss = 2.74424
I0428 21:36:30.124387 25678 solver.cpp:237] Train net output #0: loss = 2.74424 (* 1 = 2.74424 loss)
I0428 21:36:30.124394 25678 sgd_solver.cpp:105] Iteration 1980, lr = 0.00682375
I0428 21:36:35.096365 25678 solver.cpp:218] Iteration 1992 (2.41353 iter/s, 4.97197s/12 iters), loss = 2.96032
I0428 21:36:35.096413 25678 solver.cpp:237] Train net output #0: loss = 2.96032 (* 1 = 2.96032 loss)
I0428 21:36:35.096421 25678 sgd_solver.cpp:105] Iteration 1992, lr = 0.00681737
I0428 21:36:40.062170 25678 solver.cpp:218] Iteration 2004 (2.41655 iter/s, 4.96575s/12 iters), loss = 3.04241
I0428 21:36:40.062213 25678 solver.cpp:237] Train net output #0: loss = 3.04241 (* 1 = 3.04241 loss)
I0428 21:36:40.062222 25678 sgd_solver.cpp:105] Iteration 2004, lr = 0.00681098
I0428 21:36:45.040522 25678 solver.cpp:218] Iteration 2016 (2.41046 iter/s, 4.9783s/12 iters), loss = 2.64143
I0428 21:36:45.040582 25678 solver.cpp:237] Train net output #0: loss = 2.64143 (* 1 = 2.64143 loss)
I0428 21:36:45.040597 25678 sgd_solver.cpp:105] Iteration 2016, lr = 0.00680459
I0428 21:36:47.591629 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:36:50.047765 25678 solver.cpp:218] Iteration 2028 (2.39656 iter/s, 5.00718s/12 iters), loss = 2.9913
I0428 21:36:50.047807 25678 solver.cpp:237] Train net output #0: loss = 2.9913 (* 1 = 2.9913 loss)
I0428 21:36:50.047816 25678 sgd_solver.cpp:105] Iteration 2028, lr = 0.00679819
I0428 21:36:54.560025 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2040.caffemodel
I0428 21:36:57.653303 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2040.solverstate
I0428 21:37:00.625630 25678 solver.cpp:330] Iteration 2040, Testing net (#0)
I0428 21:37:00.625648 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:37:04.500519 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:37:05.426996 25678 solver.cpp:397] Test net output #0: accuracy = 0.213848
I0428 21:37:05.427032 25678 solver.cpp:397] Test net output #1: loss = 3.40774 (* 1 = 3.40774 loss)
I0428 21:37:05.523718 25678 solver.cpp:218] Iteration 2040 (0.775398 iter/s, 15.4759s/12 iters), loss = 2.98908
I0428 21:37:05.523767 25678 solver.cpp:237] Train net output #0: loss = 2.98908 (* 1 = 2.98908 loss)
I0428 21:37:05.523775 25678 sgd_solver.cpp:105] Iteration 2040, lr = 0.00679179
I0428 21:37:09.862800 25678 solver.cpp:218] Iteration 2052 (2.7656 iter/s, 4.33902s/12 iters), loss = 2.67028
I0428 21:37:09.862840 25678 solver.cpp:237] Train net output #0: loss = 2.67028 (* 1 = 2.67028 loss)
I0428 21:37:09.862848 25678 sgd_solver.cpp:105] Iteration 2052, lr = 0.00678538
I0428 21:37:11.055109 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:37:15.956609 25678 solver.cpp:218] Iteration 2064 (1.96923 iter/s, 6.09375s/12 iters), loss = 2.91391
I0428 21:37:15.966096 25678 solver.cpp:237] Train net output #0: loss = 2.91391 (* 1 = 2.91391 loss)
I0428 21:37:15.966121 25678 sgd_solver.cpp:105] Iteration 2064, lr = 0.00677896
I0428 21:37:22.630834 25678 solver.cpp:218] Iteration 2076 (1.80052 iter/s, 6.66475s/12 iters), loss = 3.18328
I0428 21:37:22.630893 25678 solver.cpp:237] Train net output #0: loss = 3.18328 (* 1 = 3.18328 loss)
I0428 21:37:22.630905 25678 sgd_solver.cpp:105] Iteration 2076, lr = 0.00677253
I0428 21:37:28.795356 25678 solver.cpp:218] Iteration 2088 (1.94665 iter/s, 6.16445s/12 iters), loss = 2.62257
I0428 21:37:28.795491 25678 solver.cpp:237] Train net output #0: loss = 2.62257 (* 1 = 2.62257 loss)
I0428 21:37:28.795503 25678 sgd_solver.cpp:105] Iteration 2088, lr = 0.0067661
I0428 21:37:34.976336 25678 solver.cpp:218] Iteration 2100 (1.94148 iter/s, 6.18084s/12 iters), loss = 2.57732
I0428 21:37:34.976380 25678 solver.cpp:237] Train net output #0: loss = 2.57732 (* 1 = 2.57732 loss)
I0428 21:37:34.976388 25678 sgd_solver.cpp:105] Iteration 2100, lr = 0.00675966
I0428 21:37:40.960944 25678 solver.cpp:218] Iteration 2112 (2.00516 iter/s, 5.98455s/12 iters), loss = 2.64081
I0428 21:37:40.960999 25678 solver.cpp:237] Train net output #0: loss = 2.64081 (* 1 = 2.64081 loss)
I0428 21:37:40.961009 25678 sgd_solver.cpp:105] Iteration 2112, lr = 0.00675321
I0428 21:37:46.600898 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:37:46.957921 25678 solver.cpp:218] Iteration 2124 (2.00103 iter/s, 5.99692s/12 iters), loss = 2.68659
I0428 21:37:46.957962 25678 solver.cpp:237] Train net output #0: loss = 2.68659 (* 1 = 2.68659 loss)
I0428 21:37:46.957970 25678 sgd_solver.cpp:105] Iteration 2124, lr = 0.00674676
I0428 21:37:52.826745 25678 solver.cpp:218] Iteration 2136 (2.04472 iter/s, 5.86877s/12 iters), loss = 2.69111
I0428 21:37:52.826799 25678 solver.cpp:237] Train net output #0: loss = 2.69111 (* 1 = 2.69111 loss)
I0428 21:37:52.826810 25678 sgd_solver.cpp:105] Iteration 2136, lr = 0.0067403
I0428 21:37:55.331244 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2142.caffemodel
I0428 21:37:58.856024 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2142.solverstate
I0428 21:38:01.709776 25678 solver.cpp:330] Iteration 2142, Testing net (#0)
I0428 21:38:01.709795 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:38:06.096498 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:38:07.226574 25678 solver.cpp:397] Test net output #0: accuracy = 0.232843
I0428 21:38:07.226620 25678 solver.cpp:397] Test net output #1: loss = 3.35472 (* 1 = 3.35472 loss)
I0428 21:38:09.464047 25678 solver.cpp:218] Iteration 2148 (0.721272 iter/s, 16.6373s/12 iters), loss = 3.01592
I0428 21:38:09.464110 25678 solver.cpp:237] Train net output #0: loss = 3.01592 (* 1 = 3.01592 loss)
I0428 21:38:09.464121 25678 sgd_solver.cpp:105] Iteration 2148, lr = 0.00673384
I0428 21:38:15.492502 25678 solver.cpp:218] Iteration 2160 (1.99058 iter/s, 6.02838s/12 iters), loss = 2.46498
I0428 21:38:15.492558 25678 solver.cpp:237] Train net output #0: loss = 2.46498 (* 1 = 2.46498 loss)
I0428 21:38:15.492568 25678 sgd_solver.cpp:105] Iteration 2160, lr = 0.00672737
I0428 21:38:21.357357 25678 solver.cpp:218] Iteration 2172 (2.04611 iter/s, 5.86479s/12 iters), loss = 2.64978
I0428 21:38:21.357415 25678 solver.cpp:237] Train net output #0: loss = 2.64978 (* 1 = 2.64978 loss)
I0428 21:38:21.357427 25678 sgd_solver.cpp:105] Iteration 2172, lr = 0.00672089
I0428 21:38:27.422749 25678 solver.cpp:218] Iteration 2184 (1.97846 iter/s, 6.06532s/12 iters), loss = 2.50572
I0428 21:38:27.422796 25678 solver.cpp:237] Train net output #0: loss = 2.50572 (* 1 = 2.50572 loss)
I0428 21:38:27.422803 25678 sgd_solver.cpp:105] Iteration 2184, lr = 0.0067144
I0428 21:38:33.409812 25678 solver.cpp:218] Iteration 2196 (2.00434 iter/s, 5.98701s/12 iters), loss = 2.71903
I0428 21:38:33.409967 25678 solver.cpp:237] Train net output #0: loss = 2.71903 (* 1 = 2.71903 loss)
I0428 21:38:33.409977 25678 sgd_solver.cpp:105] Iteration 2196, lr = 0.00670791
I0428 21:38:39.048563 25678 solver.cpp:218] Iteration 2208 (2.12819 iter/s, 5.63859s/12 iters), loss = 2.65423
I0428 21:38:39.048606 25678 solver.cpp:237] Train net output #0: loss = 2.65423 (* 1 = 2.65423 loss)
I0428 21:38:39.048614 25678 sgd_solver.cpp:105] Iteration 2208, lr = 0.00670141
I0428 21:38:44.041234 25678 solver.cpp:218] Iteration 2220 (2.40355 iter/s, 4.99262s/12 iters), loss = 2.78374
I0428 21:38:44.041275 25678 solver.cpp:237] Train net output #0: loss = 2.78374 (* 1 = 2.78374 loss)
I0428 21:38:44.041282 25678 sgd_solver.cpp:105] Iteration 2220, lr = 0.00669491
I0428 21:38:45.842640 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:38:49.048113 25678 solver.cpp:218] Iteration 2232 (2.39672 iter/s, 5.00683s/12 iters), loss = 2.6399
I0428 21:38:49.048147 25678 solver.cpp:237] Train net output #0: loss = 2.6399 (* 1 = 2.6399 loss)
I0428 21:38:49.048154 25678 sgd_solver.cpp:105] Iteration 2232, lr = 0.0066884
I0428 21:38:53.520103 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2244.caffemodel
I0428 21:38:56.708186 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2244.solverstate
I0428 21:38:59.645045 25678 solver.cpp:330] Iteration 2244, Testing net (#0)
I0428 21:38:59.645062 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:39:03.467072 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:39:04.481870 25678 solver.cpp:397] Test net output #0: accuracy = 0.251226
I0428 21:39:04.481917 25678 solver.cpp:397] Test net output #1: loss = 3.20929 (* 1 = 3.20929 loss)
I0428 21:39:04.579571 25678 solver.cpp:218] Iteration 2244 (0.772627 iter/s, 15.5314s/12 iters), loss = 2.94043
I0428 21:39:04.579614 25678 solver.cpp:237] Train net output #0: loss = 2.94043 (* 1 = 2.94043 loss)
I0428 21:39:04.579623 25678 sgd_solver.cpp:105] Iteration 2244, lr = 0.00668188
I0428 21:39:08.752718 25678 solver.cpp:218] Iteration 2256 (2.87557 iter/s, 4.17309s/12 iters), loss = 2.38681
I0428 21:39:08.752769 25678 solver.cpp:237] Train net output #0: loss = 2.38681 (* 1 = 2.38681 loss)
I0428 21:39:08.752779 25678 sgd_solver.cpp:105] Iteration 2256, lr = 0.00667535
I0428 21:39:13.710319 25678 solver.cpp:218] Iteration 2268 (2.42055 iter/s, 4.95755s/12 iters), loss = 2.76037
I0428 21:39:13.710361 25678 solver.cpp:237] Train net output #0: loss = 2.76037 (* 1 = 2.76037 loss)
I0428 21:39:13.710368 25678 sgd_solver.cpp:105] Iteration 2268, lr = 0.00666882
I0428 21:39:18.665542 25678 solver.cpp:218] Iteration 2280 (2.42171 iter/s, 4.95517s/12 iters), loss = 2.53863
I0428 21:39:18.665581 25678 solver.cpp:237] Train net output #0: loss = 2.53863 (* 1 = 2.53863 loss)
I0428 21:39:18.665591 25678 sgd_solver.cpp:105] Iteration 2280, lr = 0.00666229
I0428 21:39:23.626665 25678 solver.cpp:218] Iteration 2292 (2.41883 iter/s, 4.96107s/12 iters), loss = 2.89907
I0428 21:39:23.626713 25678 solver.cpp:237] Train net output #0: loss = 2.89907 (* 1 = 2.89907 loss)
I0428 21:39:23.626720 25678 sgd_solver.cpp:105] Iteration 2292, lr = 0.00665574
I0428 21:39:28.587484 25678 solver.cpp:218] Iteration 2304 (2.41898 iter/s, 4.96076s/12 iters), loss = 2.66579
I0428 21:39:28.587523 25678 solver.cpp:237] Train net output #0: loss = 2.66579 (* 1 = 2.66579 loss)
I0428 21:39:28.587532 25678 sgd_solver.cpp:105] Iteration 2304, lr = 0.00664919
I0428 21:39:33.564575 25678 solver.cpp:218] Iteration 2316 (2.41107 iter/s, 4.97705s/12 iters), loss = 2.25307
I0428 21:39:33.564719 25678 solver.cpp:237] Train net output #0: loss = 2.25307 (* 1 = 2.25307 loss)
I0428 21:39:33.564728 25678 sgd_solver.cpp:105] Iteration 2316, lr = 0.00664264
I0428 21:39:37.448768 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:39:38.498829 25678 solver.cpp:218] Iteration 2328 (2.43206 iter/s, 4.9341s/12 iters), loss = 2.25623
I0428 21:39:38.498884 25678 solver.cpp:237] Train net output #0: loss = 2.25623 (* 1 = 2.25623 loss)
I0428 21:39:38.498898 25678 sgd_solver.cpp:105] Iteration 2328, lr = 0.00663607
I0428 21:39:43.450068 25678 solver.cpp:218] Iteration 2340 (2.42367 iter/s, 4.95118s/12 iters), loss = 2.48537
I0428 21:39:43.450109 25678 solver.cpp:237] Train net output #0: loss = 2.48537 (* 1 = 2.48537 loss)
I0428 21:39:43.450116 25678 sgd_solver.cpp:105] Iteration 2340, lr = 0.00662951
I0428 21:39:45.457157 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2346.caffemodel
I0428 21:39:51.312394 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2346.solverstate
I0428 21:39:56.226143 25678 solver.cpp:330] Iteration 2346, Testing net (#0)
I0428 21:39:56.226167 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:40:00.026986 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:40:01.095536 25678 solver.cpp:397] Test net output #0: accuracy = 0.272059
I0428 21:40:01.095579 25678 solver.cpp:397] Test net output #1: loss = 3.11705 (* 1 = 3.11705 loss)
I0428 21:40:02.919211 25678 solver.cpp:218] Iteration 2352 (0.616361 iter/s, 19.4691s/12 iters), loss = 2.46214
I0428 21:40:02.919257 25678 solver.cpp:237] Train net output #0: loss = 2.46214 (* 1 = 2.46214 loss)
I0428 21:40:02.919265 25678 sgd_solver.cpp:105] Iteration 2352, lr = 0.00662293
I0428 21:40:07.893000 25678 solver.cpp:218] Iteration 2364 (2.41267 iter/s, 4.97373s/12 iters), loss = 2.51996
I0428 21:40:07.893100 25678 solver.cpp:237] Train net output #0: loss = 2.51996 (* 1 = 2.51996 loss)
I0428 21:40:07.893110 25678 sgd_solver.cpp:105] Iteration 2364, lr = 0.00661635
I0428 21:40:12.903264 25678 solver.cpp:218] Iteration 2376 (2.39513 iter/s, 5.01016s/12 iters), loss = 2.29494
I0428 21:40:12.903302 25678 solver.cpp:237] Train net output #0: loss = 2.29494 (* 1 = 2.29494 loss)
I0428 21:40:12.903309 25678 sgd_solver.cpp:105] Iteration 2376, lr = 0.00660976
I0428 21:40:17.851115 25678 solver.cpp:218] Iteration 2388 (2.42532 iter/s, 4.94781s/12 iters), loss = 2.01996
I0428 21:40:17.851153 25678 solver.cpp:237] Train net output #0: loss = 2.01996 (* 1 = 2.01996 loss)
I0428 21:40:17.851161 25678 sgd_solver.cpp:105] Iteration 2388, lr = 0.00660317
I0428 21:40:22.841127 25678 solver.cpp:218] Iteration 2400 (2.40483 iter/s, 4.98997s/12 iters), loss = 2.37961
I0428 21:40:22.841166 25678 solver.cpp:237] Train net output #0: loss = 2.37961 (* 1 = 2.37961 loss)
I0428 21:40:22.841172 25678 sgd_solver.cpp:105] Iteration 2400, lr = 0.00659657
I0428 21:40:27.807210 25678 solver.cpp:218] Iteration 2412 (2.41641 iter/s, 4.96604s/12 iters), loss = 2.15297
I0428 21:40:27.807252 25678 solver.cpp:237] Train net output #0: loss = 2.15297 (* 1 = 2.15297 loss)
I0428 21:40:27.807260 25678 sgd_solver.cpp:105] Iteration 2412, lr = 0.00658996
I0428 21:40:32.763789 25678 solver.cpp:218] Iteration 2424 (2.42105 iter/s, 4.95653s/12 iters), loss = 2.14409
I0428 21:40:32.763830 25678 solver.cpp:237] Train net output #0: loss = 2.14409 (* 1 = 2.14409 loss)
I0428 21:40:32.763839 25678 sgd_solver.cpp:105] Iteration 2424, lr = 0.00658335
I0428 21:40:33.845639 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:40:37.760383 25678 solver.cpp:218] Iteration 2436 (2.40166 iter/s, 4.99654s/12 iters), loss = 2.53041
I0428 21:40:37.760426 25678 solver.cpp:237] Train net output #0: loss = 2.53041 (* 1 = 2.53041 loss)
I0428 21:40:37.760433 25678 sgd_solver.cpp:105] Iteration 2436, lr = 0.00657673
I0428 21:40:42.264740 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2448.caffemodel
I0428 21:40:45.682766 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2448.solverstate
I0428 21:40:50.962992 25678 solver.cpp:330] Iteration 2448, Testing net (#0)
I0428 21:40:50.963016 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:40:54.678565 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:40:55.769096 25678 solver.cpp:397] Test net output #0: accuracy = 0.265319
I0428 21:40:55.769140 25678 solver.cpp:397] Test net output #1: loss = 3.1353 (* 1 = 3.1353 loss)
I0428 21:40:55.865885 25678 solver.cpp:218] Iteration 2448 (0.662783 iter/s, 18.1055s/12 iters), loss = 2.63864
I0428 21:40:55.865927 25678 solver.cpp:237] Train net output #0: loss = 2.63864 (* 1 = 2.63864 loss)
I0428 21:40:55.865936 25678 sgd_solver.cpp:105] Iteration 2448, lr = 0.0065701
I0428 21:41:00.013288 25678 solver.cpp:218] Iteration 2460 (2.89342 iter/s, 4.14735s/12 iters), loss = 2.26408
I0428 21:41:00.013329 25678 solver.cpp:237] Train net output #0: loss = 2.26408 (* 1 = 2.26408 loss)
I0428 21:41:00.013336 25678 sgd_solver.cpp:105] Iteration 2460, lr = 0.00656347
I0428 21:41:05.004554 25678 solver.cpp:218] Iteration 2472 (2.40422 iter/s, 4.99122s/12 iters), loss = 1.9507
I0428 21:41:05.004593 25678 solver.cpp:237] Train net output #0: loss = 1.9507 (* 1 = 1.9507 loss)
I0428 21:41:05.004601 25678 sgd_solver.cpp:105] Iteration 2472, lr = 0.00655684
I0428 21:41:09.942103 25678 solver.cpp:218] Iteration 2484 (2.43038 iter/s, 4.9375s/12 iters), loss = 2.22024
I0428 21:41:09.942142 25678 solver.cpp:237] Train net output #0: loss = 2.22024 (* 1 = 2.22024 loss)
I0428 21:41:09.942149 25678 sgd_solver.cpp:105] Iteration 2484, lr = 0.00655019
I0428 21:41:14.915426 25678 solver.cpp:218] Iteration 2496 (2.4129 iter/s, 4.97327s/12 iters), loss = 1.89934
I0428 21:41:14.915530 25678 solver.cpp:237] Train net output #0: loss = 1.89934 (* 1 = 1.89934 loss)
I0428 21:41:14.915539 25678 sgd_solver.cpp:105] Iteration 2496, lr = 0.00654354
I0428 21:41:19.897858 25678 solver.cpp:218] Iteration 2508 (2.40852 iter/s, 4.98232s/12 iters), loss = 2.42374
I0428 21:41:19.897903 25678 solver.cpp:237] Train net output #0: loss = 2.42374 (* 1 = 2.42374 loss)
I0428 21:41:19.897912 25678 sgd_solver.cpp:105] Iteration 2508, lr = 0.00653689
I0428 21:41:24.844300 25678 solver.cpp:218] Iteration 2520 (2.42601 iter/s, 4.94638s/12 iters), loss = 2.46794
I0428 21:41:24.844344 25678 solver.cpp:237] Train net output #0: loss = 2.46794 (* 1 = 2.46794 loss)
I0428 21:41:24.844353 25678 sgd_solver.cpp:105] Iteration 2520, lr = 0.00653023
I0428 21:41:28.059886 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:41:29.838467 25678 solver.cpp:218] Iteration 2532 (2.40283 iter/s, 4.99411s/12 iters), loss = 1.9605
I0428 21:41:29.838508 25678 solver.cpp:237] Train net output #0: loss = 1.9605 (* 1 = 1.9605 loss)
I0428 21:41:29.838516 25678 sgd_solver.cpp:105] Iteration 2532, lr = 0.00652356
I0428 21:41:34.801056 25678 solver.cpp:218] Iteration 2544 (2.41812 iter/s, 4.96254s/12 iters), loss = 2.08051
I0428 21:41:34.801095 25678 solver.cpp:237] Train net output #0: loss = 2.08051 (* 1 = 2.08051 loss)
I0428 21:41:34.801103 25678 sgd_solver.cpp:105] Iteration 2544, lr = 0.00651689
I0428 21:41:36.812321 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2550.caffemodel
I0428 21:41:40.217684 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2550.solverstate
I0428 21:41:42.672412 25678 solver.cpp:330] Iteration 2550, Testing net (#0)
I0428 21:41:42.672430 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:41:46.329916 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:41:47.473979 25678 solver.cpp:397] Test net output #0: accuracy = 0.289216
I0428 21:41:47.474011 25678 solver.cpp:397] Test net output #1: loss = 3.13162 (* 1 = 3.13162 loss)
I0428 21:41:49.300236 25678 solver.cpp:218] Iteration 2556 (0.827635 iter/s, 14.4991s/12 iters), loss = 2.15429
I0428 21:41:49.300274 25678 solver.cpp:237] Train net output #0: loss = 2.15429 (* 1 = 2.15429 loss)
I0428 21:41:49.300282 25678 sgd_solver.cpp:105] Iteration 2556, lr = 0.00651021
I0428 21:41:54.233960 25678 solver.cpp:218] Iteration 2568 (2.43226 iter/s, 4.93367s/12 iters), loss = 1.94845
I0428 21:41:54.234004 25678 solver.cpp:237] Train net output #0: loss = 1.94845 (* 1 = 1.94845 loss)
I0428 21:41:54.234012 25678 sgd_solver.cpp:105] Iteration 2568, lr = 0.00650352
I0428 21:41:59.202867 25678 solver.cpp:218] Iteration 2580 (2.41504 iter/s, 4.96885s/12 iters), loss = 2.28589
I0428 21:41:59.202908 25678 solver.cpp:237] Train net output #0: loss = 2.28589 (* 1 = 2.28589 loss)
I0428 21:41:59.202916 25678 sgd_solver.cpp:105] Iteration 2580, lr = 0.00649683
I0428 21:42:04.167640 25678 solver.cpp:218] Iteration 2592 (2.41706 iter/s, 4.96472s/12 iters), loss = 1.95303
I0428 21:42:04.167680 25678 solver.cpp:237] Train net output #0: loss = 1.95303 (* 1 = 1.95303 loss)
I0428 21:42:04.167686 25678 sgd_solver.cpp:105] Iteration 2592, lr = 0.00649014
I0428 21:42:09.097851 25678 solver.cpp:218] Iteration 2604 (2.434 iter/s, 4.93016s/12 iters), loss = 1.88237
I0428 21:42:09.097892 25678 solver.cpp:237] Train net output #0: loss = 1.88237 (* 1 = 1.88237 loss)
I0428 21:42:09.097900 25678 sgd_solver.cpp:105] Iteration 2604, lr = 0.00648343
I0428 21:42:14.086865 25678 solver.cpp:218] Iteration 2616 (2.40531 iter/s, 4.98896s/12 iters), loss = 2.36965
I0428 21:42:14.086903 25678 solver.cpp:237] Train net output #0: loss = 2.36965 (* 1 = 2.36965 loss)
I0428 21:42:14.086911 25678 sgd_solver.cpp:105] Iteration 2616, lr = 0.00647672
I0428 21:42:19.068701 25678 solver.cpp:218] Iteration 2628 (2.40877 iter/s, 4.98179s/12 iters), loss = 2.22529
I0428 21:42:19.068804 25678 solver.cpp:237] Train net output #0: loss = 2.22529 (* 1 = 2.22529 loss)
I0428 21:42:19.068812 25678 sgd_solver.cpp:105] Iteration 2628, lr = 0.00647001
I0428 21:42:19.501452 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:42:24.024688 25678 solver.cpp:218] Iteration 2640 (2.42137 iter/s, 4.95588s/12 iters), loss = 2.22322
I0428 21:42:24.024725 25678 solver.cpp:237] Train net output #0: loss = 2.22322 (* 1 = 2.22322 loss)
I0428 21:42:24.024734 25678 sgd_solver.cpp:105] Iteration 2640, lr = 0.00646329
I0428 21:42:28.392104 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2652.caffemodel
I0428 21:42:31.439628 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2652.solverstate
I0428 21:42:34.434919 25678 solver.cpp:330] Iteration 2652, Testing net (#0)
I0428 21:42:34.434938 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:42:37.809504 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:42:38.894372 25678 solver.cpp:397] Test net output #0: accuracy = 0.306985
I0428 21:42:38.894405 25678 solver.cpp:397] Test net output #1: loss = 2.99236 (* 1 = 2.99236 loss)
I0428 21:42:38.992800 25678 solver.cpp:218] Iteration 2652 (0.801707 iter/s, 14.9681s/12 iters), loss = 2.2563
I0428 21:42:38.992861 25678 solver.cpp:237] Train net output #0: loss = 2.2563 (* 1 = 2.2563 loss)
I0428 21:42:38.992873 25678 sgd_solver.cpp:105] Iteration 2652, lr = 0.00645656
I0428 21:42:43.131108 25678 solver.cpp:218] Iteration 2664 (2.89978 iter/s, 4.13824s/12 iters), loss = 1.99963
I0428 21:42:43.131147 25678 solver.cpp:237] Train net output #0: loss = 1.99963 (* 1 = 1.99963 loss)
I0428 21:42:43.131155 25678 sgd_solver.cpp:105] Iteration 2664, lr = 0.00644983
I0428 21:42:48.114444 25678 solver.cpp:218] Iteration 2676 (2.40805 iter/s, 4.98329s/12 iters), loss = 1.68382
I0428 21:42:48.114481 25678 solver.cpp:237] Train net output #0: loss = 1.68382 (* 1 = 1.68382 loss)
I0428 21:42:48.114490 25678 sgd_solver.cpp:105] Iteration 2676, lr = 0.00644309
I0428 21:42:53.088225 25678 solver.cpp:218] Iteration 2688 (2.41267 iter/s, 4.97373s/12 iters), loss = 1.84354
I0428 21:42:53.088348 25678 solver.cpp:237] Train net output #0: loss = 1.84354 (* 1 = 1.84354 loss)
I0428 21:42:53.088357 25678 sgd_solver.cpp:105] Iteration 2688, lr = 0.00643635
I0428 21:42:58.020812 25678 solver.cpp:218] Iteration 2700 (2.43286 iter/s, 4.93246s/12 iters), loss = 2.32824
I0428 21:42:58.020849 25678 solver.cpp:237] Train net output #0: loss = 2.32824 (* 1 = 2.32824 loss)
I0428 21:42:58.020857 25678 sgd_solver.cpp:105] Iteration 2700, lr = 0.0064296
I0428 21:43:02.992132 25678 solver.cpp:218] Iteration 2712 (2.41387 iter/s, 4.97127s/12 iters), loss = 2.47025
I0428 21:43:02.992166 25678 solver.cpp:237] Train net output #0: loss = 2.47025 (* 1 = 2.47025 loss)
I0428 21:43:02.992173 25678 sgd_solver.cpp:105] Iteration 2712, lr = 0.00642285
I0428 21:43:07.982803 25678 solver.cpp:218] Iteration 2724 (2.40451 iter/s, 4.99063s/12 iters), loss = 1.92152
I0428 21:43:07.982842 25678 solver.cpp:237] Train net output #0: loss = 1.92152 (* 1 = 1.92152 loss)
I0428 21:43:07.982849 25678 sgd_solver.cpp:105] Iteration 2724, lr = 0.00641609
I0428 21:43:10.543006 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:43:12.967654 25678 solver.cpp:218] Iteration 2736 (2.40732 iter/s, 4.9848s/12 iters), loss = 2.05437
I0428 21:43:12.967695 25678 solver.cpp:237] Train net output #0: loss = 2.05437 (* 1 = 2.05437 loss)
I0428 21:43:12.967703 25678 sgd_solver.cpp:105] Iteration 2736, lr = 0.00640932
I0428 21:43:17.907527 25678 solver.cpp:218] Iteration 2748 (2.42924 iter/s, 4.93982s/12 iters), loss = 1.98636
I0428 21:43:17.907568 25678 solver.cpp:237] Train net output #0: loss = 1.98636 (* 1 = 1.98636 loss)
I0428 21:43:17.907575 25678 sgd_solver.cpp:105] Iteration 2748, lr = 0.00640255
I0428 21:43:19.916887 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2754.caffemodel
I0428 21:43:23.386688 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2754.solverstate
I0428 21:43:25.741719 25678 solver.cpp:330] Iteration 2754, Testing net (#0)
I0428 21:43:25.741736 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:43:29.040870 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:43:29.410908 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:43:30.644318 25678 solver.cpp:397] Test net output #0: accuracy = 0.31924
I0428 21:43:30.644366 25678 solver.cpp:397] Test net output #1: loss = 2.93335 (* 1 = 2.93335 loss)
I0428 21:43:32.459861 25678 solver.cpp:218] Iteration 2760 (0.824612 iter/s, 14.5523s/12 iters), loss = 1.95112
I0428 21:43:32.459909 25678 solver.cpp:237] Train net output #0: loss = 1.95112 (* 1 = 1.95112 loss)
I0428 21:43:32.459918 25678 sgd_solver.cpp:105] Iteration 2760, lr = 0.00639577
I0428 21:43:37.412770 25678 solver.cpp:218] Iteration 2772 (2.42285 iter/s, 4.95285s/12 iters), loss = 2.14346
I0428 21:43:37.412811 25678 solver.cpp:237] Train net output #0: loss = 2.14346 (* 1 = 2.14346 loss)
I0428 21:43:37.412818 25678 sgd_solver.cpp:105] Iteration 2772, lr = 0.00638899
I0428 21:43:42.422233 25678 solver.cpp:218] Iteration 2784 (2.39549 iter/s, 5.00942s/12 iters), loss = 2.35309
I0428 21:43:42.422271 25678 solver.cpp:237] Train net output #0: loss = 2.35309 (* 1 = 2.35309 loss)
I0428 21:43:42.422278 25678 sgd_solver.cpp:105] Iteration 2784, lr = 0.0063822
I0428 21:43:47.410113 25678 solver.cpp:218] Iteration 2796 (2.40586 iter/s, 4.98783s/12 iters), loss = 1.99944
I0428 21:43:47.410152 25678 solver.cpp:237] Train net output #0: loss = 1.99944 (* 1 = 1.99944 loss)
I0428 21:43:47.410161 25678 sgd_solver.cpp:105] Iteration 2796, lr = 0.00637541
I0428 21:43:52.340299 25678 solver.cpp:218] Iteration 2808 (2.43401 iter/s, 4.93013s/12 iters), loss = 1.89689
I0428 21:43:52.340338 25678 solver.cpp:237] Train net output #0: loss = 1.89689 (* 1 = 1.89689 loss)
I0428 21:43:52.340346 25678 sgd_solver.cpp:105] Iteration 2808, lr = 0.00636861
I0428 21:43:57.379379 25678 solver.cpp:218] Iteration 2820 (2.38141 iter/s, 5.03903s/12 iters), loss = 1.6665
I0428 21:43:57.379534 25678 solver.cpp:237] Train net output #0: loss = 1.6665 (* 1 = 1.6665 loss)
I0428 21:43:57.379544 25678 sgd_solver.cpp:105] Iteration 2820, lr = 0.0063618
I0428 21:44:02.141621 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:44:02.433295 25678 solver.cpp:218] Iteration 2832 (2.37447 iter/s, 5.05375s/12 iters), loss = 1.62252
I0428 21:44:02.433338 25678 solver.cpp:237] Train net output #0: loss = 1.62252 (* 1 = 1.62252 loss)
I0428 21:44:02.433346 25678 sgd_solver.cpp:105] Iteration 2832, lr = 0.00635499
I0428 21:44:07.477011 25678 solver.cpp:218] Iteration 2844 (2.37922 iter/s, 5.04366s/12 iters), loss = 1.78396
I0428 21:44:07.477051 25678 solver.cpp:237] Train net output #0: loss = 1.78396 (* 1 = 1.78396 loss)
I0428 21:44:07.477057 25678 sgd_solver.cpp:105] Iteration 2844, lr = 0.00634818
I0428 21:44:12.001986 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2856.caffemodel
I0428 21:44:15.672978 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2856.solverstate
I0428 21:44:18.041700 25678 solver.cpp:330] Iteration 2856, Testing net (#0)
I0428 21:44:18.041719 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:44:21.407336 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:44:22.591773 25678 solver.cpp:397] Test net output #0: accuracy = 0.335172
I0428 21:44:22.591820 25678 solver.cpp:397] Test net output #1: loss = 2.84201 (* 1 = 2.84201 loss)
I0428 21:44:22.688715 25678 solver.cpp:218] Iteration 2856 (0.788868 iter/s, 15.2117s/12 iters), loss = 1.81627
I0428 21:44:22.688760 25678 solver.cpp:237] Train net output #0: loss = 1.81627 (* 1 = 1.81627 loss)
I0428 21:44:22.688769 25678 sgd_solver.cpp:105] Iteration 2856, lr = 0.00634136
I0428 21:44:26.839519 25678 solver.cpp:218] Iteration 2868 (2.89105 iter/s, 4.15074s/12 iters), loss = 1.65505
I0428 21:44:26.839560 25678 solver.cpp:237] Train net output #0: loss = 1.65505 (* 1 = 1.65505 loss)
I0428 21:44:26.839568 25678 sgd_solver.cpp:105] Iteration 2868, lr = 0.00633453
I0428 21:44:31.821866 25678 solver.cpp:218] Iteration 2880 (2.40853 iter/s, 4.9823s/12 iters), loss = 1.71467
I0428 21:44:31.821931 25678 solver.cpp:237] Train net output #0: loss = 1.71467 (* 1 = 1.71467 loss)
I0428 21:44:31.821940 25678 sgd_solver.cpp:105] Iteration 2880, lr = 0.0063277
I0428 21:44:36.800297 25678 solver.cpp:218] Iteration 2892 (2.41043 iter/s, 4.97836s/12 iters), loss = 1.72819
I0428 21:44:36.800328 25678 solver.cpp:237] Train net output #0: loss = 1.72819 (* 1 = 1.72819 loss)
I0428 21:44:36.800334 25678 sgd_solver.cpp:105] Iteration 2892, lr = 0.00632086
I0428 21:44:41.821853 25678 solver.cpp:218] Iteration 2904 (2.38972 iter/s, 5.02152s/12 iters), loss = 1.45613
I0428 21:44:41.821892 25678 solver.cpp:237] Train net output #0: loss = 1.45613 (* 1 = 1.45613 loss)
I0428 21:44:41.821899 25678 sgd_solver.cpp:105] Iteration 2904, lr = 0.00631402
I0428 21:44:46.857213 25678 solver.cpp:218] Iteration 2916 (2.38317 iter/s, 5.03531s/12 iters), loss = 1.81019
I0428 21:44:46.857259 25678 solver.cpp:237] Train net output #0: loss = 1.81019 (* 1 = 1.81019 loss)
I0428 21:44:46.857266 25678 sgd_solver.cpp:105] Iteration 2916, lr = 0.00630717
I0428 21:44:51.864584 25678 solver.cpp:218] Iteration 2928 (2.39649 iter/s, 5.00732s/12 iters), loss = 1.73948
I0428 21:44:51.864624 25678 solver.cpp:237] Train net output #0: loss = 1.73948 (* 1 = 1.73948 loss)
I0428 21:44:51.864631 25678 sgd_solver.cpp:105] Iteration 2928, lr = 0.00630032
I0428 21:44:53.710671 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:44:56.869207 25678 solver.cpp:218] Iteration 2940 (2.39781 iter/s, 5.00457s/12 iters), loss = 1.71774
I0428 21:44:56.869247 25678 solver.cpp:237] Train net output #0: loss = 1.71774 (* 1 = 1.71774 loss)
I0428 21:44:56.869256 25678 sgd_solver.cpp:105] Iteration 2940, lr = 0.00629346
I0428 21:45:01.792639 25678 solver.cpp:218] Iteration 2952 (2.43735 iter/s, 4.92337s/12 iters), loss = 1.75274
I0428 21:45:01.792693 25678 solver.cpp:237] Train net output #0: loss = 1.75274 (* 1 = 1.75274 loss)
I0428 21:45:01.792708 25678 sgd_solver.cpp:105] Iteration 2952, lr = 0.0062866
I0428 21:45:03.828528 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_2958.caffemodel
I0428 21:45:06.957280 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_2958.solverstate
I0428 21:45:09.316972 25678 solver.cpp:330] Iteration 2958, Testing net (#0)
I0428 21:45:09.316992 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:45:12.634686 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:45:13.883500 25678 solver.cpp:397] Test net output #0: accuracy = 0.330882
I0428 21:45:13.883548 25678 solver.cpp:397] Test net output #1: loss = 2.93891 (* 1 = 2.93891 loss)
I0428 21:45:15.685281 25678 solver.cpp:218] Iteration 2964 (0.86377 iter/s, 13.8926s/12 iters), loss = 1.71152
I0428 21:45:15.685320 25678 solver.cpp:237] Train net output #0: loss = 1.71152 (* 1 = 1.71152 loss)
I0428 21:45:15.685328 25678 sgd_solver.cpp:105] Iteration 2964, lr = 0.00627973
I0428 21:45:20.597803 25678 solver.cpp:218] Iteration 2976 (2.44276 iter/s, 4.91247s/12 iters), loss = 1.88991
I0428 21:45:20.597843 25678 solver.cpp:237] Train net output #0: loss = 1.88991 (* 1 = 1.88991 loss)
I0428 21:45:20.597851 25678 sgd_solver.cpp:105] Iteration 2976, lr = 0.00627285
I0428 21:45:25.552042 25678 solver.cpp:218] Iteration 2988 (2.42219 iter/s, 4.95419s/12 iters), loss = 1.92746
I0428 21:45:25.552086 25678 solver.cpp:237] Train net output #0: loss = 1.92746 (* 1 = 1.92746 loss)
I0428 21:45:25.552094 25678 sgd_solver.cpp:105] Iteration 2988, lr = 0.00626597
I0428 21:45:30.509104 25678 solver.cpp:218] Iteration 3000 (2.42081 iter/s, 4.95701s/12 iters), loss = 1.71641
I0428 21:45:30.509146 25678 solver.cpp:237] Train net output #0: loss = 1.71641 (* 1 = 1.71641 loss)
I0428 21:45:30.509155 25678 sgd_solver.cpp:105] Iteration 3000, lr = 0.00625909
I0428 21:45:35.487538 25678 solver.cpp:218] Iteration 3012 (2.41042 iter/s, 4.97838s/12 iters), loss = 1.98697
I0428 21:45:35.487612 25678 solver.cpp:237] Train net output #0: loss = 1.98697 (* 1 = 1.98697 loss)
I0428 21:45:35.487620 25678 sgd_solver.cpp:105] Iteration 3012, lr = 0.0062522
I0428 21:45:40.446410 25678 solver.cpp:218] Iteration 3024 (2.41995 iter/s, 4.95878s/12 iters), loss = 1.4681
I0428 21:45:40.446455 25678 solver.cpp:237] Train net output #0: loss = 1.4681 (* 1 = 1.4681 loss)
I0428 21:45:40.446462 25678 sgd_solver.cpp:105] Iteration 3024, lr = 0.00624531
I0428 21:45:44.409579 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:45:45.433588 25678 solver.cpp:218] Iteration 3036 (2.4062 iter/s, 4.98713s/12 iters), loss = 1.60778
I0428 21:45:45.433629 25678 solver.cpp:237] Train net output #0: loss = 1.60778 (* 1 = 1.60778 loss)
I0428 21:45:45.433636 25678 sgd_solver.cpp:105] Iteration 3036, lr = 0.00623841
I0428 21:45:50.387832 25678 solver.cpp:218] Iteration 3048 (2.42219 iter/s, 4.9542s/12 iters), loss = 1.54648
I0428 21:45:50.387869 25678 solver.cpp:237] Train net output #0: loss = 1.54648 (* 1 = 1.54648 loss)
I0428 21:45:50.387877 25678 sgd_solver.cpp:105] Iteration 3048, lr = 0.0062315
I0428 21:45:54.858395 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3060.caffemodel
I0428 21:45:57.938385 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3060.solverstate
I0428 21:46:01.754580 25678 solver.cpp:330] Iteration 3060, Testing net (#0)
I0428 21:46:01.754604 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:46:05.207160 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:46:06.562716 25678 solver.cpp:397] Test net output #0: accuracy = 0.319853
I0428 21:46:06.562916 25678 solver.cpp:397] Test net output #1: loss = 2.93443 (* 1 = 2.93443 loss)
I0428 21:46:06.661566 25678 solver.cpp:218] Iteration 3060 (0.737386 iter/s, 16.2737s/12 iters), loss = 1.57909
I0428 21:46:06.661608 25678 solver.cpp:237] Train net output #0: loss = 1.57909 (* 1 = 1.57909 loss)
I0428 21:46:06.661617 25678 sgd_solver.cpp:105] Iteration 3060, lr = 0.00622459
I0428 21:46:10.774612 25678 solver.cpp:218] Iteration 3072 (2.91759 iter/s, 4.11299s/12 iters), loss = 1.42345
I0428 21:46:10.774653 25678 solver.cpp:237] Train net output #0: loss = 1.42345 (* 1 = 1.42345 loss)
I0428 21:46:10.774660 25678 sgd_solver.cpp:105] Iteration 3072, lr = 0.00621768
I0428 21:46:15.676544 25678 solver.cpp:218] Iteration 3084 (2.44804 iter/s, 4.90188s/12 iters), loss = 1.83944
I0428 21:46:15.676582 25678 solver.cpp:237] Train net output #0: loss = 1.83944 (* 1 = 1.83944 loss)
I0428 21:46:15.676590 25678 sgd_solver.cpp:105] Iteration 3084, lr = 0.00621076
I0428 21:46:20.624819 25678 solver.cpp:218] Iteration 3096 (2.42511 iter/s, 4.94823s/12 iters), loss = 1.62396
I0428 21:46:20.624858 25678 solver.cpp:237] Train net output #0: loss = 1.62396 (* 1 = 1.62396 loss)
I0428 21:46:20.624866 25678 sgd_solver.cpp:105] Iteration 3096, lr = 0.00620384
I0428 21:46:25.560832 25678 solver.cpp:218] Iteration 3108 (2.43114 iter/s, 4.93596s/12 iters), loss = 1.42819
I0428 21:46:25.560870 25678 solver.cpp:237] Train net output #0: loss = 1.42819 (* 1 = 1.42819 loss)
I0428 21:46:25.560879 25678 sgd_solver.cpp:105] Iteration 3108, lr = 0.00619691
I0428 21:46:30.502903 25678 solver.cpp:218] Iteration 3120 (2.42816 iter/s, 4.94202s/12 iters), loss = 1.6173
I0428 21:46:30.502948 25678 solver.cpp:237] Train net output #0: loss = 1.6173 (* 1 = 1.6173 loss)
I0428 21:46:30.502956 25678 sgd_solver.cpp:105] Iteration 3120, lr = 0.00618997
I0428 21:46:35.496039 25678 solver.cpp:218] Iteration 3132 (2.40332 iter/s, 4.99308s/12 iters), loss = 1.31744
I0428 21:46:35.496084 25678 solver.cpp:237] Train net output #0: loss = 1.31744 (* 1 = 1.31744 loss)
I0428 21:46:35.496093 25678 sgd_solver.cpp:105] Iteration 3132, lr = 0.00618303
I0428 21:46:36.535358 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:46:40.412791 25678 solver.cpp:218] Iteration 3144 (2.44067 iter/s, 4.91669s/12 iters), loss = 1.40736
I0428 21:46:40.412915 25678 solver.cpp:237] Train net output #0: loss = 1.40736 (* 1 = 1.40736 loss)
I0428 21:46:40.412925 25678 sgd_solver.cpp:105] Iteration 3144, lr = 0.00617609
I0428 21:46:45.351163 25678 solver.cpp:218] Iteration 3156 (2.43001 iter/s, 4.93824s/12 iters), loss = 1.60324
I0428 21:46:45.351202 25678 solver.cpp:237] Train net output #0: loss = 1.60324 (* 1 = 1.60324 loss)
I0428 21:46:45.351210 25678 sgd_solver.cpp:105] Iteration 3156, lr = 0.00616914
I0428 21:46:47.352691 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3162.caffemodel
I0428 21:46:51.727483 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3162.solverstate
I0428 21:46:54.109081 25678 solver.cpp:330] Iteration 3162, Testing net (#0)
I0428 21:46:54.109098 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:46:57.487377 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:46:58.890465 25678 solver.cpp:397] Test net output #0: accuracy = 0.341299
I0428 21:46:58.890513 25678 solver.cpp:397] Test net output #1: loss = 2.90587 (* 1 = 2.90587 loss)
I0428 21:47:00.731508 25678 solver.cpp:218] Iteration 3168 (0.780218 iter/s, 15.3803s/12 iters), loss = 1.50998
I0428 21:47:00.731551 25678 solver.cpp:237] Train net output #0: loss = 1.50998 (* 1 = 1.50998 loss)
I0428 21:47:00.731559 25678 sgd_solver.cpp:105] Iteration 3168, lr = 0.00616219
I0428 21:47:05.687319 25678 solver.cpp:218] Iteration 3180 (2.42143 iter/s, 4.95576s/12 iters), loss = 1.32182
I0428 21:47:05.687359 25678 solver.cpp:237] Train net output #0: loss = 1.32182 (* 1 = 1.32182 loss)
I0428 21:47:05.687367 25678 sgd_solver.cpp:105] Iteration 3180, lr = 0.00615523
I0428 21:47:10.655462 25678 solver.cpp:218] Iteration 3192 (2.41541 iter/s, 4.96809s/12 iters), loss = 2.08105
I0428 21:47:10.655568 25678 solver.cpp:237] Train net output #0: loss = 2.08105 (* 1 = 2.08105 loss)
I0428 21:47:10.655577 25678 sgd_solver.cpp:105] Iteration 3192, lr = 0.00614827
I0428 21:47:15.620353 25678 solver.cpp:218] Iteration 3204 (2.41703 iter/s, 4.96477s/12 iters), loss = 1.33023
I0428 21:47:15.620396 25678 solver.cpp:237] Train net output #0: loss = 1.33023 (* 1 = 1.33023 loss)
I0428 21:47:15.620404 25678 sgd_solver.cpp:105] Iteration 3204, lr = 0.0061413
I0428 21:47:20.561326 25678 solver.cpp:218] Iteration 3216 (2.4287 iter/s, 4.94092s/12 iters), loss = 1.77789
I0428 21:47:20.561367 25678 solver.cpp:237] Train net output #0: loss = 1.77789 (* 1 = 1.77789 loss)
I0428 21:47:20.561375 25678 sgd_solver.cpp:105] Iteration 3216, lr = 0.00613433
I0428 21:47:25.557507 25678 solver.cpp:218] Iteration 3228 (2.40186 iter/s, 4.99613s/12 iters), loss = 1.23356
I0428 21:47:25.557546 25678 solver.cpp:237] Train net output #0: loss = 1.23356 (* 1 = 1.23356 loss)
I0428 21:47:25.557554 25678 sgd_solver.cpp:105] Iteration 3228, lr = 0.00612735
I0428 21:47:28.791335 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:47:30.546353 25678 solver.cpp:218] Iteration 3240 (2.40539 iter/s, 4.9888s/12 iters), loss = 1.24635
I0428 21:47:30.546393 25678 solver.cpp:237] Train net output #0: loss = 1.24635 (* 1 = 1.24635 loss)
I0428 21:47:30.546401 25678 sgd_solver.cpp:105] Iteration 3240, lr = 0.00612037
I0428 21:47:35.480286 25678 solver.cpp:218] Iteration 3252 (2.43216 iter/s, 4.93388s/12 iters), loss = 1.54419
I0428 21:47:35.480325 25678 solver.cpp:237] Train net output #0: loss = 1.54419 (* 1 = 1.54419 loss)
I0428 21:47:35.480334 25678 sgd_solver.cpp:105] Iteration 3252, lr = 0.00611338
I0428 21:47:39.947120 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3264.caffemodel
I0428 21:47:43.036698 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3264.solverstate
I0428 21:47:45.429945 25678 solver.cpp:330] Iteration 3264, Testing net (#0)
I0428 21:47:45.429963 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:47:48.768518 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:47:50.214850 25678 solver.cpp:397] Test net output #0: accuracy = 0.34375
I0428 21:47:50.214886 25678 solver.cpp:397] Test net output #1: loss = 2.92824 (* 1 = 2.92824 loss)
I0428 21:47:50.311911 25678 solver.cpp:218] Iteration 3264 (0.809084 iter/s, 14.8316s/12 iters), loss = 1.51439
I0428 21:47:50.311954 25678 solver.cpp:237] Train net output #0: loss = 1.51439 (* 1 = 1.51439 loss)
I0428 21:47:50.311961 25678 sgd_solver.cpp:105] Iteration 3264, lr = 0.00610639
I0428 21:47:54.758846 25678 solver.cpp:218] Iteration 3276 (2.69852 iter/s, 4.44688s/12 iters), loss = 1.15048
I0428 21:47:54.758888 25678 solver.cpp:237] Train net output #0: loss = 1.15048 (* 1 = 1.15048 loss)
I0428 21:47:54.758894 25678 sgd_solver.cpp:105] Iteration 3276, lr = 0.0060994
I0428 21:47:59.726858 25678 solver.cpp:218] Iteration 3288 (2.41548 iter/s, 4.96796s/12 iters), loss = 1.43327
I0428 21:47:59.726903 25678 solver.cpp:237] Train net output #0: loss = 1.43327 (* 1 = 1.43327 loss)
I0428 21:47:59.726912 25678 sgd_solver.cpp:105] Iteration 3288, lr = 0.0060924
I0428 21:48:04.681432 25678 solver.cpp:218] Iteration 3300 (2.42203 iter/s, 4.95451s/12 iters), loss = 1.34491
I0428 21:48:04.681473 25678 solver.cpp:237] Train net output #0: loss = 1.34491 (* 1 = 1.34491 loss)
I0428 21:48:04.681480 25678 sgd_solver.cpp:105] Iteration 3300, lr = 0.00608539
I0428 21:48:09.638857 25678 solver.cpp:218] Iteration 3312 (2.42064 iter/s, 4.95737s/12 iters), loss = 1.18876
I0428 21:48:09.638900 25678 solver.cpp:237] Train net output #0: loss = 1.18876 (* 1 = 1.18876 loss)
I0428 21:48:09.638907 25678 sgd_solver.cpp:105] Iteration 3312, lr = 0.00607838
I0428 21:48:14.623406 25678 solver.cpp:218] Iteration 3324 (2.40746 iter/s, 4.9845s/12 iters), loss = 1.6752
I0428 21:48:14.623533 25678 solver.cpp:237] Train net output #0: loss = 1.6752 (* 1 = 1.6752 loss)
I0428 21:48:14.623543 25678 sgd_solver.cpp:105] Iteration 3324, lr = 0.00607137
I0428 21:48:19.571427 25678 solver.cpp:218] Iteration 3336 (2.42528 iter/s, 4.94789s/12 iters), loss = 1.43252
I0428 21:48:19.571470 25678 solver.cpp:237] Train net output #0: loss = 1.43252 (* 1 = 1.43252 loss)
I0428 21:48:19.571477 25678 sgd_solver.cpp:105] Iteration 3336, lr = 0.00606435
I0428 21:48:20.034801 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:48:24.558780 25678 solver.cpp:218] Iteration 3348 (2.40611 iter/s, 4.9873s/12 iters), loss = 1.33656
I0428 21:48:24.558820 25678 solver.cpp:237] Train net output #0: loss = 1.33656 (* 1 = 1.33656 loss)
I0428 21:48:24.558828 25678 sgd_solver.cpp:105] Iteration 3348, lr = 0.00605733
I0428 21:48:29.509342 25678 solver.cpp:218] Iteration 3360 (2.42399 iter/s, 4.95051s/12 iters), loss = 1.07857
I0428 21:48:29.509388 25678 solver.cpp:237] Train net output #0: loss = 1.07857 (* 1 = 1.07857 loss)
I0428 21:48:29.509395 25678 sgd_solver.cpp:105] Iteration 3360, lr = 0.00605031
I0428 21:48:31.527916 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3366.caffemodel
I0428 21:48:34.614858 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3366.solverstate
I0428 21:48:37.000357 25678 solver.cpp:330] Iteration 3366, Testing net (#0)
I0428 21:48:37.000380 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:48:40.349364 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:48:41.732429 25678 solver.cpp:397] Test net output #0: accuracy = 0.363358
I0428 21:48:41.732468 25678 solver.cpp:397] Test net output #1: loss = 2.85466 (* 1 = 2.85466 loss)
I0428 21:48:43.522859 25678 solver.cpp:218] Iteration 3372 (0.856319 iter/s, 14.0135s/12 iters), loss = 1.42077
I0428 21:48:43.522899 25678 solver.cpp:237] Train net output #0: loss = 1.42077 (* 1 = 1.42077 loss)
I0428 21:48:43.522907 25678 sgd_solver.cpp:105] Iteration 3372, lr = 0.00604327
I0428 21:48:48.474954 25678 solver.cpp:218] Iteration 3384 (2.42324 iter/s, 4.95204s/12 iters), loss = 1.21974
I0428 21:48:48.475044 25678 solver.cpp:237] Train net output #0: loss = 1.21974 (* 1 = 1.21974 loss)
I0428 21:48:48.475054 25678 sgd_solver.cpp:105] Iteration 3384, lr = 0.00603624
I0428 21:48:53.358179 25678 solver.cpp:218] Iteration 3396 (2.45744 iter/s, 4.88312s/12 iters), loss = 1.39181
I0428 21:48:53.358222 25678 solver.cpp:237] Train net output #0: loss = 1.39181 (* 1 = 1.39181 loss)
I0428 21:48:53.358229 25678 sgd_solver.cpp:105] Iteration 3396, lr = 0.0060292
I0428 21:48:58.323925 25678 solver.cpp:218] Iteration 3408 (2.41658 iter/s, 4.9657s/12 iters), loss = 1.45313
I0428 21:48:58.323966 25678 solver.cpp:237] Train net output #0: loss = 1.45313 (* 1 = 1.45313 loss)
I0428 21:48:58.323973 25678 sgd_solver.cpp:105] Iteration 3408, lr = 0.00602216
I0428 21:49:03.286516 25678 solver.cpp:218] Iteration 3420 (2.41811 iter/s, 4.96254s/12 iters), loss = 1.18781
I0428 21:49:03.286554 25678 solver.cpp:237] Train net output #0: loss = 1.18781 (* 1 = 1.18781 loss)
I0428 21:49:03.286561 25678 sgd_solver.cpp:105] Iteration 3420, lr = 0.00601511
I0428 21:49:08.209340 25678 solver.cpp:218] Iteration 3432 (2.43765 iter/s, 4.92278s/12 iters), loss = 1.25081
I0428 21:49:08.209376 25678 solver.cpp:237] Train net output #0: loss = 1.25081 (* 1 = 1.25081 loss)
I0428 21:49:08.209384 25678 sgd_solver.cpp:105] Iteration 3432, lr = 0.00600806
I0428 21:49:10.785552 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:49:13.152470 25678 solver.cpp:218] Iteration 3444 (2.42763 iter/s, 4.94309s/12 iters), loss = 1.20448
I0428 21:49:13.152508 25678 solver.cpp:237] Train net output #0: loss = 1.20448 (* 1 = 1.20448 loss)
I0428 21:49:13.152515 25678 sgd_solver.cpp:105] Iteration 3444, lr = 0.006001
I0428 21:49:18.010931 25678 solver.cpp:218] Iteration 3456 (2.46994 iter/s, 4.85842s/12 iters), loss = 1.23912
I0428 21:49:18.010967 25678 solver.cpp:237] Train net output #0: loss = 1.23912 (* 1 = 1.23912 loss)
I0428 21:49:18.010973 25678 sgd_solver.cpp:105] Iteration 3456, lr = 0.00599394
I0428 21:49:22.418409 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3468.caffemodel
I0428 21:49:26.884220 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3468.solverstate
I0428 21:49:29.247284 25678 solver.cpp:330] Iteration 3468, Testing net (#0)
I0428 21:49:29.247303 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:49:29.563562 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:49:32.502116 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:49:34.030750 25678 solver.cpp:397] Test net output #0: accuracy = 0.359069
I0428 21:49:34.030800 25678 solver.cpp:397] Test net output #1: loss = 2.93984 (* 1 = 2.93984 loss)
I0428 21:49:34.127688 25678 solver.cpp:218] Iteration 3468 (0.744568 iter/s, 16.1167s/12 iters), loss = 1.55185
I0428 21:49:34.127728 25678 solver.cpp:237] Train net output #0: loss = 1.55185 (* 1 = 1.55185 loss)
I0428 21:49:34.127737 25678 sgd_solver.cpp:105] Iteration 3468, lr = 0.00598688
I0428 21:49:38.292045 25678 solver.cpp:218] Iteration 3480 (2.88163 iter/s, 4.16431s/12 iters), loss = 1.37847
I0428 21:49:38.292083 25678 solver.cpp:237] Train net output #0: loss = 1.37847 (* 1 = 1.37847 loss)
I0428 21:49:38.292090 25678 sgd_solver.cpp:105] Iteration 3480, lr = 0.00597981
I0428 21:49:43.189759 25678 solver.cpp:218] Iteration 3492 (2.45015 iter/s, 4.89767s/12 iters), loss = 1.2122
I0428 21:49:43.189793 25678 solver.cpp:237] Train net output #0: loss = 1.2122 (* 1 = 1.2122 loss)
I0428 21:49:43.189801 25678 sgd_solver.cpp:105] Iteration 3492, lr = 0.00597274
I0428 21:49:48.138561 25678 solver.cpp:218] Iteration 3504 (2.42485 iter/s, 4.94876s/12 iters), loss = 0.980533
I0428 21:49:48.138607 25678 solver.cpp:237] Train net output #0: loss = 0.980533 (* 1 = 0.980533 loss)
I0428 21:49:48.138617 25678 sgd_solver.cpp:105] Iteration 3504, lr = 0.00596566
I0428 21:49:52.996069 25678 solver.cpp:218] Iteration 3516 (2.47043 iter/s, 4.85746s/12 iters), loss = 1.28731
I0428 21:49:52.996171 25678 solver.cpp:237] Train net output #0: loss = 1.28731 (* 1 = 1.28731 loss)
I0428 21:49:52.996181 25678 sgd_solver.cpp:105] Iteration 3516, lr = 0.00595858
I0428 21:49:57.900710 25678 solver.cpp:218] Iteration 3528 (2.44671 iter/s, 4.90454s/12 iters), loss = 0.905951
I0428 21:49:57.900748 25678 solver.cpp:237] Train net output #0: loss = 0.905951 (* 1 = 0.905951 loss)
I0428 21:49:57.900755 25678 sgd_solver.cpp:105] Iteration 3528, lr = 0.00595149
I0428 21:50:02.608824 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:50:02.866906 25678 solver.cpp:218] Iteration 3540 (2.41636 iter/s, 4.96615s/12 iters), loss = 1.38521
I0428 21:50:02.866947 25678 solver.cpp:237] Train net output #0: loss = 1.38521 (* 1 = 1.38521 loss)
I0428 21:50:02.866956 25678 sgd_solver.cpp:105] Iteration 3540, lr = 0.0059444
I0428 21:50:07.803165 25678 solver.cpp:218] Iteration 3552 (2.43101 iter/s, 4.93621s/12 iters), loss = 1.1241
I0428 21:50:07.803205 25678 solver.cpp:237] Train net output #0: loss = 1.1241 (* 1 = 1.1241 loss)
I0428 21:50:07.803212 25678 sgd_solver.cpp:105] Iteration 3552, lr = 0.00593731
I0428 21:50:12.772089 25678 solver.cpp:218] Iteration 3564 (2.41503 iter/s, 4.96887s/12 iters), loss = 1.42065
I0428 21:50:12.772125 25678 solver.cpp:237] Train net output #0: loss = 1.42065 (* 1 = 1.42065 loss)
I0428 21:50:12.772133 25678 sgd_solver.cpp:105] Iteration 3564, lr = 0.00593022
I0428 21:50:14.783504 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3570.caffemodel
I0428 21:50:18.456956 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3570.solverstate
I0428 21:50:20.865530 25678 solver.cpp:330] Iteration 3570, Testing net (#0)
I0428 21:50:20.865547 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:50:23.846907 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:50:25.429203 25678 solver.cpp:397] Test net output #0: accuracy = 0.387255
I0428 21:50:25.429251 25678 solver.cpp:397] Test net output #1: loss = 2.82252 (* 1 = 2.82252 loss)
I0428 21:50:27.225608 25678 solver.cpp:218] Iteration 3576 (0.83025 iter/s, 14.4535s/12 iters), loss = 1.10678
I0428 21:50:27.225654 25678 solver.cpp:237] Train net output #0: loss = 1.10678 (* 1 = 1.10678 loss)
I0428 21:50:27.225662 25678 sgd_solver.cpp:105] Iteration 3576, lr = 0.00592312
I0428 21:50:32.217646 25678 solver.cpp:218] Iteration 3588 (2.40386 iter/s, 4.99198s/12 iters), loss = 1.32507
I0428 21:50:32.217689 25678 solver.cpp:237] Train net output #0: loss = 1.32507 (* 1 = 1.32507 loss)
I0428 21:50:32.217696 25678 sgd_solver.cpp:105] Iteration 3588, lr = 0.00591601
I0428 21:50:37.174518 25678 solver.cpp:218] Iteration 3600 (2.42091 iter/s, 4.95682s/12 iters), loss = 0.987328
I0428 21:50:37.174562 25678 solver.cpp:237] Train net output #0: loss = 0.987328 (* 1 = 0.987328 loss)
I0428 21:50:37.174571 25678 sgd_solver.cpp:105] Iteration 3600, lr = 0.0059089
I0428 21:50:42.148960 25678 solver.cpp:218] Iteration 3612 (2.41236 iter/s, 4.97439s/12 iters), loss = 1.24531
I0428 21:50:42.149000 25678 solver.cpp:237] Train net output #0: loss = 1.24531 (* 1 = 1.24531 loss)
I0428 21:50:42.149008 25678 sgd_solver.cpp:105] Iteration 3612, lr = 0.00590179
I0428 21:50:47.126298 25678 solver.cpp:218] Iteration 3624 (2.41095 iter/s, 4.97728s/12 iters), loss = 1.09743
I0428 21:50:47.126338 25678 solver.cpp:237] Train net output #0: loss = 1.09743 (* 1 = 1.09743 loss)
I0428 21:50:47.126346 25678 sgd_solver.cpp:105] Iteration 3624, lr = 0.00589468
I0428 21:50:52.095430 25678 solver.cpp:218] Iteration 3636 (2.41493 iter/s, 4.96908s/12 iters), loss = 1.24711
I0428 21:50:52.095472 25678 solver.cpp:237] Train net output #0: loss = 1.24711 (* 1 = 1.24711 loss)
I0428 21:50:52.095480 25678 sgd_solver.cpp:105] Iteration 3636, lr = 0.00588756
I0428 21:50:53.970284 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:50:57.085410 25678 solver.cpp:218] Iteration 3648 (2.40484 iter/s, 4.98993s/12 iters), loss = 1.19929
I0428 21:50:57.085448 25678 solver.cpp:237] Train net output #0: loss = 1.19929 (* 1 = 1.19929 loss)
I0428 21:50:57.085455 25678 sgd_solver.cpp:105] Iteration 3648, lr = 0.00588043
I0428 21:51:02.066696 25678 solver.cpp:218] Iteration 3660 (2.40904 iter/s, 4.98124s/12 iters), loss = 0.875398
I0428 21:51:02.066740 25678 solver.cpp:237] Train net output #0: loss = 0.875398 (* 1 = 0.875398 loss)
I0428 21:51:02.066747 25678 sgd_solver.cpp:105] Iteration 3660, lr = 0.00587331
I0428 21:51:06.590828 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3672.caffemodel
I0428 21:51:09.653414 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3672.solverstate
I0428 21:51:12.012717 25678 solver.cpp:330] Iteration 3672, Testing net (#0)
I0428 21:51:12.012737 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:51:15.062721 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:51:16.571354 25678 solver.cpp:397] Test net output #0: accuracy = 0.365196
I0428 21:51:16.571400 25678 solver.cpp:397] Test net output #1: loss = 2.87742 (* 1 = 2.87742 loss)
I0428 21:51:16.668184 25678 solver.cpp:218] Iteration 3672 (0.821836 iter/s, 14.6014s/12 iters), loss = 0.930787
I0428 21:51:16.668226 25678 solver.cpp:237] Train net output #0: loss = 0.930787 (* 1 = 0.930787 loss)
I0428 21:51:16.668233 25678 sgd_solver.cpp:105] Iteration 3672, lr = 0.00586618
I0428 21:51:20.890301 25678 solver.cpp:218] Iteration 3684 (2.84221 iter/s, 4.22207s/12 iters), loss = 1.1702
I0428 21:51:20.890339 25678 solver.cpp:237] Train net output #0: loss = 1.1702 (* 1 = 1.1702 loss)
I0428 21:51:20.890348 25678 sgd_solver.cpp:105] Iteration 3684, lr = 0.00585904
I0428 21:51:25.931685 25678 solver.cpp:218] Iteration 3696 (2.38032 iter/s, 5.04133s/12 iters), loss = 0.955437
I0428 21:51:25.931813 25678 solver.cpp:237] Train net output #0: loss = 0.955437 (* 1 = 0.955437 loss)
I0428 21:51:25.931821 25678 sgd_solver.cpp:105] Iteration 3696, lr = 0.0058519
I0428 21:51:30.962139 25678 solver.cpp:218] Iteration 3708 (2.38554 iter/s, 5.03032s/12 iters), loss = 0.948532
I0428 21:51:30.962178 25678 solver.cpp:237] Train net output #0: loss = 0.948532 (* 1 = 0.948532 loss)
I0428 21:51:30.962185 25678 sgd_solver.cpp:105] Iteration 3708, lr = 0.00584476
I0428 21:51:36.044157 25678 solver.cpp:218] Iteration 3720 (2.36129 iter/s, 5.08196s/12 iters), loss = 1.13787
I0428 21:51:36.044198 25678 solver.cpp:237] Train net output #0: loss = 1.13787 (* 1 = 1.13787 loss)
I0428 21:51:36.044205 25678 sgd_solver.cpp:105] Iteration 3720, lr = 0.00583762
I0428 21:51:41.097470 25678 solver.cpp:218] Iteration 3732 (2.3747 iter/s, 5.05327s/12 iters), loss = 1.1098
I0428 21:51:41.097508 25678 solver.cpp:237] Train net output #0: loss = 1.1098 (* 1 = 1.1098 loss)
I0428 21:51:41.097517 25678 sgd_solver.cpp:105] Iteration 3732, lr = 0.00583047
I0428 21:51:45.098603 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:51:46.102835 25678 solver.cpp:218] Iteration 3744 (2.39745 iter/s, 5.00531s/12 iters), loss = 1.24559
I0428 21:51:46.102880 25678 solver.cpp:237] Train net output #0: loss = 1.24559 (* 1 = 1.24559 loss)
I0428 21:51:46.102886 25678 sgd_solver.cpp:105] Iteration 3744, lr = 0.00582332
I0428 21:51:51.118376 25678 solver.cpp:218] Iteration 3756 (2.39259 iter/s, 5.01548s/12 iters), loss = 0.992876
I0428 21:51:51.118418 25678 solver.cpp:237] Train net output #0: loss = 0.992876 (* 1 = 0.992876 loss)
I0428 21:51:51.118427 25678 sgd_solver.cpp:105] Iteration 3756, lr = 0.00581616
I0428 21:51:56.146005 25678 solver.cpp:218] Iteration 3768 (2.38684 iter/s, 5.02758s/12 iters), loss = 0.884202
I0428 21:51:56.146119 25678 solver.cpp:237] Train net output #0: loss = 0.884202 (* 1 = 0.884202 loss)
I0428 21:51:56.146127 25678 sgd_solver.cpp:105] Iteration 3768, lr = 0.005809
I0428 21:51:58.174959 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3774.caffemodel
I0428 21:52:01.282402 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3774.solverstate
I0428 21:52:05.203596 25678 solver.cpp:330] Iteration 3774, Testing net (#0)
I0428 21:52:05.203615 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:52:08.407712 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:52:10.083796 25678 solver.cpp:397] Test net output #0: accuracy = 0.364583
I0428 21:52:10.083840 25678 solver.cpp:397] Test net output #1: loss = 2.85758 (* 1 = 2.85758 loss)
I0428 21:52:11.888341 25678 solver.cpp:218] Iteration 3780 (0.762281 iter/s, 15.7422s/12 iters), loss = 0.913048
I0428 21:52:11.888384 25678 solver.cpp:237] Train net output #0: loss = 0.913048 (* 1 = 0.913048 loss)
I0428 21:52:11.888392 25678 sgd_solver.cpp:105] Iteration 3780, lr = 0.00580184
I0428 21:52:16.856281 25678 solver.cpp:218] Iteration 3792 (2.41552 iter/s, 4.96788s/12 iters), loss = 1.05661
I0428 21:52:16.856320 25678 solver.cpp:237] Train net output #0: loss = 1.05661 (* 1 = 1.05661 loss)
I0428 21:52:16.856328 25678 sgd_solver.cpp:105] Iteration 3792, lr = 0.00579468
I0428 21:52:21.755460 25678 solver.cpp:218] Iteration 3804 (2.44942 iter/s, 4.89913s/12 iters), loss = 1.11722
I0428 21:52:21.755499 25678 solver.cpp:237] Train net output #0: loss = 1.11722 (* 1 = 1.11722 loss)
I0428 21:52:21.755506 25678 sgd_solver.cpp:105] Iteration 3804, lr = 0.00578751
I0428 21:52:26.737385 25678 solver.cpp:218] Iteration 3816 (2.40873 iter/s, 4.98187s/12 iters), loss = 0.877677
I0428 21:52:26.737525 25678 solver.cpp:237] Train net output #0: loss = 0.877677 (* 1 = 0.877677 loss)
I0428 21:52:26.737535 25678 sgd_solver.cpp:105] Iteration 3816, lr = 0.00578034
I0428 21:52:31.702505 25678 solver.cpp:218] Iteration 3828 (2.41693 iter/s, 4.96497s/12 iters), loss = 0.998182
I0428 21:52:31.702550 25678 solver.cpp:237] Train net output #0: loss = 0.998182 (* 1 = 0.998182 loss)
I0428 21:52:31.702559 25678 sgd_solver.cpp:105] Iteration 3828, lr = 0.00577316
I0428 21:52:36.670372 25678 solver.cpp:218] Iteration 3840 (2.41555 iter/s, 4.96781s/12 iters), loss = 0.927215
I0428 21:52:36.670410 25678 solver.cpp:237] Train net output #0: loss = 0.927215 (* 1 = 0.927215 loss)
I0428 21:52:36.670418 25678 sgd_solver.cpp:105] Iteration 3840, lr = 0.00576598
I0428 21:52:37.809937 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:52:41.649592 25678 solver.cpp:218] Iteration 3852 (2.41004 iter/s, 4.97917s/12 iters), loss = 0.912852
I0428 21:52:41.649631 25678 solver.cpp:237] Train net output #0: loss = 0.912852 (* 1 = 0.912852 loss)
I0428 21:52:41.649639 25678 sgd_solver.cpp:105] Iteration 3852, lr = 0.0057588
I0428 21:52:46.600879 25678 solver.cpp:218] Iteration 3864 (2.42364 iter/s, 4.95123s/12 iters), loss = 0.850753
I0428 21:52:46.600921 25678 solver.cpp:237] Train net output #0: loss = 0.850753 (* 1 = 0.850753 loss)
I0428 21:52:46.600930 25678 sgd_solver.cpp:105] Iteration 3864, lr = 0.00575161
I0428 21:52:51.089404 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3876.caffemodel
I0428 21:52:54.205943 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3876.solverstate
I0428 21:52:56.572338 25678 solver.cpp:330] Iteration 3876, Testing net (#0)
I0428 21:52:56.572356 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:52:59.659821 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:53:01.385275 25678 solver.cpp:397] Test net output #0: accuracy = 0.36826
I0428 21:53:01.385335 25678 solver.cpp:397] Test net output #1: loss = 2.97138 (* 1 = 2.97138 loss)
I0428 21:53:01.482831 25678 solver.cpp:218] Iteration 3876 (0.806348 iter/s, 14.8819s/12 iters), loss = 1.15221
I0428 21:53:01.482872 25678 solver.cpp:237] Train net output #0: loss = 1.15221 (* 1 = 1.15221 loss)
I0428 21:53:01.482882 25678 sgd_solver.cpp:105] Iteration 3876, lr = 0.00574443
I0428 21:53:05.636749 25678 solver.cpp:218] Iteration 3888 (2.88888 iter/s, 4.15386s/12 iters), loss = 0.861544
I0428 21:53:05.636795 25678 solver.cpp:237] Train net output #0: loss = 0.861544 (* 1 = 0.861544 loss)
I0428 21:53:05.636804 25678 sgd_solver.cpp:105] Iteration 3888, lr = 0.00573723
I0428 21:53:10.676690 25678 solver.cpp:218] Iteration 3900 (2.38101 iter/s, 5.03989s/12 iters), loss = 0.958929
I0428 21:53:10.676734 25678 solver.cpp:237] Train net output #0: loss = 0.958929 (* 1 = 0.958929 loss)
I0428 21:53:10.676743 25678 sgd_solver.cpp:105] Iteration 3900, lr = 0.00573004
I0428 21:53:15.701812 25678 solver.cpp:218] Iteration 3912 (2.38803 iter/s, 5.02507s/12 iters), loss = 1.02496
I0428 21:53:15.701851 25678 solver.cpp:237] Train net output #0: loss = 1.02496 (* 1 = 1.02496 loss)
I0428 21:53:15.701858 25678 sgd_solver.cpp:105] Iteration 3912, lr = 0.00572284
I0428 21:53:20.721098 25678 solver.cpp:218] Iteration 3924 (2.3908 iter/s, 5.01924s/12 iters), loss = 0.791202
I0428 21:53:20.721140 25678 solver.cpp:237] Train net output #0: loss = 0.791202 (* 1 = 0.791202 loss)
I0428 21:53:20.721148 25678 sgd_solver.cpp:105] Iteration 3924, lr = 0.00571564
I0428 21:53:25.744988 25678 solver.cpp:218] Iteration 3936 (2.38861 iter/s, 5.02384s/12 iters), loss = 1.2049
I0428 21:53:25.745030 25678 solver.cpp:237] Train net output #0: loss = 1.2049 (* 1 = 1.2049 loss)
I0428 21:53:25.745038 25678 sgd_solver.cpp:105] Iteration 3936, lr = 0.00570844
I0428 21:53:29.172240 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:53:30.816185 25678 solver.cpp:218] Iteration 3948 (2.36633 iter/s, 5.07115s/12 iters), loss = 1.08704
I0428 21:53:30.816289 25678 solver.cpp:237] Train net output #0: loss = 1.08704 (* 1 = 1.08704 loss)
I0428 21:53:30.816299 25678 sgd_solver.cpp:105] Iteration 3948, lr = 0.00570123
I0428 21:53:35.760452 25678 solver.cpp:218] Iteration 3960 (2.42711 iter/s, 4.94415s/12 iters), loss = 1.00952
I0428 21:53:35.760495 25678 solver.cpp:237] Train net output #0: loss = 1.00952 (* 1 = 1.00952 loss)
I0428 21:53:35.760502 25678 sgd_solver.cpp:105] Iteration 3960, lr = 0.00569402
I0428 21:53:40.736081 25678 solver.cpp:218] Iteration 3972 (2.41178 iter/s, 4.97557s/12 iters), loss = 1.07595
I0428 21:53:40.736129 25678 solver.cpp:237] Train net output #0: loss = 1.07595 (* 1 = 1.07595 loss)
I0428 21:53:40.736136 25678 sgd_solver.cpp:105] Iteration 3972, lr = 0.00568681
I0428 21:53:42.752597 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_3978.caffemodel
I0428 21:53:47.158176 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_3978.solverstate
I0428 21:53:50.021811 25678 solver.cpp:330] Iteration 3978, Testing net (#0)
I0428 21:53:50.021831 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:53:53.065047 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:53:54.821360 25678 solver.cpp:397] Test net output #0: accuracy = 0.362132
I0428 21:53:54.821405 25678 solver.cpp:397] Test net output #1: loss = 2.9693 (* 1 = 2.9693 loss)
I0428 21:53:56.747217 25678 solver.cpp:218] Iteration 3984 (0.74948 iter/s, 16.0111s/12 iters), loss = 0.694615
I0428 21:53:56.747262 25678 solver.cpp:237] Train net output #0: loss = 0.694615 (* 1 = 0.694615 loss)
I0428 21:53:56.747269 25678 sgd_solver.cpp:105] Iteration 3984, lr = 0.00567959
I0428 21:54:01.799129 25678 solver.cpp:218] Iteration 3996 (2.37536 iter/s, 5.05186s/12 iters), loss = 1.04216
I0428 21:54:01.799193 25678 solver.cpp:237] Train net output #0: loss = 1.04216 (* 1 = 1.04216 loss)
I0428 21:54:01.799201 25678 sgd_solver.cpp:105] Iteration 3996, lr = 0.00567237
I0428 21:54:06.748793 25678 solver.cpp:218] Iteration 4008 (2.42444 iter/s, 4.94959s/12 iters), loss = 0.884232
I0428 21:54:06.748838 25678 solver.cpp:237] Train net output #0: loss = 0.884232 (* 1 = 0.884232 loss)
I0428 21:54:06.748847 25678 sgd_solver.cpp:105] Iteration 4008, lr = 0.00566515
I0428 21:54:11.720355 25678 solver.cpp:218] Iteration 4020 (2.41375 iter/s, 4.97151s/12 iters), loss = 0.82545
I0428 21:54:11.720393 25678 solver.cpp:237] Train net output #0: loss = 0.82545 (* 1 = 0.82545 loss)
I0428 21:54:11.720402 25678 sgd_solver.cpp:105] Iteration 4020, lr = 0.00565793
I0428 21:54:16.702075 25678 solver.cpp:218] Iteration 4032 (2.40883 iter/s, 4.98167s/12 iters), loss = 1.19955
I0428 21:54:16.702116 25678 solver.cpp:237] Train net output #0: loss = 1.19955 (* 1 = 1.19955 loss)
I0428 21:54:16.702124 25678 sgd_solver.cpp:105] Iteration 4032, lr = 0.0056507
I0428 21:54:21.657367 25678 solver.cpp:218] Iteration 4044 (2.42168 iter/s, 4.95524s/12 iters), loss = 0.748348
I0428 21:54:21.657413 25678 solver.cpp:237] Train net output #0: loss = 0.748348 (* 1 = 0.748348 loss)
I0428 21:54:21.657420 25678 sgd_solver.cpp:105] Iteration 4044, lr = 0.00564347
I0428 21:54:22.151901 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:54:26.646198 25678 solver.cpp:218] Iteration 4056 (2.4054 iter/s, 4.98878s/12 iters), loss = 1.26481
I0428 21:54:26.646236 25678 solver.cpp:237] Train net output #0: loss = 1.26481 (* 1 = 1.26481 loss)
I0428 21:54:26.646243 25678 sgd_solver.cpp:105] Iteration 4056, lr = 0.00563624
I0428 21:54:31.602977 25678 solver.cpp:218] Iteration 4068 (2.42095 iter/s, 4.95673s/12 iters), loss = 0.9846
I0428 21:54:31.603016 25678 solver.cpp:237] Train net output #0: loss = 0.9846 (* 1 = 0.9846 loss)
I0428 21:54:31.603024 25678 sgd_solver.cpp:105] Iteration 4068, lr = 0.005629
I0428 21:54:36.089594 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4080.caffemodel
I0428 21:54:39.162938 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4080.solverstate
I0428 21:54:42.770758 25678 solver.cpp:330] Iteration 4080, Testing net (#0)
I0428 21:54:42.770782 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:54:45.829063 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:54:47.522276 25678 solver.cpp:397] Test net output #0: accuracy = 0.385417
I0428 21:54:47.522325 25678 solver.cpp:397] Test net output #1: loss = 2.98773 (* 1 = 2.98773 loss)
I0428 21:54:47.619693 25678 solver.cpp:218] Iteration 4080 (0.749219 iter/s, 16.0167s/12 iters), loss = 0.868573
I0428 21:54:47.619733 25678 solver.cpp:237] Train net output #0: loss = 0.868573 (* 1 = 0.868573 loss)
I0428 21:54:47.619742 25678 sgd_solver.cpp:105] Iteration 4080, lr = 0.00562176
I0428 21:54:51.758745 25678 solver.cpp:218] Iteration 4092 (2.89925 iter/s, 4.139s/12 iters), loss = 0.802637
I0428 21:54:51.758785 25678 solver.cpp:237] Train net output #0: loss = 0.802637 (* 1 = 0.802637 loss)
I0428 21:54:51.758793 25678 sgd_solver.cpp:105] Iteration 4092, lr = 0.00561452
I0428 21:54:56.745743 25678 solver.cpp:218] Iteration 4104 (2.40628 iter/s, 4.98695s/12 iters), loss = 0.964841
I0428 21:54:56.745784 25678 solver.cpp:237] Train net output #0: loss = 0.964841 (* 1 = 0.964841 loss)
I0428 21:54:56.745791 25678 sgd_solver.cpp:105] Iteration 4104, lr = 0.00560728
I0428 21:55:01.690671 25678 solver.cpp:218] Iteration 4116 (2.42675 iter/s, 4.94488s/12 iters), loss = 0.902241
I0428 21:55:01.690711 25678 solver.cpp:237] Train net output #0: loss = 0.902241 (* 1 = 0.902241 loss)
I0428 21:55:01.690721 25678 sgd_solver.cpp:105] Iteration 4116, lr = 0.00560004
I0428 21:55:06.685266 25678 solver.cpp:218] Iteration 4128 (2.40262 iter/s, 4.99455s/12 iters), loss = 0.666271
I0428 21:55:06.685390 25678 solver.cpp:237] Train net output #0: loss = 0.666271 (* 1 = 0.666271 loss)
I0428 21:55:06.685400 25678 sgd_solver.cpp:105] Iteration 4128, lr = 0.00559279
I0428 21:55:11.678267 25678 solver.cpp:218] Iteration 4140 (2.40343 iter/s, 4.99286s/12 iters), loss = 0.795333
I0428 21:55:11.678313 25678 solver.cpp:237] Train net output #0: loss = 0.795333 (* 1 = 0.795333 loss)
I0428 21:55:11.678323 25678 sgd_solver.cpp:105] Iteration 4140, lr = 0.00558554
I0428 21:55:14.288832 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:55:16.643412 25678 solver.cpp:218] Iteration 4152 (2.41687 iter/s, 4.96509s/12 iters), loss = 0.983388
I0428 21:55:16.643453 25678 solver.cpp:237] Train net output #0: loss = 0.983388 (* 1 = 0.983388 loss)
I0428 21:55:16.643461 25678 sgd_solver.cpp:105] Iteration 4152, lr = 0.00557828
I0428 21:55:17.844643 25678 blocking_queue.cpp:49] Waiting for data
I0428 21:55:21.596438 25678 solver.cpp:218] Iteration 4164 (2.42278 iter/s, 4.95298s/12 iters), loss = 0.866622
I0428 21:55:21.596478 25678 solver.cpp:237] Train net output #0: loss = 0.866622 (* 1 = 0.866622 loss)
I0428 21:55:21.596485 25678 sgd_solver.cpp:105] Iteration 4164, lr = 0.00557103
I0428 21:55:26.578809 25678 solver.cpp:218] Iteration 4176 (2.40852 iter/s, 4.98232s/12 iters), loss = 0.745677
I0428 21:55:26.578860 25678 solver.cpp:237] Train net output #0: loss = 0.745677 (* 1 = 0.745677 loss)
I0428 21:55:26.578869 25678 sgd_solver.cpp:105] Iteration 4176, lr = 0.00556377
I0428 21:55:28.588156 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4182.caffemodel
I0428 21:55:31.651407 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4182.solverstate
I0428 21:55:34.621810 25678 solver.cpp:330] Iteration 4182, Testing net (#0)
I0428 21:55:34.621824 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:55:37.587635 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:55:39.428521 25678 solver.cpp:397] Test net output #0: accuracy = 0.382966
I0428 21:55:39.428570 25678 solver.cpp:397] Test net output #1: loss = 2.90333 (* 1 = 2.90333 loss)
I0428 21:55:41.275939 25678 solver.cpp:218] Iteration 4188 (0.816489 iter/s, 14.6971s/12 iters), loss = 0.814901
I0428 21:55:41.275981 25678 solver.cpp:237] Train net output #0: loss = 0.814901 (* 1 = 0.814901 loss)
I0428 21:55:41.275988 25678 sgd_solver.cpp:105] Iteration 4188, lr = 0.00555651
I0428 21:55:46.282701 25678 solver.cpp:218] Iteration 4200 (2.39678 iter/s, 5.00671s/12 iters), loss = 0.976634
I0428 21:55:46.282750 25678 solver.cpp:237] Train net output #0: loss = 0.976634 (* 1 = 0.976634 loss)
I0428 21:55:46.282758 25678 sgd_solver.cpp:105] Iteration 4200, lr = 0.00554925
I0428 21:55:51.283578 25678 solver.cpp:218] Iteration 4212 (2.39961 iter/s, 5.00082s/12 iters), loss = 0.606541
I0428 21:55:51.283622 25678 solver.cpp:237] Train net output #0: loss = 0.606541 (* 1 = 0.606541 loss)
I0428 21:55:51.283630 25678 sgd_solver.cpp:105] Iteration 4212, lr = 0.00554198
I0428 21:55:56.227128 25678 solver.cpp:218] Iteration 4224 (2.42743 iter/s, 4.94349s/12 iters), loss = 0.712126
I0428 21:55:56.227178 25678 solver.cpp:237] Train net output #0: loss = 0.712126 (* 1 = 0.712126 loss)
I0428 21:55:56.227188 25678 sgd_solver.cpp:105] Iteration 4224, lr = 0.00553471
I0428 21:56:01.224889 25678 solver.cpp:218] Iteration 4236 (2.4011 iter/s, 4.9977s/12 iters), loss = 0.789095
I0428 21:56:01.224928 25678 solver.cpp:237] Train net output #0: loss = 0.789095 (* 1 = 0.789095 loss)
I0428 21:56:01.224937 25678 sgd_solver.cpp:105] Iteration 4236, lr = 0.00552744
I0428 21:56:05.977640 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:56:06.207078 25678 solver.cpp:218] Iteration 4248 (2.4086 iter/s, 4.98214s/12 iters), loss = 0.880758
I0428 21:56:06.207118 25678 solver.cpp:237] Train net output #0: loss = 0.880758 (* 1 = 0.880758 loss)
I0428 21:56:06.207125 25678 sgd_solver.cpp:105] Iteration 4248, lr = 0.00552017
I0428 21:56:11.106554 25678 solver.cpp:218] Iteration 4260 (2.44927 iter/s, 4.89942s/12 iters), loss = 0.666367
I0428 21:56:11.106714 25678 solver.cpp:237] Train net output #0: loss = 0.666367 (* 1 = 0.666367 loss)
I0428 21:56:11.106722 25678 sgd_solver.cpp:105] Iteration 4260, lr = 0.0055129
I0428 21:56:16.065847 25678 solver.cpp:218] Iteration 4272 (2.41978 iter/s, 4.95913s/12 iters), loss = 0.682719
I0428 21:56:16.065884 25678 solver.cpp:237] Train net output #0: loss = 0.682719 (* 1 = 0.682719 loss)
I0428 21:56:16.065892 25678 sgd_solver.cpp:105] Iteration 4272, lr = 0.00550562
I0428 21:56:20.497304 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4284.caffemodel
I0428 21:56:23.612010 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4284.solverstate
I0428 21:56:26.151993 25678 solver.cpp:330] Iteration 4284, Testing net (#0)
I0428 21:56:26.152010 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:56:29.071979 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:56:30.952538 25678 solver.cpp:397] Test net output #0: accuracy = 0.389093
I0428 21:56:30.952585 25678 solver.cpp:397] Test net output #1: loss = 2.86061 (* 1 = 2.86061 loss)
I0428 21:56:31.049767 25678 solver.cpp:218] Iteration 4284 (0.80086 iter/s, 14.9839s/12 iters), loss = 0.809251
I0428 21:56:31.049806 25678 solver.cpp:237] Train net output #0: loss = 0.809251 (* 1 = 0.809251 loss)
I0428 21:56:31.049813 25678 sgd_solver.cpp:105] Iteration 4284, lr = 0.00549834
I0428 21:56:35.177935 25678 solver.cpp:218] Iteration 4296 (2.90689 iter/s, 4.12812s/12 iters), loss = 0.746521
I0428 21:56:35.177973 25678 solver.cpp:237] Train net output #0: loss = 0.746521 (* 1 = 0.746521 loss)
I0428 21:56:35.177980 25678 sgd_solver.cpp:105] Iteration 4296, lr = 0.00549106
I0428 21:56:40.098685 25678 solver.cpp:218] Iteration 4308 (2.43867 iter/s, 4.92071s/12 iters), loss = 0.684475
I0428 21:56:40.098718 25678 solver.cpp:237] Train net output #0: loss = 0.684475 (* 1 = 0.684475 loss)
I0428 21:56:40.098726 25678 sgd_solver.cpp:105] Iteration 4308, lr = 0.00548378
I0428 21:56:45.055649 25678 solver.cpp:218] Iteration 4320 (2.42086 iter/s, 4.95692s/12 iters), loss = 0.371536
I0428 21:56:45.055788 25678 solver.cpp:237] Train net output #0: loss = 0.371536 (* 1 = 0.371536 loss)
I0428 21:56:45.055796 25678 sgd_solver.cpp:105] Iteration 4320, lr = 0.00547649
I0428 21:56:50.042048 25678 solver.cpp:218] Iteration 4332 (2.40661 iter/s, 4.98626s/12 iters), loss = 1.00971
I0428 21:56:50.042084 25678 solver.cpp:237] Train net output #0: loss = 1.00971 (* 1 = 1.00971 loss)
I0428 21:56:50.042093 25678 sgd_solver.cpp:105] Iteration 4332, lr = 0.0054692
I0428 21:56:54.978950 25678 solver.cpp:218] Iteration 4344 (2.4307 iter/s, 4.93686s/12 iters), loss = 0.761321
I0428 21:56:54.978989 25678 solver.cpp:237] Train net output #0: loss = 0.761321 (* 1 = 0.761321 loss)
I0428 21:56:54.978997 25678 sgd_solver.cpp:105] Iteration 4344, lr = 0.00546191
I0428 21:56:56.880511 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:57:00.015061 25678 solver.cpp:218] Iteration 4356 (2.38281 iter/s, 5.03607s/12 iters), loss = 0.708874
I0428 21:57:00.015094 25678 solver.cpp:237] Train net output #0: loss = 0.708874 (* 1 = 0.708874 loss)
I0428 21:57:00.015100 25678 sgd_solver.cpp:105] Iteration 4356, lr = 0.00545462
I0428 21:57:04.984808 25678 solver.cpp:218] Iteration 4368 (2.41463 iter/s, 4.96971s/12 iters), loss = 0.660627
I0428 21:57:04.984843 25678 solver.cpp:237] Train net output #0: loss = 0.660627 (* 1 = 0.660627 loss)
I0428 21:57:04.984850 25678 sgd_solver.cpp:105] Iteration 4368, lr = 0.00544733
I0428 21:57:09.944974 25678 solver.cpp:218] Iteration 4380 (2.41929 iter/s, 4.96012s/12 iters), loss = 0.489257
I0428 21:57:09.945012 25678 solver.cpp:237] Train net output #0: loss = 0.489257 (* 1 = 0.489257 loss)
I0428 21:57:09.945019 25678 sgd_solver.cpp:105] Iteration 4380, lr = 0.00544003
I0428 21:57:11.954259 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4386.caffemodel
I0428 21:57:15.038403 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4386.solverstate
I0428 21:57:17.399718 25678 solver.cpp:330] Iteration 4386, Testing net (#0)
I0428 21:57:17.399796 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:57:20.274492 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:57:22.187000 25678 solver.cpp:397] Test net output #0: accuracy = 0.403799
I0428 21:57:22.187045 25678 solver.cpp:397] Test net output #1: loss = 2.7943 (* 1 = 2.7943 loss)
I0428 21:57:24.016758 25678 solver.cpp:218] Iteration 4392 (0.852772 iter/s, 14.0718s/12 iters), loss = 0.480845
I0428 21:57:24.016798 25678 solver.cpp:237] Train net output #0: loss = 0.480845 (* 1 = 0.480845 loss)
I0428 21:57:24.016804 25678 sgd_solver.cpp:105] Iteration 4392, lr = 0.00543274
I0428 21:57:28.950640 25678 solver.cpp:218] Iteration 4404 (2.43219 iter/s, 4.93383s/12 iters), loss = 0.702453
I0428 21:57:28.950677 25678 solver.cpp:237] Train net output #0: loss = 0.702453 (* 1 = 0.702453 loss)
I0428 21:57:28.950685 25678 sgd_solver.cpp:105] Iteration 4404, lr = 0.00542544
I0428 21:57:33.981559 25678 solver.cpp:218] Iteration 4416 (2.38527 iter/s, 5.03088s/12 iters), loss = 0.68417
I0428 21:57:33.981593 25678 solver.cpp:237] Train net output #0: loss = 0.68417 (* 1 = 0.68417 loss)
I0428 21:57:33.981600 25678 sgd_solver.cpp:105] Iteration 4416, lr = 0.00541814
I0428 21:57:38.992506 25678 solver.cpp:218] Iteration 4428 (2.39478 iter/s, 5.0109s/12 iters), loss = 0.732942
I0428 21:57:38.992544 25678 solver.cpp:237] Train net output #0: loss = 0.732942 (* 1 = 0.732942 loss)
I0428 21:57:38.992552 25678 sgd_solver.cpp:105] Iteration 4428, lr = 0.00541084
I0428 21:57:43.944129 25678 solver.cpp:218] Iteration 4440 (2.42347 iter/s, 4.95158s/12 iters), loss = 0.531838
I0428 21:57:43.944169 25678 solver.cpp:237] Train net output #0: loss = 0.531838 (* 1 = 0.531838 loss)
I0428 21:57:43.944177 25678 sgd_solver.cpp:105] Iteration 4440, lr = 0.00540353
I0428 21:57:47.930837 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:57:48.882130 25678 solver.cpp:218] Iteration 4452 (2.43016 iter/s, 4.93796s/12 iters), loss = 0.595135
I0428 21:57:48.882166 25678 solver.cpp:237] Train net output #0: loss = 0.595135 (* 1 = 0.595135 loss)
I0428 21:57:48.882174 25678 sgd_solver.cpp:105] Iteration 4452, lr = 0.00539623
I0428 21:57:53.826161 25678 solver.cpp:218] Iteration 4464 (2.42719 iter/s, 4.94399s/12 iters), loss = 0.709964
I0428 21:57:53.826198 25678 solver.cpp:237] Train net output #0: loss = 0.709964 (* 1 = 0.709964 loss)
I0428 21:57:53.826205 25678 sgd_solver.cpp:105] Iteration 4464, lr = 0.00538892
I0428 21:57:58.717237 25678 solver.cpp:218] Iteration 4476 (2.45347 iter/s, 4.89104s/12 iters), loss = 0.597241
I0428 21:57:58.717273 25678 solver.cpp:237] Train net output #0: loss = 0.597241 (* 1 = 0.597241 loss)
I0428 21:57:58.717281 25678 sgd_solver.cpp:105] Iteration 4476, lr = 0.00538161
I0428 21:58:03.217303 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4488.caffemodel
I0428 21:58:06.299645 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4488.solverstate
I0428 21:58:08.662106 25678 solver.cpp:330] Iteration 4488, Testing net (#0)
I0428 21:58:08.662124 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:58:11.500062 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:58:13.339416 25678 solver.cpp:397] Test net output #0: accuracy = 0.403186
I0428 21:58:13.339459 25678 solver.cpp:397] Test net output #1: loss = 2.8853 (* 1 = 2.8853 loss)
I0428 21:58:13.436471 25678 solver.cpp:218] Iteration 4488 (0.815262 iter/s, 14.7192s/12 iters), loss = 0.690616
I0428 21:58:13.436511 25678 solver.cpp:237] Train net output #0: loss = 0.690616 (* 1 = 0.690616 loss)
I0428 21:58:13.436519 25678 sgd_solver.cpp:105] Iteration 4488, lr = 0.0053743
I0428 21:58:17.516990 25678 solver.cpp:218] Iteration 4500 (2.94084 iter/s, 4.08047s/12 iters), loss = 0.526062
I0428 21:58:17.517027 25678 solver.cpp:237] Train net output #0: loss = 0.526062 (* 1 = 0.526062 loss)
I0428 21:58:17.517035 25678 sgd_solver.cpp:105] Iteration 4500, lr = 0.00536699
I0428 21:58:22.407549 25678 solver.cpp:218] Iteration 4512 (2.45373 iter/s, 4.89051s/12 iters), loss = 0.548347
I0428 21:58:22.407644 25678 solver.cpp:237] Train net output #0: loss = 0.548347 (* 1 = 0.548347 loss)
I0428 21:58:22.407653 25678 sgd_solver.cpp:105] Iteration 4512, lr = 0.00535967
I0428 21:58:27.259742 25678 solver.cpp:218] Iteration 4524 (2.47316 iter/s, 4.85208s/12 iters), loss = 0.45843
I0428 21:58:27.259780 25678 solver.cpp:237] Train net output #0: loss = 0.45843 (* 1 = 0.45843 loss)
I0428 21:58:27.259788 25678 sgd_solver.cpp:105] Iteration 4524, lr = 0.00535236
I0428 21:58:32.109400 25678 solver.cpp:218] Iteration 4536 (2.47442 iter/s, 4.84961s/12 iters), loss = 0.597991
I0428 21:58:32.109441 25678 solver.cpp:237] Train net output #0: loss = 0.597991 (* 1 = 0.597991 loss)
I0428 21:58:32.109448 25678 sgd_solver.cpp:105] Iteration 4536, lr = 0.00534504
I0428 21:58:37.055697 25678 solver.cpp:218] Iteration 4548 (2.42608 iter/s, 4.94624s/12 iters), loss = 0.479062
I0428 21:58:37.055743 25678 solver.cpp:237] Train net output #0: loss = 0.479062 (* 1 = 0.479062 loss)
I0428 21:58:37.055752 25678 sgd_solver.cpp:105] Iteration 4548, lr = 0.00533772
I0428 21:58:38.308517 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:58:41.980082 25678 solver.cpp:218] Iteration 4560 (2.43688 iter/s, 4.92434s/12 iters), loss = 0.615106
I0428 21:58:41.980118 25678 solver.cpp:237] Train net output #0: loss = 0.615106 (* 1 = 0.615106 loss)
I0428 21:58:41.980125 25678 sgd_solver.cpp:105] Iteration 4560, lr = 0.0053304
I0428 21:58:46.867861 25678 solver.cpp:218] Iteration 4572 (2.45513 iter/s, 4.88773s/12 iters), loss = 0.555514
I0428 21:58:46.867899 25678 solver.cpp:237] Train net output #0: loss = 0.555514 (* 1 = 0.555514 loss)
I0428 21:58:46.867908 25678 sgd_solver.cpp:105] Iteration 4572, lr = 0.00532308
I0428 21:58:51.810590 25678 solver.cpp:218] Iteration 4584 (2.42783 iter/s, 4.94268s/12 iters), loss = 0.441836
I0428 21:58:51.810631 25678 solver.cpp:237] Train net output #0: loss = 0.441836 (* 1 = 0.441836 loss)
I0428 21:58:51.810638 25678 sgd_solver.cpp:105] Iteration 4584, lr = 0.00531576
I0428 21:58:53.797904 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4590.caffemodel
I0428 21:58:57.266716 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4590.solverstate
I0428 21:59:00.134490 25678 solver.cpp:330] Iteration 4590, Testing net (#0)
I0428 21:59:00.134508 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:59:02.952572 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:59:05.031577 25678 solver.cpp:397] Test net output #0: accuracy = 0.389706
I0428 21:59:05.031625 25678 solver.cpp:397] Test net output #1: loss = 2.86584 (* 1 = 2.86584 loss)
I0428 21:59:06.831317 25678 solver.cpp:218] Iteration 4596 (0.798898 iter/s, 15.0207s/12 iters), loss = 0.656748
I0428 21:59:06.831362 25678 solver.cpp:237] Train net output #0: loss = 0.656748 (* 1 = 0.656748 loss)
I0428 21:59:06.831369 25678 sgd_solver.cpp:105] Iteration 4596, lr = 0.00530843
I0428 21:59:11.740458 25678 solver.cpp:218] Iteration 4608 (2.44444 iter/s, 4.90909s/12 iters), loss = 0.698939
I0428 21:59:11.740494 25678 solver.cpp:237] Train net output #0: loss = 0.698939 (* 1 = 0.698939 loss)
I0428 21:59:11.740501 25678 sgd_solver.cpp:105] Iteration 4608, lr = 0.00530111
I0428 21:59:16.704663 25678 solver.cpp:218] Iteration 4620 (2.41733 iter/s, 4.96416s/12 iters), loss = 0.381076
I0428 21:59:16.704699 25678 solver.cpp:237] Train net output #0: loss = 0.381076 (* 1 = 0.381076 loss)
I0428 21:59:16.704707 25678 sgd_solver.cpp:105] Iteration 4620, lr = 0.00529378
I0428 21:59:21.656700 25678 solver.cpp:218] Iteration 4632 (2.42327 iter/s, 4.952s/12 iters), loss = 0.545307
I0428 21:59:21.656735 25678 solver.cpp:237] Train net output #0: loss = 0.545307 (* 1 = 0.545307 loss)
I0428 21:59:21.656742 25678 sgd_solver.cpp:105] Iteration 4632, lr = 0.00528645
I0428 21:59:26.623879 25678 solver.cpp:218] Iteration 4644 (2.41588 iter/s, 4.96713s/12 iters), loss = 0.545767
I0428 21:59:26.623991 25678 solver.cpp:237] Train net output #0: loss = 0.545767 (* 1 = 0.545767 loss)
I0428 21:59:26.623999 25678 sgd_solver.cpp:105] Iteration 4644, lr = 0.00527912
I0428 21:59:30.000939 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:59:31.610889 25678 solver.cpp:218] Iteration 4656 (2.40631 iter/s, 4.9869s/12 iters), loss = 0.534958
I0428 21:59:31.610926 25678 solver.cpp:237] Train net output #0: loss = 0.534958 (* 1 = 0.534958 loss)
I0428 21:59:31.610934 25678 sgd_solver.cpp:105] Iteration 4656, lr = 0.00527179
I0428 21:59:36.535184 25678 solver.cpp:218] Iteration 4668 (2.43692 iter/s, 4.92425s/12 iters), loss = 0.47123
I0428 21:59:36.535221 25678 solver.cpp:237] Train net output #0: loss = 0.47123 (* 1 = 0.47123 loss)
I0428 21:59:36.535229 25678 sgd_solver.cpp:105] Iteration 4668, lr = 0.00526446
I0428 21:59:41.514446 25678 solver.cpp:218] Iteration 4680 (2.41002 iter/s, 4.97921s/12 iters), loss = 0.68228
I0428 21:59:41.514483 25678 solver.cpp:237] Train net output #0: loss = 0.68228 (* 1 = 0.68228 loss)
I0428 21:59:41.514490 25678 sgd_solver.cpp:105] Iteration 4680, lr = 0.00525713
I0428 21:59:45.985121 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4692.caffemodel
I0428 21:59:49.068034 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4692.solverstate
I0428 21:59:51.422698 25678 solver.cpp:330] Iteration 4692, Testing net (#0)
I0428 21:59:51.422719 25678 net.cpp:676] Ignoring source layer train-data
I0428 21:59:54.098816 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 21:59:55.989902 25678 solver.cpp:397] Test net output #0: accuracy = 0.386029
I0428 21:59:55.989943 25678 solver.cpp:397] Test net output #1: loss = 2.91752 (* 1 = 2.91752 loss)
I0428 21:59:56.087810 25678 solver.cpp:218] Iteration 4692 (0.823422 iter/s, 14.5733s/12 iters), loss = 0.465216
I0428 21:59:56.087850 25678 solver.cpp:237] Train net output #0: loss = 0.465216 (* 1 = 0.465216 loss)
I0428 21:59:56.087858 25678 sgd_solver.cpp:105] Iteration 4692, lr = 0.00524979
I0428 22:00:00.194895 25678 solver.cpp:218] Iteration 4704 (2.92182 iter/s, 4.10703s/12 iters), loss = 0.421842
I0428 22:00:00.195041 25678 solver.cpp:237] Train net output #0: loss = 0.421842 (* 1 = 0.421842 loss)
I0428 22:00:00.195051 25678 sgd_solver.cpp:105] Iteration 4704, lr = 0.00524246
I0428 22:00:05.060210 25678 solver.cpp:218] Iteration 4716 (2.46652 iter/s, 4.86516s/12 iters), loss = 0.681388
I0428 22:00:05.060250 25678 solver.cpp:237] Train net output #0: loss = 0.681388 (* 1 = 0.681388 loss)
I0428 22:00:05.060256 25678 sgd_solver.cpp:105] Iteration 4716, lr = 0.00523512
I0428 22:00:09.993031 25678 solver.cpp:218] Iteration 4728 (2.43271 iter/s, 4.93277s/12 iters), loss = 0.756981
I0428 22:00:09.993072 25678 solver.cpp:237] Train net output #0: loss = 0.756981 (* 1 = 0.756981 loss)
I0428 22:00:09.993079 25678 sgd_solver.cpp:105] Iteration 4728, lr = 0.00522778
I0428 22:00:14.937988 25678 solver.cpp:218] Iteration 4740 (2.42674 iter/s, 4.94491s/12 iters), loss = 0.488391
I0428 22:00:14.938025 25678 solver.cpp:237] Train net output #0: loss = 0.488391 (* 1 = 0.488391 loss)
I0428 22:00:14.938032 25678 sgd_solver.cpp:105] Iteration 4740, lr = 0.00522045
I0428 22:00:19.882584 25678 solver.cpp:218] Iteration 4752 (2.42692 iter/s, 4.94454s/12 iters), loss = 0.576898
I0428 22:00:19.882634 25678 solver.cpp:237] Train net output #0: loss = 0.576898 (* 1 = 0.576898 loss)
I0428 22:00:19.882642 25678 sgd_solver.cpp:105] Iteration 4752, lr = 0.00521311
I0428 22:00:20.404546 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:00:24.773231 25678 solver.cpp:218] Iteration 4764 (2.45369 iter/s, 4.89059s/12 iters), loss = 0.718185
I0428 22:00:24.773268 25678 solver.cpp:237] Train net output #0: loss = 0.718185 (* 1 = 0.718185 loss)
I0428 22:00:24.773275 25678 sgd_solver.cpp:105] Iteration 4764, lr = 0.00520577
I0428 22:00:29.701999 25678 solver.cpp:218] Iteration 4776 (2.43471 iter/s, 4.92872s/12 iters), loss = 0.539716
I0428 22:00:29.702037 25678 solver.cpp:237] Train net output #0: loss = 0.539716 (* 1 = 0.539716 loss)
I0428 22:00:29.702045 25678 sgd_solver.cpp:105] Iteration 4776, lr = 0.00519843
I0428 22:00:34.668509 25678 solver.cpp:218] Iteration 4788 (2.41621 iter/s, 4.96646s/12 iters), loss = 0.492461
I0428 22:00:34.668606 25678 solver.cpp:237] Train net output #0: loss = 0.492461 (* 1 = 0.492461 loss)
I0428 22:00:34.668615 25678 sgd_solver.cpp:105] Iteration 4788, lr = 0.00519108
I0428 22:00:36.669332 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4794.caffemodel
I0428 22:00:39.749512 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4794.solverstate
I0428 22:00:42.107163 25678 solver.cpp:330] Iteration 4794, Testing net (#0)
I0428 22:00:42.107182 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:00:44.808044 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:00:46.922067 25678 solver.cpp:397] Test net output #0: accuracy = 0.41299
I0428 22:00:46.922113 25678 solver.cpp:397] Test net output #1: loss = 2.94077 (* 1 = 2.94077 loss)
I0428 22:00:48.749536 25678 solver.cpp:218] Iteration 4800 (0.852216 iter/s, 14.0809s/12 iters), loss = 0.366546
I0428 22:00:48.749575 25678 solver.cpp:237] Train net output #0: loss = 0.366546 (* 1 = 0.366546 loss)
I0428 22:00:48.749583 25678 sgd_solver.cpp:105] Iteration 4800, lr = 0.00518374
I0428 22:00:53.753340 25678 solver.cpp:218] Iteration 4812 (2.3982 iter/s, 5.00375s/12 iters), loss = 0.574266
I0428 22:00:53.753378 25678 solver.cpp:237] Train net output #0: loss = 0.574266 (* 1 = 0.574266 loss)
I0428 22:00:53.753386 25678 sgd_solver.cpp:105] Iteration 4812, lr = 0.0051764
I0428 22:00:58.753926 25678 solver.cpp:218] Iteration 4824 (2.39974 iter/s, 5.00054s/12 iters), loss = 0.678792
I0428 22:00:58.753963 25678 solver.cpp:237] Train net output #0: loss = 0.678792 (* 1 = 0.678792 loss)
I0428 22:00:58.753970 25678 sgd_solver.cpp:105] Iteration 4824, lr = 0.00516905
I0428 22:01:03.708783 25678 solver.cpp:218] Iteration 4836 (2.42189 iter/s, 4.95481s/12 iters), loss = 0.515253
I0428 22:01:03.708822 25678 solver.cpp:237] Train net output #0: loss = 0.515253 (* 1 = 0.515253 loss)
I0428 22:01:03.708828 25678 sgd_solver.cpp:105] Iteration 4836, lr = 0.00516171
I0428 22:01:05.318146 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:01:08.664331 25678 solver.cpp:218] Iteration 4848 (2.42155 iter/s, 4.9555s/12 iters), loss = 0.64349
I0428 22:01:08.664371 25678 solver.cpp:237] Train net output #0: loss = 0.64349 (* 1 = 0.64349 loss)
I0428 22:01:08.664377 25678 sgd_solver.cpp:105] Iteration 4848, lr = 0.00515436
I0428 22:01:11.297170 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:01:13.629416 25678 solver.cpp:218] Iteration 4860 (2.4169 iter/s, 4.96504s/12 iters), loss = 0.600638
I0428 22:01:13.629452 25678 solver.cpp:237] Train net output #0: loss = 0.600638 (* 1 = 0.600638 loss)
I0428 22:01:13.629459 25678 sgd_solver.cpp:105] Iteration 4860, lr = 0.00514702
I0428 22:01:18.528939 25678 solver.cpp:218] Iteration 4872 (2.44924 iter/s, 4.89948s/12 iters), loss = 0.5688
I0428 22:01:18.528978 25678 solver.cpp:237] Train net output #0: loss = 0.5688 (* 1 = 0.5688 loss)
I0428 22:01:18.528986 25678 sgd_solver.cpp:105] Iteration 4872, lr = 0.00513967
I0428 22:01:23.501945 25678 solver.cpp:218] Iteration 4884 (2.41305 iter/s, 4.97296s/12 iters), loss = 0.532153
I0428 22:01:23.501982 25678 solver.cpp:237] Train net output #0: loss = 0.532153 (* 1 = 0.532153 loss)
I0428 22:01:23.501989 25678 sgd_solver.cpp:105] Iteration 4884, lr = 0.00513232
I0428 22:01:27.960325 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4896.caffemodel
I0428 22:01:31.020777 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4896.solverstate
I0428 22:01:33.916576 25678 solver.cpp:330] Iteration 4896, Testing net (#0)
I0428 22:01:33.916594 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:01:36.486523 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:01:38.469620 25678 solver.cpp:397] Test net output #0: accuracy = 0.39277
I0428 22:01:38.469667 25678 solver.cpp:397] Test net output #1: loss = 2.9123 (* 1 = 2.9123 loss)
I0428 22:01:38.567392 25678 solver.cpp:218] Iteration 4896 (0.796527 iter/s, 15.0654s/12 iters), loss = 0.474775
I0428 22:01:38.567435 25678 solver.cpp:237] Train net output #0: loss = 0.474775 (* 1 = 0.474775 loss)
I0428 22:01:38.567445 25678 sgd_solver.cpp:105] Iteration 4896, lr = 0.00512497
I0428 22:01:42.654870 25678 solver.cpp:218] Iteration 4908 (2.93583 iter/s, 4.08743s/12 iters), loss = 0.482759
I0428 22:01:42.654907 25678 solver.cpp:237] Train net output #0: loss = 0.482759 (* 1 = 0.482759 loss)
I0428 22:01:42.654914 25678 sgd_solver.cpp:105] Iteration 4908, lr = 0.00511763
I0428 22:01:47.595162 25678 solver.cpp:218] Iteration 4920 (2.42903 iter/s, 4.94024s/12 iters), loss = 0.382436
I0428 22:01:47.595198 25678 solver.cpp:237] Train net output #0: loss = 0.382436 (* 1 = 0.382436 loss)
I0428 22:01:47.595206 25678 sgd_solver.cpp:105] Iteration 4920, lr = 0.00511028
I0428 22:01:52.498692 25678 solver.cpp:218] Iteration 4932 (2.44724 iter/s, 4.90348s/12 iters), loss = 0.408585
I0428 22:01:52.498728 25678 solver.cpp:237] Train net output #0: loss = 0.408585 (* 1 = 0.408585 loss)
I0428 22:01:52.498736 25678 sgd_solver.cpp:105] Iteration 4932, lr = 0.00510293
I0428 22:01:57.456699 25678 solver.cpp:218] Iteration 4944 (2.42035 iter/s, 4.95796s/12 iters), loss = 0.504375
I0428 22:01:57.456737 25678 solver.cpp:237] Train net output #0: loss = 0.504375 (* 1 = 0.504375 loss)
I0428 22:01:57.456744 25678 sgd_solver.cpp:105] Iteration 4944, lr = 0.00509558
I0428 22:02:02.200556 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:02:02.401258 25678 solver.cpp:218] Iteration 4956 (2.42694 iter/s, 4.94451s/12 iters), loss = 0.338239
I0428 22:02:02.401304 25678 solver.cpp:237] Train net output #0: loss = 0.338239 (* 1 = 0.338239 loss)
I0428 22:02:02.401314 25678 sgd_solver.cpp:105] Iteration 4956, lr = 0.00508823
I0428 22:02:07.292258 25678 solver.cpp:218] Iteration 4968 (2.45351 iter/s, 4.89095s/12 iters), loss = 0.627206
I0428 22:02:07.292389 25678 solver.cpp:237] Train net output #0: loss = 0.627206 (* 1 = 0.627206 loss)
I0428 22:02:07.292399 25678 sgd_solver.cpp:105] Iteration 4968, lr = 0.00508088
I0428 22:02:12.192502 25678 solver.cpp:218] Iteration 4980 (2.44893 iter/s, 4.9001s/12 iters), loss = 0.398942
I0428 22:02:12.192540 25678 solver.cpp:237] Train net output #0: loss = 0.398942 (* 1 = 0.398942 loss)
I0428 22:02:12.192548 25678 sgd_solver.cpp:105] Iteration 4980, lr = 0.00507352
I0428 22:02:17.124521 25678 solver.cpp:218] Iteration 4992 (2.4331 iter/s, 4.93197s/12 iters), loss = 0.539997
I0428 22:02:17.124557 25678 solver.cpp:237] Train net output #0: loss = 0.539997 (* 1 = 0.539997 loss)
I0428 22:02:17.124564 25678 sgd_solver.cpp:105] Iteration 4992, lr = 0.00506617
I0428 22:02:19.162211 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_4998.caffemodel
I0428 22:02:22.237820 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_4998.solverstate
I0428 22:02:24.595755 25678 solver.cpp:330] Iteration 4998, Testing net (#0)
I0428 22:02:24.595772 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:02:27.224295 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:02:29.455832 25678 solver.cpp:397] Test net output #0: accuracy = 0.409926
I0428 22:02:29.455880 25678 solver.cpp:397] Test net output #1: loss = 2.86135 (* 1 = 2.86135 loss)
I0428 22:02:31.249984 25678 solver.cpp:218] Iteration 5004 (0.849532 iter/s, 14.1254s/12 iters), loss = 0.284921
I0428 22:02:31.250023 25678 solver.cpp:237] Train net output #0: loss = 0.284921 (* 1 = 0.284921 loss)
I0428 22:02:31.250031 25678 sgd_solver.cpp:105] Iteration 5004, lr = 0.00505882
I0428 22:02:36.169636 25678 solver.cpp:218] Iteration 5016 (2.43922 iter/s, 4.9196s/12 iters), loss = 0.433954
I0428 22:02:36.169673 25678 solver.cpp:237] Train net output #0: loss = 0.433954 (* 1 = 0.433954 loss)
I0428 22:02:36.169680 25678 sgd_solver.cpp:105] Iteration 5016, lr = 0.00505147
I0428 22:02:41.062885 25678 solver.cpp:218] Iteration 5028 (2.45238 iter/s, 4.8932s/12 iters), loss = 0.483063
I0428 22:02:41.062968 25678 solver.cpp:237] Train net output #0: loss = 0.483063 (* 1 = 0.483063 loss)
I0428 22:02:41.062976 25678 sgd_solver.cpp:105] Iteration 5028, lr = 0.00504412
I0428 22:02:45.982223 25678 solver.cpp:218] Iteration 5040 (2.4394 iter/s, 4.91925s/12 iters), loss = 0.539963
I0428 22:02:45.982264 25678 solver.cpp:237] Train net output #0: loss = 0.539963 (* 1 = 0.539963 loss)
I0428 22:02:45.982272 25678 sgd_solver.cpp:105] Iteration 5040, lr = 0.00503676
I0428 22:02:50.893318 25678 solver.cpp:218] Iteration 5052 (2.44347 iter/s, 4.91105s/12 iters), loss = 0.707016
I0428 22:02:50.893357 25678 solver.cpp:237] Train net output #0: loss = 0.707016 (* 1 = 0.707016 loss)
I0428 22:02:50.893365 25678 sgd_solver.cpp:105] Iteration 5052, lr = 0.00502941
I0428 22:02:52.797111 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:02:55.868304 25678 solver.cpp:218] Iteration 5064 (2.41209 iter/s, 4.97494s/12 iters), loss = 0.481569
I0428 22:02:55.868338 25678 solver.cpp:237] Train net output #0: loss = 0.481569 (* 1 = 0.481569 loss)
I0428 22:02:55.868345 25678 sgd_solver.cpp:105] Iteration 5064, lr = 0.00502206
I0428 22:03:00.754372 25678 solver.cpp:218] Iteration 5076 (2.45599 iter/s, 4.88602s/12 iters), loss = 0.518925
I0428 22:03:00.754412 25678 solver.cpp:237] Train net output #0: loss = 0.518925 (* 1 = 0.518925 loss)
I0428 22:03:00.754420 25678 sgd_solver.cpp:105] Iteration 5076, lr = 0.00501471
I0428 22:03:05.705006 25678 solver.cpp:218] Iteration 5088 (2.42395 iter/s, 4.95059s/12 iters), loss = 0.37136
I0428 22:03:05.705044 25678 solver.cpp:237] Train net output #0: loss = 0.37136 (* 1 = 0.37136 loss)
I0428 22:03:05.705051 25678 sgd_solver.cpp:105] Iteration 5088, lr = 0.00500735
I0428 22:03:10.177369 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5100.caffemodel
I0428 22:03:13.245601 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5100.solverstate
I0428 22:03:15.599082 25678 solver.cpp:330] Iteration 5100, Testing net (#0)
I0428 22:03:15.599102 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:03:18.159291 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:03:20.282320 25678 solver.cpp:397] Test net output #0: accuracy = 0.417892
I0428 22:03:20.282366 25678 solver.cpp:397] Test net output #1: loss = 2.91472 (* 1 = 2.91472 loss)
I0428 22:03:20.379879 25678 solver.cpp:218] Iteration 5100 (0.817727 iter/s, 14.6748s/12 iters), loss = 0.39039
I0428 22:03:20.379926 25678 solver.cpp:237] Train net output #0: loss = 0.39039 (* 1 = 0.39039 loss)
I0428 22:03:20.379935 25678 sgd_solver.cpp:105] Iteration 5100, lr = 0.005
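[Editorial note on the learning-rate trace] The solver for this job uses lr_policy "sigmoid" with base_lr 0.01, gamma -0.00024509805 and stepsize 5100, so the rate logged by sgd_solver.cpp passes through exactly base_lr/2 = 0.005 at iteration 5100 (the line above). A minimal Python sketch, assuming only Caffe's documented sigmoid policy formula lr = base_lr * 1/(1 + exp(-gamma * (iter - stepsize))) and the solver parameters recorded at the top of this log, reproduces the printed rates:

    import math

    # Solver parameters from this job's solver.prototxt (see log header).
    base_lr, gamma, stepsize = 0.01, -0.00024509805, 5100

    def sigmoid_lr(it):
        # Caffe "sigmoid" lr_policy: base_lr * 1/(1 + exp(-gamma * (iter - stepsize)))
        return base_lr / (1.0 + math.exp(-gamma * (it - stepsize)))

    for it in (4032, 4692, 5100, 6024):
        # 'g' formatting (6 significant digits) matches glog's default float output.
        print(it, format(sigmoid_lr(it), 'g'))

    # prints 0.0056507, 0.00524979, 0.005 and 0.00443623, matching the
    # "lr = ..." values logged by sgd_solver.cpp:105 at those iterations.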
I0428 22:03:24.497071 25678 solver.cpp:218] Iteration 5112 (2.91465 iter/s, 4.11714s/12 iters), loss = 0.610116
I0428 22:03:24.497112 25678 solver.cpp:237] Train net output #0: loss = 0.610116 (* 1 = 0.610116 loss)
I0428 22:03:24.497118 25678 sgd_solver.cpp:105] Iteration 5112, lr = 0.00499265
I0428 22:03:29.446102 25678 solver.cpp:218] Iteration 5124 (2.42474 iter/s, 4.94898s/12 iters), loss = 0.393953
I0428 22:03:29.446139 25678 solver.cpp:237] Train net output #0: loss = 0.393953 (* 1 = 0.393953 loss)
I0428 22:03:29.446147 25678 sgd_solver.cpp:105] Iteration 5124, lr = 0.00498529
I0428 22:03:34.353170 25678 solver.cpp:218] Iteration 5136 (2.44548 iter/s, 4.90702s/12 iters), loss = 0.505094
I0428 22:03:34.353210 25678 solver.cpp:237] Train net output #0: loss = 0.505094 (* 1 = 0.505094 loss)
I0428 22:03:34.353219 25678 sgd_solver.cpp:105] Iteration 5136, lr = 0.00497794
I0428 22:03:39.338415 25678 solver.cpp:218] Iteration 5148 (2.40713 iter/s, 4.98519s/12 iters), loss = 0.56983
I0428 22:03:39.338452 25678 solver.cpp:237] Train net output #0: loss = 0.56983 (* 1 = 0.56983 loss)
I0428 22:03:39.338459 25678 sgd_solver.cpp:105] Iteration 5148, lr = 0.00497059
I0428 22:03:43.339527 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:03:44.297785 25678 solver.cpp:218] Iteration 5160 (2.41969 iter/s, 4.95931s/12 iters), loss = 0.37081
I0428 22:03:44.297825 25678 solver.cpp:237] Train net output #0: loss = 0.37081 (* 1 = 0.37081 loss)
I0428 22:03:44.297833 25678 sgd_solver.cpp:105] Iteration 5160, lr = 0.00496324
I0428 22:03:49.225809 25678 solver.cpp:218] Iteration 5172 (2.43508 iter/s, 4.92798s/12 iters), loss = 0.571413
I0428 22:03:49.225845 25678 solver.cpp:237] Train net output #0: loss = 0.571413 (* 1 = 0.571413 loss)
I0428 22:03:49.225853 25678 sgd_solver.cpp:105] Iteration 5172, lr = 0.00495588
I0428 22:03:54.161708 25678 solver.cpp:218] Iteration 5184 (2.43119 iter/s, 4.93585s/12 iters), loss = 0.340549
I0428 22:03:54.161747 25678 solver.cpp:237] Train net output #0: loss = 0.340549 (* 1 = 0.340549 loss)
I0428 22:03:54.161756 25678 sgd_solver.cpp:105] Iteration 5184, lr = 0.00494853
I0428 22:03:59.043126 25678 solver.cpp:218] Iteration 5196 (2.45833 iter/s, 4.88137s/12 iters), loss = 0.432803
I0428 22:03:59.043162 25678 solver.cpp:237] Train net output #0: loss = 0.432803 (* 1 = 0.432803 loss)
I0428 22:03:59.043170 25678 sgd_solver.cpp:105] Iteration 5196, lr = 0.00494118
I0428 22:04:01.041791 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5202.caffemodel
I0428 22:04:04.137902 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5202.solverstate
I0428 22:04:06.497189 25678 solver.cpp:330] Iteration 5202, Testing net (#0)
I0428 22:04:06.497205 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:04:08.974560 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:04:11.079416 25678 solver.cpp:397] Test net output #0: accuracy = 0.42402
I0428 22:04:11.079461 25678 solver.cpp:397] Test net output #1: loss = 2.84089 (* 1 = 2.84089 loss)
I0428 22:04:12.890087 25678 solver.cpp:218] Iteration 5208 (0.866618 iter/s, 13.8469s/12 iters), loss = 0.531777
I0428 22:04:12.890126 25678 solver.cpp:237] Train net output #0: loss = 0.531777 (* 1 = 0.531777 loss)
I0428 22:04:12.890133 25678 sgd_solver.cpp:105] Iteration 5208, lr = 0.00493383
I0428 22:04:17.865969 25678 solver.cpp:218] Iteration 5220 (2.41166 iter/s, 4.97583s/12 iters), loss = 0.295627
I0428 22:04:17.866088 25678 solver.cpp:237] Train net output #0: loss = 0.295627 (* 1 = 0.295627 loss)
I0428 22:04:17.866097 25678 sgd_solver.cpp:105] Iteration 5220, lr = 0.00492648
I0428 22:04:22.783114 25678 solver.cpp:218] Iteration 5232 (2.4405 iter/s, 4.91702s/12 iters), loss = 0.634428
I0428 22:04:22.783154 25678 solver.cpp:237] Train net output #0: loss = 0.634428 (* 1 = 0.634428 loss)
I0428 22:04:22.783160 25678 sgd_solver.cpp:105] Iteration 5232, lr = 0.00491912
I0428 22:04:27.709304 25678 solver.cpp:218] Iteration 5244 (2.43599 iter/s, 4.92614s/12 iters), loss = 0.324166
I0428 22:04:27.709345 25678 solver.cpp:237] Train net output #0: loss = 0.324166 (* 1 = 0.324166 loss)
I0428 22:04:27.709353 25678 sgd_solver.cpp:105] Iteration 5244, lr = 0.00491177
I0428 22:04:32.639351 25678 solver.cpp:218] Iteration 5256 (2.43408 iter/s, 4.93s/12 iters), loss = 0.57906
I0428 22:04:32.639386 25678 solver.cpp:237] Train net output #0: loss = 0.57906 (* 1 = 0.57906 loss)
I0428 22:04:32.639394 25678 sgd_solver.cpp:105] Iteration 5256, lr = 0.00490442
I0428 22:04:33.918012 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:04:37.615571 25678 solver.cpp:218] Iteration 5268 (2.41149 iter/s, 4.97618s/12 iters), loss = 0.554367
I0428 22:04:37.615607 25678 solver.cpp:237] Train net output #0: loss = 0.554367 (* 1 = 0.554367 loss)
I0428 22:04:37.615614 25678 sgd_solver.cpp:105] Iteration 5268, lr = 0.00489707
I0428 22:04:42.540686 25678 solver.cpp:218] Iteration 5280 (2.43652 iter/s, 4.92507s/12 iters), loss = 0.440984
I0428 22:04:42.540725 25678 solver.cpp:237] Train net output #0: loss = 0.440984 (* 1 = 0.440984 loss)
I0428 22:04:42.540733 25678 sgd_solver.cpp:105] Iteration 5280, lr = 0.00488972
I0428 22:04:47.499298 25678 solver.cpp:218] Iteration 5292 (2.42006 iter/s, 4.95856s/12 iters), loss = 0.370915
I0428 22:04:47.499334 25678 solver.cpp:237] Train net output #0: loss = 0.370915 (* 1 = 0.370915 loss)
I0428 22:04:47.499342 25678 sgd_solver.cpp:105] Iteration 5292, lr = 0.00488237
I0428 22:04:51.984392 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5304.caffemodel
I0428 22:04:55.075551 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5304.solverstate
I0428 22:04:57.672312 25678 solver.cpp:330] Iteration 5304, Testing net (#0)
I0428 22:04:57.672328 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:05:00.147002 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:05:02.475477 25678 solver.cpp:397] Test net output #0: accuracy = 0.408088
I0428 22:05:02.475523 25678 solver.cpp:397] Test net output #1: loss = 2.93617 (* 1 = 2.93617 loss)
I0428 22:05:02.573223 25678 solver.cpp:218] Iteration 5304 (0.796079 iter/s, 15.0739s/12 iters), loss = 0.321281
I0428 22:05:02.573269 25678 solver.cpp:237] Train net output #0: loss = 0.321281 (* 1 = 0.321281 loss)
I0428 22:05:02.573278 25678 sgd_solver.cpp:105] Iteration 5304, lr = 0.00487503
I0428 22:05:06.657114 25678 solver.cpp:218] Iteration 5316 (2.93842 iter/s, 4.08383s/12 iters), loss = 0.669556
I0428 22:05:06.657153 25678 solver.cpp:237] Train net output #0: loss = 0.669556 (* 1 = 0.669556 loss)
I0428 22:05:06.657161 25678 sgd_solver.cpp:105] Iteration 5316, lr = 0.00486768
I0428 22:05:11.615443 25678 solver.cpp:218] Iteration 5328 (2.4202 iter/s, 4.95828s/12 iters), loss = 0.375518
I0428 22:05:11.615484 25678 solver.cpp:237] Train net output #0: loss = 0.375518 (* 1 = 0.375518 loss)
I0428 22:05:11.615491 25678 sgd_solver.cpp:105] Iteration 5328, lr = 0.00486033
I0428 22:05:16.509220 25678 solver.cpp:218] Iteration 5340 (2.45212 iter/s, 4.89372s/12 iters), loss = 0.665178
I0428 22:05:16.509255 25678 solver.cpp:237] Train net output #0: loss = 0.665178 (* 1 = 0.665178 loss)
I0428 22:05:16.509263 25678 sgd_solver.cpp:105] Iteration 5340, lr = 0.00485298
I0428 22:05:21.449929 25678 solver.cpp:218] Iteration 5352 (2.42883 iter/s, 4.94066s/12 iters), loss = 0.386766
I0428 22:05:21.449966 25678 solver.cpp:237] Train net output #0: loss = 0.386766 (* 1 = 0.386766 loss)
I0428 22:05:21.449975 25678 sgd_solver.cpp:105] Iteration 5352, lr = 0.00484564
I0428 22:05:24.805836 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:05:26.354233 25678 solver.cpp:218] Iteration 5364 (2.44686 iter/s, 4.90425s/12 iters), loss = 0.482448
I0428 22:05:26.354265 25678 solver.cpp:237] Train net output #0: loss = 0.482448 (* 1 = 0.482448 loss)
I0428 22:05:26.354274 25678 sgd_solver.cpp:105] Iteration 5364, lr = 0.00483829
I0428 22:05:31.262143 25678 solver.cpp:218] Iteration 5376 (2.44506 iter/s, 4.90786s/12 iters), loss = 0.331808
I0428 22:05:31.262183 25678 solver.cpp:237] Train net output #0: loss = 0.331808 (* 1 = 0.331808 loss)
I0428 22:05:31.262192 25678 sgd_solver.cpp:105] Iteration 5376, lr = 0.00483095
I0428 22:05:36.148412 25678 solver.cpp:218] Iteration 5388 (2.45589 iter/s, 4.88621s/12 iters), loss = 0.432656
I0428 22:05:36.148450 25678 solver.cpp:237] Train net output #0: loss = 0.432656 (* 1 = 0.432656 loss)
I0428 22:05:36.148458 25678 sgd_solver.cpp:105] Iteration 5388, lr = 0.0048236
I0428 22:05:41.000152 25678 solver.cpp:218] Iteration 5400 (2.47337 iter/s, 4.85169s/12 iters), loss = 0.297752
I0428 22:05:41.000188 25678 solver.cpp:237] Train net output #0: loss = 0.297752 (* 1 = 0.297752 loss)
I0428 22:05:41.000196 25678 sgd_solver.cpp:105] Iteration 5400, lr = 0.00481626
I0428 22:05:42.987596 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5406.caffemodel
I0428 22:05:46.136970 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5406.solverstate
I0428 22:05:48.490736 25678 solver.cpp:330] Iteration 5406, Testing net (#0)
I0428 22:05:48.490748 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:05:50.976562 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:05:53.368059 25678 solver.cpp:397] Test net output #0: accuracy = 0.413603
I0428 22:05:53.368108 25678 solver.cpp:397] Test net output #1: loss = 2.95942 (* 1 = 2.95942 loss)
I0428 22:05:55.180202 25678 solver.cpp:218] Iteration 5412 (0.846262 iter/s, 14.18s/12 iters), loss = 0.563565
I0428 22:05:55.180316 25678 solver.cpp:237] Train net output #0: loss = 0.563565 (* 1 = 0.563565 loss)
I0428 22:05:55.180325 25678 sgd_solver.cpp:105] Iteration 5412, lr = 0.00480892
I0428 22:06:00.129026 25678 solver.cpp:218] Iteration 5424 (2.42488 iter/s, 4.9487s/12 iters), loss = 0.384689
I0428 22:06:00.129065 25678 solver.cpp:237] Train net output #0: loss = 0.384689 (* 1 = 0.384689 loss)
I0428 22:06:00.129072 25678 sgd_solver.cpp:105] Iteration 5424, lr = 0.00480157
I0428 22:06:05.023491 25678 solver.cpp:218] Iteration 5436 (2.45178 iter/s, 4.89441s/12 iters), loss = 0.386121
I0428 22:06:05.023526 25678 solver.cpp:237] Train net output #0: loss = 0.386121 (* 1 = 0.386121 loss)
I0428 22:06:05.023533 25678 sgd_solver.cpp:105] Iteration 5436, lr = 0.00479423
I0428 22:06:09.980276 25678 solver.cpp:218] Iteration 5448 (2.42095 iter/s, 4.95673s/12 iters), loss = 0.616018
I0428 22:06:09.980314 25678 solver.cpp:237] Train net output #0: loss = 0.616018 (* 1 = 0.616018 loss)
I0428 22:06:09.980321 25678 sgd_solver.cpp:105] Iteration 5448, lr = 0.00478689
I0428 22:06:14.887689 25678 solver.cpp:218] Iteration 5460 (2.44531 iter/s, 4.90736s/12 iters), loss = 0.395515
I0428 22:06:14.887727 25678 solver.cpp:237] Train net output #0: loss = 0.395515 (* 1 = 0.395515 loss)
I0428 22:06:14.887735 25678 sgd_solver.cpp:105] Iteration 5460, lr = 0.00477955
I0428 22:06:15.437407 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:06:19.839138 25678 solver.cpp:218] Iteration 5472 (2.42356 iter/s, 4.9514s/12 iters), loss = 0.413346
I0428 22:06:19.839171 25678 solver.cpp:237] Train net output #0: loss = 0.413346 (* 1 = 0.413346 loss)
I0428 22:06:19.839179 25678 sgd_solver.cpp:105] Iteration 5472, lr = 0.00477222
I0428 22:06:24.726994 25678 solver.cpp:218] Iteration 5484 (2.45509 iter/s, 4.88781s/12 iters), loss = 0.662474
I0428 22:06:24.727030 25678 solver.cpp:237] Train net output #0: loss = 0.662474 (* 1 = 0.662474 loss)
I0428 22:06:24.727036 25678 sgd_solver.cpp:105] Iteration 5484, lr = 0.00476488
I0428 22:06:29.696738 25678 solver.cpp:218] Iteration 5496 (2.41464 iter/s, 4.96969s/12 iters), loss = 0.468108
I0428 22:06:29.696854 25678 solver.cpp:237] Train net output #0: loss = 0.468108 (* 1 = 0.468108 loss)
I0428 22:06:29.696863 25678 sgd_solver.cpp:105] Iteration 5496, lr = 0.00475754
I0428 22:06:34.182725 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5508.caffemodel
I0428 22:06:37.319797 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5508.solverstate
I0428 22:06:39.686179 25678 solver.cpp:330] Iteration 5508, Testing net (#0)
I0428 22:06:39.686197 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:06:42.111789 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:06:44.570627 25678 solver.cpp:397] Test net output #0: accuracy = 0.422794
I0428 22:06:44.570674 25678 solver.cpp:397] Test net output #1: loss = 2.94261 (* 1 = 2.94261 loss)
I0428 22:06:44.667799 25678 solver.cpp:218] Iteration 5508 (0.801553 iter/s, 14.9709s/12 iters), loss = 0.328649
I0428 22:06:44.667837 25678 solver.cpp:237] Train net output #0: loss = 0.328649 (* 1 = 0.328649 loss)
I0428 22:06:44.667845 25678 sgd_solver.cpp:105] Iteration 5508, lr = 0.00475021
I0428 22:06:48.771435 25678 solver.cpp:218] Iteration 5520 (2.92428 iter/s, 4.10358s/12 iters), loss = 0.411532
I0428 22:06:48.771476 25678 solver.cpp:237] Train net output #0: loss = 0.411532 (* 1 = 0.411532 loss)
I0428 22:06:48.771484 25678 sgd_solver.cpp:105] Iteration 5520, lr = 0.00474287
I0428 22:06:50.745213 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:06:53.651675 25678 solver.cpp:218] Iteration 5532 (2.45892 iter/s, 4.88019s/12 iters), loss = 0.485105
I0428 22:06:53.651711 25678 solver.cpp:237] Train net output #0: loss = 0.485105 (* 1 = 0.485105 loss)
I0428 22:06:53.651719 25678 sgd_solver.cpp:105] Iteration 5532, lr = 0.00473554
I0428 22:06:58.544757 25678 solver.cpp:218] Iteration 5544 (2.45247 iter/s, 4.89302s/12 iters), loss = 0.343812
I0428 22:06:58.544800 25678 solver.cpp:237] Train net output #0: loss = 0.343812 (* 1 = 0.343812 loss)
I0428 22:06:58.544808 25678 sgd_solver.cpp:105] Iteration 5544, lr = 0.00472821
I0428 22:07:03.517174 25678 solver.cpp:218] Iteration 5556 (2.41334 iter/s, 4.97236s/12 iters), loss = 0.389313
I0428 22:07:03.517251 25678 solver.cpp:237] Train net output #0: loss = 0.389313 (* 1 = 0.389313 loss)
I0428 22:07:03.517258 25678 sgd_solver.cpp:105] Iteration 5556, lr = 0.00472088
I0428 22:07:06.172696 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:07:08.493086 25678 solver.cpp:218] Iteration 5568 (2.41166 iter/s, 4.97583s/12 iters), loss = 0.366594
I0428 22:07:08.493120 25678 solver.cpp:237] Train net output #0: loss = 0.366594 (* 1 = 0.366594 loss)
I0428 22:07:08.493129 25678 sgd_solver.cpp:105] Iteration 5568, lr = 0.00471355
I0428 22:07:13.380509 25678 solver.cpp:218] Iteration 5580 (2.45531 iter/s, 4.88738s/12 iters), loss = 0.403147
I0428 22:07:13.380545 25678 solver.cpp:237] Train net output #0: loss = 0.403147 (* 1 = 0.403147 loss)
I0428 22:07:13.380553 25678 sgd_solver.cpp:105] Iteration 5580, lr = 0.00470622
I0428 22:07:18.335330 25678 solver.cpp:218] Iteration 5592 (2.42191 iter/s, 4.95477s/12 iters), loss = 0.343261
I0428 22:07:18.335366 25678 solver.cpp:237] Train net output #0: loss = 0.343261 (* 1 = 0.343261 loss)
I0428 22:07:18.335373 25678 sgd_solver.cpp:105] Iteration 5592, lr = 0.00469889
I0428 22:07:23.245626 25678 solver.cpp:218] Iteration 5604 (2.44387 iter/s, 4.91025s/12 iters), loss = 0.274508
I0428 22:07:23.245664 25678 solver.cpp:237] Train net output #0: loss = 0.274508 (* 1 = 0.274508 loss)
I0428 22:07:23.245673 25678 sgd_solver.cpp:105] Iteration 5604, lr = 0.00469157
I0428 22:07:25.239087 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5610.caffemodel
I0428 22:07:28.322655 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5610.solverstate
I0428 22:07:30.686338 25678 solver.cpp:330] Iteration 5610, Testing net (#0)
I0428 22:07:30.686357 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:07:33.028430 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:07:35.491165 25678 solver.cpp:397] Test net output #0: accuracy = 0.433211
I0428 22:07:35.491356 25678 solver.cpp:397] Test net output #1: loss = 2.96536 (* 1 = 2.96536 loss)
I0428 22:07:37.297616 25678 solver.cpp:218] Iteration 5616 (0.853975 iter/s, 14.0519s/12 iters), loss = 0.324191
I0428 22:07:37.297657 25678 solver.cpp:237] Train net output #0: loss = 0.324191 (* 1 = 0.324191 loss)
I0428 22:07:37.297664 25678 sgd_solver.cpp:105] Iteration 5616, lr = 0.00468424
I0428 22:07:42.268122 25678 solver.cpp:218] Iteration 5628 (2.41427 iter/s, 4.97044s/12 iters), loss = 0.309141
I0428 22:07:42.268162 25678 solver.cpp:237] Train net output #0: loss = 0.309141 (* 1 = 0.309141 loss)
I0428 22:07:42.268170 25678 sgd_solver.cpp:105] Iteration 5628, lr = 0.00467692
I0428 22:07:47.239117 25678 solver.cpp:218] Iteration 5640 (2.41403 iter/s, 4.97094s/12 iters), loss = 0.436203
I0428 22:07:47.239159 25678 solver.cpp:237] Train net output #0: loss = 0.436203 (* 1 = 0.436203 loss)
I0428 22:07:47.239167 25678 sgd_solver.cpp:105] Iteration 5640, lr = 0.0046696
I0428 22:07:52.235862 25678 solver.cpp:218] Iteration 5652 (2.40159 iter/s, 4.99669s/12 iters), loss = 0.41526
I0428 22:07:52.235896 25678 solver.cpp:237] Train net output #0: loss = 0.41526 (* 1 = 0.41526 loss)
I0428 22:07:52.235904 25678 sgd_solver.cpp:105] Iteration 5652, lr = 0.00466228
I0428 22:07:57.074016 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:07:57.247692 25678 solver.cpp:218] Iteration 5664 (2.39436 iter/s, 5.01178s/12 iters), loss = 0.620229
I0428 22:07:57.247732 25678 solver.cpp:237] Train net output #0: loss = 0.620229 (* 1 = 0.620229 loss)
I0428 22:07:57.247740 25678 sgd_solver.cpp:105] Iteration 5664, lr = 0.00465496
I0428 22:08:02.214999 25678 solver.cpp:218] Iteration 5676 (2.41582 iter/s, 4.96725s/12 iters), loss = 0.44222
I0428 22:08:02.215035 25678 solver.cpp:237] Train net output #0: loss = 0.44222 (* 1 = 0.44222 loss)
I0428 22:08:02.215044 25678 sgd_solver.cpp:105] Iteration 5676, lr = 0.00464764
I0428 22:08:07.199224 25678 solver.cpp:218] Iteration 5688 (2.40762 iter/s, 4.98417s/12 iters), loss = 0.432149
I0428 22:08:07.199314 25678 solver.cpp:237] Train net output #0: loss = 0.432149 (* 1 = 0.432149 loss)
I0428 22:08:07.199323 25678 sgd_solver.cpp:105] Iteration 5688, lr = 0.00464033
I0428 22:08:12.196542 25678 solver.cpp:218] Iteration 5700 (2.40134 iter/s, 4.99722s/12 iters), loss = 0.335056
I0428 22:08:12.196579 25678 solver.cpp:237] Train net output #0: loss = 0.335056 (* 1 = 0.335056 loss)
I0428 22:08:12.196586 25678 sgd_solver.cpp:105] Iteration 5700, lr = 0.00463301
I0428 22:08:16.757716 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5712.caffemodel
I0428 22:08:20.339059 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5712.solverstate
I0428 22:08:22.697393 25678 solver.cpp:330] Iteration 5712, Testing net (#0)
I0428 22:08:22.697412 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:08:24.997340 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:08:27.495092 25678 solver.cpp:397] Test net output #0: accuracy = 0.428922
I0428 22:08:27.495144 25678 solver.cpp:397] Test net output #1: loss = 2.94314 (* 1 = 2.94314 loss)
I0428 22:08:27.592047 25678 solver.cpp:218] Iteration 5712 (0.779451 iter/s, 15.3955s/12 iters), loss = 0.486494
I0428 22:08:27.592093 25678 solver.cpp:237] Train net output #0: loss = 0.486494 (* 1 = 0.486494 loss)
I0428 22:08:27.592101 25678 sgd_solver.cpp:105] Iteration 5712, lr = 0.0046257
I0428 22:08:31.799628 25678 solver.cpp:218] Iteration 5724 (2.85204 iter/s, 4.20752s/12 iters), loss = 0.370563
I0428 22:08:31.799664 25678 solver.cpp:237] Train net output #0: loss = 0.370563 (* 1 = 0.370563 loss)
I0428 22:08:31.799671 25678 sgd_solver.cpp:105] Iteration 5724, lr = 0.00461839
I0428 22:08:36.787129 25678 solver.cpp:218] Iteration 5736 (2.40604 iter/s, 4.98745s/12 iters), loss = 0.302272
I0428 22:08:36.787168 25678 solver.cpp:237] Train net output #0: loss = 0.302272 (* 1 = 0.302272 loss)
I0428 22:08:36.787174 25678 sgd_solver.cpp:105] Iteration 5736, lr = 0.00461108
I0428 22:08:41.783761 25678 solver.cpp:218] Iteration 5748 (2.40164 iter/s, 4.99658s/12 iters), loss = 0.377086
I0428 22:08:41.783871 25678 solver.cpp:237] Train net output #0: loss = 0.377086 (* 1 = 0.377086 loss)
I0428 22:08:41.783880 25678 sgd_solver.cpp:105] Iteration 5748, lr = 0.00460377
I0428 22:08:46.803429 25678 solver.cpp:218] Iteration 5760 (2.39065 iter/s, 5.01955s/12 iters), loss = 0.386592
I0428 22:08:46.803467 25678 solver.cpp:237] Train net output #0: loss = 0.386592 (* 1 = 0.386592 loss)
I0428 22:08:46.803476 25678 sgd_solver.cpp:105] Iteration 5760, lr = 0.00459647
I0428 22:08:48.780313 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:08:51.814019 25678 solver.cpp:218] Iteration 5772 (2.39495 iter/s, 5.01053s/12 iters), loss = 0.5494
I0428 22:08:51.814061 25678 solver.cpp:237] Train net output #0: loss = 0.5494 (* 1 = 0.5494 loss)
I0428 22:08:51.814069 25678 sgd_solver.cpp:105] Iteration 5772, lr = 0.00458916
I0428 22:08:56.792043 25678 solver.cpp:218] Iteration 5784 (2.41062 iter/s, 4.97797s/12 iters), loss = 0.232545
I0428 22:08:56.792080 25678 solver.cpp:237] Train net output #0: loss = 0.232545 (* 1 = 0.232545 loss)
I0428 22:08:56.792088 25678 sgd_solver.cpp:105] Iteration 5784, lr = 0.00458186
I0428 22:09:01.790031 25678 solver.cpp:218] Iteration 5796 (2.40099 iter/s, 4.99794s/12 iters), loss = 0.303619
I0428 22:09:01.790071 25678 solver.cpp:237] Train net output #0: loss = 0.303619 (* 1 = 0.303619 loss)
I0428 22:09:01.790078 25678 sgd_solver.cpp:105] Iteration 5796, lr = 0.00457456
I0428 22:09:06.809473 25678 solver.cpp:218] Iteration 5808 (2.39073 iter/s, 5.01939s/12 iters), loss = 0.303505
I0428 22:09:06.809514 25678 solver.cpp:237] Train net output #0: loss = 0.303505 (* 1 = 0.303505 loss)
I0428 22:09:06.809521 25678 sgd_solver.cpp:105] Iteration 5808, lr = 0.00456726
I0428 22:09:08.823776 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5814.caffemodel
I0428 22:09:13.305542 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5814.solverstate
I0428 22:09:15.674623 25678 solver.cpp:330] Iteration 5814, Testing net (#0)
I0428 22:09:15.674641 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:09:17.937268 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:09:20.479981 25678 solver.cpp:397] Test net output #0: accuracy = 0.441176
I0428 22:09:20.480029 25678 solver.cpp:397] Test net output #1: loss = 2.84019 (* 1 = 2.84019 loss)
I0428 22:09:22.284828 25678 solver.cpp:218] Iteration 5820 (0.775429 iter/s, 15.4753s/12 iters), loss = 0.228905
I0428 22:09:22.284870 25678 solver.cpp:237] Train net output #0: loss = 0.228905 (* 1 = 0.228905 loss)
I0428 22:09:22.284878 25678 sgd_solver.cpp:105] Iteration 5820, lr = 0.00455996
I0428 22:09:27.210721 25678 solver.cpp:218] Iteration 5832 (2.43613 iter/s, 4.92584s/12 iters), loss = 0.374261
I0428 22:09:27.210759 25678 solver.cpp:237] Train net output #0: loss = 0.374261 (* 1 = 0.374261 loss)
I0428 22:09:27.210767 25678 sgd_solver.cpp:105] Iteration 5832, lr = 0.00455267
I0428 22:09:32.160156 25678 solver.cpp:218] Iteration 5844 (2.42454 iter/s, 4.94938s/12 iters), loss = 0.508916
I0428 22:09:32.160193 25678 solver.cpp:237] Train net output #0: loss = 0.508916 (* 1 = 0.508916 loss)
I0428 22:09:32.160202 25678 sgd_solver.cpp:105] Iteration 5844, lr = 0.00454538
I0428 22:09:37.137598 25678 solver.cpp:218] Iteration 5856 (2.4109 iter/s, 4.97739s/12 iters), loss = 0.316098
I0428 22:09:37.137634 25678 solver.cpp:237] Train net output #0: loss = 0.316098 (* 1 = 0.316098 loss)
I0428 22:09:37.137641 25678 sgd_solver.cpp:105] Iteration 5856, lr = 0.00453809
I0428 22:09:41.289842 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:09:42.096096 25678 solver.cpp:218] Iteration 5868 (2.42011 iter/s, 4.95845s/12 iters), loss = 0.245991
I0428 22:09:42.096133 25678 solver.cpp:237] Train net output #0: loss = 0.245991 (* 1 = 0.245991 loss)
I0428 22:09:42.096141 25678 sgd_solver.cpp:105] Iteration 5868, lr = 0.0045308
I0428 22:09:47.063426 25678 solver.cpp:218] Iteration 5880 (2.41581 iter/s, 4.96728s/12 iters), loss = 0.32865
I0428 22:09:47.063606 25678 solver.cpp:237] Train net output #0: loss = 0.32865 (* 1 = 0.32865 loss)
I0428 22:09:47.063618 25678 sgd_solver.cpp:105] Iteration 5880, lr = 0.00452351
I0428 22:09:52.057518 25678 solver.cpp:218] Iteration 5892 (2.40293 iter/s, 4.9939s/12 iters), loss = 0.239476
I0428 22:09:52.057559 25678 solver.cpp:237] Train net output #0: loss = 0.239476 (* 1 = 0.239476 loss)
I0428 22:09:52.057567 25678 sgd_solver.cpp:105] Iteration 5892, lr = 0.00451622
I0428 22:09:56.961135 25678 solver.cpp:218] Iteration 5904 (2.4472 iter/s, 4.90356s/12 iters), loss = 0.536335
I0428 22:09:56.961171 25678 solver.cpp:237] Train net output #0: loss = 0.536335 (* 1 = 0.536335 loss)
I0428 22:09:56.961179 25678 sgd_solver.cpp:105] Iteration 5904, lr = 0.00450894
I0428 22:10:01.394506 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_5916.caffemodel
I0428 22:10:04.454036 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_5916.solverstate
I0428 22:10:06.810686 25678 solver.cpp:330] Iteration 5916, Testing net (#0)
I0428 22:10:06.810714 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:10:08.998056 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:10:11.380705 25678 solver.cpp:397] Test net output #0: accuracy = 0.435662
I0428 22:10:11.380739 25678 solver.cpp:397] Test net output #1: loss = 2.94021 (* 1 = 2.94021 loss)
I0428 22:10:11.477473 25678 solver.cpp:218] Iteration 5916 (0.826658 iter/s, 14.5163s/12 iters), loss = 0.326553
I0428 22:10:11.477543 25678 solver.cpp:237] Train net output #0: loss = 0.326553 (* 1 = 0.326553 loss)
I0428 22:10:11.477560 25678 sgd_solver.cpp:105] Iteration 5916, lr = 0.00450166
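[Note] The throughput printed by solver.cpp:218 is simply the display interval (12 iterations) divided by the elapsed wall time, which is why the reading that spans a snapshot plus a full test pass (14.5163 s at iteration 5916) drops well below the steady-state ~2.4 iter/s. A quick check against two of the lines above:

display_interval = 12
for elapsed in (4.92584, 14.5163):        # elapsed times taken from solver.cpp:218 lines above
    print(round(display_interval / elapsed, 5), "iter/s over", elapsed, "s")
# ~2.43614 and ~0.82666 iter/s, agreeing with the printed rates to rounding.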
I0428 22:10:15.613636 25678 solver.cpp:218] Iteration 5928 (2.90129 iter/s, 4.13609s/12 iters), loss = 0.175282
I0428 22:10:15.613672 25678 solver.cpp:237] Train net output #0: loss = 0.175282 (* 1 = 0.175282 loss)
I0428 22:10:15.613679 25678 sgd_solver.cpp:105] Iteration 5928, lr = 0.00449438
I0428 22:10:20.521390 25678 solver.cpp:218] Iteration 5940 (2.44513 iter/s, 4.90771s/12 iters), loss = 0.275416
I0428 22:10:20.521450 25678 solver.cpp:237] Train net output #0: loss = 0.275416 (* 1 = 0.275416 loss)
I0428 22:10:20.521457 25678 sgd_solver.cpp:105] Iteration 5940, lr = 0.0044871
I0428 22:10:25.480741 25678 solver.cpp:218] Iteration 5952 (2.41971 iter/s, 4.95928s/12 iters), loss = 0.291158
I0428 22:10:25.480779 25678 solver.cpp:237] Train net output #0: loss = 0.291158 (* 1 = 0.291158 loss)
I0428 22:10:25.480787 25678 sgd_solver.cpp:105] Iteration 5952, lr = 0.00447983
I0428 22:10:30.406826 25678 solver.cpp:218] Iteration 5964 (2.43604 iter/s, 4.92604s/12 iters), loss = 0.302681
I0428 22:10:30.406858 25678 solver.cpp:237] Train net output #0: loss = 0.302681 (* 1 = 0.302681 loss)
I0428 22:10:30.406867 25678 sgd_solver.cpp:105] Iteration 5964, lr = 0.00447256
I0428 22:10:31.720382 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:10:35.362406 25678 solver.cpp:218] Iteration 5976 (2.42153 iter/s, 4.95554s/12 iters), loss = 0.367752
I0428 22:10:35.362442 25678 solver.cpp:237] Train net output #0: loss = 0.367752 (* 1 = 0.367752 loss)
I0428 22:10:35.362448 25678 sgd_solver.cpp:105] Iteration 5976, lr = 0.00446529
I0428 22:10:40.257866 25678 solver.cpp:218] Iteration 5988 (2.45128 iter/s, 4.89541s/12 iters), loss = 0.136044
I0428 22:10:40.257900 25678 solver.cpp:237] Train net output #0: loss = 0.136044 (* 1 = 0.136044 loss)
I0428 22:10:40.257908 25678 sgd_solver.cpp:105] Iteration 5988, lr = 0.00445802
I0428 22:10:45.222206 25678 solver.cpp:218] Iteration 6000 (2.41726 iter/s, 4.9643s/12 iters), loss = 0.264366
I0428 22:10:45.222244 25678 solver.cpp:237] Train net output #0: loss = 0.264366 (* 1 = 0.264366 loss)
I0428 22:10:45.222250 25678 sgd_solver.cpp:105] Iteration 6000, lr = 0.00445075
I0428 22:10:50.148766 25678 solver.cpp:218] Iteration 6012 (2.4358 iter/s, 4.92651s/12 iters), loss = 0.273784
I0428 22:10:50.148802 25678 solver.cpp:237] Train net output #0: loss = 0.273784 (* 1 = 0.273784 loss)
I0428 22:10:50.148809 25678 sgd_solver.cpp:105] Iteration 6012, lr = 0.00444349
I0428 22:10:52.163929 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6018.caffemodel
I0428 22:10:55.238267 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6018.solverstate
I0428 22:10:57.593353 25678 solver.cpp:330] Iteration 6018, Testing net (#0)
I0428 22:10:57.593374 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:10:59.901054 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:11:02.604328 25678 solver.cpp:397] Test net output #0: accuracy = 0.441789
I0428 22:11:02.604370 25678 solver.cpp:397] Test net output #1: loss = 2.8424 (* 1 = 2.8424 loss)
I0428 22:11:04.400775 25678 solver.cpp:218] Iteration 6024 (0.841989 iter/s, 14.252s/12 iters), loss = 0.44778
I0428 22:11:04.400813 25678 solver.cpp:237] Train net output #0: loss = 0.44778 (* 1 = 0.44778 loss)
I0428 22:11:04.400820 25678 sgd_solver.cpp:105] Iteration 6024, lr = 0.00443623
I0428 22:11:09.350334 25678 solver.cpp:218] Iteration 6036 (2.42448 iter/s, 4.94951s/12 iters), loss = 0.215603
I0428 22:11:09.350374 25678 solver.cpp:237] Train net output #0: loss = 0.215603 (* 1 = 0.215603 loss)
I0428 22:11:09.350381 25678 sgd_solver.cpp:105] Iteration 6036, lr = 0.00442897
I0428 22:11:14.248483 25678 solver.cpp:218] Iteration 6048 (2.44993 iter/s, 4.89809s/12 iters), loss = 0.321975
I0428 22:11:14.248524 25678 solver.cpp:237] Train net output #0: loss = 0.321975 (* 1 = 0.321975 loss)
I0428 22:11:14.248531 25678 sgd_solver.cpp:105] Iteration 6048, lr = 0.00442172
I0428 22:11:19.178514 25678 solver.cpp:218] Iteration 6060 (2.43409 iter/s, 4.92998s/12 iters), loss = 0.429551
I0428 22:11:19.178552 25678 solver.cpp:237] Train net output #0: loss = 0.429551 (* 1 = 0.429551 loss)
I0428 22:11:19.178560 25678 sgd_solver.cpp:105] Iteration 6060, lr = 0.00441446
I0428 22:11:22.548863 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:11:24.057449 25678 solver.cpp:218] Iteration 6072 (2.45958 iter/s, 4.87888s/12 iters), loss = 0.271909
I0428 22:11:24.057487 25678 solver.cpp:237] Train net output #0: loss = 0.271909 (* 1 = 0.271909 loss)
I0428 22:11:24.057495 25678 sgd_solver.cpp:105] Iteration 6072, lr = 0.00440721
I0428 22:11:28.887781 25678 solver.cpp:218] Iteration 6084 (2.48433 iter/s, 4.83028s/12 iters), loss = 0.375299
I0428 22:11:28.887817 25678 solver.cpp:237] Train net output #0: loss = 0.375299 (* 1 = 0.375299 loss)
I0428 22:11:28.887825 25678 sgd_solver.cpp:105] Iteration 6084, lr = 0.00439996
I0428 22:11:33.794132 25678 solver.cpp:218] Iteration 6096 (2.44583 iter/s, 4.9063s/12 iters), loss = 0.255188
I0428 22:11:33.794170 25678 solver.cpp:237] Train net output #0: loss = 0.255188 (* 1 = 0.255188 loss)
I0428 22:11:33.794178 25678 sgd_solver.cpp:105] Iteration 6096, lr = 0.00439272
I0428 22:11:38.743086 25678 solver.cpp:218] Iteration 6108 (2.42478 iter/s, 4.9489s/12 iters), loss = 0.245115
I0428 22:11:38.743122 25678 solver.cpp:237] Train net output #0: loss = 0.245115 (* 1 = 0.245115 loss)
I0428 22:11:38.743130 25678 sgd_solver.cpp:105] Iteration 6108, lr = 0.00438548
I0428 22:11:43.206661 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6120.caffemodel
I0428 22:11:46.965382 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6120.solverstate
I0428 22:11:51.085598 25678 solver.cpp:330] Iteration 6120, Testing net (#0)
I0428 22:11:51.085623 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:11:53.224880 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:11:55.888777 25678 solver.cpp:397] Test net output #0: accuracy = 0.430147
I0428 22:11:55.888823 25678 solver.cpp:397] Test net output #1: loss = 2.9591 (* 1 = 2.9591 loss)
I0428 22:11:55.985548 25678 solver.cpp:218] Iteration 6120 (0.695958 iter/s, 17.2424s/12 iters), loss = 0.283783
I0428 22:11:55.985594 25678 solver.cpp:237] Train net output #0: loss = 0.283783 (* 1 = 0.283783 loss)
I0428 22:11:55.985601 25678 sgd_solver.cpp:105] Iteration 6120, lr = 0.00437823
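[Note] The checkpoints land every 102 iterations (5916, 6018, 6120, ...), which is the snapshot and test_interval of 102 from the solver settings logged at startup. A trivial check of the schedule:

interval = 102                                   # snapshot == test_interval in this run
print([5916 + k * interval for k in range(4)])   # -> [5916, 6018, 6120, 6222], as seen in this log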
I0428 22:12:00.148701 25678 solver.cpp:218] Iteration 6132 (2.88247 iter/s, 4.1631s/12 iters), loss = 0.220706
I0428 22:12:00.148737 25678 solver.cpp:237] Train net output #0: loss = 0.220706 (* 1 = 0.220706 loss)
I0428 22:12:00.148744 25678 sgd_solver.cpp:105] Iteration 6132, lr = 0.004371
I0428 22:12:05.082681 25678 solver.cpp:218] Iteration 6144 (2.43214 iter/s, 4.93393s/12 iters), loss = 0.332598
I0428 22:12:05.082722 25678 solver.cpp:237] Train net output #0: loss = 0.332598 (* 1 = 0.332598 loss)
I0428 22:12:05.082731 25678 sgd_solver.cpp:105] Iteration 6144, lr = 0.00436376
I0428 22:12:09.994899 25678 solver.cpp:218] Iteration 6156 (2.44291 iter/s, 4.91217s/12 iters), loss = 0.277061
I0428 22:12:09.994935 25678 solver.cpp:237] Train net output #0: loss = 0.277061 (* 1 = 0.277061 loss)
I0428 22:12:09.994942 25678 sgd_solver.cpp:105] Iteration 6156, lr = 0.00435653
I0428 22:12:14.963086 25678 solver.cpp:218] Iteration 6168 (2.41539 iter/s, 4.96814s/12 iters), loss = 0.258008
I0428 22:12:14.963121 25678 solver.cpp:237] Train net output #0: loss = 0.258008 (* 1 = 0.258008 loss)
I0428 22:12:14.963129 25678 sgd_solver.cpp:105] Iteration 6168, lr = 0.0043493
I0428 22:12:15.546782 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:12:19.948235 25678 solver.cpp:218] Iteration 6180 (2.40717 iter/s, 4.9851s/12 iters), loss = 0.344595
I0428 22:12:19.948271 25678 solver.cpp:237] Train net output #0: loss = 0.344595 (* 1 = 0.344595 loss)
I0428 22:12:19.948279 25678 sgd_solver.cpp:105] Iteration 6180, lr = 0.00434207
I0428 22:12:24.836289 25678 solver.cpp:218] Iteration 6192 (2.45499 iter/s, 4.88801s/12 iters), loss = 0.243105
I0428 22:12:24.836405 25678 solver.cpp:237] Train net output #0: loss = 0.243105 (* 1 = 0.243105 loss)
I0428 22:12:24.836414 25678 sgd_solver.cpp:105] Iteration 6192, lr = 0.00433485
I0428 22:12:29.831848 25678 solver.cpp:218] Iteration 6204 (2.40219 iter/s, 4.99543s/12 iters), loss = 0.36171
I0428 22:12:29.831883 25678 solver.cpp:237] Train net output #0: loss = 0.36171 (* 1 = 0.36171 loss)
I0428 22:12:29.831890 25678 sgd_solver.cpp:105] Iteration 6204, lr = 0.00432763
I0428 22:12:34.810554 25678 solver.cpp:218] Iteration 6216 (2.41029 iter/s, 4.97866s/12 iters), loss = 0.234761
I0428 22:12:34.810590 25678 solver.cpp:237] Train net output #0: loss = 0.234761 (* 1 = 0.234761 loss)
I0428 22:12:34.810604 25678 sgd_solver.cpp:105] Iteration 6216, lr = 0.00432041
I0428 22:12:36.797646 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6222.caffemodel
I0428 22:12:39.856395 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6222.solverstate
I0428 22:12:42.216190 25678 solver.cpp:330] Iteration 6222, Testing net (#0)
I0428 22:12:42.216210 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:12:44.272223 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:12:45.489363 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:12:46.782706 25678 solver.cpp:397] Test net output #0: accuracy = 0.45098
I0428 22:12:46.782739 25678 solver.cpp:397] Test net output #1: loss = 2.92695 (* 1 = 2.92695 loss)
I0428 22:12:48.575881 25678 solver.cpp:218] Iteration 6228 (0.871758 iter/s, 13.7653s/12 iters), loss = 0.37063
I0428 22:12:48.575919 25678 solver.cpp:237] Train net output #0: loss = 0.37063 (* 1 = 0.37063 loss)
I0428 22:12:48.575927 25678 sgd_solver.cpp:105] Iteration 6228, lr = 0.00431319
I0428 22:12:53.523303 25678 solver.cpp:218] Iteration 6240 (2.42553 iter/s, 4.94737s/12 iters), loss = 0.18242
I0428 22:12:53.523340 25678 solver.cpp:237] Train net output #0: loss = 0.18242 (* 1 = 0.18242 loss)
I0428 22:12:53.523348 25678 sgd_solver.cpp:105] Iteration 6240, lr = 0.00430598
I0428 22:12:58.375375 25678 solver.cpp:218] Iteration 6252 (2.4732 iter/s, 4.85202s/12 iters), loss = 0.221591
I0428 22:12:58.375496 25678 solver.cpp:237] Train net output #0: loss = 0.221591 (* 1 = 0.221591 loss)
I0428 22:12:58.375505 25678 sgd_solver.cpp:105] Iteration 6252, lr = 0.00429877
I0428 22:13:03.258813 25678 solver.cpp:218] Iteration 6264 (2.45735 iter/s, 4.8833s/12 iters), loss = 0.308068
I0428 22:13:03.258852 25678 solver.cpp:237] Train net output #0: loss = 0.308068 (* 1 = 0.308068 loss)
I0428 22:13:03.258859 25678 sgd_solver.cpp:105] Iteration 6264, lr = 0.00429156
I0428 22:13:05.936650 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:13:08.223178 25678 solver.cpp:218] Iteration 6276 (2.41725 iter/s, 4.96432s/12 iters), loss = 0.245856
I0428 22:13:08.223212 25678 solver.cpp:237] Train net output #0: loss = 0.245856 (* 1 = 0.245856 loss)
I0428 22:13:08.223219 25678 sgd_solver.cpp:105] Iteration 6276, lr = 0.00428436
I0428 22:13:13.104373 25678 solver.cpp:218] Iteration 6288 (2.45844 iter/s, 4.88115s/12 iters), loss = 0.53498
I0428 22:13:13.104408 25678 solver.cpp:237] Train net output #0: loss = 0.53498 (* 1 = 0.53498 loss)
I0428 22:13:13.104416 25678 sgd_solver.cpp:105] Iteration 6288, lr = 0.00427716
I0428 22:13:18.072444 25678 solver.cpp:218] Iteration 6300 (2.41545 iter/s, 4.96802s/12 iters), loss = 0.258528
I0428 22:13:18.072484 25678 solver.cpp:237] Train net output #0: loss = 0.258528 (* 1 = 0.258528 loss)
I0428 22:13:18.072492 25678 sgd_solver.cpp:105] Iteration 6300, lr = 0.00426996
I0428 22:13:22.955196 25678 solver.cpp:218] Iteration 6312 (2.45766 iter/s, 4.8827s/12 iters), loss = 0.309362
I0428 22:13:22.955233 25678 solver.cpp:237] Train net output #0: loss = 0.309362 (* 1 = 0.309362 loss)
I0428 22:13:22.955240 25678 sgd_solver.cpp:105] Iteration 6312, lr = 0.00426277
I0428 22:13:27.395365 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6324.caffemodel
I0428 22:13:30.470432 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6324.solverstate
I0428 22:13:32.821902 25678 solver.cpp:330] Iteration 6324, Testing net (#0)
I0428 22:13:32.821926 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:13:34.834444 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:13:37.391726 25678 solver.cpp:397] Test net output #0: accuracy = 0.456495
I0428 22:13:37.391769 25678 solver.cpp:397] Test net output #1: loss = 2.84237 (* 1 = 2.84237 loss)
I0428 22:13:37.487135 25678 solver.cpp:218] Iteration 6324 (0.82577 iter/s, 14.5319s/12 iters), loss = 0.328125
I0428 22:13:37.487164 25678 solver.cpp:237] Train net output #0: loss = 0.328125 (* 1 = 0.328125 loss)
I0428 22:13:37.487170 25678 sgd_solver.cpp:105] Iteration 6324, lr = 0.00425557
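[Note] To turn this log into curves (train loss, learning rate, validation accuracy), a small throwaway parser is enough. This sketch is not part of DIGITS; it assumes only the line formats visible above, reads this file by its own name, and pairs each test accuracy with the most recently seen training iteration as an approximation:

import re

iter_loss = re.compile(r"Iteration (\d+) \(.*\), loss = ([0-9.]+)")
iter_lr   = re.compile(r"Iteration (\d+), lr = ([0-9.e+-]+)")
test_acc  = re.compile(r"Test net output #0: accuracy = ([0-9.]+)")

train, lrs, tests = [], [], []
with open("caffe_output.log") as f:
    for line in f:
        if (m := iter_loss.search(line)):
            train.append((int(m.group(1)), float(m.group(2))))
        elif (m := iter_lr.search(line)):
            lrs.append((int(m.group(1)), float(m.group(2))))
        elif (m := test_acc.search(line)):
            # accuracy lines carry no iteration number; attach the latest training iteration
            tests.append((train[-1][0] if train else 0, float(m.group(1))))

print(len(train), "train points,", len(lrs), "lr points,", len(tests), "test points")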
I0428 22:13:41.619907 25678 solver.cpp:218] Iteration 6336 (2.90365 iter/s, 4.13273s/12 iters), loss = 0.235427
I0428 22:13:41.619951 25678 solver.cpp:237] Train net output #0: loss = 0.235427 (* 1 = 0.235427 loss)
I0428 22:13:41.619959 25678 sgd_solver.cpp:105] Iteration 6336, lr = 0.00424839
I0428 22:13:46.585140 25678 solver.cpp:218] Iteration 6348 (2.41683 iter/s, 4.96518s/12 iters), loss = 0.151957
I0428 22:13:46.585176 25678 solver.cpp:237] Train net output #0: loss = 0.151957 (* 1 = 0.151957 loss)
I0428 22:13:46.585183 25678 sgd_solver.cpp:105] Iteration 6348, lr = 0.0042412
I0428 22:13:51.544353 25678 solver.cpp:218] Iteration 6360 (2.41976 iter/s, 4.95916s/12 iters), loss = 0.28252
I0428 22:13:51.544394 25678 solver.cpp:237] Train net output #0: loss = 0.28252 (* 1 = 0.28252 loss)
I0428 22:13:51.544400 25678 sgd_solver.cpp:105] Iteration 6360, lr = 0.00423402
I0428 22:13:56.363463 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:13:56.504640 25678 solver.cpp:218] Iteration 6372 (2.41924 iter/s, 4.96024s/12 iters), loss = 0.313016
I0428 22:13:56.504678 25678 solver.cpp:237] Train net output #0: loss = 0.313016 (* 1 = 0.313016 loss)
I0428 22:13:56.504684 25678 sgd_solver.cpp:105] Iteration 6372, lr = 0.00422684
I0428 22:14:01.437840 25678 solver.cpp:218] Iteration 6384 (2.43252 iter/s, 4.93315s/12 iters), loss = 0.148011
I0428 22:14:01.437952 25678 solver.cpp:237] Train net output #0: loss = 0.148011 (* 1 = 0.148011 loss)
I0428 22:14:01.437963 25678 sgd_solver.cpp:105] Iteration 6384, lr = 0.00421966
I0428 22:14:06.375620 25678 solver.cpp:218] Iteration 6396 (2.4303 iter/s, 4.93766s/12 iters), loss = 0.197472
I0428 22:14:06.375654 25678 solver.cpp:237] Train net output #0: loss = 0.197472 (* 1 = 0.197472 loss)
I0428 22:14:06.375661 25678 sgd_solver.cpp:105] Iteration 6396, lr = 0.00421249
I0428 22:14:11.398975 25678 solver.cpp:218] Iteration 6408 (2.38886 iter/s, 5.02331s/12 iters), loss = 0.200585
I0428 22:14:11.399011 25678 solver.cpp:237] Train net output #0: loss = 0.200585 (* 1 = 0.200585 loss)
I0428 22:14:11.399019 25678 sgd_solver.cpp:105] Iteration 6408, lr = 0.00420532
I0428 22:14:16.403134 25678 solver.cpp:218] Iteration 6420 (2.39803 iter/s, 5.00411s/12 iters), loss = 0.32353
I0428 22:14:16.403172 25678 solver.cpp:237] Train net output #0: loss = 0.32353 (* 1 = 0.32353 loss)
I0428 22:14:16.403178 25678 sgd_solver.cpp:105] Iteration 6420, lr = 0.00419816
I0428 22:14:18.432340 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6426.caffemodel
I0428 22:14:21.498950 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6426.solverstate
I0428 22:14:23.867862 25678 solver.cpp:330] Iteration 6426, Testing net (#0)
I0428 22:14:23.867882 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:14:25.857303 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:14:28.655706 25678 solver.cpp:397] Test net output #0: accuracy = 0.454657
I0428 22:14:28.655755 25678 solver.cpp:397] Test net output #1: loss = 2.89762 (* 1 = 2.89762 loss)
I0428 22:14:30.466347 25678 solver.cpp:218] Iteration 6432 (0.853293 iter/s, 14.0632s/12 iters), loss = 0.318362
I0428 22:14:30.466387 25678 solver.cpp:237] Train net output #0: loss = 0.318362 (* 1 = 0.318362 loss)
I0428 22:14:30.466394 25678 sgd_solver.cpp:105] Iteration 6432, lr = 0.004191
I0428 22:14:35.369710 25678 solver.cpp:218] Iteration 6444 (2.44732 iter/s, 4.90332s/12 iters), loss = 0.138143
I0428 22:14:35.369823 25678 solver.cpp:237] Train net output #0: loss = 0.138143 (* 1 = 0.138143 loss)
I0428 22:14:35.369832 25678 sgd_solver.cpp:105] Iteration 6444, lr = 0.00418384
I0428 22:14:40.282620 25678 solver.cpp:218] Iteration 6456 (2.44261 iter/s, 4.91279s/12 iters), loss = 0.179773
I0428 22:14:40.282660 25678 solver.cpp:237] Train net output #0: loss = 0.179773 (* 1 = 0.179773 loss)
I0428 22:14:40.282667 25678 sgd_solver.cpp:105] Iteration 6456, lr = 0.00417668
I0428 22:14:45.243232 25678 solver.cpp:218] Iteration 6468 (2.41908 iter/s, 4.96056s/12 iters), loss = 0.280347
I0428 22:14:45.243266 25678 solver.cpp:237] Train net output #0: loss = 0.280347 (* 1 = 0.280347 loss)
I0428 22:14:45.243273 25678 sgd_solver.cpp:105] Iteration 6468, lr = 0.00416953
I0428 22:14:47.211143 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:14:50.216281 25678 solver.cpp:218] Iteration 6480 (2.41303 iter/s, 4.973s/12 iters), loss = 0.13262
I0428 22:14:50.216318 25678 solver.cpp:237] Train net output #0: loss = 0.13262 (* 1 = 0.13262 loss)
I0428 22:14:50.216326 25678 sgd_solver.cpp:105] Iteration 6480, lr = 0.00416238
I0428 22:14:55.124789 25678 solver.cpp:218] Iteration 6492 (2.44476 iter/s, 4.90845s/12 iters), loss = 0.224896
I0428 22:14:55.124825 25678 solver.cpp:237] Train net output #0: loss = 0.224896 (* 1 = 0.224896 loss)
I0428 22:14:55.124835 25678 sgd_solver.cpp:105] Iteration 6492, lr = 0.00415524
I0428 22:15:00.089452 25678 solver.cpp:218] Iteration 6504 (2.41711 iter/s, 4.96461s/12 iters), loss = 0.264289
I0428 22:15:00.089493 25678 solver.cpp:237] Train net output #0: loss = 0.264289 (* 1 = 0.264289 loss)
I0428 22:15:00.089501 25678 sgd_solver.cpp:105] Iteration 6504, lr = 0.0041481
I0428 22:15:05.009274 25678 solver.cpp:218] Iteration 6516 (2.43914 iter/s, 4.91977s/12 iters), loss = 0.240881
I0428 22:15:05.009311 25678 solver.cpp:237] Train net output #0: loss = 0.240881 (* 1 = 0.240881 loss)
I0428 22:15:05.009318 25678 sgd_solver.cpp:105] Iteration 6516, lr = 0.00414096
I0428 22:15:09.396179 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6528.caffemodel
I0428 22:15:12.465929 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6528.solverstate
I0428 22:15:14.819437 25678 solver.cpp:330] Iteration 6528, Testing net (#0)
I0428 22:15:14.819454 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:15:16.691658 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:15:19.314503 25678 solver.cpp:397] Test net output #0: accuracy = 0.44424
I0428 22:15:19.314551 25678 solver.cpp:397] Test net output #1: loss = 2.88961 (* 1 = 2.88961 loss)
I0428 22:15:19.412195 25678 solver.cpp:218] Iteration 6528 (0.833167 iter/s, 14.4029s/12 iters), loss = 0.185151
I0428 22:15:19.412235 25678 solver.cpp:237] Train net output #0: loss = 0.185151 (* 1 = 0.185151 loss)
I0428 22:15:19.412245 25678 sgd_solver.cpp:105] Iteration 6528, lr = 0.00413382
I0428 22:15:23.488497 25678 solver.cpp:218] Iteration 6540 (2.94388 iter/s, 4.07625s/12 iters), loss = 0.254923
I0428 22:15:23.488533 25678 solver.cpp:237] Train net output #0: loss = 0.254923 (* 1 = 0.254923 loss)
I0428 22:15:23.488539 25678 sgd_solver.cpp:105] Iteration 6540, lr = 0.00412669
I0428 22:15:28.421604 25678 solver.cpp:218] Iteration 6552 (2.43257 iter/s, 4.93306s/12 iters), loss = 0.214237
I0428 22:15:28.421639 25678 solver.cpp:237] Train net output #0: loss = 0.214237 (* 1 = 0.214237 loss)
I0428 22:15:28.421648 25678 sgd_solver.cpp:105] Iteration 6552, lr = 0.00411957
I0428 22:15:33.348971 25678 solver.cpp:218] Iteration 6564 (2.4354 iter/s, 4.92732s/12 iters), loss = 0.261868
I0428 22:15:33.349006 25678 solver.cpp:237] Train net output #0: loss = 0.261868 (* 1 = 0.261868 loss)
I0428 22:15:33.349014 25678 sgd_solver.cpp:105] Iteration 6564, lr = 0.00411244
I0428 22:15:37.552707 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:15:38.324013 25678 solver.cpp:218] Iteration 6576 (2.41206 iter/s, 4.975s/12 iters), loss = 0.295968
I0428 22:15:38.324049 25678 solver.cpp:237] Train net output #0: loss = 0.295968 (* 1 = 0.295968 loss)
I0428 22:15:38.324056 25678 sgd_solver.cpp:105] Iteration 6576, lr = 0.00410532
I0428 22:15:43.171628 25678 solver.cpp:218] Iteration 6588 (2.47547 iter/s, 4.84757s/12 iters), loss = 0.243774
I0428 22:15:43.171705 25678 solver.cpp:237] Train net output #0: loss = 0.243774 (* 1 = 0.243774 loss)
I0428 22:15:43.171713 25678 sgd_solver.cpp:105] Iteration 6588, lr = 0.00409821
I0428 22:15:48.049726 25678 solver.cpp:218] Iteration 6600 (2.46002 iter/s, 4.87801s/12 iters), loss = 0.233344
I0428 22:15:48.049764 25678 solver.cpp:237] Train net output #0: loss = 0.233344 (* 1 = 0.233344 loss)
I0428 22:15:48.049772 25678 sgd_solver.cpp:105] Iteration 6600, lr = 0.0040911
I0428 22:15:52.892920 25678 solver.cpp:218] Iteration 6612 (2.47773 iter/s, 4.84314s/12 iters), loss = 0.314723
I0428 22:15:52.892956 25678 solver.cpp:237] Train net output #0: loss = 0.314723 (* 1 = 0.314723 loss)
I0428 22:15:52.892963 25678 sgd_solver.cpp:105] Iteration 6612, lr = 0.00408399
I0428 22:15:57.828570 25678 solver.cpp:218] Iteration 6624 (2.43131 iter/s, 4.9356s/12 iters), loss = 0.196125
I0428 22:15:57.828606 25678 solver.cpp:237] Train net output #0: loss = 0.196125 (* 1 = 0.196125 loss)
I0428 22:15:57.828613 25678 sgd_solver.cpp:105] Iteration 6624, lr = 0.00407688
I0428 22:15:59.823191 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6630.caffemodel
I0428 22:16:02.882108 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6630.solverstate
I0428 22:16:05.235038 25678 solver.cpp:330] Iteration 6630, Testing net (#0)
I0428 22:16:05.235061 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:16:07.158852 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:16:10.057066 25678 solver.cpp:397] Test net output #0: accuracy = 0.456495
I0428 22:16:10.057113 25678 solver.cpp:397] Test net output #1: loss = 2.86492 (* 1 = 2.86492 loss)
I0428 22:16:11.880908 25678 solver.cpp:218] Iteration 6636 (0.853953 iter/s, 14.0523s/12 iters), loss = 0.307979
I0428 22:16:11.880949 25678 solver.cpp:237] Train net output #0: loss = 0.307979 (* 1 = 0.307979 loss)
I0428 22:16:11.880956 25678 sgd_solver.cpp:105] Iteration 6636, lr = 0.00406978
I0428 22:16:16.816627 25678 solver.cpp:218] Iteration 6648 (2.43128 iter/s, 4.93567s/12 iters), loss = 0.280689
I0428 22:16:16.816717 25678 solver.cpp:237] Train net output #0: loss = 0.280689 (* 1 = 0.280689 loss)
I0428 22:16:16.816725 25678 sgd_solver.cpp:105] Iteration 6648, lr = 0.00406269
I0428 22:16:21.802240 25678 solver.cpp:218] Iteration 6660 (2.40697 iter/s, 4.98551s/12 iters), loss = 0.138965
I0428 22:16:21.802278 25678 solver.cpp:237] Train net output #0: loss = 0.138965 (* 1 = 0.138965 loss)
I0428 22:16:21.802285 25678 sgd_solver.cpp:105] Iteration 6660, lr = 0.0040556
I0428 22:16:26.826498 25678 solver.cpp:218] Iteration 6672 (2.38843 iter/s, 5.02421s/12 iters), loss = 0.210462
I0428 22:16:26.826535 25678 solver.cpp:237] Train net output #0: loss = 0.210462 (* 1 = 0.210462 loss)
I0428 22:16:26.826543 25678 sgd_solver.cpp:105] Iteration 6672, lr = 0.00404851
I0428 22:16:28.181687 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:16:31.845341 25678 solver.cpp:218] Iteration 6684 (2.39101 iter/s, 5.0188s/12 iters), loss = 0.219299
I0428 22:16:31.845378 25678 solver.cpp:237] Train net output #0: loss = 0.219299 (* 1 = 0.219299 loss)
I0428 22:16:31.845386 25678 sgd_solver.cpp:105] Iteration 6684, lr = 0.00404142
I0428 22:16:36.762045 25678 solver.cpp:218] Iteration 6696 (2.44068 iter/s, 4.91666s/12 iters), loss = 0.223122
I0428 22:16:36.762080 25678 solver.cpp:237] Train net output #0: loss = 0.223122 (* 1 = 0.223122 loss)
I0428 22:16:36.762089 25678 sgd_solver.cpp:105] Iteration 6696, lr = 0.00403434
I0428 22:16:41.740936 25678 solver.cpp:218] Iteration 6708 (2.4102 iter/s, 4.97884s/12 iters), loss = 0.216666
I0428 22:16:41.740978 25678 solver.cpp:237] Train net output #0: loss = 0.216666 (* 1 = 0.216666 loss)
I0428 22:16:41.740986 25678 sgd_solver.cpp:105] Iteration 6708, lr = 0.00402726
I0428 22:16:46.740197 25678 solver.cpp:218] Iteration 6720 (2.40038 iter/s, 4.99921s/12 iters), loss = 0.150554
I0428 22:16:46.740237 25678 solver.cpp:237] Train net output #0: loss = 0.150554 (* 1 = 0.150554 loss)
I0428 22:16:46.740244 25678 sgd_solver.cpp:105] Iteration 6720, lr = 0.00402019
I0428 22:16:51.227288 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6732.caffemodel
I0428 22:16:54.287976 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6732.solverstate
I0428 22:16:56.957257 25678 solver.cpp:330] Iteration 6732, Testing net (#0)
I0428 22:16:56.957276 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:16:58.834869 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:17:01.580785 25678 solver.cpp:397] Test net output #0: accuracy = 0.455882
I0428 22:17:01.580832 25678 solver.cpp:397] Test net output #1: loss = 2.96401 (* 1 = 2.96401 loss)
I0428 22:17:01.678107 25678 solver.cpp:218] Iteration 6732 (0.803328 iter/s, 14.9379s/12 iters), loss = 0.090909
I0428 22:17:01.678153 25678 solver.cpp:237] Train net output #0: loss = 0.090909 (* 1 = 0.090909 loss)
I0428 22:17:01.678161 25678 sgd_solver.cpp:105] Iteration 6732, lr = 0.00401312
I0428 22:17:05.818738 25678 solver.cpp:218] Iteration 6744 (2.89815 iter/s, 4.14057s/12 iters), loss = 0.143885
I0428 22:17:05.818775 25678 solver.cpp:237] Train net output #0: loss = 0.143885 (* 1 = 0.143885 loss)
I0428 22:17:05.818783 25678 sgd_solver.cpp:105] Iteration 6744, lr = 0.00400606
I0428 22:17:10.750655 25678 solver.cpp:218] Iteration 6756 (2.43316 iter/s, 4.93187s/12 iters), loss = 0.164653
I0428 22:17:10.750691 25678 solver.cpp:237] Train net output #0: loss = 0.164653 (* 1 = 0.164653 loss)
I0428 22:17:10.750699 25678 sgd_solver.cpp:105] Iteration 6756, lr = 0.003999
I0428 22:17:15.706023 25678 solver.cpp:218] Iteration 6768 (2.42164 iter/s, 4.95531s/12 iters), loss = 0.138112
I0428 22:17:15.706076 25678 solver.cpp:237] Train net output #0: loss = 0.138112 (* 1 = 0.138112 loss)
I0428 22:17:15.706089 25678 sgd_solver.cpp:105] Iteration 6768, lr = 0.00399194
I0428 22:17:19.159435 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:17:20.657260 25678 solver.cpp:218] Iteration 6780 (2.42367 iter/s, 4.95118s/12 iters), loss = 0.202658
I0428 22:17:20.657297 25678 solver.cpp:237] Train net output #0: loss = 0.202658 (* 1 = 0.202658 loss)
I0428 22:17:20.657305 25678 sgd_solver.cpp:105] Iteration 6780, lr = 0.00398489
I0428 22:17:25.607178 25678 solver.cpp:218] Iteration 6792 (2.42431 iter/s, 4.94987s/12 iters), loss = 0.138391
I0428 22:17:25.607285 25678 solver.cpp:237] Train net output #0: loss = 0.138391 (* 1 = 0.138391 loss)
I0428 22:17:25.607293 25678 sgd_solver.cpp:105] Iteration 6792, lr = 0.00397784
I0428 22:17:30.508978 25678 solver.cpp:218] Iteration 6804 (2.44814 iter/s, 4.90168s/12 iters), loss = 0.10775
I0428 22:17:30.509016 25678 solver.cpp:237] Train net output #0: loss = 0.10775 (* 1 = 0.10775 loss)
I0428 22:17:30.509024 25678 sgd_solver.cpp:105] Iteration 6804, lr = 0.0039708
I0428 22:17:35.459610 25678 solver.cpp:218] Iteration 6816 (2.42396 iter/s, 4.95058s/12 iters), loss = 0.244684
I0428 22:17:35.459645 25678 solver.cpp:237] Train net output #0: loss = 0.244684 (* 1 = 0.244684 loss)
I0428 22:17:35.459652 25678 sgd_solver.cpp:105] Iteration 6816, lr = 0.00396376
I0428 22:17:40.389600 25678 solver.cpp:218] Iteration 6828 (2.43411 iter/s, 4.92994s/12 iters), loss = 0.308779
I0428 22:17:40.389636 25678 solver.cpp:237] Train net output #0: loss = 0.308779 (* 1 = 0.308779 loss)
I0428 22:17:40.389644 25678 sgd_solver.cpp:105] Iteration 6828, lr = 0.00395672
I0428 22:17:42.417280 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6834.caffemodel
I0428 22:17:45.479593 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6834.solverstate
I0428 22:17:47.890425 25678 solver.cpp:330] Iteration 6834, Testing net (#0)
I0428 22:17:47.890444 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:17:49.792335 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:17:52.634457 25678 solver.cpp:397] Test net output #0: accuracy = 0.441789
I0428 22:17:52.634500 25678 solver.cpp:397] Test net output #1: loss = 3.05218 (* 1 = 3.05218 loss)
I0428 22:17:54.429845 25678 solver.cpp:218] Iteration 6840 (0.854688 iter/s, 14.0402s/12 iters), loss = 0.319313
I0428 22:17:54.429883 25678 solver.cpp:237] Train net output #0: loss = 0.319313 (* 1 = 0.319313 loss)
I0428 22:17:54.429891 25678 sgd_solver.cpp:105] Iteration 6840, lr = 0.00394969
I0428 22:17:59.347667 25678 solver.cpp:218] Iteration 6852 (2.44013 iter/s, 4.91777s/12 iters), loss = 0.31454
I0428 22:17:59.347824 25678 solver.cpp:237] Train net output #0: loss = 0.31454 (* 1 = 0.31454 loss)
I0428 22:17:59.347834 25678 sgd_solver.cpp:105] Iteration 6852, lr = 0.00394267
I0428 22:18:04.279992 25678 solver.cpp:218] Iteration 6864 (2.43301 iter/s, 4.93216s/12 iters), loss = 0.277892
I0428 22:18:04.280030 25678 solver.cpp:237] Train net output #0: loss = 0.277892 (* 1 = 0.277892 loss)
I0428 22:18:04.280038 25678 sgd_solver.cpp:105] Iteration 6864, lr = 0.00393565
I0428 22:18:09.250584 25678 solver.cpp:218] Iteration 6876 (2.41422 iter/s, 4.97054s/12 iters), loss = 0.182474
I0428 22:18:09.250627 25678 solver.cpp:237] Train net output #0: loss = 0.182474 (* 1 = 0.182474 loss)
I0428 22:18:09.250634 25678 sgd_solver.cpp:105] Iteration 6876, lr = 0.00392863
I0428 22:18:09.861168 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:18:14.221101 25678 solver.cpp:218] Iteration 6888 (2.41426 iter/s, 4.97046s/12 iters), loss = 0.134687
I0428 22:18:14.221140 25678 solver.cpp:237] Train net output #0: loss = 0.134687 (* 1 = 0.134687 loss)
I0428 22:18:14.221148 25678 sgd_solver.cpp:105] Iteration 6888, lr = 0.00392162
I0428 22:18:19.129150 25678 solver.cpp:218] Iteration 6900 (2.44499 iter/s, 4.90799s/12 iters), loss = 0.317768
I0428 22:18:19.129187 25678 solver.cpp:237] Train net output #0: loss = 0.317768 (* 1 = 0.317768 loss)
I0428 22:18:19.129195 25678 sgd_solver.cpp:105] Iteration 6900, lr = 0.00391461
I0428 22:18:24.046834 25678 solver.cpp:218] Iteration 6912 (2.4402 iter/s, 4.91763s/12 iters), loss = 0.124759
I0428 22:18:24.046871 25678 solver.cpp:237] Train net output #0: loss = 0.124759 (* 1 = 0.124759 loss)
I0428 22:18:24.046878 25678 sgd_solver.cpp:105] Iteration 6912, lr = 0.0039076
I0428 22:18:29.018311 25678 solver.cpp:218] Iteration 6924 (2.4138 iter/s, 4.97142s/12 iters), loss = 0.185391
I0428 22:18:29.018362 25678 solver.cpp:237] Train net output #0: loss = 0.185391 (* 1 = 0.185391 loss)
I0428 22:18:29.018373 25678 sgd_solver.cpp:105] Iteration 6924, lr = 0.0039006
I0428 22:18:33.388377 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_6936.caffemodel
I0428 22:18:36.457295 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_6936.solverstate
I0428 22:18:38.815863 25678 solver.cpp:330] Iteration 6936, Testing net (#0)
I0428 22:18:38.815881 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:18:39.323941 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:18:40.596449 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:18:43.639297 25678 solver.cpp:397] Test net output #0: accuracy = 0.438113
I0428 22:18:43.639343 25678 solver.cpp:397] Test net output #1: loss = 3.03992 (* 1 = 3.03992 loss)
I0428 22:18:43.736696 25678 solver.cpp:218] Iteration 6936 (0.81531 iter/s, 14.7183s/12 iters), loss = 0.1585
I0428 22:18:43.736739 25678 solver.cpp:237] Train net output #0: loss = 0.1585 (* 1 = 0.1585 loss)
I0428 22:18:43.736747 25678 sgd_solver.cpp:105] Iteration 6936, lr = 0.00389361
I0428 22:18:47.843418 25678 solver.cpp:218] Iteration 6948 (2.92208 iter/s, 4.10667s/12 iters), loss = 0.166502
I0428 22:18:47.843458 25678 solver.cpp:237] Train net output #0: loss = 0.166502 (* 1 = 0.166502 loss)
I0428 22:18:47.843466 25678 sgd_solver.cpp:105] Iteration 6948, lr = 0.00388662
I0428 22:18:52.768735 25678 solver.cpp:218] Iteration 6960 (2.43641 iter/s, 4.92527s/12 iters), loss = 0.207773
I0428 22:18:52.768775 25678 solver.cpp:237] Train net output #0: loss = 0.207773 (* 1 = 0.207773 loss)
I0428 22:18:52.768783 25678 sgd_solver.cpp:105] Iteration 6960, lr = 0.00387963
I0428 22:18:57.734331 25678 solver.cpp:218] Iteration 6972 (2.41665 iter/s, 4.96554s/12 iters), loss = 0.186489
I0428 22:18:57.734366 25678 solver.cpp:237] Train net output #0: loss = 0.186489 (* 1 = 0.186489 loss)
I0428 22:18:57.734373 25678 sgd_solver.cpp:105] Iteration 6972, lr = 0.00387265
I0428 22:19:00.440760 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:19:02.660449 25678 solver.cpp:218] Iteration 6984 (2.43602 iter/s, 4.92607s/12 iters), loss = 0.254294
I0428 22:19:02.660486 25678 solver.cpp:237] Train net output #0: loss = 0.254294 (* 1 = 0.254294 loss)
I0428 22:19:02.660493 25678 sgd_solver.cpp:105] Iteration 6984, lr = 0.00386567
I0428 22:19:07.604207 25678 solver.cpp:218] Iteration 6996 (2.42733 iter/s, 4.94371s/12 iters), loss = 0.26883
I0428 22:19:07.604326 25678 solver.cpp:237] Train net output #0: loss = 0.268831 (* 1 = 0.268831 loss)
I0428 22:19:07.604336 25678 sgd_solver.cpp:105] Iteration 6996, lr = 0.0038587
I0428 22:19:12.511965 25678 solver.cpp:218] Iteration 7008 (2.44517 iter/s, 4.90763s/12 iters), loss = 0.16202
I0428 22:19:12.511999 25678 solver.cpp:237] Train net output #0: loss = 0.16202 (* 1 = 0.16202 loss)
I0428 22:19:12.512007 25678 sgd_solver.cpp:105] Iteration 7008, lr = 0.00385173
I0428 22:19:17.477605 25678 solver.cpp:218] Iteration 7020 (2.41663 iter/s, 4.96559s/12 iters), loss = 0.183218
I0428 22:19:17.477645 25678 solver.cpp:237] Train net output #0: loss = 0.183218 (* 1 = 0.183218 loss)
I0428 22:19:17.477653 25678 sgd_solver.cpp:105] Iteration 7020, lr = 0.00384477
I0428 22:19:22.396235 25678 solver.cpp:218] Iteration 7032 (2.43973 iter/s, 4.91858s/12 iters), loss = 0.299548
I0428 22:19:22.396275 25678 solver.cpp:237] Train net output #0: loss = 0.299548 (* 1 = 0.299548 loss)
I0428 22:19:22.396283 25678 sgd_solver.cpp:105] Iteration 7032, lr = 0.00383781
I0428 22:19:24.430968 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7038.caffemodel
I0428 22:19:27.494779 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7038.solverstate
I0428 22:19:29.846026 25678 solver.cpp:330] Iteration 7038, Testing net (#0)
I0428 22:19:29.846043 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:19:31.591377 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:19:34.662793 25678 solver.cpp:397] Test net output #0: accuracy = 0.45098
I0428 22:19:34.662840 25678 solver.cpp:397] Test net output #1: loss = 2.86739 (* 1 = 2.86739 loss)
I0428 22:19:36.576864 25678 solver.cpp:218] Iteration 7044 (0.846227 iter/s, 14.1806s/12 iters), loss = 0.268325
I0428 22:19:36.576900 25678 solver.cpp:237] Train net output #0: loss = 0.268325 (* 1 = 0.268325 loss)
I0428 22:19:36.576907 25678 sgd_solver.cpp:105] Iteration 7044, lr = 0.00383086
I0428 22:19:41.480638 25678 solver.cpp:218] Iteration 7056 (2.44712 iter/s, 4.90373s/12 iters), loss = 0.186086
I0428 22:19:41.480756 25678 solver.cpp:237] Train net output #0: loss = 0.186086 (* 1 = 0.186086 loss)
I0428 22:19:41.480764 25678 sgd_solver.cpp:105] Iteration 7056, lr = 0.00382391
I0428 22:19:46.440519 25678 solver.cpp:218] Iteration 7068 (2.41948 iter/s, 4.95975s/12 iters), loss = 0.0950835
I0428 22:19:46.440553 25678 solver.cpp:237] Train net output #0: loss = 0.0950835 (* 1 = 0.0950835 loss)
I0428 22:19:46.440562 25678 sgd_solver.cpp:105] Iteration 7068, lr = 0.00381697
I0428 22:19:51.256425 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:19:51.370059 25678 solver.cpp:218] Iteration 7080 (2.43433 iter/s, 4.92949s/12 iters), loss = 0.419166
I0428 22:19:51.370095 25678 solver.cpp:237] Train net output #0: loss = 0.419166 (* 1 = 0.419166 loss)
I0428 22:19:51.370102 25678 sgd_solver.cpp:105] Iteration 7080, lr = 0.00381003
I0428 22:19:56.308043 25678 solver.cpp:218] Iteration 7092 (2.43017 iter/s, 4.93794s/12 iters), loss = 0.114093
I0428 22:19:56.308080 25678 solver.cpp:237] Train net output #0: loss = 0.114093 (* 1 = 0.114093 loss)
I0428 22:19:56.308089 25678 sgd_solver.cpp:105] Iteration 7092, lr = 0.00380309
I0428 22:20:01.267971 25678 solver.cpp:218] Iteration 7104 (2.41941 iter/s, 4.95988s/12 iters), loss = 0.214306
I0428 22:20:01.268004 25678 solver.cpp:237] Train net output #0: loss = 0.214307 (* 1 = 0.214307 loss)
I0428 22:20:01.268013 25678 sgd_solver.cpp:105] Iteration 7104, lr = 0.00379616
I0428 22:20:06.198825 25678 solver.cpp:218] Iteration 7116 (2.43368 iter/s, 4.93081s/12 iters), loss = 0.201169
I0428 22:20:06.198866 25678 solver.cpp:237] Train net output #0: loss = 0.201169 (* 1 = 0.201169 loss)
I0428 22:20:06.198874 25678 sgd_solver.cpp:105] Iteration 7116, lr = 0.00378924
I0428 22:20:11.140504 25678 solver.cpp:218] Iteration 7128 (2.42835 iter/s, 4.94162s/12 iters), loss = 0.195682
I0428 22:20:11.140545 25678 solver.cpp:237] Train net output #0: loss = 0.195682 (* 1 = 0.195682 loss)
I0428 22:20:11.140553 25678 sgd_solver.cpp:105] Iteration 7128, lr = 0.00378232
I0428 22:20:15.566015 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7140.caffemodel
I0428 22:20:18.632576 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7140.solverstate
I0428 22:20:21.213518 25678 solver.cpp:330] Iteration 7140, Testing net (#0)
I0428 22:20:21.213536 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:20:22.903007 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:20:26.038205 25678 solver.cpp:397] Test net output #0: accuracy = 0.469976
I0428 22:20:26.038251 25678 solver.cpp:397] Test net output #1: loss = 2.89931 (* 1 = 2.89931 loss)
I0428 22:20:26.133736 25678 solver.cpp:218] Iteration 7140 (0.800364 iter/s, 14.9932s/12 iters), loss = 0.0933569
I0428 22:20:26.133776 25678 solver.cpp:237] Train net output #0: loss = 0.0933569 (* 1 = 0.0933569 loss)
I0428 22:20:26.133785 25678 sgd_solver.cpp:105] Iteration 7140, lr = 0.00377541
I0428 22:20:30.264701 25678 solver.cpp:218] Iteration 7152 (2.90493 iter/s, 4.13091s/12 iters), loss = 0.150504
I0428 22:20:30.264744 25678 solver.cpp:237] Train net output #0: loss = 0.150504 (* 1 = 0.150504 loss)
I0428 22:20:30.264752 25678 sgd_solver.cpp:105] Iteration 7152, lr = 0.0037685
I0428 22:20:35.160995 25678 solver.cpp:218] Iteration 7164 (2.45086 iter/s, 4.89624s/12 iters), loss = 0.225072
I0428 22:20:35.161037 25678 solver.cpp:237] Train net output #0: loss = 0.225072 (* 1 = 0.225072 loss)
I0428 22:20:35.161046 25678 sgd_solver.cpp:105] Iteration 7164, lr = 0.00376159
I0428 22:20:40.129412 25678 solver.cpp:218] Iteration 7176 (2.41528 iter/s, 4.96837s/12 iters), loss = 0.10968
I0428 22:20:40.129448 25678 solver.cpp:237] Train net output #0: loss = 0.10968 (* 1 = 0.10968 loss)
I0428 22:20:40.129456 25678 sgd_solver.cpp:105] Iteration 7176, lr = 0.00375469
I0428 22:20:42.219564 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:20:45.072391 25678 solver.cpp:218] Iteration 7188 (2.42771 iter/s, 4.94293s/12 iters), loss = 0.11011
I0428 22:20:45.072424 25678 solver.cpp:237] Train net output #0: loss = 0.11011 (* 1 = 0.11011 loss)
I0428 22:20:45.072432 25678 sgd_solver.cpp:105] Iteration 7188, lr = 0.0037478
I0428 22:20:50.003865 25678 solver.cpp:218] Iteration 7200 (2.43337 iter/s, 4.93143s/12 iters), loss = 0.178425
I0428 22:20:50.003933 25678 solver.cpp:237] Train net output #0: loss = 0.178425 (* 1 = 0.178425 loss)
I0428 22:20:50.003942 25678 sgd_solver.cpp:105] Iteration 7200, lr = 0.00374091
I0428 22:20:54.993778 25678 solver.cpp:218] Iteration 7212 (2.40489 iter/s, 4.98984s/12 iters), loss = 0.0903224
I0428 22:20:54.993813 25678 solver.cpp:237] Train net output #0: loss = 0.0903224 (* 1 = 0.0903224 loss)
I0428 22:20:54.993820 25678 sgd_solver.cpp:105] Iteration 7212, lr = 0.00373403
I0428 22:20:59.856818 25678 solver.cpp:218] Iteration 7224 (2.46762 iter/s, 4.86299s/12 iters), loss = 0.108853
I0428 22:20:59.856860 25678 solver.cpp:237] Train net output #0: loss = 0.108853 (* 1 = 0.108853 loss)
I0428 22:20:59.856868 25678 sgd_solver.cpp:105] Iteration 7224, lr = 0.00372715
I0428 22:21:04.752876 25678 solver.cpp:218] Iteration 7236 (2.45098 iter/s, 4.896s/12 iters), loss = 0.182307
I0428 22:21:04.752912 25678 solver.cpp:237] Train net output #0: loss = 0.182307 (* 1 = 0.182307 loss)
I0428 22:21:04.752919 25678 sgd_solver.cpp:105] Iteration 7236, lr = 0.00372027
I0428 22:21:06.747339 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7242.caffemodel
I0428 22:21:09.816020 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7242.solverstate
I0428 22:21:12.187969 25678 solver.cpp:330] Iteration 7242, Testing net (#0)
I0428 22:21:12.187988 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:21:13.869815 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:21:17.040055 25678 solver.cpp:397] Test net output #0: accuracy = 0.453431
I0428 22:21:17.040103 25678 solver.cpp:397] Test net output #1: loss = 3.02628 (* 1 = 3.02628 loss)
I0428 22:21:18.835377 25678 solver.cpp:218] Iteration 7248 (0.852124 iter/s, 14.0825s/12 iters), loss = 0.164585
I0428 22:21:18.835419 25678 solver.cpp:237] Train net output #0: loss = 0.164585 (* 1 = 0.164585 loss)
I0428 22:21:18.835427 25678 sgd_solver.cpp:105] Iteration 7248, lr = 0.0037134
I0428 22:21:23.801877 25678 solver.cpp:218] Iteration 7260 (2.41622 iter/s, 4.96644s/12 iters), loss = 0.0980094
I0428 22:21:23.801982 25678 solver.cpp:237] Train net output #0: loss = 0.0980094 (* 1 = 0.0980094 loss)
I0428 22:21:23.801991 25678 sgd_solver.cpp:105] Iteration 7260, lr = 0.00370654
I0428 22:21:28.752830 25678 solver.cpp:218] Iteration 7272 (2.42383 iter/s, 4.95084s/12 iters), loss = 0.0653802
I0428 22:21:28.752869 25678 solver.cpp:237] Train net output #0: loss = 0.0653802 (* 1 = 0.0653802 loss)
I0428 22:21:28.752877 25678 sgd_solver.cpp:105] Iteration 7272, lr = 0.00369968
I0428 22:21:32.985172 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:21:33.724303 25678 solver.cpp:218] Iteration 7284 (2.4138 iter/s, 4.97142s/12 iters), loss = 0.24343
I0428 22:21:33.724337 25678 solver.cpp:237] Train net output #0: loss = 0.24343 (* 1 = 0.24343 loss)
I0428 22:21:33.724345 25678 sgd_solver.cpp:105] Iteration 7284, lr = 0.00369283
I0428 22:21:38.679849 25678 solver.cpp:218] Iteration 7296 (2.42155 iter/s, 4.9555s/12 iters), loss = 0.0812997
I0428 22:21:38.679885 25678 solver.cpp:237] Train net output #0: loss = 0.0812997 (* 1 = 0.0812997 loss)
I0428 22:21:38.679893 25678 sgd_solver.cpp:105] Iteration 7296, lr = 0.00368598
I0428 22:21:43.524765 25678 solver.cpp:218] Iteration 7308 (2.47685 iter/s, 4.84486s/12 iters), loss = 0.0890299
I0428 22:21:43.524803 25678 solver.cpp:237] Train net output #0: loss = 0.0890299 (* 1 = 0.0890299 loss)
I0428 22:21:43.524812 25678 sgd_solver.cpp:105] Iteration 7308, lr = 0.00367914
I0428 22:21:48.440402 25678 solver.cpp:218] Iteration 7320 (2.44121 iter/s, 4.91559s/12 iters), loss = 0.131968
I0428 22:21:48.440440 25678 solver.cpp:237] Train net output #0: loss = 0.131968 (* 1 = 0.131968 loss)
I0428 22:21:48.440448 25678 sgd_solver.cpp:105] Iteration 7320, lr = 0.0036723
I0428 22:21:53.429270 25678 solver.cpp:218] Iteration 7332 (2.40538 iter/s, 4.98883s/12 iters), loss = 0.174329
I0428 22:21:53.429299 25678 solver.cpp:237] Train net output #0: loss = 0.174329 (* 1 = 0.174329 loss)
I0428 22:21:53.429306 25678 sgd_solver.cpp:105] Iteration 7332, lr = 0.00366547
I0428 22:21:57.925370 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7344.caffemodel
I0428 22:22:00.996825 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7344.solverstate
I0428 22:22:03.381690 25678 solver.cpp:330] Iteration 7344, Testing net (#0)
I0428 22:22:03.381716 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:22:05.090957 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:22:08.298198 25678 solver.cpp:397] Test net output #0: accuracy = 0.452206
I0428 22:22:08.298245 25678 solver.cpp:397] Test net output #1: loss = 3.0462 (* 1 = 3.0462 loss)
I0428 22:22:08.394928 25678 solver.cpp:218] Iteration 7344 (0.801838 iter/s, 14.9656s/12 iters), loss = 0.0460839
I0428 22:22:08.394969 25678 solver.cpp:237] Train net output #0: loss = 0.0460839 (* 1 = 0.0460839 loss)
I0428 22:22:08.394977 25678 sgd_solver.cpp:105] Iteration 7344, lr = 0.00365864
I0428 22:22:12.532882 25678 solver.cpp:218] Iteration 7356 (2.90002 iter/s, 4.1379s/12 iters), loss = 0.180103
I0428 22:22:12.532922 25678 solver.cpp:237] Train net output #0: loss = 0.180103 (* 1 = 0.180103 loss)
I0428 22:22:12.532930 25678 sgd_solver.cpp:105] Iteration 7356, lr = 0.00365182
I0428 22:22:17.539405 25678 solver.cpp:218] Iteration 7368 (2.3969 iter/s, 5.00647s/12 iters), loss = 0.162925
I0428 22:22:17.539443 25678 solver.cpp:237] Train net output #0: loss = 0.162925 (* 1 = 0.162925 loss)
I0428 22:22:17.539450 25678 sgd_solver.cpp:105] Iteration 7368, lr = 0.00364501
I0428 22:22:22.565893 25678 solver.cpp:218] Iteration 7380 (2.38738 iter/s, 5.02644s/12 iters), loss = 0.0647968
I0428 22:22:22.565932 25678 solver.cpp:237] Train net output #0: loss = 0.0647968 (* 1 = 0.0647968 loss)
I0428 22:22:22.565940 25678 sgd_solver.cpp:105] Iteration 7380, lr = 0.0036382
I0428 22:22:23.944463 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:22:27.560397 25678 solver.cpp:218] Iteration 7392 (2.40267 iter/s, 4.99445s/12 iters), loss = 0.11206
I0428 22:22:27.560437 25678 solver.cpp:237] Train net output #0: loss = 0.11206 (* 1 = 0.11206 loss)
I0428 22:22:27.560446 25678 sgd_solver.cpp:105] Iteration 7392, lr = 0.00363139
I0428 22:22:32.557514 25678 solver.cpp:218] Iteration 7404 (2.40141 iter/s, 4.99707s/12 iters), loss = 0.0737182
I0428 22:22:32.557664 25678 solver.cpp:237] Train net output #0: loss = 0.0737182 (* 1 = 0.0737182 loss)
I0428 22:22:32.557673 25678 sgd_solver.cpp:105] Iteration 7404, lr = 0.00362459
I0428 22:22:37.585088 25678 solver.cpp:218] Iteration 7416 (2.38691 iter/s, 5.02742s/12 iters), loss = 0.143244
I0428 22:22:37.585124 25678 solver.cpp:237] Train net output #0: loss = 0.143244 (* 1 = 0.143244 loss)
I0428 22:22:37.585130 25678 sgd_solver.cpp:105] Iteration 7416, lr = 0.0036178
I0428 22:22:42.499817 25678 solver.cpp:218] Iteration 7428 (2.44167 iter/s, 4.91468s/12 iters), loss = 0.194078
I0428 22:22:42.499857 25678 solver.cpp:237] Train net output #0: loss = 0.194078 (* 1 = 0.194078 loss)
I0428 22:22:42.499864 25678 sgd_solver.cpp:105] Iteration 7428, lr = 0.00361101
I0428 22:22:47.457535 25678 solver.cpp:218] Iteration 7440 (2.42049 iter/s, 4.95767s/12 iters), loss = 0.217392
I0428 22:22:47.457569 25678 solver.cpp:237] Train net output #0: loss = 0.217393 (* 1 = 0.217393 loss)
I0428 22:22:47.457576 25678 sgd_solver.cpp:105] Iteration 7440, lr = 0.00360423
I0428 22:22:49.449195 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7446.caffemodel
I0428 22:22:53.285390 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7446.solverstate
I0428 22:22:55.641312 25678 solver.cpp:330] Iteration 7446, Testing net (#0)
I0428 22:22:55.641331 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:22:57.234694 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:23:00.526592 25678 solver.cpp:397] Test net output #0: accuracy = 0.45098
I0428 22:23:00.526628 25678 solver.cpp:397] Test net output #1: loss = 2.97243 (* 1 = 2.97243 loss)
I0428 22:23:02.353099 25678 solver.cpp:218] Iteration 7452 (0.805611 iter/s, 14.8955s/12 iters), loss = 0.107953
I0428 22:23:02.353137 25678 solver.cpp:237] Train net output #0: loss = 0.107953 (* 1 = 0.107953 loss)
I0428 22:23:02.353144 25678 sgd_solver.cpp:105] Iteration 7452, lr = 0.00359745
I0428 22:23:07.256846 25678 solver.cpp:218] Iteration 7464 (2.44713 iter/s, 4.9037s/12 iters), loss = 0.0650911
I0428 22:23:07.256912 25678 solver.cpp:237] Train net output #0: loss = 0.0650911 (* 1 = 0.0650911 loss)
I0428 22:23:07.256922 25678 sgd_solver.cpp:105] Iteration 7464, lr = 0.00359068
I0428 22:23:12.214171 25678 solver.cpp:218] Iteration 7476 (2.4207 iter/s, 4.95725s/12 iters), loss = 0.0526145
I0428 22:23:12.214208 25678 solver.cpp:237] Train net output #0: loss = 0.0526146 (* 1 = 0.0526146 loss)
I0428 22:23:12.214215 25678 sgd_solver.cpp:105] Iteration 7476, lr = 0.00358391
I0428 22:23:15.689878 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:23:17.158272 25678 solver.cpp:218] Iteration 7488 (2.42716 iter/s, 4.94405s/12 iters), loss = 0.186712
I0428 22:23:17.158310 25678 solver.cpp:237] Train net output #0: loss = 0.186712 (* 1 = 0.186712 loss)
I0428 22:23:17.158318 25678 sgd_solver.cpp:105] Iteration 7488, lr = 0.00357715
I0428 22:23:22.049419 25678 solver.cpp:218] Iteration 7500 (2.45344 iter/s, 4.8911s/12 iters), loss = 0.249523
I0428 22:23:22.049461 25678 solver.cpp:237] Train net output #0: loss = 0.249523 (* 1 = 0.249523 loss)
I0428 22:23:22.049469 25678 sgd_solver.cpp:105] Iteration 7500, lr = 0.0035704
I0428 22:23:26.915335 25678 solver.cpp:218] Iteration 7512 (2.46616 iter/s, 4.86586s/12 iters), loss = 0.1124
I0428 22:23:26.915374 25678 solver.cpp:237] Train net output #0: loss = 0.1124 (* 1 = 0.1124 loss)
I0428 22:23:26.915381 25678 sgd_solver.cpp:105] Iteration 7512, lr = 0.00356365
I0428 22:23:31.822912 25678 solver.cpp:218] Iteration 7524 (2.44522 iter/s, 4.90753s/12 iters), loss = 0.136144
I0428 22:23:31.822948 25678 solver.cpp:237] Train net output #0: loss = 0.136144 (* 1 = 0.136144 loss)
I0428 22:23:31.822957 25678 sgd_solver.cpp:105] Iteration 7524, lr = 0.00355691
I0428 22:23:36.751475 25678 solver.cpp:218] Iteration 7536 (2.43481 iter/s, 4.92852s/12 iters), loss = 0.05428
I0428 22:23:36.751511 25678 solver.cpp:237] Train net output #0: loss = 0.05428 (* 1 = 0.05428 loss)
I0428 22:23:36.751518 25678 sgd_solver.cpp:105] Iteration 7536, lr = 0.00355017
I0428 22:23:41.205785 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7548.caffemodel
I0428 22:23:45.524891 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7548.solverstate
I0428 22:23:48.459362 25678 solver.cpp:330] Iteration 7548, Testing net (#0)
I0428 22:23:48.459388 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:23:50.088696 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:23:53.502230 25678 solver.cpp:397] Test net output #0: accuracy = 0.476716
I0428 22:23:53.502262 25678 solver.cpp:397] Test net output #1: loss = 2.93986 (* 1 = 2.93986 loss)
I0428 22:23:53.599530 25678 solver.cpp:218] Iteration 7548 (0.71225 iter/s, 16.848s/12 iters), loss = 0.105198
I0428 22:23:53.599576 25678 solver.cpp:237] Train net output #0: loss = 0.105198 (* 1 = 0.105198 loss)
I0428 22:23:53.599584 25678 sgd_solver.cpp:105] Iteration 7548, lr = 0.00354344
I0428 22:23:57.653198 25678 solver.cpp:218] Iteration 7560 (2.96032 iter/s, 4.05361s/12 iters), loss = 0.215528
I0428 22:23:57.653237 25678 solver.cpp:237] Train net output #0: loss = 0.215528 (* 1 = 0.215528 loss)
I0428 22:23:57.653244 25678 sgd_solver.cpp:105] Iteration 7560, lr = 0.00353671
I0428 22:24:02.572261 25678 solver.cpp:218] Iteration 7572 (2.43951 iter/s, 4.91901s/12 iters), loss = 0.13076
I0428 22:24:02.572299 25678 solver.cpp:237] Train net output #0: loss = 0.13076 (* 1 = 0.13076 loss)
I0428 22:24:02.572307 25678 sgd_solver.cpp:105] Iteration 7572, lr = 0.00352999
I0428 22:24:07.534456 25678 solver.cpp:218] Iteration 7584 (2.41831 iter/s, 4.96215s/12 iters), loss = 0.121991
I0428 22:24:07.534494 25678 solver.cpp:237] Train net output #0: loss = 0.121991 (* 1 = 0.121991 loss)
I0428 22:24:07.534502 25678 sgd_solver.cpp:105] Iteration 7584, lr = 0.00352328
I0428 22:24:08.177417 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:24:12.435081 25678 solver.cpp:218] Iteration 7596 (2.44869 iter/s, 4.90057s/12 iters), loss = 0.183252
I0428 22:24:12.435199 25678 solver.cpp:237] Train net output #0: loss = 0.183252 (* 1 = 0.183252 loss)
I0428 22:24:12.435207 25678 sgd_solver.cpp:105] Iteration 7596, lr = 0.00351657
I0428 22:24:17.378379 25678 solver.cpp:218] Iteration 7608 (2.42759 iter/s, 4.94317s/12 iters), loss = 0.158016
I0428 22:24:17.378413 25678 solver.cpp:237] Train net output #0: loss = 0.158016 (* 1 = 0.158016 loss)
I0428 22:24:17.378422 25678 sgd_solver.cpp:105] Iteration 7608, lr = 0.00350986
I0428 22:24:22.306327 25678 solver.cpp:218] Iteration 7620 (2.43511 iter/s, 4.9279s/12 iters), loss = 0.222129
I0428 22:24:22.306365 25678 solver.cpp:237] Train net output #0: loss = 0.222129 (* 1 = 0.222129 loss)
I0428 22:24:22.306371 25678 sgd_solver.cpp:105] Iteration 7620, lr = 0.00350317
I0428 22:24:24.331542 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:24:27.309520 25678 solver.cpp:218] Iteration 7632 (2.39849 iter/s, 5.00315s/12 iters), loss = 0.167924
I0428 22:24:27.309556 25678 solver.cpp:237] Train net output #0: loss = 0.167924 (* 1 = 0.167924 loss)
I0428 22:24:27.309562 25678 sgd_solver.cpp:105] Iteration 7632, lr = 0.00349648
I0428 22:24:32.302386 25678 solver.cpp:218] Iteration 7644 (2.40345 iter/s, 4.99282s/12 iters), loss = 0.27051
I0428 22:24:32.302423 25678 solver.cpp:237] Train net output #0: loss = 0.27051 (* 1 = 0.27051 loss)
I0428 22:24:32.302429 25678 sgd_solver.cpp:105] Iteration 7644, lr = 0.00348979
I0428 22:24:34.303741 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7650.caffemodel
I0428 22:24:37.376546 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7650.solverstate
I0428 22:24:39.796944 25678 solver.cpp:330] Iteration 7650, Testing net (#0)
I0428 22:24:39.796962 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:24:41.359160 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:24:44.624876 25678 solver.cpp:397] Test net output #0: accuracy = 0.474265
I0428 22:24:44.625054 25678 solver.cpp:397] Test net output #1: loss = 2.99118 (* 1 = 2.99118 loss)
I0428 22:24:46.438832 25678 solver.cpp:218] Iteration 7656 (0.848872 iter/s, 14.1364s/12 iters), loss = 0.0951798
I0428 22:24:46.438874 25678 solver.cpp:237] Train net output #0: loss = 0.0951798 (* 1 = 0.0951798 loss)
I0428 22:24:46.438882 25678 sgd_solver.cpp:105] Iteration 7656, lr = 0.00348311
I0428 22:24:51.409806 25678 solver.cpp:218] Iteration 7668 (2.41404 iter/s, 4.97092s/12 iters), loss = 0.235323
I0428 22:24:51.409845 25678 solver.cpp:237] Train net output #0: loss = 0.235323 (* 1 = 0.235323 loss)
I0428 22:24:51.409852 25678 sgd_solver.cpp:105] Iteration 7668, lr = 0.00347644
I0428 22:24:56.389897 25678 solver.cpp:218] Iteration 7680 (2.40962 iter/s, 4.98004s/12 iters), loss = 0.172142
I0428 22:24:56.389935 25678 solver.cpp:237] Train net output #0: loss = 0.172142 (* 1 = 0.172142 loss)
I0428 22:24:56.389941 25678 sgd_solver.cpp:105] Iteration 7680, lr = 0.00346977
I0428 22:24:59.156873 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:25:01.341956 25678 solver.cpp:218] Iteration 7692 (2.42326 iter/s, 4.95201s/12 iters), loss = 0.0837416
I0428 22:25:01.341991 25678 solver.cpp:237] Train net output #0: loss = 0.0837416 (* 1 = 0.0837416 loss)
I0428 22:25:01.342000 25678 sgd_solver.cpp:105] Iteration 7692, lr = 0.00346311
I0428 22:25:06.310190 25678 solver.cpp:218] Iteration 7704 (2.41537 iter/s, 4.96819s/12 iters), loss = 0.117302
I0428 22:25:06.310226 25678 solver.cpp:237] Train net output #0: loss = 0.117302 (* 1 = 0.117302 loss)
I0428 22:25:06.310233 25678 sgd_solver.cpp:105] Iteration 7704, lr = 0.00345646
I0428 22:25:11.246150 25678 solver.cpp:218] Iteration 7716 (2.43116 iter/s, 4.93591s/12 iters), loss = 0.185387
I0428 22:25:11.246186 25678 solver.cpp:237] Train net output #0: loss = 0.185387 (* 1 = 0.185387 loss)
I0428 22:25:11.246194 25678 sgd_solver.cpp:105] Iteration 7716, lr = 0.00344981
I0428 22:25:16.141295 25678 solver.cpp:218] Iteration 7728 (2.45143 iter/s, 4.8951s/12 iters), loss = 0.131893
I0428 22:25:16.141363 25678 solver.cpp:237] Train net output #0: loss = 0.131893 (* 1 = 0.131893 loss)
I0428 22:25:16.141371 25678 sgd_solver.cpp:105] Iteration 7728, lr = 0.00344316
I0428 22:25:21.051036 25678 solver.cpp:218] Iteration 7740 (2.44416 iter/s, 4.90966s/12 iters), loss = 0.188913
I0428 22:25:21.051074 25678 solver.cpp:237] Train net output #0: loss = 0.188913 (* 1 = 0.188913 loss)
I0428 22:25:21.051080 25678 sgd_solver.cpp:105] Iteration 7740, lr = 0.00343653
I0428 22:25:25.557476 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7752.caffemodel
I0428 22:25:28.609997 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7752.solverstate
I0428 22:25:30.971911 25678 solver.cpp:330] Iteration 7752, Testing net (#0)
I0428 22:25:30.971930 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:25:32.420861 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:25:35.825016 25678 solver.cpp:397] Test net output #0: accuracy = 0.451593
I0428 22:25:35.825063 25678 solver.cpp:397] Test net output #1: loss = 3.07619 (* 1 = 3.07619 loss)
I0428 22:25:35.922379 25678 solver.cpp:218] Iteration 7752 (0.806923 iter/s, 14.8713s/12 iters), loss = 0.169861
I0428 22:25:35.922435 25678 solver.cpp:237] Train net output #0: loss = 0.169861 (* 1 = 0.169861 loss)
I0428 22:25:35.922443 25678 sgd_solver.cpp:105] Iteration 7752, lr = 0.0034299
I0428 22:25:40.038172 25678 solver.cpp:218] Iteration 7764 (2.91564 iter/s, 4.11574s/12 iters), loss = 0.11284
I0428 22:25:40.038210 25678 solver.cpp:237] Train net output #0: loss = 0.11284 (* 1 = 0.11284 loss)
I0428 22:25:40.038218 25678 sgd_solver.cpp:105] Iteration 7764, lr = 0.00342327
I0428 22:25:44.958468 25678 solver.cpp:218] Iteration 7776 (2.4389 iter/s, 4.92025s/12 iters), loss = 0.170917
I0428 22:25:44.958506 25678 solver.cpp:237] Train net output #0: loss = 0.170917 (* 1 = 0.170917 loss)
I0428 22:25:44.958513 25678 sgd_solver.cpp:105] Iteration 7776, lr = 0.00341665
I0428 22:25:49.778085 25678 solver.cpp:218] Iteration 7788 (2.48985 iter/s, 4.81957s/12 iters), loss = 0.190541
I0428 22:25:49.778237 25678 solver.cpp:237] Train net output #0: loss = 0.190541 (* 1 = 0.190541 loss)
I0428 22:25:49.778246 25678 sgd_solver.cpp:105] Iteration 7788, lr = 0.00341004
I0428 22:25:49.785563 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:25:54.698738 25678 solver.cpp:218] Iteration 7800 (2.43878 iter/s, 4.92049s/12 iters), loss = 0.0993916
I0428 22:25:54.698784 25678 solver.cpp:237] Train net output #0: loss = 0.0993916 (* 1 = 0.0993916 loss)
I0428 22:25:54.698792 25678 sgd_solver.cpp:105] Iteration 7800, lr = 0.00340343
I0428 22:25:59.573341 25678 solver.cpp:218] Iteration 7812 (2.46177 iter/s, 4.87455s/12 iters), loss = 0.0487302
I0428 22:25:59.573377 25678 solver.cpp:237] Train net output #0: loss = 0.0487302 (* 1 = 0.0487302 loss)
I0428 22:25:59.573385 25678 sgd_solver.cpp:105] Iteration 7812, lr = 0.00339683
I0428 22:26:04.483127 25678 solver.cpp:218] Iteration 7824 (2.44412 iter/s, 4.90973s/12 iters), loss = 0.192182
I0428 22:26:04.483170 25678 solver.cpp:237] Train net output #0: loss = 0.192182 (* 1 = 0.192182 loss)
I0428 22:26:04.483177 25678 sgd_solver.cpp:105] Iteration 7824, lr = 0.00339024
I0428 22:26:09.449776 25678 solver.cpp:218] Iteration 7836 (2.41614 iter/s, 4.9666s/12 iters), loss = 0.079927
I0428 22:26:09.449813 25678 solver.cpp:237] Train net output #0: loss = 0.079927 (* 1 = 0.079927 loss)
I0428 22:26:09.449821 25678 sgd_solver.cpp:105] Iteration 7836, lr = 0.00338365
I0428 22:26:14.376606 25678 solver.cpp:218] Iteration 7848 (2.43567 iter/s, 4.92678s/12 iters), loss = 0.168968
I0428 22:26:14.376642 25678 solver.cpp:237] Train net output #0: loss = 0.168968 (* 1 = 0.168968 loss)
I0428 22:26:14.376649 25678 sgd_solver.cpp:105] Iteration 7848, lr = 0.00337707
I0428 22:26:16.392664 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7854.caffemodel
I0428 22:26:20.869812 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7854.solverstate
I0428 22:26:24.913031 25678 solver.cpp:330] Iteration 7854, Testing net (#0)
I0428 22:26:24.913053 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:26:26.307220 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:26:29.755507 25678 solver.cpp:397] Test net output #0: accuracy = 0.473039
I0428 22:26:29.755546 25678 solver.cpp:397] Test net output #1: loss = 3.01303 (* 1 = 3.01303 loss)
I0428 22:26:31.539934 25678 solver.cpp:218] Iteration 7860 (0.699167 iter/s, 17.1633s/12 iters), loss = 0.156028
I0428 22:26:31.539971 25678 solver.cpp:237] Train net output #0: loss = 0.156028 (* 1 = 0.156028 loss)
I0428 22:26:31.539979 25678 sgd_solver.cpp:105] Iteration 7860, lr = 0.00337049
I0428 22:26:36.502089 25678 solver.cpp:218] Iteration 7872 (2.41833 iter/s, 4.96211s/12 iters), loss = 0.230175
I0428 22:26:36.502128 25678 solver.cpp:237] Train net output #0: loss = 0.230175 (* 1 = 0.230175 loss)
I0428 22:26:36.502136 25678 sgd_solver.cpp:105] Iteration 7872, lr = 0.00336393
I0428 22:26:41.386911 25678 solver.cpp:218] Iteration 7884 (2.45661 iter/s, 4.88477s/12 iters), loss = 0.134351
I0428 22:26:41.386946 25678 solver.cpp:237] Train net output #0: loss = 0.134351 (* 1 = 0.134351 loss)
I0428 22:26:41.386955 25678 sgd_solver.cpp:105] Iteration 7884, lr = 0.00335736
I0428 22:26:43.527750 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:26:46.399663 25678 solver.cpp:218] Iteration 7896 (2.39392 iter/s, 5.01271s/12 iters), loss = 0.0986693
I0428 22:26:46.399703 25678 solver.cpp:237] Train net output #0: loss = 0.0986693 (* 1 = 0.0986693 loss)
I0428 22:26:46.399709 25678 sgd_solver.cpp:105] Iteration 7896, lr = 0.00335081
I0428 22:26:51.336688 25678 solver.cpp:218] Iteration 7908 (2.43064 iter/s, 4.93698s/12 iters), loss = 0.0504983
I0428 22:26:51.336805 25678 solver.cpp:237] Train net output #0: loss = 0.0504983 (* 1 = 0.0504983 loss)
I0428 22:26:51.336815 25678 sgd_solver.cpp:105] Iteration 7908, lr = 0.00334426
I0428 22:26:56.295868 25678 solver.cpp:218] Iteration 7920 (2.41982 iter/s, 4.95905s/12 iters), loss = 0.0759939
I0428 22:26:56.295905 25678 solver.cpp:237] Train net output #0: loss = 0.0759939 (* 1 = 0.0759939 loss)
I0428 22:26:56.295913 25678 sgd_solver.cpp:105] Iteration 7920, lr = 0.00333771
I0428 22:27:01.239046 25678 solver.cpp:218] Iteration 7932 (2.42761 iter/s, 4.94313s/12 iters), loss = 0.151465
I0428 22:27:01.239081 25678 solver.cpp:237] Train net output #0: loss = 0.151465 (* 1 = 0.151465 loss)
I0428 22:27:01.239089 25678 sgd_solver.cpp:105] Iteration 7932, lr = 0.00333118
I0428 22:27:06.169051 25678 solver.cpp:218] Iteration 7944 (2.4341 iter/s, 4.92995s/12 iters), loss = 0.0576361
I0428 22:27:06.169093 25678 solver.cpp:237] Train net output #0: loss = 0.0576361 (* 1 = 0.0576361 loss)
I0428 22:27:06.169101 25678 sgd_solver.cpp:105] Iteration 7944, lr = 0.00332465
I0428 22:27:10.629603 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_7956.caffemodel
I0428 22:27:14.349784 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_7956.solverstate
I0428 22:27:16.747941 25678 solver.cpp:330] Iteration 7956, Testing net (#0)
I0428 22:27:16.747959 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:27:18.099572 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:27:21.571136 25678 solver.cpp:397] Test net output #0: accuracy = 0.481618
I0428 22:27:21.571280 25678 solver.cpp:397] Test net output #1: loss = 2.91031 (* 1 = 2.91031 loss)
I0428 22:27:21.667856 25678 solver.cpp:218] Iteration 7956 (0.774256 iter/s, 15.4988s/12 iters), loss = 0.107453
I0428 22:27:21.667898 25678 solver.cpp:237] Train net output #0: loss = 0.107453 (* 1 = 0.107453 loss)
I0428 22:27:21.667906 25678 sgd_solver.cpp:105] Iteration 7956, lr = 0.00331812
I0428 22:27:25.841964 25678 solver.cpp:218] Iteration 7968 (2.8749 iter/s, 4.17405s/12 iters), loss = 0.0985376
I0428 22:27:25.841997 25678 solver.cpp:237] Train net output #0: loss = 0.0985376 (* 1 = 0.0985376 loss)
I0428 22:27:25.842005 25678 sgd_solver.cpp:105] Iteration 7968, lr = 0.0033116
I0428 22:27:30.778903 25678 solver.cpp:218] Iteration 7980 (2.43068 iter/s, 4.93689s/12 iters), loss = 0.0800474
I0428 22:27:30.778939 25678 solver.cpp:237] Train net output #0: loss = 0.0800474 (* 1 = 0.0800474 loss)
I0428 22:27:30.778946 25678 sgd_solver.cpp:105] Iteration 7980, lr = 0.00330509
I0428 22:27:35.069820 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:27:35.785219 25678 solver.cpp:218] Iteration 7992 (2.39699 iter/s, 5.00627s/12 iters), loss = 0.0774769
I0428 22:27:35.785257 25678 solver.cpp:237] Train net output #0: loss = 0.0774769 (* 1 = 0.0774769 loss)
I0428 22:27:35.785264 25678 sgd_solver.cpp:105] Iteration 7992, lr = 0.00329859
I0428 22:27:40.774179 25678 solver.cpp:218] Iteration 8004 (2.40533 iter/s, 4.98891s/12 iters), loss = 0.154812
I0428 22:27:40.774214 25678 solver.cpp:237] Train net output #0: loss = 0.154812 (* 1 = 0.154812 loss)
I0428 22:27:40.774220 25678 sgd_solver.cpp:105] Iteration 8004, lr = 0.00329209
I0428 22:27:45.618849 25678 solver.cpp:218] Iteration 8016 (2.47697 iter/s, 4.84463s/12 iters), loss = 0.110681
I0428 22:27:45.618885 25678 solver.cpp:237] Train net output #0: loss = 0.110681 (* 1 = 0.110681 loss)
I0428 22:27:45.618892 25678 sgd_solver.cpp:105] Iteration 8016, lr = 0.0032856
I0428 22:27:50.597975 25678 solver.cpp:218] Iteration 8028 (2.41008 iter/s, 4.97908s/12 iters), loss = 0.127131
I0428 22:27:50.598011 25678 solver.cpp:237] Train net output #0: loss = 0.127131 (* 1 = 0.127131 loss)
I0428 22:27:50.598018 25678 sgd_solver.cpp:105] Iteration 8028, lr = 0.00327911
I0428 22:27:55.546093 25678 solver.cpp:218] Iteration 8040 (2.42519 iter/s, 4.94807s/12 iters), loss = 0.19025
I0428 22:27:55.546212 25678 solver.cpp:237] Train net output #0: loss = 0.19025 (* 1 = 0.19025 loss)
I0428 22:27:55.546221 25678 sgd_solver.cpp:105] Iteration 8040, lr = 0.00327263
I0428 22:28:00.519366 25678 solver.cpp:218] Iteration 8052 (2.41296 iter/s, 4.97315s/12 iters), loss = 0.0615575
I0428 22:28:00.519403 25678 solver.cpp:237] Train net output #0: loss = 0.0615576 (* 1 = 0.0615576 loss)
I0428 22:28:00.519410 25678 sgd_solver.cpp:105] Iteration 8052, lr = 0.00326616
I0428 22:28:02.496948 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8058.caffemodel
I0428 22:28:05.554570 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8058.solverstate
I0428 22:28:07.913580 25678 solver.cpp:330] Iteration 8058, Testing net (#0)
I0428 22:28:07.913605 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:28:09.205487 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:28:12.642385 25678 solver.cpp:397] Test net output #0: accuracy = 0.478554
I0428 22:28:12.642426 25678 solver.cpp:397] Test net output #1: loss = 2.95319 (* 1 = 2.95319 loss)
I0428 22:28:14.442018 25678 solver.cpp:218] Iteration 8064 (0.861907 iter/s, 13.9226s/12 iters), loss = 0.112183
I0428 22:28:14.442057 25678 solver.cpp:237] Train net output #0: loss = 0.112183 (* 1 = 0.112183 loss)
I0428 22:28:14.442065 25678 sgd_solver.cpp:105] Iteration 8064, lr = 0.0032597
I0428 22:28:19.405000 25678 solver.cpp:218] Iteration 8076 (2.41793 iter/s, 4.96293s/12 iters), loss = 0.0473804
I0428 22:28:19.405040 25678 solver.cpp:237] Train net output #0: loss = 0.0473805 (* 1 = 0.0473805 loss)
I0428 22:28:19.405047 25678 sgd_solver.cpp:105] Iteration 8076, lr = 0.00325324
I0428 22:28:24.360291 25678 solver.cpp:218] Iteration 8088 (2.42168 iter/s, 4.95524s/12 iters), loss = 0.0525464
I0428 22:28:24.360327 25678 solver.cpp:237] Train net output #0: loss = 0.0525464 (* 1 = 0.0525464 loss)
I0428 22:28:24.360334 25678 sgd_solver.cpp:105] Iteration 8088, lr = 0.00324679
I0428 22:28:25.774713 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:28:29.333962 25678 solver.cpp:218] Iteration 8100 (2.41273 iter/s, 4.97363s/12 iters), loss = 0.0731846
I0428 22:28:29.334000 25678 solver.cpp:237] Train net output #0: loss = 0.0731847 (* 1 = 0.0731847 loss)
I0428 22:28:29.334007 25678 sgd_solver.cpp:105] Iteration 8100, lr = 0.00324034
I0428 22:28:34.291330 25678 solver.cpp:218] Iteration 8112 (2.42066 iter/s, 4.95732s/12 iters), loss = 0.102271
I0428 22:28:34.291365 25678 solver.cpp:237] Train net output #0: loss = 0.102271 (* 1 = 0.102271 loss)
I0428 22:28:34.291374 25678 sgd_solver.cpp:105] Iteration 8112, lr = 0.0032339
I0428 22:28:39.210912 25678 solver.cpp:218] Iteration 8124 (2.43925 iter/s, 4.91954s/12 iters), loss = 0.0629569
I0428 22:28:39.210949 25678 solver.cpp:237] Train net output #0: loss = 0.0629569 (* 1 = 0.0629569 loss)
I0428 22:28:39.210956 25678 sgd_solver.cpp:105] Iteration 8124, lr = 0.00322747
I0428 22:28:44.105248 25678 solver.cpp:218] Iteration 8136 (2.45184 iter/s, 4.89429s/12 iters), loss = 0.0566237
I0428 22:28:44.105284 25678 solver.cpp:237] Train net output #0: loss = 0.0566238 (* 1 = 0.0566238 loss)
I0428 22:28:44.105293 25678 sgd_solver.cpp:105] Iteration 8136, lr = 0.00322104
I0428 22:28:49.031502 25678 solver.cpp:218] Iteration 8148 (2.43595 iter/s, 4.92621s/12 iters), loss = 0.0602982
I0428 22:28:49.031536 25678 solver.cpp:237] Train net output #0: loss = 0.0602983 (* 1 = 0.0602983 loss)
I0428 22:28:49.031544 25678 sgd_solver.cpp:105] Iteration 8148, lr = 0.00321462
I0428 22:28:53.485169 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8160.caffemodel
I0428 22:28:59.077641 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8160.solverstate
I0428 22:29:04.090385 25678 solver.cpp:330] Iteration 8160, Testing net (#0)
I0428 22:29:04.090404 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:29:05.345455 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:29:08.887950 25678 solver.cpp:397] Test net output #0: accuracy = 0.467524
I0428 22:29:08.887995 25678 solver.cpp:397] Test net output #1: loss = 3.04653 (* 1 = 3.04653 loss)
I0428 22:29:08.985358 25678 solver.cpp:218] Iteration 8160 (0.601389 iter/s, 19.9538s/12 iters), loss = 0.0526869
I0428 22:29:08.985404 25678 solver.cpp:237] Train net output #0: loss = 0.0526869 (* 1 = 0.0526869 loss)
I0428 22:29:08.985411 25678 sgd_solver.cpp:105] Iteration 8160, lr = 0.00320821
I0428 22:29:13.080979 25678 solver.cpp:218] Iteration 8172 (2.93 iter/s, 4.09557s/12 iters), loss = 0.107657
I0428 22:29:13.081017 25678 solver.cpp:237] Train net output #0: loss = 0.107657 (* 1 = 0.107657 loss)
I0428 22:29:13.081025 25678 sgd_solver.cpp:105] Iteration 8172, lr = 0.00320181
I0428 22:29:17.939975 25678 solver.cpp:218] Iteration 8184 (2.46967 iter/s, 4.85895s/12 iters), loss = 0.152012
I0428 22:29:17.940011 25678 solver.cpp:237] Train net output #0: loss = 0.152012 (* 1 = 0.152012 loss)
I0428 22:29:17.940018 25678 sgd_solver.cpp:105] Iteration 8184, lr = 0.00319541
I0428 22:29:21.410077 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:29:22.852622 25678 solver.cpp:218] Iteration 8196 (2.4427 iter/s, 4.9126s/12 iters), loss = 0.178458
I0428 22:29:22.852658 25678 solver.cpp:237] Train net output #0: loss = 0.178458 (* 1 = 0.178458 loss)
I0428 22:29:22.852665 25678 sgd_solver.cpp:105] Iteration 8196, lr = 0.00318902
I0428 22:29:27.800874 25678 solver.cpp:218] Iteration 8208 (2.42512 iter/s, 4.9482s/12 iters), loss = 0.0845781
I0428 22:29:27.800910 25678 solver.cpp:237] Train net output #0: loss = 0.0845782 (* 1 = 0.0845782 loss)
I0428 22:29:27.800918 25678 sgd_solver.cpp:105] Iteration 8208, lr = 0.00318263
I0428 22:29:32.751149 25678 solver.cpp:218] Iteration 8220 (2.42414 iter/s, 4.95022s/12 iters), loss = 0.100312
I0428 22:29:32.751289 25678 solver.cpp:237] Train net output #0: loss = 0.100312 (* 1 = 0.100312 loss)
I0428 22:29:32.751302 25678 sgd_solver.cpp:105] Iteration 8220, lr = 0.00317625
I0428 22:29:37.696530 25678 solver.cpp:218] Iteration 8232 (2.42658 iter/s, 4.94523s/12 iters), loss = 0.108665
I0428 22:29:37.696569 25678 solver.cpp:237] Train net output #0: loss = 0.108665 (* 1 = 0.108665 loss)
I0428 22:29:37.696576 25678 sgd_solver.cpp:105] Iteration 8232, lr = 0.00316988
I0428 22:29:42.666584 25678 solver.cpp:218] Iteration 8244 (2.41449 iter/s, 4.97s/12 iters), loss = 0.0722926
I0428 22:29:42.666628 25678 solver.cpp:237] Train net output #0: loss = 0.0722926 (* 1 = 0.0722926 loss)
I0428 22:29:42.666636 25678 sgd_solver.cpp:105] Iteration 8244, lr = 0.00316352
I0428 22:29:47.621738 25678 solver.cpp:218] Iteration 8256 (2.42175 iter/s, 4.9551s/12 iters), loss = 0.13119
I0428 22:29:47.621778 25678 solver.cpp:237] Train net output #0: loss = 0.13119 (* 1 = 0.13119 loss)
I0428 22:29:47.621786 25678 sgd_solver.cpp:105] Iteration 8256, lr = 0.00315716
I0428 22:29:49.639513 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8262.caffemodel
I0428 22:29:52.720326 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8262.solverstate
I0428 22:29:55.071611 25678 solver.cpp:330] Iteration 8262, Testing net (#0)
I0428 22:29:55.071628 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:29:56.296381 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:29:59.874963 25678 solver.cpp:397] Test net output #0: accuracy = 0.464461
I0428 22:29:59.875010 25678 solver.cpp:397] Test net output #1: loss = 2.90387 (* 1 = 2.90387 loss)
I0428 22:30:01.684916 25678 solver.cpp:218] Iteration 8268 (0.853295 iter/s, 14.0631s/12 iters), loss = 0.103431
I0428 22:30:01.684952 25678 solver.cpp:237] Train net output #0: loss = 0.103431 (* 1 = 0.103431 loss)
I0428 22:30:01.684960 25678 sgd_solver.cpp:105] Iteration 8268, lr = 0.00315081
I0428 22:30:06.617102 25678 solver.cpp:218] Iteration 8280 (2.43302 iter/s, 4.93214s/12 iters), loss = 0.0644934
I0428 22:30:06.617200 25678 solver.cpp:237] Train net output #0: loss = 0.0644934 (* 1 = 0.0644934 loss)
I0428 22:30:06.617209 25678 sgd_solver.cpp:105] Iteration 8280, lr = 0.00314447
I0428 22:30:11.533972 25678 solver.cpp:218] Iteration 8292 (2.44063 iter/s, 4.91676s/12 iters), loss = 0.124775
I0428 22:30:11.534008 25678 solver.cpp:237] Train net output #0: loss = 0.124775 (* 1 = 0.124775 loss)
I0428 22:30:11.534014 25678 sgd_solver.cpp:105] Iteration 8292, lr = 0.00313813
I0428 22:30:12.214383 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:30:16.474637 25678 solver.cpp:218] Iteration 8304 (2.42885 iter/s, 4.94062s/12 iters), loss = 0.0841714
I0428 22:30:16.474674 25678 solver.cpp:237] Train net output #0: loss = 0.0841714 (* 1 = 0.0841714 loss)
I0428 22:30:16.474682 25678 sgd_solver.cpp:105] Iteration 8304, lr = 0.0031318
I0428 22:30:18.846786 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:30:21.366377 25678 solver.cpp:218] Iteration 8316 (2.45314 iter/s, 4.89169s/12 iters), loss = 0.147582
I0428 22:30:21.366415 25678 solver.cpp:237] Train net output #0: loss = 0.147582 (* 1 = 0.147582 loss)
I0428 22:30:21.366423 25678 sgd_solver.cpp:105] Iteration 8316, lr = 0.00312548
I0428 22:30:26.341571 25678 solver.cpp:218] Iteration 8328 (2.41199 iter/s, 4.97515s/12 iters), loss = 0.192971
I0428 22:30:26.341609 25678 solver.cpp:237] Train net output #0: loss = 0.192971 (* 1 = 0.192971 loss)
I0428 22:30:26.341617 25678 sgd_solver.cpp:105] Iteration 8328, lr = 0.00311916
I0428 22:30:31.305516 25678 solver.cpp:218] Iteration 8340 (2.41746 iter/s, 4.96389s/12 iters), loss = 0.0549017
I0428 22:30:31.305552 25678 solver.cpp:237] Train net output #0: loss = 0.0549018 (* 1 = 0.0549018 loss)
I0428 22:30:31.305560 25678 sgd_solver.cpp:105] Iteration 8340, lr = 0.00311285
I0428 22:30:36.200002 25678 solver.cpp:218] Iteration 8352 (2.45176 iter/s, 4.89444s/12 iters), loss = 0.0443239
I0428 22:30:36.200039 25678 solver.cpp:237] Train net output #0: loss = 0.0443239 (* 1 = 0.0443239 loss)
I0428 22:30:36.200047 25678 sgd_solver.cpp:105] Iteration 8352, lr = 0.00310655
I0428 22:30:40.637758 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8364.caffemodel
I0428 22:30:43.883217 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8364.solverstate
I0428 22:30:46.239604 25678 solver.cpp:330] Iteration 8364, Testing net (#0)
I0428 22:30:46.239621 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:30:47.424708 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:30:50.910710 25678 solver.cpp:397] Test net output #0: accuracy = 0.466912
I0428 22:30:50.910754 25678 solver.cpp:397] Test net output #1: loss = 3.00835 (* 1 = 3.00835 loss)
I0428 22:30:51.008265 25678 solver.cpp:218] Iteration 8364 (0.810361 iter/s, 14.8082s/12 iters), loss = 0.0819057
I0428 22:30:51.008308 25678 solver.cpp:237] Train net output #0: loss = 0.0819058 (* 1 = 0.0819058 loss)
I0428 22:30:51.008316 25678 sgd_solver.cpp:105] Iteration 8364, lr = 0.00310026
I0428 22:30:55.101220 25678 solver.cpp:218] Iteration 8376 (2.9319 iter/s, 4.0929s/12 iters), loss = 0.0663018
I0428 22:30:55.101255 25678 solver.cpp:237] Train net output #0: loss = 0.0663019 (* 1 = 0.0663019 loss)
I0428 22:30:55.101263 25678 sgd_solver.cpp:105] Iteration 8376, lr = 0.00309397
I0428 22:31:00.089691 25678 solver.cpp:218] Iteration 8388 (2.40557 iter/s, 4.98843s/12 iters), loss = 0.0942425
I0428 22:31:00.089725 25678 solver.cpp:237] Train net output #0: loss = 0.0942425 (* 1 = 0.0942425 loss)
I0428 22:31:00.089733 25678 sgd_solver.cpp:105] Iteration 8388, lr = 0.00308769
I0428 22:31:02.878506 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:31:05.052867 25678 solver.cpp:218] Iteration 8400 (2.41783 iter/s, 4.96313s/12 iters), loss = 0.0578712
I0428 22:31:05.052906 25678 solver.cpp:237] Train net output #0: loss = 0.0578712 (* 1 = 0.0578712 loss)
I0428 22:31:05.052913 25678 sgd_solver.cpp:105] Iteration 8400, lr = 0.00308141
I0428 22:31:09.989008 25678 solver.cpp:218] Iteration 8412 (2.43107 iter/s, 4.93609s/12 iters), loss = 0.0514014
I0428 22:31:09.989045 25678 solver.cpp:237] Train net output #0: loss = 0.0514014 (* 1 = 0.0514014 loss)
I0428 22:31:09.989053 25678 sgd_solver.cpp:105] Iteration 8412, lr = 0.00307515
I0428 22:31:14.905043 25678 solver.cpp:218] Iteration 8424 (2.44101 iter/s, 4.91599s/12 iters), loss = 0.0570274
I0428 22:31:14.905164 25678 solver.cpp:237] Train net output #0: loss = 0.0570274 (* 1 = 0.0570274 loss)
I0428 22:31:14.905172 25678 sgd_solver.cpp:105] Iteration 8424, lr = 0.00306889
I0428 22:31:19.858650 25678 solver.cpp:218] Iteration 8436 (2.42254 iter/s, 4.95348s/12 iters), loss = 0.0679922
I0428 22:31:19.858688 25678 solver.cpp:237] Train net output #0: loss = 0.0679922 (* 1 = 0.0679922 loss)
I0428 22:31:19.858696 25678 sgd_solver.cpp:105] Iteration 8436, lr = 0.00306263
I0428 22:31:24.833640 25678 solver.cpp:218] Iteration 8448 (2.41209 iter/s, 4.97494s/12 iters), loss = 0.0516789
I0428 22:31:24.833676 25678 solver.cpp:237] Train net output #0: loss = 0.0516789 (* 1 = 0.0516789 loss)
I0428 22:31:24.833684 25678 sgd_solver.cpp:105] Iteration 8448, lr = 0.00305639
I0428 22:31:29.773749 25678 solver.cpp:218] Iteration 8460 (2.42912 iter/s, 4.94006s/12 iters), loss = 0.0641184
I0428 22:31:29.773787 25678 solver.cpp:237] Train net output #0: loss = 0.0641184 (* 1 = 0.0641184 loss)
I0428 22:31:29.773794 25678 sgd_solver.cpp:105] Iteration 8460, lr = 0.00305015
I0428 22:31:31.824834 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8466.caffemodel
I0428 22:31:36.706856 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8466.solverstate
I0428 22:31:40.763360 25678 solver.cpp:330] Iteration 8466, Testing net (#0)
I0428 22:31:40.763378 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:31:41.902590 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:31:45.579229 25678 solver.cpp:397] Test net output #0: accuracy = 0.478554
I0428 22:31:45.579317 25678 solver.cpp:397] Test net output #1: loss = 2.91827 (* 1 = 2.91827 loss)
I0428 22:31:47.359144 25678 solver.cpp:218] Iteration 8472 (0.682386 iter/s, 17.5854s/12 iters), loss = 0.128208
I0428 22:31:47.359181 25678 solver.cpp:237] Train net output #0: loss = 0.128208 (* 1 = 0.128208 loss)
I0428 22:31:47.359189 25678 sgd_solver.cpp:105] Iteration 8472, lr = 0.00304392
I0428 22:31:52.253955 25678 solver.cpp:218] Iteration 8484 (2.4516 iter/s, 4.89476s/12 iters), loss = 0.105749
I0428 22:31:52.253993 25678 solver.cpp:237] Train net output #0: loss = 0.105749 (* 1 = 0.105749 loss)
I0428 22:31:52.253999 25678 sgd_solver.cpp:105] Iteration 8484, lr = 0.00303769
I0428 22:31:57.220443 25678 solver.cpp:218] Iteration 8496 (2.41622 iter/s, 4.96644s/12 iters), loss = 0.214259
I0428 22:31:57.220484 25678 solver.cpp:237] Train net output #0: loss = 0.214259 (* 1 = 0.214259 loss)
I0428 22:31:57.220492 25678 sgd_solver.cpp:105] Iteration 8496, lr = 0.00303148
I0428 22:31:57.257175 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:32:02.084364 25678 solver.cpp:218] Iteration 8508 (2.46717 iter/s, 4.86387s/12 iters), loss = 0.0858581
I0428 22:32:02.084404 25678 solver.cpp:237] Train net output #0: loss = 0.0858581 (* 1 = 0.0858581 loss)
I0428 22:32:02.084412 25678 sgd_solver.cpp:105] Iteration 8508, lr = 0.00302527
I0428 22:32:07.003311 25678 solver.cpp:218] Iteration 8520 (2.43957 iter/s, 4.9189s/12 iters), loss = 0.0492501
I0428 22:32:07.003348 25678 solver.cpp:237] Train net output #0: loss = 0.0492501 (* 1 = 0.0492501 loss)
I0428 22:32:07.003355 25678 sgd_solver.cpp:105] Iteration 8520, lr = 0.00301907
I0428 22:32:11.914407 25678 solver.cpp:218] Iteration 8532 (2.44347 iter/s, 4.91105s/12 iters), loss = 0.0420032
I0428 22:32:11.914446 25678 solver.cpp:237] Train net output #0: loss = 0.0420032 (* 1 = 0.0420032 loss)
I0428 22:32:11.914453 25678 sgd_solver.cpp:105] Iteration 8532, lr = 0.00301287
I0428 22:32:16.831701 25678 solver.cpp:218] Iteration 8544 (2.44039 iter/s, 4.91724s/12 iters), loss = 0.065527
I0428 22:32:16.831797 25678 solver.cpp:237] Train net output #0: loss = 0.065527 (* 1 = 0.065527 loss)
I0428 22:32:16.831806 25678 sgd_solver.cpp:105] Iteration 8544, lr = 0.00300668
I0428 22:32:21.780254 25678 solver.cpp:218] Iteration 8556 (2.425 iter/s, 4.94845s/12 iters), loss = 0.117172
I0428 22:32:21.780289 25678 solver.cpp:237] Train net output #0: loss = 0.117172 (* 1 = 0.117172 loss)
I0428 22:32:21.780297 25678 sgd_solver.cpp:105] Iteration 8556, lr = 0.0030005
I0428 22:32:26.160980 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8568.caffemodel
I0428 22:32:29.229725 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8568.solverstate
I0428 22:32:31.625458 25678 solver.cpp:330] Iteration 8568, Testing net (#0)
I0428 22:32:31.625476 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:32:32.791087 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:32:36.564692 25678 solver.cpp:397] Test net output #0: accuracy = 0.474877
I0428 22:32:36.564734 25678 solver.cpp:397] Test net output #1: loss = 2.92473 (* 1 = 2.92473 loss)
I0428 22:32:36.662147 25678 solver.cpp:218] Iteration 8568 (0.806351 iter/s, 14.8818s/12 iters), loss = 0.0593131
I0428 22:32:36.662192 25678 solver.cpp:237] Train net output #0: loss = 0.0593131 (* 1 = 0.0593131 loss)
I0428 22:32:36.662201 25678 sgd_solver.cpp:105] Iteration 8568, lr = 0.00299433
I0428 22:32:40.745885 25678 solver.cpp:218] Iteration 8580 (2.93853 iter/s, 4.08368s/12 iters), loss = 0.14648
I0428 22:32:40.745923 25678 solver.cpp:237] Train net output #0: loss = 0.14648 (* 1 = 0.14648 loss)
I0428 22:32:40.745931 25678 sgd_solver.cpp:105] Iteration 8580, lr = 0.00298816
I0428 22:32:45.732738 25678 solver.cpp:218] Iteration 8592 (2.40635 iter/s, 4.9868s/12 iters), loss = 0.0494385
I0428 22:32:45.732779 25678 solver.cpp:237] Train net output #0: loss = 0.0494385 (* 1 = 0.0494385 loss)
I0428 22:32:45.732787 25678 sgd_solver.cpp:105] Iteration 8592, lr = 0.002982
I0428 22:32:47.894058 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:32:50.720902 25678 solver.cpp:218] Iteration 8604 (2.40572 iter/s, 4.98812s/12 iters), loss = 0.0497068
I0428 22:32:50.720937 25678 solver.cpp:237] Train net output #0: loss = 0.0497068 (* 1 = 0.0497068 loss)
I0428 22:32:50.720944 25678 sgd_solver.cpp:105] Iteration 8604, lr = 0.00297585
I0428 22:32:55.651049 25678 solver.cpp:218] Iteration 8616 (2.43402 iter/s, 4.93011s/12 iters), loss = 0.0533792
I0428 22:32:55.651085 25678 solver.cpp:237] Train net output #0: loss = 0.0533792 (* 1 = 0.0533792 loss)
I0428 22:32:55.651093 25678 sgd_solver.cpp:105] Iteration 8616, lr = 0.00296971
I0428 22:33:00.621806 25678 solver.cpp:218] Iteration 8628 (2.41414 iter/s, 4.97071s/12 iters), loss = 0.0442034
I0428 22:33:00.621845 25678 solver.cpp:237] Train net output #0: loss = 0.0442034 (* 1 = 0.0442034 loss)
I0428 22:33:00.621852 25678 sgd_solver.cpp:105] Iteration 8628, lr = 0.00296357
I0428 22:33:05.597659 25678 solver.cpp:218] Iteration 8640 (2.41167 iter/s, 4.9758s/12 iters), loss = 0.0724171
I0428 22:33:05.597697 25678 solver.cpp:237] Train net output #0: loss = 0.0724171 (* 1 = 0.0724171 loss)
I0428 22:33:05.597704 25678 sgd_solver.cpp:105] Iteration 8640, lr = 0.00295744
I0428 22:33:10.535823 25678 solver.cpp:218] Iteration 8652 (2.43008 iter/s, 4.93812s/12 iters), loss = 0.0575572
I0428 22:33:10.535862 25678 solver.cpp:237] Train net output #0: loss = 0.0575572 (* 1 = 0.0575572 loss)
I0428 22:33:10.535871 25678 sgd_solver.cpp:105] Iteration 8652, lr = 0.00295132
I0428 22:33:15.499688 25678 solver.cpp:218] Iteration 8664 (2.41749 iter/s, 4.96382s/12 iters), loss = 0.0848998
I0428 22:33:15.499725 25678 solver.cpp:237] Train net output #0: loss = 0.0848998 (* 1 = 0.0848998 loss)
I0428 22:33:15.499733 25678 sgd_solver.cpp:105] Iteration 8664, lr = 0.0029452
I0428 22:33:17.512220 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8670.caffemodel
I0428 22:33:20.565500 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8670.solverstate
I0428 22:33:22.924484 25678 solver.cpp:330] Iteration 8670, Testing net (#0)
I0428 22:33:22.924501 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:33:23.977804 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:33:27.730154 25678 solver.cpp:397] Test net output #0: accuracy = 0.48223
I0428 22:33:27.730199 25678 solver.cpp:397] Test net output #1: loss = 2.97286 (* 1 = 2.97286 loss)
I0428 22:33:29.529729 25678 solver.cpp:218] Iteration 8676 (0.85531 iter/s, 14.03s/12 iters), loss = 0.164514
I0428 22:33:29.529770 25678 solver.cpp:237] Train net output #0: loss = 0.164514 (* 1 = 0.164514 loss)
I0428 22:33:29.529778 25678 sgd_solver.cpp:105] Iteration 8676, lr = 0.0029391
I0428 22:33:34.477787 25678 solver.cpp:218] Iteration 8688 (2.42522 iter/s, 4.94801s/12 iters), loss = 0.0598857
I0428 22:33:34.477823 25678 solver.cpp:237] Train net output #0: loss = 0.0598857 (* 1 = 0.0598857 loss)
I0428 22:33:34.477830 25678 sgd_solver.cpp:105] Iteration 8688, lr = 0.002933
I0428 22:33:38.779101 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:33:39.463521 25678 solver.cpp:218] Iteration 8700 (2.40689 iter/s, 4.98569s/12 iters), loss = 0.18989
I0428 22:33:39.463557 25678 solver.cpp:237] Train net output #0: loss = 0.18989 (* 1 = 0.18989 loss)
I0428 22:33:39.463563 25678 sgd_solver.cpp:105] Iteration 8700, lr = 0.0029269
I0428 22:33:44.406587 25678 solver.cpp:218] Iteration 8712 (2.42766 iter/s, 4.94303s/12 iters), loss = 0.0364679
I0428 22:33:44.406630 25678 solver.cpp:237] Train net output #0: loss = 0.0364679 (* 1 = 0.0364679 loss)
I0428 22:33:44.406637 25678 sgd_solver.cpp:105] Iteration 8712, lr = 0.00292082
I0428 22:33:49.326359 25678 solver.cpp:218] Iteration 8724 (2.43916 iter/s, 4.91972s/12 iters), loss = 0.0897496
I0428 22:33:49.326395 25678 solver.cpp:237] Train net output #0: loss = 0.0897496 (* 1 = 0.0897496 loss)
I0428 22:33:49.326401 25678 sgd_solver.cpp:105] Iteration 8724, lr = 0.00291474
I0428 22:33:54.304780 25678 solver.cpp:218] Iteration 8736 (2.41043 iter/s, 4.97837s/12 iters), loss = 0.107724
I0428 22:33:54.304901 25678 solver.cpp:237] Train net output #0: loss = 0.107724 (* 1 = 0.107724 loss)
I0428 22:33:54.304909 25678 sgd_solver.cpp:105] Iteration 8736, lr = 0.00290867
I0428 22:33:59.280021 25678 solver.cpp:218] Iteration 8748 (2.41201 iter/s, 4.97511s/12 iters), loss = 0.184371
I0428 22:33:59.280061 25678 solver.cpp:237] Train net output #0: loss = 0.184371 (* 1 = 0.184371 loss)
I0428 22:33:59.280068 25678 sgd_solver.cpp:105] Iteration 8748, lr = 0.00290261
I0428 22:34:04.215756 25678 solver.cpp:218] Iteration 8760 (2.43127 iter/s, 4.93569s/12 iters), loss = 0.0403807
I0428 22:34:04.215793 25678 solver.cpp:237] Train net output #0: loss = 0.0403807 (* 1 = 0.0403807 loss)
I0428 22:34:04.215801 25678 sgd_solver.cpp:105] Iteration 8760, lr = 0.00289655
I0428 22:34:08.711431 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8772.caffemodel
I0428 22:34:12.558656 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8772.solverstate
I0428 22:34:16.241330 25678 solver.cpp:330] Iteration 8772, Testing net (#0)
I0428 22:34:16.241348 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:34:17.239392 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:34:21.063598 25678 solver.cpp:397] Test net output #0: accuracy = 0.476716
I0428 22:34:21.063647 25678 solver.cpp:397] Test net output #1: loss = 2.90746 (* 1 = 2.90746 loss)
I0428 22:34:21.161661 25678 solver.cpp:218] Iteration 8772 (0.708137 iter/s, 16.9459s/12 iters), loss = 0.0583591
I0428 22:34:21.161700 25678 solver.cpp:237] Train net output #0: loss = 0.0583591 (* 1 = 0.0583591 loss)
I0428 22:34:21.161706 25678 sgd_solver.cpp:105] Iteration 8772, lr = 0.0028905
I0428 22:34:25.341218 25678 solver.cpp:218] Iteration 8784 (2.87115 iter/s, 4.17951s/12 iters), loss = 0.0691559
I0428 22:34:25.341316 25678 solver.cpp:237] Train net output #0: loss = 0.069156 (* 1 = 0.069156 loss)
I0428 22:34:25.341326 25678 sgd_solver.cpp:105] Iteration 8784, lr = 0.00288446
I0428 22:34:30.316243 25678 solver.cpp:218] Iteration 8796 (2.4121 iter/s, 4.97492s/12 iters), loss = 0.0751946
I0428 22:34:30.316282 25678 solver.cpp:237] Train net output #0: loss = 0.0751946 (* 1 = 0.0751946 loss)
I0428 22:34:30.316290 25678 sgd_solver.cpp:105] Iteration 8796, lr = 0.00287843
I0428 22:34:31.724987 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:34:35.231415 25678 solver.cpp:218] Iteration 8808 (2.44145 iter/s, 4.91512s/12 iters), loss = 0.0730233
I0428 22:34:35.231452 25678 solver.cpp:237] Train net output #0: loss = 0.0730233 (* 1 = 0.0730233 loss)
I0428 22:34:35.231460 25678 sgd_solver.cpp:105] Iteration 8808, lr = 0.00287241
I0428 22:34:40.194104 25678 solver.cpp:218] Iteration 8820 (2.41807 iter/s, 4.96264s/12 iters), loss = 0.0700737
I0428 22:34:40.194140 25678 solver.cpp:237] Train net output #0: loss = 0.0700737 (* 1 = 0.0700737 loss)
I0428 22:34:40.194149 25678 sgd_solver.cpp:105] Iteration 8820, lr = 0.00286639
I0428 22:34:45.157701 25678 solver.cpp:218] Iteration 8832 (2.41762 iter/s, 4.96355s/12 iters), loss = 0.0916016
I0428 22:34:45.157740 25678 solver.cpp:237] Train net output #0: loss = 0.0916017 (* 1 = 0.0916017 loss)
I0428 22:34:45.157748 25678 sgd_solver.cpp:105] Iteration 8832, lr = 0.00286038
I0428 22:34:50.094597 25678 solver.cpp:218] Iteration 8844 (2.4307 iter/s, 4.93685s/12 iters), loss = 0.0941767
I0428 22:34:50.094640 25678 solver.cpp:237] Train net output #0: loss = 0.0941768 (* 1 = 0.0941768 loss)
I0428 22:34:50.094648 25678 sgd_solver.cpp:105] Iteration 8844, lr = 0.00285438
I0428 22:34:55.053867 25678 solver.cpp:218] Iteration 8856 (2.41974 iter/s, 4.95922s/12 iters), loss = 0.169304
I0428 22:34:55.053906 25678 solver.cpp:237] Train net output #0: loss = 0.169304 (* 1 = 0.169304 loss)
I0428 22:34:55.053915 25678 sgd_solver.cpp:105] Iteration 8856, lr = 0.00284838
I0428 22:35:00.035528 25678 solver.cpp:218] Iteration 8868 (2.40886 iter/s, 4.98161s/12 iters), loss = 0.0924341
I0428 22:35:00.035651 25678 solver.cpp:237] Train net output #0: loss = 0.0924342 (* 1 = 0.0924342 loss)
I0428 22:35:00.035661 25678 sgd_solver.cpp:105] Iteration 8868, lr = 0.00284239
I0428 22:35:02.056161 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8874.caffemodel
I0428 22:35:05.145485 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8874.solverstate
I0428 22:35:07.496548 25678 solver.cpp:330] Iteration 8874, Testing net (#0)
I0428 22:35:07.496567 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:35:08.476233 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:35:12.177687 25678 solver.cpp:397] Test net output #0: accuracy = 0.474877
I0428 22:35:12.177726 25678 solver.cpp:397] Test net output #1: loss = 2.9438 (* 1 = 2.9438 loss)
I0428 22:35:14.011384 25678 solver.cpp:218] Iteration 8880 (0.858631 iter/s, 13.9757s/12 iters), loss = 0.0497861
I0428 22:35:14.011422 25678 solver.cpp:237] Train net output #0: loss = 0.0497862 (* 1 = 0.0497862 loss)
I0428 22:35:14.011430 25678 sgd_solver.cpp:105] Iteration 8880, lr = 0.00283641
I0428 22:35:18.963802 25678 solver.cpp:218] Iteration 8892 (2.42308 iter/s, 4.95237s/12 iters), loss = 0.102444
I0428 22:35:18.963850 25678 solver.cpp:237] Train net output #0: loss = 0.102444 (* 1 = 0.102444 loss)
I0428 22:35:18.963856 25678 sgd_solver.cpp:105] Iteration 8892, lr = 0.00283044
I0428 22:35:22.552227 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:35:23.954246 25678 solver.cpp:218] Iteration 8904 (2.40462 iter/s, 4.99039s/12 iters), loss = 0.0465837
I0428 22:35:23.954279 25678 solver.cpp:237] Train net output #0: loss = 0.0465838 (* 1 = 0.0465838 loss)
I0428 22:35:23.954286 25678 sgd_solver.cpp:105] Iteration 8904, lr = 0.00282448
I0428 22:35:28.882176 25678 solver.cpp:218] Iteration 8916 (2.43512 iter/s, 4.92789s/12 iters), loss = 0.0615255
I0428 22:35:28.882212 25678 solver.cpp:237] Train net output #0: loss = 0.0615255 (* 1 = 0.0615255 loss)
I0428 22:35:28.882220 25678 sgd_solver.cpp:105] Iteration 8916, lr = 0.00281852
I0428 22:35:33.857604 25678 solver.cpp:218] Iteration 8928 (2.41187 iter/s, 4.97538s/12 iters), loss = 0.0708565
I0428 22:35:33.857760 25678 solver.cpp:237] Train net output #0: loss = 0.0708566 (* 1 = 0.0708566 loss)
I0428 22:35:33.857769 25678 sgd_solver.cpp:105] Iteration 8928, lr = 0.00281257
I0428 22:35:38.806186 25678 solver.cpp:218] Iteration 8940 (2.42502 iter/s, 4.94841s/12 iters), loss = 0.0754144
I0428 22:35:38.806231 25678 solver.cpp:237] Train net output #0: loss = 0.0754145 (* 1 = 0.0754145 loss)
I0428 22:35:38.806238 25678 sgd_solver.cpp:105] Iteration 8940, lr = 0.00280663
I0428 22:35:43.770795 25678 solver.cpp:218] Iteration 8952 (2.41714 iter/s, 4.96455s/12 iters), loss = 0.0878853
I0428 22:35:43.770834 25678 solver.cpp:237] Train net output #0: loss = 0.0878854 (* 1 = 0.0878854 loss)
I0428 22:35:43.770841 25678 sgd_solver.cpp:105] Iteration 8952, lr = 0.00280069
I0428 22:35:48.740584 25678 solver.cpp:218] Iteration 8964 (2.41461 iter/s, 4.96974s/12 iters), loss = 0.134752
I0428 22:35:48.740620 25678 solver.cpp:237] Train net output #0: loss = 0.134752 (* 1 = 0.134752 loss)
I0428 22:35:48.740628 25678 sgd_solver.cpp:105] Iteration 8964, lr = 0.00279477
I0428 22:35:53.185397 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_8976.caffemodel
I0428 22:35:56.279052 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_8976.solverstate
I0428 22:35:58.674229 25678 solver.cpp:330] Iteration 8976, Testing net (#0)
I0428 22:35:58.674248 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:35:59.607141 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:36:03.487399 25678 solver.cpp:397] Test net output #0: accuracy = 0.481618
I0428 22:36:03.487442 25678 solver.cpp:397] Test net output #1: loss = 3.04942 (* 1 = 3.04942 loss)
I0428 22:36:03.585137 25678 solver.cpp:218] Iteration 8976 (0.80838 iter/s, 14.8445s/12 iters), loss = 0.153771
I0428 22:36:03.585196 25678 solver.cpp:237] Train net output #0: loss = 0.153771 (* 1 = 0.153771 loss)
I0428 22:36:03.585209 25678 sgd_solver.cpp:105] Iteration 8976, lr = 0.00278885
I0428 22:36:07.693760 25678 solver.cpp:218] Iteration 8988 (2.92073 iter/s, 4.10856s/12 iters), loss = 0.0827613
I0428 22:36:07.693879 25678 solver.cpp:237] Train net output #0: loss = 0.0827614 (* 1 = 0.0827614 loss)
I0428 22:36:07.693888 25678 sgd_solver.cpp:105] Iteration 8988, lr = 0.00278294
I0428 22:36:10.586205 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:36:12.653658 25678 solver.cpp:218] Iteration 9000 (2.41947 iter/s, 4.95977s/12 iters), loss = 0.106366
I0428 22:36:12.653694 25678 solver.cpp:237] Train net output #0: loss = 0.106366 (* 1 = 0.106366 loss)
I0428 22:36:12.653702 25678 sgd_solver.cpp:105] Iteration 9000, lr = 0.00277703
I0428 22:36:13.346585 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:36:17.557318 25678 solver.cpp:218] Iteration 9012 (2.44718 iter/s, 4.90361s/12 iters), loss = 0.0491384
I0428 22:36:17.557356 25678 solver.cpp:237] Train net output #0: loss = 0.0491385 (* 1 = 0.0491385 loss)
I0428 22:36:17.557364 25678 sgd_solver.cpp:105] Iteration 9012, lr = 0.00277114
I0428 22:36:22.480433 25678 solver.cpp:218] Iteration 9024 (2.43751 iter/s, 4.92306s/12 iters), loss = 0.0681543
I0428 22:36:22.480473 25678 solver.cpp:237] Train net output #0: loss = 0.0681544 (* 1 = 0.0681544 loss)
I0428 22:36:22.480480 25678 sgd_solver.cpp:105] Iteration 9024, lr = 0.00276525
I0428 22:36:27.395527 25678 solver.cpp:218] Iteration 9036 (2.44148 iter/s, 4.91505s/12 iters), loss = 0.105605
I0428 22:36:27.395565 25678 solver.cpp:237] Train net output #0: loss = 0.105605 (* 1 = 0.105605 loss)
I0428 22:36:27.395572 25678 sgd_solver.cpp:105] Iteration 9036, lr = 0.00275937
I0428 22:36:32.350585 25678 solver.cpp:218] Iteration 9048 (2.42179 iter/s, 4.95501s/12 iters), loss = 0.0708149
I0428 22:36:32.350632 25678 solver.cpp:237] Train net output #0: loss = 0.070815 (* 1 = 0.070815 loss)
I0428 22:36:32.350641 25678 sgd_solver.cpp:105] Iteration 9048, lr = 0.0027535
I0428 22:36:37.256003 25678 solver.cpp:218] Iteration 9060 (2.4463 iter/s, 4.90536s/12 iters), loss = 0.170565
I0428 22:36:37.256042 25678 solver.cpp:237] Train net output #0: loss = 0.170565 (* 1 = 0.170565 loss)
I0428 22:36:37.256049 25678 sgd_solver.cpp:105] Iteration 9060, lr = 0.00274763
I0428 22:36:42.149708 25678 solver.cpp:218] Iteration 9072 (2.45216 iter/s, 4.89365s/12 iters), loss = 0.0871038
I0428 22:36:42.149838 25678 solver.cpp:237] Train net output #0: loss = 0.0871039 (* 1 = 0.0871039 loss)
I0428 22:36:42.149847 25678 sgd_solver.cpp:105] Iteration 9072, lr = 0.00274178
I0428 22:36:44.152992 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9078.caffemodel
I0428 22:36:47.157393 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9078.solverstate
I0428 22:36:50.784430 25678 solver.cpp:330] Iteration 9078, Testing net (#0)
I0428 22:36:50.784447 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:36:51.667786 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:36:55.613499 25678 solver.cpp:397] Test net output #0: accuracy = 0.494485
I0428 22:36:55.613546 25678 solver.cpp:397] Test net output #1: loss = 2.9931 (* 1 = 2.9931 loss)
I0428 22:36:57.421003 25678 solver.cpp:218] Iteration 9084 (0.785794 iter/s, 15.2712s/12 iters), loss = 0.0187137
I0428 22:36:57.421043 25678 solver.cpp:237] Train net output #0: loss = 0.0187138 (* 1 = 0.0187138 loss)
I0428 22:36:57.421051 25678 sgd_solver.cpp:105] Iteration 9084, lr = 0.00273593
I0428 22:37:02.379954 25678 solver.cpp:218] Iteration 9096 (2.41989 iter/s, 4.9589s/12 iters), loss = 0.0832237
I0428 22:37:02.379990 25678 solver.cpp:237] Train net output #0: loss = 0.0832238 (* 1 = 0.0832238 loss)
I0428 22:37:02.379998 25678 sgd_solver.cpp:105] Iteration 9096, lr = 0.00273009
I0428 22:37:05.247742 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:37:07.266842 25678 solver.cpp:218] Iteration 9108 (2.45558 iter/s, 4.88684s/12 iters), loss = 0.0719608
I0428 22:37:07.266882 25678 solver.cpp:237] Train net output #0: loss = 0.0719609 (* 1 = 0.0719609 loss)
I0428 22:37:07.266891 25678 sgd_solver.cpp:105] Iteration 9108, lr = 0.00272425
I0428 22:37:12.154985 25678 solver.cpp:218] Iteration 9120 (2.45495 iter/s, 4.88809s/12 iters), loss = 0.0465718
I0428 22:37:12.155053 25678 solver.cpp:237] Train net output #0: loss = 0.0465718 (* 1 = 0.0465718 loss)
I0428 22:37:12.155061 25678 sgd_solver.cpp:105] Iteration 9120, lr = 0.00271843
I0428 22:37:17.111497 25678 solver.cpp:218] Iteration 9132 (2.4211 iter/s, 4.95643s/12 iters), loss = 0.106885
I0428 22:37:17.111536 25678 solver.cpp:237] Train net output #0: loss = 0.106885 (* 1 = 0.106885 loss)
I0428 22:37:17.111543 25678 sgd_solver.cpp:105] Iteration 9132, lr = 0.00271261
I0428 22:37:22.096801 25678 solver.cpp:218] Iteration 9144 (2.4071 iter/s, 4.98525s/12 iters), loss = 0.215628
I0428 22:37:22.096838 25678 solver.cpp:237] Train net output #0: loss = 0.215629 (* 1 = 0.215629 loss)
I0428 22:37:22.096846 25678 sgd_solver.cpp:105] Iteration 9144, lr = 0.0027068
I0428 22:37:26.967406 25678 solver.cpp:218] Iteration 9156 (2.46379 iter/s, 4.87055s/12 iters), loss = 0.0341205
I0428 22:37:26.967447 25678 solver.cpp:237] Train net output #0: loss = 0.0341206 (* 1 = 0.0341206 loss)
I0428 22:37:26.967454 25678 sgd_solver.cpp:105] Iteration 9156, lr = 0.002701
I0428 22:37:31.916172 25678 solver.cpp:218] Iteration 9168 (2.42487 iter/s, 4.94872s/12 iters), loss = 0.0664764
I0428 22:37:31.916208 25678 solver.cpp:237] Train net output #0: loss = 0.0664765 (* 1 = 0.0664765 loss)
I0428 22:37:31.916215 25678 sgd_solver.cpp:105] Iteration 9168, lr = 0.0026952
I0428 22:37:36.375211 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9180.caffemodel
I0428 22:37:39.443045 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9180.solverstate
I0428 22:37:41.804177 25678 solver.cpp:330] Iteration 9180, Testing net (#0)
I0428 22:37:41.804194 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:37:42.645102 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:37:46.613001 25678 solver.cpp:397] Test net output #0: accuracy = 0.479167
I0428 22:37:46.613039 25678 solver.cpp:397] Test net output #1: loss = 2.99502 (* 1 = 2.99502 loss)
I0428 22:37:46.708612 25678 solver.cpp:218] Iteration 9180 (0.811228 iter/s, 14.7924s/12 iters), loss = 0.105158
I0428 22:37:46.708653 25678 solver.cpp:237] Train net output #0: loss = 0.105158 (* 1 = 0.105158 loss)
I0428 22:37:46.708662 25678 sgd_solver.cpp:105] Iteration 9180, lr = 0.00268941
I0428 22:37:50.831233 25678 solver.cpp:218] Iteration 9192 (2.91081 iter/s, 4.12257s/12 iters), loss = 0.171259
I0428 22:37:50.831267 25678 solver.cpp:237] Train net output #0: loss = 0.171259 (* 1 = 0.171259 loss)
I0428 22:37:50.831275 25678 sgd_solver.cpp:105] Iteration 9192, lr = 0.00268364
I0428 22:37:55.900168 25678 solver.cpp:218] Iteration 9204 (2.36738 iter/s, 5.06889s/12 iters), loss = 0.0831026
I0428 22:37:55.900204 25678 solver.cpp:237] Train net output #0: loss = 0.0831026 (* 1 = 0.0831026 loss)
I0428 22:37:55.900211 25678 sgd_solver.cpp:105] Iteration 9204, lr = 0.00267786
I0428 22:37:55.966257 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:38:00.853188 25678 solver.cpp:218] Iteration 9216 (2.42279 iter/s, 4.95297s/12 iters), loss = 0.014649
I0428 22:38:00.853229 25678 solver.cpp:237] Train net output #0: loss = 0.014649 (* 1 = 0.014649 loss)
I0428 22:38:00.853236 25678 sgd_solver.cpp:105] Iteration 9216, lr = 0.0026721
I0428 22:38:05.770005 25678 solver.cpp:218] Iteration 9228 (2.44063 iter/s, 4.91677s/12 iters), loss = 0.0340129
I0428 22:38:05.770042 25678 solver.cpp:237] Train net output #0: loss = 0.0340129 (* 1 = 0.0340129 loss)
I0428 22:38:05.770049 25678 sgd_solver.cpp:105] Iteration 9228, lr = 0.00266635
I0428 22:38:10.708591 25678 solver.cpp:218] Iteration 9240 (2.42987 iter/s, 4.93854s/12 iters), loss = 0.028908
I0428 22:38:10.708627 25678 solver.cpp:237] Train net output #0: loss = 0.028908 (* 1 = 0.028908 loss)
I0428 22:38:10.708634 25678 sgd_solver.cpp:105] Iteration 9240, lr = 0.0026606
I0428 22:38:15.655854 25678 solver.cpp:218] Iteration 9252 (2.42561 iter/s, 4.94721s/12 iters), loss = 0.0239421
I0428 22:38:15.655923 25678 solver.cpp:237] Train net output #0: loss = 0.0239421 (* 1 = 0.0239421 loss)
I0428 22:38:15.655932 25678 sgd_solver.cpp:105] Iteration 9252, lr = 0.00265486
I0428 22:38:20.595196 25678 solver.cpp:218] Iteration 9264 (2.42951 iter/s, 4.93926s/12 iters), loss = 0.0731511
I0428 22:38:20.595234 25678 solver.cpp:237] Train net output #0: loss = 0.0731511 (* 1 = 0.0731511 loss)
I0428 22:38:20.595242 25678 sgd_solver.cpp:105] Iteration 9264, lr = 0.00264913
I0428 22:38:25.404414 25678 solver.cpp:218] Iteration 9276 (2.49523 iter/s, 4.80917s/12 iters), loss = 0.133873
I0428 22:38:25.404451 25678 solver.cpp:237] Train net output #0: loss = 0.133873 (* 1 = 0.133873 loss)
I0428 22:38:25.404459 25678 sgd_solver.cpp:105] Iteration 9276, lr = 0.0026434
I0428 22:38:27.369439 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9282.caffemodel
I0428 22:38:30.426613 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9282.solverstate
I0428 22:38:32.784401 25678 solver.cpp:330] Iteration 9282, Testing net (#0)
I0428 22:38:32.784423 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:38:33.566519 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:38:37.411883 25678 solver.cpp:397] Test net output #0: accuracy = 0.487745
I0428 22:38:37.411929 25678 solver.cpp:397] Test net output #1: loss = 2.9748 (* 1 = 2.9748 loss)
I0428 22:38:39.215179 25678 solver.cpp:218] Iteration 9288 (0.86889 iter/s, 13.8107s/12 iters), loss = 0.025159
I0428 22:38:39.215219 25678 solver.cpp:237] Train net output #0: loss = 0.025159 (* 1 = 0.025159 loss)
I0428 22:38:39.215227 25678 sgd_solver.cpp:105] Iteration 9288, lr = 0.00263769
I0428 22:38:44.146903 25678 solver.cpp:218] Iteration 9300 (2.43325 iter/s, 4.93168s/12 iters), loss = 0.0933576
I0428 22:38:44.146939 25678 solver.cpp:237] Train net output #0: loss = 0.0933576 (* 1 = 0.0933576 loss)
I0428 22:38:44.146946 25678 sgd_solver.cpp:105] Iteration 9300, lr = 0.00263198
I0428 22:38:46.288247 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:38:49.025000 25678 solver.cpp:218] Iteration 9312 (2.46 iter/s, 4.87805s/12 iters), loss = 0.121539
I0428 22:38:49.025048 25678 solver.cpp:237] Train net output #0: loss = 0.121539 (* 1 = 0.121539 loss)
I0428 22:38:49.025059 25678 sgd_solver.cpp:105] Iteration 9312, lr = 0.00262628
I0428 22:38:53.921541 25678 solver.cpp:218] Iteration 9324 (2.45074 iter/s, 4.89649s/12 iters), loss = 0.042468
I0428 22:38:53.921577 25678 solver.cpp:237] Train net output #0: loss = 0.042468 (* 1 = 0.042468 loss)
I0428 22:38:53.921586 25678 sgd_solver.cpp:105] Iteration 9324, lr = 0.00262059
I0428 22:38:58.877923 25678 solver.cpp:218] Iteration 9336 (2.42114 iter/s, 4.95633s/12 iters), loss = 0.0323244
I0428 22:38:58.877969 25678 solver.cpp:237] Train net output #0: loss = 0.0323244 (* 1 = 0.0323244 loss)
I0428 22:38:58.877975 25678 sgd_solver.cpp:105] Iteration 9336, lr = 0.00261491
I0428 22:39:03.748149 25678 solver.cpp:218] Iteration 9348 (2.46398 iter/s, 4.87018s/12 iters), loss = 0.107891
I0428 22:39:03.748188 25678 solver.cpp:237] Train net output #0: loss = 0.107891 (* 1 = 0.107891 loss)
I0428 22:39:03.748196 25678 sgd_solver.cpp:105] Iteration 9348, lr = 0.00260923
I0428 22:39:08.685235 25678 solver.cpp:218] Iteration 9360 (2.43061 iter/s, 4.93704s/12 iters), loss = 0.0297188
I0428 22:39:08.685276 25678 solver.cpp:237] Train net output #0: loss = 0.0297188 (* 1 = 0.0297188 loss)
I0428 22:39:08.685282 25678 sgd_solver.cpp:105] Iteration 9360, lr = 0.00260356
I0428 22:39:13.541321 25678 solver.cpp:218] Iteration 9372 (2.47115 iter/s, 4.85604s/12 iters), loss = 0.0467321
I0428 22:39:13.541358 25678 solver.cpp:237] Train net output #0: loss = 0.0467321 (* 1 = 0.0467321 loss)
I0428 22:39:13.541365 25678 sgd_solver.cpp:105] Iteration 9372, lr = 0.0025979
I0428 22:39:17.982543 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9384.caffemodel
I0428 22:39:21.094018 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9384.solverstate
I0428 22:39:24.969488 25678 solver.cpp:330] Iteration 9384, Testing net (#0)
I0428 22:39:24.969512 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:39:25.711647 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:39:29.730506 25678 solver.cpp:397] Test net output #0: accuracy = 0.484069
I0428 22:39:29.730545 25678 solver.cpp:397] Test net output #1: loss = 2.94102 (* 1 = 2.94102 loss)
I0428 22:39:29.827019 25678 solver.cpp:218] Iteration 9384 (0.736845 iter/s, 16.2857s/12 iters), loss = 0.0517563
I0428 22:39:29.827061 25678 solver.cpp:237] Train net output #0: loss = 0.0517563 (* 1 = 0.0517563 loss)
I0428 22:39:29.827069 25678 sgd_solver.cpp:105] Iteration 9384, lr = 0.00259225
I0428 22:39:33.969964 25678 solver.cpp:218] Iteration 9396 (2.89653 iter/s, 4.14289s/12 iters), loss = 0.0385373
I0428 22:39:33.969996 25678 solver.cpp:237] Train net output #0: loss = 0.0385373 (* 1 = 0.0385373 loss)
I0428 22:39:33.970005 25678 sgd_solver.cpp:105] Iteration 9396, lr = 0.00258661
I0428 22:39:38.270546 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:39:38.921058 25678 solver.cpp:218] Iteration 9408 (2.42373 iter/s, 4.95105s/12 iters), loss = 0.0671653
I0428 22:39:38.921108 25678 solver.cpp:237] Train net output #0: loss = 0.0671653 (* 1 = 0.0671653 loss)
I0428 22:39:38.921123 25678 sgd_solver.cpp:105] Iteration 9408, lr = 0.00258097
I0428 22:39:43.882916 25678 solver.cpp:218] Iteration 9420 (2.41848 iter/s, 4.9618s/12 iters), loss = 0.0729266
I0428 22:39:43.882952 25678 solver.cpp:237] Train net output #0: loss = 0.0729266 (* 1 = 0.0729266 loss)
I0428 22:39:43.882961 25678 sgd_solver.cpp:105] Iteration 9420, lr = 0.00257534
I0428 22:39:48.803680 25678 solver.cpp:218] Iteration 9432 (2.43867 iter/s, 4.92071s/12 iters), loss = 0.0330957
I0428 22:39:48.803826 25678 solver.cpp:237] Train net output #0: loss = 0.0330957 (* 1 = 0.0330957 loss)
I0428 22:39:48.803835 25678 sgd_solver.cpp:105] Iteration 9432, lr = 0.00256972
I0428 22:39:53.752087 25678 solver.cpp:218] Iteration 9444 (2.4251 iter/s, 4.94825s/12 iters), loss = 0.030188
I0428 22:39:53.752140 25678 solver.cpp:237] Train net output #0: loss = 0.030188 (* 1 = 0.030188 loss)
I0428 22:39:53.752151 25678 sgd_solver.cpp:105] Iteration 9444, lr = 0.00256411
I0428 22:39:58.660953 25678 solver.cpp:218] Iteration 9456 (2.44459 iter/s, 4.9088s/12 iters), loss = 0.0605644
I0428 22:39:58.660991 25678 solver.cpp:237] Train net output #0: loss = 0.0605644 (* 1 = 0.0605644 loss)
I0428 22:39:58.661000 25678 sgd_solver.cpp:105] Iteration 9456, lr = 0.00255851
I0428 22:40:03.625423 25678 solver.cpp:218] Iteration 9468 (2.4172 iter/s, 4.96442s/12 iters), loss = 0.0362446
I0428 22:40:03.625473 25678 solver.cpp:237] Train net output #0: loss = 0.0362446 (* 1 = 0.0362446 loss)
I0428 22:40:03.625486 25678 sgd_solver.cpp:105] Iteration 9468, lr = 0.00255291
I0428 22:40:08.543308 25678 solver.cpp:218] Iteration 9480 (2.4401 iter/s, 4.91783s/12 iters), loss = 0.0747958
I0428 22:40:08.543345 25678 solver.cpp:237] Train net output #0: loss = 0.0747958 (* 1 = 0.0747958 loss)
I0428 22:40:08.543354 25678 sgd_solver.cpp:105] Iteration 9480, lr = 0.00254732
I0428 22:40:10.562480 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9486.caffemodel
I0428 22:40:13.667505 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9486.solverstate
I0428 22:40:16.045444 25678 solver.cpp:330] Iteration 9486, Testing net (#0)
I0428 22:40:16.045461 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:40:16.746686 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:40:20.797196 25678 solver.cpp:397] Test net output #0: accuracy = 0.481005
I0428 22:40:20.797295 25678 solver.cpp:397] Test net output #1: loss = 2.98855 (* 1 = 2.98855 loss)
I0428 22:40:22.495545 25678 solver.cpp:218] Iteration 9492 (0.860079 iter/s, 13.9522s/12 iters), loss = 0.0845575
I0428 22:40:22.495582 25678 solver.cpp:237] Train net output #0: loss = 0.0845575 (* 1 = 0.0845575 loss)
I0428 22:40:22.495589 25678 sgd_solver.cpp:105] Iteration 9492, lr = 0.00254174
I0428 22:40:27.438450 25678 solver.cpp:218] Iteration 9504 (2.42774 iter/s, 4.94286s/12 iters), loss = 0.0562251
I0428 22:40:27.438488 25678 solver.cpp:237] Train net output #0: loss = 0.0562251 (* 1 = 0.0562251 loss)
I0428 22:40:27.438495 25678 sgd_solver.cpp:105] Iteration 9504, lr = 0.00253617
I0428 22:40:28.884860 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:40:32.389825 25678 solver.cpp:218] Iteration 9516 (2.42359 iter/s, 4.95133s/12 iters), loss = 0.0725511
I0428 22:40:32.389865 25678 solver.cpp:237] Train net output #0: loss = 0.0725512 (* 1 = 0.0725512 loss)
I0428 22:40:32.389873 25678 sgd_solver.cpp:105] Iteration 9516, lr = 0.00253061
I0428 22:40:37.301141 25678 solver.cpp:218] Iteration 9528 (2.44336 iter/s, 4.91127s/12 iters), loss = 0.0353053
I0428 22:40:37.301177 25678 solver.cpp:237] Train net output #0: loss = 0.0353053 (* 1 = 0.0353053 loss)
I0428 22:40:37.301183 25678 sgd_solver.cpp:105] Iteration 9528, lr = 0.00252505
I0428 22:40:42.251418 25678 solver.cpp:218] Iteration 9540 (2.42413 iter/s, 4.95023s/12 iters), loss = 0.0477816
I0428 22:40:42.251461 25678 solver.cpp:237] Train net output #0: loss = 0.0477816 (* 1 = 0.0477816 loss)
I0428 22:40:42.251469 25678 sgd_solver.cpp:105] Iteration 9540, lr = 0.00251951
I0428 22:40:47.131004 25678 solver.cpp:218] Iteration 9552 (2.45925 iter/s, 4.87953s/12 iters), loss = 0.034683
I0428 22:40:47.131042 25678 solver.cpp:237] Train net output #0: loss = 0.034683 (* 1 = 0.034683 loss)
I0428 22:40:47.131050 25678 sgd_solver.cpp:105] Iteration 9552, lr = 0.00251397
I0428 22:40:52.081696 25678 solver.cpp:218] Iteration 9564 (2.42393 iter/s, 4.95064s/12 iters), loss = 0.00754526
I0428 22:40:52.081827 25678 solver.cpp:237] Train net output #0: loss = 0.00754527 (* 1 = 0.00754527 loss)
I0428 22:40:52.081837 25678 sgd_solver.cpp:105] Iteration 9564, lr = 0.00250844
I0428 22:40:56.987051 25678 solver.cpp:218] Iteration 9576 (2.44637 iter/s, 4.90522s/12 iters), loss = 0.029237
I0428 22:40:56.987088 25678 solver.cpp:237] Train net output #0: loss = 0.029237 (* 1 = 0.029237 loss)
I0428 22:40:56.987097 25678 sgd_solver.cpp:105] Iteration 9576, lr = 0.00250291
I0428 22:41:01.484937 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9588.caffemodel
I0428 22:41:04.943887 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9588.solverstate
I0428 22:41:07.305012 25678 solver.cpp:330] Iteration 9588, Testing net (#0)
I0428 22:41:07.305030 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:41:07.959936 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:41:12.009263 25678 solver.cpp:397] Test net output #0: accuracy = 0.495711
I0428 22:41:12.009311 25678 solver.cpp:397] Test net output #1: loss = 2.8896 (* 1 = 2.8896 loss)
I0428 22:41:12.106904 25678 solver.cpp:218] Iteration 9588 (0.793661 iter/s, 15.1198s/12 iters), loss = 0.0732143
I0428 22:41:12.106947 25678 solver.cpp:237] Train net output #0: loss = 0.0732143 (* 1 = 0.0732143 loss)
I0428 22:41:12.106956 25678 sgd_solver.cpp:105] Iteration 9588, lr = 0.0024974
I0428 22:41:16.242578 25678 solver.cpp:218] Iteration 9600 (2.90162 iter/s, 4.13562s/12 iters), loss = 0.0397955
I0428 22:41:16.242616 25678 solver.cpp:237] Train net output #0: loss = 0.0397955 (* 1 = 0.0397955 loss)
I0428 22:41:16.242624 25678 sgd_solver.cpp:105] Iteration 9600, lr = 0.00249189
I0428 22:41:19.830365 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:41:21.237490 25678 solver.cpp:218] Iteration 9612 (2.40247 iter/s, 4.99486s/12 iters), loss = 0.0513531
I0428 22:41:21.237527 25678 solver.cpp:237] Train net output #0: loss = 0.0513531 (* 1 = 0.0513531 loss)
I0428 22:41:21.237535 25678 sgd_solver.cpp:105] Iteration 9612, lr = 0.00248639
I0428 22:41:26.193217 25678 solver.cpp:218] Iteration 9624 (2.42146 iter/s, 4.95568s/12 iters), loss = 0.0798512
I0428 22:41:26.193326 25678 solver.cpp:237] Train net output #0: loss = 0.0798512 (* 1 = 0.0798512 loss)
I0428 22:41:26.193336 25678 sgd_solver.cpp:105] Iteration 9624, lr = 0.0024809
I0428 22:41:31.201532 25678 solver.cpp:218] Iteration 9636 (2.39607 iter/s, 5.0082s/12 iters), loss = 0.013433
I0428 22:41:31.201570 25678 solver.cpp:237] Train net output #0: loss = 0.013433 (* 1 = 0.013433 loss)
I0428 22:41:31.201578 25678 sgd_solver.cpp:105] Iteration 9636, lr = 0.00247542
I0428 22:41:36.227324 25678 solver.cpp:218] Iteration 9648 (2.38771 iter/s, 5.02575s/12 iters), loss = 0.0300955
I0428 22:41:36.227362 25678 solver.cpp:237] Train net output #0: loss = 0.0300955 (* 1 = 0.0300955 loss)
I0428 22:41:36.227370 25678 sgd_solver.cpp:105] Iteration 9648, lr = 0.00246995
I0428 22:41:41.240839 25678 solver.cpp:218] Iteration 9660 (2.39355 iter/s, 5.01346s/12 iters), loss = 0.0406512
I0428 22:41:41.240878 25678 solver.cpp:237] Train net output #0: loss = 0.0406512 (* 1 = 0.0406512 loss)
I0428 22:41:41.240886 25678 sgd_solver.cpp:105] Iteration 9660, lr = 0.00246448
I0428 22:41:46.212538 25678 solver.cpp:218] Iteration 9672 (2.41369 iter/s, 4.97165s/12 iters), loss = 0.0399672
I0428 22:41:46.212575 25678 solver.cpp:237] Train net output #0: loss = 0.0399672 (* 1 = 0.0399672 loss)
I0428 22:41:46.212584 25678 sgd_solver.cpp:105] Iteration 9672, lr = 0.00245902
I0428 22:41:51.127003 25678 solver.cpp:218] Iteration 9684 (2.44179 iter/s, 4.91442s/12 iters), loss = 0.0853332
I0428 22:41:51.127039 25678 solver.cpp:237] Train net output #0: loss = 0.0853332 (* 1 = 0.0853332 loss)
I0428 22:41:51.127048 25678 sgd_solver.cpp:105] Iteration 9684, lr = 0.00245357
I0428 22:41:53.117014 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9690.caffemodel
I0428 22:41:56.201161 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9690.solverstate
I0428 22:42:00.089157 25678 solver.cpp:330] Iteration 9690, Testing net (#0)
I0428 22:42:00.089180 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:42:00.708981 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:42:03.715987 25678 blocking_queue.cpp:49] Waiting for data
I0428 22:42:04.819211 25678 solver.cpp:397] Test net output #0: accuracy = 0.495711
I0428 22:42:04.819257 25678 solver.cpp:397] Test net output #1: loss = 2.92342 (* 1 = 2.92342 loss)
I0428 22:42:06.624215 25678 solver.cpp:218] Iteration 9696 (0.774335 iter/s, 15.4972s/12 iters), loss = 0.0692718
I0428 22:42:06.624253 25678 solver.cpp:237] Train net output #0: loss = 0.0692718 (* 1 = 0.0692718 loss)
I0428 22:42:06.624261 25678 sgd_solver.cpp:105] Iteration 9696, lr = 0.00244813
I0428 22:42:11.577497 25678 solver.cpp:218] Iteration 9708 (2.42266 iter/s, 4.95323s/12 iters), loss = 0.0307285
I0428 22:42:11.577533 25678 solver.cpp:237] Train net output #0: loss = 0.0307285 (* 1 = 0.0307285 loss)
I0428 22:42:11.577540 25678 sgd_solver.cpp:105] Iteration 9708, lr = 0.0024427
I0428 22:42:12.301887 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:42:16.552850 25678 solver.cpp:218] Iteration 9720 (2.41191 iter/s, 4.97531s/12 iters), loss = 0.0255599
I0428 22:42:16.552886 25678 solver.cpp:237] Train net output #0: loss = 0.0255599 (* 1 = 0.0255599 loss)
I0428 22:42:16.552896 25678 sgd_solver.cpp:105] Iteration 9720, lr = 0.00243727
I0428 22:42:21.463868 25678 solver.cpp:218] Iteration 9732 (2.44351 iter/s, 4.91097s/12 iters), loss = 0.0602362
I0428 22:42:21.463905 25678 solver.cpp:237] Train net output #0: loss = 0.0602362 (* 1 = 0.0602362 loss)
I0428 22:42:21.463913 25678 sgd_solver.cpp:105] Iteration 9732, lr = 0.00243185
I0428 22:42:26.416765 25678 solver.cpp:218] Iteration 9744 (2.42285 iter/s, 4.95285s/12 iters), loss = 0.0455383
I0428 22:42:26.416847 25678 solver.cpp:237] Train net output #0: loss = 0.0455383 (* 1 = 0.0455383 loss)
I0428 22:42:26.416862 25678 sgd_solver.cpp:105] Iteration 9744, lr = 0.00242645
I0428 22:42:31.340716 25678 solver.cpp:218] Iteration 9756 (2.43711 iter/s, 4.92386s/12 iters), loss = 0.135541
I0428 22:42:31.340754 25678 solver.cpp:237] Train net output #0: loss = 0.135541 (* 1 = 0.135541 loss)
I0428 22:42:31.340761 25678 sgd_solver.cpp:105] Iteration 9756, lr = 0.00242104
I0428 22:42:36.283372 25678 solver.cpp:218] Iteration 9768 (2.42787 iter/s, 4.94261s/12 iters), loss = 0.0142115
I0428 22:42:36.283412 25678 solver.cpp:237] Train net output #0: loss = 0.0142115 (* 1 = 0.0142115 loss)
I0428 22:42:36.283421 25678 sgd_solver.cpp:105] Iteration 9768, lr = 0.00241565
I0428 22:42:41.188351 25678 solver.cpp:218] Iteration 9780 (2.44652 iter/s, 4.90493s/12 iters), loss = 0.0735833
I0428 22:42:41.188388 25678 solver.cpp:237] Train net output #0: loss = 0.0735833 (* 1 = 0.0735833 loss)
I0428 22:42:41.188395 25678 sgd_solver.cpp:105] Iteration 9780, lr = 0.00241027
I0428 22:42:45.718405 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9792.caffemodel
I0428 22:42:48.847275 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9792.solverstate
I0428 22:42:51.207386 25678 solver.cpp:330] Iteration 9792, Testing net (#0)
I0428 22:42:51.207403 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:42:51.775357 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:42:56.029074 25678 solver.cpp:397] Test net output #0: accuracy = 0.499387
I0428 22:42:56.029121 25678 solver.cpp:397] Test net output #1: loss = 2.918 (* 1 = 2.918 loss)
I0428 22:42:56.126792 25678 solver.cpp:218] Iteration 9792 (0.803299 iter/s, 14.9384s/12 iters), loss = 0.0548192
I0428 22:42:56.126837 25678 solver.cpp:237] Train net output #0: loss = 0.0548192 (* 1 = 0.0548192 loss)
I0428 22:42:56.126843 25678 sgd_solver.cpp:105] Iteration 9792, lr = 0.00240489
I0428 22:43:00.196784 25678 solver.cpp:218] Iteration 9804 (2.94845 iter/s, 4.06994s/12 iters), loss = 0.010795
I0428 22:43:00.196962 25678 solver.cpp:237] Train net output #0: loss = 0.010795 (* 1 = 0.010795 loss)
I0428 22:43:00.196972 25678 sgd_solver.cpp:105] Iteration 9804, lr = 0.00239952
I0428 22:43:03.070339 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:43:05.054646 25678 solver.cpp:218] Iteration 9816 (2.47032 iter/s, 4.85768s/12 iters), loss = 0.0370664
I0428 22:43:05.054680 25678 solver.cpp:237] Train net output #0: loss = 0.0370664 (* 1 = 0.0370664 loss)
I0428 22:43:05.054688 25678 sgd_solver.cpp:105] Iteration 9816, lr = 0.00239416
I0428 22:43:09.866394 25678 solver.cpp:218] Iteration 9828 (2.49392 iter/s, 4.8117s/12 iters), loss = 0.0311552
I0428 22:43:09.866430 25678 solver.cpp:237] Train net output #0: loss = 0.0311552 (* 1 = 0.0311552 loss)
I0428 22:43:09.866437 25678 sgd_solver.cpp:105] Iteration 9828, lr = 0.00238881
I0428 22:43:14.688549 25678 solver.cpp:218] Iteration 9840 (2.48854 iter/s, 4.82211s/12 iters), loss = 0.0571882
I0428 22:43:14.688585 25678 solver.cpp:237] Train net output #0: loss = 0.0571882 (* 1 = 0.0571882 loss)
I0428 22:43:14.688593 25678 sgd_solver.cpp:105] Iteration 9840, lr = 0.00238347
I0428 22:43:19.502144 25678 solver.cpp:218] Iteration 9852 (2.49296 iter/s, 4.81355s/12 iters), loss = 0.00959912
I0428 22:43:19.502183 25678 solver.cpp:237] Train net output #0: loss = 0.00959913 (* 1 = 0.00959913 loss)
I0428 22:43:19.502192 25678 sgd_solver.cpp:105] Iteration 9852, lr = 0.00237813
I0428 22:43:24.316923 25678 solver.cpp:218] Iteration 9864 (2.49235 iter/s, 4.81473s/12 iters), loss = 0.153545
I0428 22:43:24.316959 25678 solver.cpp:237] Train net output #0: loss = 0.153545 (* 1 = 0.153545 loss)
I0428 22:43:24.316967 25678 sgd_solver.cpp:105] Iteration 9864, lr = 0.00237281
I0428 22:43:29.133965 25678 solver.cpp:218] Iteration 9876 (2.49118 iter/s, 4.817s/12 iters), loss = 0.041185
I0428 22:43:29.134001 25678 solver.cpp:237] Train net output #0: loss = 0.041185 (* 1 = 0.041185 loss)
I0428 22:43:29.134008 25678 sgd_solver.cpp:105] Iteration 9876, lr = 0.00236749
I0428 22:43:33.972013 25678 solver.cpp:218] Iteration 9888 (2.48037 iter/s, 4.83799s/12 iters), loss = 0.161469
I0428 22:43:33.972127 25678 solver.cpp:237] Train net output #0: loss = 0.161469 (* 1 = 0.161469 loss)
I0428 22:43:33.972136 25678 sgd_solver.cpp:105] Iteration 9888, lr = 0.00236218
I0428 22:43:35.942137 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9894.caffemodel
I0428 22:43:40.398447 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9894.solverstate
I0428 22:43:43.136787 25678 solver.cpp:330] Iteration 9894, Testing net (#0)
I0428 22:43:43.136806 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:43:43.677402 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:43:47.950650 25678 solver.cpp:397] Test net output #0: accuracy = 0.5
I0428 22:43:47.950696 25678 solver.cpp:397] Test net output #1: loss = 2.86054 (* 1 = 2.86054 loss)
I0428 22:43:49.739816 25678 solver.cpp:218] Iteration 9900 (0.76105 iter/s, 15.7677s/12 iters), loss = 0.0990341
I0428 22:43:49.739853 25678 solver.cpp:237] Train net output #0: loss = 0.0990341 (* 1 = 0.0990341 loss)
I0428 22:43:49.739861 25678 sgd_solver.cpp:105] Iteration 9900, lr = 0.00235687
I0428 22:43:54.550225 25678 solver.cpp:218] Iteration 9912 (2.49461 iter/s, 4.81036s/12 iters), loss = 0.0161887
I0428 22:43:54.550258 25678 solver.cpp:237] Train net output #0: loss = 0.0161887 (* 1 = 0.0161887 loss)
I0428 22:43:54.550266 25678 sgd_solver.cpp:105] Iteration 9912, lr = 0.00235158
I0428 22:43:54.646414 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:43:59.380578 25678 solver.cpp:218] Iteration 9924 (2.48431 iter/s, 4.83031s/12 iters), loss = 0.0565167
I0428 22:43:59.380614 25678 solver.cpp:237] Train net output #0: loss = 0.0565167 (* 1 = 0.0565167 loss)
I0428 22:43:59.380622 25678 sgd_solver.cpp:105] Iteration 9924, lr = 0.00234629
I0428 22:44:04.189445 25678 solver.cpp:218] Iteration 9936 (2.49542 iter/s, 4.80882s/12 iters), loss = 0.0318879
I0428 22:44:04.189604 25678 solver.cpp:237] Train net output #0: loss = 0.0318879 (* 1 = 0.0318879 loss)
I0428 22:44:04.189612 25678 sgd_solver.cpp:105] Iteration 9936, lr = 0.00234102
I0428 22:44:09.003131 25678 solver.cpp:218] Iteration 9948 (2.49298 iter/s, 4.81352s/12 iters), loss = 0.045278
I0428 22:44:09.003166 25678 solver.cpp:237] Train net output #0: loss = 0.045278 (* 1 = 0.045278 loss)
I0428 22:44:09.003175 25678 sgd_solver.cpp:105] Iteration 9948, lr = 0.00233575
I0428 22:44:13.843305 25678 solver.cpp:218] Iteration 9960 (2.47927 iter/s, 4.84013s/12 iters), loss = 0.0617346
I0428 22:44:13.843343 25678 solver.cpp:237] Train net output #0: loss = 0.0617346 (* 1 = 0.0617346 loss)
I0428 22:44:13.843351 25678 sgd_solver.cpp:105] Iteration 9960, lr = 0.00233049
I0428 22:44:18.658396 25678 solver.cpp:218] Iteration 9972 (2.49219 iter/s, 4.81504s/12 iters), loss = 0.134715
I0428 22:44:18.658429 25678 solver.cpp:237] Train net output #0: loss = 0.134715 (* 1 = 0.134715 loss)
I0428 22:44:18.658437 25678 sgd_solver.cpp:105] Iteration 9972, lr = 0.00232523
I0428 22:44:23.461871 25678 solver.cpp:218] Iteration 9984 (2.49821 iter/s, 4.80344s/12 iters), loss = 0.0142197
I0428 22:44:23.461907 25678 solver.cpp:237] Train net output #0: loss = 0.0142197 (* 1 = 0.0142197 loss)
I0428 22:44:23.461915 25678 sgd_solver.cpp:105] Iteration 9984, lr = 0.00231999
I0428 22:44:27.829972 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_9996.caffemodel
I0428 22:44:30.928009 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_9996.solverstate
I0428 22:44:33.663465 25678 solver.cpp:330] Iteration 9996, Testing net (#0)
I0428 22:44:33.663482 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:44:34.142524 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:44:38.155512 25678 solver.cpp:397] Test net output #0: accuracy = 0.493873
I0428 22:44:38.155639 25678 solver.cpp:397] Test net output #1: loss = 3.01777 (* 1 = 3.01777 loss)
I0428 22:44:38.251736 25678 solver.cpp:218] Iteration 9996 (0.811368 iter/s, 14.7898s/12 iters), loss = 0.0304294
I0428 22:44:38.251773 25678 solver.cpp:237] Train net output #0: loss = 0.0304294 (* 1 = 0.0304294 loss)
I0428 22:44:38.251781 25678 sgd_solver.cpp:105] Iteration 9996, lr = 0.00231475
I0428 22:44:42.400483 25678 solver.cpp:218] Iteration 10008 (2.89247 iter/s, 4.1487s/12 iters), loss = 0.0701308
I0428 22:44:42.400517 25678 solver.cpp:237] Train net output #0: loss = 0.0701308 (* 1 = 0.0701308 loss)
I0428 22:44:42.400524 25678 sgd_solver.cpp:105] Iteration 10008, lr = 0.00230952
I0428 22:44:44.568116 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:44:47.250315 25678 solver.cpp:218] Iteration 10020 (2.47433 iter/s, 4.84979s/12 iters), loss = 0.0530806
I0428 22:44:47.250351 25678 solver.cpp:237] Train net output #0: loss = 0.0530806 (* 1 = 0.0530806 loss)
I0428 22:44:47.250358 25678 sgd_solver.cpp:105] Iteration 10020, lr = 0.0023043
I0428 22:44:52.067620 25678 solver.cpp:218] Iteration 10032 (2.49104 iter/s, 4.81726s/12 iters), loss = 0.0317089
I0428 22:44:52.067656 25678 solver.cpp:237] Train net output #0: loss = 0.0317089 (* 1 = 0.0317089 loss)
I0428 22:44:52.067663 25678 sgd_solver.cpp:105] Iteration 10032, lr = 0.00229909
I0428 22:44:56.888659 25678 solver.cpp:218] Iteration 10044 (2.48911 iter/s, 4.82099s/12 iters), loss = 0.0167158
I0428 22:44:56.888693 25678 solver.cpp:237] Train net output #0: loss = 0.0167158 (* 1 = 0.0167158 loss)
I0428 22:44:56.888700 25678 sgd_solver.cpp:105] Iteration 10044, lr = 0.00229389
I0428 22:45:01.730283 25678 solver.cpp:218] Iteration 10056 (2.47853 iter/s, 4.84158s/12 iters), loss = 0.0519013
I0428 22:45:01.730319 25678 solver.cpp:237] Train net output #0: loss = 0.0519013 (* 1 = 0.0519013 loss)
I0428 22:45:01.730327 25678 sgd_solver.cpp:105] Iteration 10056, lr = 0.00228869
I0428 22:45:06.544340 25678 solver.cpp:218] Iteration 10068 (2.49272 iter/s, 4.81401s/12 iters), loss = 0.0165082
I0428 22:45:06.544375 25678 solver.cpp:237] Train net output #0: loss = 0.0165082 (* 1 = 0.0165082 loss)
I0428 22:45:06.544384 25678 sgd_solver.cpp:105] Iteration 10068, lr = 0.00228351
I0428 22:45:11.367049 25678 solver.cpp:218] Iteration 10080 (2.48825 iter/s, 4.82267s/12 iters), loss = 0.093604
I0428 22:45:11.367194 25678 solver.cpp:237] Train net output #0: loss = 0.093604 (* 1 = 0.093604 loss)
I0428 22:45:11.367204 25678 sgd_solver.cpp:105] Iteration 10080, lr = 0.00227833
I0428 22:45:16.187214 25678 solver.cpp:218] Iteration 10092 (2.48962 iter/s, 4.82001s/12 iters), loss = 0.129422
I0428 22:45:16.187250 25678 solver.cpp:237] Train net output #0: loss = 0.129422 (* 1 = 0.129422 loss)
I0428 22:45:16.187258 25678 sgd_solver.cpp:105] Iteration 10092, lr = 0.00227316
I0428 22:45:18.136682 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_10098.caffemodel
I0428 22:45:21.230093 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_10098.solverstate
I0428 22:45:23.586699 25678 solver.cpp:330] Iteration 10098, Testing net (#0)
I0428 22:45:23.586719 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:45:24.034718 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:45:28.414743 25678 solver.cpp:397] Test net output #0: accuracy = 0.5
I0428 22:45:28.414791 25678 solver.cpp:397] Test net output #1: loss = 2.9398 (* 1 = 2.9398 loss)
I0428 22:45:30.189975 25678 solver.cpp:218] Iteration 10104 (0.856976 iter/s, 14.0027s/12 iters), loss = 0.0589276
I0428 22:45:30.190012 25678 solver.cpp:237] Train net output #0: loss = 0.0589276 (* 1 = 0.0589276 loss)
I0428 22:45:30.190021 25678 sgd_solver.cpp:105] Iteration 10104, lr = 0.002268
I0428 22:45:34.525251 25683 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:45:35.132292 25678 solver.cpp:218] Iteration 10116 (2.42803 iter/s, 4.94227s/12 iters), loss = 0.0691769
I0428 22:45:35.132328 25678 solver.cpp:237] Train net output #0: loss = 0.0691769 (* 1 = 0.0691769 loss)
I0428 22:45:35.132335 25678 sgd_solver.cpp:105] Iteration 10116, lr = 0.00226284
I0428 22:45:39.963527 25678 solver.cpp:218] Iteration 10128 (2.48386 iter/s, 4.83119s/12 iters), loss = 0.0384641
I0428 22:45:39.963563 25678 solver.cpp:237] Train net output #0: loss = 0.0384641 (* 1 = 0.0384641 loss)
I0428 22:45:39.963570 25678 sgd_solver.cpp:105] Iteration 10128, lr = 0.0022577
I0428 22:45:44.767992 25678 solver.cpp:218] Iteration 10140 (2.4977 iter/s, 4.80442s/12 iters), loss = 0.100909
I0428 22:45:44.768157 25678 solver.cpp:237] Train net output #0: loss = 0.100909 (* 1 = 0.100909 loss)
I0428 22:45:44.768167 25678 sgd_solver.cpp:105] Iteration 10140, lr = 0.00225256
I0428 22:45:49.580550 25678 solver.cpp:218] Iteration 10152 (2.49356 iter/s, 4.81239s/12 iters), loss = 0.0518938
I0428 22:45:49.580585 25678 solver.cpp:237] Train net output #0: loss = 0.0518938 (* 1 = 0.0518938 loss)
I0428 22:45:49.580593 25678 sgd_solver.cpp:105] Iteration 10152, lr = 0.00224743
I0428 22:45:54.416370 25678 solver.cpp:218] Iteration 10164 (2.4815 iter/s, 4.83578s/12 iters), loss = 0.0182388
I0428 22:45:54.416406 25678 solver.cpp:237] Train net output #0: loss = 0.0182388 (* 1 = 0.0182388 loss)
I0428 22:45:54.416414 25678 sgd_solver.cpp:105] Iteration 10164, lr = 0.00224231
I0428 22:45:59.255656 25678 solver.cpp:218] Iteration 10176 (2.47973 iter/s, 4.83924s/12 iters), loss = 0.0627391
I0428 22:45:59.255698 25678 solver.cpp:237] Train net output #0: loss = 0.0627391 (* 1 = 0.0627391 loss)
I0428 22:45:59.255707 25678 sgd_solver.cpp:105] Iteration 10176, lr = 0.0022372
I0428 22:46:04.064157 25678 solver.cpp:218] Iteration 10188 (2.49561 iter/s, 4.80845s/12 iters), loss = 0.0427048
I0428 22:46:04.064190 25678 solver.cpp:237] Train net output #0: loss = 0.0427048 (* 1 = 0.0427048 loss)
I0428 22:46:04.064198 25678 sgd_solver.cpp:105] Iteration 10188, lr = 0.0022321
I0428 22:46:08.432128 25678 solver.cpp:447] Snapshotting to binary proto file snapshot_iter_10200.caffemodel
I0428 22:46:11.512274 25678 sgd_solver.cpp:273] Snapshotting solver state to binary proto file snapshot_iter_10200.solverstate
I0428 22:46:16.424602 25678 solver.cpp:310] Iteration 10200, loss = 0.0440367
I0428 22:46:16.424695 25678 solver.cpp:330] Iteration 10200, Testing net (#0)
I0428 22:46:16.424700 25678 net.cpp:676] Ignoring source layer train-data
I0428 22:46:16.825510 25687 data_layer.cpp:73] Restarting data prefetching from start.
I0428 22:46:21.240029 25678 solver.cpp:397] Test net output #0: accuracy = 0.506127
I0428 22:46:21.240077 25678 solver.cpp:397] Test net output #1: loss = 2.88448 (* 1 = 2.88448 loss)
I0428 22:46:21.240088 25678 solver.cpp:315] Optimization Done.
I0428 22:46:21.240095 25678 caffe.cpp:259] Optimization Done.
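
# Note (not part of the log): the lr values printed by sgd_solver.cpp above follow
# Caffe's standard "sigmoid" lr_policy, rate = base_lr / (1 + exp(-gamma * (iter - stepsize))).
# The sketch below is an assumption-based reconstruction using this run's solver settings
# (base_lr = 0.01, gamma = -0.00024509805, stepsize = 5100); it is a verification aid,
# not tooling that was part of this job.
import math

def sigmoid_lr(it, base_lr=0.01, gamma=-0.00024509805, stepsize=5100):
    """Caffe 'sigmoid' learning-rate policy for a given iteration."""
    return base_lr / (1.0 + math.exp(-gamma * (it - stepsize)))

# Spot-check against the log: iteration 9996 prints lr = 0.00231475,
# iteration 10188 prints lr = 0.0022321.
for it in (9996, 10188):
    print(it, round(sigmoid_lr(it), 8))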