name: "AlexNet"
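# Single network definition covering the train, val, and deploy stages;
# each layer's include/exclude rules select the stages it runs in.

# Data layers: 227x227 crops; training uses mirroring and a batch size of 128,
# validation a batch size of 32.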
layer {
  name: "train-data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    stage: "train"
  }
  transform_param {
    mirror: true
    crop_size: 227
  }
  data_param {
    batch_size: 128
  }
}
layer {
  name: "val-data"
  type: "Data"
  top: "data"
  top: "label"
  include {
    stage: "val"
  }
  transform_param {
    crop_size: 227
  }
  data_param {
    batch_size: 32
  }
}
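# conv1: 96 filters, 11x11 kernel, stride 4, followed by ReLU, LRN, and 3x3/2 max pooling.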
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
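# conv2: 256 filters, 5x5 kernel, pad 2, 2 groups, followed by ReLU, LRN, and 3x3/2 max pooling.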
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
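# conv3: 384 filters, 3x3 kernel, pad 1, followed by ReLU.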
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
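# conv4 and conv4.2: two grouped 3x3 convolutions with 192 filters each, pad 1,
# each followed by ReLU.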
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 192
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv4.2"
  type: "Convolution"
  bottom: "conv4"
  top: "conv4.2"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 192
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4.2"
  type: "ReLU"
  bottom: "conv4.2"
  top: "conv4.2"
}
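# conv5: 256 filters, 3x3 kernel, pad 1, 2 groups, followed by ReLU and 3x3/2 max pooling.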
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4.2"
  top: "conv5"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
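# Classifier: two 4096-unit fully connected layers, each with ReLU and 50% dropout.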
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "fc7"
  top: "fc7"
}
layer {
  name: "drop7"
  type: "Dropout"
  bottom: "fc7"
  top: "fc7"
  dropout_param {
    dropout_ratio: 0.5
  }
}
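# fc8: final InnerProduct layer; num_output is left unspecified here and must be
# set (typically to the number of classes) before the net can be instantiated.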
layer {
  name: "fc8"
  type: "InnerProduct"
  bottom: "fc7"
  top: "fc8"
  param {
    lr_mult: 1.0
    decay_mult: 1.0
  }
  param {
    lr_mult: 2.0
    decay_mult: 0.0
  }
  inner_product_param {
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.0
    }
  }
}
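# Outputs: accuracy during validation, softmax loss during training and
# validation, and plain softmax probabilities at deploy time.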
layer {
  name: "accuracy"
  type: "Accuracy"
  bottom: "fc8"
  bottom: "label"
  top: "accuracy"
  include {
    stage: "val"
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "fc8"
  bottom: "label"
  top: "loss"
  exclude {
    stage: "deploy"
  }
}
layer {
  name: "softmax"
  type: "Softmax"
  bottom: "fc8"
  top: "softmax"
  include {
    stage: "deploy"
  }
}