[caffeノート006]:caffeにおけるsoftmax層の導出
7829 ワード
1. SoftmaxParameter
// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
// Computation backend used to run the layer.
enum Engine {
// DEFAULT defers the choice to the framework at run time
// (presumably picks CUDNN when available -- confirm in layer_factory).
DEFAULT = 0;
// Plain CPU/GPU Caffe implementation.
CAFFE = 1;
// NVIDIA cuDNN implementation.
CUDNN = 2;
}
optional Engine engine = 1 [default = DEFAULT];
// The axis along which to perform the softmax -- may be negative to index
// from the end (e.g., -1 for the last axis).
// Any other axes will be evaluated as independent softmaxes.
optional int32 axis = 2 [default = 1];
}
2.構造
caffeのsoftmaxレイヤの順伝播はsoftmax関数そのものである。入力を
$z_i\ (i=1,\dots,k)$ とすると、対応する出力は
$$a_i = \frac{e^{z_i}}{\sum_{j=1}^{k} e^{z_j}}$$
である。
3.順方向伝播
計算手順
Step 1: 入力の最大値を計算する: $z = \max_i \{z_i\}$
Step 2: 最大値を減算する: $z_i \leftarrow z_i - z$
Step 3: 指数を取る: $z_i \leftarrow e^{z_i}$
Step 4: 総和を取る: $z_{\mathrm{sum}} = \sum_{i=1}^{k} z_i$
Step 5: softmaxを計算する: $a_i = \dfrac{z_i}{z_{\mathrm{sum}}}$
（Step 1–2 の最大値減算は結果を変えずに指数のオーバーフローを防ぐ数値安定化である。）
コード
template <typename Dtype>
void SoftmaxLayer::Forward_cpu(const vector *>& bottom,
const vector *>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
Dtype* scale_data = scale_.mutable_cpu_data();
// channles
int channels = bottom[0]->shape(softmax_axis_);
int dim = bottom[0]->count() / outer_num_;
// top , bottom
caffe_copy(bottom[0]->count(), bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// outer_num_
for (int i = 0; i < outer_num_; ++i) {
// scale_data i , softmax
// Step1:
// initialize scale_data to the first plane
caffe_copy(inner_num_, bottom_data + i * dim, scale_data);
for (int j = 0; j < channels; j++) {
for (int k = 0; k < inner_num_; k++) {
scale_data[k] = std::max(scale_data[k],
bottom_data[i * dim + j * inner_num_ + k]);
}
}
// subtraction, Step2:
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_,
1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data);
// exponentiation, Step3:
caffe_exp(dim, top_data, top_data);
// sum after exp, Step4:
caffe_cpu_gemv(CblasTrans, channels, inner_num_, 1.,
top_data, sum_multiplier_.cpu_data(), 0., scale_data);
// division, Step5:
for (int j = 0; j < channels; j++) {
caffe_div(inner_num_, top_data, scale_data, top_data);
top_data += inner_num_;
}
}
}
4.逆伝播
計算手順
Step 1: top_data と top_diff の内積を求める
Step 2: その内積を top_diff から引く（導出の $\frac{\partial \ell}{\partial a_j} - \frac{\partial \ell}{\partial a} \cdot a$ に対応）
Step 3: top_data を要素ごとに乗算して bottom_diff を得る
導出プロセス
導出プロセス
連鎖律より
$$\frac{\partial \ell}{\partial z} = \frac{\partial \ell}{\partial a}\,\frac{\partial a}{\partial z}$$
ここで $\dfrac{\partial \ell}{\partial a} = \text{top\_diff}$、$a = \text{top\_data}$ である。
softmaxの偏微分は
$$\frac{\partial a_i}{\partial z_j} = \frac{\partial}{\partial z_j}\left(\frac{e^{z_i}}{\sum_k e^{z_k}}\right)$$
$i \neq j$ の場合:
$$\frac{\partial a_i}{\partial z_j} = -\frac{e^{z_i}\,e^{z_j}}{\left(\sum_k e^{z_k}\right)^2} = -a_i a_j$$
$i = j$ の場合:
$$\frac{\partial a_j}{\partial z_j} = \frac{e^{z_j}\sum_k e^{z_k} - e^{z_j}\,e^{z_j}}{\left(\sum_k e^{z_k}\right)^2} = a_j - a_j^2$$
これらをまとめると、勾配は次のようになる:
$$\frac{\partial \ell}{\partial z_j}
= \sum_i \frac{\partial \ell}{\partial a_i}\,\frac{\partial a_i}{\partial z_j}
= -\left(\frac{\partial \ell}{\partial a}\cdot a\right) a_j + \frac{\partial \ell}{\partial a_j}\,a_j
= \left(\frac{\partial \ell}{\partial a_j} - \frac{\partial \ell}{\partial a}\cdot a\right) a_j$$
コード
template <typename Dtype>
void SoftmaxLayer::Backward_cpu(const vector *>& top,
const vector<bool>& propagate_down,
const vector *>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype* scale_data = scale_.mutable_cpu_data();
int channels = top[0]->shape(softmax_axis_);
int dim = top[0]->count() / outer_num_;
caffe_copy(top[0]->count(), top_diff, bottom_diff);
for (int i = 0; i < outer_num_; ++i) {
// compute dot(top_diff, top_data) and subtract them from the bottom diff, Step1:
for (int k = 0; k < inner_num_; ++k) {
scale_data[k] = caffe_cpu_strided_dot(channels,
bottom_diff + i * dim + k, inner_num_,
top_data + i * dim + k, inner_num_);
}
// subtraction, Step2:
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1,
-1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim);
}
// elementwise multiplication, Step3:
caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}