Caffe学習ノート(4)layerをカスタマイズしてテストする


例:sinレイヤを追加する


主なプロセス:
  • ヘッダファイル:include/caffe/layers/your_layer.hpp
  • 層定義:src/caffe/layers/your_layer.cppおよびsrc/caffe/layers/your_layer.cu[オプション]
  • テストファイル:src/caffe/test/test_your_layer.cpp
  • buildフォルダの下でテスト
  • ヘッダファイル定義

    caffe_root/include/caffe/layers/ディレクトリの下にヘッダファイルsin_layer.hppを作成し、以下の内容を追加します.
    #ifndef CAFFE_SIN_LAYER_HPP_
    #define CAFFE_SIN_LAYER_HPP_
    
    #include <vector>
    
    #include "caffe/blob.hpp"
    #include "caffe/layer.hpp"
    #include "caffe/proto/caffe.pb.h"
    
    #include "caffe/layers/neuron_layer.hpp"
    
    namespace caffe {
    
    // Sin neuron activation layer: computes y = sin(x) elementwise.
    // Inherits the one-bottom/one-top shape handling from NeuronLayer.
    template <typename Dtype>
    class SinLayer : public NeuronLayer<Dtype> {
     public:
      explicit SinLayer(const LayerParameter& param)
          : NeuronLayer<Dtype>(param) {}
    
      // Type string used by the layer factory (REGISTER_LAYER_CLASS(Sin)).
      virtual inline const char* type() const { return "Sin"; }
    
     protected:
      // Forward: top[i] = sin(bottom[i]).
      virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top);
      virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top);
    
      // Backward: bottom_diff[i] = top_diff[i] * cos(bottom[i]).
      virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
          const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
      virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
          const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
    };
    
    }  // namespace caffe
    
    #endif  // CAFFE_SIN_LAYER_HPP_
    

    Layer定義


    CPUバージョン:.cpp

    caffe_root/src/caffe/layers/ディレクトリの下に配置されます.
    // Sin neuron activation function layer.
    // Adapted from TanH layer which was adapted from the ReLU layer code written by Yangqing Jia
    
    #include <cmath>
    #include <vector>
    
    #include "caffe/layers/sin_layer.hpp"
    
    namespace caffe {
    
    // CPU forward pass: top[i] = sin(bottom[i]) for every element.
    template <typename Dtype>
    void SinLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
                                      const vector<Blob<Dtype>*>& top)
    {
      const Dtype* bottom_data = bottom[0]->cpu_data();
      Dtype* top_data = top[0]->mutable_cpu_data();
      const int count = bottom[0]->count();
      for (int i = 0; i < count; ++i) {
        // std::sin resolves the correct float/double overload from <cmath>.
        top_data[i] = std::sin(bottom_data[i]);
      }
    }
    
    // CPU backward pass: d(sin x)/dx = cos(x), so
    // bottom_diff[i] = top_diff[i] * cos(bottom_data[i]).
    template <typename Dtype>
    void SinLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
                                        const vector<bool>& propagate_down,
                                        const vector<Blob<Dtype>*>& bottom)
    {
      if (propagate_down[0]) {
        const Dtype* bottom_data = bottom[0]->cpu_data();
        const Dtype* top_diff = top[0]->cpu_diff();
        Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
        const int count = bottom[0]->count();
        for (int i = 0; i < count; ++i) {
          bottom_diff[i] = top_diff[i] * std::cos(bottom_data[i]);
        }
      }
    }
    
    #ifdef CPU_ONLY
    STUB_GPU(SinLayer);
    #endif
    
    INSTANTIATE_CLASS(SinLayer);
    REGISTER_LAYER_CLASS(Sin);
    
    }  // namespace caffe
    

    GPUバージョン:.cu

    caffe_root/src/caffe/layers/ディレクトリの下に配置されます.
    // Sin neuron activation function layer.
    // Adapted from TanH layer which was adapted from the ReLU layer code written by Yangqing Jia
    
    #include <cmath>
    #include <vector>
    
    #include "caffe/layers/sin_layer.hpp"
    
    namespace caffe {
    
    // CUDA kernel for the forward pass: out[i] = sin(in[i]).
    template <typename Dtype>
    __global__ void SinForward(const int n, const Dtype* in, Dtype* out) {
      CUDA_KERNEL_LOOP(index, n) {
        out[index] = sin(in[index]);
      }
    }
    
    template <typename Dtype>
    void SinLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
        const vector<Blob<Dtype>*>& top) {
      const Dtype* bottom_data = bottom[0]->gpu_data();
      Dtype* top_data = top[0]->mutable_gpu_data();
      const int count = bottom[0]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      SinForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, top_data);
      CUDA_POST_KERNEL_CHECK;
    }
    
    // CUDA kernel for the backward pass. out_data holds the layer INPUT x
    // (the bottom data, passed in by Backward_gpu below), so the gradient is
    // out_diff[i] = in_diff[i] * cos(x).
    template <typename Dtype>
    __global__ void SinBackward(const int n, const Dtype* in_diff,
        const Dtype* out_data, Dtype* out_diff) {
      CUDA_KERNEL_LOOP(index, n) {
        const Dtype x = out_data[index];
        out_diff[index] = in_diff[index] * cos(x);
      }
    }
    
    template <typename Dtype>
    void SinLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
        const vector<bool>& propagate_down,
        const vector<Blob<Dtype>*>& bottom) {
      if (propagate_down[0]) {
        const Dtype* bottom_data = bottom[0]->gpu_data();
        const Dtype* top_diff = top[0]->gpu_diff();
        Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
        const int count = bottom[0]->count();
        // NOLINT_NEXT_LINE(whitespace/operators)
        SinBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
            count, top_diff, bottom_data, bottom_diff);
        CUDA_POST_KERNEL_CHECK;
      }
    }
    
    INSTANTIATE_LAYER_GPU_FUNCS(SinLayer);
    
    }  // namespace caffe
    

    テストレイヤー


    定義されたsin layerが有効かどうかをテストするためのテストファイルを追加し、caffe_root/src/caffe/test/ディレクトリの下にテストファイルtest_sin_layer.cppを追加します.
    #include <algorithm>
    #include <cmath>
    #include <vector>
    
    #include "gtest/gtest.h"
    
    #include "caffe/blob.hpp"
    #include "caffe/common.hpp"
    #include "caffe/filler.hpp"
    
    #include "caffe/test/test_caffe_main.hpp"
    #include "caffe/test/test_gradient_check_util.hpp"
    
    // include the layer that we are testing!
    #include "caffe/layers/sin_layer.hpp"
    
    namespace caffe {
    
    // Test fixture for SinLayer; runs for every Dtype/device combination
    // listed in TestDtypesAndDevices.
    template <typename TypeParam>
    class SinLayerTest : public MultiDeviceTest<TypeParam> {
      typedef typename TypeParam::Dtype Dtype;
    
     protected:
      SinLayerTest()
          : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
            blob_top_(new Blob<Dtype>())
      {
        // Fixed seed keeps the Gaussian fill reproducible across runs.
        Caffe::set_random_seed(1701);
        blob_bottom_vec_.push_back(blob_bottom_);
        blob_top_vec_.push_back(blob_top_);
      }
      virtual ~SinLayerTest() { delete blob_bottom_; delete blob_top_; }
    
      // Fill the bottom blob with Gaussian noise of the given std, run a
      // forward pass, and check every output element against sin(input)
      // within a relative/absolute tolerance.
      void TestForward(Dtype filler_std)
      {
        FillerParameter filler_param;
        filler_param.set_std(filler_std);
        GaussianFiller<Dtype> filler(filler_param);
        filler.Fill(this->blob_bottom_);
    
        LayerParameter layer_param;
        SinLayer<Dtype> layer(layer_param);
        layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
        layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
        // Now, check values
        const Dtype* bottom_data = this->blob_bottom_->cpu_data();
        const Dtype* top_data = this->blob_top_->cpu_data();
        const Dtype min_precision = 1e-5;
        for (int i = 0; i < this->blob_bottom_->count(); ++i) {
          Dtype expected_value = sin(bottom_data[i]);
          // Tolerance scales with the magnitude of the expected value but
          // never drops below min_precision (handles sin(x) near zero).
          Dtype precision = std::max(
            Dtype(std::abs(expected_value * Dtype(1e-4))), min_precision);
          EXPECT_NEAR(expected_value, top_data[i], precision);
        }
      }
    
      // Numerically check the analytic gradient (cos(x)) element-wise with
      // Caffe's GradientChecker.
      void TestBackward(Dtype filler_std)
      {
        FillerParameter filler_param;
        filler_param.set_std(filler_std);
        GaussianFiller<Dtype> filler(filler_param);
        filler.Fill(this->blob_bottom_);
    
        LayerParameter layer_param;
        SinLayer<Dtype> layer(layer_param);
        GradientChecker<Dtype> checker(1e-4, 1e-2, 1701);
        checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
            this->blob_top_vec_);
      }
    
      Blob<Dtype>* const blob_bottom_;
      Blob<Dtype>* const blob_top_;
      vector<Blob<Dtype>*> blob_bottom_vec_;
      vector<Blob<Dtype>*> blob_top_vec_;
    };
    
    // Instantiate the fixture for all Dtype/device combinations.
    TYPED_TEST_CASE(SinLayerTest, TestDtypesAndDevices);
    
    // Forward pass produces sin(x).
    TYPED_TEST(SinLayerTest, TestSin) {
      this->TestForward(1.0);
    }
    
    // Backward pass computes the correct gradient.
    TYPED_TEST(SinLayerTest, TestSinGradient) {
      this->TestBackward(1.0);
    }
    
    }  // namespace caffe
    

    テストの実行:buildフォルダの下
    cmake ..
    make all -j8
    make test
    make runtest GTEST_FILTER='SinLayerTest/*'
    

    GTEST_FILTERに指定するテストクラス名(ここではSinLayerTest)は、対応するテストファイル内のTYPED_TEST定義から確認できます。