|
| 1 | +#ifndef CAFFE_REORG_LAYER_HPP_ |
| 2 | +#define CAFFE_REORG_LAYER_HPP_ |
| 3 | + |
| 4 | +#include <vector> |
| 5 | + |
| 6 | +#include "caffe/blob.hpp" |
| 7 | +#include "caffe/layer.hpp" |
| 8 | +#include "caffe/proto/caffe.pb.h" |
| 9 | + |
| 10 | +namespace caffe { |
| 11 | + |
/*
 * @brief Rearranges blob data between the channel and spatial dimensions
 *        by a stride factor: a (N, C, H, W) bottom maps to
 *        (N, C/(stride*stride), H*stride, W*stride) (see reorg_cpu below;
 *        the reverse_ flag presumably selects the inverse mapping).
 *
 * Note: unlike FlattenLayer/ReshapeLayer, this layer permutes the data, so
 * the top blob cannot share the bottom blob's memory.
 */
template<typename Dtype>
class ReorgLayer : public Layer<Dtype> {
 public:
  explicit ReorgLayer(const LayerParameter &param)
      : Layer<Dtype>(param) {}

  // Reads the layer's configuration (presumably stride/reverse from the
  // layer parameter) -- implemented in the .cpp; TODO confirm.
  virtual void LayerSetUp(const vector<Blob<Dtype> *> &bottom,
                          const vector<Blob<Dtype> *> &top);

  // Shapes the top blob from the bottom shape and stride_ -- implemented
  // in the .cpp, not visible here.
  virtual void Reshape(const vector<Blob<Dtype> *> &bottom,
                       const vector<Blob<Dtype> *> &top);

  virtual inline const char *type() const { return "Reorg"; }

  // The layer takes exactly one input blob and produces exactly one output.
  virtual inline int ExactNumBottomBlobs() const { return 1; }

  virtual inline int ExactNumTopBlobs() const { return 1; }

 protected:

  virtual void Forward_cpu(const vector<Blob<Dtype> *> &bottom,
                           const vector<Blob<Dtype> *> &top);

  virtual void Backward_cpu(const vector<Blob<Dtype> *> &top,
                            const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom);

  virtual void Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                           const vector<Blob<Dtype> *> &top);

  virtual void Backward_gpu(const vector<Blob<Dtype> *> &top,
                            const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom);

  int stride_;             // channel <-> spatial rearrangement factor
  bool reverse_;           // presumably flips the reorg direction -- confirm in the .cpp
  int batch_num_;          // cached bottom dimensions (presumably set in Reshape)
  int channels_;
  int reorged_channels_;   // channel count after the reorg (depends on reverse_ -- confirm)
  int height_, width_;
  int reorged_height_, reorged_width_;
  Blob<Dtype> diff_;       // scratch blob, presumably used by Backward -- confirm
};
/*
 * Legacy darknet-style reorg on raw CPU buffers.
 *
 * Treats @p x as a (batch, c, h, w) array. When @p forward is nonzero, each
 * input channel k is scattered into output channel k % (c/stride^2), at the
 * intra-stride spatial offset encoded by k / (c/stride^2), producing a
 * (batch, c/(stride*stride), h*stride, w*stride) layout in @p out. When
 * @p forward is zero the inverse gather is performed (out gets the bottom
 * layout, x is read with the reorged layout).
 *
 * @param x       input buffer of batch*c*h*w elements; read only
 * @param w       input width
 * @param h       input height
 * @param c       input channels; must be divisible by stride*stride
 * @param batch   number of images
 * @param stride  rearrangement factor
 * @param forward nonzero: scatter channels -> space; zero: inverse gather
 * @param out     output buffer of batch*c*h*w elements
 *
 * NOTE(review): this near-duplicates the const/bool overload below, and a
 * call mixing a non-const pointer with a bool literal can be ambiguous
 * between the two. Prefer the overload below in new code.
 */
template<typename Dtype>
void reorg_cpu(const Dtype *x, int w, int h, int c, int batch, int stride,
               int forward, Dtype *out) {
  // Channel count of the reorged (forward-output) layout.
  const int out_c = c / (stride * stride);

  for (int b = 0; b < batch; ++b) {
    for (int k = 0; k < c; ++k) {
      for (int j = 0; j < h; ++j) {
        for (int i = 0; i < w; ++i) {
          const int in_index = i + w * (j + h * (k + c * b));
          const int c2 = k % out_c;
          const int offset = k / out_c;
          const int w2 = i * stride + offset % stride;
          const int h2 = j * stride + offset / stride;
          const int out_index =
              w2 + w * stride * (h2 + h * stride * (c2 + out_c * b));
          if (forward)
            out[out_index] = x[in_index];
          else
            out[in_index] = x[out_index];
        }
      }
    }
  }
}
| 83 | + |
/*
 * Reorg between a bottom (b_n, b_c, b_h, b_w) buffer and the reorged layout
 * with b_c/(stride*stride) channels and stride-times-larger spatial dims.
 *
 * @param bottom_data buffer in the bottom layout (read when forward,
 *                    written when !forward via top_data aliasing below)
 * @param b_w, b_h    bottom spatial dimensions
 * @param b_c         bottom channels; must be divisible by stride*stride
 * @param b_n         batch size
 * @param stride      rearrangement factor
 * @param forward     true: scatter bottom -> top; false: inverse gather
 *                    (bottom_data is read with the reorged layout and
 *                    top_data is written with the bottom layout)
 * @param top_data    destination buffer of b_n*b_c*b_h*b_w elements
 */
template<typename Dtype>
void reorg_cpu(const Dtype *bottom_data, const int b_w, const int b_h,
               const int b_c, const int b_n, const int stride,
               const bool forward, Dtype *top_data) {
  const int t_c = b_c / (stride * stride);
  const int t_w = b_w * stride;
  const int t_h = b_h * stride;
  for (int n = 0; n < b_n; ++n) {
    for (int c = 0; c < b_c; ++c) {
      // Destination channel and intra-stride spatial shift are fixed per
      // source channel, so compute them once outside the spatial loops.
      const int c2 = c % t_c;
      const int shift = c / t_c;
      const int dx = shift % stride;
      const int dy = shift / stride;
      for (int h = 0; h < b_h; ++h) {
        for (int w = 0; w < b_w; ++w) {
          const int bottom_index = w + b_w * (h + b_h * (c + b_c * n));
          const int top_index = (w * stride + dx)
              + t_w * ((h * stride + dy) + t_h * (c2 + t_c * n));
          if (forward) {
            top_data[top_index] = bottom_data[bottom_index];
          } else {
            top_data[bottom_index] = bottom_data[top_index];
          }
        }
      }
    }
  }
}
| 109 | + |
| 110 | + |
| 111 | +} // namespace caffe |
| 112 | + |
| 113 | +#endif // CAFFE_REORG_LAYER_HPP_ |
0 commit comments