-#include <vector>
 #include <algorithm>
 #include <cmath>
+#include <vector>
 
 #include "caffe/layers/gather_nd_layer.hpp"
-#include "caffe/util/math_functions.hpp"
-
-
+#include "caffe/util/math_functions.hpp"
 
 namespace caffe {
 
 template <typename Dtype>
-void GatherNdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
-    const vector<Blob<Dtype>*>& top) {
-  const GatherNdParameter& gather_nd_param = this->layer_param_.gather_nd_param();
-  indices_.clear();
-  std::copy(gather_nd_param.indices().begin(),
-            gather_nd_param.indices().end(),
-            std::back_inserter(indices_));
-  indices_shape_.clear();
-  std::copy(gather_nd_param.shape().begin(),
-            gather_nd_param.shape().end(),
-            std::back_inserter(indices_shape_));
+void GatherNdLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype> *> &bottom,
+                                      const vector<Blob<Dtype> *> &top) {
+  const GatherNdParameter &gather_nd_param =
+      this->layer_param_.gather_nd_param();
+  indices_.clear();
+  std::copy(gather_nd_param.indices().begin(), gather_nd_param.indices().end(),
+            std::back_inserter(indices_));
+  indices_shape_.clear();
+  std::copy(gather_nd_param.indices_shape().begin(),
+            gather_nd_param.indices_shape().end(),
+            std::back_inserter(indices_shape_));
 }
 
 template <typename Dtype>
-void GatherNdLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
-    const vector<Blob<Dtype>*>& top) {
-  const int num_axes = bottom[0]->num_axes();
-  CHECK_GE(num_axes, 1) << "the dimension of input should be larger than or equal to 1";
-  //const GatherNdParameter& gather_nd_param = this->layer_param_.gather_nd_param();
-  indices_dim_ = indices_shape_.size();
-  CHECK_GE(indices_dim_, 1) << "the dimension of indices should be larger than or equal to 1";
-  int count = 1;
-  for (int i = 0; i < indices_shape_.size(); ++i) {
-    count *= indices_shape_[i];
-  }
-  CHECK_EQ(indices_.size(), count) << "the size and shape of indices do not match" ;
-  vector<int> bottom_shape = bottom[0]->shape();
-  vector<int> top_shape = bottom[0]->shape();
-  indices_N_ = indices_shape_[indices_shape_.size()-1];
-  CHECK_LE(indices_N_, num_axes) << "indices.shape[-1] must be <= params.rank, but saw indices.shape[-1]:"
-      << indices_N_ << ", and params.rank: " << num_axes;
-  top_shape.resize(indices_dim_ - 1 + num_axes - indices_N_);
-  gather_nd_size_ = bottom[0]->count(indices_N_);
+void GatherNdLayer<Dtype>::Reshape(const vector<Blob<Dtype> *> &bottom,
+                                   const vector<Blob<Dtype> *> &top) {
+  const int num_axes = bottom[0]->num_axes();
+  CHECK_GE(num_axes, 1)
+      << "the dimension of input should be larger than or equal to 1";
+  // const GatherNdParameter& gather_nd_param =
+  // this->layer_param_.gather_nd_param();
+  indices_dim_ = indices_shape_.size();
+  CHECK_GE(indices_dim_, 1)
+      << "the dimension of indices should be larger than or equal to 1";
+  int count = 1;
+  for (int i = 0; i < indices_shape_.size(); ++i) {
+    count *= indices_shape_[i];
+  }
+  CHECK_EQ(indices_.size(), count)
+      << "the size and shape of indices do not match";
+  vector<int> bottom_shape = bottom[0]->shape();
+  vector<int> top_shape = bottom[0]->shape();
+  indices_N_ = indices_shape_[indices_shape_.size() - 1];
+  CHECK_LE(indices_N_, num_axes)
+      << "indices.shape[-1] must be <= params.rank, but saw indices.shape[-1]: "
+      << indices_N_ << ", and params.rank: " << num_axes;
+  top_shape.resize(indices_dim_ - 1 + num_axes - indices_N_);
+  gather_nd_size_ = bottom[0]->count(indices_N_);
 
-  // The result shape is
-  // indices.shape[:-1] + params.shape[indices.shape[-1]:]
-  for (int i = 0; i < indices_.size(); ++i) {
-    CHECK_GE(indices_[i], 0) << "indices_ element with idx" << i << " is negative";
-  }
-  for (int i = 0; i < indices_dim_ - 1; ++i) {
-    top_shape[i] = indices_shape_[i];
-  }
-  for (int i = 0; i < num_axes - indices_N_; ++i) {
-    top_shape[i + indices_dim_ - 1] = bottom_shape[i + indices_N_];
-  }
-  top[0]->Reshape(top_shape);
+  // The result shape is
+  // indices.shape[:-1] + params.shape[indices.shape[-1]:]
+  for (int i = 0; i < indices_.size(); ++i) {
+    CHECK_GE(indices_[i], 0)
+        << "indices_ element with idx " << i << " is negative";
+  }
+  for (int i = 0; i < indices_dim_ - 1; ++i) {
+    top_shape[i] = indices_shape_[i];
+  }
+  for (int i = 0; i < num_axes - indices_N_; ++i) {
+    top_shape[i + indices_dim_ - 1] = bottom_shape[i + indices_N_];
+  }
+  top[0]->Reshape(top_shape);
 }
 
 template <typename Dtype>
-void GatherNdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-    const vector<Blob<Dtype>*>& top) {
-  const Dtype* bottom_data = bottom[0]->cpu_data();
-  Dtype* top_data = top[0]->mutable_cpu_data();
-  vector<int> bottom_shape = bottom[0]->shape();
-  for (int m = 0; m < indices_.size()/indices_N_; ++m) {
-    const int top_offset = m * gather_nd_size_;
-    int bottom_offset = 0;
-    for (int n = 0; n < indices_N_; ++n) {
-      int indices_value = indices_[m*indices_N_ + n];
-      int params_idx = bottom_shape[n];
-      CHECK_LT(indices_value, params_idx) << "indices value does not index into param dimension: " << n;
-      bottom_offset += indices_[m*indices_N_ + n] * bottom[0]->count(n + 1);
-    }
-    caffe_copy(gather_nd_size_,
-        bottom_data + bottom_offset, top_data + top_offset);
-  }
+void GatherNdLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype> *> &bottom,
+                                       const vector<Blob<Dtype> *> &top) {
+  const Dtype *bottom_data = bottom[0]->cpu_data();
+  Dtype *top_data = top[0]->mutable_cpu_data();
+  vector<int> bottom_shape = bottom[0]->shape();
+  for (int m = 0; m < indices_.size() / indices_N_; ++m) {
+    const int top_offset = m * gather_nd_size_;
+    int bottom_offset = 0;
+    for (int n = 0; n < indices_N_; ++n) {
+      int indices_value = indices_[m * indices_N_ + n];
+      int params_idx = bottom_shape[n];
+      CHECK_LT(indices_value, params_idx)
+          << "indices value does not index into param dimension: " << n;
+      bottom_offset += indices_[m * indices_N_ + n] * bottom[0]->count(n + 1);
+    }
+    caffe_copy(gather_nd_size_, bottom_data + bottom_offset,
+               top_data + top_offset);
+  }
 }
 
 INSTANTIATE_CLASS(GatherNdLayer);
 REGISTER_LAYER_CLASS(GatherNd);
 
-
-} // namespace caffe
+}  // namespace caffe
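
For reference, a minimal configuration sketch of how this layer is driven. The layer reads its index tuples from gather_nd_param: indices is the flattened list of index values and indices_shape is the shape of that list, whose last dimension (indices_N_) is the length of each tuple. The exact GatherNdParameter message lives in caffe.proto and is not part of this change, so the field spelling below simply mirrors the accessors used above; the layer and blob names are made up for illustration.

layer {
  name: "gather"          # hypothetical name
  type: "GatherNd"
  bottom: "params"        # e.g. a 4x3 blob
  top: "gathered"
  gather_nd_param {
    indices: 0            # first index tuple: row 0
    indices: 2            # second index tuple: row 2
    indices_shape: 2      # two index tuples ...
    indices_shape: 1      # ... each of length 1, so indices_N_ == 1
  }
}

With a 4x3 bottom blob, Reshape() computes indices_N_ = 1, gather_nd_size_ = 3, and a 2x3 top shape (indices.shape[:-1] + params.shape[indices.shape[-1]:], as the comment in Reshape() states), and Forward_cpu() then copies rows 0 and 2 of the bottom blob into the top blob.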