Commit bb89c03
Revert "add submanifold sparse feature for conv layer"
This reverts commit e02871b.
1 parent b720d3f · commit bb89c03

File tree

7 files changed: +9 / -48 lines

FEATURES.md

Lines changed: 0 additions & 1 deletion
@@ -103,7 +103,6 @@ pad_type (deprecated, "SAME" style padding) in ConvolutionParameter and PoolingP
 pad_l, pad_r, pad_t and pad_b (arbitrary 2D padding) in ConvolutionParameter and PoolingParameter
 AVE_EXC_PAD (average pooling excluding the paddings), AVE_TF (deprecated, alias for AVE_EXC_PAD) in PoolingParameter
 ceil_mode in PoolingParameter
-submanifold_sparse in ConvolutionParameter
 faceboxes, box_width, box_height, keras, tf and yx_order in PriorBoxParameter
 relu6, maximum and minimum in ReLUParameter

include/caffe/layers/base_conv_layer.hpp

Lines changed: 1 addition & 2 deletions
@@ -35,7 +35,7 @@ class BaseConvolutionLayer : public Layer<Dtype> {
   // The last argument in forward_cpu_gemm is so that we can skip the im2col if
   // we just called weight_cpu_gemm with the same input.
   void forward_cpu_gemm(const Dtype* input, const Dtype* weights,
-      Dtype* output, bool skip_im2col = false, bool submanifold_sparse = false);
+      Dtype* output, bool skip_im2col = false);
   void forward_cpu_bias(Dtype* output, const Dtype* bias);
   void backward_cpu_gemm(const Dtype* input, const Dtype* weights,
       Dtype* output);
@@ -111,7 +111,6 @@ class BaseConvolutionLayer : public Layer<Dtype> {
   bool per_channel_scale_weight_; //CUSTOMIZATION
   bool per_channel_scale_output_; //CUSTOMIZATION
   int quantize_method_; //CUSTOMIZATION
-  bool submanifold_sparse_;
 
 private:
   // wrap im2col/col2im so we don't have to remember the (long) argument lists

include/caffe/util/math_functions.hpp

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ template <typename Dtype>
 void caffe_cpu_gemm(const CBLAS_TRANSPOSE TransA,
     const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
     const Dtype alpha, const Dtype* A, const Dtype* B, const Dtype beta,
-    Dtype* C, const bool submanifold_sparse=false);
+    Dtype* C);
 
 template <typename Dtype>
 void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N,
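
Aside: the restored caffe_cpu_gemm computes C = alpha * op(A) * op(B) + beta * C in row-major layout. A minimal usage sketch of that signature (illustrative sizes and values, assuming a standard Caffe build where math_functions.hpp pulls in the CBLAS enums):

#include "caffe/util/math_functions.hpp"

void gemm_example() {
  const int M = 2, N = 2, K = 3;
  float A[M * K] = {1, 2, 3, 4, 5, 6};  // 2x3 matrix
  float B[K * N] = {1, 0, 0, 1, 1, 1};  // 3x2 matrix
  float C[M * N] = {0};                 // 2x2 result buffer
  // C = 1.0 * A * B + 0.0 * C; C ends up {{4, 5}, {10, 11}}.
  caffe::caffe_cpu_gemm<float>(CblasNoTrans, CblasNoTrans, M, N, K,
                               1.f, A, B, 0.f, C);
}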

src/caffe/layers/base_conv_layer.cpp

Lines changed: 2 additions & 3 deletions
@@ -87,7 +87,6 @@ void BaseConvolutionLayer<Dtype>::LayerSetUpInternal(LayerParam conv_param,
   quantize_method_ = conv_param.quantize_method();
   per_channel_scale_weight_ = conv_param.per_channel_scale_weight();
   per_channel_scale_output_ = conv_param.per_channel_scale_output();
-  submanifold_sparse_ = conv_param.submanifold_sparse();
   //CUSTOMIZATION-->
 
   // Setup pad dimensions (pad_).
@@ -391,7 +390,7 @@ void BaseConvolutionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
 
 template <typename Dtype>
 void BaseConvolutionLayer<Dtype>::forward_cpu_gemm(const Dtype* input,
-    const Dtype* weights, Dtype* output, bool skip_im2col, bool submanifold_sparse) {
+    const Dtype* weights, Dtype* output, bool skip_im2col) {
   const Dtype* col_buff = input;
   if (!is_1x1_) {
     if (!skip_im2col) {
@@ -403,7 +402,7 @@ void BaseConvolutionLayer<Dtype>::forward_cpu_gemm(const Dtype* input,
     caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
         group_, conv_out_spatial_dim_, kernel_dim_,
         (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
-        (Dtype)0., output + output_offset_ * g, submanifold_sparse);
+        (Dtype)0., output + output_offset_ * g);
   }
 }
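
Aside: forward_cpu_gemm realizes convolution as im2col followed by a (grouped) matrix multiply; this revert only trims its signature back to the stock form. A toy, self-contained sketch of that decomposition (hypothetical helper, single group, no padding/stride/dilation):

#include <vector>

// Toy im2col + GEMM convolution: (C_out x K) weights times (K x N) columns
// gives the (C_out x N) output, where K = C_in*KH*KW and N = OH*OW.
void toy_conv(const std::vector<float>& input,   // C_in x H x W
              const std::vector<float>& weights, // C_out x C_in x KH x KW
              std::vector<float>& output,        // C_out x OH x OW
              int C_in, int H, int W, int C_out, int KH, int KW) {
  const int OH = H - KH + 1, OW = W - KW + 1;    // "valid" convolution
  const int K = C_in * KH * KW, N = OH * OW;
  std::vector<float> col(K * N);
  // im2col: column j holds the receptive-field values for output position j.
  for (int c = 0; c < C_in; ++c)
    for (int kh = 0; kh < KH; ++kh)
      for (int kw = 0; kw < KW; ++kw)
        for (int oh = 0; oh < OH; ++oh)
          for (int ow = 0; ow < OW; ++ow)
            col[((c * KH + kh) * KW + kw) * N + oh * OW + ow] =
                input[c * H * W + (oh + kh) * W + (ow + kw)];
  // GEMM: output = weights (C_out x K) * col (K x N).
  output.assign(C_out * N, 0.f);
  for (int m = 0; m < C_out; ++m)
    for (int k = 0; k < K; ++k)
      for (int j = 0; j < N; ++j)
        output[m * N + j] += weights[m * K + k] * col[k * N + j];
}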

src/caffe/layers/conv_layer.cpp

Lines changed: 1 addition & 1 deletion
@@ -117,7 +117,7 @@ void ConvolutionLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     Dtype* top_data = top[i]->mutable_cpu_data();
     for (int n = 0; n < this->num_; ++n) {
       this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight,
-          top_data + n * this->top_dim_, false, this->submanifold_sparse_);
+          top_data + n * this->top_dim_);
       if (this->bias_term_) {
         const Dtype* bias = this->blobs_[1]->cpu_data();
         this->forward_cpu_bias(top_data + n * this->top_dim_, bias);

src/caffe/proto/caffe.proto

Lines changed: 2 additions & 2 deletions
@@ -1428,7 +1428,7 @@ message ConvolutionParameter {
   optional bool per_channel_scale_weight = 39 [default = false];
   // CUSTOMIZATION, whether to have per-channel scale & zero_points for output (scale data will be stored in caffemodel)
   optional bool per_channel_scale_output = 41 [default = false];
-  optional bool submanifold_sparse = 42 [default = false];
+
 
   //<--CUSTOMIZATION
   enum SaturateMethod {
@@ -2746,7 +2746,7 @@ message SqueezeConvolutionParameter {
   // CUSTOMIZATION, whether to have per-channel scale & zero_points for weights/bias (data will be stored in caffemodel)
   optional bool per_channel_scale_weight = 59 [default = false];
   optional bool per_channel_scale_output = 61 [default = false];
-  optional bool submanifold_sparse = 62 [default = false];
+
 
   //<--CUSTOMIZATION
   enum SaturateMethod {

src/caffe/util/math_functions.cpp

Lines changed: 2 additions & 38 deletions
@@ -20,54 +20,18 @@ template<>
 void caffe_cpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
     const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
     const float alpha, const float* A, const float* B, const float beta,
-    float* C, const bool submanifold_sparse) {
+    float* C) {
   int lda = (TransA == CblasNoTrans) ? K : M;
   int ldb = (TransB == CblasNoTrans) ? N : K;
-
   cblas_sgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B,
       ldb, beta, C, N);
-
-  if(submanifold_sparse)
-  {
-    // Submanifold sparse active site check
-    // weight: M*K  input blob: K*N -> output blob: M*N
-    //LOG(INFO)<<"K="<<K<<", M="<<M<<", N="<<N<<std::endl;
-    LOG(INFO)<<"Use submanifold sparse."<<std::endl;
-    for (int j=0; j<N; j++)
-    {
-      bool batch_center_active = false;
-      for (int b=0; b<M; b++)
-      {
-        bool center_active = false;
-        for(int i=0; i<K/M; i++)  // K=M*xyz
-        {
-          int index = (b*(K/M)+i)*N+j;
-          if(B[index] != 0)
-          {
-            if(i == (K/M)/2)
-            {
-              center_active = true;
-              //LOG(WARNING)<<"center active!!"<<std::endl;
-              //LOG(INFO)<<"index b="<<b<<", i="<<i<<", j="<<j<<", data="<<B[index]<<" ";
-            }
-          }
-        }
-        batch_center_active = batch_center_active || center_active;
-      }
-      if(!batch_center_active)
-      {
-        for(int t=0; t<M; t++)
-          C[t*N+j] = 0;
-      }
-    }
-  }
 }
 
 template<>
 void caffe_cpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
     const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
     const double alpha, const double* A, const double* B, const double beta,
-    double* C, const bool submanifold_sparse) {
+    double* C) {
   int lda = (TransA == CblasNoTrans) ? K : M;
   int ldb = (TransB == CblasNoTrans) ? N : K;
   cblas_dgemm(CblasRowMajor, TransA, TransB, M, N, K, alpha, A, lda, B,
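
Aside: the block removed above was a post-GEMM "active site" mask: output column j (one spatial position across all M output channels) was zeroed unless some im2col column had a nonzero value at its center tap. Note the original inner scan reduces to testing that single center element. A standalone sketch of the rule, with illustrative names and the original code's assumption that K == M * kernel_volume (its "K=M*xyz" comment):

// Hypothetical standalone version of the reverted mask, for illustration only.
// B: K x N im2col buffer (row-major), C: M x N GEMM output.
void submanifold_mask(const float* B, float* C, int M, int N, int K) {
  const int kernel_volume = K / M;
  for (int j = 0; j < N; ++j) {  // one output spatial position per column
    bool center_active = false;
    for (int b = 0; b < M && !center_active; ++b) {
      // Center tap of the receptive field feeding channel b at position j.
      const int center = (b * kernel_volume + kernel_volume / 2) * N + j;
      center_active = (B[center] != 0);
    }
    // Inactive site: zero this output position across all M channels.
    if (!center_active) {
      for (int t = 0; t < M; ++t) C[t * N + j] = 0;
    }
  }
}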
