
Commit dae8fa3

Fix linting
1 parent ef9236a commit dae8fa3

2 files changed: 36 additions, 19 deletions

src/caffe/layers/cudnn_conv_layer.cpp

Lines changed: 32 additions & 15 deletions
@@ -116,7 +116,7 @@ void CuDNNConvolutionLayer<Dtype>::Reshape(
   cudnnConvolutionFwdAlgoPerf_t fwd_algo_pref_[4];
   cudnnConvolutionBwdDataAlgoPerf_t bwd_data_algo_pref_[4];
 
-  //get memory sizes
+  // get memory sizes
   cudaMemGetInfo(&free_memory, &total_memory);
 #else
   // Specify workspace limit for kernels directly until we have a
@@ -142,46 +142,63 @@ void CuDNNConvolutionLayer<Dtype>::Reshape(
     // Note: Copied from https://github.com/Qengineering/caffe/tree/ssd/src/caffe/layers
 #if CUDNN_VERSION_MIN(8, 0, 0)
     // choose forward algorithm for filter
-    // in forward filter the CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED is not implemented in cuDNN 8
-    CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm_v7(handle_[0], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], 4, &RetCnt, fwd_algo_pref_));
+    // in forward filter the CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED is not
+    // implemented in cuDNN 8
+    CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm_v7(handle_[0],
+      bottom_descs_[i],
+      filter_desc_,
+      conv_descs_[i],
+      top_descs_[i],
+      4,
+      &RetCnt,
+      fwd_algo_pref_));
 
     found_conv_algorithm = false;
-    for(int n=0;n<RetCnt;n++){
+    for (int n = 0; n < RetCnt; n++) {
       if (fwd_algo_pref_[n].status == CUDNN_STATUS_SUCCESS &&
-          fwd_algo_pref_[n].algo != CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED &&
-          fwd_algo_pref_[n].memory < free_memory){
+          fwd_algo_pref_[n].algo != CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED &&  // NOLINT(whitespace/line_length)
+          fwd_algo_pref_[n].memory < free_memory) {
         found_conv_algorithm = true;
         fwd_algo_[i] = fwd_algo_pref_[n].algo;
         workspace_fwd_sizes_[i] = fwd_algo_pref_[n].memory;
         break;
       }
     }
-    if(!found_conv_algorithm) LOG(ERROR) << "cuDNN did not return a suitable algorithm for convolution.";
-    else{
+    if (!found_conv_algorithm) {
+      LOG(ERROR) << "cuDNN did not return a suitable algorithm for convolution.";  // NOLINT(whitespace/line_length)
+    } else {
       // choose backward algorithm for filter
       // for better or worse, just a fixed constant due to the missing
       // cudnnGetConvolutionBackwardFilterAlgorithm in cuDNN version 8.0
       bwd_filter_algo_[i] = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
-      //twice the amount of the forward search to be save
+      // twice the amount of the forward search to be save
      workspace_bwd_filter_sizes_[i] = 2*workspace_fwd_sizes_[i];
     }
 
     // choose backward algo for data
-    CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm_v7(handle_[0], filter_desc_, top_descs_[i], conv_descs_[i], bottom_descs_[i], 4, &RetCnt, bwd_data_algo_pref_));
+    CUDNN_CHECK(cudnnGetConvolutionBackwardDataAlgorithm_v7(handle_[0],
+      filter_desc_,
+      top_descs_[i],
+      conv_descs_[i],
+      bottom_descs_[i],
+      4,
+      &RetCnt,
+      bwd_data_algo_pref_));
 
     found_conv_algorithm = false;
-    for(int n=0;n<RetCnt;n++){
+    for (int n = 0; n < RetCnt; n++) {
       if (bwd_data_algo_pref_[n].status == CUDNN_STATUS_SUCCESS &&
-          bwd_data_algo_pref_[n].algo != CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD &&
-          bwd_data_algo_pref_[n].algo != CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED &&
-          bwd_data_algo_pref_[n].memory < free_memory){
+          bwd_data_algo_pref_[n].algo != CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD &&  // NOLINT(whitespace/line_length)
+          bwd_data_algo_pref_[n].algo != CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED &&  // NOLINT(whitespace/line_length)
+          bwd_data_algo_pref_[n].memory < free_memory) {
         found_conv_algorithm = true;
         bwd_data_algo_[i] = bwd_data_algo_pref_[n].algo;
         workspace_bwd_data_sizes_[i] = bwd_data_algo_pref_[n].memory;
         break;
       }
     }
-    if(!found_conv_algorithm) LOG(ERROR) << "cuDNN did not return a suitable algorithm for convolution.";
+    if (!found_conv_algorithm)
+      LOG(ERROR) << "cuDNN did not return a suitable algorithm for convolution.";  // NOLINT(whitespace/line_length)
 #else
     // choose forward and backward algorithms + workspace(s)
     CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[0],
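
For context on the cuDNN 8 path reformatted above: cudnnGetConvolutionForwardAlgorithm_v7 fills up to the requested number of cudnnConvolutionFwdAlgoPerf_t entries, ranked by expected performance, and the layer picks the first entry that succeeded, is not WINOGRAD_NONFUSED, and whose workspace fits in currently free GPU memory. A minimal standalone sketch of that selection pattern follows; the PickForwardAlgo helper name and free-standing form are illustrative only and not part of this commit.

#include <cuda_runtime.h>
#include <cudnn.h>

// Returns true and fills `algo`/`workspace` with the first ranked forward
// algorithm that succeeded, is not WINOGRAD_NONFUSED, and fits in currently
// free GPU memory; descriptor setup is assumed to be done by the caller.
bool PickForwardAlgo(cudnnHandle_t handle,
                     cudnnTensorDescriptor_t bottom_desc,
                     cudnnFilterDescriptor_t filter_desc,
                     cudnnConvolutionDescriptor_t conv_desc,
                     cudnnTensorDescriptor_t top_desc,
                     cudnnConvolutionFwdAlgo_t* algo,
                     size_t* workspace) {
  size_t free_memory = 0, total_memory = 0;
  cudaMemGetInfo(&free_memory, &total_memory);

  const int kRequested = 4;
  int returned = 0;
  cudnnConvolutionFwdAlgoPerf_t perf[kRequested];
  if (cudnnGetConvolutionForwardAlgorithm_v7(handle, bottom_desc, filter_desc,
                                             conv_desc, top_desc, kRequested,
                                             &returned, perf)
      != CUDNN_STATUS_SUCCESS) {
    return false;
  }

  // Results come back ranked; take the first usable one.
  for (int n = 0; n < returned; n++) {
    if (perf[n].status == CUDNN_STATUS_SUCCESS &&
        perf[n].algo != CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED &&
        perf[n].memory < free_memory) {
      *algo = perf[n].algo;
      *workspace = perf[n].memory;
      return true;
    }
  }
  return false;
}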

src/caffe/layers/cudnn_deconv_layer.cpp

Lines changed: 4 additions & 4 deletions
@@ -154,7 +154,7 @@ void CuDNNDeconvolutionLayer<Dtype>::Reshape(
 
 //   // We have found that CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM is
 //   // buggy. Thus, if this algo was chosen, choose winograd instead. If
-//   // winograd is not supported or workspace is larger than threshold, choose
+//   // winograd is not supported or workspace is larger than threshold, choose  // NOLINT(whitespace/line_length)
 //   // implicit_gemm instead.
 //   if (fwd_algo_[i] == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) {
 //     size_t winograd_workspace_size;
@@ -226,7 +226,7 @@ void CuDNNDeconvolutionLayer<Dtype>::Reshape(
 //       &workspace_bwd_data_sizes_[i]));
 //   }
 
-//   // reduce over all workspace sizes to get a maximum to allocate / reallocate
+//   // reduce over all workspace sizes to get a maximum to allocate / reallocate  // NOLINT(whitespace/line_length)
 //   size_t total_workspace_fwd = 0;
 //   size_t total_workspace_bwd_data = 0;
 //   size_t total_workspace_bwd_filter = 0;
@@ -255,7 +255,7 @@ void CuDNNDeconvolutionLayer<Dtype>::Reshape(
 //     // free the existing workspace and allocate a new (larger) one
 //     cudaFree(this->workspaceData);
 
-//     cudaError_t err = cudaMalloc(&(this->workspaceData), workspaceSizeInBytes);
+//     cudaError_t err = cudaMalloc(&(this->workspaceData), workspaceSizeInBytes);  // NOLINT(whitespace/line_length)
 //     if (err != cudaSuccess) {
 //       // force zero memory path
 //       for (int i = 0; i < bottom.size(); i++) {
@@ -278,7 +278,7 @@ void CuDNNDeconvolutionLayer<Dtype>::Reshape(
 
 //   // if we succeed in the allocation, set pointer aliases for workspaces
 //   for (int g = 0; g < (this->group_ * CUDNN_STREAMS_PER_GROUP); g++) {
-//     workspace[g] = reinterpret_cast<char *>(workspaceData) + g*max_workspace;
+//     workspace[g] = reinterpret_cast<char *>(workspaceData) + g*max_workspace;  // NOLINT(whitespace/line_length)
 //   }
 //   }
 
