Skip to content

Commit 37638c3

Browse files
sampathvi authored and pytorchmergebot committed
Addressing some linter errors (pytorch#158670)
Summary: Addressing the linter errors reported in the changed files. Test Plan: ``` buck test mode/opt deeplearning/fbgemm:QuantUtilsTest ``` https://www.internalfb.com/intern/testinfra/testrun/11821949118528688 ``` buck test mode/opt caffe2/torch/fb/model_transform/splitting/tests:split_dispatcher_test ``` https://www.internalfb.com/intern/testinfra/testrun/7881299627525465 Rollback Plan: Differential Revision: D78352311 Pull Request resolved: pytorch#158670 Approved by: https://github.com/excelle08, https://github.com/cyyever, https://github.com/digantdesai
1 parent ee2edf3 commit 37638c3

File tree

3 files changed

+36
-31
lines changed

3 files changed

+36
-31
lines changed

aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -83,10 +83,8 @@ void CopyICFirst3dTensorToChannelsLast3dTensor(
8383
for (int64_t i = 0; i < G * OC_G; ++i) {
8484
for (const auto j : c10::irange(inner_size)) {
8585
for (const auto ic : c10::irange(IC_G)) {
86-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
87-
int g = i / OC_G;
88-
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
89-
int oc = i % OC_G;
86+
int g = static_cast<int>(i / OC_G);
87+
int oc = static_cast<int>(i % OC_G);
9088
dst[(i * inner_size + j) * IC_G + ic] =
9189
src[((g * IC_G + ic) * OC_G + oc) * inner_size + j];
9290
}
@@ -112,24 +110,26 @@ fbgemm::conv_param_t<kSpatialDim> MakeFbgemmConvParam(
112110
std::array<int, kSpatialDim> image_shape_{};
113111
std::array<int, kSpatialDim> kernels_{};
114112
std::array<int, kSpatialDim> strides_{};
115-
std::array<int, kSpatialDim * 2> pads_{};
113+
std::array<int, kSpatialDim * 2ull> pads_{};
116114
std::array<int, kSpatialDim> dilations_{};
117115
std::array<int, kSpatialDim> output_padding_{};
118-
std::move(image_shape.begin(), image_shape.begin() + image_shape.size(), image_shape_.begin());
119116
std::move(
120-
kernels.begin(), kernels.begin() + kernels.size(), kernels_.begin());
117+
image_shape.begin(), image_shape.begin() + static_cast<int64_t>(image_shape.size()), image_shape_.begin());
121118
std::move(
122-
strides.begin(), strides.begin() + strides.size(), strides_.begin());
119+
kernels.begin(), kernels.begin() + static_cast<int64_t>(kernels.size()), kernels_.begin());
120+
std::move(
121+
strides.begin(), strides.begin() + static_cast<int64_t>(strides.size()), strides_.begin());
123122
std::move(
124123
dilations.begin(),
125-
dilations.begin() + dilations.size(),
124+
dilations.begin() + static_cast<int64_t>(dilations.size()),
126125
dilations_.begin());
127126
std::move(
128127
output_padding.begin(),
129-
output_padding.begin() + output_padding.size(),
128+
output_padding.begin() + static_cast<int64_t>(output_padding.size()),
130129
output_padding_.begin());
131-
std::copy(pads.begin(), pads.begin() + pads.size(), pads_.begin());
132-
std::move(pads.begin(), pads.begin() + pads.size(), pads_.begin() + pads.size());
130+
std::copy(pads.begin(), pads.begin() + static_cast<int64_t>(pads.size()), pads_.begin());
131+
const auto pads_size = static_cast<int64_t>(pads.size());
132+
std::move(pads.begin(), pads.begin() + pads_size, pads_.begin() + pads_size);
133133

134134
return fbgemm::conv_param_t<kSpatialDim>(
135135
N, // batch size
@@ -158,7 +158,7 @@ Tensor MakeStridedQTensorCPU(
158158
TORCH_CHECK(
159159
isQIntType(typeMetaToScalarType(dtype)),
160160
"ScalarType is not supported in new_qtensor_cpu.");
161-
int64_t size_bytes = nelements * dtype.itemsize();
161+
int64_t size_bytes = static_cast<int64_t>(nelements * dtype.itemsize());
162162
auto storage = c10::make_intrusive<StorageImpl>(
163163
StorageImpl::use_byte_size_t(),
164164
size_bytes,
@@ -531,8 +531,8 @@ int register_embedding_params() {
531531
TORCH_INTERNAL_ASSERT(longs.size() == 1, "EmbeddingPackedParams: Expected bit_rate to be serialized");
532532
TORCH_CHECK(version == 1, "EmbeddingPackedParams: Currently only version 1 supported.");
533533

534-
at::Tensor weight = std::move(tensors[0]);
535-
return PackedEmbeddingBagWeight::prepack(std::move(weight));
534+
const auto& weight = tensors[0];
535+
return PackedEmbeddingBagWeight::prepack(weight);
536536
})
537537
.def("bit_rate", &EmbeddingPackedParamsBase::bit_rate)
538538
.def("unpack", &EmbeddingPackedParamsBase::unpack)

aten/src/ATen/native/quantized/cpu/fbgemm_utils.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -380,7 +380,7 @@ struct TORCH_API PackedEmbeddingBagWeight : public EmbeddingPackedParamsBase {
380380

381381
at::Tensor unpack() override;
382382
static c10::intrusive_ptr<EmbeddingPackedParamsBase> prepack(
383-
at::Tensor weight);
383+
const at::Tensor& weight);
384384

385385
int64_t bit_rate() const override {
386386
return bit_rate_;

aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp

Lines changed: 20 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@
3333
* for each row along with the quantized weights.
3434
*/
3535
c10::intrusive_ptr<EmbeddingPackedParamsBase> PackedEmbeddingBagWeight::prepack(
36-
at::Tensor qweight) {
36+
const at::Tensor& qweight) {
3737
static constexpr int64_t version = 1;
3838
TORCH_CHECK(
3939
qweight.dim() == 2,
@@ -67,8 +67,8 @@ c10::intrusive_ptr<EmbeddingPackedParamsBase> PackedEmbeddingBagWeight::prepack(
6767
"Expect embedding_bag weights to be quantized using kPerChannelAffineFloatQParams");
6868
std::vector<float> weight_bias(embedding_rows);
6969

70-
at::Tensor channel_scales = qweight.q_per_channel_scales();
71-
at::Tensor channel_zero_points = qweight.q_per_channel_zero_points();
70+
const auto& channel_scales = qweight.q_per_channel_scales();
71+
const auto& channel_zero_points = qweight.q_per_channel_zero_points();
7272
std::vector<float> weight_scales(
7373
channel_scales.data_ptr<float>(),
7474
channel_scales.data_ptr<float>() + embedding_rows);
@@ -77,6 +77,11 @@ c10::intrusive_ptr<EmbeddingPackedParamsBase> PackedEmbeddingBagWeight::prepack(
7777
channel_zero_points.data_ptr<float>() + embedding_rows);
7878

7979
for (const auto i : c10::irange(embedding_rows)) {
80+
// As of now weight_zero_points and weight_scales are initialized with
81+
// the size of embedding_rows. Hence, this linter is a false positive.
82+
// However, if this assumption changes in the future, we need to
83+
// ensure that the bounds are checked.
84+
// NOLINTNEXTLINE(facebook-hte-LocalUncheckedArrayBounds)
8085
weight_bias[i] = weight_zero_points[i] * weight_scales[i] * -1;
8186
}
8287

@@ -237,16 +242,16 @@ Tensor& qembeddingbag_byte_prepack_out(Tensor& output, const Tensor& weight) {
237242

238243
const auto weight_sizes = weight.sizes();
239244
const auto cols_dim = weight_sizes.size() - 1;
240-
const int64_t embedding_rows = c10::size_to_dim_(cols_dim, weight_sizes);
241-
const int32_t embedding_cols = weight_sizes[cols_dim];
245+
const int64_t embedding_rows = c10::size_to_dim_(static_cast<int>(cols_dim), weight_sizes);
246+
const int32_t embedding_cols = static_cast<int32_t>(weight_sizes[cols_dim]);
242247
// Add 8 bytes per column to store FP32 scale and zero_point per row.
243-
const int32_t output_columns = embedding_cols + 2 * sizeof(float);
248+
const int32_t output_columns = static_cast<int32_t>(embedding_cols + 2 * sizeof(float));
244249
const auto weight_contig =
245250
weight.expect_contiguous(weight.suggest_memory_format());
246251

247252
// Adjust output dimensions to account for FP32 scale and zero_points.
248253
std::vector<int64_t> output_shape = weight_sizes.vec();
249-
output_shape[cols_dim] = output_columns;
254+
output_shape.at(cols_dim) = output_columns;
250255
at::native::resize_(output, output_shape, std::nullopt);
251256
auto* output_data = output.data_ptr<uint8_t>();
252257

@@ -330,13 +335,13 @@ Tensor qembeddingbag_byte_prepack_meta(const Tensor& weight) {
330335
"'embedding_bag_byte_prepack' only support float32 or float16.");
331336
const auto weight_sizes = weight.sizes();
332337
const auto cols_dim = weight_sizes.size() - 1;
333-
const int32_t embedding_cols = weight_sizes[cols_dim];
338+
const int32_t embedding_cols = static_cast<int32_t>(weight_sizes[cols_dim]);
334339
// Add 8 bytes per column to store FP32 scale and zero_point per row.
335-
const int32_t output_columns = embedding_cols + 2 * sizeof(float);
340+
const int32_t output_columns = static_cast<int32_t>(embedding_cols + 2 * sizeof(float));
336341

337342
// Adjust output dimensions to account for FP32 scale and zero_points.
338343
std::vector<int64_t> output_shape = weight_sizes.vec();
339-
output_shape[cols_dim] = output_columns;
344+
output_shape.at(cols_dim) = output_columns;
340345
at::SymDimVector output_shape_vec(output_shape);
341346

342347
return at::empty_symint(
@@ -407,7 +412,7 @@ Tensor _qembeddingbag_nbit_prepack_helper(
407412
bit_width,
408413
weight_data + start_idx * embedding_cols,
409414
end_idx - start_idx,
410-
embedding_cols,
415+
static_cast<int>(embedding_cols),
411416
output_data + start_idx * output_shape[1]);
412417
});
413418
} else {
@@ -418,7 +423,7 @@ Tensor _qembeddingbag_nbit_prepack_helper(
418423
bit_width,
419424
weight_data + start_idx * embedding_cols,
420425
end_idx - start_idx,
421-
embedding_cols,
426+
static_cast<int>(embedding_cols),
422427
output_data + start_idx * output_shape[1]);
423428
});
424429
}
@@ -475,7 +480,7 @@ Tensor _qembeddingbag_nbit_prepack_helper(
475480
std::uint8_t quantized = std::max(
476481
0,
477482
std::min<int>(
478-
lrintf((X - Xmin) * inverse_scale), (1 << bit_width) - 1));
483+
static_cast<int>(lrintf((X - Xmin) * inverse_scale)), (1 << bit_width) - 1));
479484
// We pack 2 4-bit values in a byte. Index 0 is packed in the lower
480485
// 4-bits and index 1 is packed in the upper 4-bits.
481486
if (col % NUM_ELEM_PER_BYTE == 0) {
@@ -528,8 +533,8 @@ Tensor qembeddingbag_2bit_prepack(
528533

529534
class QEmbeddingPackWeights final {
530535
public:
531-
static c10::intrusive_ptr<EmbeddingPackedParamsBase> run(at::Tensor weight) {
532-
return PackedEmbeddingBagWeight::prepack(std::move(weight));
536+
static c10::intrusive_ptr<EmbeddingPackedParamsBase> run(const at::Tensor& weight) {
537+
return PackedEmbeddingBagWeight::prepack(weight);
533538
}
534539
};
535540

0 commit comments

Comments (0)