Commit c73f00f

Merge pull request #7634 from Yancey1989/fix_compile_warning

Fix sequence_padding compile warning

2 parents 5139865 + 2ce5c9d, commit c73f00f

2 files changed: 32 additions, 28 deletions
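Every hunk below follows one pattern: `PADDLE_ENFORCE_EQ` compares a signed tensor dimension (`int64_t`, as returned by `seq.dims()`) against an unsigned LoD offset or a `size_t` local, which compilers flag as a signed/unsigned comparison. The fix casts the unsigned side to `int64_t` and declares the derived locals and loop counters as `int64_t`. A minimal, standalone repro of this warning class (illustrative only, not Paddle code):

```cpp
#include <cstdint>
#include <vector>

int main() {
  int64_t dim0 = 8;                      // signed, like seq.dims()[0]
  std::vector<size_t> offsets = {0, 8};  // unsigned, like LoD offsets

  // With -Wsign-compare (and -Werror) the next line would warn:
  //   "comparison of integer expressions of different signedness"
  // bool bad = (dim0 == offsets.back());

  // The commit's fix: put both operands in one signed type.
  bool ok = (dim0 == static_cast<int64_t>(offsets.back()));
  return ok ? 0 : 1;
}
```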

paddle/operators/math/sequence_padding.cc (20 additions, 18 deletions)
```diff
@@ -32,7 +32,8 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
 
@@ -41,32 +42,32 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequence_length, num_sequences, sequence_width].");
 
-    const size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    const int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be the "
                       "maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be the "
                       "number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
 
     const T* seq_data = seq.data<T>();
     T* padding_data = padding.data<T>();
-    for (size_t i = 0; i < max_sequence_length; ++i) {
-      for (size_t j = 0; j < num_sequences; ++j) {
-        size_t start_pos = abs_offset_lod[level][j];
-        size_t sequence_length = abs_offset_lod[level][j + 1] - start_pos;
+    for (int64_t i = 0; i < max_sequence_length; ++i) {
+      for (int64_t j = 0; j < num_sequences; ++j) {
+        int64_t start_pos = abs_offset_lod[level][j];
+        int64_t sequence_length = abs_offset_lod[level][j + 1] - start_pos;
         if (i < sequence_length) {
           // i > 0 => sequence_length > 0
           T scale =
               norm_by_times ? (1.0f / static_cast<T>(sequence_length)) : 1.0f;
-          for (size_t k = 0; k < sequence_width; ++k) {
+          for (int64_t k = 0; k < sequence_width; ++k) {
             padding_data[(i * num_sequences + j) * sequence_width + k] =
                 seq_data[(start_pos + i) * sequence_width + k] * scale;
           }
@@ -93,7 +94,8 @@ class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
 
@@ -102,31 +104,31 @@ class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequnece_length, num_sequences, sequence_width].");
 
-    const size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    const int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be "
                       "the maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be "
                       "the number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
 
     const T* padding_data = padding.data<T>();
     T* seq_data = seq.data<T>();
-    for (size_t i = 0; i < num_sequences; ++i) {
-      size_t start_pos = abs_offset_lod[level][i];
-      size_t sequence_length = abs_offset_lod[level][i + 1] - start_pos;
-      for (size_t j = 0; j < sequence_length; ++j) {
+    for (int64_t i = 0; i < num_sequences; ++i) {
+      int64_t start_pos = abs_offset_lod[level][i];
+      int64_t sequence_length = abs_offset_lod[level][i + 1] - start_pos;
+      for (int64_t j = 0; j < sequence_length; ++j) {
         // sequence_width > j > 0
         T scale =
             norm_by_times ? (1.0f / static_cast<T>(sequence_length)) : 1.0f;
-        for (size_t k = 0; k < sequence_width; ++k) {
+        for (int64_t k = 0; k < sequence_width; ++k) {
           seq_data[(start_pos + j) * sequence_width + k] =
               padding_data[(j * num_sequences + i) * sequence_width + k] *
               scale;
```
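For context on what the loops above compute, here is a self-contained sketch of the padding layout and index math (toy data; plain vectors stand in for LoDTensor/Tensor, `abs_offset` plays the role of `abs_offset_lod[level]`, and the `norm_by_times` scaling is omitted):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Pack two variable-length sequences into a [max_len, num_seqs, width]
// buffer, zero-padding the tail of the shorter one. This mirrors the
// index math of PaddingLoDTensorFunctor above; names are ours.
int main() {
  const std::vector<int64_t> abs_offset = {0, 3, 5};  // seq 0: rows 0..2, seq 1: rows 3..4
  const int64_t width = 2;
  const std::vector<float> seq = {1, 1, 2, 2, 3, 3,   // sequence 0 (3 rows)
                                  4, 4, 5, 5};        // sequence 1 (2 rows)

  const int64_t num_seqs = static_cast<int64_t>(abs_offset.size()) - 1;
  const int64_t max_len = 3;  // length of the longest sequence
  std::vector<float> padding(max_len * num_seqs * width, 0.0f);

  for (int64_t i = 0; i < max_len; ++i) {
    for (int64_t j = 0; j < num_seqs; ++j) {
      int64_t start = abs_offset[j];
      int64_t len = abs_offset[j + 1] - start;
      if (i < len) {
        for (int64_t k = 0; k < width; ++k) {
          padding[(i * num_seqs + j) * width + k] =
              seq[(start + i) * width + k];
        }
      }
    }
  }

  // Time-major dump: step i of sequence j. Step 2 of seq 1 stays 0 0.
  for (int64_t i = 0; i < max_len; ++i)
    for (int64_t j = 0; j < num_seqs; ++j)
      std::printf("t=%lld seq=%lld: %g %g\n", (long long)i, (long long)j,
                  padding[(i * num_seqs + j) * width + 0],
                  padding[(i * num_seqs + j) * width + 1]);
  return 0;
}
```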

paddle/operators/math/sequence_padding.cu (12 additions, 10 deletions)
```diff
@@ -71,7 +71,8 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
 
@@ -80,17 +81,17 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequence_length, num_sequences, sequence_width].");
 
-    size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be the "
                       "maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be the "
                       "number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
@@ -101,7 +102,7 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
       return;
     }
 
-    const size_t kBlockSize = 512;
+    const int64_t kBlockSize = 512;
 
     /* At least use 32 threads to copy sequence_width elements,
      * and at least 8 elements for each thread.
@@ -143,7 +144,8 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
 
@@ -152,17 +154,17 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequnece_length, num_sequences, sequence_width].");
 
-    size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be "
                       "the maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be "
                       "the number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
@@ -173,7 +175,7 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
      return;
    }
 
-    const size_t kBlockSize = 512;
+    const int64_t kBlockSize = 512;
 
    /* At least use 32 threads to copy sequence_width elements,
     * and at least 8 elements for each thread.
```
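Both CUDA hunks end just before the launch-configuration code that the block comment describes; that code sits outside the hunks, so the following is only a sketch of one way to satisfy "at least 32 threads to copy sequence_width elements, and at least 8 elements for each thread" under the (now `int64_t`) 512-thread cap. The helper name is ours, not Paddle's:

```cpp
#include <algorithm>
#include <cstdint>

// Hypothetical helper (the real computation lies outside the diff hunks):
// use roughly one thread per 8 elements of sequence_width, rounded up to a
// whole 32-thread warp, and capped at kBlockSize. For narrow sequences the
// 32-thread floor dominates, so each thread may copy fewer than 8 elements.
int64_t PickBlockDimX(int64_t sequence_width, int64_t kBlockSize = 512) {
  int64_t threads = (sequence_width + 7) / 8;  // ceil(width / 8)
  int64_t warps = (threads + 31) / 32;         // ceil to warp granularity
  return std::min(warps * 32, kBlockSize);     // in [32, kBlockSize]
}

// E.g. sequence_width = 100  -> 32 threads (one warp),
//      sequence_width = 4096 -> 512 threads (the kBlockSize cap).
```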
