Skip to content

Commit e0d4e04

Browse files
committed
fix some compiler warnings
test=develop
1 parent 8ea13e3 commit e0d4e04

File tree

9 files changed

+15
-15
lines changed

9 files changed

+15
-15
lines changed

paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -412,7 +412,7 @@ void DetachDeletedNodes(framework::ir::Graph *graph) {
412412
void SubGraphFuser::ReplaceNodesWithSubGraphs() {
413413
auto subgraphs = SubgraphDetector(graph_, node_inside_subgraph_teller_)();
414414
for (auto &subgraph : subgraphs) {
415-
if (subgraph.size() <= min_subgraph_size_) continue;
415+
if (subgraph.size() <= (size_t)min_subgraph_size_) continue;
416416
LOG(INFO) << "detect a subgraph size " << subgraph.size();
417417
std::unordered_set<Node *> subgraph_uniq(subgraph.begin(), subgraph.end());
418418
// replace this sub-graph with the first node. Two steps: 1. Create a Block

paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(framework::ir::Node *node,
114114
// it is either an OP's input or an OP's output.
115115

116116
auto &subgraph_nodes = *Agent(node).subgraph();
117-
for (int index = 0; index < block_desc.OpSize(); index++) {
117+
for (size_t index = 0; index < block_desc.OpSize(); index++) {
118118
framework::proto::OpDesc *op = block_desc.Op(index)->Proto();
119119
auto correspond_node = subgraph_nodes[index];
120120
PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type());

paddle/fluid/inference/tests/api/analyzer_dam_tester.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ struct DataRecord {
6969
num_lines++;
7070
std::vector<std::string> data;
7171
split(line, ',', &data);
72-
CHECK_EQ(data.size(), 2 * MAX_TURN_NUM + 3);
72+
CHECK_EQ(data.size(), (size_t)(2 * MAX_TURN_NUM + 3));
7373
// load turn data
7474
std::vector<int64_t> turns_tmp[MAX_TURN_NUM];
7575
for (int i = 0; i < MAX_TURN_NUM; ++i) {

paddle/fluid/operators/hash_op.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ class HashOp : public framework::OperatorWithKernel {
3838
std::vector<int64_t> out_dims;
3939
out_dims.reserve(dims.size() + 1);
4040
// copy all dims except the last one
41-
for (size_t i = 0u; i != dims.size() - 1; ++i) {
41+
for (int i = 0u; i != dims.size() - 1; ++i) {
4242
out_dims.emplace_back(dims[i]);
4343
}
4444
int num_hash = ctx->Attrs().Get<int>("num_hash");

paddle/fluid/operators/math/selected_rows_functor.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,7 @@ typename std::enable_if<
244244
std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
245245
elementwise_add_to(const DeviceContext& ctx, BlasT<DeviceContext, T>* blas,
246246
size_t data_len, const T* in, T* out) {
247-
for (int64_t i = 0; i < data_len; i++) {
247+
for (size_t i = 0; i < data_len; i++) {
248248
out[i] += in[i];
249249
}
250250
}

paddle/fluid/operators/math/sequence_pooling_test.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,23 +70,23 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
7070
EXPECT_EQ(in_grad.lod(), lod);
7171

7272
if (paddle::platform::is_cpu_place(*place)) {
73-
for (int64_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
73+
for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
7474
int64_t begin = in_grad.lod()[0][i];
7575
int64_t end = in_grad.lod()[0][i + 1];
7676
paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
77-
for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
77+
for (size_t j = 0; j != tmp.numel() / second_dim; ++j) {
7878
for (int64_t m = 0; m != second_dim; ++m) {
7979
EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
8080
out_grad.data<T>()[m + i * second_dim]);
8181
}
8282
}
8383
}
8484
} else {
85-
for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
85+
for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
8686
int64_t begin = cpu_in_grad.lod()[0][i];
8787
int64_t end = cpu_in_grad.lod()[0][i + 1];
8888
paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
89-
for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
89+
for (size_t j = 0; j != tmp.numel() / second_dim; ++j) {
9090
for (int64_t m = 0; m != second_dim; ++m) {
9191
EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
9292
cpu_out_grad.data<T>()[m + i * second_dim]);

paddle/fluid/operators/merge_ids_op.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,11 +43,11 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
4343
PADDLE_ENFORCE_EQ(ids.size(), outs.size(),
4444
"the number of Ids and Out should be the same");
4545

46-
int row_ids_size = 0;
46+
size_t row_ids_size = 0;
4747
int row_size = 0;
4848
int embedding_size = 0;
4949

50-
for (int i = 0; i < x_tensors.size(); ++i) {
50+
for (size_t i = 0; i < x_tensors.size(); ++i) {
5151
const auto *x_tensor = x_tensors[i];
5252
const auto *row_id = row_ids[i];
5353

@@ -66,7 +66,7 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
6666

6767
std::unordered_map<int64_t, std::tuple<int64_t, int64_t>>
6868
selected_rows_idx_map;
69-
for (int i = 0; i < x_tensors.size(); ++i) {
69+
for (size_t i = 0; i < x_tensors.size(); ++i) {
7070
const auto *row_id = row_ids[i];
7171

7272
for (int j = 0; j < row_id->numel(); ++j) {
@@ -78,7 +78,7 @@ class MergeIdsOpKernel : public framework::OpKernel<T> {
7878
PADDLE_ENFORCE_EQ(row_ids_size, selected_rows_idx_map.size(),
7979
"the rows and tensor map size should be the same");
8080

81-
for (int i = 0; i < outs.size(); ++i) {
81+
for (size_t i = 0; i < outs.size(); ++i) {
8282
auto *out_ids = ids[i];
8383
auto *out = outs[i];
8484

paddle/fluid/operators/ref_by_trainer_id_op.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ class RefByTrainerIdKernel : public framework::OpKernel<T> {
3838
} else {
3939
trainer_id = *trainer_id_data;
4040
}
41-
PADDLE_ENFORCE_LT(trainer_id, in_list.size());
41+
PADDLE_ENFORCE_LT((size_t)trainer_id, in_list.size());
4242
out->mutable_data<T>(context.GetPlace());
4343
out->ShareDataWith(*(in_list[trainer_id]));
4444
}

paddle/fluid/operators/split_ids_op.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ class SplitIdsOpKernel : public framework::OpKernel<T> {
6464
out_ids.resize(outs.size());
6565

6666
// split id by their shard_num.
67-
for (int i = 0; i < all_ids.size(); ++i) {
67+
for (size_t i = 0; i < all_ids.size(); ++i) {
6868
T id = all_ids[i];
6969
size_t shard_id = static_cast<size_t>(id) % shard_num;
7070
out_ids[shard_id].push_back(id);

0 commit comments

Comments
 (0)