
Commit 21ea542

test=Release/1.4, merge security issue fix (#16942)

* Security issue (#16774)
  * disable memory_optimize and inplace strategy by default, test=develop
  * fix security issues, test=develop:
    http://newicafe.baidu.com:80/issue/PaddleSec-3/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-8/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-12/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-32/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-35/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-37/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-40/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-43/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-44/show?from=page
    http://newicafe.baidu.com:80/issue/PaddleSec-45/show?from=page
  * revert piece.cc, test=develop
  * adjust api.cc, test=develop
* fix overflow by int32 mul test=develop (#16794)
  * fix overflow by int32 mul test=develop
  * fix reference nullptr
  * fix codestyle test=develop
  * modify to pointer in ContextProjectFunctor test=develop
  * modify . to -> test=develop
* test=release/1.4 cherry-pick (#16783) (#16794) (#16774) fix security issue
1 parent 07462de commit 21ea542

File tree

10 files changed (+56, -35 lines)


paddle/fluid/framework/op_desc.cc

Lines changed: 1 addition & 0 deletions

@@ -241,6 +241,7 @@ OpDesc::OpDesc(const std::string &type, const VariableNameMap &inputs,
   outputs_ = outputs;
   attrs_ = attrs;
   need_update_ = true;
+  block_ = nullptr;
 }

 OpDesc::OpDesc(const OpDesc &other, BlockDesc *block) {
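Before this change, the OpDesc constructor above left block_ unset, so reads of the member were indeterminate. A minimal standalone sketch of the hazard and the fix (not Paddle code; the class and member names are illustrative):

#include <cassert>
#include <string>

// Illustrative stand-in for a descriptor class whose block pointer was
// previously left uninitialized by one constructor overload.
class Desc {
 public:
  explicit Desc(const std::string &type) {
    type_ = type;
    block_ = nullptr;  // the fix: without this line, block_ holds an indeterminate value
  }
  bool HasBlock() const { return block_ != nullptr; }

 private:
  std::string type_;
  void *block_;  // stands in for BlockDesc *block_
};

int main() {
  Desc d("mul");
  assert(!d.HasBlock());  // well-defined only because block_ was explicitly set
  return 0;
}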

paddle/fluid/inference/api/analysis_predictor.cc

Lines changed: 3 additions & 0 deletions

@@ -259,6 +259,9 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
+
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
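The added PADDLE_ENFORCE_NOT_NULL calls reject a null destination or source before std::memcpy, which has undefined behavior on null pointers. A minimal standalone sketch of the same guard pattern (plain asserts stand in for PADDLE_ENFORCE_NOT_NULL; names are illustrative, not Paddle APIs):

#include <cassert>
#include <cstring>
#include <vector>

// Copy a feed buffer only after both pointers are verified,
// mirroring the checks added in SetFeed.
void CopyFeed(void *dst, const void *src, size_t bytes) {
  assert(dst != nullptr);  // stands in for PADDLE_ENFORCE_NOT_NULL(input_ptr)
  assert(src != nullptr);  // stands in for PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data())
  std::memcpy(dst, src, bytes);
}

int main() {
  std::vector<float> src(4, 1.0f), dst(4, 0.0f);
  CopyFeed(dst.data(), src.data(), src.size() * sizeof(float));
  return 0;
}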

paddle/fluid/inference/api/api.cc

Lines changed: 1 addition & 0 deletions

@@ -54,6 +54,7 @@ PaddleBuf &PaddleBuf::operator=(const PaddleBuf &other) {
     memory_owned_ = other.memory_owned_;
   } else {
     Resize(other.length());
+    PADDLE_ENFORCE(!(other.length() > 0 && other.data() == nullptr));
     memcpy(data_, other.data(), other.length());
     length_ = other.length();
     memory_owned_ = true;

paddle/fluid/inference/api/api_impl.cc

Lines changed: 5 additions & 0 deletions

@@ -169,6 +169,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
+  PADDLE_ENFORCE_NOT_NULL(dynamic_cast<NativePaddlePredictor *>(cls.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(nullptr)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;

@@ -210,6 +211,8 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
       return false;
     }

+    PADDLE_ENFORCE_NOT_NULL(input_ptr);
+    PADDLE_ENFORCE_NOT_NULL(inputs[i].data.data());
     if (platform::is_cpu_place(place_)) {
       // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
       std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),

@@ -316,6 +319,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
   }

   std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
+  PADDLE_ENFORCE_NOT_NULL(
+      dynamic_cast<NativePaddlePredictor *>(predictor.get()));
   if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
     return nullptr;
   }
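Besides the SetFeed checks, this file adds checks that dynamic_cast produced a non-null pointer before calling Init() through it; a failed dynamic_cast on a pointer yields nullptr, so the subsequent -> would dereference null. A small sketch of the pattern (illustrative class names; assert stands in for PADDLE_ENFORCE_NOT_NULL):

#include <cassert>
#include <memory>

struct Predictor { virtual ~Predictor() = default; };
struct NativePredictor : Predictor {
  bool Init() { return true; }
};

int main() {
  std::unique_ptr<Predictor> p(new NativePredictor);
  auto *native = dynamic_cast<NativePredictor *>(p.get());
  assert(native != nullptr);   // guard before using the cast result
  return native->Init() ? 0 : 1;
}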

paddle/fluid/inference/tests/api/analyzer_seq_conv1_tester.cc

Lines changed: 1 addition & 0 deletions

@@ -47,6 +47,7 @@ struct DataRecord {
       num_lines++;
       std::vector<std::string> data;
       split(line, '\t', &data);
+      PADDLE_ENFORCE(data.size() >= 4);
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
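The parsed record is indexed at data[0] and subsequent fields, so the added check rejects malformed lines before any out-of-bounds access. A standalone sketch of the same validation (plain C++; field layout and names are illustrative):

#include <cassert>
#include <sstream>
#include <string>
#include <vector>

// Split a tab-separated line and require at least four fields before indexing.
std::vector<std::string> ParseRecord(const std::string &line) {
  std::vector<std::string> fields;
  std::stringstream ss(line);
  std::string item;
  while (std::getline(ss, item, '\t')) fields.push_back(item);
  assert(fields.size() >= 4);  // stands in for PADDLE_ENFORCE(data.size() >= 4)
  return fields;
}

int main() {
  auto rec = ParseRecord("1 2 3\t4 5\t6\t7 8 9");
  return rec[3].empty() ? 1 : 0;  // indexing is safe only because the size was checked
}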

paddle/fluid/operators/affine_grid_op.h

Lines changed: 8 additions & 4 deletions

@@ -121,9 +121,11 @@ class AffineGridOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
       Tensor sliced_theta = theta->Slice(i, i + 1).Resize({2, 3});
-      Tensor sliced_out = output->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_out = output->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       blas.MatMul(sliced_grid, false, sliced_theta, true, T(1), &sliced_out,
                   T(0));
     }

@@ -161,8 +163,10 @@ class AffineGridGradOpKernel : public framework::OpKernel<T> {
     // TODO(wanghaoshuang): Refine batched matrix multiply
     auto blas = math::GetBlas<DeviceContext, T>(ctx);
     for (int i = 0; i < n; ++i) {
-      Tensor sliced_grid = grid.Slice(i, i + 1).Resize({h * w, 3});
-      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize({h * w, 2});
+      Tensor sliced_grid = grid.Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 3});
+      Tensor sliced_out_grad = output_grad->Slice(i, i + 1).Resize(
+          {static_cast<int64_t>(h) * static_cast<int64_t>(w), 2});
       Tensor sliced_theta_grad = theta_grad->Slice(i, i + 1).Resize({2, 3});
       blas.MatMul(sliced_out_grad, true, sliced_grid, false, T(1),
                   &sliced_theta_grad, T(0));
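This is the "fix overflow by int32 mul" part of the commit: h and w are 32-bit ints, so h * w is evaluated in int before being stored in a 64-bit shape entry, and large spatial sizes overflow (signed overflow is undefined behavior). Widening an operand to int64_t first keeps the whole product in 64 bits. A minimal illustration, not Paddle code:

#include <cstdint>
#include <iostream>

int main() {
  int h = 70000, w = 70000;  // hypothetical large feature-map size
  // h * w would be computed in 32-bit int and overflow (undefined behavior)
  // before the result is widened, e.g.:
  //   int64_t wrong = h * w;
  // Widening an operand first performs the multiplication in 64 bits:
  int64_t right = static_cast<int64_t>(h) * static_cast<int64_t>(w);
  std::cout << right << "\n";  // 4900000000
  return 0;
}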

paddle/fluid/operators/detection/gpc.cc

Lines changed: 5 additions & 0 deletions

@@ -24,6 +24,7 @@
 **/

 #include "paddle/fluid/operators/detection/gpc.h"
+#include "paddle/fluid/platform/enforce.h"

 namespace gpc {

@@ -689,6 +690,7 @@ static bbox *create_contour_bboxes(gpc_polygon *p) {

   gpc_malloc<bbox>(box, p->num_contours * sizeof(bbox),
                    const_cast<char *>("Bounding box creation"));
+  PADDLE_ENFORCE_NOT_NULL(box);

   /* Construct contour bounding boxes */
   for (c = 0; c < p->num_contours; c++) {

@@ -852,6 +854,7 @@ void gpc_add_contour(gpc_polygon *p, gpc_vertex_list *new_contour, int hole) {
   /* Create an extended hole array */
   gpc_malloc<int>(extended_hole, (p->num_contours + 1) * sizeof(int),
                   const_cast<char *>("contour hole addition"));
+  PADDLE_ENFORCE_NOT_NULL(extended_hole);

   /* Create an extended contour array */
   gpc_malloc<gpc_vertex_list>(extended_contour,

@@ -969,6 +972,7 @@ void gpc_polygon_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);

@@ -1604,6 +1608,7 @@ void gpc_tristrip_clip(gpc_op op, gpc_polygon *subj, gpc_polygon *clip,
   /* Build scanbeam table from scanbeam tree */
   gpc_malloc<double>(sbt, sbt_entries * sizeof(double),
                      const_cast<char *>("sbt creation"));
+  PADDLE_ENFORCE_NOT_NULL(sbt);
   build_sbt(&scanbeam, sbt, sbtree);
   scanbeam = 0;
   free_sbtree(&sbtree);
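Each new PADDLE_ENFORCE_NOT_NULL here verifies the buffer produced by gpc_malloc before later loops write through it, turning a failed allocation into an immediate error rather than a null-pointer write. A generic sketch of the pattern (standard malloc and assert used as stand-ins; not the gpc_malloc API):

#include <cassert>
#include <cstdlib>

int main() {
  const int sbt_entries = 8;
  // Allocate a table and verify the result before any element is touched,
  // mirroring the checks added after each gpc_malloc call.
  double *sbt = static_cast<double *>(std::malloc(sbt_entries * sizeof(double)));
  assert(sbt != nullptr);  // stands in for PADDLE_ENFORCE_NOT_NULL(sbt)
  for (int i = 0; i < sbt_entries; ++i) sbt[i] = 0.0;
  std::free(sbt);
  return 0;
}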

paddle/fluid/operators/math/context_project.h

Lines changed: 4 additions & 3 deletions

@@ -87,7 +87,7 @@ template <typename DeviceContext, typename T>
 class ContextProjectFunctor {
  public:
   void operator()(const DeviceContext& context, const LoDTensor& in,
-                  const Tensor& padding_data, bool padding_trainable,
+                  const Tensor* padding_data, bool padding_trainable,
                   const int context_start, const int context_length,
                   const int context_stride, const int up_pad,
                   const int down_pad, Tensor* col) {

@@ -132,6 +132,7 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
+      PADDLE_ENFORCE_NOT_NULL(padding_data);
       for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
         Tensor out_t = col->Slice(static_cast<int>(lod_level_0[i]),
                                   static_cast<int>(lod_level_0[i + 1]));

@@ -150,7 +151,7 @@ class ContextProjectFunctor {
                 k + context_length < up_pad ? context_length : up_pad - k;
             Tensor out_t_sub = out_t.Slice(k * context_length,
                                            k * context_length + padding_size);
-            Tensor w_sub = padding_data.Slice(k, k + padding_size);
+            Tensor w_sub = padding_data->Slice(k, k + padding_size);
             framework::TensorCopy(w_sub, context.GetPlace(), context,
                                   &out_t_sub);
           }

@@ -180,7 +181,7 @@ class ContextProjectFunctor {
             Tensor out_t_sub = out_t.Slice(
                 (down_pad_begin_row + t) * context_length - padding_size,
                 (down_pad_begin_row + t) * context_length);
-            Tensor w_sub = padding_data.Slice(
+            Tensor w_sub = padding_data->Slice(
                 up_pad + padding_idx, up_pad + padding_idx + padding_size);
             framework::TensorCopy(w_sub, context.GetPlace(), context,
                                   &out_t_sub);
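This is the "modify to pointer in ContextProjectFunctor" change: padding_data becomes a pointer because the tensor is optional. When padding_trainable is false the caller has no padding tensor, and the old reference parameter forced a dereference of a possibly null pointer at the call site; with a pointer parameter, the null check sits inside the branch that actually uses the data. A simplified sketch of the signature change (illustrative types; assert stands in for PADDLE_ENFORCE_NOT_NULL):

#include <cassert>
#include <vector>

using Tensor = std::vector<float>;  // stand-in for framework::Tensor

// Optional input passed by pointer; it is only required (and only checked)
// when the feature that needs it is enabled.
void Project(const Tensor *padding_data, bool padding_trainable) {
  if (padding_trainable) {
    assert(padding_data != nullptr);  // the check added inside the functor
    // ... copy slices of *padding_data into the output columns ...
  }
  // ... the rest of the projection never touches padding_data ...
}

int main() {
  Tensor pad(6, 0.5f);
  Project(&pad, true);      // trainable padding: tensor supplied
  Project(nullptr, false);  // no padding: safe, pointer never dereferenced
  return 0;
}

The callers in sequence_conv_op.h (next file) are updated accordingly to pass padding_data directly instead of dereferencing it.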

paddle/fluid/operators/sequence_ops/sequence_conv_op.h

Lines changed: 4 additions & 4 deletions

@@ -49,7 +49,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {

     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);

     framework::DDim col_shape = {in->dims()[0],
                                  context_length * sequence_width};

@@ -62,7 +62,7 @@ class SequenceConvKernel : public framework::OpKernel<T> {
     set_zero(dev_ctx, &col, static_cast<T>(0));
     math::ContextProjectFunctor<DeviceContext, T> seq_project_functor;

-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);

@@ -93,7 +93,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {

     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
-    int sequence_width = static_cast<int>(in->dims()[1]);
+    auto sequence_width = static_cast<int64_t>(in->dims()[1]);

     math::SetConstant<DeviceContext, T> set_zero;
     auto& dev_ctx = context.template device_context<DeviceContext>();

@@ -144,7 +144,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       padding_data = context.Input<Tensor>("PaddingData");
     }

-    seq_project_functor(dev_ctx, *in, *padding_data, padding_trainable,
+    seq_project_functor(dev_ctx, *in, padding_data, padding_trainable,
                         context_start, context_length, context_stride, up_pad,
                         down_pad, &col);

paddle/fluid/operators/squared_l2_distance_op.h

Lines changed: 24 additions & 24 deletions

@@ -77,6 +77,9 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     auto* x_g = context.Output<Tensor>(framework::GradVarName("X"));
     auto* y_g = context.Output<Tensor>(framework::GradVarName("Y"));

+    PADDLE_ENFORCE_NOT_NULL(x_g);
+    PADDLE_ENFORCE_NOT_NULL(y_g);
+
     auto sub_result = EigenMatrix<T>::From(*in0);
     auto out_grad = EigenMatrix<T>::From(*in1);

@@ -92,31 +95,28 @@ class SquaredL2DistanceGradKernel : public framework::OpKernel<T> {
     // propagate back to input
     auto& eigen_place =
         *context.template device_context<DeviceContext>().eigen_device();
-    if (x_g) {
-      x_g->mutable_data<T>(context.GetPlace());
-      // eigen matrix
-      auto x_grad =
-          EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
-      // dimensions are same with subResult
-      x_grad.device(eigen_place) = grad_mat;
-    }

-    if (y_g) {
-      y_g->mutable_data<T>(context.GetPlace());
-
-      PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
-                        "First dimension of gradient must be greater or "
-                        "equal than first dimension of target.");
-
-      if (sub_result.dimensions()[0] == y_dims[0]) {
-        auto y_grad =
-            EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
-        y_grad.device(eigen_place) = -1 * grad_mat;
-      } else {
-        auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
-        auto y_grad = EigenVector<T>::Flatten(*y_g);
-        y_grad.device(eigen_place) = col_sum_res;
-      }
+    x_g->mutable_data<T>(context.GetPlace());
+    // eigen matrix
+    auto x_grad =
+        EigenMatrix<T>::From(*x_g, framework::make_ddim({x_dims[0], cols}));
+    // dimensions are same with subResult
+    x_grad.device(eigen_place) = grad_mat;
+
+    y_g->mutable_data<T>(context.GetPlace());
+
+    PADDLE_ENFORCE_GE(sub_result.dimensions()[0], y_dims[0],
+                      "First dimension of gradient must be greater or "
+                      "equal than first dimension of target.");
+
+    if (sub_result.dimensions()[0] == y_dims[0]) {
+      auto y_grad =
+          EigenMatrix<T>::From(*y_g, framework::make_ddim({y_dims[0], cols}));
+      y_grad.device(eigen_place) = -1 * grad_mat;
+    } else {
+      auto col_sum_res = -1 * (grad_mat.sum(Eigen::array<int, 1>({{0}})));
+      auto y_grad = EigenVector<T>::Flatten(*y_g);
+      y_grad.device(eigen_place) = col_sum_res;
     }
   }
 };
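Here the if (x_g) / if (y_g) guards are replaced by hard PADDLE_ENFORCE_NOT_NULL checks at the top of the kernel, so a missing gradient output is reported as an error instead of being silently skipped, and the body no longer branches on the outputs' presence. A simplified sketch of the restructured control flow (illustrative types; assert stands in for PADDLE_ENFORCE_NOT_NULL):

#include <cassert>
#include <vector>

using Tensor = std::vector<float>;  // stand-in for framework::Tensor

// After the change, both gradient outputs are required up front;
// the body no longer checks whether each one was provided.
void ComputeGrad(Tensor *x_g, Tensor *y_g, const Tensor &grad) {
  assert(x_g != nullptr);  // was: if (x_g) { ... }
  assert(y_g != nullptr);  // was: if (y_g) { ... }
  x_g->assign(grad.begin(), grad.end());   // dX follows the incoming gradient
  y_g->resize(grad.size());
  for (size_t i = 0; i < grad.size(); ++i) (*y_g)[i] = -grad[i];  // dY is its negation
}

int main() {
  Tensor grad(3, 2.0f), x_g, y_g;
  ComputeGrad(&x_g, &y_g, grad);
  return y_g[0] == -2.0f ? 0 : 1;
}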
