Skip to content

Refine some error messages to avoid linking words together (part 6) #74520

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Aug 11, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/fluid/operators/collective/c_embedding_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ class CEmbeddingOpMaker : public framework::OpProtoAndCheckerMaker {
"and the out-of-bounds will be set to 0 ")
.SetDefault(0);
AddAttr<int64_t>("vocab_size",
"(int64, default -1), The total vocabulary size to check"
"(int64, default -1), The total vocabulary size to check "
"the out-of-bounds ids. If it is -1, no check will be ")
.SetDefault(-1);
AddComment(R"DOC(
Expand Down
13 changes: 7 additions & 6 deletions paddle/fluid/operators/memcpy_d2h_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -62,12 +62,13 @@ class MemcpyD2HOpProtoMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("Out",
"(phi::DenseTensor) The type of output "
"is the same as input X.");
AddAttr<int>("dst_place_type",
"Determine the dst place of tensor copy. "
"By Now it ONLY support XPU/CUDAPlace <-> CUDAPinnedPlace/CPU"
"Other place type is Unimplemented and will cause ERROR."
"0: dst is on CPUPlace. "
"1: dst is on CUDAPinnedPlace. ");
AddAttr<int>(
"dst_place_type",
"Determine the dst place of tensor copy. "
"By Now it ONLY support XPU/CUDAPlace <-> CUDAPinnedPlace/CPU. "
"Other place type is Unimplemented and will cause ERROR. "
"0: dst is on CPUPlace. "
"1: dst is on CUDAPinnedPlace. ");
AddComment(R"DOC(
MemcpyD2H Operator.
By now, it ONLY supports the memcopy between CUDAPlace <-> CUDAPinnedPlace/CPU.
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/operators/memcpy_h2d_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -64,10 +64,10 @@ class MemcpyH2DOpProtoMaker : public framework::OpProtoAndCheckerMaker {
"is the same as input X.");
AddAttr<int>("dst_place_type",
"Determine the dst place of tensor copy. "
"By Now it support:"
"0. CUDAPinnedPlace/CPU <->CUDAPlace"
"1. CPU <->XPUPlace"
"2. CPU <->IPUPlace"
"By Now it support: "
"0. CUDAPinnedPlace/CPU <->CUDAPlace. "
"1. CPU <->XPUPlace. "
"2. CPU <->IPUPlace. "
"Other place type is Unimplemented and will cause ERROR.");
AddComment(R"DOC(
MemcpyD2H Operator.
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/operators/sum_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -127,8 +127,8 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput(
"X",
"A Variable list. The shape and data type of the list elements"
"should be consistent. Variable can be multi-dimensional Tensor"
"A Variable list. The shape and data type of the list elements "
"should be consistent. Variable can be multi-dimensional Tensor "
"or phi::DenseTensor, and data types can be: float32, float64, int32, "
"int64.")
.AsDuplicable();
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/eager.cc
Original file line number Diff line number Diff line change
Expand Up @@ -901,7 +901,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
true,
common::errors::PreconditionNotMet(
"Could not parse args and kwargs successfully, "
"please check your input first and make"
"please check your input first and make "
"sure you are on the right way. "
"The expected arguments as follow: ("
"value, place, persistable, zero_copy, "
Expand Down Expand Up @@ -1307,7 +1307,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
true,
common::errors::PreconditionNotMet(
"Could not parse args and kwargs successfully, "
"please check your input first and make"
"please check your input first and make "
"sure you are on the right way. "
"The expected arguments as follow: ("
"value, zero_copy, name, dims)"));
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/eager_functions.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1353,8 +1353,8 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
PADDLE_ENFORCE_NE(
grad,
nullptr,
common::errors::Fatal("Detected nullptr grad"
"Please check if you have manually cleared"
common::errors::Fatal("Detected nullptr grad. "
"Please check if you have manually cleared "
"the grad inside autograd_meta"));
if (((*grad).has_allocation() || (*grad).is_dist_tensor()) &&
((*grad).dtype() == phi::DataType::FLOAT16 ||
Expand Down
3 changes: 2 additions & 1 deletion paddle/fluid/pybind/eager_generator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -502,6 +502,7 @@ static void SlotNameMatching(
grad_fwd_slotname_map[grad_slot_name] != fwd_slot_name) {
PADDLE_THROW(common::errors::Fatal(
"Detected mismatched slot names."
"Detected mismatched slot names: "
"grad_slot_name %s matches both %s and %s fwd_slot_name",
grad_slot_name,
grad_fwd_slotname_map[grad_slot_name],
Expand Down Expand Up @@ -536,7 +537,7 @@ static void SlotNameMatching(
if (grad_fwd_slotname_map.count(grad_slot_name) &&
grad_fwd_slotname_map[grad_slot_name] != fwd_slot_name) {
PADDLE_THROW(common::errors::Fatal(
"Detected mismatched slot names"
"Detected mismatched slot names: "
"grad_slot_name %s matches both %s and %s fwd_slot_name",
grad_slot_name,
grad_fwd_slotname_map[grad_slot_name],
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/pybind/eager_method.cc
Original file line number Diff line number Diff line change
Expand Up @@ -931,8 +931,8 @@ static PyObject* tensor_clear_gradient(TensorObject* self,
grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE(
grad != nullptr,
common::errors::Fatal("Detected nullptr grad"
"Please check if you have manually cleared"
common::errors::Fatal("Detected nullptr grad. "
"Please check if you have manually cleared "
"the grad inside autograd_meta"));
} else {
auto meta = egr::EagerUtils::unsafe_autograd_meta(self->tensor);
Expand Down Expand Up @@ -995,8 +995,8 @@ static PyObject* tensor__zero_grads(TensorObject* self,
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE(
grad != nullptr,
common::errors::Fatal("Detected nullptr grad"
"Please check if you have manually cleared"
common::errors::Fatal("Detected nullptr grad. "
"Please check if you have manually cleared "
"the grad inside autograd_meta"));
if (grad->initialized()) {
if (grad->is_dense_tensor() || grad->is_dist_tensor()) {
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/pybind/eager_properties.cc
Original file line number Diff line number Diff line change
Expand Up @@ -311,8 +311,8 @@ int tensor_properties_set_grad(TensorObject* self,
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE(
grad != nullptr,
common::errors::Fatal("Detected NULL grad"
"Please check if you have manually cleared"
common::errors::Fatal("Detected NULL grad. "
"Please check if you have manually cleared "
"the grad inside autograd_meta"));
const phi::distributed::ProcessMesh* mesh = nullptr;
if (InputsContainDistTensor(&mesh, src, self->tensor, *grad)) {
Expand All @@ -334,8 +334,8 @@ int tensor_properties_set_grad_(TensorObject* self,
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE(
grad != nullptr,
common::errors::Fatal("Detected NULL grad"
"Please check if you have manually cleared"
common::errors::Fatal("Detected NULL grad. "
"Please check if you have manually cleared "
"the grad inside autograd_meta"));
*grad = src;
return 0;
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/imperative.cc
Original file line number Diff line number Diff line change
Expand Up @@ -737,7 +737,7 @@ void BindImperative(py::module *m_ptr) {
} else {
PADDLE_THROW(common::errors::InvalidArgument(
"Incompatible Place Type: supports XPUPlace, CUDAPlace, "
"CPUPlace, IPUPlace, XPUPinnedPlace"
"CPUPlace, IPUPlace, XPUPinnedPlace "
"and CUDAPinnedPlace, "
"but got Unknown Type!"));
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/pir.cc
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ void PruneWithInput(const std::vector<pir::Value> &input_vars,
if (!input_vars_set.empty() && SomeInSet(op_results, input_vars_set)) {
PADDLE_THROW(common::errors::InvalidArgument(
"The input_var create by: '{%s}' is not involved in the "
"output_vars calculation"
"output_vars calculation. "
"Please remove it from input_vars.",
op->name()));
}
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/pybind/place.cc
Original file line number Diff line number Diff line change
Expand Up @@ -342,9 +342,9 @@ void BindPlace(pybind11::module &m) { // NOLINT
}
#else
LOG(ERROR) << string::Sprintf(
"Cannot use CustomDevice because you have installed CPU/GPU"
"Cannot use CustomDevice because you have installed CPU/GPU "
"version PaddlePaddle.\n"
"If you want to use CustomDevice, please try to install"
"If you want to use CustomDevice, please try to install "
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle\n"
"If you only have CPU, please change "
Expand Down
8 changes: 4 additions & 4 deletions paddle/fluid/pybind/pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2596,7 +2596,7 @@ All parameter, weight, gradient are variables in Paddle.
VLOG(1) << string::Sprintf(
"Cannot use get_all_device_type because you have installed "
"CPU/GPU version PaddlePaddle.\n"
"If you want to use get_all_device_type, please try to install"
"If you want to use get_all_device_type, please try to install "
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle\n");
#endif
Expand Down Expand Up @@ -2624,7 +2624,7 @@ All parameter, weight, gradient are variables in Paddle.
VLOG(1) << string::Sprintf(
"Cannot use get_available_device because you have installed "
"CPU/GPU version PaddlePaddle.\n"
"If you want to use get_available_device, please try to install"
"If you want to use get_available_device, please try to install "
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle\n");
#endif
Expand All @@ -2639,7 +2639,7 @@ All parameter, weight, gradient are variables in Paddle.
"Cannot use get_available_custom_device because you have "
"installed CPU/GPU version PaddlePaddle.\n"
"If you want to use get_available_custom_device, please try to "
"install"
"install "
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle\n");
#endif
Expand All @@ -2657,7 +2657,7 @@ All parameter, weight, gradient are variables in Paddle.
"Cannot use get_custom_device_count because you have "
"installed CPU/GPU version PaddlePaddle.\n"
"If you want to use get_custom_device_count, please try to "
"install"
"install "
"CustomDevice version "
"PaddlePaddle by: pip install paddlepaddle\n");
#endif
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/slice_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -529,7 +529,7 @@ static void ParseIndex(const paddle::Tensor& tensor,
PADDLE_ENFORCE_EQ(slice_tensor.shape()[i],
dim_len,
common::errors::OutOfRange(
"The shape of boolean index %d did not match"
"The shape of boolean index %d did not match "
"indexed tensor %d along axis %d.",
slice_tensor.shape()[0],
dim_len,
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/tensor_py.h
Original file line number Diff line number Diff line change
Expand Up @@ -512,7 +512,7 @@ void SetTensorFromPyArrayT(
} else {
PADDLE_THROW(common::errors::InvalidArgument(
"Incompatible place type: Tensor.set() supports "
"CPUPlace, CUDAPlace"
"CPUPlace, CUDAPlace "
"and CUDAPinnedPlace, but got %s!",
place));
}
Expand Down
4 changes: 2 additions & 2 deletions paddle/phi/backends/xpu/xpu_info.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,8 @@ PHI_DEFINE_EXPORTED_string(
"This option is useful when doing multi process training and "
"each process have only one device (XPU). If you want to use "
"all visible devices, set this to empty string. NOTE: the "
"reason of doing this is that we want to use P2P communication"
"between XPU devices, use XPU_VISIBLE_DEVICES can only use"
"reason of doing this is that we want to use P2P communication "
"between XPU devices, use XPU_VISIBLE_DEVICES can only use "
"share-memory only.");

namespace phi {
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/core/platform/cpu_helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ void SetNumThreads(int num_threads) {
return;
#else
PADDLE_THROW(common::errors::Unimplemented(
"This library (except OPENBLAS, MKLML) is not supported yet, so the"
"This library (except OPENBLAS, MKLML) is not supported yet, so the "
"number of threads cannot be set."));
#endif
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/core/platform/profiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -625,7 +625,7 @@ void EnableProfiler(ProfilerState state) {
PADDLE_ENFORCE_NE(state,
ProfilerState::kDisabled,
common::errors::InvalidArgument(
"Can't enable profiling, since the input state is"
"Can't enable profiling, since the input state is "
"ProfilerState::kDisabled"));
SynchronizeAllDevice();
std::lock_guard<std::mutex> l(profiler_mu);
Expand Down