diff --git a/paddle/phi/kernels/legacy/compare_kernel.h b/paddle/phi/kernels/legacy/compare_kernel.h
index 541ec10d244da..95ea7081a1cfa 100644
--- a/paddle/phi/kernels/legacy/compare_kernel.h
+++ b/paddle/phi/kernels/legacy/compare_kernel.h
@@ -19,42 +19,42 @@ limitations under the License. */
 namespace phi {
 
 template <typename T, typename Context>
-void LessThanRawKernel(const Context& ctx,
+void LessThanRawKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        int axis,
                        DenseTensor* out);
 
 template <typename T, typename Context>
-void LessEqualRawKernel(const Context& ctx,
+void LessEqualRawKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& y,
                         int axis,
                         DenseTensor* out);
 
 template <typename T, typename Context>
-void GreaterThanRawKernel(const Context& ctx,
+void GreaterThanRawKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& y,
                           int axis,
                           DenseTensor* out);
 
 template <typename T, typename Context>
-void GreaterEqualRawKernel(const Context& ctx,
+void GreaterEqualRawKernel(const Context& dev_ctx,
                            const DenseTensor& x,
                            const DenseTensor& y,
                            int axis,
                            DenseTensor* out);
 
 template <typename T, typename Context>
-void EqualRawKernel(const Context& ctx,
+void EqualRawKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     int axis,
                     DenseTensor* out);
 
 template <typename T, typename Context>
-void NotEqualRawKernel(const Context& ctx,
+void NotEqualRawKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        int axis,
diff --git a/paddle/phi/kernels/legacy/cpu/compare_kernel.cc b/paddle/phi/kernels/legacy/cpu/compare_kernel.cc
index 5b11c81f573a8..77800701c94b2 100644
--- a/paddle/phi/kernels/legacy/cpu/compare_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/compare_kernel.cc
@@ -25,23 +25,23 @@ template <typename T,
           typename Context,
           typename Functor,
           typename InverseFunctor>
-inline void CompareRawKernelImpl(const Context& ctx,
+inline void CompareRawKernelImpl(const Context& dev_ctx,
                                  const DenseTensor& x,
                                  const DenseTensor& y,
                                  int axis,
                                  DenseTensor* out) {
-  ctx.template Alloc<bool>(out);
+  dev_ctx.template Alloc<bool>(out);
   if (x.dims().size() >= y.dims().size()) {
     funcs::ElementwiseCompute<Functor, T, bool>(
-        ctx, x, y, Functor(), out, axis);
+        dev_ctx, x, y, Functor(), out, axis);
   } else {
     funcs::ElementwiseCompute<InverseFunctor, T, bool>(
-        ctx, x, y, InverseFunctor(), out, axis);
+        dev_ctx, x, y, InverseFunctor(), out, axis);
   }
 }
 
 template <typename T, typename Context>
-void LessThanRawKernel(const Context& ctx,
+void LessThanRawKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        int axis,
@@ -49,11 +49,11 @@ void LessThanRawKernel(const Context& ctx,
                        DenseTensor* out) {
   CompareRawKernelImpl<Context,
                        T,
                        funcs::LessThanFunctor<T>,
-                       funcs::GreaterThanFunctor<T>>(ctx, x, y, axis, out);
+                       funcs::GreaterThanFunctor<T>>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void LessEqualRawKernel(const Context& ctx,
+void LessEqualRawKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& y,
                         int axis,
@@ -61,11 +61,11 @@ void LessEqualRawKernel(const Context& ctx,
                         DenseTensor* out) {
   CompareRawKernelImpl<Context,
                        T,
                        funcs::LessEqualFunctor<T>,
-                       funcs::GreaterEqualFunctor<T>>(ctx, x, y, axis, out);
+                       funcs::GreaterEqualFunctor<T>>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void GreaterThanRawKernel(const Context& ctx,
+void GreaterThanRawKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& y,
                           int axis,
@@ -73,10 +73,10 @@ void GreaterThanRawKernel(const Context& ctx,
                           DenseTensor* out) {
   CompareRawKernelImpl<Context,
                        T,
                        funcs::GreaterThanFunctor<T>,
-                       funcs::LessThanFunctor<T>>(ctx, x, y, axis, out);
+                       funcs::LessThanFunctor<T>>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void GreaterEqualRawKernel(const Context& ctx,
+void GreaterEqualRawKernel(const Context& dev_ctx,
                            const DenseTensor& x,
                            const DenseTensor& y,
                            int axis,
@@ -84,10 +84,10 @@ void GreaterEqualRawKernel(const Context& ctx,
                            DenseTensor* out) {
   CompareRawKernelImpl<Context,
                        T,
                        funcs::GreaterEqualFunctor<T>,
-                       funcs::LessEqualFunctor<T>>(ctx, x, y, axis, out);
+                       funcs::LessEqualFunctor<T>>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void EqualRawKernel(const Context& ctx,
+void EqualRawKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     int axis,
@@ -95,10 +95,10 @@ void EqualRawKernel(const Context& ctx,
                     DenseTensor* out) {
   CompareRawKernelImpl<Context,
                        T,
                        funcs::EqualFunctor<T>,
-                       funcs::EqualFunctor<T>>(ctx, x, y, axis, out);
+                       funcs::EqualFunctor<T>>(dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void NotEqualRawKernel(const Context& ctx,
+void NotEqualRawKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        int axis,
@@ -106,7 +106,7 @@ void NotEqualRawKernel(const Context& ctx,
                        DenseTensor* out) {
   CompareRawKernelImpl<Context,
                        T,
                        funcs::NotEqualFunctor<T>,
-                       funcs::NotEqualFunctor<T>>(ctx, x, y, axis, out);
+                       funcs::NotEqualFunctor<T>>(dev_ctx, x, y, axis, out);
 }
 
 }  // namespace phi
diff --git a/paddle/phi/kernels/legacy/cpu/legacy_generate_proposals_kernel.cc b/paddle/phi/kernels/legacy/cpu/legacy_generate_proposals_kernel.cc
index 5e6249249b1ee..a77372f459202 100644
--- a/paddle/phi/kernels/legacy/cpu/legacy_generate_proposals_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/legacy_generate_proposals_kernel.cc
@@ -28,7 +28,7 @@ namespace phi {
 
 template <typename T>
 std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
-    const phi::CPUContext &ctx,
+    const phi::CPUContext &dev_ctx,
     const phi::DenseTensor &im_info_slice,
     const phi::DenseTensor &anchors,
     const phi::DenseTensor &variances,
@@ -44,7 +44,7 @@ std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
   // Sort index
   phi::DenseTensor index_t;
   index_t.Resize({scores_slice.numel()});
-  int *index = ctx.Alloc<int>(&index_t);
+  int *index = dev_ctx.Alloc<int>(&index_t);
   for (int i = 0; i < scores_slice.numel(); ++i) {
     index[i] = i;
   }
@@ -65,53 +65,54 @@ std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
   bbox_sel.Resize({index_t.numel(), 4});
   anchor_sel.Resize({index_t.numel(), 4});
   var_sel.Resize({index_t.numel(), 4});
-  ctx.Alloc<T>(&scores_sel);
-  ctx.Alloc<T>(&bbox_sel);
-  ctx.Alloc<T>(&anchor_sel);
-  ctx.Alloc<T>(&var_sel);
+  dev_ctx.Alloc<T>(&scores_sel);
+  dev_ctx.Alloc<T>(&bbox_sel);
+  dev_ctx.Alloc<T>(&anchor_sel);
+  dev_ctx.Alloc<T>(&var_sel);
 
-  phi::funcs::CPUGather<T>(ctx, scores_slice, index_t, &scores_sel);
-  phi::funcs::CPUGather<T>(ctx, bbox_deltas_slice, index_t, &bbox_sel);
-  phi::funcs::CPUGather<T>(ctx, anchors, index_t, &anchor_sel);
-  phi::funcs::CPUGather<T>(ctx, variances, index_t, &var_sel);
+  phi::funcs::CPUGather<T>(dev_ctx, scores_slice, index_t, &scores_sel);
+  phi::funcs::CPUGather<T>(dev_ctx, bbox_deltas_slice, index_t, &bbox_sel);
+  phi::funcs::CPUGather<T>(dev_ctx, anchors, index_t, &anchor_sel);
+  phi::funcs::CPUGather<T>(dev_ctx, variances, index_t, &var_sel);
 
   phi::DenseTensor proposals;
   proposals.Resize({index_t.numel(), 4});
-  ctx.Alloc<T>(&proposals);
-  phi::funcs::BoxCoder<T>(ctx, &anchor_sel, &bbox_sel, &var_sel, &proposals);
+  dev_ctx.Alloc<T>(&proposals);
+  phi::funcs::BoxCoder<T>(
+      dev_ctx, &anchor_sel, &bbox_sel, &var_sel, &proposals);
 
   phi::funcs::ClipTiledBoxes<T>(
-      ctx, im_info_slice, proposals, &proposals, false);
+      dev_ctx, im_info_slice, proposals, &proposals, false);
 
   phi::DenseTensor keep;
   phi::funcs::FilterBoxes<T>(
-      ctx, &proposals, min_size, im_info_slice, true, &keep);
+      dev_ctx, &proposals, min_size, im_info_slice, true, &keep);
   // Handle the case when there is no keep index left
   if (keep.numel() == 0) {
     phi::funcs::SetConstant<phi::CPUContext, T> set_zero;
     bbox_sel.Resize({1, 4});
-    ctx.Alloc<T>(&bbox_sel);
-    set_zero(ctx, &bbox_sel, static_cast<T>(0));
+    dev_ctx.Alloc<T>(&bbox_sel);
+    set_zero(dev_ctx, &bbox_sel, static_cast<T>(0));
     phi::DenseTensor scores_filter;
     scores_filter.Resize({1, 1});
-    ctx.Alloc<T>(&scores_filter);
-    set_zero(ctx, &scores_filter, static_cast<T>(0));
+    dev_ctx.Alloc<T>(&scores_filter);
+    set_zero(dev_ctx, &scores_filter, static_cast<T>(0));
     return std::make_pair(bbox_sel, scores_filter);
   }
 
   phi::DenseTensor scores_filter;
   bbox_sel.Resize({keep.numel(), 4});
   scores_filter.Resize({keep.numel(), 1});
-  ctx.Alloc<T>(&bbox_sel);
-  ctx.Alloc<T>(&scores_filter);
-  phi::funcs::CPUGather<T>(ctx, proposals, keep, &bbox_sel);
-  phi::funcs::CPUGather<T>(ctx, scores_sel, keep, &scores_filter);
+  dev_ctx.Alloc<T>(&bbox_sel);
+  dev_ctx.Alloc<T>(&scores_filter);
+  phi::funcs::CPUGather<T>(dev_ctx, proposals, keep, &bbox_sel);
+  phi::funcs::CPUGather<T>(dev_ctx, scores_sel, keep, &scores_filter);
   if (nms_thresh <= 0) {
     return std::make_pair(bbox_sel, scores_filter);
   }
 
   phi::DenseTensor keep_nms =
-      phi::funcs::NMS<T>(ctx, &bbox_sel, &scores_filter, nms_thresh, eta);
+      phi::funcs::NMS<T>(dev_ctx, &bbox_sel, &scores_filter, nms_thresh, eta);
 
   if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
     keep_nms.Resize({post_nms_top_n});
@@ -119,10 +120,10 @@ std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
 
   proposals.Resize({keep_nms.numel(), 4});
   scores_sel.Resize({keep_nms.numel(), 1});
-  ctx.Alloc<T>(&proposals);
-  ctx.Alloc<T>(&scores_sel);
-  phi::funcs::CPUGather<T>(ctx, bbox_sel, keep_nms, &proposals);
-  phi::funcs::CPUGather<T>(ctx, scores_filter, keep_nms, &scores_sel);
+  dev_ctx.Alloc<T>(&proposals);
+  dev_ctx.Alloc<T>(&scores_sel);
+  phi::funcs::CPUGather<T>(dev_ctx, bbox_sel, keep_nms, &proposals);
+  phi::funcs::CPUGather<T>(dev_ctx, scores_filter, keep_nms, &scores_sel);
 
   return std::make_pair(proposals, scores_sel);
 }
diff --git a/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc b/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc
index 85347e71c606f..d475c5fec98d9 100644
--- a/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc
+++ b/paddle/phi/kernels/legacy/cpu/one_hot_kernel.cc
@@ -25,20 +25,20 @@ struct OneHotV2OpFunctor {
   const DenseTensor* in_;
   DenseTensor* out_;
   int depth_;
-  const DeviceContext& ctx_;
+  const DeviceContext& dev_ctx_;
 
   OneHotV2OpFunctor(const DenseTensor* in,
                     DenseTensor* out,
                     int depth,
-                    const DeviceContext& ctx)
-      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
+                    const DeviceContext& dev_ctx)
+      : in_(in), out_(out), depth_(depth), dev_ctx_(dev_ctx) {}
 
   template <typename OutT>
   void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
-    auto* p_out_data = ctx_.template Alloc<OutT>(out_);
-    funcs::set_constant(ctx_, out_, 0.0);
+    auto* p_out_data = dev_ctx_.template Alloc<OutT>(out_);
+    funcs::set_constant(dev_ctx_, out_, 0.0);
 
     for (int i = 0; i < numel; ++i) {
       PADDLE_ENFORCE_GE(
diff --git a/paddle/phi/kernels/legacy/gpu/layer_norm_cuda_kernel.h b/paddle/phi/kernels/legacy/gpu/layer_norm_cuda_kernel.h
index 39bd2837b9e45..f6d81228b34b6 100644
--- a/paddle/phi/kernels/legacy/gpu/layer_norm_cuda_kernel.h
+++ b/paddle/phi/kernels/legacy/gpu/layer_norm_cuda_kernel.h
@@ -943,7 +943,7 @@ void HostApplyRMSNorm(V* output,
 }
 
 template <typename T, typename Context>
-void cuda_rms_norm(const Context& ctx,
+void cuda_rms_norm(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& scale,
                    int rows,
@@ -960,7 +960,7 @@ void cuda_rms_norm(const Context& ctx,
           cols,                                                  \
           epsilon,                                               \
           const_cast<scalar_t_out*>(scale.data<scalar_t_out>()), \
-          ctx.stream())
+          dev_ctx.stream())
   // scale.dtype() same as y->dtype()
   if (scale.dtype() == phi::DataType::FLOAT32) {
     DISPATCH_FWD_CASE(float);
@@ -971,7 +971,7 @@ void cuda_rms_norm(const Context& ctx,
 }
 
 template <typename T, typename U, typename V, typename Context>
-void HostRMSNormGradient(const Context& ctx,
+void HostRMSNormGradient(const Context& dev_ctx,
                          const V* dout,
                          const U* invvar,
                          const DenseTensor& input,
@@ -992,7 +992,7 @@ void HostRMSNormGradient(const Context& ctx,
     const int nshared2 = nshared2_a > nshared2_b ? nshared2_a : nshared2_b;
     auto place = input.place();
     DenseTensor part_grad_gamma =
-        phi::Empty<U, Context>(ctx, {part_size, n2});
+        phi::Empty<U, Context>(dev_ctx, {part_size, n2});
     cuComputePartGradGammaBeta<<<blocks2, threads2, nshared2, stream>>>(
         dout,
         input.data<T>(),
@@ -1038,7 +1038,7 @@ void HostRMSNormGradient(const Context& ctx,
 }
 
 template <typename T, typename Context>
-void cuda_rms_norm_gradient(const Context& ctx,
+void cuda_rms_norm_gradient(const Context& dev_ctx,
                             const DenseTensor& x,
                             const DenseTensor& scale,
                             const DenseTensor& invvar,
@@ -1050,7 +1050,7 @@ void cuda_rms_norm_gradient(const Context& ctx,
                             DenseTensor* grad_scale) {
 #define DISPATCH_BWD_CASE(scalar_t_out)     \
   HostRMSNormGradient(                      \
-      ctx,                                  \
+      dev_ctx,                              \
       dy.data<scalar_t_out>(),              \
       invvar.data<float>(),                 \
       x,                                    \
@@ -1060,7 +1060,7 @@ void cuda_rms_norm_gradient(const Context& ctx,
       epsilon,                              \
       grad_x->data<T>(),                    \
       grad_scale->data<scalar_t_out>(),     \
-      ctx.stream())
+      dev_ctx.stream())
   if (scale.dtype() == phi::DataType::FLOAT32) {
     DISPATCH_BWD_CASE(float);
   } else if (scale.dtype() == phi::DataType::BFLOAT16) {
diff --git a/paddle/phi/kernels/legacy/gpu/legacy_generate_proposals_kernel.cu b/paddle/phi/kernels/legacy/gpu/legacy_generate_proposals_kernel.cu
index c7630a3717a41..90e1a9f1c498a 100644
--- a/paddle/phi/kernels/legacy/gpu/legacy_generate_proposals_kernel.cu
+++ b/paddle/phi/kernels/legacy/gpu/legacy_generate_proposals_kernel.cu
@@ -30,7 +30,7 @@ namespace phi {
 namespace {
 template <typename T>
 static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
-    const phi::GPUContext &ctx,
+    const phi::GPUContext &dev_ctx,
     const phi::DenseTensor &im_info,
     const phi::DenseTensor &anchors,
     const phi::DenseTensor &variances,
@@ -43,7 +43,7 @@ static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
     float eta) {
   // 1. pre nms
   phi::DenseTensor scores_sort, index_sort;
-  phi::funcs::SortDescending<T>(ctx, scores, &scores_sort, &index_sort);
+  phi::funcs::SortDescending<T>(dev_ctx, scores, &scores_sort, &index_sort);
   int num = scores.numel();
   int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num)
                         ? scores.numel()
                         : pre_nms_top_n;
@@ -53,10 +53,10 @@ static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
   // 2. box decode and clipping
   phi::DenseTensor proposals;
   proposals.Resize({pre_nms_num, 4});
-  ctx.Alloc<T>(&proposals);
+  dev_ctx.Alloc<T>(&proposals);
 
   {
-    phi::funcs::ForRange<phi::GPUContext> for_range(ctx, pre_nms_num);
+    phi::funcs::ForRange<phi::GPUContext> for_range(dev_ctx, pre_nms_num);
     for_range(phi::funcs::BoxDecodeAndClipFunctor<T>{anchors.data<T>(),
                                                      bbox_deltas.data<T>(),
                                                      variances.data<T>(),
@@ -69,10 +69,10 @@ static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
   phi::DenseTensor keep_index, keep_num_t;
   keep_index.Resize({pre_nms_num});
   keep_num_t.Resize({1});
-  ctx.Alloc<int>(&keep_index);
-  ctx.Alloc<int>(&keep_num_t);
+  dev_ctx.Alloc<int>(&keep_index);
+  dev_ctx.Alloc<int>(&keep_num_t);
   min_size = std::max(min_size, 1.0f);
-  auto stream = ctx.stream();
+  auto stream = dev_ctx.stream();
   phi::funcs::FilterBBoxes<T, 512>
       <<<1, 512, 0, stream>>>(proposals.data<T>(),
                               im_info.data<T>(),
                               min_size,
                               pre_nms_num,
                               keep_num_t.data<int>(),
                               keep_index.data<int>());
   int keep_num;
-  const auto gpu_place = ctx.GetPlace();
+  const auto gpu_place = dev_ctx.GetPlace();
   phi::memory_utils::Copy(phi::CPUPlace(),
                           &keep_num,
                           gpu_place,
                           keep_num_t.data<int>(),
                           sizeof(int),
-                          ctx.stream());
-  ctx.Wait();
+                          dev_ctx.stream());
+  dev_ctx.Wait();
   keep_index.Resize({keep_num});
 
   phi::DenseTensor scores_filter, proposals_filter;
@@ -97,18 +97,18 @@ static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
     phi::funcs::SetConstant<phi::GPUContext, T> set_zero;
     proposals_filter.Resize({1, 4});
     scores_filter.Resize({1, 1});
-    ctx.Alloc<T>(&proposals_filter);
-    ctx.Alloc<T>(&scores_filter);
-    set_zero(ctx, &proposals_filter, static_cast<T>(0));
-    set_zero(ctx, &scores_filter, static_cast<T>(0));
+    dev_ctx.Alloc<T>(&proposals_filter);
+    dev_ctx.Alloc<T>(&scores_filter);
+    set_zero(dev_ctx, &proposals_filter, static_cast<T>(0));
+    set_zero(dev_ctx, &scores_filter, static_cast<T>(0));
     return std::make_pair(proposals_filter, scores_filter);
   }
   proposals_filter.Resize({keep_num, 4});
   scores_filter.Resize({keep_num, 1});
-  ctx.Alloc<T>(&proposals_filter);
-  ctx.Alloc<T>(&scores_filter);
-  phi::funcs::GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
-  phi::funcs::GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
+  dev_ctx.Alloc<T>(&proposals_filter);
+  dev_ctx.Alloc<T>(&scores_filter);
+  phi::funcs::GPUGather<T>(dev_ctx, proposals, keep_index, &proposals_filter);
+  phi::funcs::GPUGather<T>(dev_ctx, scores_sort, keep_index, &scores_filter);
 
   if (nms_thresh <= 0) {
     return std::make_pair(proposals_filter, scores_filter);
@@ -116,7 +116,8 @@ static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
 
   // 4. nms
   phi::DenseTensor keep_nms;
-  phi::funcs::NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms);
+  phi::funcs::NMS<T>(
+      dev_ctx, proposals_filter, keep_index, nms_thresh, &keep_nms);
   if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
     keep_nms.Resize({post_nms_top_n});
   }
@@ -124,10 +125,10 @@ static std::pair<phi::DenseTensor, phi::DenseTensor> ProposalForOneImage(
   phi::DenseTensor scores_nms, proposals_nms;
   proposals_nms.Resize({keep_nms.numel(), 4});
   scores_nms.Resize({keep_nms.numel(), 1});
-  ctx.Alloc<T>(&proposals_nms);
-  ctx.Alloc<T>(&scores_nms);
-  phi::funcs::GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
-  phi::funcs::GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
+  dev_ctx.Alloc<T>(&proposals_nms);
+  dev_ctx.Alloc<T>(&scores_nms);
+  phi::funcs::GPUGather<T>(dev_ctx, proposals_filter, keep_nms, &proposals_nms);
+  phi::funcs::GPUGather<T>(dev_ctx, scores_filter, keep_nms, &scores_nms);
 
   return std::make_pair(proposals_nms, scores_nms);
 }
diff --git a/paddle/phi/kernels/legacy/gpu/one_hot_kernel.cu b/paddle/phi/kernels/legacy/gpu/one_hot_kernel.cu
index c64f2e2d75566..8030231e7fa02 100644
--- a/paddle/phi/kernels/legacy/gpu/one_hot_kernel.cu
+++ b/paddle/phi/kernels/legacy/gpu/one_hot_kernel.cu
@@ -44,24 +44,24 @@ template <typename DeviceContext, typename InT>
 struct OneHotV2OpCUDAFunctor {
   const DenseTensor* in_;
   DenseTensor* out_;
-  const DeviceContext& ctx_;
+  const DeviceContext& dev_ctx_;
   int depth_;
 
   OneHotV2OpCUDAFunctor(const DenseTensor* in,
                         DenseTensor* out,
                         int depth,
-                        const DeviceContext& ctx)
-      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
+                        const DeviceContext& dev_ctx)
+      : in_(in), out_(out), depth_(depth), dev_ctx_(dev_ctx) {}
 
   template <typename OutT>
   void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
-    auto* p_out_data = ctx_.template Alloc<OutT>(out_);
-    auto stream = ctx_.stream();
-    funcs::set_constant(ctx_, out_, 0.0);
+    auto* p_out_data = dev_ctx_.template Alloc<OutT>(out_);
+    auto stream = dev_ctx_.stream();
+    funcs::set_constant(dev_ctx_, out_, 0.0);
 
-    auto config = phi::backends::gpu::GetGpuLaunchConfig1D(ctx_, numel);
+    auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx_, numel);
     FillOutputKernel<<<config.block_per_grid,
                        config.thread_per_block,
                        0,
                        stream>>>(p_in_data, p_out_data, numel, depth_);
diff --git a/paddle/phi/kernels/legacy/kps/compare_kernel.cu b/paddle/phi/kernels/legacy/kps/compare_kernel.cu
--- a/paddle/phi/kernels/legacy/kps/compare_kernel.cu
+++ b/paddle/phi/kernels/legacy/kps/compare_kernel.cu
@@ ... @@
 template <typename T, typename Context, typename Functor>
-inline void CompareRawKernelImpl(const Context& ctx,
+inline void CompareRawKernelImpl(const Context& dev_ctx,
                                  const DenseTensor& x,
                                  const DenseTensor& y,
                                  int axis,
                                  DenseTensor* out) {
-  ctx.template Alloc<bool>(out);
+  dev_ctx.template Alloc<bool>(out);
   out->set_type(phi::DataType::BOOL);
   if (out->numel() == 0) return;
   std::vector<const DenseTensor*> ins{&x, &y};
   std::vector<DenseTensor*> outs{out};
-  funcs::BroadcastKernel<bool>(ctx, ins, &outs, Functor(), axis);
+  funcs::BroadcastKernel<bool>(dev_ctx, ins, &outs, Functor(), axis);
 }
 
 template <typename T, typename Context>
-void LessThanRawKernel(const Context& ctx,
+void LessThanRawKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        int axis,
                        DenseTensor* out) {
   CompareRawKernelImpl<T, Context, funcs::LessThanFunctor<T>>(
-      ctx, x, y, axis, out);
+      dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void LessEqualRawKernel(const Context& ctx,
+void LessEqualRawKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& y,
                         int axis,
                         DenseTensor* out) {
   CompareRawKernelImpl<T, Context, funcs::LessEqualFunctor<T>>(
-      ctx, x, y, axis, out);
+      dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void GreaterThanRawKernel(const Context& ctx,
+void GreaterThanRawKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& y,
                           int axis,
                           DenseTensor* out) {
   CompareRawKernelImpl<T, Context, funcs::GreaterThanFunctor<T>>(
-      ctx, x, y, axis, out);
+      dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void GreaterEqualRawKernel(const Context& ctx,
+void GreaterEqualRawKernel(const Context& dev_ctx,
                            const DenseTensor& x,
                            const DenseTensor& y,
                            int axis,
                            DenseTensor* out) {
   CompareRawKernelImpl<T, Context, funcs::GreaterEqualFunctor<T>>(
-      ctx, x, y, axis, out);
+      dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void EqualRawKernel(const Context& ctx,
+void EqualRawKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& y,
                     int axis,
                     DenseTensor* out) {
   CompareRawKernelImpl<T, Context, funcs::EqualFunctor<T>>(
-      ctx, x, y, axis, out);
+      dev_ctx, x, y, axis, out);
 }
 
 template <typename T, typename Context>
-void NotEqualRawKernel(const Context& ctx,
+void NotEqualRawKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& y,
                        int axis,
                        DenseTensor* out) {
   CompareRawKernelImpl<T, Context, funcs::NotEqualFunctor<T>>(
-      ctx, x, y, axis, out);
+      dev_ctx, x, y, axis, out);
 }
 
 }  // namespace phi
diff --git a/paddle/phi/kernels/legacy/xpu/compare_kernel.cc b/paddle/phi/kernels/legacy/xpu/compare_kernel.cc
index 3d461517ac6f2..4253b86915d45 100644
--- a/paddle/phi/kernels/legacy/xpu/compare_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/compare_kernel.cc
@@ -61,13 +61,13 @@ void XPUCompareRawKernelImpl(
                        int axis,                                   \
                        DenseTensor* out) {                         \
     using XPUType = typename XPUTypeTrait<T>::Type;                \
-    auto f = [](xpu::Context* ctx,                                 \
+    auto f = [](xpu::Context* xpu_ctx,                             \
                 const XPUType* x,                                  \
                 const XPUType* y,                                  \
                 bool* z,                                           \
                 const std::vector<int>& xshape,                    \
                 const std::vector<int>& yshape) {                  \
-      return functor(ctx, x, y, z, xshape, yshape);                \
+      return functor(xpu_ctx, x, y, z, xshape, yshape);            \
    };                                                              \
    XPUCompareRawKernelImpl<T, XPUType>(dev_ctx, x, y, out, f);     \
  }
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_add_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_add_kernel.cc
index 2ca79cd26160b..b3a891f280f66 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_add_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_add_kernel.cc
@@ -36,13 +36,13 @@ void AddRawKernel(const Context& dev_ctx,
                   DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
 
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_add<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_add<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc
index 3fed6a52fdff4..d87bf7362581b 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_divide_kernel.cc
@@ -31,13 +31,13 @@ void DivideRawKernel(const Context& dev_ctx,
                      int axis,
                      DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_div<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_div<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc
index 0825014319dfe..ce9aa48b883b2 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_kernel.cc
@@ -31,13 +31,13 @@ void MaximumRawKernel(const Context& dev_ctx,
   }
   using XPUType = typename XPUTypeTrait<T>::Type;
 
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_max<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_max<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
@@ -55,13 +55,13 @@ void MinimumRawKernel(const Context& dev_ctx,
   }
   using XPUType = typename XPUTypeTrait<T>::Type;
 
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_min<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_min<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
@@ -74,13 +74,13 @@ void RemainderRawKernel(const Context& dev_ctx,
                         int axis,
                         DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_mod<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_mod<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
@@ -93,13 +93,13 @@ void FloorDivideRawKernel(const Context& dev_ctx,
                           int axis,
                           DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_floordiv<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_floordiv<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
@@ -112,13 +112,13 @@ void ElementwisePowRawKernel(const Context& dev_ctx,
                              int axis,
                              DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_pow<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_pow<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc
index d64499498ae8b..e3cf1e7f377f2 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_multiply_kernel.cc
@@ -31,13 +31,13 @@ void MultiplyRawKernel(const Context& dev_ctx,
                        int axis,
                        DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_mul<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_mul<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
diff --git a/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc b/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc
index bf5ea1381965f..231b84a8dd91a 100644
--- a/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/elementwise_subtract_kernel.cc
@@ -26,13 +26,13 @@ void SubtractRawKernel(const Context& dev_ctx,
                        int axis,
                        DenseTensor* out) {
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const XPUType* x,
              const XPUType* y,
              XPUType* z,
              const std::vector<int>& xshape,
              const std::vector<int>& yshape) {
-    return xpu::broadcast_sub<XPUType>(ctx, x, y, z, xshape, yshape);
+    return xpu::broadcast_sub<XPUType>(xpu_ctx, x, y, z, xshape, yshape);
   };
 
   phi::XPUElementwise<T, XPUType>(dev_ctx, x, y, axis, out, f);
diff --git a/paddle/phi/kernels/legacy/xpu/one_hot_kernel.cc b/paddle/phi/kernels/legacy/xpu/one_hot_kernel.cc
index 02edbd128430b..76903f89660e7 100644
--- a/paddle/phi/kernels/legacy/xpu/one_hot_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/one_hot_kernel.cc
@@ -25,21 +25,21 @@ struct OneHotV2OpFunctor {
   const DenseTensor* in_;
   DenseTensor* out_;
   int depth_;
-  const Context& ctx_;
+  const Context& dev_ctx_;
 
   OneHotV2OpFunctor(const DenseTensor* in,
                     DenseTensor* out,
                     int depth,
-                    const Context& ctx)
-      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
+                    const Context& dev_ctx)
+      : in_(in), out_(out), depth_(depth), dev_ctx_(dev_ctx) {}
 
   template <typename OutT>
   void apply() const {
     auto* p_in_data = in_->data<InT>();
     auto numel = in_->numel();
-    auto* p_out_data = ctx_.template Alloc<OutT>(out_);
+    auto* p_out_data = dev_ctx_.template Alloc<OutT>(out_);
     int r = xpu::one_hot<InT>(
-        ctx_.x_context(), p_in_data, p_out_data, numel, depth_, 1.0, 0.0);
+        dev_ctx_.x_context(), p_in_data, p_out_data, numel, depth_, 1.0, 0.0);
     PADDLE_ENFORCE_XDNN_SUCCESS(r, "one_hot");
   }
 };
diff --git a/paddle/phi/kernels/legacy/xpu/reduce_max_kernel.cc b/paddle/phi/kernels/legacy/xpu/reduce_max_kernel.cc
index 4cb8d9d043924..8c5881603e2e6 100644
--- a/paddle/phi/kernels/legacy/xpu/reduce_max_kernel.cc
+++ b/paddle/phi/kernels/legacy/xpu/reduce_max_kernel.cc
@@ -30,12 +30,12 @@ void MaxRawKernel(const Context& dev_ctx,
                   DenseTensor* out) {
   reduce_all = recompute_reduce_all(x, dims, reduce_all);
   using XPUType = typename XPUTypeTrait<T>::Type;
-  auto f = [](xpu::Context* ctx,
+  auto f = [](xpu::Context* xpu_ctx,
               const T* x,
              T* y,
              const std::vector<int>& xdims,
              const std::vector<int>& reduce_dims) {
-    return xpu::reduce_max(ctx,
+    return xpu::reduce_max(xpu_ctx,
                            reinterpret_cast<const XPUType*>(x),
                            reinterpret_cast<XPUType*>(y),
                            xdims,
diff --git a/paddle/phi/kernels/stride/index_select_kernel.cc b/paddle/phi/kernels/stride/index_select_kernel.cc
index 0f3a8aae1e4e7..6db84f5c89180 100644
--- a/paddle/phi/kernels/stride/index_select_kernel.cc
+++ b/paddle/phi/kernels/stride/index_select_kernel.cc
@@ -25,7 +25,7 @@ COMMON_DECLARE_bool(use_stride_kernel);
 namespace phi {
 
 template <typename Context>
-void IndexSelectStridedKernel(const Context& ctx,
+void IndexSelectStridedKernel(const Context& dev_ctx,
                               const DenseTensor& x,
                               int64_t index,
                               int dim,
diff --git a/paddle/phi/kernels/stride/slice_kernel.cc b/paddle/phi/kernels/stride/slice_kernel.cc
index fe65a002b67df..bff461867c37d 100644
--- a/paddle/phi/kernels/stride/slice_kernel.cc
+++ b/paddle/phi/kernels/stride/slice_kernel.cc
@@ -26,7 +26,7 @@ COMMON_DECLARE_bool(use_stride_kernel);
 namespace phi {
 
 template <typename Context>
-void SliceStridedKernel(const Context& ctx,
+void SliceStridedKernel(const Context& dev_ctx,
                         const DenseTensor& input,
                         const std::vector<int64_t>& axes,
                         const IntArray& starts_arr,
diff --git a/paddle/phi/kernels/stride/transpose_kernel.cc b/paddle/phi/kernels/stride/transpose_kernel.cc
index aaa4773f60808..a5d627c461326 100644
--- a/paddle/phi/kernels/stride/transpose_kernel.cc
+++ b/paddle/phi/kernels/stride/transpose_kernel.cc
@@ -22,7 +22,7 @@ COMMON_DECLARE_bool(use_stride_kernel);
 namespace phi {
 
 template <typename Context>
-void TransposeStridedKernel(const Context& ctx,
+void TransposeStridedKernel(const Context& dev_ctx,
                             const DenseTensor& x,
                             const std::vector<int>& axis,
                             DenseTensor* out) {