Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions paddle/phi/api/include/compat/ATen/ops/abs.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,19 @@
#include <optional>
#include <string_view>

#include "glog/logging.h"
#include "paddle/phi/api/include/api.h"

namespace at {

// Element-wise absolute value, delegating to Paddle's abs kernel on the
// wrapped inner tensor. Emits a warning for non-contiguous inputs, because
// Paddle and PyTorch treat non-contiguous layouts differently (see the
// linked PR for details).
inline at::Tensor abs(const at::Tensor& self) {
  const bool input_is_contiguous = self.is_contiguous();
  if (!input_is_contiguous) {
    LOG(WARNING)
        << "at::abs: input tensor is non-contiguous. PyTorch and Paddle handle "
           "non-contiguous tensors differently, which may produce logically "
           "incorrect results even though the code is syntactically valid. "
           "See https://github.com/PaddlePaddle/Paddle/pull/78099 for details.";
  }
  return paddle::experimental::abs(self._PD_GetInner());
}

Expand All @@ -34,6 +42,13 @@ namespace at {
// Member-function form of abs(); forwards to the free function at::abs.
inline at::Tensor Tensor::abs() const {
  return at::abs(*this);
}

inline at::Tensor& Tensor::abs_() const {
if (!is_contiguous()) {
LOG(WARNING)
<< "Tensor::abs_: tensor is non-contiguous. PyTorch and Paddle handle "
"non-contiguous tensors differently, which may produce logically "
"incorrect results even though the code is syntactically valid. "
"See https://github.com/PaddlePaddle/Paddle/pull/78099 for details.";
}
PaddleTensor& inner = const_cast<PaddleTensor&>(tensor_);
paddle::experimental::abs_(inner);
return const_cast<at::Tensor&>(*this);
Expand Down
27 changes: 24 additions & 3 deletions paddle/phi/api/include/compat/ATen/ops/empty.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
#include <string_view>

#include "paddle/phi/api/include/api.h"
#include "paddle/phi/common/place.h"

namespace at {

Expand All @@ -31,6 +32,14 @@ inline at::Tensor empty(
PD_CHECK(!(memory_format.has_value() &&
memory_format.value() != c10::MemoryFormat::Contiguous),
"`MemoryFormat` other than Contiguous is not supported now.");
if (options.pinned_memory()) {
auto dense = paddle::experimental::empty(
size._PD_ToPaddleIntArray(),
compat::_PD_AtenScalarTypeToPhiDataType(options.dtype()),
phi::CPUPlace());
dense = dense.copy_to(phi::GPUPinnedPlace(), /*blocking=*/true);
return compat::_PD_ConvertToSparseIfNeeded(dense, options.layout());
}
auto dense = paddle::experimental::empty(
size._PD_ToPaddleIntArray(),
compat::_PD_AtenScalarTypeToPhiDataType(options.dtype()),
Expand All @@ -44,12 +53,24 @@ inline at::Tensor empty(at::IntArrayRef size,
::std::optional<at::Device> device,
::std::optional<bool> pin_memory,
::std::optional<at::MemoryFormat> memory_format) {
PD_CHECK(!(pin_memory.has_value() && pin_memory.value() != false),
"`pin_memory` other than False is not supported now.");
PD_CHECK(!(memory_format.has_value() &&
memory_format.value() != c10::MemoryFormat::Contiguous),
"`MemoryFormat` other than Contiguous is not supported now.");

if (pin_memory.value_or(false)) {
phi::Place base_place =
device.has_value() ? device.value()._PD_GetInner() : phi::CPUPlace();
phi::Place pinned_place = phi::is_xpu_place(base_place)
? phi::Place(phi::XPUPinnedPlace())
: phi::Place(phi::GPUPinnedPlace());
auto dense = paddle::experimental::empty(
size._PD_ToPaddleIntArray(),
compat::_PD_AtenScalarTypeToPhiDataType(
dtype.value_or(c10::get_default_dtype())),
phi::CPUPlace());
dense = dense.copy_to(pinned_place, /*blocking=*/true);
return compat::_PD_ConvertToSparseIfNeeded(dense,
layout.value_or(c10::kStrided));
}
auto dense =
paddle::experimental::empty(size._PD_ToPaddleIntArray(),
compat::_PD_AtenScalarTypeToPhiDataType(
Expand Down
199 changes: 160 additions & 39 deletions paddle/phi/api/include/compat/ATen/ops/from_blob.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,21 +18,159 @@
#include "paddle/phi/api/include/tensor_utils.h"
namespace at {

namespace detail {

// No-op deleter used when a context pointer is registered without an
// explicit cleanup function (see TensorMaker::context).
inline void noopDelete(void* /*unused*/) {}

}  // namespace detail

// Builder for constructing an at::Tensor over an existing memory blob,
// mirroring the PyTorch TensorMaker API. Obtain an instance via
// at::for_blob(data, sizes), chain the desired setters, then call
// make_tensor() to materialize the tensor.
class TensorMaker {
  friend TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept;

 public:
  using ContextDeleter = DeleterFnPtr;

  // Sets the strides of the resulting tensor (in elements, not bytes).
  TensorMaker& strides(OptionalIntArrayRef value) noexcept {
    strides_ = value;
    return *this;
  }

  // Sets the storage offset. Only zero is supported; make_tensor() checks.
  TensorMaker& storage_offset(std::optional<int64_t> value) noexcept {
    storage_offset_ = value;
    return *this;
  }

  // Registers a deleter invoked with the data pointer when the tensor's
  // allocation is released. Mutually exclusive with context().
  TensorMaker& deleter(std::function<void(void*)> value) noexcept {
    deleter_ = std::move(value);
    return *this;
  }

  // Registers an opaque context whose lifetime is tied to the tensor; the
  // given deleter (or a no-op when null) runs on the context when the last
  // owner goes away. Mutually exclusive with deleter().
  TensorMaker& context(void* value, ContextDeleter deleter = nullptr) noexcept {
    ctx_ = std::unique_ptr<void, ContextDeleter>{
        value, deleter != nullptr ? deleter : detail::noopDelete};
    return *this;
  }

  // Sets the device the data pointer belongs to.
  TensorMaker& target_device(std::optional<Device> value) noexcept {
    device_ = value;
    return *this;
  }

  // Sets dtype/layout/device options for the resulting tensor.
  TensorMaker& options(TensorOptions value) noexcept {
    opts_ = value;
    return *this;
  }

  // Requests resizeable storage.
  // NOTE(review): the flag is recorded but currently ignored by
  // make_tensor() — confirm whether resizeable storage should be plumbed
  // through to paddle::from_blob.
  TensorMaker& resizeable_storage() noexcept {
    resizeable_ = true;
    return *this;
  }

  // Materializes the tensor described by the accumulated settings.
  // Fails (PD_CHECK) when both a deleter and a context are set, when a
  // non-zero storage offset was requested, or when the options' device
  // conflicts with target_device().
  Tensor make_tensor() {
    PD_CHECK(!deleter_ || !ctx_,
             "The deleter and context arguments are mutually exclusive.");

    // paddle::from_blob has no storage-offset parameter, so only 0 works.
    PD_CHECK(!storage_offset_.has_value() || storage_offset_.value() == 0,
             "`storage_offset` should be zero.");

    if (device_.has_value() && opts_.has_device() &&
        opts_.device().has_index()) {
      PD_CHECK(opts_.device() == *device_,
               "Specified device ",
               opts_.device(),
               " does not match device of data ",
               *device_);
    }

    // Resolve the placement: explicit target device wins, then an indexed
    // device from the options, else leave undefined for auto-detection.
    phi::Place pd_place;
    if (device_.has_value()) {
      pd_place = device_->_PD_GetInner();
    } else if (opts_.has_device() && opts_.device().has_index()) {
      pd_place = opts_.device()._PD_GetInner();
    } else {
      pd_place = phi::Place();  // UNDEFINED → auto-detect inside from_blob
    }

    // Build paddle deleter: prefer explicit deleter_, then wrap ctx_ so its
    // lifetime is tied to the tensor allocation.
    paddle::Deleter pd_deleter = nullptr;
    if (deleter_) {
      pd_deleter = deleter_;
    } else if (ctx_) {
      // shared_ptr takes ownership of the context and calls its deleter when
      // the last copy (held in the lambda) is destroyed.
      auto shared_ctx =
          std::shared_ptr<void>(ctx_.release(), ctx_.get_deleter());
      pd_deleter = [shared_ctx](void* /*data*/) {};
    }

    if (strides_.has_value()) {
      return paddle::from_blob(
          data_,
          sizes_._PD_ToPaddleIntArray(),
          strides_.value()._PD_ToPaddleIntArray(),
          compat::_PD_AtenScalarTypeToPhiDataType(opts_.dtype()),
          phi::DataLayout::NCHW,
          pd_place,
          pd_deleter);
    } else {
      return paddle::from_blob(
          data_,
          sizes_._PD_ToPaddleIntArray(),
          compat::_PD_AtenScalarTypeToPhiDataType(opts_.dtype()),
          phi::DataLayout::NCHW,
          pd_place,
          pd_deleter);
    }
  }

 private:
  explicit TensorMaker(void* data, IntArrayRef sizes) noexcept
      : data_{data}, sizes_{sizes} {}

  // NOTE(review): the upstream PyTorch helper declarations
  // (computeStorageSize, makeDataPtrFromDeleter, makeDataPtrFromContext,
  // makeTempSizes) were declared here without definitions and never called;
  // they were removed so that any future invocation fails at compile time
  // instead of at link time.

  void* data_;
  IntArrayRef sizes_;
  OptionalIntArrayRef strides_;
  std::optional<int64_t> storage_offset_;
  std::function<void(void*)> deleter_;
  std::unique_ptr<void, ContextDeleter> ctx_{nullptr, detail::noopDelete};
  std::optional<Device> device_;
  TensorOptions opts_;
  bool resizeable_{};  // set by resizeable_storage(); currently unused
};

// Entry point of the blob-tensor builder: wraps `data` and `sizes` in a
// TensorMaker whose setters can be chained before calling make_tensor().
inline TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept {
  return TensorMaker(data, sizes);
}

// Creates a tensor viewing external memory with explicit strides and a
// deleter, optionally pinned to `target_device`. Implemented on top of the
// TensorMaker builder.
//
// Fix: the body contained two return statements (the pre-refactor direct
// paddle::from_blob call followed by the builder chain); the first made the
// second unreachable. Only the builder-based path is kept.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {},
    const std::optional<Device> target_device = std::nullopt) {
  return for_blob(data, sizes)
      .strides(strides)
      .deleter(deleter)
      .options(options)
      .target_device(target_device)
      .make_tensor();
}

inline Tensor from_blob(
Expand All @@ -43,16 +181,13 @@ inline Tensor from_blob(
const std::function<void(void*)>& deleter,
const TensorOptions& options = {},
const std::optional<Device> target_device = std::nullopt) {
PD_CHECK(storage_offset == 0, "`storage_offset` should be zero.");

return paddle::from_blob(
data,
sizes._PD_ToPaddleIntArray(),
strides._PD_ToPaddleIntArray(),
compat::_PD_AtenScalarTypeToPhiDataType(options.dtype()),
phi::DataLayout::NCHW,
device_or_default(target_device)._PD_GetInner(),
deleter);
return for_blob(data, sizes)
.strides(strides)
.storage_offset(storage_offset)
.deleter(deleter)
.options(options)
.target_device(target_device)
.make_tensor();
}

inline Tensor from_blob(
Expand All @@ -61,38 +196,24 @@ inline Tensor from_blob(
std::function<void(void*)> deleter,
const TensorOptions& options = {},
const std::optional<Device> target_device = std::nullopt) {
return paddle::from_blob(
data,
sizes._PD_ToPaddleIntArray(),
compat::_PD_AtenScalarTypeToPhiDataType(options.dtype()),
phi::DataLayout::NCHW,
device_or_default(target_device)._PD_GetInner(),
deleter);
return for_blob(data, sizes)
.deleter(std::move(deleter))
.options(options)
.target_device(target_device)
.make_tensor();
}

// Creates a tensor viewing external memory with explicit strides and no
// deleter (caller retains ownership of the buffer).
//
// Fix: removed the unreachable pre-refactor direct paddle::from_blob return
// that preceded the builder chain; only the builder-based path is kept.
inline Tensor from_blob(void* data,
                        IntArrayRef sizes,
                        IntArrayRef strides,
                        const TensorOptions& options = {}) {
  return for_blob(data, sizes).strides(strides).options(options).make_tensor();
}

// Creates a contiguous tensor viewing external memory with no deleter
// (caller retains ownership of the buffer).
//
// Fix: removed the unreachable pre-refactor direct paddle::from_blob return
// that preceded the builder chain; only the builder-based path is kept.
inline Tensor from_blob(void* data,
                        IntArrayRef sizes,
                        const TensorOptions& options = {}) {
  return for_blob(data, sizes).options(options).make_tensor();
}

} // namespace at
6 changes: 6 additions & 0 deletions paddle/phi/api/include/compat/c10/core/TensorOptions.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,12 @@ struct PADDLE_API TensorOptions {
return r;
}

// Convenience overload: constructs a Device in place from `args` and
// forwards to the device(std::optional<Device>) overload.
// NOTE(review): as an unconstrained variadic template this overload can win
// resolution for any argument pack Device is constructible from — confirm
// it does not shadow the optional-taking overload for existing callers.
template <typename... Args>
[[nodiscard]] TensorOptions device(Args&&... args) const noexcept {
  return device(
      std::optional<Device>(std::in_place, std::forward<Args>(args)...));
}

[[nodiscard]] TensorOptions device_index(
c10::DeviceIndex device_index) const noexcept {
return device(Device(kCUDA, device_index));
Expand Down
Loading
Loading