|
37 | 37 | #include "simplify_iteration_space.hpp"
|
38 | 38 | #include "utils/memory_overlap.hpp"
|
39 | 39 | #include "utils/offset_utils.hpp"
|
| 40 | +#include "utils/output_validation.hpp" |
40 | 41 | #include "utils/type_dispatch.hpp"
|
41 | 42 |
|
42 | 43 | namespace dpctl
|
@@ -118,6 +119,8 @@ py_extract(const dpctl::tensor::usm_ndarray &src,
|
118 | 119 | sycl::queue &exec_q,
|
119 | 120 | const std::vector<sycl::event> &depends)
|
120 | 121 | {
|
| 122 | + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); |
| 123 | + |
121 | 124 | int src_nd = src.get_ndim();
|
122 | 125 | if ((axis_start < 0 || axis_end > src_nd || axis_start >= axis_end)) {
|
123 | 126 | throw py::value_error("Specified axes_start and axes_end are invalid.");
|
@@ -171,19 +174,8 @@ py_extract(const dpctl::tensor::usm_ndarray &src,
|
171 | 174 | throw py::value_error("Inconsistent array dimensions");
|
172 | 175 | }
|
173 | 176 |
|
174 |
| - // ensure that dst is sufficiently ample |
175 |
| - auto dst_offsets = dst.get_minmax_offsets(); |
176 |
| - // destination must be ample enough to accommodate all elements |
177 |
| - { |
178 |
| - size_t range = |
179 |
| - static_cast<size_t>(dst_offsets.second - dst_offsets.first); |
180 |
| - if (range + 1 < static_cast<size_t>(ortho_nelems * masked_dst_nelems)) { |
181 |
| - throw py::value_error( |
182 |
| - "Memory addressed by the destination array can not " |
183 |
| - "accommodate all the " |
184 |
| - "array elements."); |
185 |
| - } |
186 |
| - } |
| 177 | + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample( |
| 178 | + dst, ortho_nelems * masked_dst_nelems); |
187 | 179 |
|
188 | 180 | auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
|
189 | 181 | // check that dst does not intersect with src, not with cumsum.
|
@@ -452,6 +444,8 @@ py_place(const dpctl::tensor::usm_ndarray &dst,
|
452 | 444 | sycl::queue &exec_q,
|
453 | 445 | const std::vector<sycl::event> &depends)
|
454 | 446 | {
|
| 447 | + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(dst); |
| 448 | + |
455 | 449 | int dst_nd = dst.get_ndim();
|
456 | 450 | if ((axis_start < 0 || axis_end > dst_nd || axis_start >= axis_end)) {
|
457 | 451 | throw py::value_error("Specified axes_start and axes_end are invalid.");
|
@@ -502,19 +496,8 @@ py_place(const dpctl::tensor::usm_ndarray &dst,
|
502 | 496 | throw py::value_error("Inconsistent array dimensions");
|
503 | 497 | }
|
504 | 498 |
|
505 |
| - // ensure that dst is sufficiently ample |
506 |
| - auto dst_offsets = dst.get_minmax_offsets(); |
507 |
| - // destination must be ample enough to accommodate all elements |
508 |
| - { |
509 |
| - size_t range = |
510 |
| - static_cast<size_t>(dst_offsets.second - dst_offsets.first); |
511 |
| - if (range + 1 < static_cast<size_t>(ortho_nelems * masked_dst_nelems)) { |
512 |
| - throw py::value_error( |
513 |
| - "Memory addressed by the destination array can not " |
514 |
| - "accommodate all the " |
515 |
| - "array elements."); |
516 |
| - } |
517 |
| - } |
| 499 | + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample( |
| 500 | + dst, ortho_nelems * masked_dst_nelems); |
518 | 501 |
|
519 | 502 | auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
|
520 | 503 | // check that dst does not intersect with src, not with cumsum.
|
@@ -726,6 +709,8 @@ py_nonzero(const dpctl::tensor::usm_ndarray
|
726 | 709 | "Execution queue is not compatible with allocation queues");
|
727 | 710 | }
|
728 | 711 |
|
| 712 | + dpctl::tensor::validation::CheckWritable::throw_if_not_writable(indexes); |
| 713 | + |
729 | 714 | int cumsum_nd = cumsum.get_ndim();
|
730 | 715 | if (cumsum_nd != 1 || !cumsum.is_c_contiguous()) {
|
731 | 716 | throw py::value_error("Cumsum array must be a C-contiguous vector");
|
@@ -787,18 +772,8 @@ py_nonzero(const dpctl::tensor::usm_ndarray
|
787 | 772 | throw py::value_error("Arrays are expected to have no memory overlap");
|
788 | 773 | }
|
789 | 774 |
|
790 |
| - // ensure that dst is sufficiently ample |
791 |
| - auto indexes_offsets = indexes.get_minmax_offsets(); |
792 |
| - // destination must be ample enough to accommodate all elements |
793 |
| - { |
794 |
| - size_t range = |
795 |
| - static_cast<size_t>(indexes_offsets.second - indexes_offsets.first); |
796 |
| - if (range + 1 < static_cast<size_t>(nz_elems * _ndim)) { |
797 |
| - throw py::value_error( |
798 |
| - "Memory addressed by the destination array can not " |
799 |
| - "accommodate all the array elements."); |
800 |
| - } |
801 |
| - } |
| 775 | + dpctl::tensor::validation::AmpleMemory::throw_if_not_ample( |
| 776 | + indexes, nz_elems * _ndim); |
802 | 777 |
|
803 | 778 | std::vector<sycl::event> host_task_events;
|
804 | 779 | host_task_events.reserve(2);
|
|
0 commit comments