|
| 1 | +#define PY_ARRAY_UNIQUE_SYMBOL QuadPrecType_ARRAY_API |
| 2 | +#define PY_UFUNC_UNIQUE_SYMBOL QuadPrecType_UFUNC_API |
| 3 | +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION |
| 4 | +#define NPY_TARGET_VERSION NPY_2_0_API_VERSION |
| 5 | +#define NO_IMPORT_ARRAY |
| 6 | +#define NO_IMPORT_UFUNC |
| 7 | + |
| 8 | + |
#include <Python.h>

#include <cstdio>
#include <cstring>
#include <iostream>

#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "numpy/dtype_api.h"
#include "numpy/ndarraytypes.h"

#include "../quad_common.h"
#include "../scalar.h"
#include "../dtype.h"
#include "../ops.hpp"
#include "binary_ops.h"
#include "matmul.h"
| 25 | + |
| 26 | +static NPY_CASTING |
| 27 | +quad_matmul_resolve_descriptors(PyObject *self, PyArray_DTypeMeta *const dtypes[], |
| 28 | + PyArray_Descr *const given_descrs[], |
| 29 | + PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) |
| 30 | +{ |
| 31 | + |
| 32 | + NPY_CASTING casting = NPY_NO_CASTING; |
| 33 | + std::cout << "exiting the descriptor"; |
| 34 | + return casting; |
| 35 | +} |
| 36 | + |
| 37 | +template <binary_op_quad_def sleef_op, binary_op_longdouble_def longdouble_op> |
| 38 | +int |
| 39 | +quad_generic_matmul_strided_loop_unaligned(PyArrayMethod_Context *context, char *const data[], |
| 40 | + npy_intp const dimensions[], npy_intp const strides[], |
| 41 | + NpyAuxData *auxdata) |
| 42 | +{ |
| 43 | + npy_intp N = dimensions[0]; |
| 44 | + char *in1_ptr = data[0], *in2_ptr = data[1]; |
| 45 | + char *out_ptr = data[2]; |
| 46 | + npy_intp in1_stride = strides[0]; |
| 47 | + npy_intp in2_stride = strides[1]; |
| 48 | + npy_intp out_stride = strides[2]; |
| 49 | + |
| 50 | + QuadPrecDTypeObject *descr = (QuadPrecDTypeObject *)context->descriptors[0]; |
| 51 | + QuadBackendType backend = descr->backend; |
| 52 | + size_t elem_size = (backend == BACKEND_SLEEF) ? sizeof(Sleef_quad) : sizeof(long double); |
| 53 | + |
| 54 | + quad_value in1, in2, out; |
| 55 | + while (N--) { |
| 56 | + memcpy(&in1, in1_ptr, elem_size); |
| 57 | + memcpy(&in2, in2_ptr, elem_size); |
| 58 | + if (backend == BACKEND_SLEEF) { |
| 59 | + out.sleef_value = sleef_op(&in1.sleef_value, &in2.sleef_value); |
| 60 | + } |
| 61 | + else { |
| 62 | + out.longdouble_value = longdouble_op(&in1.longdouble_value, &in2.longdouble_value); |
| 63 | + } |
| 64 | + memcpy(out_ptr, &out, elem_size); |
| 65 | + |
| 66 | + in1_ptr += in1_stride; |
| 67 | + in2_ptr += in2_stride; |
| 68 | + out_ptr += out_stride; |
| 69 | + } |
| 70 | + return 0; |
| 71 | +} |
| 72 | + |
| 73 | +template <binary_op_quad_def sleef_op, binary_op_longdouble_def longdouble_op> |
| 74 | +int |
| 75 | +quad_generic_matmul_strided_loop_aligned(PyArrayMethod_Context *context, char *const data[], |
| 76 | + npy_intp const dimensions[], npy_intp const strides[], |
| 77 | + NpyAuxData *auxdata) |
| 78 | +{ |
| 79 | + npy_intp N = dimensions[0]; |
| 80 | + char *in1_ptr = data[0], *in2_ptr = data[1]; |
| 81 | + char *out_ptr = data[2]; |
| 82 | + npy_intp in1_stride = strides[0]; |
| 83 | + npy_intp in2_stride = strides[1]; |
| 84 | + npy_intp out_stride = strides[2]; |
| 85 | + |
| 86 | + QuadPrecDTypeObject *descr = (QuadPrecDTypeObject *)context->descriptors[0]; |
| 87 | + QuadBackendType backend = descr->backend; |
| 88 | + |
| 89 | + while (N--) { |
| 90 | + if (backend == BACKEND_SLEEF) { |
| 91 | + *(Sleef_quad *)out_ptr = sleef_op((Sleef_quad *)in1_ptr, (Sleef_quad *)in2_ptr); |
| 92 | + } |
| 93 | + else { |
| 94 | + *(long double *)out_ptr = longdouble_op((long double *)in1_ptr, (long double *)in2_ptr); |
| 95 | + } |
| 96 | + |
| 97 | + in1_ptr += in1_stride; |
| 98 | + in2_ptr += in2_stride; |
| 99 | + out_ptr += out_stride; |
| 100 | + } |
| 101 | + return 0; |
| 102 | +} |
| 103 | + |
| 104 | +int |
| 105 | +create_matmul_ufunc(PyObject *numpy, const char *ufunc_name) |
| 106 | +{ |
| 107 | + PyObject *ufunc = PyObject_GetAttrString(numpy, ufunc_name); |
| 108 | + if (ufunc == NULL) { |
| 109 | + return -1; |
| 110 | + } |
| 111 | + |
| 112 | + PyArray_DTypeMeta *dtypes[3] = {&QuadPrecDType, &QuadPrecDType, &QuadPrecDType}; |
| 113 | + |
| 114 | + PyType_Slot slots[] = { |
| 115 | + {NPY_METH_resolve_descriptors, (void *)&quad_matmul_resolve_descriptors}, |
| 116 | + {NPY_METH_strided_loop, |
| 117 | + (void *)&quad_generic_matmul_strided_loop_aligned<sleef_op, longdouble_op>}, |
| 118 | + {NPY_METH_unaligned_strided_loop, |
| 119 | + (void *)&quad_generic_matmul_strided_loop_unaligned<sleef_op, longdouble_op>}, |
| 120 | + {0, NULL}}; |
| 121 | + |
| 122 | + PyArrayMethod_Spec Spec = { |
| 123 | + .name = "quad_matmul", |
| 124 | + .nin = 2, |
| 125 | + .nout = 1, |
| 126 | + .casting = NPY_NO_CASTING, |
| 127 | + .flags = (NPY_ARRAYMETHOD_FLAGS)(NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_IS_REORDERABLE), |
| 128 | + .dtypes = dtypes, |
| 129 | + .slots = slots, |
| 130 | + }; |
| 131 | + |
| 132 | + if (PyUFunc_AddLoopFromSpec(ufunc, &Spec) < 0) { |
| 133 | + return -1; |
| 134 | + } |
| 135 | + // my guess we don't need any promoter here as of now, since matmul is quad specific |
| 136 | + return 0; |
| 137 | +} |
| 138 | + |
| 139 | + |
| 140 | +int |
| 141 | +init_matmul_ops(PyObject *numpy) |
| 142 | +{ |
| 143 | + if (create_matmul_ufunc<quad_add>(numpy, "matmul") < 0) { |
| 144 | + return -1; |
| 145 | + } |
| 146 | + return 0; |
| 147 | +} |
| 148 | + |
0 commit comments