Commit 2713c7c

Merge remote-tracking branch 'upstream/main' into annotations

2 parents 646fc61 + a479293

11 files changed: +210 −34 lines

.github/workflows/publish-package.yml

Lines changed: 3 additions & 2 deletions

```diff
@@ -41,13 +41,14 @@ jobs:
 
       - name: Install python-build and twine
         run: |
-          python -m pip install --upgrade pip setuptools
+          python -m pip install --upgrade pip "setuptools<=67"
           python -m pip install build twine
           python -m pip list
 
       - name: Build a wheel and a sdist
         run: |
-          PYTHONWARNINGS=error,default::DeprecationWarning python -m build .
+          #PYTHONWARNINGS=error,default::DeprecationWarning python -m build .
+          python -m build .
 
       - name: Verify the distribution
         run: twine check --strict dist/*
```

array_api_compat/__init__.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -17,6 +17,6 @@
 this implementation for the default when working with NumPy arrays.
 
 """
-__version__ = '1.12.dev0'
+__version__ = '1.11.2'
 
 from .common import * # noqa: F401, F403
```

array_api_compat/common/_aliases.py

Lines changed: 18 additions & 17 deletions

```diff
@@ -7,7 +7,7 @@
 import inspect
 from typing import NamedTuple, Optional, Sequence, Tuple, Union
 
-from ._helpers import array_namespace, _check_device, device, is_torch_array, is_cupy_namespace
+from ._helpers import array_namespace, _check_device, device, is_cupy_namespace
 from ._typing import Array, Device, DType, Namespace
 
 # These functions are modified from the NumPy versions.
@@ -375,28 +375,29 @@ def _isscalar(a):
 
     # At least handle the case of Python integers correctly (see
     # https://github.com/numpy/numpy/pull/26892).
-    if type(min) is int and min <= wrapped_xp.iinfo(x.dtype).min:
-        min = None
-    if type(max) is int and max >= wrapped_xp.iinfo(x.dtype).max:
-        max = None
+    if wrapped_xp.isdtype(x.dtype, "integral"):
+        if type(min) is int and min <= wrapped_xp.iinfo(x.dtype).min:
+            min = None
+        if type(max) is int and max >= wrapped_xp.iinfo(x.dtype).max:
+            max = None
 
+    dev = device(x)
     if out is None:
-        out = wrapped_xp.asarray(xp.broadcast_to(x, result_shape),
-                                 copy=True, device=device(x))
+        out = wrapped_xp.empty(result_shape, dtype=x.dtype, device=dev)
+        out[()] = x
+
     if min is not None:
-        if is_torch_array(x) and x.dtype == xp.float64 and _isscalar(min):
-            # Avoid loss of precision due to torch defaulting to float32
-            min = wrapped_xp.asarray(min, dtype=xp.float64)
-        a = xp.broadcast_to(wrapped_xp.asarray(min, device=device(x)), result_shape)
+        a = wrapped_xp.asarray(min, dtype=x.dtype, device=dev)
+        a = xp.broadcast_to(a, result_shape)
         ia = (out < a) | xp.isnan(a)
-        # torch requires an explicit cast here
-        out[ia] = wrapped_xp.astype(a[ia], out.dtype)
+        out[ia] = a[ia]
+
     if max is not None:
-        if is_torch_array(x) and x.dtype == xp.float64 and _isscalar(max):
-            max = wrapped_xp.asarray(max, dtype=xp.float64)
-        b = xp.broadcast_to(wrapped_xp.asarray(max, device=device(x)), result_shape)
+        b = wrapped_xp.asarray(max, dtype=x.dtype, device=dev)
+        b = xp.broadcast_to(b, result_shape)
         ib = (out > b) | xp.isnan(b)
-        out[ib] = wrapped_xp.astype(b[ib], out.dtype)
+        out[ib] = b[ib]
+
     # Return a scalar for 0-D
     return out[()]
 
```
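To make the effect of this hunk concrete, here is a small usage sketch, assuming the NumPy backend of `array_api_compat` (the wrapper itself is backend-agnostic):

```python
from array_api_compat import numpy as xp

# Python int bounds that cannot bind for an integer dtype are dropped
# instead of overflowing, now guarded by isdtype(x.dtype, "integral"):
x = xp.asarray([0, 100, 200], dtype=xp.uint8)
print(xp.clip(x, 50, 300))  # [ 50 100 200]; 300 > iinfo(uint8).max, so no upper clip

# min/max are cast to x.dtype up front, so the result dtype is preserved
# without the torch-specific float64 workaround this hunk removes:
xf = xp.asarray([0.0, 0.5, 1.5], dtype=xp.float32)
print(xp.clip(xf, 0.25, 1.0).dtype)  # float32
```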

array_api_compat/torch/_aliases.py

Lines changed: 32 additions & 13 deletions

```diff
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from functools import wraps as _wraps
+from functools import reduce as _reduce, wraps as _wraps
 from builtins import all as _builtin_all, any as _builtin_any
 from typing import List, Optional, Sequence, Tuple, Union
 
@@ -117,25 +117,43 @@ def _fix_promotion(x1, x2, only_scalar=True):
 
 
 def result_type(*arrays_and_dtypes: Array | DType | complex) -> DType:
-    if len(arrays_and_dtypes) == 0:
-        raise TypeError("At least one array or dtype must be provided")
-    if len(arrays_and_dtypes) == 1:
+    num = len(arrays_and_dtypes)
+
+    if num == 0:
+        raise ValueError("At least one array or dtype must be provided")
+
+    elif num == 1:
         x = arrays_and_dtypes[0]
         if isinstance(x, torch.dtype):
             return x
         return x.dtype
-    if len(arrays_and_dtypes) > 2:
-        return result_type(arrays_and_dtypes[0], result_type(*arrays_and_dtypes[1:]))
 
-    x, y = arrays_and_dtypes
-    if isinstance(x, _py_scalars) or isinstance(y, _py_scalars):
-        return torch.result_type(x, y)
+    if num == 2:
+        x, y = arrays_and_dtypes
+        return _result_type(x, y)
+
+    else:
+        # sort scalars so that they are treated last
+        scalars, others = [], []
+        for x in arrays_and_dtypes:
+            if isinstance(x, _py_scalars):
+                scalars.append(x)
+            else:
+                others.append(x)
+        if not others:
+            raise ValueError("At least one array or dtype must be provided")
+
+        # combine left-to-right
+        return _reduce(_result_type, others + scalars)
 
-    xdt = x.dtype if not isinstance(x, torch.dtype) else x
-    ydt = y.dtype if not isinstance(y, torch.dtype) else y
 
-    if (xdt, ydt) in _promotion_table:
-        return _promotion_table[xdt, ydt]
+def _result_type(x, y):
+    if not (isinstance(x, _py_scalars) or isinstance(y, _py_scalars)):
+        xdt = x.dtype if not isinstance(x, torch.dtype) else x
+        ydt = y.dtype if not isinstance(y, torch.dtype) else y
+
+        if (xdt, ydt) in _promotion_table:
+            return _promotion_table[xdt, ydt]
 
     # This doesn't result_type(dtype, dtype) for non-array API dtypes
     # because torch.result_type only accepts tensors. This does however, allow
@@ -144,6 +162,7 @@ def result_type(*arrays_and_dtypes: Array | DType | complex) -> DType:
     y = torch.tensor([], dtype=y) if isinstance(y, torch.dtype) else y
     return torch.result_type(x, y)
 
+
 def can_cast(from_: Union[DType, Array], to: DType, /) -> bool:
     if not isinstance(from_, torch.dtype):
         from_ = from_.dtype
```
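A brief sketch of the new semantics, mirroring the new tests further below: arrays and dtypes are reduced left-to-right first, and Python scalars are folded in last, so the position of a scalar no longer changes the promoted dtype:

```python
import torch
from array_api_compat import torch as xp

i64 = xp.ones(1, dtype=xp.int64)
f16 = xp.ones(1, dtype=xp.float16)

# Scalars are sorted to the end before the left-to-right _reduce,
# so every argument ordering agrees (cf. gh-273):
assert xp.result_type(1.0, i64, f16) == xp.result_type(i64, f16, 1.0) == xp.float16

# All-scalar inputs now raise ValueError instead of silently promoting:
# xp.result_type(1, 2, 3)  # ValueError
```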

docs/changelog.md

Lines changed: 17 additions & 0 deletions

```diff
@@ -1,5 +1,22 @@
 # Changelog
 
+## 1.11.2 (2025-03-20)
+
+This is a bugfix release with no new features compared to version 1.11.
+
+- fix the `result_type` wrapper for pytorch. Previously, `result_type` had multiple
+  issues with scalar arguments.
+- fix several issues with `clip` wrappers. Previously, `clip` was failing to allow
+  behaviors which are unspecified by the 2024.12 standard but allowed by the array
+  libraries.
+
+The following users contributed to this release:
+
+Evgeni Burovski
+Guido Imperiale
+Magnus Dalen Kvalevåg
+
+
 ## 1.11.1 (2025-03-04)
 
 This is a bugfix release with no new features compared to version 1.11.
```
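As an illustration of the second bullet, the non-standard `out=` argument that the wrapped libraries accept for `clip` is passed through again (a minimal sketch, assuming the NumPy backend; the new `test_clip_out` below exercises the same behavior):

```python
from array_api_compat import numpy as xp

x = xp.asarray([10, 20, 30])
out = xp.zeros_like(x)
xp.clip(x, 15, 25, out=out)  # out= is outside the 2024.12 standard but allowed
print(out)                   # [15 20 25]
```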

numpy-1-21-xfails.txt

Lines changed: 3 additions & 0 deletions

```diff
@@ -212,3 +212,6 @@ array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity
 array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
 array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
 array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
+
+# numpy < 2 bug: type promotion of asarray([], 'float32') and (np.finfo(float32).max + 1) -> float64
+array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real
```

numpy-1-26-xfails.txt

Lines changed: 3 additions & 0 deletions

```diff
@@ -66,3 +66,6 @@ array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity
 array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity]
 array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0]
 array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0]
+
+# numpy < 2 bug: type promotion of asarray([], 'float32') and (finfo(float32).max + 1) gives float64 not float32
+array_api_tests/test_operators_and_elementwise_functions.py::test_binary_with_scalars_real
```
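For context, a sketch of the promotion quirk these two xfails refer to; the expected outputs come from the xfail comments above, since the legacy value-based promotion rules differ from NEP 50:

```python
import numpy as np

small = np.asarray([], dtype=np.float32)
big = float(np.finfo(np.float32).max) + 1.0  # Python float near the float32 limit
# Per the xfail comments: float64 on numpy < 2 (value-based promotion),
# float32 on numpy >= 2 (NEP 50 weak scalar promotion).
print(np.result_type(small, big))
```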

tests/test_all.py

Lines changed: 1 addition & 0 deletions

```diff
@@ -26,6 +26,7 @@
     "SupportsBufferProtocol",
 ))
 
+@pytest.mark.skip(reason="TODO: starts failing after adding test_torch.py in gh-277")
 @pytest.mark.parametrize("library", ["common"] + wrapped_libraries)
 def test_all(library):
     if library == "common":
```

tests/test_common.py

Lines changed: 15 additions & 0 deletions

```diff
@@ -367,3 +367,18 @@ def test_asarray_copy(library):
         assert all(b[0] == 1.0)
     else:
         assert all(b[0] == 0.0)
+
+
+@pytest.mark.parametrize("library", ["numpy", "cupy", "torch"])
+def test_clip_out(library):
+    """Test non-standard out= parameter for clip()
+
+    (see "Avoid Restricting Behavior that is Outside the Scope of the Standard"
+    in https://data-apis.org/array-api-compat/dev/special-considerations.html)
+    """
+    xp = import_(library, wrapper=True)
+    x = xp.asarray([10, 20, 30])
+    out = xp.zeros_like(x)
+    xp.clip(x, 15, 25, out=out)
+    expect = xp.asarray([15, 20, 25])
+    assert xp.all(out == expect)
```

tests/test_torch.py

Lines changed: 98 additions & 0 deletions

```diff
@@ -0,0 +1,98 @@
+"""Test "unspecified" behavior which we cannot easily test in the Array API test suite.
+"""
+import itertools
+
+import pytest
+import torch
+
+from array_api_compat import torch as xp
+
+
+class TestResultType:
+    def test_empty(self):
+        with pytest.raises(ValueError):
+            xp.result_type()
+
+    def test_one_arg(self):
+        for x in [1, 1.0, 1j, '...', None]:
+            with pytest.raises((ValueError, AttributeError)):
+                xp.result_type(x)
+
+        for x in [xp.float32, xp.int64, torch.complex64]:
+            assert xp.result_type(x) == x
+
+        for x in [xp.asarray(True, dtype=xp.bool), xp.asarray(1, dtype=xp.complex64)]:
+            assert xp.result_type(x) == x.dtype
+
+    def test_two_args(self):
+        # Only include here things "unspecified" in the spec
+
+        # scalar, tensor or tensor,tensor
+        for x, y in [
+            (1., 1j),
+            (1j, xp.arange(3)),
+            (True, xp.asarray(3.)),
+            (xp.ones(3) == 1, 1j*xp.ones(3)),
+        ]:
+            assert xp.result_type(x, y) == torch.result_type(x, y)
+
+        # dtype, scalar
+        for x, y in [
+            (1j, xp.int64),
+            (True, xp.float64),
+        ]:
+            assert xp.result_type(x, y) == torch.result_type(x, xp.empty([], dtype=y))
+
+        # dtype, dtype
+        for x, y in [
+            (xp.bool, xp.complex64)
+        ]:
+            xt, yt = xp.empty([], dtype=x), xp.empty([], dtype=y)
+            assert xp.result_type(x, y) == torch.result_type(xt, yt)
+
+    def test_multi_arg(self):
+        torch.set_default_dtype(torch.float32)
+
+        args = [1., 5, 3, torch.asarray([3], dtype=torch.float16), 5, 6, 1.]
+        assert xp.result_type(*args) == torch.float16
+
+        args = [1, 2, 3j, xp.arange(3, dtype=xp.float32), 4, 5, 6]
+        assert xp.result_type(*args) == xp.complex64
+
+        args = [1, 2, 3j, xp.float64, 4, 5, 6]
+        assert xp.result_type(*args) == xp.complex128
+
+        args = [1, 2, 3j, xp.float64, 4, xp.asarray(3, dtype=xp.int16), 5, 6, False]
+        assert xp.result_type(*args) == xp.complex128
+
+        i64 = xp.ones(1, dtype=xp.int64)
+        f16 = xp.ones(1, dtype=xp.float16)
+        for i in itertools.permutations([i64, f16, 1.0, 1.0]):
+            assert xp.result_type(*i) == xp.float16, f"{i}"
+
+        with pytest.raises(ValueError):
+            xp.result_type(1, 2, 3, 4)
+
+
+    @pytest.mark.parametrize("default_dt", ['float32', 'float64'])
+    @pytest.mark.parametrize("dtype_a",
+        (xp.int32, xp.int64, xp.float32, xp.float64, xp.complex64, xp.complex128)
+    )
+    @pytest.mark.parametrize("dtype_b",
+        (xp.int32, xp.int64, xp.float32, xp.float64, xp.complex64, xp.complex128)
+    )
+    def test_gh_273(self, default_dt, dtype_a, dtype_b):
+        # Regression test for https://github.com/data-apis/array-api-compat/issues/273
+
+        try:
+            prev_default = torch.get_default_dtype()
+            default_dtype = getattr(torch, default_dt)
+            torch.set_default_dtype(default_dtype)
+
+            a = xp.asarray([2, 1], dtype=dtype_a)
+            b = xp.asarray([1, -1], dtype=dtype_b)
+            dtype_1 = xp.result_type(a, b, 1.0)
+            dtype_2 = xp.result_type(b, a, 1.0)
+            assert dtype_1 == dtype_2
+        finally:
+            torch.set_default_dtype(prev_default)
```
