
Commit 83aa3b2

🏷️ stub lib._format_impl private module

1 parent: b3cfddf

2 files changed: +190 / -166 lines


src/numpy-stubs/lib/_format_impl.pyi

Lines changed: 168 additions & 0 deletions
@@ -0,0 +1,168 @@
+import io
+import os
+from _typeshed import SupportsRead, SupportsWrite
+from collections.abc import Mapping, Sequence
+from typing import Any, Final, Literal as L, TypeAlias, TypeGuard, TypedDict, overload, type_check_only
+from typing_extensions import TypeVar
+
+import _numtype as _nt
+import numpy as np
+from numpy import _AnyShapeT, _DTypeDescr  # noqa: ICN003
+from numpy._typing import DTypeLike, _DTypeLike
+
+from ._utils_impl import drop_metadata as drop_metadata
+
+__all__: list[str] = []
+
+###
+
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+
+_ToDescr: TypeAlias = str | Sequence[tuple[str, str] | tuple[str, str, tuple[int, ...]]]
+_HeaderVersion: TypeAlias = tuple[L[1, 2, 3], L[0]]
+_MemmapMode: TypeAlias = L["r", "c", "r+", "w+"]
+_ArrayHeader: TypeAlias = tuple[tuple[int, ...], bool, np.dtype]
+
+@type_check_only
+class _HeaderDict_1_0(TypedDict):
+    shape: _nt.Shape
+    fortran_order: bool
+    descr: _DTypeDescr
+
+###
+
+EXPECTED_KEYS: Final[set[str]] = ...
+MAGIC_PREFIX: Final = b"\x93NUMPY"
+MAGIC_LEN: Final = 16
+ARRAY_ALIGN: Final = 64
+BUFFER_SIZE: Final = 262_144  # 2**18
+GROWTH_AXIS_MAX_DIGITS: Final = 21
+_MAX_HEADER_SIZE: Final = 10_000
+
+#
+def _check_version(version: _HeaderVersion | None) -> None: ...
+def _filter_header(s: str) -> str: ...
+def _wrap_header(header: str, version: _HeaderVersion) -> bytes: ...
+def _wrap_header_guess_version(header: str) -> bytes: ...
+def _read_bytes(fp: SupportsRead[bytes], size: int, error_template: str = "ran out of data") -> bytes: ...
+
+# NOTE: Don't use `TypeIs` here: It might still be of this IO type if `False` is returned
+def isfileobj(f: object) -> TypeGuard[io.FileIO | io.BufferedReader | io.BufferedWriter]: ...
+
+#
+def magic(major: int, minor: int) -> bytes: ...
+def read_magic(fp: SupportsRead[bytes]) -> tuple[int, int]: ...
+
+#
+def dtype_to_descr(dtype: np.dtype) -> _DTypeDescr | str: ...
+def descr_to_dtype(descr: _ToDescr) -> np.dtype: ...
+
+#
+@overload  # known dtype, known shape (positional)
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode,
+    dtype: _DTypeLike[_ScalarT],
+    shape: _AnyShapeT,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.memmap[_AnyShapeT, np.dtype[_ScalarT]]: ...
+@overload  # known dtype, known shape (keyword)
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode = "r+",
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    shape: _AnyShapeT,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    max_header_size: int = 10_000,
+) -> np.memmap[_AnyShapeT, np.dtype[_ScalarT]]: ...
+@overload  # unknown dtype, known shape (positional)
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode,
+    dtype: DTypeLike | None,
+    shape: _AnyShapeT,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.memmap[_AnyShapeT, np.dtype]: ...
+@overload  # unknown dtype, known shape (keyword)
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode = "r+",
+    dtype: DTypeLike | None = None,
+    *,
+    shape: _AnyShapeT,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    max_header_size: int = 10_000,
+) -> np.memmap[_AnyShapeT, np.dtype]: ...
+@overload  # known dtype, unknown shape (positional)
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode,
+    dtype: _DTypeLike[_ScalarT],
+    shape: tuple[int, ...] | None = None,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.memmap[Any, np.dtype[_ScalarT]]: ...
+@overload  # known dtype, unknown shape (keyword)
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode = "r+",
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    shape: tuple[int, ...] | None = None,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    max_header_size: int = 10_000,
+) -> np.memmap[Any, np.dtype[_ScalarT]]: ...
+@overload  # unknown dtype, unknown shape
+def open_memmap(
+    filename: str | os.PathLike[str],
+    mode: _MemmapMode = "r+",
+    dtype: DTypeLike | None = None,
+    shape: tuple[int, ...] | None = None,
+    fortran_order: bool = False,
+    version: _HeaderVersion | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.memmap[Any, np.dtype]: ...
+
+#
+def header_data_from_array_1_0(array: np.ndarray[Any, Any]) -> _HeaderDict_1_0: ...
+
+#
+def _read_array_header(
+    fp: SupportsRead[bytes], version: _HeaderVersion, max_header_size: int = 10_000
+) -> _ArrayHeader: ...
+def read_array_header_1_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> _ArrayHeader: ...
+def read_array_header_2_0(fp: SupportsRead[bytes], max_header_size: int = 10_000) -> _ArrayHeader: ...
+def read_array(
+    fp: SupportsRead[bytes],
+    allow_pickle: bool = False,
+    pickle_kwargs: Mapping[str, object] | None = None,
+    *,
+    max_header_size: int = 10_000,
+) -> np.ndarray[Any, np.dtype]: ...
+
+#
+def _write_array_header(
+    fp: SupportsWrite[str], d: Mapping[str, str], version: _HeaderVersion | None = None
+) -> None: ...
+def write_array_header_1_0(fp: SupportsWrite[str], d: Mapping[str, str]) -> None: ...
+def write_array_header_2_0(fp: SupportsWrite[str], d: Mapping[str, str]) -> None: ...
+def write_array(
+    fp: SupportsWrite[str],
+    array: np.ndarray[Any, Any],
+    version: _HeaderVersion | None = None,
+    allow_pickle: bool = True,
+    pickle_kwargs: Mapping[str, object] | None = None,
+) -> None: ...
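
For context, not part of this commit: a minimal usage sketch of the public numpy.lib.format API that these annotations describe, assuming a writable working directory (the file names are illustrative).

import numpy as np
from numpy.lib import format as npf

arr = np.arange(12, dtype=np.float64).reshape(3, 4)

# write_array / read_array round-trip an array through the .npy format,
# matching the write_array/read_array signatures stubbed above.
with open("example.npy", "wb") as fp:
    npf.write_array(fp, arr, version=(1, 0))

with open("example.npy", "rb") as fp:
    major, minor = npf.read_magic(fp)                   # (1, 0)
    shape, fortran_order, dtype = npf.read_array_header_1_0(fp)

with open("example.npy", "rb") as fp:
    out = npf.read_array(fp)                            # np.ndarray[Any, np.dtype]

# The "known dtype, known shape" overload of open_memmap narrows the result
# to np.memmap[tuple[int, int], np.dtype[np.float64]] under these stubs.
mm = npf.open_memmap("mmap.npy", mode="w+", dtype=np.float64, shape=(3, 4))
mm[...] = arr
mm.flush()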

src/numpy-stubs/lib/format.pyi

Lines changed: 22 additions & 166 deletions
@@ -1,168 +1,24 @@
-import io
-import os
-from _typeshed import SupportsRead, SupportsWrite
-from collections.abc import Mapping, Sequence
-from typing import Any, Final, Literal as L, TypeAlias, TypeGuard, TypedDict, overload, type_check_only
-from typing_extensions import TypeVar
-
-import _numtype as _nt
-import numpy as np
-from numpy import _AnyShapeT, _DTypeDescr  # noqa: ICN003
-from numpy._typing import DTypeLike, _DTypeLike
-
-from ._utils_impl import drop_metadata as drop_metadata
+from ._format_impl import (
+    ARRAY_ALIGN as ARRAY_ALIGN,
+    BUFFER_SIZE as BUFFER_SIZE,
+    EXPECTED_KEYS as EXPECTED_KEYS,
+    GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS,
+    MAGIC_LEN as MAGIC_LEN,
+    MAGIC_PREFIX as MAGIC_PREFIX,
+    descr_to_dtype as descr_to_dtype,
+    drop_metadata as drop_metadata,
+    dtype_to_descr as dtype_to_descr,
+    header_data_from_array_1_0 as header_data_from_array_1_0,
+    isfileobj as isfileobj,
+    magic as magic,
+    open_memmap as open_memmap,
+    read_array as read_array,
+    read_array_header_1_0 as read_array_header_1_0,
+    read_array_header_2_0 as read_array_header_2_0,
+    read_magic as read_magic,
+    write_array as write_array,
+    write_array_header_1_0 as write_array_header_1_0,
+    write_array_header_2_0 as write_array_header_2_0,
+)

 __all__: list[str] = []

[old lines 16-168 deleted: the type aliases, constants, and function stubs that moved verbatim into _format_impl.pyi; see the additions shown above]
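
A quick, hedged sanity check (also not part of the commit): because every public name is re-exported, the import surface of numpy.lib.format should be unchanged by the move to the private _format_impl module.

# Downstream imports keep resolving (and type-checking) through numpy.lib.format.
from numpy.lib.format import MAGIC_PREFIX, open_memmap, read_array, write_array

assert MAGIC_PREFIX == b"\x93NUMPY"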
