From fbe337317fe6230ac94cd42966b309e584bbe43a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Fri, 14 Nov 2025 15:59:34 -0500 Subject: [PATCH 1/7] GH1317 Set defaults --- pandas-stubs/core/indexes/interval.pyi | 60 ++-- pandas-stubs/io/parsers/readers.pyi | 380 ++++++++++++------------- pandas-stubs/io/sql.pyi | 18 +- 3 files changed, 229 insertions(+), 229 deletions(-) diff --git a/pandas-stubs/core/indexes/interval.pyi b/pandas-stubs/core/indexes/interval.pyi index 193ceb08e..2c4381c62 100644 --- a/pandas-stubs/core/indexes/interval.pyi +++ b/pandas-stubs/core/indexes/interval.pyi @@ -295,57 +295,57 @@ class IntervalIndex(ExtensionIndex[IntervalT, np.object_], IntervalMixin): # int gets hit first and so the correct type is returned @overload def interval_range( # pyright: ignore[reportOverlappingOverload] - start: int | None = ..., - end: int | None = ..., - periods: int | None = ..., - freq: int | None = ..., - name: Hashable = ..., - closed: IntervalClosedType = ..., + start: int | None = None, + end: int | None = None, + periods: int | None = None, + freq: int | None = None, + name: Hashable = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[int]]: ... @overload def interval_range( - start: float | None = ..., - end: float | None = ..., - periods: int | None = ..., - freq: int | None = ..., - name: Hashable = ..., - closed: IntervalClosedType = ..., + start: float | None = None, + end: float | None = None, + periods: int | None = None, + freq: int | None = None, + name: Hashable = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[float]]: ... @overload def interval_range( start: _TimestampLike, - end: _TimestampLike | None = ..., - periods: int | None = ..., - freq: Frequency | dt.timedelta | None = ..., - name: Hashable = ..., - closed: IntervalClosedType = ..., + end: _TimestampLike | None = None, + periods: int | None = None, + freq: Frequency | dt.timedelta | None = None, + name: Hashable = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[pd.Timestamp]]: ... @overload def interval_range( *, start: None = None, end: _TimestampLike, - periods: int | None = ..., - freq: Frequency | dt.timedelta | None = ..., - name: Hashable = ..., - closed: IntervalClosedType = ..., + periods: int | None = None, + freq: Frequency | dt.timedelta | None = None, + name: Hashable = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[pd.Timestamp]]: ... @overload def interval_range( start: _TimedeltaLike, - end: _TimedeltaLike | None = ..., - periods: int | None = ..., - freq: Frequency | dt.timedelta | None = ..., - name: Hashable = ..., - closed: IntervalClosedType = ..., + end: _TimedeltaLike | None = None, + periods: int | None = None, + freq: Frequency | dt.timedelta | None = None, + name: Hashable = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[pd.Timedelta]]: ... @overload def interval_range( *, start: None = None, end: _TimedeltaLike, - periods: int | None = ..., - freq: Frequency | dt.timedelta | None = ..., - name: Hashable = ..., - closed: IntervalClosedType = ..., + periods: int | None = None, + freq: Frequency | dt.timedelta | None = None, + name: Hashable = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[pd.Timedelta]]: ... 
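Illustrative usage sketch (not part of the patch): the calls below are assumptions meant to show how the interval_range overloads above are expected to resolve once literal defaults replace `...`. Variable names are hypothetical; the return types in the comments are the static annotations from this hunk, and `closed` now advertises its runtime default of "right".

import pandas as pd

# int endpoints: the stubs annotate this as IntervalIndex[Interval[int]]
# (freq defaults to 1 for numeric input)
idx_int = pd.interval_range(start=0, end=5)

# float endpoints: annotated as IntervalIndex[Interval[float]]
idx_float = pd.interval_range(start=0.0, end=2.5, periods=5)

# Timestamp start: annotated as IntervalIndex[Interval[pd.Timestamp]]
idx_ts = pd.interval_range(start=pd.Timestamp("2024-01-01"), periods=3, freq="D")

# closed="right" is the advertised default; other values are still accepted
idx_left = pd.interval_range(start=0, end=5, closed="left")
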
diff --git a/pandas-stubs/io/parsers/readers.pyi b/pandas-stubs/io/parsers/readers.pyi index 1584c76ab..94ecb529c 100644 --- a/pandas-stubs/io/parsers/readers.pyi +++ b/pandas-stubs/io/parsers/readers.pyi @@ -39,30 +39,30 @@ def read_csv( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None = ..., - delimiter: str | None = ..., - header: int | Sequence[int] | Literal["infer"] | None = ..., + delimiter: str | None = None, + header: int | Sequence[int] | Literal["infer"] | None = "infer", names: ListLikeHashable | None = ..., - index_col: int | str | Sequence[str | int] | Literal[False] | None = ..., - usecols: UsecolsArgType[HashableT] = ..., - dtype: DtypeArg | defaultdict | None = ..., - engine: CSVEngine | None = ..., + index_col: int | str | Sequence[str | int] | Literal[False] | None = None, + usecols: UsecolsArgType[HashableT] = None, + dtype: DtypeArg | defaultdict | None = None, + engine: CSVEngine | None = None, converters: ( Mapping[int | str, Callable[[str], Any]] | Mapping[int, Callable[[str], Any]] | Mapping[str, Callable[[str], Any]] | None - ) = ..., - true_values: list[str] | None = ..., - false_values: list[str] | None = ..., - skipinitialspace: bool = ..., - skiprows: int | Sequence[int] | Callable[[int], bool] | None = ..., - skipfooter: int = ..., - nrows: int | None = ..., - na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., - keep_default_na: bool = ..., - na_filter: bool = ..., + ) = None, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + skipinitialspace: bool = False, + skiprows: int | Sequence[int] | Callable[[int], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, verbose: bool = ..., - skip_blank_lines: bool = ..., + skip_blank_lines: bool = True, parse_dates: ( bool | list[int] @@ -70,33 +70,33 @@ def read_csv( | Sequence[Sequence[int]] | Mapping[str, Sequence[int | str]] | None - ) = ..., - keep_date_col: bool = ..., - date_format: dict[Hashable, str] | str | None = ..., - dayfirst: bool = ..., - cache_dates: bool = ..., + ) = None, + keep_date_col: bool = True, + date_format: dict[Hashable, str] | str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, iterator: Literal[True], - chunksize: int | None = ..., - compression: CompressionOptions = ..., - thousands: str | None = ..., - decimal: str = ..., - lineterminator: str | None = ..., - quotechar: str = ..., - quoting: CSVQuoting = ..., - doublequote: bool = ..., - escapechar: str | None = ..., - comment: str | None = ..., - encoding: str | None = ..., - encoding_errors: str | None = ..., - dialect: str | csv.Dialect | None = ..., + chunksize: int | None = None, + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: CSVQuoting = 0, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] - ) = ..., + ) = "error", delim_whitespace: bool = ..., low_memory: bool = ..., - memory_map: bool = ..., - float_precision: Literal["high", "legacy", "round_trip"] | None = ..., - storage_options: StorageOptions | None 
= ..., + memory_map: bool = False, + float_precision: Literal["high", "legacy", "round_trip"] | None = None, + storage_options: StorageOptions | None = None, dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload @@ -104,30 +104,30 @@ def read_csv( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None = ..., - delimiter: str | None = ..., - header: int | Sequence[int] | Literal["infer"] | None = ..., + delimiter: str | None = None, + header: int | Sequence[int] | Literal["infer"] | None = "infer", names: ListLikeHashable | None = ..., - index_col: int | str | Sequence[str | int] | Literal[False] | None = ..., - usecols: UsecolsArgType[HashableT] = ..., - dtype: DtypeArg | defaultdict | None = ..., - engine: CSVEngine | None = ..., + index_col: int | str | Sequence[str | int] | Literal[False] | None = None, + usecols: UsecolsArgType[HashableT] = None, + dtype: DtypeArg | defaultdict | None = None, + engine: CSVEngine | None = None, converters: ( Mapping[int | str, Callable[[str], Any]] | Mapping[int, Callable[[str], Any]] | Mapping[str, Callable[[str], Any]] | None - ) = ..., - true_values: list[str] | None = ..., - false_values: list[str] | None = ..., - skipinitialspace: bool = ..., - skiprows: int | Sequence[int] | Callable[[int], bool] | None = ..., - skipfooter: int = ..., - nrows: int | None = ..., - na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., - keep_default_na: bool = ..., - na_filter: bool = ..., + ) = None, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + skipinitialspace: bool = False, + skiprows: int | Sequence[int] | Callable[[int], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, verbose: bool = ..., - skip_blank_lines: bool = ..., + skip_blank_lines: bool = True, parse_dates: ( bool | list[int] @@ -135,33 +135,33 @@ def read_csv( | Sequence[Sequence[int]] | Mapping[str, Sequence[int | str]] | None - ) = ..., + ) = None, keep_date_col: bool = ..., - date_format: dict[Hashable, str] | str | None = ..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: bool = ..., + date_format: dict[Hashable, str] | str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + iterator: bool = False, chunksize: int, - compression: CompressionOptions = ..., - thousands: str | None = ..., - decimal: str = ..., - lineterminator: str | None = ..., - quotechar: str = ..., - quoting: CSVQuoting = ..., - doublequote: bool = ..., - escapechar: str | None = ..., - comment: str | None = ..., - encoding: str | None = ..., - encoding_errors: str | None = ..., - dialect: str | csv.Dialect | None = ..., + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: CSVQuoting = 0, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] - ) = ..., + ) = "error", delim_whitespace: bool = ..., low_memory: bool = ..., - memory_map: bool = ..., - float_precision: Literal["high", "legacy", "round_trip"] | None = ..., - storage_options: StorageOptions | None = ..., 
+ memory_map: bool = False, + float_precision: Literal["high", "legacy", "round_trip"] | None = None, + storage_options: StorageOptions | None = None, dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload @@ -234,30 +234,30 @@ def read_table( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None = ..., - delimiter: str | None = ..., - header: int | Sequence[int] | Literal["infer"] | None = ..., + delimiter: str | None = None, + header: int | Sequence[int] | Literal["infer"] | None = "infer", names: ListLikeHashable | None = ..., - index_col: int | str | Sequence[str | int] | Literal[False] | None = ..., - usecols: UsecolsArgType[HashableT] = ..., - dtype: DtypeArg | defaultdict | None = ..., - engine: CSVEngine | None = ..., + index_col: int | str | Sequence[str | int] | Literal[False] | None = None, + usecols: UsecolsArgType[HashableT] = None, + dtype: DtypeArg | defaultdict | None = None, + engine: CSVEngine | None = None, converters: ( Mapping[int | str, Callable[[str], Any]] | Mapping[int, Callable[[str], Any]] | Mapping[str, Callable[[str], Any]] | None - ) = ..., - true_values: list[str] | None = ..., - false_values: list[str] | None = ..., - skipinitialspace: bool = ..., - skiprows: int | Sequence[int] | Callable[[int], bool] | None = ..., - skipfooter: int = ..., - nrows: int | None = ..., - na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., - keep_default_na: bool = ..., - na_filter: bool = ..., + ) = None, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + skipinitialspace: bool = False, + skiprows: int | Sequence[int] | Callable[[int], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, verbose: bool = ..., - skip_blank_lines: bool = ..., + skip_blank_lines: bool = True, parse_dates: ( bool | list[int] @@ -265,64 +265,64 @@ def read_table( | Sequence[Sequence[int]] | Mapping[str, Sequence[int | str]] | None - ) = ..., + ) = False, infer_datetime_format: bool = ..., keep_date_col: bool = ..., - date_format: dict[Hashable, str] | str | None = ..., - dayfirst: bool = ..., - cache_dates: bool = ..., + date_format: dict[Hashable, str] | str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, iterator: Literal[True], - chunksize: int | None = ..., - compression: CompressionOptions = ..., - thousands: str | None = ..., - decimal: str = ..., - lineterminator: str | None = ..., - quotechar: str = ..., - quoting: CSVQuoting = ..., - doublequote: bool = ..., - escapechar: str | None = ..., - comment: str | None = ..., - encoding: str | None = ..., - encoding_errors: str | None = ..., - dialect: str | csv.Dialect | None = ..., + chunksize: int | None = None, + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: CSVQuoting = 0, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] - ) = ..., + ) = "error", delim_whitespace: bool = ..., low_memory: bool = ..., - memory_map: bool = ..., - float_precision: Literal["high", "legacy", "round_trip"] | None 
= ..., - storage_options: StorageOptions | None = ..., + memory_map: bool = False, + float_precision: Literal["high", "legacy", "round_trip"] | None = None, + storage_options: StorageOptions | None = None, ) -> TextFileReader: ... @overload def read_table( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None = ..., - delimiter: str | None = ..., - header: int | Sequence[int] | Literal["infer"] | None = ..., + delimiter: str | None = None, + header: int | Sequence[int] | Literal["infer"] | None = "infer", names: ListLikeHashable | None = ..., - index_col: int | str | Sequence[str | int] | Literal[False] | None = ..., - usecols: UsecolsArgType[HashableT] = ..., - dtype: DtypeArg | defaultdict | None = ..., - engine: CSVEngine | None = ..., + index_col: int | str | Sequence[str | int] | Literal[False] | None = None, + usecols: UsecolsArgType[HashableT] = None, + dtype: DtypeArg | defaultdict | None = None, + engine: CSVEngine | None = None, converters: ( Mapping[int | str, Callable[[str], Any]] | Mapping[int, Callable[[str], Any]] | Mapping[str, Callable[[str], Any]] | None - ) = ..., - true_values: list[str] | None = ..., - false_values: list[str] | None = ..., - skipinitialspace: bool = ..., - skiprows: int | Sequence[int] | Callable[[int], bool] | None = ..., - skipfooter: int = ..., - nrows: int | None = ..., - na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., - keep_default_na: bool = ..., - na_filter: bool = ..., + ) = None, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + skipinitialspace: bool = False, + skiprows: int | Sequence[int] | Callable[[int], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, verbose: bool = ..., - skip_blank_lines: bool = ..., + skip_blank_lines: bool = True, parse_dates: ( bool | list[int] @@ -330,64 +330,64 @@ def read_table( | Sequence[Sequence[int]] | Mapping[str, Sequence[int | str]] | None - ) = ..., + ) = False, infer_datetime_format: bool = ..., keep_date_col: bool = ..., - date_format: dict[Hashable, str] | str | None = ..., - dayfirst: bool = ..., - cache_dates: bool = ..., - iterator: bool = ..., + date_format: dict[Hashable, str] | str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + iterator: bool = False, chunksize: int, - compression: CompressionOptions = ..., - thousands: str | None = ..., - decimal: str = ..., - lineterminator: str | None = ..., - quotechar: str = ..., - quoting: CSVQuoting = ..., - doublequote: bool = ..., - escapechar: str | None = ..., - comment: str | None = ..., - encoding: str | None = ..., - encoding_errors: str | None = ..., - dialect: str | csv.Dialect | None = ..., + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: CSVQuoting = 0, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] - ) = ..., + ) = "error", delim_whitespace: bool = ..., low_memory: bool = ..., - memory_map: bool = ..., - float_precision: Literal["high", "legacy", "round_trip"] | None = ..., - storage_options: StorageOptions | 
None = ..., + memory_map: bool = False, + float_precision: Literal["high", "legacy", "round_trip"] | None = None, + storage_options: StorageOptions | None = None, ) -> TextFileReader: ... @overload def read_table( filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None = ..., - delimiter: str | None = ..., - header: int | Sequence[int] | Literal["infer"] | None = ..., + delimiter: str | None = None, + header: int | Sequence[int] | Literal["infer"] | None = "infer", names: ListLikeHashable | None = ..., - index_col: int | str | Sequence[str | int] | Literal[False] | None = ..., - usecols: UsecolsArgType[HashableT] = ..., - dtype: DtypeArg | defaultdict | None = ..., - engine: CSVEngine | None = ..., + index_col: int | str | Sequence[str | int] | Literal[False] | None = None, + usecols: UsecolsArgType[HashableT] = None, + dtype: DtypeArg | defaultdict | None = None, + engine: CSVEngine | None = None, converters: ( Mapping[int | str, Callable[[str], Any]] | Mapping[int, Callable[[str], Any]] | Mapping[str, Callable[[str], Any]] | None - ) = ..., - true_values: list[str] | None = ..., - false_values: list[str] | None = ..., - skipinitialspace: bool = ..., - skiprows: int | Sequence[int] | Callable[[int], bool] | None = ..., - skipfooter: int = ..., - nrows: int | None = ..., - na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., - keep_default_na: bool = ..., - na_filter: bool = ..., + ) = None, + true_values: list[str] | None = None, + false_values: list[str] | None = None, + skipinitialspace: bool = False, + skiprows: int | Sequence[int] | Callable[[int], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, verbose: bool = ..., - skip_blank_lines: bool = ..., + skip_blank_lines: bool = True, parse_dates: ( bool | list[int] @@ -395,34 +395,34 @@ def read_table( | Sequence[Sequence[int]] | Mapping[str, Sequence[int | str]] | None - ) = ..., + ) = False, infer_datetime_format: bool = ..., keep_date_col: bool = ..., - date_format: dict[Hashable, str] | str | None = ..., - dayfirst: bool = ..., - cache_dates: bool = ..., + date_format: dict[Hashable, str] | str | None = None, + dayfirst: bool = False, + cache_dates: bool = True, iterator: Literal[False] = False, chunksize: None = None, - compression: CompressionOptions = ..., - thousands: str | None = ..., - decimal: str = ..., - lineterminator: str | None = ..., - quotechar: str = ..., - quoting: CSVQuoting = ..., - doublequote: bool = ..., - escapechar: str | None = ..., - comment: str | None = ..., - encoding: str | None = ..., - encoding_errors: str | None = ..., - dialect: str | csv.Dialect | None = ..., + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: CSVQuoting = 0, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] - ) = ..., + ) = "error", delim_whitespace: bool = ..., low_memory: bool = ..., - memory_map: bool = ..., - float_precision: Literal["high", "legacy", "round_trip"] | None = ..., - storage_options: StorageOptions | None = ..., + memory_map: bool = False, + 
float_precision: Literal["high", "legacy", "round_trip"] | None = None, + storage_options: StorageOptions | None = None, ) -> DataFrame: ... @overload def read_fwf( diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi index b1dfd7b63..428f3a503 100644 --- a/pandas-stubs/io/sql.pyi +++ b/pandas-stubs/io/sql.pyi @@ -105,8 +105,8 @@ def read_sql_query( def read_sql( sql: _SQLStatement, con: _SQLConnection, - index_col: str | list[str] | None = ..., - coerce_float: bool = ..., + index_col: str | list[str] | None = None, + coerce_float: bool = True, params: ( list[Scalar] | tuple[Scalar, ...] @@ -115,8 +115,8 @@ def read_sql( | Mapping[str, tuple[Scalar, ...]] | None ) = ..., - parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ..., - columns: list[str] | None = ..., + parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = None, + columns: list[str] | None = None, *, chunksize: int, dtype: DtypeArg | None = ..., @@ -126,8 +126,8 @@ def read_sql( def read_sql( sql: _SQLStatement, con: _SQLConnection, - index_col: str | list[str] | None = ..., - coerce_float: bool = ..., + index_col: str | list[str] | None = None, + coerce_float: bool = True, params: ( list[Scalar] | tuple[Scalar, ...] @@ -136,10 +136,10 @@ def read_sql( | Mapping[str, tuple[Scalar, ...]] | None ) = ..., - parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = ..., - columns: list[str] | None = ..., + parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = None, + columns: list[str] | None = None, chunksize: None = None, - dtype: DtypeArg | None = ..., + dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> DataFrame: ... From ec8c20ff3663e2fd9ed049c7ce7c7749a389ec1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Mon, 17 Nov 2025 20:37:36 -0500 Subject: [PATCH 2/7] GH1317 PR feedback --- pandas-stubs/core/indexes/interval.pyi | 22 ++++++++++++++++++++-- tests/indexes/test_indexes.py | 19 +++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/pandas-stubs/core/indexes/interval.pyi b/pandas-stubs/core/indexes/interval.pyi index 2c4381c62..0b4d18517 100644 --- a/pandas-stubs/core/indexes/interval.pyi +++ b/pandas-stubs/core/indexes/interval.pyi @@ -322,8 +322,8 @@ def interval_range( ) -> IntervalIndex[Interval[pd.Timestamp]]: ... @overload def interval_range( - *, start: None = None, + *, end: _TimestampLike, periods: int | None = None, freq: Frequency | dt.timedelta | None = None, @@ -331,6 +331,15 @@ def interval_range( closed: IntervalClosedType = "right", ) -> IntervalIndex[Interval[pd.Timestamp]]: ... @overload +def interval_range( + start: None, + end: _TimestampLike, + periods: int, + freq: Frequency | dt.timedelta, + name: Hashable = None, + closed: IntervalClosedType = "right", +) -> IntervalIndex[Interval[pd.Timestamp]]: ... +@overload def interval_range( start: _TimedeltaLike, end: _TimedeltaLike | None = None, @@ -341,8 +350,17 @@ def interval_range( ) -> IntervalIndex[Interval[pd.Timedelta]]: ... @overload def interval_range( - *, + start: None, + end: _TimedeltaLike, + periods: int, + freq: Frequency | dt.timedelta, + name: Hashable = None, + closed: IntervalClosedType = "right", +) -> IntervalIndex[Interval[pd.Timedelta]]: ... 
+@overload +def interval_range( start: None = None, + *, end: _TimedeltaLike, periods: int | None = None, freq: Frequency | dt.timedelta | None = None, diff --git a/tests/indexes/test_indexes.py b/tests/indexes/test_indexes.py index 8647f6dde..cbf18ebcd 100644 --- a/tests/indexes/test_indexes.py +++ b/tests/indexes/test_indexes.py @@ -458,6 +458,25 @@ def test_interval_range() -> None: pd.IntervalIndex, pd.Interval, ) + check( + assert_type( + pd.interval_range(None, pd.Timestamp("2020-01-01"), 2, "1D"), + "pd.IntervalIndex[pd.Interval[pd.Timestamp]]", + ), + pd.IntervalIndex, + pd.Interval, + ) + + check( + assert_type( + pd.interval_range( + None, end=pd.Timestamp("2020-01-01"), periods=2, freq="1D" + ), + "pd.IntervalIndex[pd.Interval[pd.Timestamp]]", + ), + pd.IntervalIndex, + pd.Interval, + ) def test_interval_index_breaks() -> None: From 64b7c5f5f114eba5c47980519fc4abdd7072102c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Mon, 17 Nov 2025 20:57:32 -0500 Subject: [PATCH 3/7] GH1317 PR feedback --- pandas-stubs/io/parsers/readers.pyi | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/pandas-stubs/io/parsers/readers.pyi b/pandas-stubs/io/parsers/readers.pyi index 94ecb529c..504754ef7 100644 --- a/pandas-stubs/io/parsers/readers.pyi +++ b/pandas-stubs/io/parsers/readers.pyi @@ -61,7 +61,6 @@ def read_csv( na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, keep_default_na: bool = True, na_filter: bool = True, - verbose: bool = ..., skip_blank_lines: bool = True, parse_dates: ( bool @@ -92,8 +91,7 @@ def read_csv( on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] ) = "error", - delim_whitespace: bool = ..., - low_memory: bool = ..., + low_memory: bool = True, memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, @@ -126,7 +124,6 @@ def read_csv( na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, keep_default_na: bool = True, na_filter: bool = True, - verbose: bool = ..., skip_blank_lines: bool = True, parse_dates: ( bool @@ -157,8 +154,7 @@ def read_csv( on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] ) = "error", - delim_whitespace: bool = ..., - low_memory: bool = ..., + low_memory: bool = True, memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, @@ -191,7 +187,6 @@ def read_csv( na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., keep_default_na: bool = ..., na_filter: bool = ..., - verbose: bool = ..., skip_blank_lines: bool = ..., parse_dates: ( bool @@ -222,8 +217,7 @@ def read_csv( on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] ) = ..., - delim_whitespace: bool = ..., - low_memory: bool = ..., + low_memory: bool = True, memory_map: bool = ..., float_precision: Literal["high", "legacy", "round_trip"] | None = ..., storage_options: StorageOptions | None = ..., @@ -293,6 +287,7 @@ def read_table( memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> TextFileReader: ... 
@overload def read_table( @@ -358,6 +353,7 @@ def read_table( memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> TextFileReader: ... @overload def read_table( @@ -423,6 +419,7 @@ def read_table( memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | _NoDefaultDoNotUse = ..., ) -> DataFrame: ... @overload def read_fwf( From 70a66eddd800387e0909cedeaf6b6a7b7c49f665 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Tue, 18 Nov 2025 08:01:45 -0500 Subject: [PATCH 4/7] GH1317 PR feedback --- pandas-stubs/io/parsers/readers.pyi | 25 ++++++++----------------- pandas-stubs/io/sql.pyi | 6 +++--- 2 files changed, 11 insertions(+), 20 deletions(-) diff --git a/pandas-stubs/io/parsers/readers.pyi b/pandas-stubs/io/parsers/readers.pyi index 504754ef7..17f2ced8e 100644 --- a/pandas-stubs/io/parsers/readers.pyi +++ b/pandas-stubs/io/parsers/readers.pyi @@ -133,7 +133,7 @@ def read_csv( | Mapping[str, Sequence[int | str]] | None ) = None, - keep_date_col: bool = ..., + keep_date_col: bool = False, date_format: dict[Hashable, str] | str | None = None, dayfirst: bool = False, cache_dates: bool = True, @@ -196,7 +196,7 @@ def read_csv( | Mapping[str, Sequence[int | str]] | None ) = ..., - keep_date_col: bool = ..., + keep_date_col: bool = False, date_format: dict[Hashable, str] | str | None = ..., dayfirst: bool = ..., cache_dates: bool = ..., @@ -250,7 +250,6 @@ def read_table( na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, keep_default_na: bool = True, na_filter: bool = True, - verbose: bool = ..., skip_blank_lines: bool = True, parse_dates: ( bool @@ -260,8 +259,7 @@ def read_table( | Mapping[str, Sequence[int | str]] | None ) = False, - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., + keep_date_col: bool = False, date_format: dict[Hashable, str] | str | None = None, dayfirst: bool = False, cache_dates: bool = True, @@ -282,8 +280,7 @@ def read_table( on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] ) = "error", - delim_whitespace: bool = ..., - low_memory: bool = ..., + low_memory: bool = True, memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, @@ -316,7 +313,6 @@ def read_table( na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, keep_default_na: bool = True, na_filter: bool = True, - verbose: bool = ..., skip_blank_lines: bool = True, parse_dates: ( bool @@ -326,8 +322,7 @@ def read_table( | Mapping[str, Sequence[int | str]] | None ) = False, - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., + keep_date_col: bool = False, date_format: dict[Hashable, str] | str | None = None, dayfirst: bool = False, cache_dates: bool = True, @@ -348,8 +343,7 @@ def read_table( on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] ) = "error", - delim_whitespace: bool = ..., - low_memory: bool = ..., + low_memory: bool = True, memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, @@ -382,7 +376,6 @@ def read_table( na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, 
keep_default_na: bool = True, na_filter: bool = True, - verbose: bool = ..., skip_blank_lines: bool = True, parse_dates: ( bool @@ -392,8 +385,7 @@ def read_table( | Mapping[str, Sequence[int | str]] | None ) = False, - infer_datetime_format: bool = ..., - keep_date_col: bool = ..., + keep_date_col: bool = False, date_format: dict[Hashable, str] | str | None = None, dayfirst: bool = False, cache_dates: bool = True, @@ -414,8 +406,7 @@ def read_table( on_bad_lines: ( Literal["error", "warn", "skip"] | Callable[[list[str]], list[str] | None] ) = "error", - delim_whitespace: bool = ..., - low_memory: bool = ..., + low_memory: bool = True, memory_map: bool = False, float_precision: Literal["high", "legacy", "round_trip"] | None = None, storage_options: StorageOptions | None = None, diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi index 428f3a503..46fb6497f 100644 --- a/pandas-stubs/io/sql.pyi +++ b/pandas-stubs/io/sql.pyi @@ -3,6 +3,7 @@ from collections.abc import ( Generator, Iterable, Mapping, + Sequence, ) import sqlite3 from typing import ( @@ -129,13 +130,12 @@ def read_sql( index_col: str | list[str] | None = None, coerce_float: bool = True, params: ( - list[Scalar] - | tuple[Scalar, ...] + Sequence[Scalar] | tuple[tuple[Scalar, ...], ...] | Mapping[str, Scalar] | Mapping[str, tuple[Scalar, ...]] | None - ) = ..., + ) = None, parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = None, columns: list[str] | None = None, chunksize: None = None, From 3a4a45a15308bccd77db318e4cf577e0f9b96480 Mon Sep 17 00:00:00 2001 From: Loic Diridollou Date: Tue, 18 Nov 2025 21:04:42 -0500 Subject: [PATCH 5/7] Update pandas-stubs/io/sql.pyi Co-authored-by: Yi-Fan Wang --- pandas-stubs/io/sql.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi index 46fb6497f..2e1a838c0 100644 --- a/pandas-stubs/io/sql.pyi +++ b/pandas-stubs/io/sql.pyi @@ -115,7 +115,7 @@ def read_sql( | Mapping[str, Scalar] | Mapping[str, tuple[Scalar, ...]] | None - ) = ..., + ) = None, parse_dates: list[str] | dict[str, str] | dict[str, dict[str, Any]] | None = None, columns: list[str] | None = None, *, From ab0da363e4087c7c5fe5d4d77b0c0170df2a6399 Mon Sep 17 00:00:00 2001 From: Loic Diridollou Date: Tue, 18 Nov 2025 21:04:54 -0500 Subject: [PATCH 6/7] Update pandas-stubs/io/sql.pyi Co-authored-by: Yi-Fan Wang --- pandas-stubs/io/sql.pyi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi index 2e1a838c0..41178d62f 100644 --- a/pandas-stubs/io/sql.pyi +++ b/pandas-stubs/io/sql.pyi @@ -109,8 +109,7 @@ def read_sql( index_col: str | list[str] | None = None, coerce_float: bool = True, params: ( - list[Scalar] - | tuple[Scalar, ...] + Sequence[Scalar] | tuple[tuple[Scalar, ...], ...] | Mapping[str, Scalar] | Mapping[str, tuple[Scalar, ...]] From 3f4b010df5ff17c9c153e6e7be69c984bcabf2f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Diridollou?= Date: Tue, 18 Nov 2025 21:09:48 -0500 Subject: [PATCH 7/7] GH1317 PR feedback --- pandas-stubs/io/sql.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas-stubs/io/sql.pyi b/pandas-stubs/io/sql.pyi index 41178d62f..4d92623de 100644 --- a/pandas-stubs/io/sql.pyi +++ b/pandas-stubs/io/sql.pyi @@ -109,7 +109,7 @@ def read_sql( index_col: str | list[str] | None = None, coerce_float: bool = True, params: ( - Sequence[Scalar] + Sequence[Scalar] | tuple[tuple[Scalar, ...], ...] 
| Mapping[str, Scalar] | Mapping[str, tuple[Scalar, ...]]