Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pandas/_typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@
# numpy compatible types
NumpyValueArrayLike: TypeAlias = ScalarLike_co | npt.ArrayLike
# Name "npt._ArrayLikeInt_co" is not defined [name-defined]
NumpySorter: TypeAlias = npt._ArrayLikeInt_co | None # type: ignore[name-defined]
NumpySorter: TypeAlias = npt._ArrayLikeInt_co | None


P = ParamSpec("P")
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,7 @@ def _reconstruct_data(
values = cls._from_sequence(values, dtype=dtype) # type: ignore[assignment]

else:
values = values.astype(dtype, copy=False)
values = values.astype(dtype, copy=False) # type: ignore[assignment]

return values
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this can be fixed as follows (and you can remove the comments on lines 218-219): move the `return values` statement so that it is no longer at the end of the function — instead, return immediately after line 220 (`values = cls._from_sequence(...)`) and again after line 223 (`values = values.astype(...)`).

If mypy still complains about either statement, add a comment on that line containing the mypy error (as on lines 218-219).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure if I've done what you asked, but I ended up with these errors:

pandas/core/algorithms.py:232: error: Incompatible return value type (got "ExtensionArray", expected "ndarray[tuple[Any, ...], dtype[Any]]") [return-value]
pandas/core/algorithms.py:234: error: Incompatible return value type (got "ndarray[tuple[Any, ...], dtype[Any]]", expected "ExtensionArray") [return-value]

Please check whether the changes are correct.


Expand Down
2 changes: 1 addition & 1 deletion pandas/core/array_algos/quantile.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ def quantile_with_mask(
interpolation=interpolation,
)

result = np.asarray(result) # type: ignore[assignment]
result = np.asarray(result)
result = result.T

return result
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/_mixins.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ def view(self, dtype: Dtype | None = None) -> ArrayLike:

td64_values = arr.view(dtype)
return TimedeltaArray._simple_new(td64_values, dtype=dtype)
return arr.view(dtype=dtype)
return arr.view(dtype=dtype) # type: ignore[arg-type]

def take(
self,
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/_arrow_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def pyarrow_array_to_numpy_and_mask(
mask = pyarrow.BooleanArray.from_buffers(
pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
)
mask = np.asarray(mask) # type: ignore[assignment]
mask = np.asarray(mask)
else:
mask = np.ones(len(arr), dtype=bool)
return data, mask
4 changes: 2 additions & 2 deletions pandas/core/arrays/arrow/array.py
Original file line number Diff line number Diff line change
Expand Up @@ -657,7 +657,7 @@ def _box_pa_array(
):
arr_value = np.asarray(value, dtype=object)
# similar to isna(value) but exclude NaN, NaT, nat-like, nan-like
mask = is_pdna_or_none(arr_value) # type: ignore[assignment]
mask = is_pdna_or_none(arr_value)

try:
pa_array = pa.array(value, type=pa_type, mask=mask)
Expand Down Expand Up @@ -2738,7 +2738,7 @@ def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
dummies_dtype = np.bool_
dummies = np.zeros(n_rows * n_cols, dtype=dummies_dtype)
dummies[indices] = True
dummies = dummies.reshape((n_rows, n_cols)) # type: ignore[assignment]
dummies = dummies.reshape((n_rows, n_cols))
result = self._from_pyarrow_array(pa.array(list(dummies)))
return result, uniques_sorted.to_pylist()

Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/categorical.py
Original file line number Diff line number Diff line change
Expand Up @@ -1869,7 +1869,7 @@ def value_counts(self, dropna: bool = True) -> Series:
count = np.bincount(obs, minlength=ncat or 0)
else:
count = np.bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1) # type: ignore[assignment]
ix = np.append(ix, -1)

ix = coerce_indexer_dtype(ix, self.dtype.categories)
ix_categorical = self._from_backing_data(ix)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -804,7 +804,7 @@ def _add_offset(self, offset: BaseOffset) -> Self:
try:
res_values = offset._apply_array(values._ndarray)
if res_values.dtype.kind == "i":
res_values = res_values.view(values.dtype)
res_values = res_values.view(values.dtype) # type: ignore[arg-type]
except NotImplementedError:
if get_option("performance_warnings"):
warnings.warn(
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/string_.py
Original file line number Diff line number Diff line change
Expand Up @@ -764,7 +764,7 @@ def _cast_pointwise_result(self, values) -> ArrayLike:
result = super()._cast_pointwise_result(values)
if isinstance(result.dtype, StringDtype):
# Ensure we retain our same na_value/storage
result = result.astype(self.dtype) # type: ignore[call-overload]
result = result.astype(self.dtype)
return result

@classmethod
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/groupby/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -1886,7 +1886,7 @@ def _apply_filter(self, indices, dropna):
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T # type: ignore[assignment]
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered

Expand Down
8 changes: 4 additions & 4 deletions pandas/core/indexers/objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,8 +131,8 @@ def get_window_bounds(
if closed in ["left", "neither"]:
end -= 1

end = np.clip(end, 0, num_values) # type: ignore[assignment]
start = np.clip(start, 0, num_values) # type: ignore[assignment]
end = np.clip(end, 0, num_values)
start = np.clip(start, 0, num_values)

return start, end

Expand Down Expand Up @@ -402,7 +402,7 @@ def get_window_bounds(
start = np.arange(0, num_values, step, dtype="int64")
end = start + self.window_size
if self.window_size:
end = np.clip(end, 0, num_values) # type: ignore[assignment]
end = np.clip(end, 0, num_values)

return start, end

Expand Down Expand Up @@ -488,7 +488,7 @@ def get_window_bounds(
)
window_indices_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype( # type: ignore[assignment]
window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
np.int64, copy=False
)
start_arrays.append(window_indices.take(ensure_platform_int(start)))
Expand Down
5 changes: 1 addition & 4 deletions pandas/core/methods/describe.py
Original file line number Diff line number Diff line change
Expand Up @@ -353,14 +353,11 @@ def _refine_percentiles(
if percentiles is None:
return np.array([0.25, 0.5, 0.75])

# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
percentiles = np.asarray(percentiles)

# get them all to be in [0, 1]
validate_percentile(percentiles)

percentiles = np.asarray(percentiles)

# sort and check for duplicates
unique_pcts = np.unique(percentiles)
assert percentiles is not None
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/nanops.py
Original file line number Diff line number Diff line change
Expand Up @@ -653,7 +653,7 @@ def _mask_datetimelike_result(
axis_mask = mask.any(axis=axis)
# error: Unsupported target for indexed assignment ("Union[ndarray[Any, Any],
# datetime64, timedelta64]")
result[axis_mask] = iNaT # type: ignore[index]
result[axis_mask] = iNaT
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can remove the comment here since you removed the ignore

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done

else:
if mask.any():
return np.int64(iNaT).view(orig_values.dtype)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/reshape/encoding.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,7 @@ def get_empty_frame(data) -> DataFrame:

if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:] # type: ignore[assignment]
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype)

Expand Down
4 changes: 2 additions & 2 deletions pandas/core/reshape/merge.py
Original file line number Diff line number Diff line change
Expand Up @@ -1746,7 +1746,7 @@ def _maybe_coerce_merge_keys(self) -> None:

mask = ~np.isnan(lk)
match = lk == casted
if not match[mask].all():
if not match[mask].all(): # type: ignore[union-attr]
warnings.warn(
"You are merging on int and float "
"columns where the float values "
Expand All @@ -1766,7 +1766,7 @@ def _maybe_coerce_merge_keys(self) -> None:

mask = ~np.isnan(rk)
match = rk == casted
if not match[mask].all():
if not match[mask].all(): # type: ignore[union-attr]
warnings.warn(
"You are merging on int and float "
"columns where the float values "
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/util/hashing.py
Original file line number Diff line number Diff line change
Expand Up @@ -324,8 +324,8 @@ def _hash_ndarray(
)

codes, categories = factorize(vals, sort=False)
dtype = CategoricalDtype(categories=Index(categories), ordered=False)
cat = Categorical._simple_new(codes, dtype)
dtype = CategoricalDtype(categories=Index(categories), ordered=False) # type: ignore[assignment]
cat = Categorical._simple_new(codes, dtype) # type: ignore[arg-type]
return cat._hash_pandas_object(
encoding=encoding, hash_key=hash_key, categorize=False
)
Expand Down
6 changes: 3 additions & 3 deletions pandas/io/pytables.py
Original file line number Diff line number Diff line change
Expand Up @@ -3306,13 +3306,13 @@ def write_array(
self._handle.create_array(
self.group,
key,
value.asi8, # type: ignore[union-attr]
value.asi8, # type: ignore[attr-defined]
)

node = getattr(self.group, key)
# error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
# attribute "tz"
node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr]
node._v_attrs.tz = _get_tz(value.tz) # type: ignore[attr-defined]
Comment on lines 3308 to 3313
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

add mypy error in comments (both places)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

done

node._v_attrs.value_type = f"datetime64[{value.dtype.unit}]"
elif lib.is_np_dtype(value.dtype, "m"):
self._handle.create_array(self.group, key, value.view("i8"))
Expand Down Expand Up @@ -5196,7 +5196,7 @@ def _maybe_convert_for_string_atom(
):
if isinstance(bvalues.dtype, StringDtype):
# "ndarray[Any, Any]" has no attribute "to_numpy"
bvalues = bvalues.to_numpy() # type: ignore[union-attr]
bvalues = bvalues.to_numpy()
if bvalues.dtype != object:
return bvalues

Expand Down
Loading