Merged
2 changes: 1 addition & 1 deletion examples/asset-pricing_examples.ipynb
@@ -412,7 +412,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/asset-pricing_formulas.ipynb
@@ -139,7 +139,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/iv_absorbing-regression.ipynb
@@ -151,7 +151,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/iv_advanced-examples.ipynb
@@ -570,7 +570,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"nbsphinx": {
"allow_errors": true
2 changes: 1 addition & 1 deletion examples/iv_basic-examples.ipynb
@@ -805,7 +805,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/iv_using-formulas.ipynb
@@ -191,7 +191,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/panel_data-formats.ipynb
@@ -230,7 +230,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/panel_examples.ipynb
@@ -429,7 +429,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/panel_using-formulas.ipynb
@@ -182,7 +182,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/system_examples.ipynb
@@ -779,7 +779,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/system_formulas.ipynb
@@ -180,7 +180,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
2 changes: 1 addition & 1 deletion examples/system_three-stage-ls.ipynb
@@ -353,7 +353,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
},
"pycharm": {
"stem_cell": {
4 changes: 2 additions & 2 deletions linearmodels/asset_pricing/covariance.py
@@ -192,7 +192,7 @@ def cov(self) -> linearmodels.typing.data.Float64Array:
out = ji @ s @ ji.T
else:
j = self.jacobian
out = inv(j.T @ inv(s) @ j)
out = inv(j.T @ inv(s) @ j).astype(float, copy=False)
out = (scale / 2) * (out + out.T)
return out

@@ -371,4 +371,4 @@ def w(
moments = moments - moments.mean(0)[None, :]
out = self._kernel_cov(moments)

return inv(out)
return inv(out).astype(float, copy=False)
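
A minimal sketch of the `.astype(float, copy=False)` idiom these hunks introduce (illustrative matrix, not code from the PR). The intent, as I read it, is to narrow the static type of `inv`'s result to the package's float64 array alias; `copy=False` avoids allocating a new array when the dtype already matches.

```python
import numpy as np

s = np.array([[2.0, 0.3], [0.3, 1.0]])
# inv() on float64 input already returns float64; the astype call only
# pins down the dtype for static type checkers and copies nothing here.
s_inv = np.linalg.inv(s).astype(float, copy=False)
assert s_inv.dtype == np.float64
assert np.allclose(s_inv @ s, np.eye(2))
```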
6 changes: 3 additions & 3 deletions linearmodels/asset_pricing/model.py
@@ -442,7 +442,7 @@ def __init__(
self._sigma = np.asarray(sigma)
vals, vecs = np.linalg.eigh(sigma)
self._sigma_m12 = vecs @ np.diag(1.0 / np.sqrt(vals)) @ vecs.T
self._sigma_inv = np.linalg.inv(self._sigma)
self._sigma_inv = np.linalg.inv(self._sigma).astype(float, copy=False)

def __str__(self) -> str:
out = super().__str__()
@@ -966,10 +966,10 @@ def fit(
self.portfolios, self.factors, risk_free=self._risk_free
)
res = mod.fit()
betas = np.asarray(res.betas).ravel()
betas_1d = np.asarray(res.betas).ravel()
lam = np.asarray(res.risk_premia)
mu = self.factors.ndarray.mean(0)
sv = np.r_[betas, lam, mu][:, None]
sv = np.r_[betas_1d, lam, mu][:, None]
if starting is not None:
starting = np.asarray(starting)
if starting.ndim == 1:
6 changes: 3 additions & 3 deletions linearmodels/iv/__init__.py
@@ -1,6 +1,6 @@
from .absorbing import AbsorbingLS, Interaction # flake8: noqa
from .model import IV2SLS, IVGMM, IVGMMCUE, IVLIML # flake8: noqa
from .results import compare # flake8: noqa
from .absorbing import AbsorbingLS, Interaction
from .model import IV2SLS, IVGMM, IVGMMCUE, IVLIML
from .results import compare

__all__ = [
"IV2SLS",
8 changes: 4 additions & 4 deletions linearmodels/iv/absorbing.py
@@ -913,10 +913,10 @@ def _first_time_fit(
self._regressors_hash = areg.hash
self._constant_absorbed = self._has_constant_exog and areg_constant

dep = self._dependent.ndarray
dep = self._dependent.ndarray.astype(float, copy=False)
exog = cast(linearmodels.typing.data.Float64Array, self._exog.ndarray)

root_w = sqrt(self._weight_data.ndarray)
root_w = sqrt(self._weight_data.ndarray.astype(float, copy=False))
dep = root_w * dep
exog = root_w * exog
denom = root_w.T @ root_w
@@ -931,13 +931,13 @@

absorb_options["drop_singletons"] = False
algo = create(self._absorb_inter.cat, **absorb_options)
dep_exog = column_stack((dep, exog))
dep_exog = column_stack((dep, exog)).astype(float, copy=False)
resids = algo.residualize(dep_exog)
dep_resid = resids[:, :1]
exog_resid = resids[:, 1:]
else:
self._regressors = preconditioner(self._regressors)[0]
dep_exog = column_stack((dep, exog))
dep_exog = column_stack((dep, exog)).astype(float, copy=False)
resid = lsmr_annihilate(
self._regressors,
dep_exog,
4 changes: 2 additions & 2 deletions linearmodels/iv/covariance.py
@@ -104,8 +104,8 @@ def kernel_weight_quadratic_spectral(
w[0] = 0
return w

z = arange(n + 1) / float(bw)
w = 6 * pi * z / 5
z = arange(n + 1).astype(float) / float(bw)
w = cast(linearmodels.typing.data.FloatArray1D, 6 * pi * z / 5)
w[0] = 1
w[1:] = 3 / w[1:] ** 2 * (sin(w[1:]) / w[1:] - cos(w[1:]))

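
For reference, the weights built in this hunk follow the quadratic spectral kernel; writing z = j/bw and w = 6*pi*z/5, the expression `3 / w**2 * (sin(w)/w - cos(w))` is the standard form rewritten (my reading of the hunk, not text from the PR):

```latex
k_{QS}(z) = \frac{3}{w^{2}}\left(\frac{\sin w}{w} - \cos w\right)
          = \frac{25}{12\pi^{2}z^{2}}
            \left(\frac{\sin(6\pi z/5)}{6\pi z/5} - \cos\left(\frac{6\pi z}{5}\right)\right),
\qquad w = \frac{6\pi z}{5}.
```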
4 changes: 2 additions & 2 deletions linearmodels/iv/results.py
@@ -710,9 +710,9 @@ def diagnostics(self) -> DataFrame:

endog, exog, instr, weights = self.endog, self.exog, self.instr, self.weights
w = sqrt(weights.ndarray)
z = w * instr.ndarray
z = w * instr.ndarray.astype(float, copy=False)
nz = z.shape[1]
x = w * exog.ndarray
x = w * exog.ndarray.astype(float, copy=False)
ez = annihilate(z, x)
individual_results = self.individual
out_df = DataFrame(
14 changes: 10 additions & 4 deletions linearmodels/panel/data.py
@@ -195,9 +195,15 @@
if x.ndim == 2:
x = x.to_pandas()
else:
items: list[Hashable] = np.asarray(x.coords[x.dims[0]]).tolist()
major: list[Hashable] = np.asarray(x.coords[x.dims[1]]).tolist()
minor: list[Hashable] = np.asarray(x.coords[x.dims[2]]).tolist()
items: list[Hashable] = cast(
list[Hashable], np.asarray(x.coords[x.dims[0]]).tolist()
)
major: list[Hashable] = cast(
list[Hashable], np.asarray(x.coords[x.dims[1]]).tolist()
)
minor: list[Hashable] = cast(
list[Hashable], np.asarray(x.coords[x.dims[2]]).tolist()
)
values = x.values
x = panel_to_frame(values, items, major, minor, True)
except ImportError:
@@ -538,7 +544,7 @@
group: Literal["entity", "time", "both"],
weights: PanelData | None,
return_panel: Literal[False],
) -> linearmodels.typing.data.Float64Array: ... # noqa: E704
) -> linearmodels.typing.data.Float64Array: ...

Check notice: Code scanning / CodeQL

Statement has no effect (Note)

This statement has no effect.

Copilot Autofix (AI, 11 months ago)

To fix the problem, we need to remove the ellipsis (...) from the function overload definition. The ellipsis is not serving any purpose and its removal will not affect the existing functionality of the code. The function overload should be correctly defined without unnecessary placeholders.

Suggested changeset 1: linearmodels/panel/data.py

diff --git a/linearmodels/panel/data.py b/linearmodels/panel/data.py
--- a/linearmodels/panel/data.py
+++ b/linearmodels/panel/data.py
@@ -545,4 +545,4 @@
         weights: PanelData | None,
-        return_panel: Literal[False],
-    ) -> linearmodels.typing.data.Float64Array: ...
+        return_panel: Literal[False]
+    ) -> linearmodels.typing.data.Float64Array:
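
Side note on the CodeQL finding above (my reading, not part of the PR thread): in `typing.overload` stubs a bare `...` is the conventional placeholder body, which is why the diff keeps the ellipsis and only drops the `# noqa: E704` comment. A minimal sketch with illustrative names:

```python
from typing import overload

@overload
def first(x: list[int]) -> int: ...   # stub: "..." is the placeholder body
@overload
def first(x: list[str]) -> str: ...

def first(x):
    # single runtime implementation described by the stubs above
    return x[0]
```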

def demean(
self,
22 changes: 15 additions & 7 deletions linearmodels/panel/model.py
@@ -54,6 +54,7 @@
from linearmodels.shared.linalg import has_constant
from linearmodels.shared.typed_getters import get_panel_data_like
from linearmodels.shared.utility import AttrDict, ensure_unique_column, panel_to_frame
from linearmodels.typing import BoolArray
import linearmodels.typing.data

CovarianceEstimator = Union[
@@ -319,7 +320,9 @@ def __init__(
self._is_weighted = True
self._name = self.__class__.__name__
self.weights = self._adapt_weights(weights)
self._not_null = np.ones(self.dependent.values2d.shape[0], dtype=bool)
self._not_null: BoolArray = np.ones(
self.dependent.values2d.shape[0], dtype=bool
)
self._cov_estimators = CovarianceManager(
self.__class__.__name__,
HomoskedasticCovariance,
@@ -466,10 +469,11 @@ def _validate_data(self) -> None:
)

all_missing = np.any(np.isnan(y), axis=1) & np.all(np.isnan(x), axis=1)
missing = (
missing = np.asarray(
np.any(np.isnan(y), axis=1)
| np.any(np.isnan(x), axis=1)
| np.any(np.isnan(w), axis=1)
| np.any(np.isnan(w), axis=1),
dtype=bool,
)

missing_warning(np.asarray(all_missing ^ missing), stacklevel=4)
@@ -479,7 +483,7 @@ def _validate_data(self) -> None:
self.weights.drop(missing)

x = cast(linearmodels.typing.data.Float64Array, self.exog.values2d)
self._not_null = np.asarray(~missing)
self._not_null = cast(BoolArray, np.asarray(~missing))

w_df = self.weights.dataframe
if np.any(np.asarray(w_df) <= 0):
@@ -778,7 +782,9 @@ def _setup_clusters(
if cluster_entity:
group_ids_arr = self.dependent.entity_ids.squeeze()
name = "cov.cluster.entity"
group_ids = Series(group_ids_arr, index=self.dependent.index, name=name)
group_ids: Series[int] = Series(
group_ids_arr, index=self.dependent.index, name=name
)
if clusters_frame is not None:
clusters_frame[name] = group_ids
else:
@@ -2021,7 +2027,7 @@ def fit(
linearmodels.typing.data.Float64Array, np.sqrt(self.weights.values2d)
)
y_ex = root_w * self.dependent.values2d
mu_ex = 0
mu_ex = np.array(0.0, dtype=float)
if (
self.has_constant
or self.entity_effects
@@ -2469,7 +2475,9 @@ def _setup_clusters(
if cluster_entity:
group_ids = self.dependent.entity_ids.squeeze()
name = "cov.cluster.entity"
group_ids_s = Series(group_ids, index=self.dependent.index, name=name)
group_ids_s: Series[int] = Series(
group_ids, index=self.dependent.index, name=name
)
if clusters_frame is not None:
clusters_frame[name] = group_ids_s
else:
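
A small sketch of the missing-observation mask pattern added above (illustrative arrays, not the PR's data). Combining masks with `|` is already boolean at runtime; wrapping the result in `np.asarray(..., dtype=bool)` makes the declared `BoolArray` type explicit for the checker.

```python
import numpy as np
import numpy.typing as npt

y = np.array([[1.0, np.nan], [2.0, 3.0]])
x = np.array([[np.nan, 1.0], [2.0, 3.0]])
# Rows with any NaN in y or x; dtype=bool pins the boolean dtype explicitly.
missing: npt.NDArray[np.bool_] = np.asarray(
    np.any(np.isnan(y), axis=1) | np.any(np.isnan(x), axis=1),
    dtype=bool,
)
print(missing)  # [ True False]
```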
6 changes: 5 additions & 1 deletion linearmodels/system/_utility.py
@@ -308,7 +308,11 @@ def _compute_transform(self) -> None:
t, left = vecs[:, : k - c], vecs[:, k - c :]
q = self._qa[:, None]
a = q.T @ inv(left.T @ r.T) @ left.T
self._t, self._l, self._a = t, left, a
self._t, self._l, self._a = (
cast(linearmodels.typing.data.FloatArray2D, t),
cast(linearmodels.typing.data.FloatArray2D, left),
cast(linearmodels.typing.data.FloatArray2D, a),
)
self._computed = True

@property
8 changes: 5 additions & 3 deletions linearmodels/system/covariance.py
@@ -129,7 +129,7 @@ def _mvreg_cov(self) -> linearmodels.typing.data.Float64Array:
def _gls_cov(self) -> linearmodels.typing.data.Float64Array:
x = self._x
sigma = self._sigma
sigma_inv = inv(sigma)
sigma_inv = cast(linearmodels.typing.data.Float64Array, inv(sigma))

xpx = blocked_inner_prod(x, sigma_inv)
# Handles case where sigma_inv is not inverse of full_sigma
@@ -229,7 +229,7 @@ def __init__(
nobs = eps.shape[0]

if gls:
weights = inv(sigma)
weights = cast(linearmodels.typing.data.Float64Array, inv(sigma))
bigx = blocked_diag_product(x, weights)
e = eps.T.ravel()[:, None]
bigxe = bigx * e
@@ -258,7 +258,9 @@ def _cov(self, gls: bool) -> linearmodels.typing.data.Float64Array:
nobs = x[0].shape[0]
k = len(x)
sigma = self.sigma
weights = inv(sigma) if gls else eye(k)
weights = (
cast(linearmodels.typing.data.Float64Array, inv(sigma)) if gls else eye(k)
)
xpx = blocked_inner_prod(x, weights) / nobs
xeex = self._xeex()

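
To contrast the two devices used throughout these hunks (a sketch assuming `Float64Array` is an ndarray alias; the array here is illustrative): `typing.cast` is a static-typing assertion with no runtime effect, while `.astype(float, copy=False)` performs a real dtype check and conversion.

```python
from typing import cast
import numpy as np

sigma = np.array([[1.0, 0.2], [0.2, 1.0]])

# cast(): returns its argument unchanged; only the type checker sees a difference.
sigma_inv = cast(np.ndarray, np.linalg.inv(sigma))

# astype(): an actual dtype conversion, but no copy when already float64.
sigma_inv64 = np.linalg.inv(sigma).astype(float, copy=False)

assert sigma_inv.dtype == sigma_inv64.dtype == np.float64
```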
6 changes: 3 additions & 3 deletions linearmodels/system/model.py
@@ -146,7 +146,7 @@ def _parameters_from_xprod(
params_c = solve(xpx, xpy)
params = cons.t @ params_c + cons.a.T
else:
params = solve(xpx, xpy)
params = solve(xpx, xpy).astype(float, copy=False)
return params


@@ -760,7 +760,7 @@ def _gls_estimate(

if not full_cov:
sigma = np.diag(np.diag(sigma))
sigma_inv = inv(sigma)
sigma_inv = cast(linearmodels.typing.data.Float64Array, inv(sigma))

k = len(wy)

@@ -1045,7 +1045,7 @@ def _system_r2(
est_sigma = sigma
if not full_cov:
est_sigma = np.diag(np.diag(est_sigma))
est_sigma_inv = inv(est_sigma)
est_sigma_inv = cast(linearmodels.typing.data.Float64Array, inv(est_sigma))
nobs = wy[0].shape[0]
k = len(wy)
xpx = blocked_inner_prod(wi, est_sigma_inv)