From b10a928b2dc2f23d73afa01cf14dbbb0a18f06b4 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 11:28:13 +0100 Subject: [PATCH 01/10] fix deprecation of setting shape on a numpy array --- mne/_fiff/_digitization.py | 2 +- mne/_fiff/tag.py | 2 +- mne/beamformer/_rap_music.py | 6 +++--- mne/beamformer/tests/test_dics.py | 2 +- mne/beamformer/tests/test_external.py | 2 +- mne/beamformer/tests/test_lcmv.py | 2 +- mne/channels/montage.py | 4 ++-- mne/chpi.py | 4 ++-- mne/decoding/receptive_field.py | 2 +- mne/decoding/tests/test_receptive_field.py | 14 +++++++------- mne/decoding/transformer.py | 2 +- mne/epochs.py | 2 +- mne/event.py | 2 +- mne/filter.py | 10 +++++----- mne/forward/_compute_forward.py | 2 +- mne/forward/_lead_dots.py | 6 +++--- mne/forward/forward.py | 4 ++-- mne/forward/tests/test_field_interpolation.py | 2 +- mne/inverse_sparse/_gamma_map.py | 2 +- mne/inverse_sparse/mxne_inverse.py | 2 +- mne/io/bti/bti.py | 2 +- mne/io/ctf/ctf.py | 2 +- mne/io/eeglab/eeglab.py | 2 +- mne/io/fiff/raw.py | 2 +- mne/io/kit/kit.py | 2 +- mne/io/nirx/nirx.py | 2 +- mne/minimum_norm/inverse.py | 6 +++--- mne/minimum_norm/tests/test_inverse.py | 2 +- mne/morph.py | 2 +- mne/preprocessing/maxwell.py | 2 +- mne/report/tests/test_report.py | 2 +- mne/source_estimate.py | 2 +- mne/source_space/_source_space.py | 2 +- mne/stats/cluster_level.py | 10 +++++----- mne/tests/test_transforms.py | 2 +- mne/time_frequency/multitaper.py | 2 +- mne/time_frequency/psd.py | 2 +- mne/viz/_brain/_brain.py | 4 ++-- mne/viz/_brain/tests/test_brain.py | 2 +- mne/viz/backends/_utils.py | 2 +- mne/viz/tests/test_3d.py | 8 ++++---- 41 files changed, 68 insertions(+), 68 deletions(-) diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py index c198d606dee..b3dfc0997f4 100644 --- a/mne/_fiff/_digitization.py +++ b/mne/_fiff/_digitization.py @@ -335,7 +335,7 @@ def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): f"Only single coordinate frame in dig is supported, got {dig_coord_frames}" ) dig_ch_pos_location = np.array(dig_ch_pos_location) - dig_ch_pos_location.shape = (-1, 3) # empty will be (0, 3) + dig_ch_pos_location = dig_ch_pos_location.reshape(-1, 3) # empty will be (0, 3) return Bunch( nasion=fids.get("nasion", None), lpa=fids.get("lpa", None), diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py index 96fee81da20..f669e72abeb 100644 --- a/mne/_fiff/tag.py +++ b/mne/_fiff/tag.py @@ -177,7 +177,7 @@ def _read_matrix(fid, tag, shape, rlims): data = data.view(">c8") elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: data = data.view(">c16") - data.shape = dims + data = data.reshape(dims) else: # Find dimensions and return to the beginning of tag data ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item()) diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 2b8c1a4ee84..839a6b83338 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -68,9 +68,9 @@ def _apply_rap_music( phi_sig = eig_vectors[:, -n_dipoles:] n_orient = 3 if is_free_ori else 1 - G.shape = (G.shape[0], -1, n_orient) + G = G.reshape(G.shape[0], -1, n_orient) gain = forward["sol"]["data"].copy() - gain.shape = G.shape + gain = gain.reshape(G.shape) n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) gain_dip = np.empty((n_channels, n_dipoles)) @@ -122,7 +122,7 @@ def _apply_rap_music( sol = linalg.lstsq(A, M)[0] if n_orient == 3: X = sol[:, np.newaxis] * oris[:, :, np.newaxis] - X.shape = (-1, len(times)) + X = X.reshape(-1, len(times)) else: X = sol 
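For reference, the pattern applied throughout this first commit: in-place shape assignment (arr.shape = ...) is being deprecated by NumPy (per the changelog entry added later in this series, NumPy 2.5+), and each occurrence is rewritten as a rebind to arr.reshape(...). A minimal standalone sketch of the before/after, illustrative only and not part of the diff:

    import numpy as np

    x = np.arange(6)

    # Old pattern, now deprecated: mutate the shape in place. This never copied
    # data, and raised an AttributeError when the reshape could not be done
    # without a copy.
    # x.shape = (2, 3)

    # Replacement used in this commit: rebind the name to the reshaped array.
    # For contiguous input this returns a view; unlike shape assignment it may
    # silently copy otherwise (a later commit in this series tightens that with
    # copy=False).
    x = x.reshape(2, 3)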
diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index cebc0bb4057..c44883ad436 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -269,7 +269,7 @@ def test_make_dics(tmp_path, _load_forward, idx, whiten): exp=None, noise_cov=noise_cov, ) - G.shape = (n_channels, n_verts, n_orient) + G = G.reshape(n_channels, n_verts, n_orient) G = G.transpose(1, 2, 0).conj() # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/beamformer/tests/test_external.py b/mne/beamformer/tests/test_external.py index e4373693496..d02661d628e 100644 --- a/mne/beamformer/tests/test_external.py +++ b/mne/beamformer/tests/test_external.py @@ -98,7 +98,7 @@ def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): ft_fname = ft_data_path / ("ft_source_" + bf_type + "-vol.mat") stc_ft_data = pymatreader.read_mat(ft_fname)["stc"] if stc_ft_data.ndim == 1: - stc_ft_data.shape = (stc_ft_data.size, 1) + stc_ft_data = stc_ft_data.reshape(stc_ft_data.size, 1) if stc_mne.data.ndim == 2: signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True)) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 35b9943d54e..e8d81d8bd4a 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -1185,7 +1185,7 @@ def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): ) n_channels, n_sources = G.shape n_sources //= 3 - G.shape = (n_channels, n_sources, 3) + G = G.reshape(n_channels, n_sources, 3) G = G.transpose(1, 2, 0) # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 35fdbce917c..82c407bf552 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -973,9 +973,9 @@ def read_dig_hpts(fname, unit="mm"): label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "eeg" } hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "hpi"]) - hpi.shape = (-1, 3) # in case it's empty + hpi = hpi.reshape(-1, 3) # in case it's empty hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "extra"]) - hsp.shape = (-1, 3) # in case it's empty + hsp = hsp.reshape(-1, 3) # in case it's empty return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) diff --git a/mne/chpi.py b/mne/chpi.py index cc921a9843e..1337c8315a3 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -117,7 +117,7 @@ def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data.shape = (-1, 10) # ensure it's the right size even if empty + data = data.reshape(-1, 10) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return data @@ -1390,7 +1390,7 @@ def compute_chpi_locs( ) fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) fwd = np.dot(fwd, whitener.T) - fwd.shape = (guesses.shape[0], 3, -1) + fwd = fwd.reshape(guesses.shape[0], 3, -1) fwd = np.linalg.svd(fwd, full_matrices=False)[2] guesses = dict(rr=guesses, whitened_fwd_svd=fwd) del fwd, R diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index 0516adadb9a..19cbf8559e9 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -361,7 +361,7 @@ def predict(self, X): else: extra = 1 shape = shape[: self._y_dim + extra] - 
y_pred.shape = shape + y_pred = y_pred.reshape(shape) return y_pred def score(self, X, y): diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index db2209f4695..e1012850ce7 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -271,7 +271,7 @@ def test_time_delaying_fast_calc(n_jobs): smin, smax = 1, 2 X_del = _delay_time_series(X, smin, smax, 1.0) # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays) - X_del.shape = (X.shape[0], -1) + X_del = X_del.reshape(X.shape[0], -1) expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -282,7 +282,7 @@ def test_time_delaying_fast_calc(n_jobs): # all positive smin, smax = -2, -1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = X_del.reshape(X.shape[0], -1) expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -293,7 +293,7 @@ def test_time_delaying_fast_calc(n_jobs): # both sides smin, smax = -1, 1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = X_del.reshape(X.shape[0], -1) expected = np.array( [[2, 3, 0], [1, 2, 3], [0, 1, 2], [7, 11, 0], [5, 7, 11], [0, 5, 7]] ).T @@ -315,7 +315,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3, 5]]).T smin, smax = 0, 3 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = X_del.reshape(X.shape[0], -1) expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], [0, 0, 1, 2], [0, 0, 0, 1]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -328,7 +328,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3], [5, 7, 11]]).T smin, smax = 0, 2 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = X_del.reshape(X.shape[0], -1) expected = np.array( [[1, 2, 3], [0, 1, 2], [0, 0, 1], [5, 7, 11], [0, 5, 7], [0, 0, 5]] ).T @@ -366,7 +366,7 @@ def test_time_delaying_fast_calc(n_jobs): x_yt_true = einsum("tfd,to->ofd", X_del, y) x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax)) - X_del.shape = (X.shape[0], -1) + X_del = X_del.reshape(X.shape[0], -1) x_xt_true = np.dot(X_del.T, X_del).T assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax)) @@ -388,7 +388,7 @@ def test_receptive_field_1d(n_jobs): y[delay:] = x[:-delay, 0] slims += [(1, 2)] for ndim in (1, 2): - y.shape = (y.shape[0],) + (1,) * (ndim - 1) + y = y.reshape((y.shape[0],) + (1,) * (ndim - 1)) for slim in slims: smin, smax = slim lap = TimeDelayingRidge( diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index b02ff8d8deb..1d9aa9c8001 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -118,7 +118,7 @@ def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1])) X = func(X, *args, **kwargs) if return_result: - X.shape = (orig_shape[0], orig_shape[2], orig_shape[1]) + X = X.reshape(orig_shape[0], orig_shape[2], orig_shape[1]) X = X.transpose(0, 2, 1) return X diff --git a/mne/epochs.py b/mne/epochs.py index 4bd94ffa2c5..0fab6f2f706 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -4479,7 +4479,7 @@ def _get_epoch_from_raw(self, idx, verbose=None): else: data = data.astype(np.float64) - data.shape = 
raw.epoch_shape + data = data.reshape(raw.epoch_shape) data *= raw.cals return data diff --git a/mne/event.py b/mne/event.py index ba2c4f0120c..7d04548f081 100644 --- a/mne/event.py +++ b/mne/event.py @@ -181,7 +181,7 @@ def _read_events_fif(fid, tree): if event_list is None: raise ValueError("Could not find any events") else: - event_list.shape = (-1, 3) + event_list = event_list.reshape(-1, 3) for d in events["directory"]: kind = d.kind pos = d.pos diff --git a/mne/filter.py b/mne/filter.py index 8d5d3e48ea7..5f571899b14 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -349,7 +349,7 @@ def _overlap_add_filter( for pp, p in enumerate(picks): x[p] = data_new[pp] - x.shape = orig_shape + x = x.reshape(orig_shape) return x @@ -404,7 +404,7 @@ def _prep_for_filtering(x, copy, picks=None): orig_shape = x.shape x = np.atleast_2d(x) picks = _picks_to_idx(x.shape[-2], picks) - x.shape = (np.prod(x.shape[:-1]), x.shape[-1]) + x = x.reshape(np.prod(x.shape[:-1]), x.shape[-1]) if len(orig_shape) == 3: n_epochs, n_channels, n_times = orig_shape offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), len(picks)) @@ -577,7 +577,7 @@ def _iir_filter(x, iir_params, picks, n_jobs, copy, phase="zero"): data_new = parallel(p_fun(x=x[p]) for p in picks) for pp, p in enumerate(picks): x[p] = data_new[pp] - x.shape = orig_shape + x = x.reshape(orig_shape) return x @@ -1657,7 +1657,7 @@ def _mt_spectrum_proc( ) logger.info(f"{kind} notch frequencies (Hz):\n{found_freqs}") - x.shape = orig_shape + x = x.reshape(orig_shape) return x @@ -2952,5 +2952,5 @@ def _iir_pad_apply_unpad(x, *, func, padlen, padtype, **kwargs): x_ext = _smart_pad(x_ext, (padlen, padlen), padtype) x_ext = func(x=x_ext, axis=-1, padlen=0, **kwargs) this_x[:] = x_ext[padlen : len(x_ext) - padlen] - x_out.shape = x.shape + x_out = x_out.reshape(x.shape) return x_out diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py index 8ba15def389..5166d9a7ae7 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -457,7 +457,7 @@ def _do_prim_curr(rr, coils): for start, stop in _rr_bounds(rr, chunk=1): pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) pp *= ws - pp.shape = (3 * (stop - start), -1) + pp = pp.reshape(3 * (stop - start), -1) pc[3 * start : 3 * stop] = [ bincount(bins, this_pp, bins[-1] + 1) for this_pp in pp ] diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py index 10183fc6280..82fe12d8db0 100644 --- a/mne/forward/_lead_dots.py +++ b/mne/forward/_lead_dots.py @@ -86,7 +86,7 @@ def _get_legen_table( logger.info(f"Reading Legendre{extra_str} table...") with _open_lock(fname, "rb", buffering=0) as fid: lut = np.fromfile(fid, np.float32) - lut.shape = lut_shape + lut = lut.reshape(lut_shape) # we need this for the integration step n_fact = np.arange(1, n_coeff, dtype=float) @@ -265,7 +265,7 @@ def _fast_sphere_dot_r0( sums = _comp_sums_meg( beta.flatten(), ct.flatten(), lut, n_fact, volume_integral ) - sums.shape = (4,) + beta.shape + sums = sums.reshape((4,) + beta.shape) # Accumulate the result, a little bit streamlined version # cosmags1 = cosmags1[:, np.newaxis, :] @@ -296,7 +296,7 @@ def _fast_sphere_dot_r0( result *= r else: # 'eeg' result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact) - result.shape = beta.shape + result = result.reshape(beta.shape) # Give it a finishing touch! 
result *= _eeg_const result /= lr1lr2 diff --git a/mne/forward/forward.py b/mne/forward/forward.py index e8a7d62a3ce..1dfa8c94372 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -1430,13 +1430,13 @@ def compute_depth_prior( # Gk = G[:, 3 * k:3 * (k + 1)] # x = np.dot(Gk.T, Gk) # d[k] = linalg.svdvals(x)[0] - G.shape = (G.shape[0], -1, 3) + G = G.reshape(G.shape[0], -1, 3) d = np.linalg.norm( np.einsum("svj,svk->vjk", G, G), # vector dot prods ord=2, # ord=2 spectral (largest s.v.) axis=(1, 2), ) - G.shape = (G.shape[0], -1) + G = G.reshape(G.shape[0], -1) # XXX Currently the fwd solns never have "patch_areas" defined if patch_areas is not None: diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py index 57b204d97af..25f357b3280 100644 --- a/mne/forward/tests/test_field_interpolation.py +++ b/mne/forward/tests/test_field_interpolation.py @@ -85,7 +85,7 @@ def test_legendre_val(): ctheta = rng.rand(20, 30) * 2.0 - 1.0 beta = rng.rand(20, 30) * 0.8 c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact) - c1.shape = beta.shape + c1 = c1.reshape(beta.shape) # compare to numpy n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis] diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 35fd158f3e0..3d869a88bab 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -306,7 +306,7 @@ def gamma_map( X_xyz = np.zeros((len(active_src), 3, X.shape[1]), dtype=X.dtype) idx = np.searchsorted(active_src, idx) X_xyz[idx, offset, :] = X - X_xyz.shape = (len(active_src) * 3, X.shape[1]) + X_xyz = X_xyz.reshape(len(active_src) * 3, X.shape[1]) X = X_xyz active_set = (active_src[:, np.newaxis] * 3 + np.arange(3)).ravel() source_weighting[source_weighting == 0] = 1 # zeros diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index c3ccddbf7cd..bd8604ba48c 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -253,7 +253,7 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split.shape = (len(active_idx), n_dip_per_pos, len(times)) + gof_split = gof_split.reshape(len(active_idx), n_dip_per_pos, len(times)) gof_split = gof_split.sum(1) assert (gof_split < 100).all() assert gof_split.shape == (len(active_idx), len(times)) diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index a992d3c7694..8835da4759c 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -1041,7 +1041,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): block = np.fromfile(fid, dtype, count) sample_stop = sample_start + count // n_channels shape = (sample_stop - sample_start, bti_info["total_chans"]) - block.shape = shape + block = block.reshape(shape) data_view = data[:, sample_start:sample_stop] one = np.empty(block.shape[::-1]) diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index 971ac51c2f6..93f1abd0640 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -207,7 +207,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): pos += np.int64(samp_offset) * si["n_chan"] * 4 fid.seek(pos, 0) this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read) - this_data.shape = (si["n_chan"], n_read) + this_data = this_data.reshape(si["n_chan"], n_read) this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]] data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]] 
_mult_cal_one(data_view, this_data, idx, cals, mult) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 497a7eeaf8f..52001b62e55 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -187,7 +187,7 @@ def _get_montage_information(eeg, get_pos, *, montage_units): _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) if pos_ch_names: pos_array = np.array(pos, float) - pos_array.shape = (-1, 3) + pos_array = pos_array.reshape(-1, 3) # roughly estimate head radius and check if its reasonable is_nan_pos = np.isnan(pos).any(axis=1) diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index d4e1e40a7aa..ac3f6d86c24 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -424,7 +424,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): fid.seek(ent.pos + 16, 0) one = _call_dict[ent.type](fid, ent, shape=None, rlims=None) try: - one.shape = (nsamp, nchan) + one = one.reshape((nsamp, nchan)) except AttributeError: # one is None n_bad += picksamp else: diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 4a783518344..72ec6492174 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -672,7 +672,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]["offset"]) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) - sensitivity.shape = (channel_count, 2) + sensitivity = sensitivity.reshape(channel_count, 2) channel_offset, channel_gain = sensitivity.T assert (channel_offset == 0).all() # otherwise we have a problem diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index eb4101be0af..419d7c8cd52 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -567,7 +567,7 @@ def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None) if replace is not None: data = replace(data) x = np.fromstring(data, float, sep=sep) - x.shape = (stop - start, -1) + x = x.reshape(stop - start, -1) x = x[:, cols] return x diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index da770744881..cabca4b8ef3 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -835,8 +835,8 @@ def _assemble_kernel(inv, label, method, pick_ori, use_cps=True, verbose=None): # No need to rotate source_cov because it should be uniform # (loose=1., and depth weighting is uniform across columns) offset = sl.stop - eigen_leads.shape = (-1, eigen_leads.shape[2]) - source_nn.shape = (-1, 3) + eigen_leads = eigen_leads.reshape(-1, eigen_leads.shape[2]) + source_nn = source_nn.reshape(-1, 3) if pick_ori == "normal": if not inv["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI: @@ -1673,7 +1673,7 @@ def apply_inverse_cov( sol = cov.data[sel][:, sel] @ K.T sol = np.sum(K * sol.T, axis=1, keepdims=True) # Reshape back to (n_src, ..., 1) - sol.shape = stc.data.shape[:-1] + (1,) + sol = sol.reshape(stc.data.shape[:-1] + (1,)) stc = stc.__class__(sol, stc.vertices, stc.tmin, stc.tstep, stc.subject) if combine: # combine the three directions logger.info(" Combining the current components...") diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index d5eec6e7f91..245314449a3 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -1686,7 +1686,7 @@ def _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori): assert ori.shape == (ori.shape[0], 3) ori = ori[max_idx] assert ori.shape == 
(max_idx.size, 3) - ori.shape = (max_idx.size // 3, 3, 3) + ori = ori.reshape(max_idx.size // 3, 3, 3) dots = np.abs(np.diagonal(ori, axis1=1, axis2=2)) mu = np.mean(dots) assert lower_ori <= mu <= upper_ori, mu diff --git a/mne/morph.py b/mne/morph.py index e2a48350f1b..3863280aaf9 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -1556,7 +1556,7 @@ def _apply_morph_data(morph, stc_from): data[to_sl] = morph.morph_mat @ data_from[from_sl] assert to_used.all() assert from_used.all() - data.shape = (data.shape[0],) + stc_from.data.shape[1:] + data = data.reshape((data.shape[0],) + stc_from.data.shape[1:]) klass = stc_from.__class__ stc_to = klass(data, vertices_to, stc_from.tmin, stc_from.tstep, morph.subject_to) return stc_to diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py index 38a8f1c59f6..8f071abd22a 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -2787,7 +2787,7 @@ def find_bad_channels_maxwell( n = stop - start flat_stop = n - (n % flat_step) data = chunk_raw.get_data(good_meg_picks, 0, flat_stop) - data.shape = (data.shape[0], -1, flat_step) + data = data.reshape(data.shape[0], -1, flat_step) delta = np.std(data, axis=-1).min(-1) # min std across segments # We may want to return this later if `return_scores=True`. diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 6481d9e8e31..2ec57d9382b 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -507,7 +507,7 @@ def test_add_bem_n_jobs(n_jobs, monkeypatch): ) assert imgs.ndim == 4 # images, h, w, rgba assert len(imgs) == 6 - imgs.shape = (len(imgs), -1) + imgs = imgs.reshape(len(imgs), -1) norms = np.linalg.norm(imgs, axis=-1) # should have down-up-down shape corr = np.corrcoef(norms, np.hanning(len(imgs)))[0, 1] diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 689022fb326..6f875253cc4 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3729,7 +3729,7 @@ def _gen_extract_label_time_course( assert vertidx.shape[1] == stc.data.shape[0] this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data - this_data.shape = (this_data.shape[0],) + stc.data.shape[1:] + this_data = this_data.reshape((this_data.shape[0],) + stc.data.shape[1:]) else: this_data = stc.data[vertidx] label_tc[i] = func(flip, this_data) diff --git a/mne/source_space/_source_space.py b/mne/source_space/_source_space.py index 74ffc7a44f2..d6b17d29da0 100644 --- a/mne/source_space/_source_space.py +++ b/mne/source_space/_source_space.py @@ -2317,7 +2317,7 @@ def _make_volume_source_space( checks = np.where(neigh >= 0)[0] removes = np.logical_not(np.isin(checks, sp["vertno"])) neigh[checks[removes]] = -1 - neigh.shape = old_shape + neigh = neigh.reshape(old_shape) neigh = neigh.T # Thought we would need this, but C code keeps -1 vertices, so we will: # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])] diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index 7c4c8b40435..8733104b18d 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -695,7 +695,7 @@ def _do_permutations( # The stat should have the same shape as the samples for no adj. if adjacency is None: - t_obs_surr.shape = sample_shape + t_obs_surr = t_obs_surr.reshape(sample_shape) # Find cluster on randomized stats out = _find_clusters( @@ -783,7 +783,7 @@ def _do_1samp_permutations( # The stat should have the same shape as the samples for no adj. 
if adjacency is None: - t_obs_surr.shape = sample_shape + t_obs_surr = t_obs_surr.reshape(sample_shape) # Find cluster on randomized stats out = _find_clusters( @@ -974,7 +974,7 @@ def _permutation_cluster_test( f"compatible with the sample shape {sample_shape}" ) if adjacency is None or adjacency is False: - t_obs.shape = sample_shape + t_obs = t_obs.reshape(sample_shape) if exclude is not None: include = np.logical_not(exclude) @@ -1001,7 +1001,7 @@ def _permutation_cluster_test( clusters, cluster_stats = out # The stat should have the same shape as the samples - t_obs.shape = sample_shape + t_obs = t_obs.reshape(sample_shape) # For TFCE, return the "adjusted" statistic instead of raw scores # and for clusters, each point gets treated independently @@ -1113,7 +1113,7 @@ def _permutation_cluster_test( for ti in to_remove: step_down_include[clusters[ti]] = False if adjacency is None and adjacency is not False: - step_down_include.shape = sample_shape + step_down_include = step_down_include.reshape(sample_shape) n_step_downs += 1 if step_down_p > 0: a_text = "additional " if n_step_downs > 1 else "" diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py index 69f8160f3c1..e7500480e21 100644 --- a/mne/tests/test_transforms.py +++ b/mne/tests/test_transforms.py @@ -76,7 +76,7 @@ def test_tps(): az = np.linspace(0.0, 2 * np.pi, 20, endpoint=False) pol = np.linspace(0, np.pi, 12)[1:-1] sph = np.array(np.meshgrid(1, az, pol, indexing="ij")) - sph.shape = (3, -1) + sph = sph.reshape(3, -1) assert_equal(sph.shape[1], 200) source = _sph_to_cart(sph.T) destination = source.copy() diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index 1c1a3baf238..b04fc9e243c 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -455,7 +455,7 @@ def psd_array_multitaper( # Combining/reshaping to original data shape last_dims = (n_freqs,) if output == "power" else (n_tapers, n_freqs) - psd.shape = dshape + last_dims + psd = psd.reshape(dshape + last_dims) if ndim_in == 1: psd = psd[0] diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py index 383ccc9f0f9..83f4296553c 100644 --- a/mne/time_frequency/psd.py +++ b/mne/time_frequency/psd.py @@ -312,5 +312,5 @@ def func(*args, **kwargs): if bad_ch.any(): psds[bad_ch] = np.nan - psds.shape = shape + psds = psds.reshape(shape) return psds, freqs diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 04321fa0503..cbf00c2138b 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -2230,7 +2230,7 @@ def add_label( if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].faces.ravel(), keep_idx) - keep_idx.shape = self.geo[hemi].faces.shape + keep_idx = keep_idx.reshape(self.geo[hemi].faces.shape) keep_idx = self.geo[hemi].faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 @@ -3978,7 +3978,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx.shape = self.geo[hemi].orig_faces.shape + keep_idx = keep_idx.reshape(self.geo[hemi].orig_faces.shape) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index e3c7559ffa0..7a5a82760dc 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ 
b/mne/viz/_brain/tests/test_brain.py @@ -1466,7 +1466,7 @@ def _create_testing_brain( stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = rng.rand( stc_data.size // 20 ) - stc_data.shape = (n_verts, n_time) + stc_data = stc_data.reshape(n_verts, n_time) if diverging: stc_data -= 0.5 stc = klass(stc_data, vertices, 1, 1) diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 467f5cb15e7..8f5738e7839 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -355,7 +355,7 @@ def _pixmap_to_ndarray(pixmap): if hasattr(ptr, "setsize"): # PyQt ptr.setsize(count) data = np.frombuffer(ptr, dtype=np.uint8, count=count).copy() - data.shape = (img.height(), img.width(), 4) + data = data.reshape(img.height(), img.width(), 4) return data / 255.0 diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index ab24e6a70db..8f846e13192 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -135,7 +135,7 @@ def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data.shape = (n_verts, n_time) + stc_data = stc_data.reshape(n_verts, n_time) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" @@ -942,7 +942,7 @@ def test_process_clim_plot(renderer_interactive, brain_gc): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.random.RandomState(0).rand(n_verts * n_time) - stc_data.shape = (n_verts, n_time) + stc_data = stc_data.reshape(n_verts, n_time) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") # Test for simple use cases @@ -1064,7 +1064,7 @@ def test_stc_mpl(): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.ones(n_verts * n_time) - stc_data.shape = (n_verts, n_time) + stc_data = stc_data.reshape(n_verts, n_time) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") stc.plot( subjects_dir=subjects_dir, @@ -1396,7 +1396,7 @@ def test_link_brains(renderer_interactive): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data.shape = (n_verts, n_time) + stc_data = stc_data.reshape(n_verts, n_time) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" From 84b6b9d25cb07cc0484e8a4ef8591ef52088399a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 10:30:04 +0000 Subject: [PATCH 02/10] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- mne/source_estimate.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 6f875253cc4..2b9dfef5d6c 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3729,7 +3729,9 @@ def _gen_extract_label_time_course( assert vertidx.shape[1] == stc.data.shape[0] this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data - this_data = this_data.reshape((this_data.shape[0],) + stc.data.shape[1:]) + this_data = this_data.reshape( + (this_data.shape[0],) + stc.data.shape[1:] + ) else: this_data = stc.data[vertidx] label_tc[i] = func(flip, this_data) From 683a16895252579efb612d5762fdbeb1c1d43e0b Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 11:34:30 +0100 Subject: [PATCH 03/10] add changelog entry --- doc/changes/dev/13585.other.rst | 1 + 1 file changed, 1 
insertion(+) create mode 100644 doc/changes/dev/13585.other.rst diff --git a/doc/changes/dev/13585.other.rst b/doc/changes/dev/13585.other.rst new file mode 100644 index 00000000000..bb62007ee71 --- /dev/null +++ b/doc/changes/dev/13585.other.rst @@ -0,0 +1 @@ +Fix deprecation of setting a shape on an array directly in ``numpy`` 2.5+, by `Mathieu Scheltienne`_. From a68105ccaf55d5131da4ec8b286c526e342fdf6d Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 14:55:12 +0100 Subject: [PATCH 04/10] use copy=False and filter warnings now that PRs are open in VTK and nitime --- mne/_fiff/_digitization.py | 2 +- mne/_fiff/tag.py | 2 +- mne/beamformer/_rap_music.py | 6 +++--- mne/beamformer/tests/test_dics.py | 2 +- mne/beamformer/tests/test_external.py | 2 +- mne/beamformer/tests/test_lcmv.py | 2 +- mne/channels/montage.py | 4 ++-- mne/chpi.py | 4 ++-- mne/conftest.py | 3 +++ mne/decoding/receptive_field.py | 2 +- mne/decoding/tests/test_receptive_field.py | 14 +++++++------- mne/decoding/transformer.py | 2 +- mne/epochs.py | 2 +- mne/event.py | 2 +- mne/filter.py | 10 +++++----- mne/forward/_compute_forward.py | 2 +- mne/forward/_lead_dots.py | 6 +++--- mne/forward/forward.py | 4 ++-- mne/forward/tests/test_field_interpolation.py | 2 +- mne/inverse_sparse/_gamma_map.py | 2 +- mne/inverse_sparse/mxne_inverse.py | 2 +- mne/io/bti/bti.py | 2 +- mne/io/ctf/ctf.py | 2 +- mne/io/eeglab/eeglab.py | 4 ++-- mne/io/fiff/raw.py | 2 +- mne/io/kit/kit.py | 10 +++++----- mne/io/nirx/nirx.py | 2 +- mne/minimum_norm/inverse.py | 6 +++--- mne/minimum_norm/tests/test_inverse.py | 2 +- mne/morph.py | 2 +- mne/preprocessing/maxwell.py | 2 +- mne/report/tests/test_report.py | 2 +- mne/source_estimate.py | 2 +- mne/source_space/_source_space.py | 2 +- mne/stats/cluster_level.py | 10 +++++----- mne/tests/test_transforms.py | 2 +- mne/time_frequency/multitaper.py | 2 +- mne/time_frequency/psd.py | 2 +- mne/viz/_brain/_brain.py | 4 ++-- mne/viz/_brain/tests/test_brain.py | 2 +- mne/viz/backends/_utils.py | 2 +- mne/viz/tests/test_3d.py | 8 ++++---- 42 files changed, 76 insertions(+), 73 deletions(-) diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py index b3dfc0997f4..dec2f57c4d2 100644 --- a/mne/_fiff/_digitization.py +++ b/mne/_fiff/_digitization.py @@ -335,7 +335,7 @@ def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): f"Only single coordinate frame in dig is supported, got {dig_coord_frames}" ) dig_ch_pos_location = np.array(dig_ch_pos_location) - dig_ch_pos_location = dig_ch_pos_location.reshape(-1, 3) # empty will be (0, 3) + dig_ch_pos_location = dig_ch_pos_location.reshape((-1, 3), copy=False) return Bunch( nasion=fids.get("nasion", None), lpa=fids.get("lpa", None), diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py index f669e72abeb..bd6cf4b588c 100644 --- a/mne/_fiff/tag.py +++ b/mne/_fiff/tag.py @@ -177,7 +177,7 @@ def _read_matrix(fid, tag, shape, rlims): data = data.view(">c8") elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: data = data.view(">c16") - data = data.reshape(dims) + data = data.reshape(dims, copy=False) else: # Find dimensions and return to the beginning of tag data ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item()) diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 839a6b83338..1a2eac63673 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -68,9 +68,9 @@ def _apply_rap_music( phi_sig = eig_vectors[:, -n_dipoles:] n_orient = 3 if is_free_ori else 1 - G = G.reshape(G.shape[0], -1, 
n_orient) + G = G.reshape((G.shape[0], -1, n_orient), copy=False) gain = forward["sol"]["data"].copy() - gain = gain.reshape(G.shape) + gain = gain.reshape(G.shape, copy=False) n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) gain_dip = np.empty((n_channels, n_dipoles)) @@ -122,7 +122,7 @@ def _apply_rap_music( sol = linalg.lstsq(A, M)[0] if n_orient == 3: X = sol[:, np.newaxis] * oris[:, :, np.newaxis] - X = X.reshape(-1, len(times)) + X = X.reshape((-1, len(times)), copy=False) else: X = sol diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index c44883ad436..302ad2d7d7e 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -269,7 +269,7 @@ def test_make_dics(tmp_path, _load_forward, idx, whiten): exp=None, noise_cov=noise_cov, ) - G = G.reshape(n_channels, n_verts, n_orient) + G = G.reshape((n_channels, n_verts, n_orient), copy=False) G = G.transpose(1, 2, 0).conj() # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/beamformer/tests/test_external.py b/mne/beamformer/tests/test_external.py index d02661d628e..21dde3df458 100644 --- a/mne/beamformer/tests/test_external.py +++ b/mne/beamformer/tests/test_external.py @@ -98,7 +98,7 @@ def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): ft_fname = ft_data_path / ("ft_source_" + bf_type + "-vol.mat") stc_ft_data = pymatreader.read_mat(ft_fname)["stc"] if stc_ft_data.ndim == 1: - stc_ft_data = stc_ft_data.reshape(stc_ft_data.size, 1) + stc_ft_data = stc_ft_data.reshape((stc_ft_data.size, 1), copy=False) if stc_mne.data.ndim == 2: signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True)) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index e8d81d8bd4a..0be455ba494 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -1185,7 +1185,7 @@ def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): ) n_channels, n_sources = G.shape n_sources //= 3 - G = G.reshape(n_channels, n_sources, 3) + G = G.reshape((n_channels, n_sources, 3), copy=False) G = G.transpose(1, 2, 0) # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 82c407bf552..1c323c6fb70 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -973,9 +973,9 @@ def read_dig_hpts(fname, unit="mm"): label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "eeg" } hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "hpi"]) - hpi = hpi.reshape(-1, 3) # in case it's empty + hpi = hpi.reshape((-1, 3), copy=False) # in case it's empty hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "extra"]) - hsp = hsp.reshape(-1, 3) # in case it's empty + hsp = hsp.reshape((-1, 3), copy=False) # in case it's empty return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) diff --git a/mne/chpi.py b/mne/chpi.py index 1337c8315a3..f88c4d28ea2 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -117,7 +117,7 @@ def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data = data.reshape(-1, 10) # ensure it's the right size even if empty + data = data.reshape((-1, 10), copy=False) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return 
data @@ -1390,7 +1390,7 @@ def compute_chpi_locs( ) fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) fwd = np.dot(fwd, whitener.T) - fwd = fwd.reshape(guesses.shape[0], 3, -1) + fwd = fwd.reshape((guesses.shape[0], 3, -1), copy=False) fwd = np.linalg.svd(fwd, full_matrices=False)[2] guesses = dict(rr=guesses, whitened_fwd_svd=fwd) del fwd, R diff --git a/mne/conftest.py b/mne/conftest.py index 5a7fa4fed40..b1d18d6805f 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -207,6 +207,9 @@ def pytest_configure(config: pytest.Config): ignore:^'.*' deprecated - use '.*'$:DeprecationWarning # dipy ignore:'where' used without 'out', expect .*:UserWarning + # VTK <-> NumPy 2.5 (https://gitlab.kitware.com/vtk/vtk/-/merge_requests/12796) + # nitime <-> NumPy 2.5 (https://github.com/nipy/nitime/pull/236) + ignore:Setting the shape on a NumPy array has been deprecated.*:DeprecationWarning """ # noqa: E501 for warning_line in warning_lines.split("\n"): warning_line = warning_line.strip() diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index 19cbf8559e9..53fb7558599 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -361,7 +361,7 @@ def predict(self, X): else: extra = 1 shape = shape[: self._y_dim + extra] - y_pred = y_pred.reshape(shape) + y_pred = y_pred.reshape(shape, copy=False) return y_pred def score(self, X, y): diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index e1012850ce7..251db07d3e6 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -271,7 +271,7 @@ def test_time_delaying_fast_calc(n_jobs): smin, smax = 1, 2 X_del = _delay_time_series(X, smin, smax, 1.0) # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays) - X_del = X_del.reshape(X.shape[0], -1) + X_del = X_del.reshape((X.shape[0], -1), copy=False) expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -282,7 +282,7 @@ def test_time_delaying_fast_calc(n_jobs): # all positive smin, smax = -2, -1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape(X.shape[0], -1) + X_del = X_del.reshape((X.shape[0], -1), copy=False) expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -293,7 +293,7 @@ def test_time_delaying_fast_calc(n_jobs): # both sides smin, smax = -1, 1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape(X.shape[0], -1) + X_del = X_del.reshape((X.shape[0], -1), copy=False) expected = np.array( [[2, 3, 0], [1, 2, 3], [0, 1, 2], [7, 11, 0], [5, 7, 11], [0, 5, 7]] ).T @@ -315,7 +315,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3, 5]]).T smin, smax = 0, 3 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape(X.shape[0], -1) + X_del = X_del.reshape((X.shape[0], -1), copy=False) expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], [0, 0, 1, 2], [0, 0, 0, 1]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -328,7 +328,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3], [5, 7, 11]]).T smin, smax = 0, 2 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape(X.shape[0], -1) + X_del = X_del.reshape((X.shape[0], -1), copy=False) expected = np.array( [[1, 2, 3], [0, 1, 2], [0, 0, 1], [5, 7, 11], [0, 5, 7], [0, 0, 5]] ).T @@ -366,7 +366,7 @@ def 
test_time_delaying_fast_calc(n_jobs): x_yt_true = einsum("tfd,to->ofd", X_del, y) x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax)) - X_del = X_del.reshape(X.shape[0], -1) + X_del = X_del.reshape((X.shape[0], -1), copy=False) x_xt_true = np.dot(X_del.T, X_del).T assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax)) @@ -388,7 +388,7 @@ def test_receptive_field_1d(n_jobs): y[delay:] = x[:-delay, 0] slims += [(1, 2)] for ndim in (1, 2): - y = y.reshape((y.shape[0],) + (1,) * (ndim - 1)) + y = y.reshape((y.shape[0],) + (1,) * (ndim - 1), copy=False) for slim in slims: smin, smax = slim lap = TimeDelayingRidge( diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index 1d9aa9c8001..f004f6aa268 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -118,7 +118,7 @@ def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1])) X = func(X, *args, **kwargs) if return_result: - X = X.reshape(orig_shape[0], orig_shape[2], orig_shape[1]) + X = X.reshape((orig_shape[0], orig_shape[2], orig_shape[1]), copy=False) X = X.transpose(0, 2, 1) return X diff --git a/mne/epochs.py b/mne/epochs.py index 0fab6f2f706..6f2fab86dd2 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -4479,7 +4479,7 @@ def _get_epoch_from_raw(self, idx, verbose=None): else: data = data.astype(np.float64) - data = data.reshape(raw.epoch_shape) + data = data.reshape(raw.epoch_shape, copy=False) data *= raw.cals return data diff --git a/mne/event.py b/mne/event.py index 7d04548f081..d8891769fd1 100644 --- a/mne/event.py +++ b/mne/event.py @@ -181,7 +181,7 @@ def _read_events_fif(fid, tree): if event_list is None: raise ValueError("Could not find any events") else: - event_list = event_list.reshape(-1, 3) + event_list = event_list.reshape((-1, 3), copy=False) for d in events["directory"]: kind = d.kind pos = d.pos diff --git a/mne/filter.py b/mne/filter.py index 5f571899b14..a8751c2c987 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -349,7 +349,7 @@ def _overlap_add_filter( for pp, p in enumerate(picks): x[p] = data_new[pp] - x = x.reshape(orig_shape) + x = x.reshape(orig_shape, copy=False) return x @@ -404,7 +404,7 @@ def _prep_for_filtering(x, copy, picks=None): orig_shape = x.shape x = np.atleast_2d(x) picks = _picks_to_idx(x.shape[-2], picks) - x = x.reshape(np.prod(x.shape[:-1]), x.shape[-1]) + x = x.reshape((np.prod(x.shape[:-1]), x.shape[-1]), copy=False) if len(orig_shape) == 3: n_epochs, n_channels, n_times = orig_shape offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), len(picks)) @@ -577,7 +577,7 @@ def _iir_filter(x, iir_params, picks, n_jobs, copy, phase="zero"): data_new = parallel(p_fun(x=x[p]) for p in picks) for pp, p in enumerate(picks): x[p] = data_new[pp] - x = x.reshape(orig_shape) + x = x.reshape(orig_shape, copy=False) return x @@ -1657,7 +1657,7 @@ def _mt_spectrum_proc( ) logger.info(f"{kind} notch frequencies (Hz):\n{found_freqs}") - x = x.reshape(orig_shape) + x = x.reshape(orig_shape, copy=False) return x @@ -2952,5 +2952,5 @@ def _iir_pad_apply_unpad(x, *, func, padlen, padtype, **kwargs): x_ext = _smart_pad(x_ext, (padlen, padlen), padtype) x_ext = func(x=x_ext, axis=-1, padlen=0, **kwargs) this_x[:] = x_ext[padlen : len(x_ext) - padlen] - x_out = x_out.reshape(x.shape) + x_out = x_out.reshape(x.shape, copy=False) return x_out diff --git a/mne/forward/_compute_forward.py 
b/mne/forward/_compute_forward.py index 5166d9a7ae7..356f74b54e8 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -457,7 +457,7 @@ def _do_prim_curr(rr, coils): for start, stop in _rr_bounds(rr, chunk=1): pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) pp *= ws - pp = pp.reshape(3 * (stop - start), -1) + pp = pp.reshape((3 * (stop - start), -1), copy=False) pc[3 * start : 3 * stop] = [ bincount(bins, this_pp, bins[-1] + 1) for this_pp in pp ] diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py index 82fe12d8db0..ba63ea9a9ee 100644 --- a/mne/forward/_lead_dots.py +++ b/mne/forward/_lead_dots.py @@ -86,7 +86,7 @@ def _get_legen_table( logger.info(f"Reading Legendre{extra_str} table...") with _open_lock(fname, "rb", buffering=0) as fid: lut = np.fromfile(fid, np.float32) - lut = lut.reshape(lut_shape) + lut = lut.reshape(lut_shape, copy=False) # we need this for the integration step n_fact = np.arange(1, n_coeff, dtype=float) @@ -265,7 +265,7 @@ def _fast_sphere_dot_r0( sums = _comp_sums_meg( beta.flatten(), ct.flatten(), lut, n_fact, volume_integral ) - sums = sums.reshape((4,) + beta.shape) + sums = sums.reshape(((4,) + beta.shape), copy=False) # Accumulate the result, a little bit streamlined version # cosmags1 = cosmags1[:, np.newaxis, :] @@ -296,7 +296,7 @@ def _fast_sphere_dot_r0( result *= r else: # 'eeg' result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact) - result = result.reshape(beta.shape) + result = result.reshape(beta.shape, copy=False) # Give it a finishing touch! result *= _eeg_const result /= lr1lr2 diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 1dfa8c94372..b07267fc992 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -1430,13 +1430,13 @@ def compute_depth_prior( # Gk = G[:, 3 * k:3 * (k + 1)] # x = np.dot(Gk.T, Gk) # d[k] = linalg.svdvals(x)[0] - G = G.reshape(G.shape[0], -1, 3) + G = G.reshape((G.shape[0], -1, 3), copy=False) d = np.linalg.norm( np.einsum("svj,svk->vjk", G, G), # vector dot prods ord=2, # ord=2 spectral (largest s.v.) 
axis=(1, 2), ) - G = G.reshape(G.shape[0], -1) + G = G.reshape((G.shape[0], -1), copy=False) # XXX Currently the fwd solns never have "patch_areas" defined if patch_areas is not None: diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py index 25f357b3280..d6c85d18a7b 100644 --- a/mne/forward/tests/test_field_interpolation.py +++ b/mne/forward/tests/test_field_interpolation.py @@ -85,7 +85,7 @@ def test_legendre_val(): ctheta = rng.rand(20, 30) * 2.0 - 1.0 beta = rng.rand(20, 30) * 0.8 c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact) - c1 = c1.reshape(beta.shape) + c1 = c1.reshape(beta.shape, copy=False) # compare to numpy n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis] diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 3d869a88bab..6d35cb783b2 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -306,7 +306,7 @@ def gamma_map( X_xyz = np.zeros((len(active_src), 3, X.shape[1]), dtype=X.dtype) idx = np.searchsorted(active_src, idx) X_xyz[idx, offset, :] = X - X_xyz = X_xyz.reshape(len(active_src) * 3, X.shape[1]) + X_xyz = X_xyz.reshape((len(active_src) * 3, X.shape[1]), copy=False) X = X_xyz active_set = (active_src[:, np.newaxis] * 3 + np.arange(3)).ravel() source_weighting[source_weighting == 0] = 1 # zeros diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index bd8604ba48c..fe6698c64fd 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -253,7 +253,7 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split = gof_split.reshape(len(active_idx), n_dip_per_pos, len(times)) + gof_split = gof_split.reshape((len(active_idx), n_dip_per_pos, len(times)), copy=False) gof_split = gof_split.sum(1) assert (gof_split < 100).all() assert gof_split.shape == (len(active_idx), len(times)) diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index 8835da4759c..764048d354c 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -1041,7 +1041,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): block = np.fromfile(fid, dtype, count) sample_stop = sample_start + count // n_channels shape = (sample_stop - sample_start, bti_info["total_chans"]) - block = block.reshape(shape) + block = block.reshape(shape, copy=False) data_view = data[:, sample_start:sample_stop] one = np.empty(block.shape[::-1]) diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index 93f1abd0640..eab54c6c735 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -207,7 +207,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): pos += np.int64(samp_offset) * si["n_chan"] * 4 fid.seek(pos, 0) this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read) - this_data = this_data.reshape(si["n_chan"], n_read) + this_data = this_data.reshape((si["n_chan"], n_read), copy=False) this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]] data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]] _mult_cal_one(data_view, this_data, idx, cals, mult) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 52001b62e55..d561315e651 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -187,7 +187,7 @@ def _get_montage_information(eeg, get_pos, *, montage_units): _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) if pos_ch_names: 
pos_array = np.array(pos, float) - pos_array = pos_array.reshape(-1, 3) + pos_array = pos_array.reshape((-1, 3), copy=False) # roughly estimate head radius and check if its reasonable is_nan_pos = np.isnan(pos).any(axis=1) @@ -718,7 +718,7 @@ def __init__( data_fname = _check_eeglab_fname(input_fname, eeg.data) with open(data_fname, "rb") as data_fid: data = np.fromfile(data_fid, dtype=np.float32) - data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F") + data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F", copy=False) else: data = eeg.data diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index ac3f6d86c24..06fb07295bd 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -424,7 +424,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): fid.seek(ent.pos + 16, 0) one = _call_dict[ent.type](fid, ent, shape=None, rlims=None) try: - one = one.reshape((nsamp, nchan)) + one = one.reshape((nsamp, nchan), copy=False) except AttributeError: # one is None n_bad += picksamp else: diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 72ec6492174..dabd4e96340 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -218,7 +218,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for blk_start in np.arange(0, data_left, blk_size) // nchan: blk_size = min(blk_size, data_left - blk_start * nchan) block = np.fromfile(fid, dtype=sqd["dtype"], count=blk_size) - block = block.reshape(nchan, -1, order="F").astype(float) + block = block.reshape((nchan, -1), order="F", copy=False).astype(float) blk_stop = blk_start + block.shape[1] data_view = data[:, blk_start:blk_stop] block *= conv_factor @@ -471,9 +471,9 @@ def _read_kit_data(self): fid.seek(info["dirs"][KIT.DIR_INDEX_RAW_DATA]["offset"]) count = n_samples * nchan data = np.fromfile(fid, dtype=dtype, count=count) - data = data.reshape((n_samples, nchan)).T + data = data.reshape((n_samples, nchan), copy=False).T data = data * info["conv_factor"] - data = data.reshape((nchan, n_epochs, epoch_length)) + data = data.reshape((nchan, n_epochs, epoch_length), copy=False) data = data.transpose((1, 0, 2)) return data @@ -672,7 +672,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]["offset"]) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) - sensitivity = sensitivity.reshape(channel_count, 2) + sensitivity = sensitivity.reshape((channel_count, 2), copy=False) channel_offset, channel_gain = sensitivity.T assert (channel_offset == 0).all() # otherwise we have a problem @@ -768,7 +768,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= if key in dig and np.isfinite(dig[key]).all(): elp.append(dig.pop(key)) elp = np.array(elp) - hsp = np.array(hsp, float).reshape(-1, 3) + hsp = np.array(hsp, float).reshape((-1, 3), copy=False) if elp.shape not in ((6, 3), (7, 3), (8, 3)): raise RuntimeError(f"Fewer than 3 HPI coils found, got {len(elp) - 3}") # coregistration diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 419d7c8cd52..47fb992ef45 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -567,7 +567,7 @@ def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None) if replace is not None: data = replace(data) x = np.fromstring(data, float, sep=sep) - x = x.reshape(stop - start, -1) + x = x.reshape((stop - start, -1), copy=False) x = x[:, cols] return x diff --git 
a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index cabca4b8ef3..7335873c9fc 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -835,8 +835,8 @@ def _assemble_kernel(inv, label, method, pick_ori, use_cps=True, verbose=None): # No need to rotate source_cov because it should be uniform # (loose=1., and depth weighting is uniform across columns) offset = sl.stop - eigen_leads = eigen_leads.reshape(-1, eigen_leads.shape[2]) - source_nn = source_nn.reshape(-1, 3) + eigen_leads = eigen_leads.reshape((-1, eigen_leads.shape[2]), copy=False) + source_nn = source_nn.reshape((-1, 3), copy=False) if pick_ori == "normal": if not inv["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI: @@ -1673,7 +1673,7 @@ def apply_inverse_cov( sol = cov.data[sel][:, sel] @ K.T sol = np.sum(K * sol.T, axis=1, keepdims=True) # Reshape back to (n_src, ..., 1) - sol = sol.reshape(stc.data.shape[:-1] + (1,)) + sol = sol.reshape(stc.data.shape[:-1] + (1,), copy=False) stc = stc.__class__(sol, stc.vertices, stc.tmin, stc.tstep, stc.subject) if combine: # combine the three directions logger.info(" Combining the current components...") diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 245314449a3..6ca4aa1781d 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -1686,7 +1686,7 @@ def _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori): assert ori.shape == (ori.shape[0], 3) ori = ori[max_idx] assert ori.shape == (max_idx.size, 3) - ori = ori.reshape(max_idx.size // 3, 3, 3) + ori = ori.reshape((max_idx.size // 3, 3, 3), copy=False) dots = np.abs(np.diagonal(ori, axis1=1, axis2=2)) mu = np.mean(dots) assert lower_ori <= mu <= upper_ori, mu diff --git a/mne/morph.py b/mne/morph.py index 3863280aaf9..f77285e62a4 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -1556,7 +1556,7 @@ def _apply_morph_data(morph, stc_from): data[to_sl] = morph.morph_mat @ data_from[from_sl] assert to_used.all() assert from_used.all() - data = data.reshape((data.shape[0],) + stc_from.data.shape[1:]) + data = data.reshape((data.shape[0],) + stc_from.data.shape[1:], copy=False) klass = stc_from.__class__ stc_to = klass(data, vertices_to, stc_from.tmin, stc_from.tstep, morph.subject_to) return stc_to diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py index 8f071abd22a..c4433a43977 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -2787,7 +2787,7 @@ def find_bad_channels_maxwell( n = stop - start flat_stop = n - (n % flat_step) data = chunk_raw.get_data(good_meg_picks, 0, flat_stop) - data = data.reshape(data.shape[0], -1, flat_step) + data = data.reshape((data.shape[0], -1, flat_step), copy=False) delta = np.std(data, axis=-1).min(-1) # min std across segments # We may want to return this later if `return_scores=True`. 
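As background for the copy=False form this commit switches to: newer NumPy versions accept a copy keyword on reshape, and copy=False forbids silent copies, restoring the guarantee that the old in-place shape assignment gave. A rough sketch of the semantics (illustrative only, not part of the diff):

    import numpy as np

    a = np.arange(6)
    b = a.reshape((2, 3), copy=False)  # guaranteed view; shares memory with a
    b[0, 0] = 99
    assert a[0] == 99                  # same buffer, like the old a.shape = (2, 3)

    c = np.arange(6).reshape(2, 3).T   # transposed, non-contiguous view
    # c.reshape(6, copy=False)         # would raise ValueError: a copy is required
    flat = c.reshape(6)                # default copy=None copies only when needed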
diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 2ec57d9382b..912c921321d 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -507,7 +507,7 @@ def test_add_bem_n_jobs(n_jobs, monkeypatch): ) assert imgs.ndim == 4 # images, h, w, rgba assert len(imgs) == 6 - imgs = imgs.reshape(len(imgs), -1) + imgs = imgs.reshape((len(imgs), -1), copy=False) norms = np.linalg.norm(imgs, axis=-1) # should have down-up-down shape corr = np.corrcoef(norms, np.hanning(len(imgs)))[0, 1] diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 2b9dfef5d6c..20ded52b5e1 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3730,7 +3730,7 @@ def _gen_extract_label_time_course( this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data this_data = this_data.reshape( - (this_data.shape[0],) + stc.data.shape[1:] + (this_data.shape[0],) + stc.data.shape[1:], copy=False ) else: this_data = stc.data[vertidx] diff --git a/mne/source_space/_source_space.py b/mne/source_space/_source_space.py index d6b17d29da0..cda5256ce1c 100644 --- a/mne/source_space/_source_space.py +++ b/mne/source_space/_source_space.py @@ -2317,7 +2317,7 @@ def _make_volume_source_space( checks = np.where(neigh >= 0)[0] removes = np.logical_not(np.isin(checks, sp["vertno"])) neigh[checks[removes]] = -1 - neigh = neigh.reshape(old_shape) + neigh = neigh.reshape(old_shape, copy=False) neigh = neigh.T # Thought we would need this, but C code keeps -1 vertices, so we will: # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])] diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index 8733104b18d..24fd74ff9f2 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -695,7 +695,7 @@ def _do_permutations( # The stat should have the same shape as the samples for no adj. if adjacency is None: - t_obs_surr = t_obs_surr.reshape(sample_shape) + t_obs_surr = t_obs_surr.reshape(sample_shape, copy=False) # Find cluster on randomized stats out = _find_clusters( @@ -783,7 +783,7 @@ def _do_1samp_permutations( # The stat should have the same shape as the samples for no adj. 
if adjacency is None: - t_obs_surr = t_obs_surr.reshape(sample_shape) + t_obs_surr = t_obs_surr.reshape(sample_shape, copy=False) # Find cluster on randomized stats out = _find_clusters( @@ -974,7 +974,7 @@ def _permutation_cluster_test( f"compatible with the sample shape {sample_shape}" ) if adjacency is None or adjacency is False: - t_obs = t_obs.reshape(sample_shape) + t_obs = t_obs.reshape(sample_shape, copy=False) if exclude is not None: include = np.logical_not(exclude) @@ -1001,7 +1001,7 @@ def _permutation_cluster_test( clusters, cluster_stats = out # The stat should have the same shape as the samples - t_obs = t_obs.reshape(sample_shape) + t_obs = t_obs.reshape(sample_shape, copy=False) # For TFCE, return the "adjusted" statistic instead of raw scores # and for clusters, each point gets treated independently @@ -1113,7 +1113,7 @@ def _permutation_cluster_test( for ti in to_remove: step_down_include[clusters[ti]] = False if adjacency is None and adjacency is not False: - step_down_include = step_down_include.reshape(sample_shape) + step_down_include = step_down_include.reshape(sample_shape, copy=False) n_step_downs += 1 if step_down_p > 0: a_text = "additional " if n_step_downs > 1 else "" diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py index e7500480e21..c1f98b6853a 100644 --- a/mne/tests/test_transforms.py +++ b/mne/tests/test_transforms.py @@ -76,7 +76,7 @@ def test_tps(): az = np.linspace(0.0, 2 * np.pi, 20, endpoint=False) pol = np.linspace(0, np.pi, 12)[1:-1] sph = np.array(np.meshgrid(1, az, pol, indexing="ij")) - sph = sph.reshape(3, -1) + sph = sph.reshape((3, -1), copy=False) assert_equal(sph.shape[1], 200) source = _sph_to_cart(sph.T) destination = source.copy() diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index b04fc9e243c..78ee4fe65b2 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -455,7 +455,7 @@ def psd_array_multitaper( # Combining/reshaping to original data shape last_dims = (n_freqs,) if output == "power" else (n_tapers, n_freqs) - psd = psd.reshape(dshape + last_dims) + psd = psd.reshape(dshape + last_dims, copy=False) if ndim_in == 1: psd = psd[0] diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py index 83f4296553c..ef229de0c66 100644 --- a/mne/time_frequency/psd.py +++ b/mne/time_frequency/psd.py @@ -312,5 +312,5 @@ def func(*args, **kwargs): if bad_ch.any(): psds[bad_ch] = np.nan - psds = psds.reshape(shape) + psds = psds.reshape(shape, copy=False) return psds, freqs diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index cbf00c2138b..c492c0a698b 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -2230,7 +2230,7 @@ def add_label( if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape(self.geo[hemi].faces.shape) + keep_idx = keep_idx.reshape(self.geo[hemi].faces.shape, copy=False) keep_idx = self.geo[hemi].faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 @@ -3978,7 +3978,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape(self.geo[hemi].orig_faces.shape) + keep_idx = keep_idx.reshape(self.geo[hemi].orig_faces.shape, copy=False) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if 
restrict_idx is not None: diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 7a5a82760dc..b8ad2bf8bc1 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -1466,7 +1466,7 @@ def _create_testing_brain( stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = rng.rand( stc_data.size // 20 ) - stc_data = stc_data.reshape(n_verts, n_time) + stc_data = stc_data.reshape((n_verts, n_time), copy=False) if diverging: stc_data -= 0.5 stc = klass(stc_data, vertices, 1, 1) diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 8f5738e7839..7793b8ed90a 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -355,7 +355,7 @@ def _pixmap_to_ndarray(pixmap): if hasattr(ptr, "setsize"): # PyQt ptr.setsize(count) data = np.frombuffer(ptr, dtype=np.uint8, count=count).copy() - data = data.reshape(img.height(), img.width(), 4) + data = data.reshape((img.height(), img.width(), 4), copy=False) return data / 255.0 diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 8f846e13192..56948f2dbd6 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -135,7 +135,7 @@ def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data = stc_data.reshape(n_verts, n_time) + stc_data = stc_data.reshape((n_verts, n_time), copy=False) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" @@ -942,7 +942,7 @@ def test_process_clim_plot(renderer_interactive, brain_gc): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.random.RandomState(0).rand(n_verts * n_time) - stc_data = stc_data.reshape(n_verts, n_time) + stc_data = stc_data.reshape((n_verts, n_time), copy=False) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") # Test for simple use cases @@ -1064,7 +1064,7 @@ def test_stc_mpl(): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.ones(n_verts * n_time) - stc_data = stc_data.reshape(n_verts, n_time) + stc_data = stc_data.reshape((n_verts, n_time), copy=False) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") stc.plot( subjects_dir=subjects_dir, @@ -1396,7 +1396,7 @@ def test_link_brains(renderer_interactive): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data = stc_data.reshape(n_verts, n_time) + stc_data = stc_data.reshape((n_verts, n_time), copy=False) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" From 478b825ede67f7800ba643b06299b276ffb3e3d6 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 13:55:38 +0000 Subject: [PATCH 05/10] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- mne/chpi.py | 4 +++- mne/inverse_sparse/mxne_inverse.py | 4 +++- mne/io/eeglab/eeglab.py | 4 +++- mne/viz/_brain/_brain.py | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/mne/chpi.py b/mne/chpi.py index f88c4d28ea2..dd6ddb7bcf0 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -117,7 +117,9 @@ def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data = data.reshape((-1, 10), copy=False) # ensure it's the right size even if empty + data 
= data.reshape( + (-1, 10), copy=False + ) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return data diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index fe6698c64fd..c97b40607a5 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -253,7 +253,9 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split = gof_split.reshape((len(active_idx), n_dip_per_pos, len(times)), copy=False) + gof_split = gof_split.reshape( + (len(active_idx), n_dip_per_pos, len(times)), copy=False + ) gof_split = gof_split.sum(1) assert (gof_split < 100).all() assert gof_split.shape == (len(active_idx), len(times)) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index d561315e651..f2e940d136b 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -718,7 +718,9 @@ def __init__( data_fname = _check_eeglab_fname(input_fname, eeg.data) with open(data_fname, "rb") as data_fid: data = np.fromfile(data_fid, dtype=np.float32) - data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F", copy=False) + data = data.reshape( + (eeg.nbchan, eeg.pnts, eeg.trials), order="F", copy=False + ) else: data = eeg.data diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index c492c0a698b..a0be81b46dc 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -3978,7 +3978,9 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape(self.geo[hemi].orig_faces.shape, copy=False) + keep_idx = keep_idx.reshape( + self.geo[hemi].orig_faces.shape, copy=False + ) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: From 90fa5cd5d09e9345d97d4b38d927e0fa09860e8d Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 15:11:24 +0100 Subject: [PATCH 06/10] more fixes --- mne/chpi.py | 4 +--- mne/inverse_sparse/mxne_inverse.py | 4 +--- mne/io/eeglab/eeglab.py | 4 +--- mne/io/kit/kit.py | 8 ++++---- mne/source_estimate.py | 4 +--- mne/viz/_brain/_brain.py | 4 +--- 6 files changed, 9 insertions(+), 19 deletions(-) diff --git a/mne/chpi.py b/mne/chpi.py index dd6ddb7bcf0..f88c4d28ea2 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -117,9 +117,7 @@ def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data = data.reshape( - (-1, 10), copy=False - ) # ensure it's the right size even if empty + data = data.reshape((-1, 10), copy=False) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return data diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index c97b40607a5..fe6698c64fd 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -253,9 +253,7 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split = gof_split.reshape( - (len(active_idx), n_dip_per_pos, 
len(times)), copy=False - ) + gof_split = gof_split.reshape((len(active_idx), n_dip_per_pos, len(times)), copy=False) gof_split = gof_split.sum(1) assert (gof_split < 100).all() assert gof_split.shape == (len(active_idx), len(times)) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index f2e940d136b..c5ba60bbada 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -718,9 +718,7 @@ def __init__( data_fname = _check_eeglab_fname(input_fname, eeg.data) with open(data_fname, "rb") as data_fid: data = np.fromfile(data_fid, dtype=np.float32) - data = data.reshape( - (eeg.nbchan, eeg.pnts, eeg.trials), order="F", copy=False - ) + data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F") else: data = eeg.data diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index dabd4e96340..95d190fb7ef 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -218,7 +218,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for blk_start in np.arange(0, data_left, blk_size) // nchan: blk_size = min(blk_size, data_left - blk_start * nchan) block = np.fromfile(fid, dtype=sqd["dtype"], count=blk_size) - block = block.reshape((nchan, -1), order="F", copy=False).astype(float) + block = block.reshape(nchan, -1, order="F").astype(float) blk_stop = blk_start + block.shape[1] data_view = data[:, blk_start:blk_stop] block *= conv_factor @@ -471,9 +471,9 @@ def _read_kit_data(self): fid.seek(info["dirs"][KIT.DIR_INDEX_RAW_DATA]["offset"]) count = n_samples * nchan data = np.fromfile(fid, dtype=dtype, count=count) - data = data.reshape((n_samples, nchan), copy=False).T + data = data.reshape((n_samples, nchan)).T data = data * info["conv_factor"] - data = data.reshape((nchan, n_epochs, epoch_length), copy=False) + data = data.reshape((nchan, n_epochs, epoch_length)) data = data.transpose((1, 0, 2)) return data @@ -768,7 +768,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= if key in dig and np.isfinite(dig[key]).all(): elp.append(dig.pop(key)) elp = np.array(elp) - hsp = np.array(hsp, float).reshape((-1, 3), copy=False) + hsp = np.array(hsp, float).reshape(-1, 3) if elp.shape not in ((6, 3), (7, 3), (8, 3)): raise RuntimeError(f"Fewer than 3 HPI coils found, got {len(elp) - 3}") # coregistration diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 20ded52b5e1..67ab758df7e 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3729,9 +3729,7 @@ def _gen_extract_label_time_course( assert vertidx.shape[1] == stc.data.shape[0] this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data - this_data = this_data.reshape( - (this_data.shape[0],) + stc.data.shape[1:], copy=False - ) + this_data = this_data.reshape((this_data.shape[0],) + stc.data.shape[1:], copy=False) else: this_data = stc.data[vertidx] label_tc[i] = func(flip, this_data) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index a0be81b46dc..c492c0a698b 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -3978,9 +3978,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape( - self.geo[hemi].orig_faces.shape, copy=False - ) + keep_idx = keep_idx.reshape(self.geo[hemi].orig_faces.shape, copy=False) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: 
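reshape(..., copy=False) requires NumPy >= 2.1, so PATCH 09/10 below routes these call sites through a _reshape_view() compat helper in mne/fixes.py rather than using the keyword directly. A rough sketch of that version-gated dispatch, with a simplified inline version check standing in for the _compare_version call the real helper uses, and an illustrative empty input of the kind the montage and head-position readers guard against:

import numpy as np

def reshape_view(arr, shape):
    # NumPy >= 2.1: ask reshape for a view and let it raise if that fails.
    major, minor = (int(p) for p in np.__version__.split(".")[:2])
    if (major, minor) >= (2, 1):
        return arr.reshape(shape, copy=False)
    # Older NumPy: fall back to in-place shape assignment, which likewise
    # never copies (this is the idiom the deprecation is about).
    arr.shape = shape
    return arr

# Empty input still gets a well-defined 2D shape, e.g. when no HPI points
# were read from a file.
hpi = reshape_view(np.array([]), (-1, 3))
assert hpi.shape == (0, 3)
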
From ae7505a3a047bfbd7f179b4d874137efb99a4bf7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Jan 2026 14:11:49 +0000 Subject: [PATCH 07/10] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- mne/chpi.py | 4 +++- mne/inverse_sparse/mxne_inverse.py | 4 +++- mne/source_estimate.py | 4 +++- mne/viz/_brain/_brain.py | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/mne/chpi.py b/mne/chpi.py index f88c4d28ea2..dd6ddb7bcf0 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -117,7 +117,9 @@ def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data = data.reshape((-1, 10), copy=False) # ensure it's the right size even if empty + data = data.reshape( + (-1, 10), copy=False + ) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return data diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index fe6698c64fd..c97b40607a5 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -253,7 +253,9 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split = gof_split.reshape((len(active_idx), n_dip_per_pos, len(times)), copy=False) + gof_split = gof_split.reshape( + (len(active_idx), n_dip_per_pos, len(times)), copy=False + ) gof_split = gof_split.sum(1) assert (gof_split < 100).all() assert gof_split.shape == (len(active_idx), len(times)) diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 67ab758df7e..20ded52b5e1 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3729,7 +3729,9 @@ def _gen_extract_label_time_course( assert vertidx.shape[1] == stc.data.shape[0] this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data - this_data = this_data.reshape((this_data.shape[0],) + stc.data.shape[1:], copy=False) + this_data = this_data.reshape( + (this_data.shape[0],) + stc.data.shape[1:], copy=False + ) else: this_data = stc.data[vertidx] label_tc[i] = func(flip, this_data) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index c492c0a698b..a0be81b46dc 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -3978,7 +3978,9 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape(self.geo[hemi].orig_faces.shape, copy=False) + keep_idx = keep_idx.reshape( + self.geo[hemi].orig_faces.shape, copy=False + ) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: From 7713ceff205292b8a73db6185d919c393a30e0db Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 15:12:57 +0100 Subject: [PATCH 08/10] trigger CIs From 0769daef75a9c61620ce9f9c4a821c007f0a2e52 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 17:38:18 +0100 Subject: [PATCH 09/10] add compat function and fixes for numpy < 2.1 --- mne/_fiff/_digitization.py | 3 +- mne/_fiff/tag.py | 3 +- mne/beamformer/_rap_music.py | 8 ++-- mne/beamformer/tests/test_dics.py | 3 +- 
mne/beamformer/tests/test_external.py | 3 +- mne/beamformer/tests/test_lcmv.py | 3 +- mne/channels/montage.py | 5 ++- mne/chpi.py | 8 ++-- mne/decoding/receptive_field.py | 3 +- mne/decoding/tests/test_receptive_field.py | 15 +++---- mne/decoding/transformer.py | 3 +- mne/epochs.py | 4 +- mne/event.py | 3 +- mne/filter.py | 12 +++--- mne/fixes.py | 41 +++++++++++++++++++ mne/forward/_compute_forward.py | 4 +- mne/forward/_lead_dots.py | 7 ++-- mne/forward/forward.py | 5 ++- mne/forward/tests/test_field_interpolation.py | 3 +- mne/inverse_sparse/_gamma_map.py | 4 +- mne/inverse_sparse/mxne_inverse.py | 6 +-- mne/io/bti/bti.py | 3 +- mne/io/ctf/ctf.py | 3 +- mne/io/eeglab/eeglab.py | 3 +- mne/io/fiff/raw.py | 3 +- mne/io/kit/kit.py | 3 +- mne/io/nirx/nirx.py | 3 +- mne/minimum_norm/inverse.py | 8 ++-- mne/minimum_norm/tests/test_inverse.py | 3 +- mne/morph.py | 4 +- mne/preprocessing/maxwell.py | 4 +- mne/report/tests/test_report.py | 3 +- mne/source_estimate.py | 6 +-- mne/source_space/_source_space.py | 4 +- mne/stats/cluster_level.py | 12 +++--- mne/tests/test_transforms.py | 4 +- mne/time_frequency/multitaper.py | 3 +- mne/time_frequency/psd.py | 3 +- mne/viz/_brain/_brain.py | 7 ++-- mne/viz/_brain/tests/test_brain.py | 3 +- mne/viz/backends/_utils.py | 4 +- mne/viz/tests/test_3d.py | 9 ++-- 42 files changed, 152 insertions(+), 89 deletions(-) diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py index dec2f57c4d2..9664a4f470e 100644 --- a/mne/_fiff/_digitization.py +++ b/mne/_fiff/_digitization.py @@ -7,6 +7,7 @@ import numpy as np +from ..fixes import _reshape_view from ..utils import Bunch, _check_fname, _validate_type, logger, verbose, warn from .constants import FIFF, _coord_frame_named from .tag import read_tag @@ -335,7 +336,7 @@ def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): f"Only single coordinate frame in dig is supported, got {dig_coord_frames}" ) dig_ch_pos_location = np.array(dig_ch_pos_location) - dig_ch_pos_location = dig_ch_pos_location.reshape((-1, 3), copy=False) + dig_ch_pos_location = _reshape_view(dig_ch_pos_location, (-1, 3)) return Bunch( nasion=fids.get("nasion", None), lpa=fids.get("lpa", None), diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py index bd6cf4b588c..391606e7cca 100644 --- a/mne/_fiff/tag.py +++ b/mne/_fiff/tag.py @@ -12,6 +12,7 @@ import numpy as np from scipy.sparse import csc_array, csr_array +from ..fixes import _reshape_view from ..utils import _check_option, warn from ..utils.numerics import _julian_to_date from .constants import ( @@ -177,7 +178,7 @@ def _read_matrix(fid, tag, shape, rlims): data = data.view(">c8") elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: data = data.view(">c16") - data = data.reshape(dims, copy=False) + data = _reshape_view(data, dims) else: # Find dimensions and return to the beginning of tag data ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item()) diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 1a2eac63673..901651d89de 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -8,7 +8,7 @@ from scipy import linalg from .._fiff.pick import pick_channels_forward, pick_info -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import convert_forward_solution, is_fixed_orient from ..inverse_sparse.mxne_inverse import _make_dipoles_sparse from ..minimum_norm.inverse import _log_exp_var @@ -68,9 +68,9 @@ def _apply_rap_music( phi_sig = eig_vectors[:, -n_dipoles:] n_orient = 3 if is_free_ori else 1 
- G = G.reshape((G.shape[0], -1, n_orient), copy=False) + G = _reshape_view(G, (G.shape[0], -1, n_orient)) gain = forward["sol"]["data"].copy() - gain = gain.reshape(G.shape, copy=False) + gain = _reshape_view(gain, G.shape) n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) gain_dip = np.empty((n_channels, n_dipoles)) @@ -122,7 +122,7 @@ def _apply_rap_music( sol = linalg.lstsq(A, M)[0] if n_orient == 3: X = sol[:, np.newaxis] * oris[:, :, np.newaxis] - X = X.reshape((-1, len(times)), copy=False) + X = _reshape_view(X, (-1, len(times))) else: X = sol diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index 302ad2d7d7e..a9ef542e8d1 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -10,6 +10,7 @@ import mne from mne import pick_types +from mne.fixes import _reshape_view from mne._fiff.constants import FIFF from mne._fiff.pick import pick_info from mne.beamformer import ( @@ -269,7 +270,7 @@ def test_make_dics(tmp_path, _load_forward, idx, whiten): exp=None, noise_cov=noise_cov, ) - G = G.reshape((n_channels, n_verts, n_orient), copy=False) + G = _reshape_view(G, (n_channels, n_verts, n_orient)) G = G.transpose(1, 2, 0).conj() # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/beamformer/tests/test_external.py b/mne/beamformer/tests/test_external.py index 21dde3df458..75d79cb9cef 100644 --- a/mne/beamformer/tests/test_external.py +++ b/mne/beamformer/tests/test_external.py @@ -9,6 +9,7 @@ import mne from mne.beamformer import apply_lcmv, apply_lcmv_cov, make_lcmv +from mne.fixes import _reshape_view from mne.beamformer.tests.test_lcmv import _get_data from mne.datasets import testing @@ -98,7 +99,7 @@ def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): ft_fname = ft_data_path / ("ft_source_" + bf_type + "-vol.mat") stc_ft_data = pymatreader.read_mat(ft_fname)["stc"] if stc_ft_data.ndim == 1: - stc_ft_data = stc_ft_data.reshape((stc_ft_data.size, 1), copy=False) + stc_ft_data = _reshape_view(stc_ft_data, (stc_ft_data.size, 1)) if stc_mne.data.ndim == 2: signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True)) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 0be455ba494..9f23a5b91c4 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -18,6 +18,7 @@ from scipy.spatial.distance import cdist import mne +from mne.fixes import _reshape_view from mne import ( EvokedArray, VolSourceEstimate, @@ -1185,7 +1186,7 @@ def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): ) n_channels, n_sources = G.shape n_sources //= 3 - G = G.reshape((n_channels, n_sources, 3), copy=False) + G = _reshape_view(G, (n_channels, n_sources, 3)) G = G.transpose(1, 2, 0) # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 1c323c6fb70..142ac86e6ca 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -10,6 +10,7 @@ import numpy as np +from ..fixes import _reshape_view from .._fiff._digitization import ( _coord_frame_const, _count_points_by_type, @@ -973,9 +974,9 @@ def read_dig_hpts(fname, unit="mm"): label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "eeg" } hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "hpi"]) - hpi = hpi.reshape((-1, 3), copy=False) # in case it's empty + hpi = _reshape_view(hpi, (-1, 3)) # in case it's empty hsp = np.array([this_xyz for ii, 
this_xyz in enumerate(xyz) if kind[ii] == "extra"]) - hsp = hsp.reshape((-1, 3), copy=False) # in case it's empty + hsp = _reshape_view(hsp, (-1, 3)) # in case it's empty return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) diff --git a/mne/chpi.py b/mne/chpi.py index dd6ddb7bcf0..7ea3d1b407d 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -43,7 +43,7 @@ from .cov import compute_whitener, make_ad_hoc_cov from .dipole import _make_guesses from .event import find_events -from .fixes import jit +from .fixes import _reshape_view, jit from .forward import _concatenate_coils, _create_meg_coils, _magnetic_dipole_field_vec from .io import BaseRaw, RawArray from .io.ctf.trans import _make_ctf_coord_trans_set @@ -117,9 +117,7 @@ def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data = data.reshape( - (-1, 10), copy=False - ) # ensure it's the right size even if empty + data = _reshape_view(data, (-1, 10)) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return data @@ -1392,7 +1390,7 @@ def compute_chpi_locs( ) fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) fwd = np.dot(fwd, whitener.T) - fwd = fwd.reshape((guesses.shape[0], 3, -1), copy=False) + fwd = _reshape_view(fwd, (guesses.shape[0], 3, -1)) fwd = np.linalg.svd(fwd, full_matrices=False)[2] guesses = dict(rr=guesses, whitened_fwd_svd=fwd) del fwd, R diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index 53fb7558599..7b8fb63dfd3 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -15,6 +15,7 @@ from sklearn.exceptions import NotFittedError from sklearn.metrics import r2_score +from ..fixes import _reshape_view from ..utils import _validate_type, fill_doc, pinv from ._fixes import _check_n_features_3d, validate_data from .base import _check_estimator, get_coef @@ -361,7 +362,7 @@ def predict(self, X): else: extra = 1 shape = shape[: self._y_dim + extra] - y_pred = y_pred.reshape(shape, copy=False) + y_pred = _reshape_view(y_pred, shape) return y_pred def score(self, X, y): diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index 251db07d3e6..c718af4beb5 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -16,6 +16,7 @@ from sklearn.utils.estimator_checks import parametrize_with_checks from mne.decoding import ReceptiveField, TimeDelayingRidge +from mne.fixes import _reshape_view from mne.decoding.receptive_field import ( _SCORERS, _delay_time_series, @@ -271,7 +272,7 @@ def test_time_delaying_fast_calc(n_jobs): smin, smax = 1, 2 X_del = _delay_time_series(X, smin, smax, 1.0) # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays) - X_del = X_del.reshape((X.shape[0], -1), copy=False) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -282,7 +283,7 @@ def test_time_delaying_fast_calc(n_jobs): # all positive smin, smax = -2, -1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape((X.shape[0], -1), copy=False) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T assert_allclose(X_del, expected) Xt_X 
= np.dot(X_del.T, X_del) @@ -293,7 +294,7 @@ def test_time_delaying_fast_calc(n_jobs): # both sides smin, smax = -1, 1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape((X.shape[0], -1), copy=False) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array( [[2, 3, 0], [1, 2, 3], [0, 1, 2], [7, 11, 0], [5, 7, 11], [0, 5, 7]] ).T @@ -315,7 +316,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3, 5]]).T smin, smax = 0, 3 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape((X.shape[0], -1), copy=False) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], [0, 0, 1, 2], [0, 0, 0, 1]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -328,7 +329,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3], [5, 7, 11]]).T smin, smax = 0, 2 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del = X_del.reshape((X.shape[0], -1), copy=False) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array( [[1, 2, 3], [0, 1, 2], [0, 0, 1], [5, 7, 11], [0, 5, 7], [0, 0, 5]] ).T @@ -366,7 +367,7 @@ def test_time_delaying_fast_calc(n_jobs): x_yt_true = einsum("tfd,to->ofd", X_del, y) x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax)) - X_del = X_del.reshape((X.shape[0], -1), copy=False) + X_del = _reshape_view(X_del, (X.shape[0], -1)) x_xt_true = np.dot(X_del.T, X_del).T assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax)) @@ -388,7 +389,7 @@ def test_receptive_field_1d(n_jobs): y[delay:] = x[:-delay, 0] slims += [(1, 2)] for ndim in (1, 2): - y = y.reshape((y.shape[0],) + (1,) * (ndim - 1), copy=False) + y = _reshape_view(y, (y.shape[0],) + (1,) * (ndim - 1)) for slim in slims: smin, smax = slim lap = TimeDelayingRidge( diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index f004f6aa268..c5fd14d9568 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -17,6 +17,7 @@ from ..cov import _check_scalings_user from ..epochs import BaseEpochs from ..filter import filter_data +from ..fixes import _reshape_view from ..time_frequency import psd_array_multitaper from ..utils import _check_option, _validate_type, check_version, fill_doc from ._fixes import validate_data # TODO VERSION remove with sklearn 1.4+ @@ -118,7 +119,7 @@ def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1])) X = func(X, *args, **kwargs) if return_result: - X = X.reshape((orig_shape[0], orig_shape[2], orig_shape[1]), copy=False) + X = _reshape_view(X, (orig_shape[0], orig_shape[2], orig_shape[1])) X = X.transpose(0, 2, 1) return X diff --git a/mne/epochs.py b/mne/epochs.py index 6f2fab86dd2..2d317caa63e 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -66,7 +66,7 @@ from .event import _read_events_fif, make_fixed_length_events, match_event_names from .evoked import EvokedArray from .filter import FilterMixin, _check_fun, detrend -from .fixes import rng_uniform +from .fixes import _reshape_view, rng_uniform from .html_templates import _get_html_template from .parallel import parallel_func from .time_frequency.spectrum import EpochsSpectrum, SpectrumMixin, _validate_method @@ -4479,7 +4479,7 @@ def _get_epoch_from_raw(self, idx, verbose=None): else: data = data.astype(np.float64) - data = data.reshape(raw.epoch_shape, copy=False) + data = _reshape_view(data, raw.epoch_shape) data *= raw.cals 
return data diff --git a/mne/event.py b/mne/event.py index d8891769fd1..76fd5ab8959 100644 --- a/mne/event.py +++ b/mne/event.py @@ -9,6 +9,7 @@ import numpy as np +from .fixes import _reshape_view from ._fiff.constants import FIFF from ._fiff.open import fiff_open from ._fiff.pick import pick_channels @@ -181,7 +182,7 @@ def _read_events_fif(fid, tree): if event_list is None: raise ValueError("Could not find any events") else: - event_list = event_list.reshape((-1, 3), copy=False) + event_list = _reshape_view(event_list, (-1, 3)) for d in events["directory"]: kind = d.kind pos = d.pos diff --git a/mne/filter.py b/mne/filter.py index a8751c2c987..304afdf4de7 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -22,7 +22,7 @@ _setup_cuda_fft_resample, _smart_pad, ) -from .fixes import minimum_phase +from .fixes import _reshape_view, minimum_phase from .parallel import parallel_func from .utils import ( _check_option, @@ -349,7 +349,7 @@ def _overlap_add_filter( for pp, p in enumerate(picks): x[p] = data_new[pp] - x = x.reshape(orig_shape, copy=False) + x = _reshape_view(x, orig_shape) return x @@ -404,7 +404,7 @@ def _prep_for_filtering(x, copy, picks=None): orig_shape = x.shape x = np.atleast_2d(x) picks = _picks_to_idx(x.shape[-2], picks) - x = x.reshape((np.prod(x.shape[:-1]), x.shape[-1]), copy=False) + x = _reshape_view(x, (np.prod(x.shape[:-1]), x.shape[-1])) if len(orig_shape) == 3: n_epochs, n_channels, n_times = orig_shape offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), len(picks)) @@ -577,7 +577,7 @@ def _iir_filter(x, iir_params, picks, n_jobs, copy, phase="zero"): data_new = parallel(p_fun(x=x[p]) for p in picks) for pp, p in enumerate(picks): x[p] = data_new[pp] - x = x.reshape(orig_shape, copy=False) + x = _reshape_view(x, orig_shape) return x @@ -1657,7 +1657,7 @@ def _mt_spectrum_proc( ) logger.info(f"{kind} notch frequencies (Hz):\n{found_freqs}") - x = x.reshape(orig_shape, copy=False) + x = _reshape_view(x, orig_shape) return x @@ -2952,5 +2952,5 @@ def _iir_pad_apply_unpad(x, *, func, padlen, padtype, **kwargs): x_ext = _smart_pad(x_ext, (padlen, padlen), padtype) x_ext = func(x=x_ext, axis=-1, padlen=0, **kwargs) this_x[:] = x_ext[padlen : len(x_ext) - padlen] - x_out = x_out.reshape(x.shape, copy=False) + x_out = _reshape_view(x_out, x.shape) return x_out diff --git a/mne/fixes.py b/mne/fixes.py index 2148330fb34..f3a8252c40f 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -56,6 +56,47 @@ def _compare_version(version_a, operator, version_b): return getattr(operator_module, mapping[operator])(ver_a, ver_b) +############################################################################### +# NumPy 2.5 deprecates .shape assignment, but .reshape(copy=False) requires 2.1+ + + +def _reshape_view(arr, shape): + """Reshape an array as a view, raising if a copy would be required. + + This function provides compatibility across NumPy versions for reshaping + arrays as views. On NumPy >= 2.1, it uses ``reshape(copy=False)`` which + explicitly fails if a view cannot be created. On older versions, it uses + direct shape assignment which has the same behavior but is deprecated in + NumPy 2.5+. + + Can be removed once NumPy 2.1 is the minimum supported version. + + Parameters + ---------- + arr : ndarray + The array to reshape. + shape : tuple of int + The new shape. + + Returns + ------- + ndarray + A reshaped view of the array. + + Raises + ------ + AttributeError + If a view cannot be created on NumPy < 2.1. 
+ ValueError + If a view cannot be created on NumPy >= 2.1. + """ + if _compare_version(np.__version__, ">=", "2.1"): + return arr.reshape(shape, copy=False) + else: + arr.shape = shape + return arr + + ############################################################################### # Misc diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py index 356f74b54e8..6453fdb47e7 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -17,7 +17,7 @@ from .._fiff.constants import FIFF from ..bem import _import_openmeeg, _make_openmeeg_geometry -from ..fixes import bincount, jit +from ..fixes import _reshape_view, bincount, jit from ..parallel import parallel_func from ..surface import _jit_cross, _project_onto_surface from ..transforms import apply_trans, invert_transform @@ -457,7 +457,7 @@ def _do_prim_curr(rr, coils): for start, stop in _rr_bounds(rr, chunk=1): pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) pp *= ws - pp = pp.reshape((3 * (stop - start), -1), copy=False) + pp = _reshape_view(pp, (3 * (stop - start), -1)) pc[3 * start : 3 * stop] = [ bincount(bins, this_pp, bins[-1] + 1) for this_pp in pp ] diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py index ba63ea9a9ee..9e6176dbc5c 100644 --- a/mne/forward/_lead_dots.py +++ b/mne/forward/_lead_dots.py @@ -11,6 +11,7 @@ import numpy as np from numpy.polynomial import legendre +from ..fixes import _reshape_view from ..parallel import parallel_func from ..utils import _get_extra_data_path, _open_lock, fill_doc, logger, verbose @@ -86,7 +87,7 @@ def _get_legen_table( logger.info(f"Reading Legendre{extra_str} table...") with _open_lock(fname, "rb", buffering=0) as fid: lut = np.fromfile(fid, np.float32) - lut = lut.reshape(lut_shape, copy=False) + lut = _reshape_view(lut, lut_shape) # we need this for the integration step n_fact = np.arange(1, n_coeff, dtype=float) @@ -265,7 +266,7 @@ def _fast_sphere_dot_r0( sums = _comp_sums_meg( beta.flatten(), ct.flatten(), lut, n_fact, volume_integral ) - sums = sums.reshape(((4,) + beta.shape), copy=False) + sums = _reshape_view(sums, ((4,) + beta.shape)) # Accumulate the result, a little bit streamlined version # cosmags1 = cosmags1[:, np.newaxis, :] @@ -296,7 +297,7 @@ def _fast_sphere_dot_r0( result *= r else: # 'eeg' result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact) - result = result.reshape(beta.shape, copy=False) + result = _reshape_view(result, beta.shape) # Give it a finishing touch! result *= _eeg_const result /= lr1lr2 diff --git a/mne/forward/forward.py b/mne/forward/forward.py index b07267fc992..53ebaffd3de 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -18,6 +18,7 @@ import numpy as np from scipy import sparse +from ..fixes import _reshape_view from .._fiff.constants import FIFF from .._fiff.matrix import ( _read_named_matrix, @@ -1430,13 +1431,13 @@ def compute_depth_prior( # Gk = G[:, 3 * k:3 * (k + 1)] # x = np.dot(Gk.T, Gk) # d[k] = linalg.svdvals(x)[0] - G = G.reshape((G.shape[0], -1, 3), copy=False) + G = _reshape_view(G, (G.shape[0], -1, 3)) d = np.linalg.norm( np.einsum("svj,svk->vjk", G, G), # vector dot prods ord=2, # ord=2 spectral (largest s.v.) 
axis=(1, 2), ) - G = G.reshape((G.shape[0], -1), copy=False) + G = _reshape_view(G, (G.shape[0], -1)) # XXX Currently the fwd solns never have "patch_areas" defined if patch_areas is not None: diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py index d6c85d18a7b..2da3fd93095 100644 --- a/mne/forward/tests/test_field_interpolation.py +++ b/mne/forward/tests/test_field_interpolation.py @@ -18,6 +18,7 @@ import mne from mne import Epochs, make_fixed_length_events, pick_types, read_evokeds +from mne.fixes import _reshape_view from mne.datasets import testing from mne.forward import _make_surface_mapping, make_field_map from mne.forward._field_interpolation import _setup_dots @@ -85,7 +86,7 @@ def test_legendre_val(): ctheta = rng.rand(20, 30) * 2.0 - 1.0 beta = rng.rand(20, 30) * 0.8 c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact) - c1 = c1.reshape(beta.shape, copy=False) + c1 = _reshape_view(c1, beta.shape) # compare to numpy n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis] diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 6d35cb783b2..e3d077b77e6 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -4,7 +4,7 @@ import numpy as np -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import is_fixed_orient from ..minimum_norm.inverse import _check_reference, _log_exp_var from ..utils import logger, verbose, warn @@ -306,7 +306,7 @@ def gamma_map( X_xyz = np.zeros((len(active_src), 3, X.shape[1]), dtype=X.dtype) idx = np.searchsorted(active_src, idx) X_xyz[idx, offset, :] = X - X_xyz = X_xyz.reshape((len(active_src) * 3, X.shape[1]), copy=False) + X_xyz = _reshape_view(X_xyz, (len(active_src) * 3, X.shape[1])) X = X_xyz active_set = (active_src[:, np.newaxis] * 3 + np.arange(3)).ravel() source_weighting[source_weighting == 0] = 1 # zeros diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index c97b40607a5..295f72c49ce 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -6,7 +6,7 @@ from .._fiff.proj import deactivate_proj from ..dipole import Dipole -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import is_fixed_orient from ..minimum_norm.inverse import ( _check_reference, @@ -253,8 +253,8 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split = gof_split.reshape( - (len(active_idx), n_dip_per_pos, len(times)), copy=False + gof_split = _reshape_view( + gof_split, (len(active_idx), n_dip_per_pos, len(times)) ) gof_split = gof_split.sum(1) assert (gof_split < 100).all() diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index 764048d354c..41f265d2070 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -9,6 +9,7 @@ import numpy as np +from ...fixes import _reshape_view from ..._fiff._digitization import _make_bti_dig_points from ..._fiff.constants import FIFF from ..._fiff.meas_info import _empty_info @@ -1041,7 +1042,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): block = np.fromfile(fid, dtype, count) sample_stop = sample_start + count // n_channels shape = (sample_stop - sample_start, bti_info["total_chans"]) - block = block.reshape(shape, copy=False) + block = _reshape_view(block, shape) data_view = data[:, sample_start:sample_stop] one = 
np.empty(block.shape[::-1]) diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index eab54c6c735..8ad0d6b0185 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -8,6 +8,7 @@ import numpy as np +from ...fixes import _reshape_view from ..._fiff._digitization import _format_dig_points from ..._fiff.utils import _blk_read_lims, _mult_cal_one from ...utils import ( @@ -207,7 +208,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): pos += np.int64(samp_offset) * si["n_chan"] * 4 fid.seek(pos, 0) this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read) - this_data = this_data.reshape((si["n_chan"], n_read), copy=False) + this_data = _reshape_view(this_data, (si["n_chan"], n_read)) this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]] data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]] _mult_cal_one(data_view, this_data, idx, cals, mult) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index c5ba60bbada..7cb21a5cf46 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -10,6 +10,7 @@ from mne.utils.check import _check_option +from ...fixes import _reshape_view from ..._fiff._digitization import _ensure_fiducials_head from ..._fiff.constants import FIFF from ..._fiff.meas_info import create_info @@ -187,7 +188,7 @@ def _get_montage_information(eeg, get_pos, *, montage_units): _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) if pos_ch_names: pos_array = np.array(pos, float) - pos_array = pos_array.reshape((-1, 3), copy=False) + pos_array = _reshape_view(pos_array, (-1, 3)) # roughly estimate head radius and check if its reasonable is_nan_pos = np.isnan(pos).any(axis=1) diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index 06fb07295bd..89010fa8d3a 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -8,6 +8,7 @@ import numpy as np +from ...fixes import _reshape_view from ..._fiff.constants import FIFF from ..._fiff.meas_info import read_meas_info from ..._fiff.open import _fiff_get_fid, _get_next_fname, fiff_open @@ -424,7 +425,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): fid.seek(ent.pos + 16, 0) one = _call_dict[ent.type](fid, ent, shape=None, rlims=None) try: - one = one.reshape((nsamp, nchan), copy=False) + one = _reshape_view(one, (nsamp, nchan)) except AttributeError: # one is None n_bad += picksamp else: diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 95d190fb7ef..76c01aa32bf 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -15,6 +15,7 @@ import numpy as np +from ...fixes import _reshape_view from ..._fiff.constants import FIFF from ..._fiff.meas_info import _empty_info from ..._fiff.pick import pick_types @@ -672,7 +673,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]["offset"]) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) - sensitivity = sensitivity.reshape((channel_count, 2), copy=False) + sensitivity = _reshape_view(sensitivity, (channel_count, 2)) channel_offset, channel_gain = sensitivity.T assert (channel_offset == 0).all() # otherwise we have a problem diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 47fb992ef45..a2fa89c42bc 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -12,6 +12,7 @@ import numpy as np from scipy.io import loadmat +from ...fixes import _reshape_view from ..._fiff.constants import FIFF from ..._fiff.meas_info import 
_format_dig_points, create_info from ..._fiff.utils import _mult_cal_one @@ -567,7 +568,7 @@ def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None) if replace is not None: data = replace(data) x = np.fromstring(data, float, sep=sep) - x = x.reshape((stop - start, -1), copy=False) + x = _reshape_view(x, (stop - start, -1)) x = x[:, cols] return x diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index 7335873c9fc..0c9fb6a6bc7 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -39,7 +39,7 @@ from ..cov import Covariance, _read_cov, _write_cov, compute_whitener, prepare_noise_cov from ..epochs import BaseEpochs, EpochsArray from ..evoked import Evoked, EvokedArray -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import ( _read_forward_meas_info, _select_orient_forward, @@ -835,8 +835,8 @@ def _assemble_kernel(inv, label, method, pick_ori, use_cps=True, verbose=None): # No need to rotate source_cov because it should be uniform # (loose=1., and depth weighting is uniform across columns) offset = sl.stop - eigen_leads = eigen_leads.reshape((-1, eigen_leads.shape[2]), copy=False) - source_nn = source_nn.reshape((-1, 3), copy=False) + eigen_leads = _reshape_view(eigen_leads, (-1, eigen_leads.shape[2])) + source_nn = _reshape_view(source_nn, (-1, 3)) if pick_ori == "normal": if not inv["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI: @@ -1673,7 +1673,7 @@ def apply_inverse_cov( sol = cov.data[sel][:, sel] @ K.T sol = np.sum(K * sol.T, axis=1, keepdims=True) # Reshape back to (n_src, ..., 1) - sol = sol.reshape(stc.data.shape[:-1] + (1,), copy=False) + sol = _reshape_view(sol, stc.data.shape[:-1] + (1,)) stc = stc.__class__(sol, stc.vertices, stc.tmin, stc.tstep, stc.subject) if combine: # combine the three directions logger.info(" Combining the current components...") diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 6ca4aa1781d..84f50090d00 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -18,6 +18,7 @@ from scipy import sparse import mne +from mne.fixes import _reshape_view from mne import ( Covariance, EvokedArray, @@ -1686,7 +1687,7 @@ def _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori): assert ori.shape == (ori.shape[0], 3) ori = ori[max_idx] assert ori.shape == (max_idx.size, 3) - ori = ori.reshape((max_idx.size // 3, 3, 3), copy=False) + ori = _reshape_view(ori, (max_idx.size // 3, 3, 3)) dots = np.abs(np.diagonal(ori, axis1=1, axis2=2)) mu = np.mean(dots) assert lower_ori <= mu <= upper_ori, mu diff --git a/mne/morph.py b/mne/morph.py index f77285e62a4..7631fedf89a 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -9,7 +9,7 @@ import numpy as np from scipy import sparse -from .fixes import _eye_array, _get_img_fdata +from .fixes import _eye_array, _get_img_fdata, _reshape_view from .morph_map import read_morph_map from .parallel import parallel_func from .source_estimate import ( @@ -1556,7 +1556,7 @@ def _apply_morph_data(morph, stc_from): data[to_sl] = morph.morph_mat @ data_from[from_sl] assert to_used.all() assert from_used.all() - data = data.reshape((data.shape[0],) + stc_from.data.shape[1:], copy=False) + data = _reshape_view(data, (data.shape[0],) + stc_from.data.shape[1:]) klass = stc_from.__class__ stc_to = klass(data, vertices_to, stc_from.tmin, stc_from.tstep, morph.subject_to) return stc_to diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py 
index c4433a43977..8c270252bb2 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -25,7 +25,7 @@ from ..annotations import _annotations_starts_stops from ..bem import _check_origin from ..channels.channels import _get_T1T2_mag_inds, fix_mag_coil_types -from ..fixes import _safe_svd, bincount, sph_harm_y +from ..fixes import _reshape_view, _safe_svd, bincount, sph_harm_y from ..forward import _concatenate_coils, _create_meg_coils, _prep_meg_channels from ..io import BaseRaw, RawArray from ..surface import _normalize_vectors @@ -2787,7 +2787,7 @@ def find_bad_channels_maxwell( n = stop - start flat_stop = n - (n % flat_step) data = chunk_raw.get_data(good_meg_picks, 0, flat_stop) - data = data.reshape((data.shape[0], -1, flat_step), copy=False) + data = _reshape_view(data, (data.shape[0], -1, flat_step)) delta = np.std(data, axis=-1).min(-1) # min std across segments # We may want to return this later if `return_scores=True`. diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 912c921321d..80e9c5fc841 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -15,6 +15,7 @@ import pytest from matplotlib import pyplot as plt +from mne.fixes import _reshape_view from mne import ( Epochs, create_info, @@ -507,7 +508,7 @@ def test_add_bem_n_jobs(n_jobs, monkeypatch): ) assert imgs.ndim == 4 # images, h, w, rgba assert len(imgs) == 6 - imgs = imgs.reshape((len(imgs), -1), copy=False) + imgs = _reshape_view(imgs, (len(imgs), -1)) norms = np.linalg.norm(imgs, axis=-1) # should have down-up-down shape corr = np.corrcoef(norms, np.hanning(len(imgs)))[0, 1] diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 20ded52b5e1..16a4f08226f 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -19,7 +19,7 @@ from .cov import Covariance from .evoked import _get_peak from .filter import FilterMixin, _check_fun, resample -from .fixes import _eye_array, _safe_svd +from .fixes import _eye_array, _reshape_view, _safe_svd from .parallel import parallel_func from .source_space._source_space import ( SourceSpaces, @@ -3729,8 +3729,8 @@ def _gen_extract_label_time_course( assert vertidx.shape[1] == stc.data.shape[0] this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data - this_data = this_data.reshape( - (this_data.shape[0],) + stc.data.shape[1:], copy=False + this_data = _reshape_view( + this_data, (this_data.shape[0],) + stc.data.shape[1:] ) else: this_data = stc.data[vertidx] diff --git a/mne/source_space/_source_space.py b/mne/source_space/_source_space.py index cda5256ce1c..d18fba88d65 100644 --- a/mne/source_space/_source_space.py +++ b/mne/source_space/_source_space.py @@ -41,7 +41,7 @@ read_freesurfer_lut, ) from ..bem import ConductorModel, read_bem_surfaces -from ..fixes import _get_img_fdata +from ..fixes import _get_img_fdata, _reshape_view from ..parallel import parallel_func from ..surface import ( _CheckInside, @@ -2317,7 +2317,7 @@ def _make_volume_source_space( checks = np.where(neigh >= 0)[0] removes = np.logical_not(np.isin(checks, sp["vertno"])) neigh[checks[removes]] = -1 - neigh = neigh.reshape(old_shape, copy=False) + neigh = _reshape_view(neigh, old_shape) neigh = neigh.T # Thought we would need this, but C code keeps -1 vertices, so we will: # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])] diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index 24fd74ff9f2..eb887e74a7d 100644 --- a/mne/stats/cluster_level.py +++ 
b/mne/stats/cluster_level.py @@ -10,7 +10,7 @@ from scipy.stats import f as fstat from scipy.stats import t as tstat -from ..fixes import has_numba, jit +from ..fixes import _reshape_view, has_numba, jit from ..parallel import parallel_func from ..source_estimate import MixedSourceEstimate, SourceEstimate, VolSourceEstimate from ..source_space import SourceSpaces @@ -695,7 +695,7 @@ def _do_permutations( # The stat should have the same shape as the samples for no adj. if adjacency is None: - t_obs_surr = t_obs_surr.reshape(sample_shape, copy=False) + t_obs_surr = _reshape_view(t_obs_surr, sample_shape) # Find cluster on randomized stats out = _find_clusters( @@ -783,7 +783,7 @@ def _do_1samp_permutations( # The stat should have the same shape as the samples for no adj. if adjacency is None: - t_obs_surr = t_obs_surr.reshape(sample_shape, copy=False) + t_obs_surr = _reshape_view(t_obs_surr, sample_shape) # Find cluster on randomized stats out = _find_clusters( @@ -974,7 +974,7 @@ def _permutation_cluster_test( f"compatible with the sample shape {sample_shape}" ) if adjacency is None or adjacency is False: - t_obs = t_obs.reshape(sample_shape, copy=False) + t_obs = _reshape_view(t_obs, sample_shape) if exclude is not None: include = np.logical_not(exclude) @@ -1001,7 +1001,7 @@ def _permutation_cluster_test( clusters, cluster_stats = out # The stat should have the same shape as the samples - t_obs = t_obs.reshape(sample_shape, copy=False) + t_obs = _reshape_view(t_obs, sample_shape) # For TFCE, return the "adjusted" statistic instead of raw scores # and for clusters, each point gets treated independently @@ -1113,7 +1113,7 @@ def _permutation_cluster_test( for ti in to_remove: step_down_include[clusters[ti]] = False if adjacency is None and adjacency is not False: - step_down_include = step_down_include.reshape(sample_shape, copy=False) + step_down_include = _reshape_view(step_down_include, sample_shape) n_step_downs += 1 if step_down_p > 0: a_text = "additional " if n_step_downs > 1 else "" diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py index c1f98b6853a..d0a86a5722a 100644 --- a/mne/tests/test_transforms.py +++ b/mne/tests/test_transforms.py @@ -19,7 +19,7 @@ import mne from mne import read_trans, write_trans from mne.datasets import testing -from mne.fixes import _get_img_fdata +from mne.fixes import _get_img_fdata, _reshape_view from mne.io import read_info from mne.transforms import ( _angle_between_quats, @@ -76,7 +76,7 @@ def test_tps(): az = np.linspace(0.0, 2 * np.pi, 20, endpoint=False) pol = np.linspace(0, np.pi, 12)[1:-1] sph = np.array(np.meshgrid(1, az, pol, indexing="ij")) - sph = sph.reshape((3, -1), copy=False) + sph = _reshape_view(sph, (3, -1)) assert_equal(sph.shape[1], 200) source = _sph_to_cart(sph.T) destination = source.copy() diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index 78ee4fe65b2..d917056b2ac 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -10,6 +10,7 @@ from scipy.signal import get_window from scipy.signal.windows import dpss as sp_dpss +from ..fixes import _reshape_view from ..parallel import parallel_func from ..utils import _check_option, logger, verbose, warn @@ -455,7 +456,7 @@ def psd_array_multitaper( # Combining/reshaping to original data shape last_dims = (n_freqs,) if output == "power" else (n_tapers, n_freqs) - psd = psd.reshape(dshape + last_dims, copy=False) + psd = _reshape_view(psd, dshape + last_dims) if ndim_in == 1: psd = psd[0] diff --git 
a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py index ef229de0c66..01d932699a1 100644 --- a/mne/time_frequency/psd.py +++ b/mne/time_frequency/psd.py @@ -8,6 +8,7 @@ import numpy as np from scipy.signal import spectrogram +from ..fixes import _reshape_view from ..parallel import parallel_func from ..utils import _check_option, _ensure_int, logger, verbose, warn from ..utils.numerics import _mask_to_onsets_offsets @@ -312,5 +313,5 @@ def func(*args, **kwargs): if bad_ch.any(): psds[bad_ch] = np.nan - psds = psds.reshape(shape, copy=False) + psds = _reshape_view(psds, shape) return psds, freqs diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index a0be81b46dc..c43bd8aa1e7 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -16,6 +16,7 @@ from scipy.sparse import csr_array from scipy.spatial.distance import cdist +from ...fixes import _reshape_view from ..._fiff.meas_info import Info from ..._fiff.pick import pick_types from ..._freesurfer import ( @@ -2230,7 +2231,7 @@ def add_label( if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape(self.geo[hemi].faces.shape, copy=False) + keep_idx = _reshape_view(keep_idx, self.geo[hemi].faces.shape) keep_idx = self.geo[hemi].faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 @@ -3978,9 +3979,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx = keep_idx.reshape( - self.geo[hemi].orig_faces.shape, copy=False - ) + keep_idx = _reshape_view(keep_idx, self.geo[hemi].orig_faces.shape) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index b8ad2bf8bc1..6315d79618c 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -15,6 +15,7 @@ from matplotlib.lines import Line2D from numpy.testing import assert_allclose, assert_array_equal +from mne.fixes import _reshape_view from mne import ( Dipole, MixedSourceEstimate, @@ -1466,7 +1467,7 @@ def _create_testing_brain( stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = rng.rand( stc_data.size // 20 ) - stc_data = stc_data.reshape((n_verts, n_time), copy=False) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) if diverging: stc_data -= 0.5 stc = klass(stc_data, vertices, 1, 1) diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 7793b8ed90a..a46e1f9cc31 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -16,7 +16,7 @@ import numpy as np -from ...fixes import _compare_version +from ...fixes import _compare_version, _reshape_view from ...utils import _check_qt_version, _validate_type, logger, warn from ..utils import _get_cmap @@ -355,7 +355,7 @@ def _pixmap_to_ndarray(pixmap): if hasattr(ptr, "setsize"): # PyQt ptr.setsize(count) data = np.frombuffer(ptr, dtype=np.uint8, count=count).copy() - data = data.reshape((img.height(), img.width(), 4), copy=False) + data = _reshape_view(data, (img.height(), img.width(), 4)) return data / 255.0 diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 56948f2dbd6..b5853d095d7 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -12,6 +12,7 @@ from matplotlib.figure import Figure from 
numpy.testing import assert_allclose, assert_array_equal +from mne.fixes import _reshape_view from mne import ( MixedSourceEstimate, SourceEstimate, @@ -135,7 +136,7 @@ def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data = stc_data.reshape((n_verts, n_time), copy=False) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" @@ -942,7 +943,7 @@ def test_process_clim_plot(renderer_interactive, brain_gc): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.random.RandomState(0).rand(n_verts * n_time) - stc_data = stc_data.reshape((n_verts, n_time), copy=False) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") # Test for simple use cases @@ -1064,7 +1065,7 @@ def test_stc_mpl(): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.ones(n_verts * n_time) - stc_data = stc_data.reshape((n_verts, n_time), copy=False) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") stc.plot( subjects_dir=subjects_dir, @@ -1396,7 +1397,7 @@ def test_link_brains(renderer_interactive): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data = stc_data.reshape((n_verts, n_time), copy=False) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" From 9470161f32b4febddc5b662c637341c822b780d8 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 12 Jan 2026 18:09:15 +0100 Subject: [PATCH 10/10] fix pre-commit --- mne/beamformer/tests/test_dics.py | 2 +- mne/beamformer/tests/test_external.py | 2 +- mne/beamformer/tests/test_lcmv.py | 2 +- mne/channels/montage.py | 2 +- mne/decoding/tests/test_receptive_field.py | 2 +- mne/event.py | 2 +- mne/forward/forward.py | 2 +- mne/forward/tests/test_field_interpolation.py | 2 +- mne/io/bti/bti.py | 2 +- mne/io/ctf/ctf.py | 2 +- mne/io/eeglab/eeglab.py | 2 +- mne/io/fiff/raw.py | 2 +- mne/io/kit/kit.py | 2 +- mne/io/nirx/nirx.py | 2 +- mne/minimum_norm/tests/test_inverse.py | 2 +- mne/report/tests/test_report.py | 2 +- mne/viz/_brain/_brain.py | 2 +- mne/viz/_brain/tests/test_brain.py | 2 +- mne/viz/tests/test_3d.py | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index a9ef542e8d1..555eceec513 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -10,7 +10,6 @@ import mne from mne import pick_types -from mne.fixes import _reshape_view from mne._fiff.constants import FIFF from mne._fiff.pick import pick_info from mne.beamformer import ( @@ -26,6 +25,7 @@ from mne.beamformer._dics import _prepare_noise_csd from mne.beamformer.tests.test_lcmv import _assert_weight_norm from mne.datasets import testing +from mne.fixes import _reshape_view from mne.io import read_info from mne.proj import compute_proj_evoked, make_projector from mne.surface import _compute_nearest diff --git a/mne/beamformer/tests/test_external.py b/mne/beamformer/tests/test_external.py index 75d79cb9cef..8fcb09e870e 100644 --- a/mne/beamformer/tests/test_external.py +++ b/mne/beamformer/tests/test_external.py @@ -9,9 +9,9 @@ import mne from mne.beamformer import 
apply_lcmv, apply_lcmv_cov, make_lcmv -from mne.fixes import _reshape_view from mne.beamformer.tests.test_lcmv import _get_data from mne.datasets import testing +from mne.fixes import _reshape_view data_path = testing.data_path(download=False) ft_data_path = data_path / "fieldtrip" / "beamformer" diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 9f23a5b91c4..1e0443a48f3 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -18,7 +18,6 @@ from scipy.spatial.distance import cdist import mne -from mne.fixes import _reshape_view from mne import ( EvokedArray, VolSourceEstimate, @@ -43,6 +42,7 @@ ) from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.datasets import testing +from mne.fixes import _reshape_view from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.minimum_norm.tests.test_inverse import _assert_free_ori_match from mne.simulation import simulate_evoked diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 142ac86e6ca..60287a0178d 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -10,7 +10,6 @@ import numpy as np -from ..fixes import _reshape_view from .._fiff._digitization import ( _coord_frame_const, _count_points_by_type, @@ -29,6 +28,7 @@ from .._fiff.pick import _picks_to_idx, channel_type, pick_types from .._freesurfer import get_mni_fiducials from ..defaults import HEAD_SIZE_DEFAULT +from ..fixes import _reshape_view from ..transforms import ( Transform, _ensure_trans, diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index c718af4beb5..b9bf9693bd8 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -16,7 +16,6 @@ from sklearn.utils.estimator_checks import parametrize_with_checks from mne.decoding import ReceptiveField, TimeDelayingRidge -from mne.fixes import _reshape_view from mne.decoding.receptive_field import ( _SCORERS, _delay_time_series, @@ -24,6 +23,7 @@ _times_to_delays, ) from mne.decoding.time_delaying_ridge import _compute_corrs, _compute_reg_neighbors +from mne.fixes import _reshape_view data_dir = Path(__file__).parents[2] / "io" / "tests" / "data" raw_fname = data_dir / "test_raw.fif" diff --git a/mne/event.py b/mne/event.py index 76fd5ab8959..e8eb4f28579 100644 --- a/mne/event.py +++ b/mne/event.py @@ -9,13 +9,13 @@ import numpy as np -from .fixes import _reshape_view from ._fiff.constants import FIFF from ._fiff.open import fiff_open from ._fiff.pick import pick_channels from ._fiff.tag import read_tag from ._fiff.tree import dir_tree_find from ._fiff.write import end_block, start_and_end_file, start_block, write_int +from .fixes import _reshape_view from .utils import ( _check_fname, _check_integer_or_list, diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 53ebaffd3de..83aa4b3a9b8 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -18,7 +18,6 @@ import numpy as np from scipy import sparse -from ..fixes import _reshape_view from .._fiff.constants import FIFF from .._fiff.matrix import ( _read_named_matrix, @@ -49,6 +48,7 @@ ) from ..epochs import BaseEpochs from ..evoked import Evoked, EvokedArray +from ..fixes import _reshape_view from ..html_templates import _get_html_template from ..io import BaseRaw, RawArray from ..label import Label diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py index 
2da3fd93095..4c6ecd73fd5 100644 --- a/mne/forward/tests/test_field_interpolation.py +++ b/mne/forward/tests/test_field_interpolation.py @@ -18,8 +18,8 @@ import mne from mne import Epochs, make_fixed_length_events, pick_types, read_evokeds -from mne.fixes import _reshape_view from mne.datasets import testing +from mne.fixes import _reshape_view from mne.forward import _make_surface_mapping, make_field_map from mne.forward._field_interpolation import _setup_dots from mne.forward._lead_dots import ( diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index 41f265d2070..e811cd61ff0 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -9,12 +9,12 @@ import numpy as np -from ...fixes import _reshape_view from ..._fiff._digitization import _make_bti_dig_points from ..._fiff.constants import FIFF from ..._fiff.meas_info import _empty_info from ..._fiff.tag import _coil_trans_to_loc, _loc_to_coil_trans from ..._fiff.utils import _mult_cal_one, read_str +from ...fixes import _reshape_view from ...transforms import Transform, combine_transforms, invert_transform from ...utils import _stamp_to_dt, _validate_type, logger, path_like, verbose from ..base import BaseRaw diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index 8ad0d6b0185..d8b2c96e1bd 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -8,9 +8,9 @@ import numpy as np -from ...fixes import _reshape_view from ..._fiff._digitization import _format_dig_points from ..._fiff.utils import _blk_read_lims, _mult_cal_one +from ...fixes import _reshape_view from ...utils import ( _check_fname, _check_option, diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 7cb21a5cf46..6e651c639fd 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -10,7 +10,6 @@ from mne.utils.check import _check_option -from ...fixes import _reshape_view from ..._fiff._digitization import _ensure_fiducials_head from ..._fiff.constants import FIFF from ..._fiff.meas_info import create_info @@ -21,6 +20,7 @@ from ...defaults import DEFAULTS from ...epochs import BaseEpochs from ...event import read_events +from ...fixes import _reshape_view from ...utils import ( Bunch, _check_fname, diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index 89010fa8d3a..aad07690de8 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -8,7 +8,6 @@ import numpy as np -from ...fixes import _reshape_view from ..._fiff.constants import FIFF from ..._fiff.meas_info import read_meas_info from ..._fiff.open import _fiff_get_fid, _get_next_fname, fiff_open @@ -18,6 +17,7 @@ from ...annotations import Annotations, _read_annotations_fif from ...channels import fix_mag_coil_types from ...event import AcqParserFIF +from ...fixes import _reshape_view from ...utils import ( _check_fname, _file_like, diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 76c01aa32bf..53006dba43d 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -15,13 +15,13 @@ import numpy as np -from ...fixes import _reshape_view from ..._fiff.constants import FIFF from ..._fiff.meas_info import _empty_info from ..._fiff.pick import pick_types from ..._fiff.utils import _mult_cal_one from ...epochs import BaseEpochs from ...event import read_events +from ...fixes import _reshape_view from ...transforms import Transform, als_ras_trans, apply_trans from ...utils import ( _check_fname, diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index a2fa89c42bc..766986c4612 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -12,12 +12,12 @@ import numpy as np from scipy.io 
import loadmat -from ...fixes import _reshape_view from ..._fiff.constants import FIFF from ..._fiff.meas_info import _format_dig_points, create_info from ..._fiff.utils import _mult_cal_one from ..._freesurfer import get_mni_fiducials from ...annotations import Annotations +from ...fixes import _reshape_view from ...transforms import _get_trans, apply_trans from ...utils import ( _check_fname, diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 84f50090d00..438003c16ee 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -18,7 +18,6 @@ from scipy import sparse import mne -from mne.fixes import _reshape_view from mne import ( Covariance, EvokedArray, @@ -39,6 +38,7 @@ from mne.datasets import testing from mne.epochs import Epochs, EpochsArray, make_fixed_length_epochs from mne.event import read_events +from mne.fixes import _reshape_view from mne.forward import apply_forward, is_fixed_orient, restrict_forward_to_stc from mne.io import read_info, read_raw_fif from mne.label import label_sign_flip, read_label diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 80e9c5fc841..5fa47d63914 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -15,7 +15,6 @@ import pytest from matplotlib import pyplot as plt -from mne.fixes import _reshape_view from mne import ( Epochs, create_info, @@ -27,6 +26,7 @@ from mne._fiff.write import DATE_NONE from mne.datasets import testing from mne.epochs import make_metadata +from mne.fixes import _reshape_view from mne.io import RawArray, read_info, read_raw_fif from mne.preprocessing import ICA from mne.report import Report, _ReportScraper, open_report, report diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index c43bd8aa1e7..5735fd98e24 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -16,7 +16,6 @@ from scipy.sparse import csr_array from scipy.spatial.distance import cdist -from ...fixes import _reshape_view from ..._fiff.meas_info import Info from ..._fiff.pick import pick_types from ..._freesurfer import ( @@ -29,6 +28,7 @@ vertex_to_mni, ) from ...defaults import DEFAULTS, _handle_default +from ...fixes import _reshape_view from ...surface import _marching_cubes, _mesh_borders, mesh_edges from ...transforms import ( Transform, diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index 6315d79618c..705f1a52347 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -15,7 +15,6 @@ from matplotlib.lines import Line2D from numpy.testing import assert_allclose, assert_array_equal -from mne.fixes import _reshape_view from mne import ( Dipole, MixedSourceEstimate, @@ -32,6 +31,7 @@ ) from mne.channels import make_dig_montage from mne.datasets import testing +from mne.fixes import _reshape_view from mne.io import read_info from mne.label import read_label from mne.minimum_norm import apply_inverse, make_inverse_operator diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index b5853d095d7..154313a1eeb 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -12,7 +12,6 @@ from matplotlib.figure import Figure from numpy.testing import assert_allclose, assert_array_equal -from mne.fixes import _reshape_view from mne import ( MixedSourceEstimate, SourceEstimate, @@ -33,6 +32,7 @@ from mne.bem import read_bem_solution, read_bem_surfaces from mne.datasets import testing from 
mne.defaults import DEFAULTS +from mne.fixes import _reshape_view from mne.io import read_info, read_raw_bti, read_raw_ctf, read_raw_kit, read_raw_nirx from mne.minimum_norm import apply_inverse from mne.source_estimate import _BaseVolSourceEstimate
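For reference, every hunk in this patch routes reshapes through the `_reshape_view` helper imported from `mne.fixes`, replacing the deprecated `arr.shape = dims` assignments and the NumPy >= 2.1-only `arr.reshape(..., copy=False)` calls, but the helper's definition is not part of the diff shown here. Below is a minimal sketch of what such a no-copy reshape shim could look like; the function name matches the import, while the signature, the TypeError fallback, and the `shares_memory` check are assumptions for illustration, not the actual `mne/fixes.py` implementation.

    import numpy as np

    def _reshape_view(x, shape):
        """Reshape ``x`` without copying, i.e. always return a view.

        Sketch only: on NumPy >= 2.1, ``reshape`` accepts ``copy=False`` and
        raises if a view is impossible; on older NumPy we reshape normally and
        then verify that no copy was made.
        """
        try:
            return x.reshape(shape, copy=False)  # NumPy >= 2.1
        except TypeError:  # older NumPy: no ``copy`` keyword on reshape
            out = x.reshape(shape)
            if out.size and not np.shares_memory(out, x):
                raise ValueError(
                    f"cannot reshape {x.shape} to {shape} without copying"
                )
            return out

Used like the call sites above, e.g. ``data = _reshape_view(data, (-1, 10))`` in place of the deprecated ``data.shape = (-1, 10)``, a helper along these lines preserves the original guarantee that reshaping never silently duplicates the underlying buffer.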