diff --git a/doc/changes/dev/13585.other.rst b/doc/changes/dev/13585.other.rst new file mode 100644 index 00000000000..bb62007ee71 --- /dev/null +++ b/doc/changes/dev/13585.other.rst @@ -0,0 +1 @@ +Fix deprecation of setting a shape on an array directly in ``numpy`` 2.5+, by `Mathieu Scheltienne`_. diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py index c198d606dee..9664a4f470e 100644 --- a/mne/_fiff/_digitization.py +++ b/mne/_fiff/_digitization.py @@ -7,6 +7,7 @@ import numpy as np +from ..fixes import _reshape_view from ..utils import Bunch, _check_fname, _validate_type, logger, verbose, warn from .constants import FIFF, _coord_frame_named from .tag import read_tag @@ -335,7 +336,7 @@ def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): f"Only single coordinate frame in dig is supported, got {dig_coord_frames}" ) dig_ch_pos_location = np.array(dig_ch_pos_location) - dig_ch_pos_location.shape = (-1, 3) # empty will be (0, 3) + dig_ch_pos_location = _reshape_view(dig_ch_pos_location, (-1, 3)) return Bunch( nasion=fids.get("nasion", None), lpa=fids.get("lpa", None), diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py index 96fee81da20..391606e7cca 100644 --- a/mne/_fiff/tag.py +++ b/mne/_fiff/tag.py @@ -12,6 +12,7 @@ import numpy as np from scipy.sparse import csc_array, csr_array +from ..fixes import _reshape_view from ..utils import _check_option, warn from ..utils.numerics import _julian_to_date from .constants import ( @@ -177,7 +178,7 @@ def _read_matrix(fid, tag, shape, rlims): data = data.view(">c8") elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE: data = data.view(">c16") - data.shape = dims + data = _reshape_view(data, dims) else: # Find dimensions and return to the beginning of tag data ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item()) diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py index 2b8c1a4ee84..901651d89de 100644 --- a/mne/beamformer/_rap_music.py +++ b/mne/beamformer/_rap_music.py @@ -8,7 +8,7 @@ from scipy import linalg from .._fiff.pick import pick_channels_forward, pick_info -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import convert_forward_solution, is_fixed_orient from ..inverse_sparse.mxne_inverse import _make_dipoles_sparse from ..minimum_norm.inverse import _log_exp_var @@ -68,9 +68,9 @@ def _apply_rap_music( phi_sig = eig_vectors[:, -n_dipoles:] n_orient = 3 if is_free_ori else 1 - G.shape = (G.shape[0], -1, n_orient) + G = _reshape_view(G, (G.shape[0], -1, n_orient)) gain = forward["sol"]["data"].copy() - gain.shape = G.shape + gain = _reshape_view(gain, G.shape) n_channels = G.shape[0] A = np.empty((n_channels, n_dipoles)) gain_dip = np.empty((n_channels, n_dipoles)) @@ -122,7 +122,7 @@ def _apply_rap_music( sol = linalg.lstsq(A, M)[0] if n_orient == 3: X = sol[:, np.newaxis] * oris[:, :, np.newaxis] - X.shape = (-1, len(times)) + X = _reshape_view(X, (-1, len(times))) else: X = sol diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index cebc0bb4057..555eceec513 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -25,6 +25,7 @@ from mne.beamformer._dics import _prepare_noise_csd from mne.beamformer.tests.test_lcmv import _assert_weight_norm from mne.datasets import testing +from mne.fixes import _reshape_view from mne.io import read_info from mne.proj import compute_proj_evoked, make_projector from mne.surface import _compute_nearest @@ -269,7 +270,7 @@ def test_make_dics(tmp_path, 
_load_forward, idx, whiten): exp=None, noise_cov=noise_cov, ) - G.shape = (n_channels, n_verts, n_orient) + G = _reshape_view(G, (n_channels, n_verts, n_orient)) G = G.transpose(1, 2, 0).conj() # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/beamformer/tests/test_external.py b/mne/beamformer/tests/test_external.py index e4373693496..8fcb09e870e 100644 --- a/mne/beamformer/tests/test_external.py +++ b/mne/beamformer/tests/test_external.py @@ -11,6 +11,7 @@ from mne.beamformer import apply_lcmv, apply_lcmv_cov, make_lcmv from mne.beamformer.tests.test_lcmv import _get_data from mne.datasets import testing +from mne.fixes import _reshape_view data_path = testing.data_path(download=False) ft_data_path = data_path / "fieldtrip" / "beamformer" @@ -98,7 +99,7 @@ def test_lcmv_fieldtrip(_get_bf_data, bf_type, weight_norm, pick_ori, pwr): ft_fname = ft_data_path / ("ft_source_" + bf_type + "-vol.mat") stc_ft_data = pymatreader.read_mat(ft_fname)["stc"] if stc_ft_data.ndim == 1: - stc_ft_data.shape = (stc_ft_data.size, 1) + stc_ft_data = _reshape_view(stc_ft_data, (stc_ft_data.size, 1)) if stc_mne.data.ndim == 2: signs = np.sign((stc_mne.data * stc_ft_data).sum(-1, keepdims=True)) diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 35b9943d54e..1e0443a48f3 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -42,6 +42,7 @@ ) from mne.beamformer._compute_beamformer import _prepare_beamformer_input from mne.datasets import testing +from mne.fixes import _reshape_view from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.minimum_norm.tests.test_inverse import _assert_free_ori_match from mne.simulation import simulate_evoked @@ -1185,7 +1186,7 @@ def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion): ) n_channels, n_sources = G.shape n_sources //= 3 - G.shape = (n_channels, n_sources, 3) + G = _reshape_view(G, (n_channels, n_sources, 3)) G = G.transpose(1, 2, 0) # verts, orient, ch _assert_weight_norm(filters, G) diff --git a/mne/channels/montage.py b/mne/channels/montage.py index 35fdbce917c..60287a0178d 100644 --- a/mne/channels/montage.py +++ b/mne/channels/montage.py @@ -28,6 +28,7 @@ from .._fiff.pick import _picks_to_idx, channel_type, pick_types from .._freesurfer import get_mni_fiducials from ..defaults import HEAD_SIZE_DEFAULT +from ..fixes import _reshape_view from ..transforms import ( Transform, _ensure_trans, @@ -973,9 +974,9 @@ def read_dig_hpts(fname, unit="mm"): label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "eeg" } hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "hpi"]) - hpi.shape = (-1, 3) # in case it's empty + hpi = _reshape_view(hpi, (-1, 3)) # in case it's empty hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "extra"]) - hsp.shape = (-1, 3) # in case it's empty + hsp = _reshape_view(hsp, (-1, 3)) # in case it's empty return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) diff --git a/mne/chpi.py b/mne/chpi.py index cc921a9843e..7ea3d1b407d 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -43,7 +43,7 @@ from .cov import compute_whitener, make_ad_hoc_cov from .dipole import _make_guesses from .event import find_events -from .fixes import jit +from .fixes import _reshape_view, jit from .forward import _concatenate_coils, _create_meg_coils, _magnetic_dipole_field_vec from .io import BaseRaw, RawArray from .io.ctf.trans import _make_ctf_coord_trans_set @@ -117,7 +117,7 @@ 
def read_head_pos(fname): """ _check_fname(fname, must_exist=True, overwrite="read") data = np.loadtxt(fname, skiprows=1) # first line is header, skip it - data.shape = (-1, 10) # ensure it's the right size even if empty + data = _reshape_view(data, (-1, 10)) # ensure it's the right size even if empty if np.isnan(data).any(): # make sure we didn't do something dumb raise RuntimeError(f"positions could not be read properly from {fname}") return data @@ -1390,7 +1390,7 @@ def compute_chpi_locs( ) fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) fwd = np.dot(fwd, whitener.T) - fwd.shape = (guesses.shape[0], 3, -1) + fwd = _reshape_view(fwd, (guesses.shape[0], 3, -1)) fwd = np.linalg.svd(fwd, full_matrices=False)[2] guesses = dict(rr=guesses, whitened_fwd_svd=fwd) del fwd, R diff --git a/mne/conftest.py b/mne/conftest.py index 5a7fa4fed40..b1d18d6805f 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -207,6 +207,9 @@ def pytest_configure(config: pytest.Config): ignore:^'.*' deprecated - use '.*'$:DeprecationWarning # dipy ignore:'where' used without 'out', expect .*:UserWarning + # VTK <-> NumPy 2.5 (https://gitlab.kitware.com/vtk/vtk/-/merge_requests/12796) + # nitime <-> NumPy 2.5 (https://github.com/nipy/nitime/pull/236) + ignore:Setting the shape on a NumPy array has been deprecated.*:DeprecationWarning """ # noqa: E501 for warning_line in warning_lines.split("\n"): warning_line = warning_line.strip() diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py index 0516adadb9a..7b8fb63dfd3 100644 --- a/mne/decoding/receptive_field.py +++ b/mne/decoding/receptive_field.py @@ -15,6 +15,7 @@ from sklearn.exceptions import NotFittedError from sklearn.metrics import r2_score +from ..fixes import _reshape_view from ..utils import _validate_type, fill_doc, pinv from ._fixes import _check_n_features_3d, validate_data from .base import _check_estimator, get_coef @@ -361,7 +362,7 @@ def predict(self, X): else: extra = 1 shape = shape[: self._y_dim + extra] - y_pred.shape = shape + y_pred = _reshape_view(y_pred, shape) return y_pred def score(self, X, y): diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index db2209f4695..b9bf9693bd8 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -23,6 +23,7 @@ _times_to_delays, ) from mne.decoding.time_delaying_ridge import _compute_corrs, _compute_reg_neighbors +from mne.fixes import _reshape_view data_dir = Path(__file__).parents[2] / "io" / "tests" / "data" raw_fname = data_dir / "test_raw.fif" @@ -271,7 +272,7 @@ def test_time_delaying_fast_calc(n_jobs): smin, smax = 1, 2 X_del = _delay_time_series(X, smin, smax, 1.0) # (n_times, n_features, n_delays) -> (n_times, n_features * n_delays) - X_del.shape = (X.shape[0], -1) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array([[0, 1, 2], [0, 0, 1], [0, 5, 7], [0, 0, 5]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -282,7 +283,7 @@ def test_time_delaying_fast_calc(n_jobs): # all positive smin, smax = -2, -1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array([[3, 0, 0], [2, 3, 0], [11, 0, 0], [7, 11, 0]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -293,7 +294,7 @@ def test_time_delaying_fast_calc(n_jobs): # both sides smin, smax = -1, 1 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = 
(X.shape[0], -1) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array( [[2, 3, 0], [1, 2, 3], [0, 1, 2], [7, 11, 0], [5, 7, 11], [0, 5, 7]] ).T @@ -315,7 +316,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3, 5]]).T smin, smax = 0, 3 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array([[1, 2, 3, 5], [0, 1, 2, 3], [0, 0, 1, 2], [0, 0, 0, 1]]).T assert_allclose(X_del, expected) Xt_X = np.dot(X_del.T, X_del) @@ -328,7 +329,7 @@ def test_time_delaying_fast_calc(n_jobs): X = np.array([[1, 2, 3], [5, 7, 11]]).T smin, smax = 0, 2 X_del = _delay_time_series(X, smin, smax, 1.0) - X_del.shape = (X.shape[0], -1) + X_del = _reshape_view(X_del, (X.shape[0], -1)) expected = np.array( [[1, 2, 3], [0, 1, 2], [0, 0, 1], [5, 7, 11], [0, 5, 7], [0, 0, 5]] ).T @@ -366,7 +367,7 @@ def test_time_delaying_fast_calc(n_jobs): x_yt_true = einsum("tfd,to->ofd", X_del, y) x_yt_true = np.reshape(x_yt_true, (x_yt_true.shape[0], -1)).T assert_allclose(x_yt, x_yt_true, atol=1e-7, err_msg=(smin, smax)) - X_del.shape = (X.shape[0], -1) + X_del = _reshape_view(X_del, (X.shape[0], -1)) x_xt_true = np.dot(X_del.T, X_del).T assert_allclose(x_xt, x_xt_true, atol=1e-7, err_msg=(smin, smax)) @@ -388,7 +389,7 @@ def test_receptive_field_1d(n_jobs): y[delay:] = x[:-delay, 0] slims += [(1, 2)] for ndim in (1, 2): - y.shape = (y.shape[0],) + (1,) * (ndim - 1) + y = _reshape_view(y, (y.shape[0],) + (1,) * (ndim - 1)) for slim in slims: smin, smax = slim lap = TimeDelayingRidge( diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index b02ff8d8deb..c5fd14d9568 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -17,6 +17,7 @@ from ..cov import _check_scalings_user from ..epochs import BaseEpochs from ..filter import filter_data +from ..fixes import _reshape_view from ..time_frequency import psd_array_multitaper from ..utils import _check_option, _validate_type, check_version, fill_doc from ._fixes import validate_data # TODO VERSION remove with sklearn 1.4+ @@ -118,7 +119,7 @@ def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs): X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1])) X = func(X, *args, **kwargs) if return_result: - X.shape = (orig_shape[0], orig_shape[2], orig_shape[1]) + X = _reshape_view(X, (orig_shape[0], orig_shape[2], orig_shape[1])) X = X.transpose(0, 2, 1) return X diff --git a/mne/epochs.py b/mne/epochs.py index 4bd94ffa2c5..2d317caa63e 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -66,7 +66,7 @@ from .event import _read_events_fif, make_fixed_length_events, match_event_names from .evoked import EvokedArray from .filter import FilterMixin, _check_fun, detrend -from .fixes import rng_uniform +from .fixes import _reshape_view, rng_uniform from .html_templates import _get_html_template from .parallel import parallel_func from .time_frequency.spectrum import EpochsSpectrum, SpectrumMixin, _validate_method @@ -4479,7 +4479,7 @@ def _get_epoch_from_raw(self, idx, verbose=None): else: data = data.astype(np.float64) - data.shape = raw.epoch_shape + data = _reshape_view(data, raw.epoch_shape) data *= raw.cals return data diff --git a/mne/event.py b/mne/event.py index ba2c4f0120c..e8eb4f28579 100644 --- a/mne/event.py +++ b/mne/event.py @@ -15,6 +15,7 @@ from ._fiff.tag import read_tag from ._fiff.tree import dir_tree_find from ._fiff.write import end_block, start_and_end_file, start_block, write_int +from .fixes import 
_reshape_view from .utils import ( _check_fname, _check_integer_or_list, @@ -181,7 +182,7 @@ def _read_events_fif(fid, tree): if event_list is None: raise ValueError("Could not find any events") else: - event_list.shape = (-1, 3) + event_list = _reshape_view(event_list, (-1, 3)) for d in events["directory"]: kind = d.kind pos = d.pos diff --git a/mne/filter.py b/mne/filter.py index 8d5d3e48ea7..304afdf4de7 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -22,7 +22,7 @@ _setup_cuda_fft_resample, _smart_pad, ) -from .fixes import minimum_phase +from .fixes import _reshape_view, minimum_phase from .parallel import parallel_func from .utils import ( _check_option, @@ -349,7 +349,7 @@ def _overlap_add_filter( for pp, p in enumerate(picks): x[p] = data_new[pp] - x.shape = orig_shape + x = _reshape_view(x, orig_shape) return x @@ -404,7 +404,7 @@ def _prep_for_filtering(x, copy, picks=None): orig_shape = x.shape x = np.atleast_2d(x) picks = _picks_to_idx(x.shape[-2], picks) - x.shape = (np.prod(x.shape[:-1]), x.shape[-1]) + x = _reshape_view(x, (np.prod(x.shape[:-1]), x.shape[-1])) if len(orig_shape) == 3: n_epochs, n_channels, n_times = orig_shape offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), len(picks)) @@ -577,7 +577,7 @@ def _iir_filter(x, iir_params, picks, n_jobs, copy, phase="zero"): data_new = parallel(p_fun(x=x[p]) for p in picks) for pp, p in enumerate(picks): x[p] = data_new[pp] - x.shape = orig_shape + x = _reshape_view(x, orig_shape) return x @@ -1657,7 +1657,7 @@ def _mt_spectrum_proc( ) logger.info(f"{kind} notch frequencies (Hz):\n{found_freqs}") - x.shape = orig_shape + x = _reshape_view(x, orig_shape) return x @@ -2952,5 +2952,5 @@ def _iir_pad_apply_unpad(x, *, func, padlen, padtype, **kwargs): x_ext = _smart_pad(x_ext, (padlen, padlen), padtype) x_ext = func(x=x_ext, axis=-1, padlen=0, **kwargs) this_x[:] = x_ext[padlen : len(x_ext) - padlen] - x_out.shape = x.shape + x_out = _reshape_view(x_out, x.shape) return x_out diff --git a/mne/fixes.py b/mne/fixes.py index 2148330fb34..f3a8252c40f 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -56,6 +56,47 @@ def _compare_version(version_a, operator, version_b): return getattr(operator_module, mapping[operator])(ver_a, ver_b) +############################################################################### +# NumPy 2.5 deprecates .shape assignment, but .reshape(copy=False) requires 2.1+ + + +def _reshape_view(arr, shape): + """Reshape an array as a view, raising if a copy would be required. + + This function provides compatibility across NumPy versions for reshaping + arrays as views. On NumPy >= 2.1, it uses ``reshape(copy=False)`` which + explicitly fails if a view cannot be created. On older versions, it uses + direct shape assignment which has the same behavior but is deprecated in + NumPy 2.5+. + + Can be removed once NumPy 2.1 is the minimum supported version. + + Parameters + ---------- + arr : ndarray + The array to reshape. + shape : tuple of int + The new shape. + + Returns + ------- + ndarray + A reshaped view of the array. + + Raises + ------ + AttributeError + If a view cannot be created on NumPy < 2.1. + ValueError + If a view cannot be created on NumPy >= 2.1. 
+ """ + if _compare_version(np.__version__, ">=", "2.1"): + return arr.reshape(shape, copy=False) + else: + arr.shape = shape + return arr + + ############################################################################### # Misc diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py index 8ba15def389..6453fdb47e7 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -17,7 +17,7 @@ from .._fiff.constants import FIFF from ..bem import _import_openmeeg, _make_openmeeg_geometry -from ..fixes import bincount, jit +from ..fixes import _reshape_view, bincount, jit from ..parallel import parallel_func from ..surface import _jit_cross, _project_onto_surface from ..transforms import apply_trans, invert_transform @@ -457,7 +457,7 @@ def _do_prim_curr(rr, coils): for start, stop in _rr_bounds(rr, chunk=1): pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) pp *= ws - pp.shape = (3 * (stop - start), -1) + pp = _reshape_view(pp, (3 * (stop - start), -1)) pc[3 * start : 3 * stop] = [ bincount(bins, this_pp, bins[-1] + 1) for this_pp in pp ] diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py index 10183fc6280..9e6176dbc5c 100644 --- a/mne/forward/_lead_dots.py +++ b/mne/forward/_lead_dots.py @@ -11,6 +11,7 @@ import numpy as np from numpy.polynomial import legendre +from ..fixes import _reshape_view from ..parallel import parallel_func from ..utils import _get_extra_data_path, _open_lock, fill_doc, logger, verbose @@ -86,7 +87,7 @@ def _get_legen_table( logger.info(f"Reading Legendre{extra_str} table...") with _open_lock(fname, "rb", buffering=0) as fid: lut = np.fromfile(fid, np.float32) - lut.shape = lut_shape + lut = _reshape_view(lut, lut_shape) # we need this for the integration step n_fact = np.arange(1, n_coeff, dtype=float) @@ -265,7 +266,7 @@ def _fast_sphere_dot_r0( sums = _comp_sums_meg( beta.flatten(), ct.flatten(), lut, n_fact, volume_integral ) - sums.shape = (4,) + beta.shape + sums = _reshape_view(sums, ((4,) + beta.shape)) # Accumulate the result, a little bit streamlined version # cosmags1 = cosmags1[:, np.newaxis, :] @@ -296,7 +297,7 @@ def _fast_sphere_dot_r0( result *= r else: # 'eeg' result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact) - result.shape = beta.shape + result = _reshape_view(result, beta.shape) # Give it a finishing touch! result *= _eeg_const result /= lr1lr2 diff --git a/mne/forward/forward.py b/mne/forward/forward.py index e8a7d62a3ce..83aa4b3a9b8 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -48,6 +48,7 @@ ) from ..epochs import BaseEpochs from ..evoked import Evoked, EvokedArray +from ..fixes import _reshape_view from ..html_templates import _get_html_template from ..io import BaseRaw, RawArray from ..label import Label @@ -1430,13 +1431,13 @@ def compute_depth_prior( # Gk = G[:, 3 * k:3 * (k + 1)] # x = np.dot(Gk.T, Gk) # d[k] = linalg.svdvals(x)[0] - G.shape = (G.shape[0], -1, 3) + G = _reshape_view(G, (G.shape[0], -1, 3)) d = np.linalg.norm( np.einsum("svj,svk->vjk", G, G), # vector dot prods ord=2, # ord=2 spectral (largest s.v.) 
axis=(1, 2), ) - G.shape = (G.shape[0], -1) + G = _reshape_view(G, (G.shape[0], -1)) # XXX Currently the fwd solns never have "patch_areas" defined if patch_areas is not None: diff --git a/mne/forward/tests/test_field_interpolation.py b/mne/forward/tests/test_field_interpolation.py index 57b204d97af..4c6ecd73fd5 100644 --- a/mne/forward/tests/test_field_interpolation.py +++ b/mne/forward/tests/test_field_interpolation.py @@ -19,6 +19,7 @@ import mne from mne import Epochs, make_fixed_length_events, pick_types, read_evokeds from mne.datasets import testing +from mne.fixes import _reshape_view from mne.forward import _make_surface_mapping, make_field_map from mne.forward._field_interpolation import _setup_dots from mne.forward._lead_dots import ( @@ -85,7 +86,7 @@ def test_legendre_val(): ctheta = rng.rand(20, 30) * 2.0 - 1.0 beta = rng.rand(20, 30) * 0.8 c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact) - c1.shape = beta.shape + c1 = _reshape_view(c1, beta.shape) # compare to numpy n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis] diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 35fd158f3e0..e3d077b77e6 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -4,7 +4,7 @@ import numpy as np -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import is_fixed_orient from ..minimum_norm.inverse import _check_reference, _log_exp_var from ..utils import logger, verbose, warn @@ -306,7 +306,7 @@ def gamma_map( X_xyz = np.zeros((len(active_src), 3, X.shape[1]), dtype=X.dtype) idx = np.searchsorted(active_src, idx) X_xyz[idx, offset, :] = X - X_xyz.shape = (len(active_src) * 3, X.shape[1]) + X_xyz = _reshape_view(X_xyz, (len(active_src) * 3, X.shape[1])) X = X_xyz active_set = (active_src[:, np.newaxis] * 3 + np.arange(3)).ravel() source_weighting[source_weighting == 0] = 1 # zeros diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index c3ccddbf7cd..295f72c49ce 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -6,7 +6,7 @@ from .._fiff.proj import deactivate_proj from ..dipole import Dipole -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import is_fixed_orient from ..minimum_norm.inverse import ( _check_reference, @@ -253,7 +253,9 @@ def _make_dipoles_sparse( _, keep = np.unique(active_idx, return_index=True) keep.sort() # maintain old order active_idx = active_idx[keep] - gof_split.shape = (len(active_idx), n_dip_per_pos, len(times)) + gof_split = _reshape_view( + gof_split, (len(active_idx), n_dip_per_pos, len(times)) + ) gof_split = gof_split.sum(1) assert (gof_split < 100).all() assert gof_split.shape == (len(active_idx), len(times)) diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index a992d3c7694..e811cd61ff0 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -14,6 +14,7 @@ from ..._fiff.meas_info import _empty_info from ..._fiff.tag import _coil_trans_to_loc, _loc_to_coil_trans from ..._fiff.utils import _mult_cal_one, read_str +from ...fixes import _reshape_view from ...transforms import Transform, combine_transforms, invert_transform from ...utils import _stamp_to_dt, _validate_type, logger, path_like, verbose from ..base import BaseRaw @@ -1041,7 +1042,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): block = np.fromfile(fid, dtype, count) sample_stop = sample_start + count // n_channels shape 
= (sample_stop - sample_start, bti_info["total_chans"]) - block.shape = shape + block = _reshape_view(block, shape) data_view = data[:, sample_start:sample_stop] one = np.empty(block.shape[::-1]) diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index 971ac51c2f6..d8b2c96e1bd 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -10,6 +10,7 @@ from ..._fiff._digitization import _format_dig_points from ..._fiff.utils import _blk_read_lims, _mult_cal_one +from ...fixes import _reshape_view from ...utils import ( _check_fname, _check_option, @@ -207,7 +208,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): pos += np.int64(samp_offset) * si["n_chan"] * 4 fid.seek(pos, 0) this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read) - this_data.shape = (si["n_chan"], n_read) + this_data = _reshape_view(this_data, (si["n_chan"], n_read)) this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]] data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]] _mult_cal_one(data_view, this_data, idx, cals, mult) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 497a7eeaf8f..6e651c639fd 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -20,6 +20,7 @@ from ...defaults import DEFAULTS from ...epochs import BaseEpochs from ...event import read_events +from ...fixes import _reshape_view from ...utils import ( Bunch, _check_fname, @@ -187,7 +188,7 @@ def _get_montage_information(eeg, get_pos, *, montage_units): _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) if pos_ch_names: pos_array = np.array(pos, float) - pos_array.shape = (-1, 3) + pos_array = _reshape_view(pos_array, (-1, 3)) # roughly estimate head radius and check if its reasonable is_nan_pos = np.isnan(pos).any(axis=1) diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index d4e1e40a7aa..aad07690de8 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -17,6 +17,7 @@ from ...annotations import Annotations, _read_annotations_fif from ...channels import fix_mag_coil_types from ...event import AcqParserFIF +from ...fixes import _reshape_view from ...utils import ( _check_fname, _file_like, @@ -424,7 +425,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): fid.seek(ent.pos + 16, 0) one = _call_dict[ent.type](fid, ent, shape=None, rlims=None) try: - one.shape = (nsamp, nchan) + one = _reshape_view(one, (nsamp, nchan)) except AttributeError: # one is None n_bad += picksamp else: diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 4a783518344..53006dba43d 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -21,6 +21,7 @@ from ..._fiff.utils import _mult_cal_one from ...epochs import BaseEpochs from ...event import read_events +from ...fixes import _reshape_view from ...transforms import Transform, als_ras_trans, apply_trans from ...utils import ( _check_fname, @@ -672,7 +673,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]["offset"]) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) - sensitivity.shape = (channel_count, 2) + sensitivity = _reshape_view(sensitivity, (channel_count, 2)) channel_offset, channel_gain = sensitivity.T assert (channel_offset == 0).all() # otherwise we have a problem diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index eb4101be0af..766986c4612 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -17,6 +17,7 @@ from ..._fiff.utils 
import _mult_cal_one from ..._freesurfer import get_mni_fiducials from ...annotations import Annotations +from ...fixes import _reshape_view from ...transforms import _get_trans, apply_trans from ...utils import ( _check_fname, @@ -567,7 +568,7 @@ def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None) if replace is not None: data = replace(data) x = np.fromstring(data, float, sep=sep) - x.shape = (stop - start, -1) + x = _reshape_view(x, (stop - start, -1)) x = x[:, cols] return x diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index da770744881..0c9fb6a6bc7 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -39,7 +39,7 @@ from ..cov import Covariance, _read_cov, _write_cov, compute_whitener, prepare_noise_cov from ..epochs import BaseEpochs, EpochsArray from ..evoked import Evoked, EvokedArray -from ..fixes import _safe_svd +from ..fixes import _reshape_view, _safe_svd from ..forward import ( _read_forward_meas_info, _select_orient_forward, @@ -835,8 +835,8 @@ def _assemble_kernel(inv, label, method, pick_ori, use_cps=True, verbose=None): # No need to rotate source_cov because it should be uniform # (loose=1., and depth weighting is uniform across columns) offset = sl.stop - eigen_leads.shape = (-1, eigen_leads.shape[2]) - source_nn.shape = (-1, 3) + eigen_leads = _reshape_view(eigen_leads, (-1, eigen_leads.shape[2])) + source_nn = _reshape_view(source_nn, (-1, 3)) if pick_ori == "normal": if not inv["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI: @@ -1673,7 +1673,7 @@ def apply_inverse_cov( sol = cov.data[sel][:, sel] @ K.T sol = np.sum(K * sol.T, axis=1, keepdims=True) # Reshape back to (n_src, ..., 1) - sol.shape = stc.data.shape[:-1] + (1,) + sol = _reshape_view(sol, stc.data.shape[:-1] + (1,)) stc = stc.__class__(sol, stc.vertices, stc.tmin, stc.tstep, stc.subject) if combine: # combine the three directions logger.info(" Combining the current components...") diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index d5eec6e7f91..438003c16ee 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -38,6 +38,7 @@ from mne.datasets import testing from mne.epochs import Epochs, EpochsArray, make_fixed_length_epochs from mne.event import read_events +from mne.fixes import _reshape_view from mne.forward import apply_forward, is_fixed_orient, restrict_forward_to_stc from mne.io import read_info, read_raw_fif from mne.label import label_sign_flip, read_label @@ -1686,7 +1687,7 @@ def _assert_free_ori_match(ori, max_idx, lower_ori, upper_ori): assert ori.shape == (ori.shape[0], 3) ori = ori[max_idx] assert ori.shape == (max_idx.size, 3) - ori.shape = (max_idx.size // 3, 3, 3) + ori = _reshape_view(ori, (max_idx.size // 3, 3, 3)) dots = np.abs(np.diagonal(ori, axis1=1, axis2=2)) mu = np.mean(dots) assert lower_ori <= mu <= upper_ori, mu diff --git a/mne/morph.py b/mne/morph.py index e2a48350f1b..7631fedf89a 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -9,7 +9,7 @@ import numpy as np from scipy import sparse -from .fixes import _eye_array, _get_img_fdata +from .fixes import _eye_array, _get_img_fdata, _reshape_view from .morph_map import read_morph_map from .parallel import parallel_func from .source_estimate import ( @@ -1556,7 +1556,7 @@ def _apply_morph_data(morph, stc_from): data[to_sl] = morph.morph_mat @ data_from[from_sl] assert to_used.all() assert from_used.all() - data.shape = (data.shape[0],) + stc_from.data.shape[1:] + data = 
_reshape_view(data, (data.shape[0],) + stc_from.data.shape[1:]) klass = stc_from.__class__ stc_to = klass(data, vertices_to, stc_from.tmin, stc_from.tstep, morph.subject_to) return stc_to diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py index 38a8f1c59f6..8c270252bb2 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -25,7 +25,7 @@ from ..annotations import _annotations_starts_stops from ..bem import _check_origin from ..channels.channels import _get_T1T2_mag_inds, fix_mag_coil_types -from ..fixes import _safe_svd, bincount, sph_harm_y +from ..fixes import _reshape_view, _safe_svd, bincount, sph_harm_y from ..forward import _concatenate_coils, _create_meg_coils, _prep_meg_channels from ..io import BaseRaw, RawArray from ..surface import _normalize_vectors @@ -2787,7 +2787,7 @@ def find_bad_channels_maxwell( n = stop - start flat_stop = n - (n % flat_step) data = chunk_raw.get_data(good_meg_picks, 0, flat_stop) - data.shape = (data.shape[0], -1, flat_step) + data = _reshape_view(data, (data.shape[0], -1, flat_step)) delta = np.std(data, axis=-1).min(-1) # min std across segments # We may want to return this later if `return_scores=True`. diff --git a/mne/report/tests/test_report.py b/mne/report/tests/test_report.py index 6481d9e8e31..5fa47d63914 100644 --- a/mne/report/tests/test_report.py +++ b/mne/report/tests/test_report.py @@ -26,6 +26,7 @@ from mne._fiff.write import DATE_NONE from mne.datasets import testing from mne.epochs import make_metadata +from mne.fixes import _reshape_view from mne.io import RawArray, read_info, read_raw_fif from mne.preprocessing import ICA from mne.report import Report, _ReportScraper, open_report, report @@ -507,7 +508,7 @@ def test_add_bem_n_jobs(n_jobs, monkeypatch): ) assert imgs.ndim == 4 # images, h, w, rgba assert len(imgs) == 6 - imgs.shape = (len(imgs), -1) + imgs = _reshape_view(imgs, (len(imgs), -1)) norms = np.linalg.norm(imgs, axis=-1) # should have down-up-down shape corr = np.corrcoef(norms, np.hanning(len(imgs)))[0, 1] diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 689022fb326..16a4f08226f 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -19,7 +19,7 @@ from .cov import Covariance from .evoked import _get_peak from .filter import FilterMixin, _check_fun, resample -from .fixes import _eye_array, _safe_svd +from .fixes import _eye_array, _reshape_view, _safe_svd from .parallel import parallel_func from .source_space._source_space import ( SourceSpaces, @@ -3729,7 +3729,9 @@ def _gen_extract_label_time_course( assert vertidx.shape[1] == stc.data.shape[0] this_data = np.reshape(stc.data, (stc.data.shape[0], -1)) this_data = vertidx @ this_data - this_data.shape = (this_data.shape[0],) + stc.data.shape[1:] + this_data = _reshape_view( + this_data, (this_data.shape[0],) + stc.data.shape[1:] + ) else: this_data = stc.data[vertidx] label_tc[i] = func(flip, this_data) diff --git a/mne/source_space/_source_space.py b/mne/source_space/_source_space.py index 74ffc7a44f2..d18fba88d65 100644 --- a/mne/source_space/_source_space.py +++ b/mne/source_space/_source_space.py @@ -41,7 +41,7 @@ read_freesurfer_lut, ) from ..bem import ConductorModel, read_bem_surfaces -from ..fixes import _get_img_fdata +from ..fixes import _get_img_fdata, _reshape_view from ..parallel import parallel_func from ..surface import ( _CheckInside, @@ -2317,7 +2317,7 @@ def _make_volume_source_space( checks = np.where(neigh >= 0)[0] removes = np.logical_not(np.isin(checks, sp["vertno"])) 
neigh[checks[removes]] = -1 - neigh.shape = old_shape + neigh = _reshape_view(neigh, old_shape) neigh = neigh.T # Thought we would need this, but C code keeps -1 vertices, so we will: # neigh = [n[n >= 0] for n in enumerate(neigh[vertno])] diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index 7c4c8b40435..eb887e74a7d 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -10,7 +10,7 @@ from scipy.stats import f as fstat from scipy.stats import t as tstat -from ..fixes import has_numba, jit +from ..fixes import _reshape_view, has_numba, jit from ..parallel import parallel_func from ..source_estimate import MixedSourceEstimate, SourceEstimate, VolSourceEstimate from ..source_space import SourceSpaces @@ -695,7 +695,7 @@ def _do_permutations( # The stat should have the same shape as the samples for no adj. if adjacency is None: - t_obs_surr.shape = sample_shape + t_obs_surr = _reshape_view(t_obs_surr, sample_shape) # Find cluster on randomized stats out = _find_clusters( @@ -783,7 +783,7 @@ def _do_1samp_permutations( # The stat should have the same shape as the samples for no adj. if adjacency is None: - t_obs_surr.shape = sample_shape + t_obs_surr = _reshape_view(t_obs_surr, sample_shape) # Find cluster on randomized stats out = _find_clusters( @@ -974,7 +974,7 @@ def _permutation_cluster_test( f"compatible with the sample shape {sample_shape}" ) if adjacency is None or adjacency is False: - t_obs.shape = sample_shape + t_obs = _reshape_view(t_obs, sample_shape) if exclude is not None: include = np.logical_not(exclude) @@ -1001,7 +1001,7 @@ def _permutation_cluster_test( clusters, cluster_stats = out # The stat should have the same shape as the samples - t_obs.shape = sample_shape + t_obs = _reshape_view(t_obs, sample_shape) # For TFCE, return the "adjusted" statistic instead of raw scores # and for clusters, each point gets treated independently @@ -1113,7 +1113,7 @@ def _permutation_cluster_test( for ti in to_remove: step_down_include[clusters[ti]] = False if adjacency is None and adjacency is not False: - step_down_include.shape = sample_shape + step_down_include = _reshape_view(step_down_include, sample_shape) n_step_downs += 1 if step_down_p > 0: a_text = "additional " if n_step_downs > 1 else "" diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py index 69f8160f3c1..d0a86a5722a 100644 --- a/mne/tests/test_transforms.py +++ b/mne/tests/test_transforms.py @@ -19,7 +19,7 @@ import mne from mne import read_trans, write_trans from mne.datasets import testing -from mne.fixes import _get_img_fdata +from mne.fixes import _get_img_fdata, _reshape_view from mne.io import read_info from mne.transforms import ( _angle_between_quats, @@ -76,7 +76,7 @@ def test_tps(): az = np.linspace(0.0, 2 * np.pi, 20, endpoint=False) pol = np.linspace(0, np.pi, 12)[1:-1] sph = np.array(np.meshgrid(1, az, pol, indexing="ij")) - sph.shape = (3, -1) + sph = _reshape_view(sph, (3, -1)) assert_equal(sph.shape[1], 200) source = _sph_to_cart(sph.T) destination = source.copy() diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index 1c1a3baf238..d917056b2ac 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -10,6 +10,7 @@ from scipy.signal import get_window from scipy.signal.windows import dpss as sp_dpss +from ..fixes import _reshape_view from ..parallel import parallel_func from ..utils import _check_option, logger, verbose, warn @@ -455,7 +456,7 @@ def psd_array_multitaper( # 
Combining/reshaping to original data shape last_dims = (n_freqs,) if output == "power" else (n_tapers, n_freqs) - psd.shape = dshape + last_dims + psd = _reshape_view(psd, dshape + last_dims) if ndim_in == 1: psd = psd[0] diff --git a/mne/time_frequency/psd.py b/mne/time_frequency/psd.py index 383ccc9f0f9..01d932699a1 100644 --- a/mne/time_frequency/psd.py +++ b/mne/time_frequency/psd.py @@ -8,6 +8,7 @@ import numpy as np from scipy.signal import spectrogram +from ..fixes import _reshape_view from ..parallel import parallel_func from ..utils import _check_option, _ensure_int, logger, verbose, warn from ..utils.numerics import _mask_to_onsets_offsets @@ -312,5 +313,5 @@ def func(*args, **kwargs): if bad_ch.any(): psds[bad_ch] = np.nan - psds.shape = shape + psds = _reshape_view(psds, shape) return psds, freqs diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 04321fa0503..5735fd98e24 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -28,6 +28,7 @@ vertex_to_mni, ) from ...defaults import DEFAULTS, _handle_default +from ...fixes import _reshape_view from ...surface import _marching_cubes, _mesh_borders, mesh_edges from ...transforms import ( Transform, @@ -2230,7 +2231,7 @@ def add_label( if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].faces.ravel(), keep_idx) - keep_idx.shape = self.geo[hemi].faces.shape + keep_idx = _reshape_view(keep_idx, self.geo[hemi].faces.shape) keep_idx = self.geo[hemi].faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) show[keep_idx] = 1 @@ -3978,7 +3979,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): if isinstance(borders, int): for _ in range(borders): keep_idx = np.isin(self.geo[hemi].orig_faces.ravel(), keep_idx) - keep_idx.shape = self.geo[hemi].orig_faces.shape + keep_idx = _reshape_view(keep_idx, self.geo[hemi].orig_faces.shape) keep_idx = self.geo[hemi].orig_faces[np.any(keep_idx, axis=1)] keep_idx = np.unique(keep_idx) if restrict_idx is not None: diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index e3c7559ffa0..705f1a52347 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -31,6 +31,7 @@ ) from mne.channels import make_dig_montage from mne.datasets import testing +from mne.fixes import _reshape_view from mne.io import read_info from mne.label import read_label from mne.minimum_norm import apply_inverse, make_inverse_operator @@ -1466,7 +1467,7 @@ def _create_testing_brain( stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = rng.rand( stc_data.size // 20 ) - stc_data.shape = (n_verts, n_time) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) if diverging: stc_data -= 0.5 stc = klass(stc_data, vertices, 1, 1) diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index 467f5cb15e7..a46e1f9cc31 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -16,7 +16,7 @@ import numpy as np -from ...fixes import _compare_version +from ...fixes import _compare_version, _reshape_view from ...utils import _check_qt_version, _validate_type, logger, warn from ..utils import _get_cmap @@ -355,7 +355,7 @@ def _pixmap_to_ndarray(pixmap): if hasattr(ptr, "setsize"): # PyQt ptr.setsize(count) data = np.frombuffer(ptr, dtype=np.uint8, count=count).copy() - data.shape = (img.height(), img.width(), 4) + data = _reshape_view(data, (img.height(), img.width(), 4)) return data / 255.0 diff --git a/mne/viz/tests/test_3d.py 
b/mne/viz/tests/test_3d.py index ab24e6a70db..154313a1eeb 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -32,6 +32,7 @@ from mne.bem import read_bem_solution, read_bem_surfaces from mne.datasets import testing from mne.defaults import DEFAULTS +from mne.fixes import _reshape_view from mne.io import read_info, read_raw_bti, read_raw_ctf, read_raw_kit, read_raw_nirx from mne.minimum_norm import apply_inverse from mne.source_estimate import _BaseVolSourceEstimate @@ -135,7 +136,7 @@ def test_plot_sparse_source_estimates(renderer_interactive, brain_gc): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data.shape = (n_verts, n_time) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze" @@ -942,7 +943,7 @@ def test_process_clim_plot(renderer_interactive, brain_gc): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.random.RandomState(0).rand(n_verts * n_time) - stc_data.shape = (n_verts, n_time) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") # Test for simple use cases @@ -1064,7 +1065,7 @@ def test_stc_mpl(): n_time = 5 n_verts = sum(len(v) for v in vertices) stc_data = np.ones(n_verts * n_time) - stc_data.shape = (n_verts, n_time) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1, "sample") stc.plot( subjects_dir=subjects_dir, @@ -1396,7 +1397,7 @@ def test_link_brains(renderer_interactive): stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = ( np.random.RandomState(0).rand(stc_data.size // 20) ) - stc_data.shape = (n_verts, n_time) + stc_data = _reshape_view(stc_data, (n_verts, n_time)) stc = SourceEstimate(stc_data, vertices, 1, 1) colormap = "mne_analyze"
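
The behavior the patch relies on can be exercised with the helper added in ``mne/fixes.py`` above; the following is a minimal sketch (illustrative array values, assuming an environment with this patch applied): on a contiguous array the result shares memory with the input, and a reshape that would require a copy raises instead of silently copying, with the exception type depending on which NumPy branch is taken.

import numpy as np

from mne.fixes import _reshape_view  # helper added by this patch

# Contiguous array: the reshape is a view, no data are copied.
x = np.arange(12.0)
y = _reshape_view(x, (3, 4))
assert y.base is x or y is x  # shares memory on either NumPy branch

# Non-contiguous array: flattening the transpose would require a copy, so the
# helper raises instead -- ValueError on NumPy >= 2.1 (reshape(copy=False)),
# AttributeError on older NumPy (direct ``.shape`` assignment).
z = np.arange(12.0).reshape(3, 4).T
try:
    _reshape_view(z, (12,))
except (ValueError, AttributeError):
    pass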