diff --git a/nltools/datasets.py b/nltools/datasets.py
index aff2f4c3..1659dd45 100644
--- a/nltools/datasets.py
+++ b/nltools/datasets.py
@@ -21,8 +21,15 @@
 
 import os
 import pandas as pd
+import warnings
 from nltools.data import Brain_Data
-from nilearn.datasets.utils import _get_dataset_dir, _fetch_file
+import pkg_resources
+
+if pkg_resources.get_distribution("nilearn").version >= "0.10.4":
+    from nilearn.datasets import fetch_neurovault_ids
+    from nilearn.datasets.utils import get_data_dirs
+else:
+    from nilearn.datasets.utils import _get_dataset_dir, _fetch_file
 from pynv import Client
 
 # Optional dependencies
@@ -59,7 +66,9 @@ def get_collection_image_metadata(collection=None, data_dir=None, limit=10):
     Returns:
         pd.DataFrame: Dataframe with full image metadata from collection
     """
-
+    warnings.warn(
+        "This function is deprecated and will be removed in a future version. Please use fetch_neurovault_ids instead."
+    )
     if os.path.isfile(os.path.join(data_dir, "metadata.csv")):
         dat = pd.read_csv(os.path.join(data_dir, "metadata.csv"))
     else:
@@ -96,6 +105,9 @@ def download_collection(
     Returns:
         (pd.DataFrame, list): (DataFrame of image metadata, list of files from downloaded collection)
     """
+    warnings.warn(
+        "This function is deprecated and will be removed in a future version. Please use fetch_neurovault_ids instead."
+    )
     if data_dir is None:
         data_dir = _get_dataset_dir(str(collection), data_dir=data_dir, verbose=verbose)
 
@@ -128,10 +140,18 @@ def fetch_pain(data_dir=None, resume=True, verbose=1):
 
     collection = 504
     dataset_name = "chang2015_pain"
-    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
-    metadata, files = download_collection(
-        collection=collection, data_dir=data_dir, resume=resume, verbose=verbose
-    )
+
+    if pkg_resources.get_distribution("nilearn").version >= "0.10.4":
+        nv_data = fetch_neurovault_ids(
+            collection_ids=[collection], data_dir=data_dir, verbose=verbose
+        )
+        files = nv_data["images"]
+        metadata = pd.DataFrame(nv_data["images_meta"])
+    else:
+        data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
+        metadata, files = download_collection(
+            collection=collection, data_dir=data_dir, resume=resume, verbose=verbose
+        )
     return Brain_Data(data=files, X=metadata)
 
 
@@ -148,8 +168,16 @@ def fetch_emotion_ratings(data_dir=None, resume=True, verbose=1):
 
     collection = 1964
     dataset_name = "chang2015_emotion_ratings"
-    data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
-    metadata, files = download_collection(
-        collection=collection, data_dir=data_dir, resume=resume, verbose=verbose
-    )
+
+    if pkg_resources.get_distribution("nilearn").version >= "0.10.4":
+        nv_data = fetch_neurovault_ids(
+            collection_ids=[collection], data_dir=data_dir, verbose=verbose
+        )
+        files = nv_data["images"]
+        metadata = pd.DataFrame(nv_data["images_meta"])
+    else:
+        data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
+        metadata, files = download_collection(
+            collection=collection, data_dir=data_dir, resume=resume, verbose=verbose
+        )
     return Brain_Data(data=files, X=metadata)
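
Review note on the datasets.py changes above: the nilearn version gate compares version strings lexicographically, so a string like "0.9.2" would satisfy >= "0.10.4". Below is a minimal sketch of a more robust check; it assumes the packaging package is available (it is pulled in by setuptools/pip in most environments) and is not part of the patch itself. Separately, warnings.warn without a category emits a UserWarning; passing category=DeprecationWarning would let downstream code filter the deprecation notices.

# Sketch only, not part of the patch: compare parsed versions instead of raw strings.
from packaging.version import Version  # assumes packaging is installed
import pkg_resources  # importlib.metadata.version("nilearn") is a stdlib alternative

NILEARN_VERSION = Version(pkg_resources.get_distribution("nilearn").version)

if NILEARN_VERSION >= Version("0.10.4"):
    from nilearn.datasets import fetch_neurovault_ids
    from nilearn.datasets.utils import get_data_dirs
else:
    from nilearn.datasets.utils import _get_dataset_dir, _fetch_file
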
diff --git a/nltools/stats.py b/nltools/stats.py
index 7e93afbb..fec0415c 100644
--- a/nltools/stats.py
+++ b/nltools/stats.py
@@ -1943,6 +1943,7 @@ def isc(
     tail=2,
     n_jobs=-1,
     random_state=None,
+    sim_metric="correlation",
 ):
     """Compute pairwise intersubject correlation from observations by subjects array.
 
@@ -1981,6 +1982,7 @@ def isc(
         tail: (int) either 1 for one-tail or 2 for two-tailed test (default: 2)
         n_jobs: (int) The number of CPUs to use to do the computation. -1 means all CPUs.
         return_null: (bool) Return the permutation distribution along with the p-value; default False
+        sim_metric: (str) pairwise distance metric. See sklearn's pairwise_distances for valid inputs (default: correlation)
 
     Returns:
         stats: (dict) dictionary of permutation results ['correlation','p']
@@ -2000,7 +2002,7 @@ def isc(
 
     stats = {"isc": _compute_isc(data, metric=metric)}
 
     similarity = Adjacency(
-        1 - pairwise_distances(data.T, metric="correlation"), matrix_type="similarity"
+        1 - pairwise_distances(data.T, metric=sim_metric), matrix_type="similarity"
     )
 
     if method == "bootstrap":
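
Usage sketch for the stats.py change above: the new sim_metric argument only affects how the subject-by-subject similarity Adjacency is built (it is forwarded to sklearn's pairwise_distances), while the existing metric argument is unchanged. The data and values below are hypothetical, for illustration only.

import numpy as np
import pandas as pd
from nltools.stats import isc

# Hypothetical data: 200 observations (e.g., TRs) by 20 subjects.
data = pd.DataFrame(np.random.randn(200, 20))

# Defaults to sim_metric="correlation", preserving the previous behavior;
# any metric accepted by sklearn's pairwise_distances can be passed instead.
stats = isc(data, sim_metric="cosine")
print(stats["isc"], stats["p"])  # keys per the isc docstring
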