import json
import os

import numpy as np
import pandas as pd
from flamingo_tools.s3_utils import create_s3_target, BUCKET_NAME, get_s3_path


# Note: downsampling with an anisotropic scale factor at the start would make sense for a better visualization.
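# A minimal sketch of what such a downsampling step could look like (hypothetical, not called
# anywhere in this script), assuming a z-anisotropic volume; the (4, 1, 1) factor is a made-up example:
#   from skimage.transform import downscale_local_mean
#   downsampled = downscale_local_mean(volume, factors=(4, 1, 1))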
def analyze_sgn(visualize=False):
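    """Count the SGNs per cochlea by filtering the segmentation table to the selected
    components; optionally inspect image, segmentation, and components in napari.
    """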
    s3 = create_s3_target()
    datasets = ["LaVision-M04", "LaVision-Mar05"]

    # Use this to select the components for analysis.
    sgn_components = {
        "LaVision-M04": [1],
        "LaVision-Mar05": [1],
    }
    seg_name = "SGN_LOWRES-v2"

    for cochlea in datasets:
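        # Load the dataset metadata, which lists all available image and segmentation sources.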
        content = s3.open(f"{BUCKET_NAME}/{cochlea}/dataset.json", mode="r", encoding="utf-8")
        info = json.loads(content.read())
        sources = info["sources"]

        # Load the segmentation table.
        seg_source = sources[seg_name]
        table_folder = os.path.join(
            BUCKET_NAME, cochlea, seg_source["segmentation"]["tableData"]["tsv"]["relativePath"]
        )
        table_content = s3.open(os.path.join(table_folder, "default.tsv"), mode="rb")
        table = pd.read_csv(table_content, sep="\t")

        if visualize:
            import napari
            import zarr
            from nifty.tools import takeDict

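            # Load scale level "s2" of the multiscale data to keep the volumes manageable.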
            key = "s2"
            img_s3 = f"{cochlea}/images/ome-zarr/PV.ome.zarr"
            seg_s3 = os.path.join(cochlea, seg_source["segmentation"]["imageData"]["ome.zarr"]["relativePath"])
            img_path, _ = get_s3_path(img_s3)
            seg_path, _ = get_s3_path(seg_s3)

            print("Loading segmentation data")
            f = zarr.open(seg_path, mode="r")
            seg = f[key][:]

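            # Map each segmentation id to its component id; ids missing from the table are mapped to 0.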
            seg_ids = np.unique(seg)
            component_dict = {int(label_id): int(component_id)
                              for label_id, component_id in zip(table.label_id, table.component_labels)}
            missing_ids = np.setdiff1d(seg_ids, table.label_id.values)
            component_dict.update({int(miss): 0 for miss in missing_ids})
            components = takeDict(component_dict, seg)

            print("Loading image data")
            f = zarr.open(img_path, mode="r")
            data = f[key][:]

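            # Show the image data together with the segmentation and its components.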
            v = napari.Viewer()
            v.add_image(data)
            v.add_labels(seg)
            v.add_labels(components)
            napari.run()

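        # Restrict the table to the selected components and count the remaining SGNs.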
        table = table[table.component_labels.isin(sgn_components[cochlea])]
        n_sgns = len(table)
        print(cochlea, ":", n_sgns)


def main():
    analyze_sgn(visualize=True)


if __name__ == "__main__":
    main()