Skip to content

Commit f312873

Browse files
committed
Revert "rm tasks"
This reverts commit 648cdfe.
1 parent 648cdfe commit f312873

21 files changed

+5981
-0
lines changed
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
"""
2+
Tasks subpackage (requires installation extra `fractal-tasks`).
3+
"""
Lines changed: 238 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,238 @@
1+
# Copyright 2024 (C) BioVisionCenter
2+
#
3+
# Original authors:
4+
# Joel Lüthi <[email protected]>
5+
#
6+
# This file is part of Fractal
7+
"""Utils functions for registration"""
8+
import copy
9+
10+
import anndata as ad
11+
import dask.array as da
12+
import numpy as np
13+
import pandas as pd
14+
from image_registration import chi2_shift
15+
16+
17+
def calculate_physical_shifts(
    shifts: np.ndarray,
    level: int,
    coarsening_xy: int,
    full_res_pxl_sizes_zyx: list[float],
) -> list[float]:
    """
    Calculates shifts in physical units based on pixel shifts.

    Args:
        shifts: Array of pixel shifts, either zyx (length 3) or yx (length 2).
        level: Resolution level at which the shifts were computed.
        coarsening_xy: Coarsening factor between consecutive resolution levels.
        full_res_pxl_sizes_zyx: Pixel sizes in physical units as zyx, at full
            resolution (level 0).

    Returns:
        Shifts in physical units as zyx (for 2D input, the z shift is 0).

    Raises:
        ValueError: If `shifts` has neither 2 nor 3 entries.
    """
    # Pixel size grows by the coarsening factor at each resolution level
    curr_pixel_size = np.array(full_res_pxl_sizes_zyx) * coarsening_xy**level
    if len(shifts) == 3:
        shifts_physical = shifts * curr_pixel_size
    elif len(shifts) == 2:
        # 2D (yx) shifts: prepend a zero z shift to return zyx
        shifts_physical = [
            0,
            shifts[0] * curr_pixel_size[1],
            shifts[1] * curr_pixel_size[2],
        ]
    else:
        raise ValueError(
            f"Wrong input for calculate_physical_shifts ({shifts=})"
        )
    return shifts_physical
50+
51+
52+
def get_ROI_table_with_translation(
    ROI_table: ad.AnnData,
    new_shifts: dict[str, list[float]],
) -> ad.AnnData:
    """
    Add translation columns to a ROI table.

    Args:
        ROI_table: Fractal ROI table.
        new_shifts: Mapping from ROI name to its zyx list of shifts.

    Returns:
        Fractal ROI table with 3 additional columns for calculated
        translations.
    """
    translation_columns = ["translation_z", "translation_y", "translation_x"]
    # Build one row per ROI from the shifts dictionary
    shift_table = pd.DataFrame.from_dict(
        new_shifts, orient="index", columns=translation_columns
    ).rename_axis("FieldIndex")

    # Inner-join on the ROI index; ROIs missing from either side are dropped,
    # which the length check below turns into an error
    new_roi_table = pd.merge(
        ROI_table.to_df(),
        shift_table,
        left_index=True,
        right_index=True,
    )
    if len(new_roi_table) != len(ROI_table):
        raise ValueError(
            "New ROI table with registration info has a "
            f"different length ({len(new_roi_table)=}) "
            f"from the original ROI table ({len(ROI_table)=})"
        )

    adata = ad.AnnData(X=new_roi_table.astype(np.float32))
    adata.obs_names = new_roi_table.index
    adata.var_names = [str(col) for col in new_roi_table.columns]
    return adata
84+
85+
86+
# Helper functions
87+
def add_zero_translation_columns(ad_table: ad.AnnData) -> ad.AnnData:
    """
    Add three zero-filled columns (`translation_{x,y,z}`) to an AnnData table.

    Args:
        ad_table: AnnData table to extend.

    Returns:
        A new AnnData table with the three translation columns appended.

    Raises:
        ValueError: If the table already contains translation columns.
    """
    columns = ["translation_z", "translation_y", "translation_x"]
    # `isin(...)` returns a boolean array, so a single `.any()` suffices
    if ad_table.var.index.isin(columns).any():
        raise ValueError(
            "The roi table already contains translation columns. Did you "
            "enter a wrong reference acquisition?"
        )
    df = pd.DataFrame(np.zeros([len(ad_table), 3]), columns=columns)
    df.index = ad_table.obs.index
    # Concatenate along the var axis, preserving the original obs index
    ad_new = ad.concat([ad_table, ad.AnnData(df)], axis=1)
    return ad_new
101+
102+
103+
def calculate_min_max_across_dfs(tables_list):
    """
    Calculate element-wise max and min values across a list of dataframes.

    All dataframes must have the same shape; the outputs reuse the index and
    columns of the first entry.

    Args:
        tables_list: Non-empty list of `pd.DataFrame`s of identical shape.

    Returns:
        Tuple `(max_df, min_df)` with the element-wise maxima and minima.
    """
    # Seed with the first table instead of an empty (all-NaN) frame:
    # np.maximum/np.minimum propagate NaN on numeric dtypes, so a
    # NaN-initialized accumulator would yield all-NaN results.
    max_df = tables_list[0].copy()
    min_df = tables_list[0].copy()

    # Fold the remaining tables into the running element-wise max and min
    for table in tables_list[1:]:
        max_df = pd.DataFrame(
            np.maximum(max_df.values, table.values),
            columns=max_df.columns,
            index=max_df.index,
        )
        min_df = pd.DataFrame(
            np.minimum(min_df.values, table.values),
            columns=min_df.columns,
            index=min_df.index,
        )

    return max_df, min_df
126+
127+
128+
def apply_registration_to_single_ROI_table(
    roi_table: ad.AnnData,
    max_df: pd.DataFrame,
    min_df: pd.DataFrame,
) -> ad.AnnData:
    """
    Applies the registration to a ROI table

    Calculates the new position as: p = position + max(shift, 0) - own_shift
    Calculates the new len as: l = len - max(shift, 0) + min(shift, 0)

    Args:
        roi_table: AnnData table which contains a Fractal ROI table.
            Rows are ROIs
        max_df: Max translation shift in z, y, x for each ROI. Rows are ROIs,
            columns are translation_z, translation_y, translation_x
        min_df: Min translation shift in z, y, x for each ROI. Rows are ROIs,
            columns are translation_z, translation_y, translation_x
    Returns:
        ROI table where all ROIs are registered to the smallest common area
        across all acquisitions.

    Raises:
        ValueError: If the ROI table and the max/min translation tables do
            not contain exactly the same ROIs.
    """
    # Work on a copy so the caller's table is left untouched
    roi_table = copy.deepcopy(roi_table)
    rois = roi_table.obs.index
    # Use `.any()`: raise as soon as ANY label mismatches. The previous
    # `.all()` only raised when every single label differed, silently
    # accepting partially mismatched tables.
    if (rois != max_df.index).any() or (rois != min_df.index).any():
        raise ValueError(
            "ROI table and max & min translation need to contain the same "
            f"ROIS, but they were {rois=}, {max_df.index=}, {min_df.index=}"
        )

    for roi in rois:
        # Shift each origin coordinate by the max shift, minus this ROI's
        # own shift (p = position + max(shift, 0) - own_shift)
        roi_table[[roi], ["z_micrometer"]] = (
            roi_table[[roi], ["z_micrometer"]].X
            + float(max_df.loc[roi, "translation_z"])
            - roi_table[[roi], ["translation_z"]].X
        )
        roi_table[[roi], ["y_micrometer"]] = (
            roi_table[[roi], ["y_micrometer"]].X
            + float(max_df.loc[roi, "translation_y"])
            - roi_table[[roi], ["translation_y"]].X
        )
        roi_table[[roi], ["x_micrometer"]] = (
            roi_table[[roi], ["x_micrometer"]].X
            + float(max_df.loc[roi, "translation_x"])
            - roi_table[[roi], ["translation_x"]].X
        )
        # Shrink lengths to the common overlap
        # (l = len - max(shift, 0) + min(shift, 0))
        # This calculation only works if all ROIs are the same size initially!
        roi_table[[roi], ["len_z_micrometer"]] = (
            roi_table[[roi], ["len_z_micrometer"]].X
            - float(max_df.loc[roi, "translation_z"])
            + float(min_df.loc[roi, "translation_z"])
        )
        roi_table[[roi], ["len_y_micrometer"]] = (
            roi_table[[roi], ["len_y_micrometer"]].X
            - float(max_df.loc[roi, "translation_y"])
            + float(min_df.loc[roi, "translation_y"])
        )
        roi_table[[roi], ["len_x_micrometer"]] = (
            roi_table[[roi], ["len_x_micrometer"]].X
            - float(max_df.loc[roi, "translation_x"])
            + float(min_df.loc[roi, "translation_x"])
        )
    return roi_table
191+
192+
193+
def chi2_shift_out(img_ref, img_cycle_x) -> list[np.ndarray]:
    """
    Helper function to get the output of chi2_shift into the same format as
    phase_cross_correlation. Calculates the shift between two images using
    the chi2_shift method.

    Args:
        img_ref (np.ndarray): First image.
        img_cycle_x (np.ndarray): Second image.

    Returns:
        List containing numpy array of shift in y and x direction.
    """
    # chi2_shift also returns error estimates; they are unused here
    x, y, _a, _b = chi2_shift(np.squeeze(img_ref), np.squeeze(img_cycle_x))

    # NOTE: running into issues when using direct float output for fractal.
    # When rounding to integer and using integer dtype, it typically works
    # but for some reasons fails when run over a whole 384 well plate (but
    # the well where it fails works fine when run alone). For now, rounding
    # to integer, but still using float64 dtype (like the scikit-image
    # phase cross correlation function) seems to be the safest option.
    # (This note was previously a bare string literal, i.e. an executed
    # no-op statement; it is now a proper comment.)
    shifts = np.array([-np.round(y), -np.round(x)], dtype="float64")
    # Return as a list to adhere to the phase_cross_correlation output format
    return [shifts]
219+
220+
221+
def is_3D(dask_array: da.array) -> bool:
    """
    Check if a dask array is 3D.

    Treats singleton Z dimensions as 2D images.
    (1, 2000, 2000) => False
    (10, 2000, 2000) => True

    Args:
        dask_array: Input array to be checked

    Returns:
        bool on whether the array is 3D
    """
    # Return the boolean expression directly instead of an if/else that
    # returns literal True/False
    return len(dask_array.shape) == 3 and dask_array.shape[0] > 1

fractal_tasks_core/tasks/_utils.py

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
# Copyright 2022 (C) Friedrich Miescher Institute for Biomedical Research and
2+
# University of Zurich
3+
#
4+
# Original authors:
5+
# Tommaso Comparin <[email protected]>
6+
#
7+
# This file is part of Fractal and was originally developed by eXact lab S.r.l.
8+
# <exact-lab.it> under contract with Liberali Lab from the Friedrich Miescher
9+
# Institute for Biomedical Research and Pelkmans Lab from the University of
10+
# Zurich.
11+
"""
12+
Standard input/output interface for tasks.
13+
"""
14+
import json
15+
import logging
16+
from argparse import ArgumentParser
17+
from json import JSONEncoder
18+
from pathlib import Path
19+
from typing import Callable
20+
from typing import Optional
21+
22+
23+
class TaskParameterEncoder(JSONEncoder):
    """
    Custom JSONEncoder that transforms Path objects to strings.
    """

    def default(self, value):
        """
        Subclass implementation of `default`, to serialize Path objects as
        strings.
        """
        if not isinstance(value, Path):
            # Delegate everything else to the base encoder (which raises
            # TypeError for unsupported types)
            return super().default(value)
        return value.as_posix()
36+
37+
38+
def run_fractal_task(
    *,
    task_function: Callable,
    logger_name: Optional[str] = None,
):
    """
    Implement standard task interface and call task_function.

    Reads task parameters from the JSON file given via `--args-json`, runs
    `task_function` with them, and serializes the returned metadata to the
    path given via `--out-json`.

    Args:
        task_function: the callable function that runs the task.
        logger_name: name of the logger used for this task's messages;
            `None` selects the root logger.
    """

    # Parse `--args-json` and `--out-json` arguments
    parser = ArgumentParser()
    parser.add_argument(
        "--args-json", help="Read parameters from json file", required=True
    )
    parser.add_argument(
        "--out-json",
        help="Output file to redirect serialised returned data",
        required=True,
    )
    parsed_args = parser.parse_args()

    # Set logger
    logger = logging.getLogger(logger_name)

    # Preliminary check: never overwrite an existing output file
    if Path(parsed_args.out_json).exists():
        logger.error(
            f"Output file {parsed_args.out_json} already exists. Terminating"
        )
        # Equivalent to sys.exit(1); avoids the `site`-injected builtin
        # `exit`, which may be absent (e.g. under `python -S` or when frozen)
        raise SystemExit(1)

    # Read parameters dictionary
    with open(parsed_args.args_json, "r") as f:
        pars = json.load(f)

    # Run task
    logger.info(f"START {task_function.__name__} task")
    metadata_update = task_function(**pars)
    logger.info(f"END {task_function.__name__} task")

    # Write output metadata to file, with custom JSON encoder
    with open(parsed_args.out_json, "w") as fout:
        json.dump(metadata_update, fout, cls=TaskParameterEncoder, indent=2)

0 commit comments

Comments
 (0)