Skip to content

Commit 16a0027

Browse files
authored
feat: Better logging (#110)
* feat: better logging * feat: better logging * update pre-commit * fix tests * add rich to extras
1 parent 0258b9b commit 16a0027

10 files changed

Lines changed: 304 additions & 187 deletions

File tree

.pre-commit-config.yaml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,7 @@ repos:
1818
- repo: https://github.com/astral-sh/ruff-pre-commit
1919
rev: v0.11.10
2020
hooks:
21-
- id: ruff
22-
args: [--fix, --unsafe-fixes]
21+
- id: ruff-check
2322
- id: ruff-format
2423

2524
- repo: https://github.com/pre-commit/mirrors-mypy

examples/basic_confocal.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
import time
2-
31
from microsim import schema as ms
42
from microsim.util import ndview, ortho_plot
53

@@ -20,8 +18,6 @@
2018
# output_path="au1.tif",
2119
)
2220

23-
_start = time.time()
2421
result = sim.run()
25-
print("ran in", time.time() - _start)
2622
ortho_plot(result.data, show=True)
2723
ndview(result)

pyproject.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ jax = [
6666
"dm-tree ==0.1.8; sys_platform == 'darwin'",
6767
"dm-tree >=0.1.7",
6868
]
69-
all = ["microsim[view,io,cosem]"]
69+
all = ["microsim[view,io,cosem]", "rich>=13"]
7070

7171
[dependency-groups]
7272
test = ["pytest>=8.3.5", "pytest-cov>=6.0.0"]
@@ -77,7 +77,7 @@ dev = [
7777
"ipython",
7878
"mypy",
7979
"pdbpp; sys_platform == 'darwin'",
80-
"pre-commit",
80+
"pre-commit-uv",
8181
"rich",
8282
"ruff",
8383
"types-tqdm",
@@ -101,6 +101,7 @@ microsim = { workspace = true }
101101
line-length = 88
102102
src = ["src"]
103103
fix = true
104+
unsafe-fixes = true
104105

105106
[tool.ruff.lint]
106107
pydocstyle = { convention = "numpy" }

src/microsim/_logger.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
import logging
2+
from collections.abc import Iterator
3+
from contextlib import contextmanager
4+
from contextvars import ContextVar
5+
6+
_log_indent = ContextVar("log_indent", default=0)
7+
logger = logging.getLogger("microsim")
8+
9+
10+
@contextmanager
11+
def logging_indented(delta: int = 1) -> Iterator[None]:
12+
token = _log_indent.set(_log_indent.get() + delta)
13+
try:
14+
yield
15+
finally:
16+
_log_indent.reset(token)
17+
18+
19+
class IndentPrefixFilter(logging.Filter):
    """Logging filter that prepends the current indentation to each record.

    Reads the module-level ``_log_indent`` context variable and prefixes
    the record's message with one space per indentation level.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        # Mutate the record in place, then always let it through.
        record.msg = " " * _log_indent.get() + str(record.msg)
        return True
24+
25+
26+
_filter = IndentPrefixFilter()

try:
    from rich.logging import RichHandler
except ImportError:
    # rich is optional; fall back to a plain stream handler with an
    # explicit formatter (basicConfig leaves pre-set formatters alone).
    handler: logging.Handler = logging.StreamHandler()
    handler.addFilter(_filter)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s"))
else:
    handler = RichHandler()
    handler.addFilter(_filter)

# NOTE(review): this configures the *root* logger at import time, which
# affects all libraries' log output -- confirm that is intended.
logging.basicConfig(
    level="NOTSET",
    format="%(message)s",
    # FIX: logging formats dates with time.strftime, which does not support
    # the datetime-only %f (microseconds) directive -- "[%X:%f]" would emit
    # a literal "%f" (or raise on some platforms), so use "[%X]" instead.
    datefmt="[%X]",
    handlers=[handler],
)

src/microsim/cosem/_tstore.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,9 @@ def read_tensorstore(
6363
level = level or 0
6464
spec = ts_spec(img, level=level, bin_mode=bin_mode)
6565
if cache_limit:
66-
spec["context"] = {"cache_pool": {"total_bytes_limit": cache_limit}}
66+
ctx = spec.setdefault("context", {})
67+
ctx["cache_pool"] = {"total_bytes_limit": cache_limit}
68+
6769
data = ts.open(spec).result()
6870

6971
# "squeeze" the data (haven't found a tensorstore-native way to do this)
@@ -114,7 +116,13 @@ def _kv_store(img: "CosemImage", level: int, bin_mode: BinMode = "standard") ->
114116
return {"driver": "file", "path": str(cached_path)}
115117

116118
# if we get here, use the remote bucket to load data
117-
return {"driver": "s3", "bucket": COSEM_BUCKET, "path": path}
119+
return {
120+
"driver": "s3",
121+
"bucket": COSEM_BUCKET,
122+
"path": path,
123+
"aws_credentials": {"type": "anonymous"},
124+
"aws_region": "us-east-1",
125+
}
118126

119127

120128
def ts_spec(

src/microsim/psf.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
from __future__ import annotations
22

3-
import logging
43
import os
54
from functools import cache, lru_cache
65
from typing import TYPE_CHECKING, Literal
@@ -15,6 +14,7 @@
1514
from microsim.util import microsim_cache
1615

1716
from ._data_array import ArrayProtocol
17+
from ._logger import logger
1818

1919
if TYPE_CHECKING:
2020
from collections.abc import Sequence
@@ -425,9 +425,15 @@ def cached_psf(
425425
nz, nx, dz, dx, em_wvl_um, pinhole_au, ex_wvl_um, objective
426426
)
427427
if cache_path.exists():
428-
logging.info("Using cached PSF: %s", cache_path)
428+
logger.info(
429+
f"Found cached PSF {nz=} {nx=} {dz=} {dx=} "
430+
f"{ex_wvl_um=:.2f} {pinhole_au=}"
431+
)
429432
return xp.asarray(np.load(cache_path))
430433

434+
logger.info(
435+
f"Creating new PSF {nz=} {nx=} {dz=} {dx=} {ex_wvl_um=:.2f} {pinhole_au=}"
436+
)
431437
if pinhole_au is None:
432438
psf = vectorial_psf_centered(
433439
wvl=em_wvl_um,

src/microsim/schema/modality/_simple_psf.py

Lines changed: 33 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
1-
import logging
21
import warnings
32
from typing import Annotated, Any, Literal
43

54
import numpy as np
65
from annotated_types import Ge
76

87
from microsim._data_array import ArrayProtocol, DataArray, xrDataArray
8+
from microsim._logger import logger, logging_indented
99
from microsim.psf import make_psf
1010
from microsim.schema._base_model import SimBaseModel
1111
from microsim.schema.backend import NumpyAPI
@@ -52,31 +52,37 @@ def render(
5252
# for every channel in the emission rates...
5353
channels = []
5454
for ch in em_rates.coords[Axis.C].values:
55-
logging.info(f"Rendering channel {ch} -----------------")
56-
57-
# for every fluorophore in the sample...
58-
fluors = []
59-
for f_idx, fluor in enumerate(truth.coords[Axis.F].values):
60-
logging.info(f">> fluor {fluor}")
61-
f_truth = truth.isel({Axis.F: f_idx})
62-
63-
# discretize the emission spectrum for this specific ch/fluor pair
64-
em_spectrum = em_rates.sel({Axis.C: ch, Axis.F: fluor})
65-
# if we happen to have 2 spectra for the same fluorophore
66-
# in the same channel, just take the first one (shouldn't happen)
67-
if Axis.F in em_spectrum.dims: # pragma: no cover
68-
em_spectrum = em_spectrum.isel({Axis.F: 0})
69-
70-
if not (em_spectrum > 1e-12).any():
71-
# no emission at all for this fluorophore in this channel
72-
fluors.append(xp.zeros_like(f_truth))
73-
continue
74-
75-
summed_psf = self._summed_weighted_psf(
76-
em_spectrum, settings, truth.attrs["space"], objective_lens, xp
77-
)
78-
fluor_sum = xp.fftconvolve(f_truth, summed_psf, mode="same")
79-
fluors.append(fluor_sum)
55+
logger.info(f"Rendering {type(self).__name__} channel {ch} ---------------")
56+
57+
with logging_indented():
58+
# for every fluorophore in the sample...
59+
fluors = []
60+
for f_idx, fluor in enumerate(truth.coords[Axis.F].values):
61+
logger.info(f"Fluor: {fluor}")
62+
with logging_indented():
63+
f_truth = truth.isel({Axis.F: f_idx})
64+
65+
# discretize the em spectrum for this specific ch/fluor pair
66+
em_spectrum = em_rates.sel({Axis.C: ch, Axis.F: fluor})
67+
# if we happen to have 2 spectra for the same fluorophore
68+
# in the same channel, just take the first one (shouldn't happen)
69+
if Axis.F in em_spectrum.dims: # pragma: no cover
70+
em_spectrum = em_spectrum.isel({Axis.F: 0})
71+
72+
if not (em_spectrum > 1e-12).any():
73+
# no emission at all for this fluorophore in this channel
74+
fluors.append(xp.zeros_like(f_truth))
75+
continue
76+
77+
summed_psf = self._summed_weighted_psf(
78+
em_spectrum,
79+
settings,
80+
truth.attrs["space"],
81+
objective_lens,
82+
xp,
83+
)
84+
fluor_sum = xp.fftconvolve(f_truth, summed_psf, mode="same")
85+
fluors.append(fluor_sum)
8086

8187
# stack the fluorophores together to create the channel
8288
channels.append(xp.stack(fluors, axis=0))
@@ -141,7 +147,7 @@ def _summed_weighted_psf(
141147
if em_rate.isnull().any() or em_rate == 0 or xp.isnan(em_wvl_nm):
142148
continue
143149
weight = em_rate.item()
144-
logging.info(f">>>> PSF @ {em_wvl_nm:.1f}nm (x{weight:.2f})")
150+
logger.info(f"Need PSF ({nz},{nx}) @ {em_wvl_nm:.1f} nm ({weight=:.2f})")
145151
psf = self.psf(
146152
nz=nz,
147153
nx=nx,

src/microsim/schema/simulation.py

Lines changed: 49 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import logging
1+
import time
22
import warnings
33
from contextlib import suppress
44
from pathlib import Path
@@ -11,6 +11,7 @@
1111
from pydantic_core import PydanticSerializationError
1212

1313
from microsim._data_array import ArrayProtocol, from_cache, to_cache
14+
from microsim._logger import logger, logging_indented
1415
from microsim.util import microsim_cache
1516

1617
from ._base_model import SimBaseModel
@@ -112,38 +113,46 @@ def ground_truth(self) -> xr.DataArray:
112113
>>> truth.isel(f=0).max('z').plot() # plot max projection of first fluorophore
113114
>>> plt.show()
114115
"""
116+
logger.info(f"Creating ground truth (shape ={self.truth_space.shape}) + ")
117+
_t0 = time.perf_counter()
118+
115119
if not hasattr(self, "_ground_truth"):
116-
xp = self._xp
117-
# make empty space into which we'll add the ground truth
118-
# TODO: this is wasteful... label.render should probably
119-
# accept the space object directly
120-
truth = self.truth_space.create(array_creator=xp.zeros)
121-
122-
# render each ground truth
123-
label_data = []
124-
for label in self.sample.labels:
125-
cache_path = self._truth_cache_path(
126-
label, self.truth_space, self.settings.random_seed
127-
)
128-
if self.settings.cache.read and cache_path and cache_path.exists():
129-
data = from_cache(cache_path, xp=xp).astype(
130-
self.settings.float_dtype
131-
)
132-
logging.info(
133-
f"Loaded ground truth for {label} from cache: {cache_path}"
120+
with logging_indented():
121+
xp = self._xp
122+
# make empty space into which we'll add the ground truth
123+
# TODO: this is wasteful... label.render should probably
124+
# accept the space object directly
125+
truth = self.truth_space.create(array_creator=xp.zeros)
126+
127+
# render each ground truth
128+
label_data = []
129+
for label in self.sample.labels:
130+
cache_path = self._truth_cache_path(
131+
label, self.truth_space, self.settings.random_seed
134132
)
135-
else:
136-
data = label.render(truth, xp=xp)
137-
if self.settings.cache.write and cache_path:
138-
to_cache(data, cache_path, dtype=np.uint16)
139-
140-
label_data.append(data)
141-
142-
# concat along the F axis
143-
fluors = [lbl.fluorophore for lbl in self.sample.labels]
144-
truth = xr.concat(label_data, dim=pd.Index(fluors, name=Axis.F))
145-
truth.attrs.update(units="fluorophores", long_name="Ground Truth")
146-
self._ground_truth = truth
133+
if self.settings.cache.read and cache_path and cache_path.exists():
134+
data = from_cache(cache_path, xp=xp).astype(
135+
self.settings.float_dtype
136+
)
137+
logger.info(
138+
f"Loaded ground truth for {label} from cache: {cache_path}"
139+
)
140+
else:
141+
data = label.render(truth, xp=xp)
142+
if self.settings.cache.write and cache_path:
143+
to_cache(data, cache_path, dtype=np.uint16)
144+
145+
label_data.append(data)
146+
147+
# concat along the F axis
148+
fluors = [lbl.fluorophore for lbl in self.sample.labels]
149+
truth = xr.concat(label_data, dim=pd.Index(fluors, name=Axis.F))
150+
truth.attrs.update(units="fluorophores", long_name="Ground Truth")
151+
self._ground_truth = truth
152+
153+
logger.info(
154+
f"Ground truth generated in {time.perf_counter() - _t0:.2f} seconds"
155+
)
147156
return self._ground_truth
148157

149158
def filtered_emission_rates(self) -> xr.DataArray:
@@ -234,6 +243,7 @@ def optical_image(self) -> xr.DataArray:
234243
fluorophores in each channel (which a detector would not know). The return
235244
array has dimensions (C, Z, Y, X). The units are photons/s.
236245
"""
246+
logger.info("Creating optical_image ...")
237247
oipf = self.optical_image_per_fluor()
238248
return oipf.sum(Axis.F) # (C, Z, Y, X)
239249

@@ -251,6 +261,9 @@ def digital_image(
251261
are gray values, based on the bit-depth of the detector. If there is no
252262
detector or `with_detector_noise` is False, the units are simply photons.
253263
"""
264+
_t0 = time.perf_counter()
265+
logger.info("Creating digital_image ...")
266+
254267
if optical_image is None:
255268
optical_image = self.optical_image()
256269
image = optical_image # (C, Z, Y, X)
@@ -259,6 +272,7 @@ def digital_image(
259272
# TODO: consider how we would integrate detector pixel size
260273
# rather than a user-sepicified output space
261274
if self.output_space is not None:
275+
logger.info(f"Rescaling to output space {self.output_space.shape}")
262276
image = self.output_space.rescale(image)
263277

264278
# simulate detector
@@ -276,13 +290,17 @@ def digital_image(
276290
ch_exposures = exposure_ms
277291

278292
if self.detector is not None and with_detector_noise:
293+
logger.info(f"Simulating {type(self.detector).__name__} detector ...")
279294
image = self.detector.render(image, exposure_ms=ch_exposures, xp=self._xp)
280295
image.attrs.update(units="gray values")
281296
else:
282297
image = image * (ch_exposures / 1000)
283298
image.attrs.update(units="photons")
284299

285300
# (C, Z, Y, X)
301+
logger.info(
302+
f"Digital image generated in {time.perf_counter() - _t0:.2f} seconds"
303+
)
286304
return image
287305

288306
def run(self) -> xr.DataArray:

tests/test_cache.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,8 +42,8 @@ def test_cache_psf(caplog: pytest.LogCaptureFixture) -> None:
4242
caplog.set_level(10)
4343
psf1 = cached_psf(**kwargs)
4444
cached_psf.cache_clear() # clear the functools cache
45-
assert "Using cached PSF:" not in caplog.text
45+
assert "Found cached PSF" not in caplog.text
4646
psf2 = cached_psf(**kwargs)
47-
assert "Using cached PSF:" in caplog.text
47+
assert "Found cached PSF" in caplog.text
4848

4949
np.testing.assert_array_equal(psf1, psf2)

0 commit comments

Comments
 (0)