Skip to content

Commit 095f932

Browse files
Merge remote-tracking branch 'origin/master' into TOF_improvements
2 parents cd45053 + 05bca88 commit 095f932

File tree

7 files changed

+746
-26
lines changed

7 files changed

+746
-26
lines changed

documentation/release_6.4.htm

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,14 @@ <h3>Changed functionality</h3>
5353
<ul>
5454
<li>When using a (ray tracing) matrix as projector, previously the matrix stored elements for the whole LOR,
5555
while many elements would be zero. We now only store the non-zero ones, resulting in a speed-up for the Siemens Vision Quadra of about a factor 1.7. Memory usage is also reduced.<br>
56-
<a href=https://github.com/UCL/STIR/pull/1674># 1674</a>
56+
<a href=https://github.com/UCL/STIR/pull/1674># 1674</a>.
57+
</li>
58+
<li>
59+
OpenMP parallelisation was added to the function to create a 2d histogram of hits per crystal
60+
from a proj data object, <code>make_fan_sum_data</code>.
61+
<br>
62+
<a href=https://github.com/UCL/STIR/pull/1667>PR #1667</a>.
63+
</li>
5764
</ul>
5865
<h4>Changes to examples</h4>
5966
<ul>
@@ -103,6 +110,10 @@ <H2>What is new for developers (aside from what should be obvious from the above
103110

104111
<h3>New functionality</h3>
105112
<ul>
113+
<li>
114+
Added a Python script to convert e7tools-generated Siemens Biograph Vision 600 sinograms to a STIR-compatible format.
115+
<a href=https://github.com/UCL/STIR/pull/1675>PR #1675</a>
116+
</li>
106117
</ul>
107118

108119
<h3>Changed functionality</h3>

examples/python/LBFGSBPC.py

Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
#
2+
# SPDX-License-Identifier: Apache-2.0
3+
#
4+
# Class implementing the LBFGSB-PC algorithm in stir
5+
#
6+
# Authors: Kris Thielemans
7+
#
8+
# Based on Georg Schramm's
9+
# https://github.com/SyneRBI/PETRIC-MaGeZ/blob/a690205b2e3ec874e621ed2a32a802cd0bed4c1d/simulation_src/sim_stochastic_grad.py
10+
# but with using diag(H.1) as preconditioner at the moment, as per Tsai's paper (see ref in the class doc)
11+
#
12+
# Copyright 2025 University College London
13+
14+
import numpy as np
15+
import numpy.typing as npt
16+
import stir
17+
from scipy.optimize import fmin_l_bfgs_b
18+
from typing import Callable, Optional, List
19+
20+
# import matplotlib.pyplot as plt
21+
22+
23+
class LBFGSBPC:
    """Implementation of the LBFGSB-PC Algorithm.

    Runs scipy's L-BFGS-B on a diagonally preconditioned (and non-negativity
    constrained) version of a STIR objective function.

    See
    Tsai et al,
    Fast Quasi-Newton Algorithms for Penalized Reconstruction in Emission Tomography and Further Improvements via Preconditioning
    IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL. 37, NO. 4, APRIL 2018
    DOI: 10.1109/TMI.2017.2786865

    WARNING: it maximises the objective function (as required by sirf.STIR.ObjectiveFunction).
    WARNING: the implementation uses asarray(), which means you need SIRF 3.9. You should be able to just replace it with as_array() otherwise.

    This implementation is NOT a CIL.Algorithm, but it behaves somewhat as one.
    """

    def __init__(
        self,
        objfun: stir.GeneralisedObjectiveFunction3DFloat,
        initial: stir.FloatVoxelsOnCartesianGrid,
        update_objective_interval: int = 0,
    ):
        """Set up the algorithm and precompute the preconditioner.

        Parameters
        ----------
        objfun : stir.GeneralisedObjectiveFunction3DFloat
            Objective function to MAXIMISE.
        initial : stir.FloatVoxelsOnCartesianGrid
            Initial image estimate (cloned; the argument is not modified).
        update_objective_interval : int
            If > 0, record the objective value every this many iterations
            (see ``callback``). If 0, no values are recorded.
        """
        self.trunc_filter = stir.TruncateToCylindricalFOVImageProcessor3DFloat()
        self.objfun = objfun
        self.initial = initial.clone()
        self.trunc_filter.apply(self.initial)
        self.shape = initial.shape()
        # NOTE(review): `output` is never used; the result is stored in `self.x`
        # by process(). Kept for backward compatibility.
        self.output = None
        self.update_objective_interval = update_objective_interval

        # Preconditioner D^(-1/2) with D = diag(H.1), as per Tsai's paper
        # (see class doc). H.1 is obtained via accumulate_Hessian_times_input
        # with an all-ones image.
        precon = initial.get_empty_copy()
        objfun.accumulate_Hessian_times_input(precon, initial, initial * 0 + 1)
        # objfun is maximised, so its Hessian is negative (semi-)definite; flip sign.
        precon *= -1
        # self.Dinv_STIR = precon.maximum(1).power(-0.5)
        # Clamp to >= 1 to avoid division blow-up where the Hessian-diagonal is tiny.
        self.Dinv = np.power(np.maximum(precon.as_array(), 1), -0.5)
        self.Dinv_STIR = precon
        self.Dinv_STIR.fill(self.Dinv)
        self.trunc_filter.apply(self.Dinv_STIR)
        # plt.figure()
        # plt.imshow(self.Dinv_STIR.as_array()[self.shape[0] // 2, :, :])
        # Flat copy used by the scipy callbacks (which work on raveled arrays).
        self.Dinv = self.Dinv_STIR.as_array().ravel()
        # Work images reused by the value/gradient callbacks to avoid reallocations.
        self.tmp_for_value = initial.get_empty_copy()
        self.tmp1_for_gradient = initial.get_empty_copy()
        self.tmp2_for_gradient = initial.get_empty_copy()

    def precond_objfun_value(self, z: npt.ArrayLike) -> float:
        """Return -objfun(D^(-1/2) z), i.e. the value to MINIMISE in scipy."""
        self.tmp_for_value.fill(
            np.reshape(z.astype(np.float32) * self.Dinv, self.shape)
        )
        return -self.objfun.compute_value(self.tmp_for_value)

    def precond_objfun_gradient(self, z: npt.ArrayLike) -> np.ndarray:
        """Return the gradient of ``precond_objfun_value`` w.r.t. z (chain rule)."""
        self.tmp1_for_gradient.fill(
            np.reshape(z.astype(np.float32) * self.Dinv, self.shape)
        )
        self.objfun.compute_gradient(self.tmp2_for_gradient, self.tmp1_for_gradient)
        return self.tmp2_for_gradient.as_array().ravel() * self.Dinv * -1

    def callback(self, x):
        """Per-iteration bookkeeping: record objective value and advance counter.

        Called by scipy with the current (preconditioned) estimate ``x``.
        """
        if (
            self.update_objective_interval > 0
            and self.iter % self.update_objective_interval == 0
        ):
            self.loss.append(-self.precond_objfun_value(x))
            self.iterations.append(self.iter)
        self.iter += 1

    def process(
        self, iterations=None, callbacks: Optional[List[Callable]] = None, verbose=0
    ) -> None:
        r"""run up to :code:`iterations` with callbacks.

        Parameters
        -----------
        iterations: int, default is None
            Number of iterations to run.
        callbacks: list of callables, default is None
            List of callables which are passed the current Algorithm object each iteration,
            in addition to the built-in :code:`self.callback`.
        verbose: 0=quiet, 1=info, 2=debug
            NOTE(review): currently unused; kept for CIL signature compatibility.
        """
        if iterations is None:
            raise ValueError("missing argument `iterations`")
        precond_init = self.initial / self.Dinv_STIR
        self.trunc_filter.apply(precond_init)
        precond_init = precond_init.as_array().ravel()
        # Non-negativity constraint on every voxel (lower bound 0, no upper bound).
        bounds = precond_init.size * [(0, None)]
        self.iter = 0
        self.loss = []
        self.iterations = []
        # TODO not really required, but it differs from the first value reported by fmin_l_bfgs_b. Not sure why...
        self.callback(precond_init)
        self.iter = 0  # set back again

        def _combined_callback(x):
            # Built-in bookkeeping first, then user callbacks (CIL convention:
            # they receive the algorithm object, not the raw estimate).
            self.callback(x)
            if callbacks is not None:
                for cb in callbacks:
                    cb(self)

        res = fmin_l_bfgs_b(
            self.precond_objfun_value,
            precond_init,
            self.precond_objfun_gradient,
            maxiter=iterations,
            bounds=bounds,
            m=20,  # number of correction pairs kept for the Hessian approximation
            callback=_combined_callback,
            factr=0,  # disable scipy's own convergence tests:
            pgtol=0,  # always run the full `iterations`
        )
        # store result (use name "x" for CIL compatibility); undo the preconditioning
        self.x = self.tmp_for_value.get_empty_copy()
        self.x.fill(np.reshape(res[0].astype(np.float32) * self.Dinv, self.shape))

    def run(
        self, **kwargs
    ) -> None:  # CIL alias, would need to callback and verbose keywords etc
        self.process(**kwargs)

    def get_output(self) -> stir.FloatVoxelsOnCartesianGrid:
        """Return the final (un-preconditioned) image estimate computed by process()."""
        return self.x

0 commit comments

Comments
 (0)