Skip to content

Commit 1e5fb9e

Browse files
authored
Make FROLS.history_ as a list (#619)
1 parent a064736 commit 1e5fb9e

File tree

1 file changed: +37 additions, −25 deletions

pysindy/optimizers/frols.py

Lines changed: 37 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -172,27 +172,39 @@ def _reduce(self, x, y):
172172
" as the loss function being minimized by FROLS!"
173173
)
174174

175-
# History of selected functions: [iteration x output x coefficients]
176-
self.history_ = np.zeros((n_features, n_targets, n_features), dtype=x.dtype)
175+
# History of selected functions as a list of [output x coefficients]
176+
# with a length of number of iterations
177+
self.history_ = []
177178
# Error Reduction Ratio: [iteration x output]
178179
self.ERR_global = np.zeros((n_features, n_targets))
179-
for k in range(n_targets):
180-
# Initialize arrays
181-
g_global = np.zeros(
182-
n_features, dtype=x.dtype
183-
) # Coefficients for selected (orthogonal) functions
184-
L = np.zeros(
185-
n_features, dtype=int
186-
) # Order of selection, i.e. L[0] is the first, L[1] second...
187-
A = np.zeros(
188-
(n_features, n_features), dtype=x.dtype
189-
) # Used for inversion to original function set (A @ coef = g)
190-
191-
# Orthogonal function libraries
192-
Q = np.zeros_like(x) # Global library (built over time)
193-
Qs = np.zeros_like(x) # Same, but for each step
194-
sigma = np.real(np.vdot(y[:, k], y[:, k])) # Variance of the signal
195-
for i in range(n_features):
180+
# Initialize arrays
181+
g_global_full = np.zeros(
182+
(n_targets, n_features), dtype=x.dtype
183+
) # Coefficients for selected (orthogonal) functions
184+
L_full = np.zeros(
185+
(n_targets, n_features), dtype=int
186+
) # Order of selection, i.e. L[0] is the first, L[1] second...
187+
A_full = np.zeros(
188+
(n_targets, n_features, n_features), dtype=x.dtype
189+
) # Used for inversion to original function set (A @ coef = g)
190+
# Orthogonal function libraries
191+
Q_full = np.zeros(
192+
(n_targets, *x.shape), dtype=x.dtype
193+
) # Global library (built over time)
194+
Qs_full = np.zeros(
195+
(n_targets, *x.shape), dtype=x.dtype
196+
) # Same, but for each step
197+
for i in range(n_features):
198+
coef_i = np.zeros((n_targets, n_features), dtype=x.dtype)
199+
200+
for k in range(n_targets):
201+
g_global = g_global_full[k]
202+
L = L_full[k]
203+
A = A_full[k]
204+
Q = Q_full[k]
205+
Qs = Qs_full[k]
206+
sigma = np.real(np.vdot(y[:, k], y[:, k])) # Variance of the signal
207+
196208
for m in range(n_features):
197209
if m not in L[:i]:
198210
# Orthogonalize with respect to already selected functions
@@ -222,13 +234,10 @@ def _reduce(self, x, y):
222234
coef_k = np.zeros_like(g_global)
223235
coef_k[L[:i]] = alpha
224236
coef_k[abs(coef_k) < 1e-10] = 0
237+
coef_i[k, :] = coef_k
225238

226-
self.history_[i, k, :] = np.copy(coef_k)
227-
228-
if i >= self.max_iter:
229-
break
230239
if self.verbose:
231-
coef = self.history_[i, k, :]
240+
coef = coef_i[k, :]
232241
R2 = np.sum((y[:, k] - np.dot(x, coef).T) ** 2)
233242
L2 = self.alpha * np.sum(coef**2)
234243
L0 = np.count_nonzero(coef)
@@ -237,6 +246,9 @@ def _reduce(self, x, y):
237246
"{0:10d} ... {1:5d} ... {2:10.4e} ... {3:10.4e}"
238247
" ... {4:5d} ... {5:10.4e} ... {6:10.4e}".format(*row)
239248
)
249+
self.history_.append(coef_i)
250+
if i >= self.max_iter:
251+
break
240252

241253
# Function selection: L2 error for output k at iteration i is given by
242254
# sum(ERR_global[k, :i]), and the number of nonzero coefficients is (i+1)
@@ -250,4 +262,4 @@ def _reduce(self, x, y):
250262
self.loss_[: self.max_iter + 1, :], axis=0
251263
) # Minimum loss for this output function
252264
for k in range(n_targets):
253-
self.coef_[k, :] = self.history_[loss_min[k], k, :]
265+
self.coef_[k, :] = self.history_[loss_min[k]][k, :]

0 commit comments

Comments (0)