
Commit fac92a3

qsr: discretize, analyze_data, gcv with edf caching, set_level. fpca: discretize, analyze_data. de: bug fixes
1 parent: cf409dc

5 files changed: +59 −44 lines

fdaPDE/src/models/fpca.h

Lines changed: 15 additions & 13 deletions
@@ -526,18 +526,20 @@ template <typename VariationalSolver> class fPCA {
   public:
    fPCA() noexcept = default;
    template <typename GeoFrame, typename Penalty>
-   fPCA(const std::string& colname, const GeoFrame& gf, Penalty&& penalty) noexcept :
-       smoother_(), data_(gf[0].data().template col<double>(colname).as_matrix()) {
+   fPCA(const std::string& colname, const GeoFrame& gf, Penalty&& penalty) noexcept : smoother_(), data_() {
+       discretize(penalty.get().penalty);
+       analyze_data(colname, gf);
+   }
+   template <typename... Args> void discretize(Args&&... args) {
+       smoother_.discretize(std::forward<Args>(args)...);
+       n_dofs_ = smoother_.n_dofs();
+   }
+   template <typename GeoFrame> void analyze_data(const std::string& colname, const GeoFrame& gf) {
        fdapde_assert(gf.n_layers() == 1);
+       data_ = gf[0].data().template col<double>(colname).as_matrix();
        n_locs_ = data_.rows();
-       n_units_ = data_.cols();
-       if constexpr (requires(Penalty p) { p.get(); }) {
-           smoother_ = smoother_t(gf, penalty.get());
-       } else {
-           smoother_ = smoother_t(gf, penalty(gf.template triangulation<0>()).get());
-       }
-       n_dofs_ = smoother_.n_dofs();
-       // detect if data_ has at least one missing value
+       n_units_ = data_.cols();
+       // detect if data_ has at least one missing value
        has_nan_ = false;
        for (int i = 0; i < n_locs_; ++i) {
            for (int j = 0; j < n_units_; ++j) {
@@ -554,11 +556,11 @@ template <typename VariationalSolver> class fPCA {
    auto fit(int rank, const LambdaT& lambda_grid, int flag = ComputeRandSVD, Policy policy = Policy()) {
        fdapde_assert(lambda_grid.size() % n_lambda == 0);
        auto solver_ = policy.get(smoother_);   // instantiate solver implementation
-       f_.resize(n_dofs_ , rank);
+       f_.resize(n_dofs_, rank);
        s_.resize(n_units_, rank);
        f_norm_.resize(rank);
        lambda_.resize(n_lambda, rank);
-       // dispatch to processing logic
+       // dispatch to processing logic
        if (has_nan_) {
            // default to OptimMSRE calibration, if no calibration provided
            if (lambda_grid.size() > n_lambda && (flag & 0b11110) == 0) { flag = flag | OptimizeMSRE; }
@@ -576,7 +578,7 @@ template <typename VariationalSolver> class fPCA {
            s_ = std::move(s);
            f_norm_ = solver_.loadings_norm();
        }
-       lambda_ = solver_.lambda();
+       lambda_ = solver_.lambda();
        return std::tie(f_, s_);
    }
    // observers
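
Note: the fPCA constructor now just chains the new discretize()/analyze_data() members, so the two steps can also be invoked separately on a default-constructed model. A minimal usage sketch based only on the signatures visible in this diff; SolverType, gf (a single-layer GeoFrame), penalty and lambda_grid are hypothetical, pre-built objects and are not part of this commit:

    // sketch, not library documentation: gf, penalty, lambda_grid are hypothetical placeholders
    fPCA<SolverType> pca("y", gf, penalty);        // one-shot: discretize + analyze_data
    fPCA<SolverType> pca2;                         // or the equivalent two-step form
    pca2.discretize(penalty.get().penalty);        // builds the smoother, records n_dofs_
    pca2.analyze_data("y", gf);                    // extracts the data column, flags missing values
    auto [f, s] = pca2.fit(3, lambda_grid);        // loadings f_ and scores s_ for rank 3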

fdaPDE/src/models/gsr.h

Lines changed: 2 additions & 2 deletions
@@ -163,8 +163,7 @@ class GSRPDE {
            double dor = n_ - (q_ + edf_cache_.at(lambda_vec));   // residual degrees of freedom
            // compute total deviance
            vector_t mu = model_->distr_->inv_link(model_->fitted());
-           double total_deviance = model_->distr_->deviance(mu, model_->y_);
-           return (n_ / std::pow(dor, 2)) * total_deviance;
+           return (n_ / std::pow(dor, 2)) * model_->distr_->deviance(mu, model_->y_);
        }
        // observers
        const edf_cache_t& edf_cache() const { return edf_cache_; }
@@ -176,6 +175,7 @@ class GSRPDE {
        // stochastic edf approximation parameter
        int r_, seed_;
    };
+   friend gcv_t;
    gcv_t gcv() { return gcv_t(this); }
    gcv_t gcv(const typename gcv_t::edf_cache_t& edf_cache) { return gcv_t(this, edf_cache); }
    gcv_t gcv(int r, int seed) { return gcv_t(this, r, seed); }
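
Note: GSRPDE already exposed a GCV functor with an EDF cache; this commit befriends gcv_t so it can access the model's private members and inlines the deviance computation. For context, a minimal sketch of how the cache can be reused across lambda searches, using only the overloads visible above; model is a hypothetical, already configured GSRPDE instance:

    // sketch, not library documentation: model setup is omitted / hypothetical
    auto gcv1 = model.gcv();                    // starts with an empty EDF cache
    // ... evaluate gcv1 over a first lambda grid; each visited lambda caches its Tr[S] ...
    auto gcv2 = model.gcv(gcv1.edf_cache());    // warm-started: cached lambdas skip the edf() call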

fdaPDE/src/models/qsr.h

Lines changed: 36 additions & 25 deletions
@@ -32,29 +32,27 @@ template <typename VariationalSolver> class QSRPDE {
    template <typename GeoFrame, typename Penalty>
    QSRPDE(const std::string& formula, const GeoFrame& gf, double alpha, Penalty&& penalty) noexcept :
        solver_(), alpha_(alpha) {
+       discretize(penalty.get().penalty);
+       analyze_data(formula, gf);
+   }
+   template <typename GeoFrame, typename Penalty>
+   QSRPDE(const std::string& formula, const GeoFrame& gf, Penalty&& penalty) noexcept :
+       QSRPDE(formula, gf, 0.5, penalty) { }
+
+   // modifiers
+   void set_level(double alpha) { alpha_ = alpha; }
+   template <typename... Args> void discretize(Args&&... args) { solver_.discretize(std::forward<Args>(args)...); }
+   template <typename GeoFrame, typename WeightMatrix>
+   void analyze_data(const std::string& formula, const GeoFrame& gf, const WeightMatrix& W) {
        fdapde_assert(gf.n_layers() == 1);
        Formula formula_(formula);
        n_obs_ = gf[0].rows();
        n_covs_ = 0;
        for (const std::string& token : formula_.rhs()) {
            if (gf.contains(token)) { n_covs_++; }
        }
-       // discretize
-       if constexpr (requires(Penalty p) { p.get(); }) {
-           solver_ = solver_t(formula, gf, penalty.get());
-       } else {
-           solver_ = solver_t(formula, gf, penalty(gf.template triangulation<0>()).get());
-       }
-       y_ = solver_.response();
-   }
-
-   // modifiers
-   template <typename... Args> void discretize(Args&&... args) {
-       return solver_.discretize(std::forward<Args>(args)...);
-   }
-   template <typename GeoFrame, typename WeightMatrix>
-   void analyze_data(const std::string& formula, const GeoFrame& gf, const WeightMatrix& W) {
-       return solver_.analyze_data(formula, gf, W);
+       solver_.analyze_data(formula, gf, W);
+       y_ = solver_.response();
    }
    template <typename GeoFrame> void analyze_data(const std::string& formula, const GeoFrame& gf) {
        return analyze_data(formula, gf, vector_t::Ones(gf[0].rows()).asDiagonal());
@@ -134,11 +132,21 @@ template <typename VariationalSolver> class QSRPDE {
        static constexpr int XprBits = 0;
        using Scalar = double;
        using InputType = Vector<Scalar, StaticInputSize>;
+       using edf_cache_t = std::unordered_map<
+         std::array<double, StaticInputSize>, double, internals::std_array_hash<double, StaticInputSize>>;

        gcv_t() noexcept = default;
-       gcv_t(QSRPDE* model) : model_(model), n_(model->n_obs()), q_(model->n_covs()), r_(100), seed_(random_seed) { }
-       gcv_t(QSRPDE* model, int r, int seed) :
-           model_(model), n_(model->n_obs()), q_(model->n_covs()), r_(r), seed_(seed) { }
+       gcv_t(QSRPDE* model, const edf_cache_t& edf_cache) :
+           model_(model),
+           n_(model->n_obs()),
+           q_(model->n_covs()),
+           edf_cache_(edf_cache),
+           r_(100),
+           seed_(random_seed) { }
+       gcv_t(QSRPDE* model, const edf_cache_t& edf_cache, int r, int seed) :
+           model_(model), n_(model->n_obs()), q_(model->n_covs()), edf_cache_(edf_cache), r_(r), seed_(seed) { }
+       gcv_t(QSRPDE* model) : gcv_t(model, edf_cache_t()) { }
+       gcv_t(QSRPDE* model, int r, int seed) : gcv_t(model, edf_cache_t(), r, seed) { }

        template <typename InputType_>
            requires(internals::is_subscriptable<InputType_, int>)
@@ -150,28 +158,31 @@ template <typename VariationalSolver> class QSRPDE {
        constexpr double operator()(LambdaT... lambda) {
            model_->fit(static_cast<double>(lambda)...);
            std::array<double, StaticInputSize> lambda_vec {lambda...};
-           if (edf_map_.find(lambda_vec) == edf_map_.end()) {   // cache Tr[S]
-               edf_map_[lambda_vec] = model_->edf(r_, seed_);
+           if (edf_cache_.find(lambda_vec) == edf_cache_.end()) {   // cache Tr[S]
+               edf_cache_[lambda_vec] = model_->edf(r_, seed_);
            }
-           double dor = n_ - (q_ + edf_map_.at(lambda_vec));   // residual degrees of freedom
+           double dor = n_ - (q_ + edf_cache_.at(lambda_vec));   // residual degrees of freedom
            double pinball = 0;
            for (int i = 0; i < n_; ++i) {
                pinball += model_->pinball_loss(model_->y_[i] - model_->mu_[i], std::pow(10, model_->eps_));
            }
            return (std::pow(pinball, 2) / std::pow(dor, 2));
        }
+       // observers
+       const edf_cache_t& edf_cache() const { return edf_cache_; }
+       edf_cache_t& edf_cache() { return edf_cache_; }
      private:
        QSRPDE* model_;
        int n_ = 0, q_ = 0;
-       std::unordered_map<
-         std::array<double, StaticInputSize>, double, internals::std_array_hash<double, StaticInputSize>>
-         edf_map_;
+       edf_cache_t edf_cache_;
        // stochastic edf approximation parameter
        int r_, seed_;
    };
    friend gcv_t;
    gcv_t gcv() { return gcv_t(this); }
+   gcv_t gcv(const typename gcv_t::edf_cache_t& edf_cache) { return gcv_t(this, edf_cache); }
    gcv_t gcv(int r, int seed) { return gcv_t(this, r, seed); }
+   gcv_t gcv(const typename gcv_t::edf_cache_t& edf_cache, int r, int seed) { return gcv_t(this, edf_cache, r, seed); }

    // inference
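
Note: QSRPDE now follows the same pattern: the constructor delegates to discretize()/analyze_data(), a new constructor defaults the quantile level to 0.5, set_level() changes it afterwards, and the GCV functor gains the same EDF cache machinery already present in GSRPDE. A minimal sketch limited to the signatures in this diff; SolverType, gf, penalty and the formula string are hypothetical placeholders:

    // sketch, not library documentation: gf and penalty are hypothetical, pre-built objects
    QSRPDE<SolverType> model("y ~ f", gf, penalty);    // alpha defaults to 0.5 (median regression)
    model.set_level(0.9);                              // switch to the 90th quantile before fitting
    auto gcv0 = model.gcv(100, 42);                    // stochastic edf: r = 100, seed = 42
    // ... lambda selection: each call gcv0(lambda...) fits the model and caches Tr[S] ...
    auto gcv1 = model.gcv(gcv0.edf_cache(), 100, 42);  // later searches reuse the cached edf values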

fdaPDE/src/solvers/fe_de_elliptic.h

Lines changed: 2 additions & 2 deletions
@@ -52,7 +52,7 @@ struct fe_de_elliptic {
        return -(m_->Psi_ * g).sum() + m_->n_obs_ * m_->int_exp_(g) + lambda_ * g.dot(m_->P_ * g);
    }
    // gradient functor
-   std::function<vector_t(const vector_t&)> derive() {
+   std::function<vector_t(const vector_t&)> gradient() {
        return [this, dllik = vector_t(-m_->Psi_.transpose() * vector_t::Ones(m_->n_obs_))](const vector_t& g) {
            return vector_t(dllik + m_->n_obs_ * m_->grad_int_exp_(g) + 2 * lambda_ * m_->P_ * g);
        };
@@ -100,7 +100,7 @@ struct fe_de_elliptic {
    vector_t w(n_quad_nodes);
    for (int i = 0; i < n_quad_nodes; ++i) {
        for (int j = 0; j < n_shape_functions; ++j) {
-           PsiQuad(i, j) = fe_space.eval_shape_value(j, quad_rule.nodes.row(i));
+           PsiQuad(i, j) = fe_space.eval_shape_value(j, quad_rule.nodes.row(i).transpose());
        }
        w[i] = quad_rule.weights[i];
    }
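
Note: in both density-estimation solvers the gradient functor is renamed from derive() to gradient(), and quadrature nodes are now passed to eval_shape_value() as column vectors via .transpose(). A minimal sketch of evaluating the returned functor; functional, vector_t and n_dofs are hypothetical placeholders, not part of this commit:

    // sketch, not library documentation: functional stands in for an fe_de_elliptic objective instance
    std::function<vector_t(const vector_t&)> grad = functional.gradient();
    vector_t g = vector_t::Zero(n_dofs);    // current expansion coefficients (illustrative)
    vector_t g_next = g - 0.01 * grad(g);   // one explicit gradient-descent step, for illustration only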

fdaPDE/src/solvers/fe_de_separable.h

Lines changed: 4 additions & 2 deletions
@@ -88,7 +88,9 @@ struct fe_de_separable {
    int n_shape_functions = func_space.n_shape_functions();
    matrix_t m(n_quad_nodes, n_shape_functions);
    for (int i = 0; i < n_quad_nodes; ++i) {
-       for (int j = 0; j < n_shape_functions; ++j) { m(i, j) = func_space.eval_shape_value(j, quad.nodes.row(i)); }
+       for (int j = 0; j < n_shape_functions; ++j) {
+           m(i, j) = func_space.eval_shape_value(j, quad.nodes.row(i).transpose());
+       }
    }
    return m;
}
@@ -124,7 +126,7 @@ struct fe_de_separable {
            lambda_[1] * g.dot(m_->PT_ * g);
    }
    // gradient functor
-   std::function<vector_t(const vector_t&)> derive() {
+   std::function<vector_t(const vector_t&)> gradient() {
        return [this, dllik = vector_t(-m_->Psi_.transpose() * vector_t::Ones(m_->n_obs_))](const vector_t& g) {
            return vector_t(
                dllik + m_->n_obs_ * m_->grad_int_exp_(g) + 2 * (lambda_[0] * m_->PD_ + lambda_[1] * m_->PT_) * g);
