
Commit e540d60

using calculate_sum_error for safer calculations
1 parent fad523a commit e540d60
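
Both files in the diff below make the same change: every place that previously reduced an error vector with Eigen's .sum() now goes through the helper calculate_sum_error. The commit message does not spell out what "safer" means here, but routing all error aggregation through one function gives a single place to guard the reduction. The following is a minimal sketch of what such a helper could look like, assuming the intent is to stop a non-finite sum (for example NaN or Inf from divergent predictions or overflowing errors) from silently propagating through the boosting loop; the real calculate_sum_error in this code base may be implemented differently.

#include <Eigen/Dense>
#include <cmath>
#include <limits>

using Eigen::VectorXd;

// Hypothetical sketch only, not necessarily the actual APLR implementation:
// reduce an error vector to a scalar, but map a non-finite result
// (NaN or Inf) to the largest representable error so that the candidate
// boosting step that produced it can never be selected as an improvement.
double calculate_sum_error(const VectorXd &errors)
{
    double sum{errors.sum()};
    bool sum_is_not_finite{!std::isfinite(sum)};
    if (sum_is_not_finite)
        return std::numeric_limits<double>::max();
    return sum;
}

Whatever the exact guard is, with every call site funnelled through one helper, as in the diff below, any future change to how errors are aggregated only needs to be made in one place.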

2 files changed: +5 −5 lines

cpp/APLRRegressor.h

Lines changed: 3 additions & 3 deletions
@@ -667,7 +667,7 @@ void APLRRegressor::consider_updating_intercept()
     intercept_test=intercept_test*v;
     linear_predictor_update=VectorXd::Constant(neg_gradient_current.size(),intercept_test);
     linear_predictor_update_validation=VectorXd::Constant(y_validation.size(),intercept_test);
-    error_after_updating_intercept=calculate_errors(neg_gradient_current,linear_predictor_update,sample_weight_train).sum();
+    error_after_updating_intercept=calculate_sum_error(calculate_errors(neg_gradient_current,linear_predictor_update,sample_weight_train));
 }

 void APLRRegressor::select_the_best_term_and_update_errors(size_t boosting_step)
@@ -696,7 +696,7 @@ void APLRRegressor::select_the_best_term_and_update_errors(size_t boosting_step)
     VectorXd values_validation{terms_eligible_current[best_term].calculate(X_validation)};
     linear_predictor_update=values*terms_eligible_current[best_term].coefficient;
     linear_predictor_update_validation=values_validation*terms_eligible_current[best_term].coefficient;
-    double error_after_updating_term=calculate_errors(neg_gradient_current,linear_predictor_update,sample_weight_train).sum();
+    double error_after_updating_term=calculate_sum_error(calculate_errors(neg_gradient_current,linear_predictor_update,sample_weight_train));
     if(std::isgreaterequal(error_after_updating_term,neg_gradient_nullmodel_errors_sum)) //if no improvement or worse then terminate search
     {
         abort_boosting=true;
@@ -759,7 +759,7 @@ void APLRRegressor::update_gradient_and_errors()
 {
     neg_gradient_current=calculate_neg_gradient_current(y_train,predictions_current);
     neg_gradient_nullmodel_errors=calculate_errors(neg_gradient_current,linear_predictor_null_model,sample_weight_train);
-    neg_gradient_nullmodel_errors_sum=neg_gradient_nullmodel_errors.sum();
+    neg_gradient_nullmodel_errors_sum=calculate_sum_error(neg_gradient_nullmodel_errors);
 }

 void APLRRegressor::add_new_term(size_t boosting_step)

cpp/term.h

Lines changed: 2 additions & 2 deletions
@@ -493,7 +493,7 @@ void Term::discretize_data_by_bin()
 void Term::estimate_split_point_on_discretized_data()
 {
     errors_initial=calculate_errors(negative_gradient_discretized,VectorXd::Constant(negative_gradient_discretized.size(),0.0),sample_weight_discretized);
-    error_initial=errors_initial.sum();
+    error_initial=calculate_sum_error(errors_initial);

     //FINDING BEST SPLIT ON DISCRETIZED DATA
     double split_point_temp;
@@ -609,7 +609,7 @@ void Term::estimate_coefficient_and_error_on_all_data()
     {
         coefficient=xwy/xwx*v;
         VectorXd predictions{sorted_vectors.values_sorted*coefficient};
-        split_point_search_errors_sum=calculate_errors(sorted_vectors.negative_gradient_sorted,predictions,sorted_vectors.sample_weight_sorted).sum()+error_where_given_terms_are_zero;
+        split_point_search_errors_sum=calculate_sum_error(calculate_errors(sorted_vectors.negative_gradient_sorted,predictions,sorted_vectors.sample_weight_sorted))+error_where_given_terms_are_zero;
     }
     else
     {

0 commit comments
