Skip to content

Commit 223ea23

Browse files
committed
bootridge: minor performance enhancement
1 parent a235a4c commit 223ea23

File tree

1 file changed

+17
-9
lines changed

1 file changed

+17
-9
lines changed

inst/bootridge.m

Lines changed: 17 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -707,7 +707,8 @@
707707
' nonzero variance.'));
708708
end
709709
parsubfun = struct ('booterr632', @booterr632, 'lambda_eval', @lambda_eval);
710-
obj_func = @(lambda) parsubfun.booterr632 (YS, XC, lambda, P_vec, nboot, seed);
710+
obj_func = @(lambda) parsubfun.booterr632 (YS, XC, lambda, P_vec, nboot, ...
711+
categor, seed);
711712

712713
% Search for the optimal lambda by .632 bootstrap prediction error
713714
try
@@ -732,7 +733,7 @@
732733
% Get the prediction error and stability selection at the optimal lambda
733734
% Use a minimum of 1999 bootstrap resamples for stability selection
734735
B = max (nboot, 1999);
735-
[pred_err, stability] = booterr632 (YS, XC, lambda, P_vec, B, seed);
736+
[pred_err, stability] = booterr632 (YS, XC, lambda, P_vec, B, categor, seed);
736737

737738
% Correct stability selection probabilities for the design effect
738739
stdnormcdf = @(x) 0.5 * (1 + erf (x / sqrt (2)));
@@ -1095,7 +1096,8 @@
10951096

10961097
%% FUNCTION FOR .632 BOOTSTRAP ESTIMATOR OF PREDICTION ERROR
10971098

1098-
function [PRED_ERR, STABILITY] = booterr632 (Y, X, lambda, P_vec, nboot, seed)
1099+
function [PRED_ERR, STABILITY] = booterr632 (Y, X, lambda, P_vec, nboot, ...
1100+
categor, seed)
10991101

11001102
% This function computes Efron & Tibshirani’s .632 bootstrap prediction error
11011103
% for a multivariate linear ridge/Tikhonov model. Loss is the per-observation
@@ -1169,6 +1171,7 @@
11691171
% --- BOOTSTRAP LOOP ---
11701172
SSE_OOB = 0;
11711173
N_OOB = 0;
1174+
NSAMP = 0;
11721175
if (nargout > 1)
11731176
tau = sqrt (eps_X);
11741177
Sign_obs = sign (Beta_obs);
@@ -1183,11 +1186,13 @@
11831186
o = true (m, 1);
11841187
o(i) = false;
11851188

1186-
% Check for missing predictors in training set and remove out-of-bag
1187-
% samples that have those predictors
1188-
missing = ~ any (X(i, :), 1);
1189-
if any (missing)
1190-
o(any (X(:,missing), 2)) = false;
1189+
% If there are any categorical predictor terms, check for missing predictors
1190+
% in training set and remove out-of-bag samples that have those predictors.
1191+
if (~ isempty (categor))
1192+
missing = ~ any (X(i, :), 1);
1193+
if (any (missing))
1194+
o(any (X(:,missing), 2)) = false;
1195+
end
11911196
end
11921197

11931198
% Skip to next bootstrap sample if there are no out-of-bag observations
@@ -1257,6 +1262,9 @@
12571262
% Calculate and accumulate number of OOB observations
12581263
N_OOB = N_OOB + sum (o) ;
12591264

1265+
% Count actual bootstrap samples used
1266+
NSAMP = NSAMP + 1;
1267+
12601268
end
12611269

12621270
% Calculate pooled OOB error estimate
@@ -1271,7 +1279,7 @@
12711279
% Calculate stability selection
12721280
if (nargout > 1)
12731281
% Convert counts to proportions, with Jeffrey's smoothing.
1274-
STABILITY = (STABILITY + 0.5) / (nboot + 1.0);
1282+
STABILITY = (STABILITY + 0.5) / (NSAMP + 1.0);
12751283
STABILITY(1, :) = NaN; % Set stability selection to NaN for the intercepts
12761284
end
12771285

0 commit comments

Comments (0)