Skip to content

Commit ec07564

Browse files
new export debiasingMatrix
1 parent e292429 commit ec07564

File tree

3 files changed

+144
-24
lines changed

3 files changed

+144
-24
lines changed

selectiveInference/NAMESPACE

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,15 @@ export(lar,fs,
66
print.larInf,print.fsInf,
77
plot.lar,plot.fs,
88
fixedLassoInf,print.fixedLassoInf,
9-
# fixedLogitLassoInf,print.fixedLogitLassoInf,
10-
# fixedCoxLassoInf,print.fixedCoxLassoInf,
119
forwardStop,
1210
estimateSigma,
1311
manyMeans,print.manyMeans,
1412
groupfs,groupfsInf,
1513
scaleGroups,factorDesign,
1614
TG.pvalue,
1715
TG.limits,
18-
TG.interval
16+
TG.interval,
17+
debiasingMatrix
1918
)
2019

2120
S3method("coef", "lar")

selectiveInference/R/funs.fixed.R

Lines changed: 36 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -269,19 +269,19 @@ fixedLasso.poly=
269269
## Approximates inverse covariance matrix theta
270270

271271
debiasingMatrix = function(Sigma,
272-
nsample,
273-
rows,
274-
verbose=FALSE,
275-
mu=NULL, # starting value of mu
276-
linesearch=TRUE, # do a linesearch?
277-
resol=1.2, # multiplicative factor for linesearch
278-
max_active=NULL, # how big can active set get?
279-
max_try=10, # how many steps in linesearch?
280-
warn_kkt=FALSE, # warn if KKT does not seem to be satisfied?
281-
max_iter=100, # how many iterations for each optimization problem
282-
kkt_tol=1.e-4, # tolerance for the KKT conditions
283-
objective_tol=1.e-4 # tolerance for relative decrease in objective
284-
) {
272+
nsample,
273+
rows,
274+
verbose=FALSE,
275+
mu=NULL, # starting value of mu
276+
linesearch=TRUE, # do a linesearch?
277+
scaling_factor=1.5, # multiplicative factor for linesearch
278+
max_active=NULL, # how big can active set get?
279+
max_try=10, # how many steps in linesearch?
280+
warn_kkt=FALSE, # warn if KKT does not seem to be satisfied?
281+
max_iter=100, # how many iterations for each optimization problem
282+
kkt_tol=1.e-4, # tolerance for the KKT conditions
283+
objective_tol=1.e-8 # tolerance for relative decrease in objective
284+
) {
285285

286286

287287
if (is.null(max_active)) {
@@ -310,7 +310,7 @@ debiasingMatrix = function(Sigma,
310310
row,
311311
mu,
312312
linesearch=linesearch,
313-
resol=resol,
313+
scaling_factor=scaling_factor,
314314
max_active=max_active,
315315
max_try=max_try,
316316
warn_kkt=FALSE,
@@ -322,7 +322,12 @@ debiasingMatrix = function(Sigma,
322322
warning("Solution for row of M does not seem to be feasible")
323323
}
324324

325-
M[idx,] = output$soln;
325+
if (!is.null(output$soln)) {
326+
M[idx,] = output$soln;
327+
} else {
328+
stop(paste("Unable to approximate inverse row ", row));
329+
}
330+
326331
idx = idx + 1;
327332
}
328333
return(M)
@@ -332,13 +337,13 @@ debiasingRow = function (Sigma,
332337
row,
333338
mu,
334339
linesearch=TRUE, # do a linesearch?
335-
resol=1.2, # multiplicative factor for linesearch
340+
scaling_factor=1.2, # multiplicative factor for linesearch
336341
max_active=NULL, # how big can active set get?
337342
max_try=10, # how many steps in linesearch?
338343
warn_kkt=FALSE, # warn if KKT does not seem to be satisfied?
339-
max_iter=100, # how many iterations for each optimization problem
344+
max_iter=100, # how many iterations for each optimization problem
340345
kkt_tol=1.e-4, # tolerance for the KKT conditions
341-
objective_tol=1.e-4 # tolerance for relative decrease in objective
346+
objective_tol=1.e-8 # tolerance for relative decrease in objective
342347
) {
343348

344349
p = nrow(Sigma)
@@ -368,7 +373,17 @@ debiasingRow = function (Sigma,
368373

369374
while (counter_idx < max_try) {
370375

371-
result = solve_QP(Sigma, mu, max_iter, soln, linear_func, gradient, ever_active, nactive, kkt_tol, objective_tol, max_active)
376+
result = solve_QP(Sigma,
377+
mu,
378+
max_iter,
379+
soln,
380+
linear_func,
381+
gradient,
382+
ever_active,
383+
nactive,
384+
kkt_tol,
385+
objective_tol,
386+
max_active)
372387

373388
iter = result$iter
374389

@@ -390,13 +405,13 @@ debiasingRow = function (Sigma,
390405
if ((iter < (max_iter+1)) && (counter_idx > 1)) {
391406
break; # we've found a feasible point and solved the problem
392407
}
393-
mu = mu * resol;
408+
mu = mu * scaling_factor;
394409
} else { # trying to drop the bound parameter further
395410
if ((iter == (max_iter + 1)) && (counter_idx > 1)) {
396411
result = last_output; # problem seems infeasible because we didn't solve it
397412
break; # so we revert to previously found solution
398413
}
399-
mu = mu / resol;
414+
mu = mu / scaling_factor;
400415
}
401416

402417
# If the active set has grown to a certain size
Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
\name{debiasingMatrix}
2+
\alias{debiasingMatrix}
3+
\title{
4+
Find an approximate inverse of a non-negative definite matrix.
5+
}
6+
\description{
7+
Find some rows of an approximate inverse of a non-negative definite
8+
symmetric matrix by solving the optimization problem described
9+
in Javanmard and Montanari (2013).
10+
}
11+
\usage{
12+
debiasingMatrix(Sigma,
13+
nsample,
14+
rows,
15+
verbose=FALSE,
16+
mu=NULL,
17+
linesearch=TRUE,
18+
scaling_factor=1.5,
19+
max_active=NULL,
20+
max_try=10,
21+
warn_kkt=FALSE,
22+
max_iter=100,
23+
kkt_tol=1.e-4,
24+
objective_tol=1.e-8)
25+
}
26+
\arguments{
27+
\item{Sigma}{
28+
A symmetric non-negative definite matrix, often a cross-covariance matrix.
29+
}
30+
\item{nsample}{
31+
Number of samples used in forming the cross-covariance matrix.
32+
Used for default value of the bound parameter mu.
33+
}
34+
\item{rows}{
35+
Which rows of the approximate inverse to compute.
36+
}
37+
\item{verbose}{
38+
Print out progress as rows are being computed.
39+
}
40+
\item{mu}{
41+
Initial bound parameter for each row. Will be changed
42+
if linesearch is TRUE.
43+
}
44+
\item{linesearch}{
45+
Run a line search to find the smallest feasible bound parameter for each row?
46+
}
47+
\item{scaling_factor}{
48+
In the linesearch, the bound parameter is either multiplied or divided by this
49+
factor at each step.
50+
}
51+
\item{max_active}{
52+
How large an active set to consider in solving the problem with coordinate descent.
53+
Defaults to max(50, 0.3*nsample).
54+
}
55+
\item{max_try}{
56+
How many tries in the linesearch.
57+
}
58+
\item{warn_kkt}{
59+
Warn if the problem does not seem to be feasible after running the coordinate
60+
descent algorithm.
61+
}
62+
\item{max_iter}{
63+
How many full iterations to run of the coordinate descent for each
64+
value of the bound parameter.
65+
}
66+
\item{kkt_tol}{
67+
Tolerance value for assessing whether the KKT conditions of the
68+
dual problem are satisfied, and hence feasibility of the original problem.
69+
}
70+
\item{objective_tol}{
71+
Tolerance value for assessing convergence of the problem using relative
72+
decrease of the objective.
73+
}
74+
}
75+
\details{
76+
This function computes an approximate inverse
77+
as described in Javanmard and Montanari (2013), specifically
78+
display (4). The problem is solved by considering a dual
79+
problem which has an objective similar to a LASSO problem and is solvable
80+
by coordinate descent. For some values of mu the original
81+
problem may not be feasible, in which case the dual problem has no solution.
82+
An attempt to detect this is made by stopping when the active set grows quite
83+
large, as determined by max_active.
84+
}
85+
86+
\value{
87+
\item{M}{Rows of approximate inverse of Sigma.}
88+
}
89+
90+
\references{
91+
Adel Javanmard and Andrea Montanari (2013).
92+
Confidence Intervals and Hypothesis Testing for High-Dimensional Regression. Arxiv: 1306.3171
93+
}
94+
\author{Ryan Tibshirani, Rob Tibshirani, Jonathan Taylor, Joshua Loftus, Stephen Reid}
95+
96+
\examples{
97+
98+
set.seed(10)
99+
n = 50
100+
p = 100
101+
X = matrix(rnorm(n * p), n, p)
102+
S = t(X) %*% X / n
103+
M = debiasingMatrix(S, n, c(1,3,5))
104+
105+
}
106+

0 commit comments

Comments
 (0)