Skip to content

Commit 41145c7

Browse files
committed
rob
1 parent 322e7f0 commit 41145c7

File tree

5 files changed

+56
-8
lines changed

5 files changed

+56
-8
lines changed

selectiveInference/DESCRIPTION

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
Package: selectiveInference
22
Type: Package
33
Title: Tools for Post-Selection Inference
4-
Version: 1.1.3
5-
Date: 2016-02-8
4+
Version: 1.2.1
5+
Date: 2016-07-03
66
Author: Ryan Tibshirani, Rob Tibshirani, Jonathan Taylor,
77
Joshua Loftus, Stephen Reid
88
Maintainer: Rob Tibshirani <[email protected]>
@@ -14,5 +14,5 @@ Suggests:
1414
Rmpfr
1515
Description: New tools for post-selection inference, for use
1616
with forward stepwise regression, least angle regression, the
17-
lasso, and the many means problem.
17+
lasso, and the many means problem. The lasso function implements Gaussian, logistic and Cox survival models.
1818
License: GPL-2

selectiveInference/NAMESPACE

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,4 +40,6 @@ importFrom("graphics", abline, axis, matplot)
4040
importFrom("stats", dnorm, lsfit, pexp, pnorm, predict,
4141
qnorm, rnorm, sd, uniroot, dchisq, model.matrix, pchisq)
4242
importFrom("stats", "coef", "df", "lm", "pf")
43+
importFrom("stats", "glm", "residuals", "vcov")
44+
4345

selectiveInference/R/funs.fixed.R

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ else{
4141
n = nrow(x)
4242
p = ncol(x)
4343
beta = as.numeric(beta)
44-
if (length(beta) != p) stop("beta must have length equal to ncol(x)")
44+
if (length(beta) != p) stop("Since family='gaussian', beta must have length equal to ncol(x)")
4545

4646
# If glmnet was run with an intercept term, center x and y
4747
if (intercept==TRUE) {

selectiveInference/R/funs.fixedLogit.R

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ fixedLogitLassoInf=function(x,y,beta,lambda,alpha=.1, type=c("partial"), tol.bet
1818
n=length(y)
1919
p=ncol(x)
2020
# I assume that intcpt was used
21-
if(length(beta)!=p+1) stop("beta must be of length ncol(x)+1, that is, it should include an intercept")
21+
if(length(beta)!=p+1) stop("Since family='binomial', beta must be of length ncol(x)+1, that is, it should include an intercept")
2222
nvar=sum(beta[-1]!=0)
2323
pv=vlo=vup=sd=rep(NA, nvar)
2424
ci=tailarea=matrix(NA,nvar,2)
@@ -41,11 +41,12 @@ fixedLogitLassoInf=function(x,y,beta,lambda,alpha=.1, type=c("partial"), tol.bet
4141
etahat = xxm %*% bhat
4242
prhat = as.vector(exp(etahat) / (1 + exp(etahat)))
4343
ww=prhat*(1-prhat)
44-
w=diag(ww)
44+
# w=diag(ww)
4545

4646
#check KKT
4747
z=etahat+(y-prhat)/ww
48-
g= t(x)%*%w%*%(z-etahat)/lambda # negative gradient scaled by lambda
48+
# g= t(x)%*%w%*%(z-etahat)/lambda # negative gradient scaled by lambda
49+
g=scale(t(x),FALSE,1/ww)%*%(z-etahat)/lambda # negative gradient scaled by lambda
4950
if (any(abs(g) > 1+tol.kkt) )
5051
warning(paste("Solution beta does not satisfy the KKT conditions",
5152
"(to within specified tolerances)"))
@@ -62,7 +63,8 @@ fixedLogitLassoInf=function(x,y,beta,lambda,alpha=.1, type=c("partial"), tol.bet
6263
"'thresh' parameter, for a more accurate convergence."))
6364

6465
#constraints for active variables
65-
MM=solve(t(xxm)%*%w%*%xxm)
66+
# MM=solve(t(xxm)%*%w%*%xxm)
67+
MM=solve(scale(t(xxm),F,1/ww)%*%xxm)
6668
gm = c(0,-g[vars]*lambda) # gradient at LASSO solution, first entry is 0 because intercept is unpenalized
6769
# at exact LASSO solution it should be s2[-1]
6870
dbeta = MM %*% gm

tests/test.fixed.R

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -510,3 +510,47 @@ g=t(X)%*%(Y-X%*%b)/lam2
510510
out = fixedLassoInf(X,Y,beta,lam*n)
511511

512512

# Simulation check for fixedLassoInf with a Gaussian response and
# per-variable penalty factors: under the global null (beta = 0), the
# selective p-values for the heavily-penalized variables should be
# approximately Uniform(0, 1).

# gaussian
n <- 50
p <- 10
sigma <- 0.7
beta <- rep(0, p)  # global null: every true coefficient is zero

set.seed(43)
nsim <- 1000
pvals <- matrix(NA, nrow = nsim, ncol = p)

x <- matrix(rnorm(n * p), n, p)
x <- scale(x, center = TRUE, scale = TRUE) / sqrt(n - 1)
mu <- x %*% beta

# Columns 1-4 get a tiny penalty factor (so they are nearly always
# selected); the remaining columns carry the full penalty.  These
# quantities do not depend on the simulated y, so hoist them out of
# the loop (this also keeps the RNG stream identical to recomputing
# them inside, since none of them draw random numbers).
pf <- c(rep(0.001, 4), rep(1, p - 4))
xs <- scale(x, center = FALSE, scale = pf)  # scale cols of x by penalty factors
lambda <- 0.8

for (i in seq_len(nsim)) {
  cat(i)
  y <- mu + sigma * rnorm(n)

  # Fit the lasso path with glmnet, then extract the solution at the
  # fixed lambda (glmnet's s is on the mean-loss scale, hence /n).
  gfit <- glmnet(xs, y, standardize = FALSE)
  # Use a distinct name so the true beta above is not clobbered.
  beta_hat <- coef(gfit, s = lambda / n, exact = TRUE)[-1]

  # Fixed-lambda selective p-values and intervals for the active set.
  aa <- fixedLassoInf(xs, y, beta_hat, lambda, sigma = sigma)
  pvals[i, which(beta_hat != 0)] <- aa$pv
}

# Every coefficient is null here; examine the heavily-penalized columns.
np <- pvals[, -(1:4)]
mean(np[!is.na(np)] < 0.1)  # should be close to the nominal level 0.1

# QQ-plot of the observed null p-values against Uniform(0, 1).
o <- !is.na(np)
plot((1:sum(o)) / sum(o), sort(np))
abline(0, 1)
#####

0 commit comments

Comments
 (0)