Skip to content

Commit 1237c78

Browse files
both solvers working
1 parent d65e437 commit 1237c78

File tree

5 files changed

+123
-52
lines changed

5 files changed

+123
-52
lines changed

selectiveInference/R/funs.fixed.R

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -297,7 +297,7 @@ InverseLinfty <- function(sigma, n, e, resol=1.2, mu=NULL, maxiter=50, threshold
297297

298298
while ((mu.stop != 1)&&(try.no<10)){
299299
last.beta <- beta
300-
print(c("#######################trying ", try.no))
300+
#print(c("#######################trying ", try.no))
301301
output <- InverseLinftyOneRow(sigma, i, mu, maxiter=maxiter, soln_result=output) # uses a warm start
302302
beta <- output$soln
303303
iter <- output$iter
@@ -365,11 +365,10 @@ InverseLinftyOneRow <- function (Sigma, i, mu, maxiter=50, soln_result=NULL) {
365365
linear_func = soln_result$linear_func
366366
}
367367

368-
result = solve_QP(Sigma, mu, maxiter, soln, linear_func, gradient, ever_active, nactive) # C function uses 0-based indexing
369-
result2 = find_one_row_debiasingM(Sigma, i, mu, maxiter, soln, gradient, ever_active, nactive) # C function uses 0-based indexing
370-
371-
print('close?')
372-
print(c(sqrt(sum((result$soln-result2$soln)^2)/sum(result$soln^2)), sqrt(sum(result$soln^2)), result2$nactive))
368+
result = find_one_row_debiasingM(Sigma, i, mu, maxiter, 0 * soln, gradient, ever_active, nactive) # C function uses 0-based indexing
369+
#result1 = solve_QP(Sigma, mu, maxiter, soln, linear_func, gradient, ever_active, nactive)
370+
#print("close?")
371+
#print(c(sqrt(sum((result1$soln-result$soln)^2)/sum(result$soln^2)), sum(result$soln^2)))
373372

374373
# Check feasibility
375374

selectiveInference/src/Rcpp-debias.cpp

Lines changed: 65 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,15 @@
22
#include <debias.h> // where find_one_row_void is defined
33

44
// [[Rcpp::export]]
5-
Rcpp::List find_one_row_debiasingM(Rcpp::NumericMatrix Sigma,
6-
double bound,
7-
int maxiter,
8-
Rcpp::NumericVector theta,
9-
Rcpp::NumericVector linear_func,
10-
Rcpp::NumericVector gradient,
11-
Rcpp::IntegerVector ever_active,
12-
Rcpp::IntegerVector nactive
13-
) {
5+
Rcpp::List solve_QP(Rcpp::NumericMatrix Sigma,
6+
double bound,
7+
int maxiter,
8+
Rcpp::NumericVector theta,
9+
Rcpp::NumericVector linear_func,
10+
Rcpp::NumericVector gradient,
11+
Rcpp::IntegerVector ever_active,
12+
Rcpp::IntegerVector nactive
13+
) {
1414

1515
int nrow = Sigma.nrow(); // number of features
1616

@@ -41,14 +41,69 @@ Rcpp::List find_one_row_debiasingM(Rcpp::NumericMatrix Sigma,
4141

4242
// Check whether feasible
4343

44+
int kkt_check = check_KKT_qp(theta.begin(),
45+
gradient.begin(),
46+
nrow,
47+
bound);
48+
49+
return(Rcpp::List::create(Rcpp::Named("soln") = theta,
50+
Rcpp::Named("gradient") = gradient,
51+
Rcpp::Named("linear_func") = linear_func,
52+
Rcpp::Named("iter") = iter,
53+
Rcpp::Named("kkt_check") = kkt_check,
54+
Rcpp::Named("ever_active") = ever_active,
55+
Rcpp::Named("nactive") = nactive));
56+
57+
}
58+
59+
// [[Rcpp::export]]
60+
Rcpp::List find_one_row_debiasingM(Rcpp::NumericMatrix Sigma,
61+
int row, // 0-based
62+
double bound,
63+
int maxiter,
64+
Rcpp::NumericVector theta,
65+
Rcpp::NumericVector gradient,
66+
Rcpp::IntegerVector ever_active,
67+
Rcpp::IntegerVector nactive
68+
) {
69+
70+
int nrow = Sigma.nrow(); // number of features
71+
72+
// Active set
73+
74+
int irow;
75+
76+
// Extract the diagonal
77+
Rcpp::NumericVector Sigma_diag(nrow);
78+
double *sigma_diag_p = Sigma_diag.begin();
79+
80+
for (irow=0; irow<nrow; irow++) {
81+
sigma_diag_p[irow] = Sigma(irow, irow);
82+
}
83+
84+
// Now call our C function
85+
86+
int iter = find_one_row_((double *) Sigma.begin(),
87+
(double *) Sigma_diag.begin(),
88+
(double *) gradient.begin(),
89+
(int *) ever_active.begin(),
90+
(int *) nactive.begin(),
91+
nrow,
92+
bound,
93+
(double *) theta.begin(),
94+
maxiter,
95+
row);
96+
97+
// Check whether feasible
98+
4499
int kkt_check = check_KKT(theta.begin(),
45100
gradient.begin(),
46101
nrow,
102+
row,
47103
bound);
48104

49105
return(Rcpp::List::create(Rcpp::Named("soln") = theta,
50106
Rcpp::Named("gradient") = gradient,
51-
Rcpp::Named("linear_func") = linear_func,
52107
Rcpp::Named("iter") = iter,
53108
Rcpp::Named("kkt_check") = kkt_check,
54109
Rcpp::Named("ever_active") = ever_active,

selectiveInference/src/debias.c

Lines changed: 34 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,6 @@ int check_KKT(double *theta, /* current theta */
9494
// First check inactive
9595

9696
int irow;
97-
int fail = 0;
9897
double tol = 1.e-6;
9998
double *theta_ptr, *gradient_ptr_tmp;
10099
double gradient;
@@ -106,26 +105,26 @@ int check_KKT(double *theta, /* current theta */
106105
// Compute this coordinate of the gradient
107106

108107
gradient = *gradient_ptr_tmp;
109-
if (row == irow) {
110-
gradient -= 1;
111-
}
108+
// if (row == irow) {
109+
// gradient -= 1;
110+
// }
112111

113112
if (*theta_ptr != 0) { // these coordinates of gradients should be equal to -bound
114113
if ((*theta_ptr > 0) && (fabs(gradient + bound) > tol * bound)) {
115-
fail += 1;
114+
return(0);
116115
}
117116
else if ((*theta_ptr < 0) && (fabs(gradient - bound) > tol * bound)) {
118-
fail += 1;
117+
return(0);
119118
}
120119
}
121120
else {
122121
if (fabs(gradient) > (1. + tol) * bound) {
123-
fail += 1;
122+
return(0);
124123
}
125124
}
126125
}
127126

128-
return(fail == 0);
127+
return(1);
129128
}
130129

131130
double update_one_coord(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
@@ -230,16 +229,21 @@ int find_one_row_(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
230229
int icoord = 0;
231230
int iactive = 0;
232231
int *active_ptr;
232+
double old_value, new_value, tol=1.e-5;
233+
234+
int check_objective = 1;
235+
236+
if (check_objective) {
237+
238+
old_value = objective(Sigma_ptr,
239+
ever_active_ptr,
240+
nactive_ptr,
241+
nrow,
242+
row,
243+
bound,
244+
theta);
245+
}
233246

234-
double old_value = objective(Sigma_ptr,
235-
ever_active_ptr,
236-
nactive_ptr,
237-
nrow,
238-
row,
239-
bound,
240-
theta);
241-
double new_value;
242-
double tol=1.e-5;
243247

244248
for (iter=0; iter<maxiter; iter++) {
245249

@@ -299,19 +303,21 @@ int find_one_row_(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
299303
break;
300304
}
301305

302-
new_value = objective(Sigma_ptr,
303-
ever_active_ptr,
304-
nactive_ptr,
305-
nrow,
306-
row,
307-
bound,
308-
theta);
306+
if (check_objective) {
307+
new_value = objective(Sigma_ptr,
308+
ever_active_ptr,
309+
nactive_ptr,
310+
nrow,
311+
row,
312+
bound,
313+
theta);
314+
315+
if (((old_value - new_value) < tol * fabs(new_value)) && (iter > 0)) {
316+
break;
317+
}
309318

310-
if (((old_value - new_value) < tol * fabs(new_value)) && (iter > 0)) {
311-
break;
319+
old_value = new_value;
312320
}
313-
314-
old_value = new_value;
315321
}
316322
return(iter);
317323
}

selectiveInference/src/debias.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,11 +14,29 @@ int solve_qp(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
1414
double *theta, /* current value */
1515
int maxiter);
1616

17+
int check_KKT_qp(double *theta, /* current theta */
18+
double *gradient_ptr, /* Current gradient of quadratic loss */
19+
int nrow, /* how many rows in Sigma */
20+
double bound); /* Lagrange multiplier for \ell_1 */
21+
22+
int find_one_row_(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
23+
double *Sigma_ptr_ptr, /* Diagonal entry of covariance matrix */
24+
double *gradient_ptr, /* Current gradient of quadratic loss */
25+
int *ever_active_ptr, /* Ever active set: 0-based */
26+
int *nactive_ptr, /* Size of ever active set */
27+
int nrow, /* How many rows in Sigma */
28+
double bound, /* feasibility parameter */
29+
double *theta, /* current value */
30+
int maxiter, /* how many iterations */
31+
int row); /* which coordinate to update: 0-based */
32+
1733
int check_KKT(double *theta, /* current theta */
1834
double *gradient_ptr, /* Current gradient of quadratic loss */
1935
int nrow, /* how many rows in Sigma */
36+
int row, /* which row: 0-based */
2037
double bound); /* Lagrange multiplier for \ell_1 */
2138

39+
2240
#ifdef __cplusplus
2341
} /* extern "C" */
2442
#endif /* __cplusplus */

selectiveInference/src/quadratic_program.c

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
#include <math.h> // for fabs
2-
#include <stdio.h>
32

43
// Find an approximate row of \hat{Sigma}^{-1}
54

@@ -82,8 +81,6 @@ int update_ever_active_qp(int coord,
8281
// Add it to the active set and increment the
8382
// number of active variables
8483

85-
fprintf(stderr, "adding %d\n", coord);
86-
8784
ever_active_ptr_tmp = ((int *) ever_active_ptr + *nactive_ptr);
8885
*ever_active_ptr_tmp = coord;
8986
*nactive_ptr += 1;
@@ -99,7 +96,6 @@ int check_KKT_qp(double *theta, /* current theta */
9996
// First check inactive
10097

10198
int irow;
102-
int fail = 0;
10399
double tol = 1.e-6;
104100
double *theta_ptr, *gradient_ptr_tmp;
105101
double gradient;
@@ -228,13 +224,11 @@ int solve_qp(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
228224
int iactive = 0;
229225
int *active_ptr;
230226

231-
int check_objective = 0;
227+
int check_objective = 1;
232228

233229
double old_value, new_value;
234230
double tol=1.e-8;
235231

236-
fprintf(stderr, "%d nactive start\n", *nactive_ptr);
237-
238232
if (check_objective) {
239233

240234
old_value = objective_qp(Sigma_ptr,
@@ -250,7 +244,6 @@ int solve_qp(double *Sigma_ptr, /* A covariance matrix: X^TX/n */
250244

251245
for (iter=0; iter<maxiter; iter++) {
252246

253-
fprintf(stderr, "%d nactive loop \n", *nactive_ptr);
254247
// Update the active variables first
255248

256249
active_ptr = (int *) ever_active_ptr;

0 commit comments

Comments
 (0)