
Commit 509fa95

[DOC] shorten lines in the examples (#421)
1 parent 1934f25

8 files changed: +92 −17 lines
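
Every hunk below applies the same mechanical fix: a call or signature that overran the examples' line-length limit is reflowed to one argument per line, each line ending in a trailing comma. A minimal runnable sketch of the pattern, based on the train_test_split hunk in examples/plot_conditional_vs_marginal_xor_data.py (the toy X and y arrays are illustrative, not taken from that example):

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)  # toy feature matrix: 10 samples, 2 features
y = np.arange(10)  # toy targets

# Before: a single call that exceeds the line-length limit.
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# After: one argument per line with a trailing comma, the style applied
# throughout this commit.
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    random_state=0,
)

The trailing comma means a later argument addition shows up as a one-line diff.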

examples/plot_2D_simulation_example.py
Lines changed: 19 additions & 3 deletions

@@ -184,9 +184,18 @@ def weight_map_2D_extended(shape, roi_size, delta):
 
 
 # compute desparsified lasso
-beta_hat, sigma_hat, precision_diagonal = desparsified_lasso(X_init, y, n_jobs=n_jobs)
+beta_hat, sigma_hat, precision_diagonal = desparsified_lasso(
+    X_init,
+    y,
+    n_jobs=n_jobs,
+)
 pval, pval_corr, one_minus_pval, one_minus_pval_corr, cb_min, cb_max = (
-    desparsified_lasso_pvalue(X_init.shape[0], beta_hat, sigma_hat, precision_diagonal)
+    desparsified_lasso_pvalue(
+        X_init.shape[0],
+        beta_hat,
+        sigma_hat,
+        precision_diagonal,
+    )
 )
 
 # compute estimated support (first method)

@@ -215,7 +224,14 @@ def weight_map_2D_extended(shape, roi_size, delta):
     X_init, y, ward, n_clusters, scaler_sampling=StandardScaler()
 )
 beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr = (
-    clustered_inference_pvalue(n_samples, False, ward_, beta_hat, theta_hat, omega_diag)
+    clustered_inference_pvalue(
+        n_samples,
+        False,
+        ward_,
+        beta_hat,
+        theta_hat,
+        omega_diag,
+    )
 )
 
 # compute estimated support (first method)

examples/plot_conditional_vs_marginal_xor_data.py
Lines changed: 6 additions & 1 deletion

@@ -30,7 +30,12 @@
     np.linspace(np.min(X[:, 1]), np.max(X[:, 1]), 100),
 )
 
-X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
+X_train, X_test, y_train, y_test = train_test_split(
+    X,
+    Y,
+    test_size=0.2,
+    random_state=0,
+)
 model = SVC(kernel="rbf", random_state=0)
 model.fit(X_train, y_train)
 Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])

examples/plot_dcrt_example.py
Lines changed: 12 additions & 2 deletions

@@ -70,7 +70,11 @@
 
 ## dcrt Random Forest ##
 d0crt_random_forest = D0CRT(
-    estimator=RandomForestRegressor(n_estimators=100, random_state=42, n_jobs=1),
+    estimator=RandomForestRegressor(
+        n_estimators=100,
+        random_state=42,
+        n_jobs=1,
+    ),
     screening_threshold=None,
 )
 d0crt_random_forest.fit_importance(X, y)

@@ -92,7 +96,13 @@
 
 _, ax = plt.subplots(nrows=1, ncols=2)
 sns.swarmplot(data=df_plot, x="model", y="type-1 error", ax=ax[0], hue="model")
-ax[0].axhline(alpha, linewidth=1, color="tab:red", ls="--", label="Nominal Level")
+ax[0].axhline(
+    alpha,
+    linewidth=1,
+    color="tab:red",
+    ls="--",
+    label="Nominal Level",
+)
 ax[0].legend()
 ax[0].set_ylim(-0.01)
 
examples/plot_diabetes_variable_importance_example.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,7 +118,9 @@
118118
cfi = CFI(
119119
estimator=regressor_list[i],
120120
imputation_model_continuous=RidgeCV(alphas=np.logspace(-3, 3, 10)),
121-
imputation_model_categorical=LogisticRegressionCV(Cs=np.logspace(-2, 2, 10)),
121+
imputation_model_categorical=LogisticRegressionCV(
122+
Cs=np.logspace(-2, 2, 10),
123+
),
122124
# covariate_estimator=HistGradientBoostingRegressor(random_state=0,),
123125
n_permutations=50,
124126
random_state=0,

examples/plot_importance_classification_iris.py
Lines changed: 13 additions & 2 deletions

@@ -60,7 +60,15 @@
 # require a K-fold cross-fitting. Computing the importance for each fold is
 # embarrassingly parallel. For this reason, we encapsulate the main computations in a
 # function and use joblib to parallelize the computation.
-def run_one_fold(X, y, model, train_index, test_index, vim_name="CFI", groups=None):
+def run_one_fold(
+    X,
+    y,
+    model,
+    train_index,
+    test_index,
+    vim_name="CFI",
+    groups=None,
+):
     model_c = clone(model)
     model_c.fit(X[train_index], y[train_index])
     y_pred = model_c.predict(X[test_index])

@@ -101,7 +109,10 @@ def run_one_fold(X, y, model, train_index, test_index, vim_name="CFI", groups=None):
             "importance": importance,
             "vim": vim_name,
             "model": model_name,
-            "score": balanced_accuracy_score(y_true=y[test_index], y_pred=y_pred),
+            "score": balanced_accuracy_score(
+                y_true=y[test_index],
+                y_pred=y_pred,
+            ),
         }
     )
 

examples/plot_knockoffs_wisconsin.py
Lines changed: 4 additions & 1 deletion

@@ -106,7 +106,10 @@
 lasso_noisy.fit(noisy_train, y_train)
 y_pred_noisy = lasso_noisy.predict(noisy_test)
 print(
-    f"Accuracy of Lasso on test set with noise: {lasso_noisy.score(noisy_test, y_test):.3f}"
+    (
+        "Accuracy of Lasso on test set with noise: "
+        f"{lasso_noisy.score(noisy_test, y_test):.3f}"
+    )
 )
 
 selected_mask = [

examples/plot_model_agnostic_importance.py
Lines changed: 28 additions & 6 deletions

@@ -74,7 +74,10 @@
 d0crt_linear.fit_importance(X, y)
 pval_dcrt_linear = d0crt_linear.pvalues_
 
-d0crt_non_linear = D0CRT(estimator=clone(non_linear_model), screening_threshold=None)
+d0crt_non_linear = D0CRT(
+    estimator=clone(non_linear_model),
+    screening_threshold=None,
+)
 d0crt_non_linear.fit_importance(X, y)
 pval_dcrt_non_linear = d0crt_non_linear.pvalues_
 

@@ -97,7 +100,10 @@
     linear_model_.fit(X[train], y[train])
 
     vim_linear = LOCO(
-        estimator=linear_model_, loss=log_loss, method="predict_proba", n_jobs=2
+        estimator=linear_model_,
+        loss=log_loss,
+        method="predict_proba",
+        n_jobs=2,
     )
     vim_non_linear = LOCO(
         estimator=non_linear_model_,

@@ -108,7 +114,9 @@
     vim_linear.fit(X[train], y[train])
     vim_non_linear.fit(X[train], y[train])
 
-    importances_linear.append(vim_linear.importance(X[test], y[test])["importance"])
+    importances_linear.append(
+        vim_linear.importance(X[test], y[test])["importance"],
+    )
     importances_non_linear.append(
         vim_non_linear.importance(X[test], y[test])["importance"]
     )

@@ -118,15 +126,25 @@
 # To select variables using LOCO, we compute the p-values using a t-test over the
 # importance scores.
 
-_, pval_linear = ttest_1samp(importances_linear, 0, axis=0, alternative="greater")
+_, pval_linear = ttest_1samp(
+    importances_linear,
+    0,
+    axis=0,
+    alternative="greater",
+)
 _, pval_non_linear = ttest_1samp(
     importances_non_linear, 0, axis=0, alternative="greater"
 )
 
 df_pval = pd.DataFrame(
     {
         "pval": np.hstack(
-            [pval_dcrt_linear, pval_dcrt_non_linear, pval_linear, pval_non_linear]
+            [
+                pval_dcrt_linear,
+                pval_dcrt_non_linear,
+                pval_linear,
+                pval_non_linear,
+            ]
         ),
         "method": ["d0CRT-linear"] * 2
         + ["d0CRT-non-linear"] * 2

@@ -152,7 +170,11 @@
 )
 ax.set_xlabel("-$\\log_{10}(pval)$")
 ax.axvline(
-    -np.log10(0.05), color="k", lw=3, linestyle="--", label="-$\\log_{10}(0.05)$"
+    -np.log10(0.05),
+    color="k",
+    lw=3,
+    linestyle="--",
+    label="-$\\log_{10}(0.05)$",
 )
 ax.legend()
 plt.show()

examples/plot_pitfalls_permutation_importance.py
Lines changed: 7 additions & 1 deletion

@@ -40,7 +40,13 @@
 dataset = fetch_california_housing()
 X_, y_ = dataset.data, dataset.target
 # only use 2/3 of samples to speed up the example
-X, _, y, _ = train_test_split(X_, y_, test_size=0.6667, random_state=0, shuffle=True)
+X, _, y, _ = train_test_split(
+    X_,
+    y_,
+    test_size=0.6667,
+    random_state=0,
+    shuffle=True,
+)
 
 redundant_coef = rng.choice(np.arange(X.shape[1]), size=(3,), replace=False)
 X_spurious = X[:, redundant_coef].sum(axis=1)
