|
19 | 19 | from hidimstat import CFI |
20 | 20 |
|
21 | 21 | # %% |
22 | | -# To solve the XOR problem, we will use a Support Vector Classier (SVC) with Radial Basis Function (RBF) kernel. The decision function of |
23 | | -# the fitted model shows that the model is able to separate the two classes. |
| 22 | +# To solve the XOR problem, we will use a Support Vector Classifier (SVC) with Radial Basis Function (RBF) kernel.
| 23 | +# |
24 | 24 | rng = np.random.RandomState(0) |
25 | 25 | X = rng.randn(400, 2) |
26 | 26 | Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0).astype(int) |
|
38 | 38 | ) |
39 | 39 | model = SVC(kernel="rbf", random_state=0) |
40 | 40 | model.fit(X_train, y_train) |
41 | | -Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()]) |
42 | 41 |
|
43 | 42 |
|
44 | 43 | # %% |
45 | 44 | # Visualizing the decision function of the SVC |
46 | 45 | # -------------------------------------------- |
| 46 | +# Let's plot the decision function of the fitted model |
| 47 | +# to confirm that the model is able to separate the two classes. |
| 48 | + |
| 49 | +Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()]) |
| 50 | + |
47 | 51 | fig, ax = plt.subplots() |
48 | 52 | ax.imshow( |
49 | 53 | Z.reshape(xx.shape), |
|
86 | 90 | # :math:`Y \perp\!\!\!\perp X^1 | X^2`). |
87 | 91 | cv = KFold(n_splits=5, shuffle=True, random_state=0) |
88 | 92 | clf = SVC(kernel="rbf", random_state=0) |
89 | | -# Compute marginal importance using univariate models |
| 93 | + |
| 94 | +# %% |
| 95 | +# Compute marginal importance using univariate models. |
90 | 96 | marginal_scores = [] |
91 | 97 | for i in range(X.shape[1]): |
92 | 98 | feat_scores = [] |
|
101 | 107 | univariate_model.fit(X_train_univariate, y_train) |
102 | 108 |
|
103 | 109 | feat_scores.append(univariate_model.score(X_test_univariate, y_test)) |
| 110 | + |
104 | 111 | marginal_scores.append(feat_scores) |
105 | 112 |
|
106 | 113 | # %% |
107 | | - |
| 114 | +# Compute the conditional importance using the CFI class. |
108 | 115 | importances = [] |
109 | 116 | for i, (train_index, test_index) in enumerate(cv.split(X)): |
110 | 117 | X_train, X_test = X[train_index], X[test_index] |
|
132 | 139 | # --------------------------------- |
133 | 140 | # We will use boxplots to visualize the distribution of the importance scores. |
134 | 141 | fig, axes = plt.subplots(1, 2, sharey=True, figsize=(6, 2.5)) |
| 142 | + |
135 | 143 | # Marginal scores boxplot |
136 | 144 | sns.boxplot( |
137 | 145 | data=np.array(marginal_scores).T, |
|
0 commit comments