Skip to content
Draft
Show file tree
Hide file tree
Changes from 18 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ joblib
*.pyc
__pycache__
*.egg-info
.coverage
.coverage*

# documentation
doc
Expand Down
97 changes: 0 additions & 97 deletions src/hidimstat/RandomForestModified.py

This file was deleted.

2 changes: 0 additions & 2 deletions src/hidimstat/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from .clustered_inference import clustered_inference, hd_inference
from .desparsified_lasso import desparsified_group_lasso, desparsified_lasso
from .Dnn_learner_single import DnnLearnerSingle
from .ensemble_clustered_inference import ensemble_clustered_inference
from .knockoff_aggregation import knockoff_aggregation
from .knockoffs import model_x_knockoff
Expand All @@ -25,7 +24,6 @@
"dcrt_zero",
"desparsified_lasso",
"desparsified_group_lasso",
"DnnLearnerSingle",
"ensemble_clustered_inference",
"group_reid",
"hd_inference",
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.validation import check_is_fitted

from .utils import (
from ._utils.Dnn import (
create_X_y,
dnn_net,
joblib_ensemble_dnnet,
Expand Down Expand Up @@ -241,6 +241,7 @@ def fit(self, X, y=None):
loss = np.array(res_ens[4])

if self.n_ensemble == 1:
raise Warning("The model can't be fit with n_ensemble = 1")
return [(res_ens[0][0], (res_ens[1][0], res_ens[2][0]))]

# Keeping the optimal subset of DNNs
Expand Down Expand Up @@ -283,6 +284,9 @@ def encode_outcome(self, y, train=True):
y = y.reshape(-1, 1)
if self.problem_type == "regression":
list_y.append(y)
# Encoding the target with the ordinal case
if self.problem_type == "ordinal":
list_y = ordinal_encode(y)

for col in range(y.shape[1]):
if train:
Expand All @@ -291,18 +295,12 @@ def encode_outcome(self, y, train=True):
self.enc_y.append(OneHotEncoder(handle_unknown="ignore"))
curr_y = self.enc_y[col].fit_transform(y[:, [col]]).toarray()
list_y.append(curr_y)

# Encoding the target with the ordinal case
if self.problem_type == "ordinal":
y = ordinal_encode(y)

else:
# Encoding the target with the classification case
if self.problem_type in ("classification", "binary"):
curr_y = self.enc_y[col].transform(y[:, [col]]).toarray()
list_y.append(curr_y)

# TODO: Add the ordinal case
return np.array(list_y)

def hyper_tuning(
Expand Down
5 changes: 5 additions & 0 deletions src/hidimstat/estimator/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Public API of the hidimstat.estimator subpackage: re-export the
# single-DNN learner so callers can do `from hidimstat.estimator import
# DnnLearnerSingle` without knowing the module layout.
from .Dnn_learner_single import DnnLearnerSingle

# Explicitly declare the subpackage's public names.
__all__ = [
    "DnnLearnerSingle",
]
Loading