|
| 1 | +import itertools |
| 2 | +import pickle |
| 3 | + |
| 4 | +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis |
| 5 | +from sklearn.linear_model import LinearRegression |
| 6 | + |
| 7 | +from fastcan import FastCan |
| 8 | + |
| 9 | +from .common import Benchmark, get_estimator_path |
| 10 | +from .datasets import _digits_dataset, _synth_regression_dataset |
| 11 | + |
| 12 | + |
class FastCanBenchmark(Benchmark):
    """
    Benchmarks for FastCan.

    asv conventions used here: ``setup_cache`` runs once per class and
    pickles fitted estimators to disk; method-name prefixes select the
    measurement (``time_`` wall-clock, ``peakmem_`` peak memory,
    ``track_`` an arbitrary tracked value).
    """

    # Each benchmark method is run once per (task, alg) combination.
    param_names = ["task", "alg"]
    params = (["classif", "reg"], ["h", "eta"])

    def setup_cache(self):
        """Pickle a fitted estimator for all combinations of parameters."""
        # This is run once per benchmark class; the pickled estimators are
        # presumably reloaded by the base-class ``setup`` into
        # ``self.estimator`` / ``self.X`` / ``self.y`` — not visible here.
        param_grid = list(itertools.product(*self.params))

        for params in param_grid:
            _, alg = params
            X, _, y, _ = self.make_data(params)

            estimator = FastCan(
                n_features_to_select=20,
                # "h" -> Householder-based algorithm (eta=False);
                # anything else -> eta-cosine algorithm (eta=True).
                eta=(alg != "h"),
            )
            estimator.fit(X, y)

            est_path = get_estimator_path(self, params)
            with est_path.open(mode="wb") as f:
                pickle.dump(estimator, f)

    def make_data(self, params):
        """Return (X, X_val, y, y_val) for the task encoded in *params*."""
        task, _ = params
        if task == "classif":
            return _digits_dataset()
        return _synth_regression_dataset()

    def time_fit(self, *args):
        """Wall-clock time of fitting the selector."""
        self.estimator.fit(self.X, self.y)

    def peakmem_fit(self, *args):
        """Peak memory of fitting the selector."""
        self.estimator.fit(self.X, self.y)

    def track_train_score(self, *args):
        """Score of a downstream model on the selected training features."""
        task, _ = args
        X_t = self.estimator.transform(self.X)
        if task == "classif":
            clf = LinearDiscriminantAnalysis()
            clf.fit(X_t, self.y)
            return float(clf.score(X_t, self.y))
        else:
            reg = LinearRegression()
            reg.fit(X_t, self.y)
            return float(reg.score(X_t, self.y))

    def track_test_score(self, *args):
        """Score of a downstream model on the selected validation features."""
        task, _ = args
        X_t = self.estimator.transform(self.X_val)
        if task == "classif":
            clf = LinearDiscriminantAnalysis()
            clf.fit(X_t, self.y_val)
            return float(clf.score(X_t, self.y_val))
        else:
            reg = LinearRegression()
            reg.fit(X_t, self.y_val)
            return float(reg.score(X_t, self.y_val))

    def time_transform(self, *args):
        """Wall-clock time of transforming with the fitted selector."""
        self.estimator.transform(self.X)

    def peakmem_transform(self, *args):
        """Peak memory of transforming with the fitted selector."""
        self.estimator.transform(self.X)
0 commit comments