|
| 1 | +# Copyright (C) 2018-2019 Intel Corporation |
| 2 | +# |
| 3 | +# SPDX-License-Identifier: MIT |
| 4 | + |
| 5 | + |
| 6 | +import numpy as np |
| 7 | +import bench |
| 8 | +from daal4py import decision_forest_classification_training |
| 9 | +from daal4py import decision_forest_classification_prediction |
| 10 | +from daal4py import engines_mt2203 |
| 11 | +from daal4py.sklearn.utils import getFPType |
| 12 | + |
| 13 | + |
def df_clsf_fit(X, y, n_classes, n_trees=100, seed=12345,
                n_features_per_node=0, max_depth=0, min_impurity=0,
                bootstrap=True, verbose=False):
    """Train a daal4py decision-forest classifier on (X, y).

    Returns the daal4py training result object (its ``.model`` is what
    prediction consumes).  All features of X are considered at each node
    unless ``0 < n_features_per_node < X.shape[1]``.
    """
    fptype = getFPType(X)

    # Use the full feature count unless the caller asked for a smaller,
    # valid per-node subset.
    if 0 < n_features_per_node < X.shape[1]:
        features_per_node = n_features_per_node
    else:
        features_per_node = X.shape[1]

    rng_engine = engines_mt2203(seed=seed, fptype=fptype)

    trainer = decision_forest_classification_training(
        nClasses=n_classes,
        fptype=fptype,
        method='defaultDense',
        nTrees=n_trees,
        observationsPerTreeFraction=1.,
        featuresPerNode=features_per_node,
        maxTreeDepth=max_depth,
        minObservationsInLeafNode=1,
        engine=rng_engine,
        impurityThreshold=min_impurity,
        varImportance='MDI',
        resultsToCompute='',
        memorySavingMode=False,
        bootstrap=bootstrap
    )

    return trainer.compute(X, y)
| 46 | + |
| 47 | + |
def df_clsf_predict(X, training_result, n_classes, verbose=False):
    """Predict class labels for X using a trained decision-forest model.

    ``training_result`` is the object returned by ``df_clsf_fit``; only
    its ``.model`` attribute is used.  Returns the prediction table.
    """
    predictor = decision_forest_classification_prediction(
        nClasses=n_classes,
        fptype='float',  # we give float here specifically to match sklearn
    )

    prediction_result = predictor.compute(X, training_result.model)

    return prediction_result.prediction
| 58 | + |
| 59 | + |
if __name__ == '__main__':
    import argparse

    def getArguments(argParser):
        """Register every benchmark option on argParser and parse argv."""
        argParser.add_argument('--prefix', type=str, default='daal4py',
                               help="Identifier of the bench being executed")
        argParser.add_argument('--fileX', type=argparse.FileType('r'),
                               help="Input file with features")
        argParser.add_argument('--fileY', type=argparse.FileType('r'),
                               help="Input file with labels")
        argParser.add_argument('--num-trees', type=int, default=100,
                               help="Number of trees in decision forest")
        argParser.add_argument('--max-features', type=int, default=0,
                               help="Max features used to build trees")
        argParser.add_argument('--max-depth', type=int, default=0,
                               help="Maximal depth of trees constructed")

        argParser.add_argument('--use-sklearn-class', action='store_true',
                               help="Force use of sklearn.ensemble.RandomForestClassifier")
        argParser.add_argument('--seed', type=int, default=12345,
                               help="Seed to pass as random_state to the class")

        argParser.add_argument('--fit-repetitions', dest="fit_inner_reps", type=int, default=1,
                               help="Count of operations whose execution time is being clocked, average time reported")
        argParser.add_argument('--fit-samples', dest="fit_outer_reps", type=int, default=5,
                               help="Count of repetitions of time measurements to collect statistics ")
        argParser.add_argument('--predict-repetitions', dest="predict_inner_reps", type=int, default=50,
                               help="Count of operations whose execution time is being clocked, average time reported")
        argParser.add_argument('--predict-samples', dest="predict_outer_reps", type=int, default=5,
                               help="Count of repetitions of time measurements to collect statistics ")

        argParser.add_argument('--verbose', action="store_true",
                               help="Whether to print additional information.")
        argParser.add_argument('--header', action="store_true",
                               help="Whether to print header.")
        argParser.add_argument('--num-threads', type=int, dest="num_threads", default=0,
                               help="Number of threads for DAAL to use")

        return argParser.parse_args()

    argParser = argparse.ArgumentParser(prog="df_clsf_bench.py",
                                        description="Execute RandomForest classification",
                                        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    args = getArguments(argParser)
    num_threads, daal_version = bench.prepare_benchmark(args)

    import timeit

    if args.fileX is None or args.fileY is None:
        # BUG FIX: the previous message told users to try a "--generate"
        # option that this script never registers.
        argParser.error("Please specify data for the algorithm to train on "
                        "using the --fileX and --fileY options.")

    # argParser.error() exits, so reaching here means both files are set.
    X = np.load(args.fileX.name)
    y = np.load(args.fileY.name)[:, np.newaxis]  # daal4py expects a 2-D label column

    if args.verbose:
        print("@ {", end='')
        print(" FIT_SAMPLES : {0}, FIT_REPETITIONS : {1}, PREDICT_SAMPLES: {2}, PREDICT_REPETITIONS: {3}".format(
            args.fit_outer_reps, args.fit_inner_reps, args.predict_outer_reps, args.predict_inner_reps
        ), end='')
        print("}")

    if args.verbose:
        print("@ {", end='')
        print("'n_estimators': {0}, 'max_depth': {1}, 'max_features': {2}, 'random_state': {3}".format(
            args.num_trees, args.max_depth, args.max_features, args.seed
        ), end='')
        print("}")

    # NOTE(review): assumes labels form a contiguous integer range
    # [min(y), max(y)] — gaps would overcount classes; verify inputs.
    n_classes = np.max(y) - np.min(y) + 1

    # Each outer sample clocks fit_inner_reps fits and records their mean.
    fit_times = []
    for outer_it in range(args.fit_outer_reps):
        t0 = timeit.default_timer()
        for _ in range(args.fit_inner_reps):
            result = df_clsf_fit(X, y, n_classes, n_trees=args.num_trees,
                                 seed=args.seed,
                                 n_features_per_node=args.max_features,
                                 max_depth=args.max_depth, verbose=args.verbose)
        t1 = timeit.default_timer()
        fit_times.append((t1 - t0) / args.fit_inner_reps)

    # Same sampling scheme for prediction, reusing the last trained model.
    predict_times = []
    for outer_it in range(args.predict_outer_reps):
        t0 = timeit.default_timer()
        for _ in range(args.predict_inner_reps):
            y_pred = df_clsf_predict(X, result, n_classes,
                                     verbose=args.verbose)
        t1 = timeit.default_timer()
        predict_times.append((t1 - t0) / args.predict_inner_reps)

    # Accuracy on the training data itself (matches the sklearn bench).
    from sklearn.metrics import accuracy_score
    acc = accuracy_score(y, y_pred)

    num_classes = np.unique(y).shape[0]
    if args.header:
        print("prefix_ID,function,threads,rows,features,fit,predict,accuracy,classes")
    print(",".join((
        args.prefix,
        'df_clsf',
        str(num_threads),
        str(X.shape[0]),
        str(X.shape[1]),
        "{0:.3f}".format(min(fit_times)),
        "{0:.3f}".format(min(predict_times)),
        "{0:.4f}".format(100*acc),
        str(num_classes)
    )))

    if args.verbose:
        print("")
        print("@ Median of {0} runs of .fit averaging over {1} executions is {2:3.3f}".format(args.fit_outer_reps, args.fit_inner_reps, np.percentile(fit_times, 50)))
        print("@ Median of {0} runs of .predict averaging over {1} executions is {2:3.3f}".format(args.predict_outer_reps, args.predict_inner_reps, np.percentile(predict_times, 50)))
0 commit comments