Skip to content

Commit ccc49b8

Browse files
ajdapretnarkernc
authored andcommitted
Neural Network widget (#2553)
* Neural Network widget * NN documentation * fix import order: python stdlib imports come first
1 parent 738c7e1 commit ccc49b8

File tree

10 files changed

+244
-17
lines changed

10 files changed

+244
-17
lines changed

Orange/tests/test_neural_network.py

Lines changed: 17 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
from Orange.data import Table
77
from Orange.classification import NNClassificationLearner
8-
from Orange.modelling import NNLearner
8+
from Orange.modelling import NNLearner, ConstantLearner
99
from Orange.regression import NNRegressionLearner
1010
from Orange.evaluation import CA, CrossValidation, MSE
1111

@@ -15,22 +15,30 @@ class TestNNLearner(unittest.TestCase):
1515
def setUpClass(cls):
    # Shared, read-only fixtures: load both data sets once per test class
    # and reuse a single fit-anything NNLearner across the tests below.
    cls.iris = Table('iris')
    cls.housing = Table('housing')
    cls.learner = NNLearner()
1819

1920
def test_NN_classification(self):
    """3-fold CV accuracy on iris should be good but not suspiciously perfect."""
    learners = [NNClassificationLearner()]
    results = CrossValidation(self.iris, learners, k=3)
    score = CA(results)
    self.assertGreater(score, 0.8)
    self.assertLess(score, 0.99)
2225

2326
def test_NN_regression(self):
    """NN regression on housing: MSE below 35 and better than a constant baseline."""
    baseline = ConstantLearner()
    results = CrossValidation(self.housing, [NNRegressionLearner(), baseline],
                              k=3)
    scores = MSE()(results)
    # scores[0] is the NN learner, scores[1] the constant baseline.
    self.assertLess(scores[0], 35)
    self.assertLess(scores[0], scores[1])
2734

2835
def test_NN_model(self):
    """The shared NNLearner handles both classification and regression data."""
    clf_results = CrossValidation(self.iris, [self.learner], k=3)
    self.assertGreater(CA(clf_results), 0.90)
    reg_results = CrossValidation(self.housing, [self.learner], k=3)
    self.assertLess(MSE()(reg_results)[0], 35)
3442

3543
def test_NN_classification_predict_single_instance(self):
3644
lrn = NNClassificationLearner()

Orange/widgets/model/icons/NN.svg

Lines changed: 48 additions & 0 deletions
Loading
Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
import re
2+
import sys
3+
4+
from AnyQt.QtWidgets import QApplication
5+
from AnyQt.QtCore import Qt
6+
7+
from Orange.data import Table
8+
from Orange.modelling import NNLearner
9+
from Orange.widgets import gui
10+
from Orange.widgets.settings import Setting
11+
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
12+
13+
14+
class OWNNLearner(OWBaseLearner):
    """Widget wrapping sklearn's multi-layer perceptron via NNLearner."""
    name = "Neural Network"
    description = "A multi-layer perceptron (MLP) algorithm with " \
                  "backpropagation."
    icon = "icons/NN.svg"
    priority = 90

    LEARNER = NNLearner

    # sklearn parameter values and their matching human-readable combo labels
    # (selected by index, so the two lists must stay in lockstep).
    activation = ["identity", "logistic", "tanh", "relu"]
    act_lbl = ["Identity", "Logistic", "tanh", "ReLu"]
    solver = ["lbfgs", "sgd", "adam"]
    solv_lbl = ["L-BFGS-B", "SGD", "Adam"]

    learner_name = Setting("Neural Network")
    hidden_layers_input = Setting("100,")
    activation_index = Setting(3)   # default: relu
    solver_index = Setting(2)       # default: adam
    alpha = Setting(0.0001)         # L2 penalty
    max_iterations = Setting(200)

    def add_main_layout(self):
        """Build the "Network" parameter box in the control area."""
        box = gui.vBox(self.controlArea, "Network")
        self.hidden_layers_edit = gui.lineEdit(
            box, self, "hidden_layers_input", label="Neurons per hidden layer:",
            orientation=Qt.Horizontal, callback=self.settings_changed,
            tooltip="A list of integers defining neurons. Length of list "
                    "defines the number of layers. E.g. 4, 2, 2, 3.",
            placeholderText="e.g. 100,")
        self.activation_combo = gui.comboBox(
            box, self, "activation_index", orientation=Qt.Horizontal,
            # Pass the label list directly; the [i for i in ...] copy was
            # redundant (gui.comboBox only reads the items).
            label="Activation:", items=self.act_lbl,
            callback=self.settings_changed)
        self.solver_combo = gui.comboBox(
            box, self, "solver_index", orientation=Qt.Horizontal,
            label="Solver:", items=self.solv_lbl,
            callback=self.settings_changed)
        self.alpha_spin = gui.doubleSpin(
            box, self, "alpha", 1e-5, 1.0, 1e-2,
            label="Alpha:", decimals=5, alignment=Qt.AlignRight,
            callback=self.settings_changed, controlWidth=80)
        self.max_iter_spin = gui.spin(
            box, self, "max_iterations", 10, 300, step=10,
            label="Max iterations:", orientation=Qt.Horizontal,
            alignment=Qt.AlignRight, callback=self.settings_changed,
            controlWidth=80)

    def create_learner(self):
        """Return an NNLearner configured from the current widget settings."""
        return self.LEARNER(
            hidden_layer_sizes=self.get_hidden_layers(),
            activation=self.activation[self.activation_index],
            solver=self.solver[self.solver_index],
            alpha=self.alpha,
            max_iter=self.max_iterations,
            preprocessors=self.preprocessors)

    def get_learner_parameters(self):
        """Return (label, value) pairs shown in the widget's report."""
        return (("Hidden layers", ', '.join(map(str, self.get_hidden_layers()))),
                ("Activation", self.act_lbl[self.activation_index]),
                ("Solver", self.solv_lbl[self.solver_index]),
                ("Alpha", self.alpha),
                ("Max iterations", self.max_iterations))

    def get_hidden_layers(self):
        """Parse the hidden-layers line edit into a tuple of ints.

        Falls back to the default (100,) — and resets the line edit to
        match — when the input contains no integers at all.
        """
        layers = tuple(map(int, re.findall(r'\d+', self.hidden_layers_input)))
        if not layers:
            layers = (100,)
            self.hidden_layers_edit.setText("100,")
        return layers
83+
84+
85+
if __name__ == "__main__":
    # Ad-hoc manual test: show the widget on iris (or a data set given
    # on the command line) and persist its settings on exit.
    app = QApplication(sys.argv)
    widget = OWNNLearner()
    data = Table(sys.argv[1] if len(sys.argv) > 1 else 'iris')
    widget.set_data(data)
    widget.show()
    app.exec_()
    widget.saveSettings()
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from Orange.widgets.model.owneuralnetwork import OWNNLearner
2+
from Orange.widgets.tests.base import WidgetTest, WidgetLearnerTestMixin
3+
4+
5+
class TestOWNeuralNetwork(WidgetTest, WidgetLearnerTestMixin):
    """GUI tests for the Neural Network widget (cases come from the mixin)."""

    def setUp(self):
        # auto_apply is disabled so tests decide when the learner is emitted.
        self.widget = self.create_widget(
            OWNNLearner, stored_settings={"auto_apply": False})
        self.init()

doc/visual-programming/source/index.rst

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -80,19 +80,20 @@ Model
8080
.. toctree::
8181
:maxdepth: 1
8282

83-
widgets/model/naivebayes
84-
widgets/model/logisticregression
85-
widgets/model/tree
86-
widgets/model/knn
87-
widgets/model/loadmodel
8883
widgets/model/constant
84+
widgets/model/cn2ruleinduction
85+
widgets/model/knn
86+
widgets/model/tree
8987
widgets/model/randomforest
90-
widgets/model/savemodel
9188
widgets/model/svm
92-
widgets/model/cn2ruleinduction
89+
widgets/model/linearregression
90+
widgets/model/logisticregression
91+
widgets/model/naivebayes
9392
widgets/model/adaboost
93+
widgets/model/neuralnetwork
9494
widgets/model/stochasticgradient
95-
widgets/model/linearregression
95+
widgets/model/loadmodel
96+
widgets/model/savemodel
9697

9798

9899
Unsupervised
2.47 KB
Loading
178 KB
Loading
88.9 KB
Loading
20.4 KB
Loading
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
Neural Network
2+
==============
3+
4+
.. figure:: icons/nn.png
5+
6+
A multi-layer perceptron (MLP) algorithm with backpropagation.
7+
8+
Signals
9+
-------
10+
11+
**Inputs**:
12+
13+
- **Data**
14+
15+
A data set
16+
17+
- **Preprocessor**
18+
19+
Preprocessing method(s)
20+
21+
**Outputs**:
22+
23+
- **Learner**
24+
25+
An MLP learning algorithm with settings as specified in the dialog.
26+
27+
- **Model**
28+
29+
A trained model. Output signal sent only if input *Data* is present.
30+
31+
Description
32+
-----------
33+
34+
The **Neural Network** widget uses sklearn's `Multi\-layer Perceptron algorithm <http://scikit-learn.org/stable/modules/neural_networks_supervised.html>`_ that can learn non-linear models as well as linear.
35+
36+
.. figure:: images/NeuralNetwork-stamped.png
37+
38+
1. A name under which it will appear in other widgets. The default name is
39+
"Neural Network".
40+
2. Set model parameters:
41+
- Neurons per hidden layer: a comma-separated list in which the i-th element is the number of neurons in the i-th hidden layer. E.g. a neural network with 3 hidden layers can be defined as 2, 3, 2.
42+
- Activation function for the hidden layer:
43+
- Identity: no-op activation, useful to implement linear bottleneck
44+
- Logistic: the logistic sigmoid function
45+
- tanh: the hyperbolic tan function
46+
- ReLu: the rectified linear unit function
47+
- Solver for weight optimization:
48+
- L-BFGS-B: an optimizer in the family of quasi-Newton methods
49+
- SGD: stochastic gradient descent
50+
- Adam: stochastic gradient-based optimizer
51+
- Alpha: L2 penalty (regularization term) parameter
52+
- Max iterations: maximum number of iterations
53+
54+
Other parameters are set to `sklearn's defaults <http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html>`_.
55+
56+
3. Produce a report.
57+
4. When the box is ticked (*Apply Automatically*), the widget will
58+
communicate changes automatically. Alternatively, click *Apply*.
59+
60+
Examples
61+
--------
62+
63+
The first example is a classification task on the *iris* data set. We compare the results of **Neural Network** with the :doc:`Logistic Regression <../model/logisticregression>`.
64+
65+
.. figure:: images/NN-Example-Test.png
66+
67+
The second example is a prediction task, still using the *iris* data. This workflow shows how to use the *Learner* output. We input the **Neural Network** prediction model into :doc:`Predictions <../evaluation/predictions>` and observe the predicted values.
68+
69+
.. figure:: images/NN-Example-Predict.png

0 commit comments

Comments
 (0)