@@ -50,6 +50,19 @@ class ANN(Approximation):
     def __init__(self, layers, function, stop_training, loss=None,
                  optimizer=torch.optim.Adam, lr=0.001, l2_regularization=0,
                  frequency_print=10, last_identity=True):
+        """
+        Initialize an Artificial Neural Network.
+
+        :param list layers: Ordered list with the number of neurons of each hidden layer.
+        :param function: Activation function(s) for each layer.
+        :param stop_training: Stopping criteria for training (iterations and/or tolerance).
+        :param loss: Loss function to use. Default is torch.nn.MSELoss().
+        :param optimizer: Optimizer class to use. Default is torch.optim.Adam.
+        :param float lr: Learning rate. Default is 0.001.
+        :param float l2_regularization: L2 regularization coefficient. Default is 0.
+        :param int frequency_print: Frequency of loss printing during training. Default is 10.
+        :param bool last_identity: Whether the activation at the output layer is the identity. Default is True.
+        """
         if loss is None:
             loss = torch.nn.MSELoss()
 
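As a quick orientation for the parameters documented above, here is a minimal usage sketch; the import path and the list form of stop_training are assumptions, not confirmed by this diff:

    import torch
    from ezyrb import ANN  # import path assumed

    # Two hidden layers of 10 neurons each, Tanh activation between layers,
    # stopping after 20000 epochs or once the loss drops below 1e-5 (assumed
    # meaning of stop_training); optimizer, lr and loss keep their defaults.
    ann = ANN([10, 10], torch.nn.Tanh(), [20000, 1e-5])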
@@ -121,15 +134,13 @@ def _list_to_sequential(layers, functions):
 
     def _build_model(self, points, values):
         """
-        Build the torch model.
-        Considering the number of neurons per layer (self.layers), a
-        feed-forward NN is defined:
-            - activation function from layer i>=0 to layer i+1:
-              self.function[i]; activation function at the output layer:
-              Identity (by default).
-        :param numpy.ndarray points: the coordinates of the given (training)
-            points.
-        :param numpy.ndarray values: the (training) values in the points.
+        Build the torch neural network model.
+
+        Constructs a feed-forward neural network with the specified layers
+        and activation functions.
+
+        :param numpy.ndarray points: The coordinates of the training points.
+        :param numpy.ndarray values: The training values at the points.
         """
         layers = self.layers.copy()
         layers.insert(0, points.shape[1])
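For context, a rough sketch (not the project's code) of how a list of layer sizes and activation modules can be assembled into the kind of feed-forward torch model that _build_model builds via _list_to_sequential:

    import torch

    def build_sequential(layer_sizes, activations):
        # Pair each Linear(in, out) with its activation; the final activation
        # is the identity when the output should be left untouched, matching
        # the last_identity flag above.
        modules = []
        for i, activation in enumerate(activations):
            modules.append(torch.nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
            modules.append(activation)
        return torch.nn.Sequential(*modules)

    # e.g. 2 input features, two hidden layers of 10 neurons, 1 output value
    model = build_sequential([2, 10, 10, 1],
                             [torch.nn.Tanh(), torch.nn.Tanh(), torch.nn.Identity()])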