@@ -2329,20 +2329,43 @@ namespace dlib
 
     template <
         unsigned long num_outputs_,
-        linear_bias_mode bias_mode_
+        linear_bias_mode bias_mode_ = LINEAR_HAS_BIAS
         >
     class linear_
     {
         static_assert(num_outputs_ > 0, "The number of outputs from a linear_ layer must be > 0");
 
     public:
-        linear_() :
+        explicit linear_() :
             num_inputs(0),
             num_outputs(num_outputs_),
             learning_rate_multiplier(1),
             bias_mode(bias_mode_) {
         }
 
+        linear_(const linear_& other) :
+            num_outputs(other.num_outputs),
+            num_inputs(other.num_inputs),
+            learning_rate_multiplier(other.learning_rate_multiplier),
+            bias_mode(other.bias_mode),
+            params(other.params),
+            weights(other.weights),
+            biases(other.biases) {
+        }
+
+        linear_& operator=(const linear_& other) {
+            if (this != &other) {
+                num_outputs = other.num_outputs;
+                num_inputs = other.num_inputs;
+                learning_rate_multiplier = other.learning_rate_multiplier;
+                bias_mode = other.bias_mode;
+                params = other.params;
+                weights = other.weights;
+                biases = other.biases;
+            }
+            return *this;
+        }
+
         double get_learning_rate_multiplier() const { return learning_rate_multiplier; }
         void set_learning_rate_multiplier(double val) { learning_rate_multiplier = val; }
 
@@ -2515,7 +2538,7 @@ namespace dlib
         unsigned long num_outputs,
         typename SUBNET
         >
-    using linear = add_layer<linear_<num_outputs, LINEAR_HAS_BIAS>, SUBNET>;
+    using linear = add_layer<linear_<num_outputs>, SUBNET>;
 
     template <
         unsigned long num_outputs,
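A minimal usage sketch of what these two hunks mean for callers, assuming dlib's DNN headers are available and that linear_bias_mode also defines a LINEAR_NO_BIAS enumerator (suggested by the companion no-bias alias that follows the linear one); the output sizes are arbitrary illustration values, not taken from the diff.

    #include <dlib/dnn.h>

    int main()
    {
        // With the defaulted template parameter, linear_<16> is shorthand for
        // linear_<16, dlib::LINEAR_HAS_BIAS>, which is why the alias in the
        // second hunk can drop the explicit bias mode.
        dlib::linear_<16> with_bias;                      // bias by default
        dlib::linear_<16, dlib::LINEAR_NO_BIAS> no_bias;  // assumed enumerator; bias mode still selectable

        // The new copy constructor and copy assignment duplicate the layer's
        // hyperparameters together with its params/weights/biases members.
        dlib::linear_<16> copy_constructed(with_bias);
        copy_constructed = with_bias;

        (void)no_bias;
        (void)copy_constructed;
        return 0;
    }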