88#include < unordered_set>
99
1010#include " dsp.h"
11- #include " json.hpp"
12- #include " util.h"
1311
1412#define tanh_impl_ std::tanh
1513// #define tanh_impl_ fast_tanh_
@@ -62,21 +60,6 @@ void nam::DSP::SetLoudness(const double loudness)
6260
// Per-block post-processing hook, called once after each processed buffer of
// `num_frames` samples. The base DSP keeps no per-block state, so this is a
// no-op; subclasses (e.g. Buffer — see its finalize_ below) override it.
void nam::DSP::finalize_ (const int num_frames) {}
6462
65- void nam::DSP::_get_params_ (const std::unordered_map<std::string, double >& input_params)
66- {
67- this ->_stale_params = false ;
68- for (auto it = input_params.begin (); it != input_params.end (); ++it)
69- {
70- const std::string key = util::lowercase (it->first );
71- const double value = it->second ;
72- if (this ->_params .find (key) == this ->_params .end ()) // Not contained
73- this ->_stale_params = true ;
74- else if (this ->_params [key] != value) // Contained but new value
75- this ->_stale_params = true ;
76- this ->_params [key] = value;
77- }
78- }
79-
8063// Buffer =====================================================================
8164
8265nam::Buffer::Buffer (const int receptive_field, const double expected_sample_rate)
@@ -153,20 +136,20 @@ void nam::Buffer::finalize_(const int num_frames)
153136
154137// Linear =====================================================================
155138
156- nam::Linear::Linear (const int receptive_field, const bool _bias, const std::vector<float >& params ,
139+ nam::Linear::Linear (const int receptive_field, const bool _bias, const std::vector<float >& weights ,
157140 const double expected_sample_rate)
158141: nam::Buffer(receptive_field, expected_sample_rate)
159142{
160- if ((int )params .size () != (receptive_field + (_bias ? 1 : 0 )))
143+ if ((int )weights .size () != (receptive_field + (_bias ? 1 : 0 )))
161144 throw std::runtime_error (
162145 " Params vector does not match expected size based "
163146 " on architecture parameters" );
164147
165148 this ->_weight .resize (this ->_receptive_field );
166149 // Pass in in reverse order so that dot products work out of the box.
167150 for (int i = 0 ; i < this ->_receptive_field ; i++)
168- this ->_weight (i) = params [receptive_field - 1 - i];
169- this ->_bias = _bias ? params [receptive_field] : (float )0.0 ;
151+ this ->_weight (i) = weights [receptive_field - 1 - i];
152+ this ->_bias = _bias ? weights [receptive_field] : (float )0.0 ;
170153}
171154
172155void nam::Linear::process (NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
@@ -184,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
184167
185168// NN modules =================================================================
186169
187- void nam::Conv1D::set_params_ (std::vector<float >::iterator& params )
170+ void nam::Conv1D::set_weights_ (std::vector<float >::iterator& weights )
188171{
189172 if (this ->_weight .size () > 0 )
190173 {
@@ -194,10 +177,10 @@ void nam::Conv1D::set_params_(std::vector<float>::iterator& params)
194177 for (auto i = 0 ; i < out_channels; i++)
195178 for (auto j = 0 ; j < in_channels; j++)
196179 for (size_t k = 0 ; k < this ->_weight .size (); k++)
197- this ->_weight [k](i, j) = *(params ++);
180+ this ->_weight [k](i, j) = *(weights ++);
198181 }
199182 for (long i = 0 ; i < this ->_bias .size (); i++)
200- this ->_bias (i) = *(params ++);
183+ this ->_bias (i) = *(weights ++);
201184}
202185
203186void nam::Conv1D::set_size_ (const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
@@ -214,11 +197,11 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
214197 this ->_dilation = _dilation;
215198}
216199
// Convenience: allocate the layer (set_size_) and immediately load its
// weights (set_weights_) from the iterator, which is advanced in place.
// NOTE: set_size_ takes do_bias BEFORE dilation, so the argument order in the
// forwarded call intentionally differs from this signature's order.
void nam::Conv1D::set_size_and_weights_ (const int in_channels, const int out_channels, const int kernel_size,
                                         const int _dilation, const bool do_bias, std::vector<float >::iterator& weights )
{
  this ->set_size_ (in_channels, out_channels, kernel_size, do_bias, _dilation);
  this ->set_weights_ (weights );
}
223206
224207void nam::Conv1D::process_ (const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long ncols,
@@ -237,12 +220,12 @@ void nam::Conv1D::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output
237220 output.middleCols (j_start, ncols).colwise () += this ->_bias ;
238221}
239222
240- long nam::Conv1D::get_num_params () const
223+ long nam::Conv1D::get_num_weights () const
241224{
242- long num_params = this ->_bias .size ();
225+ long num_weights = this ->_bias .size ();
243226 for (size_t i = 0 ; i < this ->_weight .size (); i++)
244- num_params += this ->_weight [i].size ();
245- return num_params ;
227+ num_weights += this ->_weight [i].size ();
228+ return num_weights ;
246229}
247230
248231nam::Conv1x1::Conv1x1 (const int in_channels, const int out_channels, const bool _bias)
@@ -253,14 +236,14 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
253236 this ->_bias .resize (out_channels);
254237}
255238
256- void nam::Conv1x1::set_params_ (std::vector<float >::iterator& params )
239+ void nam::Conv1x1::set_weights_ (std::vector<float >::iterator& weights )
257240{
258241 for (int i = 0 ; i < this ->_weight .rows (); i++)
259242 for (int j = 0 ; j < this ->_weight .cols (); j++)
260- this ->_weight (i, j) = *(params ++);
243+ this ->_weight (i, j) = *(weights ++);
261244 if (this ->_do_bias )
262245 for (int i = 0 ; i < this ->_bias .size (); i++)
263- this ->_bias (i) = *(params ++);
246+ this ->_bias (i) = *(weights ++);
264247}
265248
266249Eigen::MatrixXf nam::Conv1x1::process (const Eigen::MatrixXf& input) const
0 commit comments