Commit 3d4a71f

[BREAKING] Remove parametric modeling code (#95)
* Get rid of parametric modeling code
* Virtual set condition array
1 parent: 4772bb2 · commit: 3d4a71f

File tree: 9 files changed (+120, -225 lines)

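For downstream code, the breaking change amounts to a rename plus a deletion: every "params"/"set_params_" identifier in the DSP API becomes "weights"/"set_weights_", and the unused parametric-knob plumbing (DSPParam, _get_params_, _params, _stale_params) is removed outright. A minimal caller-side sketch of the rename (the loader function, the sizes, and the include path are assumptions for illustration; only the nam::Conv1D method names come from this diff):

#include <vector>

#include "NAM/dsp.h"

// Hypothetical loader: before this commit the call was
// conv.set_size_and_params_(...); after it, the same operation is:
void load_conv(nam::Conv1D& conv, std::vector<float>& weights)
{
  std::vector<float>::iterator it = weights.begin();
  conv.set_size_and_weights_(/*in_channels=*/1, /*out_channels=*/16,
                             /*kernel_size=*/2, /*_dilation=*/1,
                             /*do_bias=*/true, it);
}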

NAM/convnet.cpp

Lines changed: 22 additions & 24 deletions
@@ -8,26 +8,24 @@
 #include <unordered_set>
 
 #include "dsp.h"
-#include "json.hpp"
-#include "util.h"
 #include "convnet.h"
 
-nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& params)
+nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
 {
   // Extract from param buffer
   Eigen::VectorXf running_mean(dim);
   Eigen::VectorXf running_var(dim);
   Eigen::VectorXf _weight(dim);
   Eigen::VectorXf _bias(dim);
   for (int i = 0; i < dim; i++)
-    running_mean(i) = *(params++);
+    running_mean(i) = *(weights++);
   for (int i = 0; i < dim; i++)
-    running_var(i) = *(params++);
+    running_var(i) = *(weights++);
   for (int i = 0; i < dim; i++)
-    _weight(i) = *(params++);
+    _weight(i) = *(weights++);
   for (int i = 0; i < dim; i++)
-    _bias(i) = *(params++);
-  float eps = *(params++);
+    _bias(i) = *(weights++);
+  float eps = *(weights++);
 
   // Convert to scale & loc
   this->scale.resize(dim);
@@ -48,15 +46,15 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c
   }
 }
 
-void nam::convnet::ConvNetBlock::set_params_(const int in_channels, const int out_channels, const int _dilation,
-                                             const bool batchnorm, const std::string activation,
-                                             std::vector<float>::iterator& params)
+void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
+                                              const bool batchnorm, const std::string activation,
+                                              std::vector<float>::iterator& weights)
 {
   this->_batchnorm = batchnorm;
   // HACK 2 kernel
-  this->conv.set_size_and_params_(in_channels, out_channels, 2, _dilation, !batchnorm, params);
+  this->conv.set_size_and_weights_(in_channels, out_channels, 2, _dilation, !batchnorm, weights);
   if (this->_batchnorm)
-    this->batchnorm = BatchNorm(out_channels, params);
+    this->batchnorm = BatchNorm(out_channels, weights);
   this->activation = activations::Activation::get_activation(activation);
 }
 
@@ -76,12 +74,12 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
   return this->conv.get_out_channels();
 }
 
-nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& params)
+nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
 {
   this->_weight.resize(channels);
   for (int i = 0; i < channels; i++)
-    this->_weight[i] = *(params++);
-  this->_bias = *(params++);
+    this->_weight[i] = *(weights++);
+  this->_bias = *(weights++);
 }
 
 void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start,
@@ -94,22 +92,22 @@ void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf
 }
 
 nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm,
-                               const std::string activation, std::vector<float>& params,
+                               const std::string activation, std::vector<float>& weights,
                                const double expected_sample_rate)
 : Buffer(*std::max_element(dilations.begin(), dilations.end()), expected_sample_rate)
 {
-  this->_verify_params(channels, dilations, batchnorm, params.size());
+  this->_verify_weights(channels, dilations, batchnorm, weights.size());
   this->_blocks.resize(dilations.size());
-  std::vector<float>::iterator it = params.begin();
+  std::vector<float>::iterator it = weights.begin();
   for (size_t i = 0; i < dilations.size(); i++)
-    this->_blocks[i].set_params_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
+    this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
   this->_block_vals.resize(this->_blocks.size() + 1);
   for (auto& matrix : this->_block_vals)
     matrix.setZero();
   std::fill(this->_input_buffer.begin(), this->_input_buffer.end(), 0.0f);
   this->_head = _Head(channels, it);
-  if (it != params.end())
-    throw std::runtime_error("Didn't touch all the params when initializing ConvNet");
+  if (it != weights.end())
+    throw std::runtime_error("Didn't touch all the weights when initializing ConvNet");
 
   _prewarm_samples = 1;
   for (size_t i = 0; i < dilations.size(); i++)
@@ -136,8 +134,8 @@ void nam::convnet::ConvNet::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const
     output[s] = this->_head_output(s);
 }
 
-void nam::convnet::ConvNet::_verify_params(const int channels, const std::vector<int>& dilations, const bool batchnorm,
-                                           const size_t actual_params)
+void nam::convnet::ConvNet::_verify_weights(const int channels, const std::vector<int>& dilations, const bool batchnorm,
+                                            const size_t actual_weights)
 {
   // TODO
 }
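
As the constructor shows, BatchNorm consumes the shared iterator in a fixed order: running_mean, running_var, _weight, _bias (each dim floats), then a single eps, so each layer advances it by 4*dim + 1 values. A hedged sketch of packing a buffer in that order (the pack_batchnorm helper and its values are placeholders, not real model weights):

#include <vector>

// Sketch: serialize one BatchNorm layer's 4*dim + 1 floats in the
// exact order the constructor above reads them back.
std::vector<float> pack_batchnorm(const int dim)
{
  std::vector<float> buf;
  buf.reserve(4 * dim + 1);
  for (int i = 0; i < dim; i++) buf.push_back(0.0f); // running_mean
  for (int i = 0; i < dim; i++) buf.push_back(1.0f); // running_var
  for (int i = 0; i < dim; i++) buf.push_back(1.0f); // _weight (gamma)
  for (int i = 0; i < dim; i++) buf.push_back(0.0f); // _bias (beta)
  buf.push_back(1e-5f);                              // eps
  return buf;
}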

NAM/convnet.h

Lines changed: 7 additions & 7 deletions
@@ -23,7 +23,7 @@ class BatchNorm
 {
 public:
   BatchNorm(){};
-  BatchNorm(const int dim, std::vector<float>::iterator& params);
+  BatchNorm(const int dim, std::vector<float>::iterator& weights);
   void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
 
 private:
@@ -40,8 +40,8 @@ class ConvNetBlock
 {
 public:
   ConvNetBlock(){};
-  void set_params_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
-                   const std::string activation, std::vector<float>::iterator& params);
+  void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
+                    const std::string activation, std::vector<float>::iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
   long get_out_channels() const;
   Conv1D conv;
@@ -56,7 +56,7 @@ class _Head
 {
 public:
   _Head(){};
-  _Head(const int channels, std::vector<float>::iterator& params);
+  _Head(const int channels, std::vector<float>::iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
 
 private:
@@ -68,16 +68,16 @@ class ConvNet : public Buffer
 {
 public:
   ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm, const std::string activation,
-          std::vector<float>& params, const double expected_sample_rate = -1.0);
+          std::vector<float>& weights, const double expected_sample_rate = -1.0);
   ~ConvNet() = default;
 
 protected:
   std::vector<ConvNetBlock> _blocks;
   std::vector<Eigen::MatrixXf> _block_vals;
   Eigen::VectorXf _head_output;
   _Head _head;
-  void _verify_params(const int channels, const std::vector<int>& dilations, const bool batchnorm,
-                      const size_t actual_params);
+  void _verify_weights(const int channels, const std::vector<int>& dilations, const bool batchnorm,
+                       const size_t actual_weights);
   void _update_buffers_(NAM_SAMPLE* input, const int num_frames) override;
   void _rewind_buffers_() override;
 
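With the header renamed, client code hands the flat weight vector straight to the ConvNet constructor, which throws if any floats are left over. A usage sketch (the channel count, dilations, "Tanh" activation name, and include path are illustrative assumptions, not taken from this diff):

#include <vector>

#include "NAM/convnet.h"

void build_model(std::vector<float>& weights)
{
  const int channels = 16;                      // assumed
  const std::vector<int> dilations{1, 2, 4, 8}; // assumed
  // Throws "Didn't touch all the weights..." on a size mismatch.
  nam::convnet::ConvNet model(channels, dilations, /*batchnorm=*/true, "Tanh",
                              weights, /*expected_sample_rate=*/48000.0);
}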

NAM/dsp.cpp

Lines changed: 17 additions & 34 deletions
@@ -8,8 +8,6 @@
 #include <unordered_set>
 
 #include "dsp.h"
-#include "json.hpp"
-#include "util.h"
 
 #define tanh_impl_ std::tanh
 // #define tanh_impl_ fast_tanh_
@@ -62,21 +60,6 @@ void nam::DSP::SetLoudness(const double loudness)
 
 void nam::DSP::finalize_(const int num_frames) {}
 
-void nam::DSP::_get_params_(const std::unordered_map<std::string, double>& input_params)
-{
-  this->_stale_params = false;
-  for (auto it = input_params.begin(); it != input_params.end(); ++it)
-  {
-    const std::string key = util::lowercase(it->first);
-    const double value = it->second;
-    if (this->_params.find(key) == this->_params.end()) // Not contained
-      this->_stale_params = true;
-    else if (this->_params[key] != value) // Contained but new value
-      this->_stale_params = true;
-    this->_params[key] = value;
-  }
-}
-
 // Buffer =====================================================================
 
 nam::Buffer::Buffer(const int receptive_field, const double expected_sample_rate)
@@ -153,20 +136,20 @@ void nam::Buffer::finalize_(const int num_frames)
 
 // Linear =====================================================================
 
-nam::Linear::Linear(const int receptive_field, const bool _bias, const std::vector<float>& params,
+nam::Linear::Linear(const int receptive_field, const bool _bias, const std::vector<float>& weights,
                     const double expected_sample_rate)
 : nam::Buffer(receptive_field, expected_sample_rate)
 {
-  if ((int)params.size() != (receptive_field + (_bias ? 1 : 0)))
+  if ((int)weights.size() != (receptive_field + (_bias ? 1 : 0)))
     throw std::runtime_error(
       "Params vector does not match expected size based "
       "on architecture parameters");
 
   this->_weight.resize(this->_receptive_field);
   // Pass in in reverse order so that dot products work out of the box.
   for (int i = 0; i < this->_receptive_field; i++)
-    this->_weight(i) = params[receptive_field - 1 - i];
-  this->_bias = _bias ? params[receptive_field] : (float)0.0;
+    this->_weight(i) = weights[receptive_field - 1 - i];
+  this->_bias = _bias ? weights[receptive_field] : (float)0.0;
 }
 
 void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames)
@@ -184,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
 
 // NN modules =================================================================
 
-void nam::Conv1D::set_params_(std::vector<float>::iterator& params)
+void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
 {
   if (this->_weight.size() > 0)
   {
@@ -194,10 +177,10 @@ void nam::Conv1D::set_params_(std::vector<float>::iterator& params)
     for (auto i = 0; i < out_channels; i++)
       for (auto j = 0; j < in_channels; j++)
         for (size_t k = 0; k < this->_weight.size(); k++)
-          this->_weight[k](i, j) = *(params++);
+          this->_weight[k](i, j) = *(weights++);
   }
   for (long i = 0; i < this->_bias.size(); i++)
-    this->_bias(i) = *(params++);
+    this->_bias(i) = *(weights++);
 }
 
 void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
@@ -214,11 +197,11 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
   this->_dilation = _dilation;
 }
 
-void nam::Conv1D::set_size_and_params_(const int in_channels, const int out_channels, const int kernel_size,
-                                       const int _dilation, const bool do_bias, std::vector<float>::iterator& params)
+void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
+                                        const int _dilation, const bool do_bias, std::vector<float>::iterator& weights)
 {
   this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
-  this->set_params_(params);
+  this->set_weights_(weights);
 }
 
 void nam::Conv1D::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long ncols,
@@ -237,12 +220,12 @@ void nam::Conv1D::process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output
   output.middleCols(j_start, ncols).colwise() += this->_bias;
 }
 
-long nam::Conv1D::get_num_params() const
+long nam::Conv1D::get_num_weights() const
 {
-  long num_params = this->_bias.size();
+  long num_weights = this->_bias.size();
   for (size_t i = 0; i < this->_weight.size(); i++)
-    num_params += this->_weight[i].size();
-  return num_params;
+    num_weights += this->_weight[i].size();
+  return num_weights;
 }
 
 nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool _bias)
@@ -253,14 +236,14 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
   this->_bias.resize(out_channels);
 }
 
-void nam::Conv1x1::set_params_(std::vector<float>::iterator& params)
+void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
 {
   for (int i = 0; i < this->_weight.rows(); i++)
     for (int j = 0; j < this->_weight.cols(); j++)
-      this->_weight(i, j) = *(params++);
+      this->_weight(i, j) = *(weights++);
   if (this->_do_bias)
     for (int i = 0; i < this->_bias.size(); i++)
-      this->_bias(i) = *(params++);
+      this->_bias(i) = *(weights++);
 }
 
 Eigen::MatrixXf nam::Conv1x1::process(const Eigen::MatrixXf& input) const
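
The renamed get_num_weights still counts kernel entries plus bias: for a Conv1D with kernel size K, Cin input channels, and Cout output channels it returns K*Cin*Cout + Cout with bias, or K*Cin*Cout without. A quick check of the arithmetic (sizes and include path are assumptions):

#include "NAM/dsp.h"

long count_weights_example()
{
  nam::Conv1D conv;
  // Assumed sizes: 16 -> 16 channels, kernel 2, bias on, dilation 1.
  conv.set_size_(16, 16, 2, /*do_bias=*/true, /*_dilation=*/1);
  return conv.get_num_weights(); // 2*16*16 + 16 = 528
}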

NAM/dsp.h

Lines changed: 9 additions & 32 deletions
@@ -34,16 +34,6 @@ enum EArchitectures
   kNumModels
 };
 
-// Class for providing params from the plugin to the DSP module
-// For now, we'll work with doubles. Later, we'll add other types.
-class DSPParam
-{
-public:
-  const char* name;
-  const double val;
-};
-// And the params shall be provided as a std::vector<DSPParam>.
-
 class DSP
 {
 public:
@@ -65,9 +55,6 @@ class DSP
   // Anything to take care of before next buffer comes in.
   // For example:
   // * Move the buffer index forward
-  // * Does NOT say that params aren't stale; that's the job of the routine
-  //   that actually uses them, which varies depends on the particulars of the
-  //   DSP subclass implementation.
   virtual void finalize_(const int num_frames);
   // Expected sample rate, in Hz.
   // TODO throw if it doesn't know.
@@ -88,18 +75,8 @@ class DSP
   double mLoudness = 0.0;
   // What sample rate does the model expect?
   double mExpectedSampleRate;
-  // Parameters (aka "knobs")
-  std::unordered_map<std::string, double> _params;
-  // If the params have changed since the last buffer was processed:
-  bool _stale_params = true;
+  // How many samples should be processed during "pre-warming"
   int _prewarm_samples = 0;
-
-  // Methods
-
-  // Copy the parameters to the DSP module.
-  // If anything has changed, then set this->_stale_params to true.
-  // (TODO use "listener" approach)
-  void _get_params_(const std::unordered_map<std::string, double>& input_params);
 };
 
 // Class where an input buffer is kept so that long-time effects can be
@@ -132,7 +109,7 @@ class Buffer : public DSP
 class Linear : public Buffer
 {
 public:
-  Linear(const int receptive_field, const bool _bias, const std::vector<float>& params,
+  Linear(const int receptive_field, const bool _bias, const std::vector<float>& weights,
          const double expected_sample_rate = -1.0);
   void process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_frames) override;
 
@@ -147,19 +124,19 @@ class Conv1D
 {
 public:
   Conv1D() { this->_dilation = 1; };
-  void set_params_(std::vector<float>::iterator& params);
+  void set_weights_(std::vector<float>::iterator& weights);
   void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
                  const int _dilation);
-  void set_size_and_params_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
-                            const bool do_bias, std::vector<float>::iterator& params);
+  void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
+                             const bool do_bias, std::vector<float>::iterator& weights);
   // Process from input to output
   //  Rightmost indices of input go from i_start to i_end,
   //  Indices on output for from j_start (to j_start + i_end - i_start)
   void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end,
                 const long j_start) const;
   long get_in_channels() const { return this->_weight.size() > 0 ? this->_weight[0].cols() : 0; };
   long get_kernel_size() const { return this->_weight.size(); };
-  long get_num_params() const;
+  long get_num_weights() const;
   long get_out_channels() const { return this->_weight.size() > 0 ? this->_weight[0].rows() : 0; };
   int get_dilation() const { return this->_dilation; };
 
@@ -176,7 +153,7 @@ class Conv1x1
 {
 public:
   Conv1x1(const int in_channels, const int out_channels, const bool _bias);
-  void set_params_(std::vector<float>::iterator& params);
+  void set_weights_(std::vector<float>::iterator& weights);
   // :param input: (N,Cin) or (Cin,)
   // :return: (N,Cout) or (Cout,), respectively
   Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;
@@ -203,7 +180,7 @@ class Conv1x1
 // * "WaveNet"
 // :param config:
 // :param metadata:
-// :param params: The model parameters ("weights")
+// :param weights: The model weights
 // :param expected_sample_rate: Most NAM models implicitly assume that data will be provided to them at some sample
 //   rate. This captures it for other components interfacing with the model to understand its needs. Use -1.0 for "I
 //   don't know".
@@ -213,7 +190,7 @@ struct dspData
   std::string architecture;
   nlohmann::json config;
   nlohmann::json metadata;
-  std::vector<float> params;
+  std::vector<float> weights;
   double expected_sample_rate;
 };
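
The dspData field rename ripples into anything that caches a parsed model. A minimal sketch of populating the struct under the new name (assuming dspData sits in namespace nam like the classes above; the make_dsp_data helper and its values are placeholders):

#include <utility>
#include <vector>

#include "NAM/dsp.h"

nam::dspData make_dsp_data(std::vector<float> weights)
{
  nam::dspData data;
  data.architecture = "ConvNet";     // placeholder
  data.weights = std::move(weights); // field was `params` before this commit
  data.expected_sample_rate = 48000.0;
  return data;
}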
