Commit d581d11

Pass in const reference to weights
And const_iterator rather than iterator
1 parent 74a07ce
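
The change is mechanical but worth spelling out: every weight-loading entry point now takes the flat weight buffer as const std::vector<float>&, and the modules read it through a std::vector<float>::const_iterator& that each set_weights_ advances past the values it consumes. A minimal sketch of the pattern (simplified names, not the repo's actual classes):

#include <vector>

struct Bias
{
  std::vector<float> _bias;

  // Reads `dim` floats from the shared buffer and leaves `weights`
  // pointing at the first unread value.
  void set_weights_(const int dim, std::vector<float>::const_iterator& weights)
  {
    _bias.assign(weights, weights + dim);
    weights += dim;
  }
};

void load(const std::vector<float>& weights) // const reference: no copy, no mutation
{
  // begin() on a const vector yields a const_iterator; the old `iterator`
  // type would not compile against a const reference.
  std::vector<float>::const_iterator it = weights.begin();
  Bias a, b;
  a.set_weights_(4, it); // consumes weights[0..3]
  b.set_weights_(4, it); // consumes weights[4..7]
}

Passing the iterator by non-const reference is what lets a single begin() iterator thread through every block in order; making it a const_iterator guarantees no module can write back into the model's weights.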

8 files changed, +31 −31 lines

NAM/convnet.cpp

Lines changed: 5 additions & 5 deletions
@@ -10,7 +10,7 @@
 #include "dsp.h"
 #include "convnet.h"
 
-nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
+nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::const_iterator& weights)
 {
   // Extract from param buffer
   Eigen::VectorXf running_mean(dim);
@@ -48,7 +48,7 @@ void nam::convnet::BatchNorm::process_(Eigen::MatrixXf& x, const long i_start, c
 
 void nam::convnet::ConvNetBlock::set_weights_(const int in_channels, const int out_channels, const int _dilation,
                                               const bool batchnorm, const std::string activation,
-                                              std::vector<float>::iterator& weights)
+                                              std::vector<float>::const_iterator& weights)
 {
   this->_batchnorm = batchnorm;
   // HACK 2 kernel
@@ -74,7 +74,7 @@ long nam::convnet::ConvNetBlock::get_out_channels() const
   return this->conv.get_out_channels();
 }
 
-nam::convnet::_Head::_Head(const int channels, std::vector<float>::iterator& weights)
+nam::convnet::_Head::_Head(const int channels, std::vector<float>::const_iterator& weights)
 {
   this->_weight.resize(channels);
   for (int i = 0; i < channels; i++)
@@ -92,13 +92,13 @@ void nam::convnet::_Head::process_(const Eigen::MatrixXf& input, Eigen::VectorXf
 }
 
 nam::convnet::ConvNet::ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm,
-                               const std::string activation, std::vector<float>& weights,
+                               const std::string activation, const std::vector<float>& weights,
                                const double expected_sample_rate)
 : Buffer(*std::max_element(dilations.begin(), dilations.end()), expected_sample_rate)
 {
   this->_verify_weights(channels, dilations, batchnorm, weights.size());
   this->_blocks.resize(dilations.size());
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (size_t i = 0; i < dilations.size(); i++)
     this->_blocks[i].set_weights_(i == 0 ? 1 : channels, channels, dilations[i], batchnorm, activation, it);
   this->_block_vals.resize(this->_blocks.size() + 1);

NAM/convnet.h

Lines changed: 4 additions & 4 deletions
@@ -23,7 +23,7 @@ class BatchNorm
 {
 public:
   BatchNorm(){};
-  BatchNorm(const int dim, std::vector<float>::iterator& weights);
+  BatchNorm(const int dim, std::vector<float>::const_iterator& weights);
   void process_(Eigen::MatrixXf& input, const long i_start, const long i_end) const;
 
 private:
@@ -41,7 +41,7 @@ class ConvNetBlock
 public:
   ConvNetBlock(){};
   void set_weights_(const int in_channels, const int out_channels, const int _dilation, const bool batchnorm,
-                    const std::string activation, std::vector<float>::iterator& weights);
+                    const std::string activation, std::vector<float>::const_iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::MatrixXf& output, const long i_start, const long i_end) const;
   long get_out_channels() const;
   Conv1D conv;
@@ -56,7 +56,7 @@ class _Head
 {
 public:
   _Head(){};
-  _Head(const int channels, std::vector<float>::iterator& weights);
+  _Head(const int channels, std::vector<float>::const_iterator& weights);
   void process_(const Eigen::MatrixXf& input, Eigen::VectorXf& output, const long i_start, const long i_end) const;
 
 private:
@@ -68,7 +68,7 @@ class ConvNet : public Buffer
 {
 public:
   ConvNet(const int channels, const std::vector<int>& dilations, const bool batchnorm, const std::string activation,
-          std::vector<float>& weights, const double expected_sample_rate = -1.0);
+          const std::vector<float>& weights, const double expected_sample_rate = -1.0);
   ~ConvNet() = default;
 
 protected:

NAM/dsp.cpp

Lines changed: 3 additions & 3 deletions
@@ -167,7 +167,7 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
 
 // NN modules =================================================================
 
-void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
+void nam::Conv1D::set_weights_(std::vector<float>::const_iterator& weights)
 {
   if (this->_weight.size() > 0)
   {
@@ -198,7 +198,7 @@ void nam::Conv1D::set_size_(const int in_channels, const int out_channels, const
 }
 
 void nam::Conv1D::set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size,
-                                        const int _dilation, const bool do_bias, std::vector<float>::iterator& weights)
+                                        const int _dilation, const bool do_bias, std::vector<float>::const_iterator& weights)
 {
   this->set_size_(in_channels, out_channels, kernel_size, do_bias, _dilation);
   this->set_weights_(weights);
@@ -236,7 +236,7 @@ nam::Conv1x1::Conv1x1(const int in_channels, const int out_channels, const bool
   this->_bias.resize(out_channels);
 }
 
-void nam::Conv1x1::set_weights_(std::vector<float>::iterator& weights)
+void nam::Conv1x1::set_weights_(std::vector<float>::const_iterator& weights)
 {
   for (int i = 0; i < this->_weight.rows(); i++)
     for (int j = 0; j < this->_weight.cols(); j++)

NAM/dsp.h

Lines changed: 3 additions & 3 deletions
@@ -124,11 +124,11 @@ class Conv1D
 {
 public:
   Conv1D() { this->_dilation = 1; };
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   void set_size_(const int in_channels, const int out_channels, const int kernel_size, const bool do_bias,
                  const int _dilation);
   void set_size_and_weights_(const int in_channels, const int out_channels, const int kernel_size, const int _dilation,
-                             const bool do_bias, std::vector<float>::iterator& weights);
+                             const bool do_bias, std::vector<float>::const_iterator& weights);
   // Process from input to output
   // Rightmost indices of input go from i_start to i_end,
   // Indices on output for from j_start (to j_start + i_end - i_start)
@@ -153,7 +153,7 @@ class Conv1x1
 {
 public:
   Conv1x1(const int in_channels, const int out_channels, const bool _bias);
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // :param input: (N,Cin) or (Cin,)
   // :return: (N,Cout) or (Cout,), respectively
   Eigen::MatrixXf process(const Eigen::MatrixXf& input) const;

NAM/lstm.cpp

Lines changed: 3 additions & 3 deletions
@@ -4,7 +4,7 @@
 
 #include "lstm.h"
 
-nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights)
+nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights)
 {
   // Resize arrays
   this->_w.resize(4 * hidden_size, input_size + hidden_size);
@@ -63,12 +63,12 @@ void nam::lstm::LSTMCell::process_(const Eigen::VectorXf& x)
   }
 }
 
-nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+nam::lstm::LSTM::LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
                       const double expected_sample_rate)
 : DSP(expected_sample_rate)
 {
   this->_input.resize(1);
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (int i = 0; i < num_layers; i++)
     this->_layers.push_back(LSTMCell(i == 0 ? input_size : hidden_size, hidden_size, it));
   this->_head_weight.resize(hidden_size);

NAM/lstm.h

Lines changed: 2 additions & 2 deletions
@@ -22,7 +22,7 @@ namespace lstm
 class LSTMCell
 {
 public:
-  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights);
+  LSTMCell(const int input_size, const int hidden_size, std::vector<float>::const_iterator& weights);
   Eigen::VectorXf get_hidden_state() const { return this->_xh(Eigen::placeholders::lastN(this->_get_hidden_size())); };
   void process_(const Eigen::VectorXf& x);
 
@@ -50,7 +50,7 @@ class LSTMCell
 class LSTM : public DSP
 {
 public:
-  LSTM(const int num_layers, const int input_size, const int hidden_size, std::vector<float>& weights,
+  LSTM(const int num_layers, const int input_size, const int hidden_size, const std::vector<float>& weights,
        const double expected_sample_rate = -1.0);
   ~LSTM() = default;
 

NAM/wavenet.cpp

Lines changed: 6 additions & 6 deletions
@@ -12,7 +12,7 @@ nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_ch
   this->set_size_(in_channels, out_channels, kernel_size, bias, dilation);
 }
 
-void nam::wavenet::_Layer::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_Layer::set_weights_(std::vector<float>::const_iterator& weights)
 {
   this->_conv.set_weights_(weights);
   this->_input_mixin.set_weights_(weights);
@@ -133,7 +133,7 @@ void nam::wavenet::_LayerArray::set_num_frames_(const long num_frames)
     this->_layers[i].set_num_frames_(num_frames);
 }
 
-void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_LayerArray::set_weights_(std::vector<float>::const_iterator& weights)
 {
   this->_rechannel.set_weights_(weights);
   for (size_t i = 0; i < this->_layers.size(); i++)
@@ -186,7 +186,7 @@ nam::wavenet::_Head::_Head(const int input_size, const int num_layers, const int
   }
 }
 
-void nam::wavenet::_Head::set_weights_(std::vector<float>::iterator& weights)
+void nam::wavenet::_Head::set_weights_(std::vector<float>::const_iterator& weights)
 {
   for (size_t i = 0; i < this->_layers.size(); i++)
     this->_layers[i].set_weights_(weights);
@@ -231,7 +231,7 @@ void nam::wavenet::_Head::_apply_activation_(Eigen::MatrixXf& x)
 // WaveNet ====================================================================
 
 nam::wavenet::WaveNet::WaveNet(const std::vector<nam::wavenet::LayerArrayParams>& layer_array_params,
-                               const float head_scale, const bool with_head, std::vector<float> weights,
+                               const float head_scale, const bool with_head, const std::vector<float>& weights,
                                const double expected_sample_rate)
 : DSP(expected_sample_rate)
 , _num_frames(0)
@@ -272,9 +272,9 @@ void nam::wavenet::WaveNet::finalize_(const int num_frames)
   this->_advance_buffers_(num_frames);
 }
 
-void nam::wavenet::WaveNet::set_weights_(std::vector<float>& weights)
+void nam::wavenet::WaveNet::set_weights_(const std::vector<float>& weights)
 {
-  std::vector<float>::iterator it = weights.begin();
+  std::vector<float>::const_iterator it = weights.begin();
   for (size_t i = 0; i < this->_layer_arrays.size(); i++)
     this->_layer_arrays[i].set_weights_(it);
   // this->_head.set_params_(it);
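
One hunk here fixes more than the iterator type: WaveNet's constructor previously took std::vector<float> weights by value, so every construction copied the entire weight buffer. A small illustration of the difference, using hypothetical loader functions rather than the repo's API:

#include <vector>

static void build_old(std::vector<float> w) { (void)w; }        // by value: copies the whole buffer
static void build_new(const std::vector<float>& w) { (void)w; } // by const reference: no copy

int main()
{
  // Illustrative size only; real models can carry weight vectors this large.
  std::vector<float> weights(4000000); // ~16 MB of float parameters
  build_old(weights); // allocates and copies ~16 MB before any weights are read
  build_new(weights); // reads through the reference; nothing is copied
}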

NAM/wavenet.h

Lines changed: 5 additions & 5 deletions
@@ -30,7 +30,7 @@ class _Layer
   , _1x1(channels, channels, true)
   , _activation(activations::Activation::get_activation(activation))
   , _gated(gated){};
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // :param `input`: from previous layer
   // :param `output`: to next layer
   void process_(const Eigen::MatrixXf& input, const Eigen::MatrixXf& condition, Eigen::MatrixXf& head_input,
@@ -108,7 +108,7 @@ class _LayerArray
                 Eigen::MatrixXf& head_outputs // post head-rechannel
   );
   void set_num_frames_(const long num_frames);
-  void set_weights_(std::vector<float>::iterator& it);
+  void set_weights_(std::vector<float>::const_iterator& it);
 
   // "Zero-indexed" receptive field.
   // E.g. a 1x1 convolution has a z.i.r.f. of zero.
@@ -144,7 +144,7 @@ class _Head
 {
 public:
   _Head(const int input_size, const int num_layers, const int channels, const std::string activation);
-  void set_weights_(std::vector<float>::iterator& weights);
+  void set_weights_(std::vector<float>::const_iterator& weights);
   // NOTE: the head transforms the provided input by applying a nonlinearity
   // to it in-place!
   void process_(Eigen::MatrixXf& inputs, Eigen::MatrixXf& outputs);
@@ -169,11 +169,11 @@ class WaveNet : public DSP
 {
 public:
   WaveNet(const std::vector<LayerArrayParams>& layer_array_params, const float head_scale, const bool with_head,
-          std::vector<float> weights, const double expected_sample_rate = -1.0);
+          const std::vector<float>& weights, const double expected_sample_rate = -1.0);
   ~WaveNet() = default;
 
   void finalize_(const int num_frames) override;
-  void set_weights_(std::vector<float>& weights);
+  void set_weights_(const std::vector<float>& weights);
 
 private:
   long _num_frames;
