Skip to content

Commit dfec2e8

Browse files
Minor modifications and formatting changes
1 parent 0152028 commit dfec2e8

File tree

17 files changed

+745
-771
lines changed

17 files changed

+745
-771
lines changed

c_reference/include/conv1d.h

Lines changed: 26 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -9,9 +9,9 @@
99
* @var W pointer to convolutional weights W, size for regular = out_channels*in_channels*kernel_size, size for depth based = out_channels*kernel_size
1010
* @var B pointer to the bias vector for the convolution, shape = [out_channels]
1111
*/
12-
typedef struct ConvLayers_Params{
13-
float* W;
14-
float* B;
12+
typedef struct ConvLayers_Params {
13+
float* W;
14+
float* B;
1515
} ConvLayers_Params;
1616

1717
/**
@@ -31,9 +31,9 @@ typedef struct ConvLayers_Params{
3131
* 2: tanh
3232
* 3: relu
3333
*/
34-
int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
35-
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
36-
const void* params, int activations);
34+
int conv1d(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
35+
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
36+
const void* params, int activations);
3737

3838
/**
3939
* @brief Model definition for the 1D Depthwise Convolution Layer
@@ -51,9 +51,9 @@ int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const fl
5151
* 2: tanh
5252
* 3: relu
5353
*/
54-
int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal,
55-
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
56-
const void* params, int activations);
54+
int conv1d_depth(float *output_signal, unsigned out_T, const float *input_signal,
55+
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
56+
const void* params, int activations);
5757

5858

5959
// Low Rank Convolution
@@ -64,11 +64,11 @@ int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal
6464
* @var B pointer to the bias vector for the convolution, shape = [out_channels]
6565
* @var rank rank of the weight tensor. A low rank decomposition typically used to reduce computation and storage
6666
*/
67-
typedef struct ConvLayers_LR_Params{
68-
float* W1;
69-
float* W2;
70-
float* B;
71-
unsigned rank;
67+
typedef struct ConvLayers_LR_Params {
68+
float* W1;
69+
float* W2;
70+
float* B;
71+
unsigned rank;
7272
} ConvLayers_LR_Params;
7373

7474
/**
@@ -89,9 +89,9 @@ typedef struct ConvLayers_LR_Params{
8989
* 2: tanh
9090
* 3: relu
9191
*/
92-
int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
93-
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
94-
const void* params, int activations);
92+
int conv1d_lr(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
93+
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
94+
const void* params, int activations);
9595

9696
/**
9797
* @brief Model definition for the 1D Depthwise Convolution Layer
@@ -110,9 +110,9 @@ int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const
110110
* 2: tanh
111111
* 3: relu
112112
*/
113-
int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_signal,
114-
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
115-
const void* params, int activations);
113+
int conv1d_depth_lr(float *output_signal, unsigned out_T, const float *input_signal,
114+
unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
115+
const void* params, int activations);
116116

117117
// Auxillary Layers
118118
/**
@@ -130,8 +130,8 @@ int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_sig
130130
* 2: tanh
131131
* 3: relu
132132
*/
133-
int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned in_T, unsigned in_channels,
134-
int padding, unsigned kernel_size, int activations);
133+
int avgpool1d(float *output_signal, unsigned out_T, const float *input_signal, unsigned in_T, unsigned in_channels,
134+
int padding, unsigned kernel_size, int activations);
135135

136136
/**
137137
* @brief Model definition for the 1D batch Normalization Layer
@@ -147,6 +147,7 @@ int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, u
147147
* @param[in] in_place in place computation of the batchnorm i.e. the output is stored in place of the input signal. Storage efficient
148148
* @param[in] eps a very small +ve value to avoid division by 0. For the default value, assign = 0.00001
149149
*/
150-
int BatchNorm1d(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels,
151-
float* mean, float* var, unsigned affine, float* gamma , float * beta, unsigned in_place, float eps);
152-
#endif
150+
int batchnorm1d(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels,
151+
float* mean, float* var, unsigned affine, float* gamma , float * beta, unsigned in_place, float eps);
152+
153+
#endif

c_reference/include/conv_utils.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,7 @@
1313
* @param[in] I dim 0 for W1, value = out_channels
1414
* @param[in] J dim 1 for W2, value = in_channels * kernel_size, (or for depthwise) = kernel_size
1515
*/
16-
int MatMul(float* out, float* W1, float* W2, unsigned rank, unsigned I, unsigned J);
16+
int matmul(float* out, float* W1, float* W2, unsigned rank, unsigned I, unsigned J);
1717

1818
/**
1919
* @brief Definition for the Custom non-linear layer : The TanhGate
@@ -24,4 +24,4 @@ int MatMul(float* out, float* W1, float* W2, unsigned rank, unsigned I, unsigned
2424
*/
2525
int TanhGate(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels);
2626

27-
#endif
27+
#endif

c_reference/include/dscnn.h

Lines changed: 12 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -4,11 +4,6 @@
44
#ifndef __DSCNN__
55
#define __DSCNN__
66

7-
#include"conv1d.h"
8-
#include"conv_utils.h"
9-
#include<stdlib.h>
10-
#include<math.h>
11-
127
/**
138
* @brief Model definition for the 1D Convolution sub-block applied before the RNN
149
* @brief sub-layers : BatchNorm1d -> Conv1D_LR
@@ -35,9 +30,10 @@
3530
* 2: tanh
3631
* 3: relu
3732
*/
38-
int DSCNN_LR(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels, float* mean, float* var,
39-
unsigned affine, float* gamma, float* beta, unsigned in_place, unsigned cnn_hidden, int cnn_padding, unsigned cnn_kernel_size,
40-
const void* cnn_params, int cnn_activations);
33+
int dscnn_lr(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels,
34+
float* mean, float* var, unsigned affine, float* gamma, float* beta, unsigned in_place,
35+
unsigned cnn_hidden, int cnn_padding, unsigned cnn_kernel_size,
36+
const void* cnn_params, int cnn_activations);
4137

4238
/**
4339
* @brief Model definition for the 1D Convolution sub-block applied after the RNN
@@ -83,10 +79,12 @@ int DSCNN_LR(float* output_signal, float* input_signal, unsigned in_T, unsigned
8379
* 2: tanh
8480
* 3: relu
8581
*/
86-
int DSCNN_LR_Point_Depth(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels, float* mean, float* var,
87-
unsigned affine, float* gamma, float* beta, unsigned in_place, unsigned depth_cnn_hidden, int depth_cnn_padding,
88-
unsigned depth_cnn_kernel_size, const void* depth_cnn_params, int depth_cnn_activations, unsigned point_cnn_hidden,
89-
int point_cnn_padding, unsigned point_cnn_kernel_size, const void* point_cnn_params, int point_cnn_activations,
90-
int pool_padding, unsigned pool_kernel_size, int pool_activation);
82+
int dscnn_depth_point_lr(float* output_signal, float* input_signal, unsigned in_T, unsigned in_channels,
83+
float* mean, float* var, unsigned affine, float* gamma, float* beta, unsigned in_place,
84+
unsigned depth_cnn_hidden, int depth_cnn_padding, unsigned depth_cnn_kernel_size,
85+
const void* depth_cnn_params, int depth_cnn_activations,
86+
unsigned point_cnn_hidden, int point_cnn_padding, unsigned point_cnn_kernel_size,
87+
const void* point_cnn_params, int point_cnn_activations,
88+
int pool_padding, unsigned pool_kernel_size, int pool_activation);
9189

92-
#endif
90+
#endif

0 commit comments

Comments
 (0)