Skip to content

Commit 0878200

Browse files
author
Anirudh B H
committed
Removed Norm and Modified Depth-wise Conv
1 parent ffc8a9b commit 0878200

File tree

2 files changed

+45
-70
lines changed

2 files changed

+45
-70
lines changed

c_reference/include/conv1d.h

Lines changed: 10 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
#include<stdlib.h>
12
#ifndef __CONVLAYERS_H__
23
#define __CONVLAYERS_H__
34

@@ -14,20 +15,18 @@
1415
* @var B pointer to the bias vector for the convolution
1516
*/
1617
/**
 * Parameters for a plain 1D convolution layer.
 *
 * @var W pointer to the flattened filter weights,
 *        shape [out_channels, in_channels, kernel_size]
 * @var B pointer to the bias vector for the convolution
 */
typedef struct ConvLayers_Params {
    float* W;
    float* B;
} ConvLayers_Params;
2221

2322

24-
/**
 * 1D convolution over a single example.
 * input_signal:  [in_T, in_channels]; output_signal: [out_T, out_channels].
 * params points to a ConvLayers_Params. padding == -1 selects kernel_size/2
 * ("same"-style) zero padding. activations: 1 = sigmoid, 2 = tanh, 3 = relu,
 * any other value = linear (bias only).
 * N is a batch-size parameter; the implementation operates on one example.
 */
int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
    unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
    const void* params, int activations);

/**
 * Depth-wise 1D convolution: each input channel is filtered independently,
 * so the output has in_channels channels. Same padding and activation
 * conventions as Conv1D; params points to a ConvLayers_Params.
 */
int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal,
    unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
    const void* params, int activations);
3130

3231
// Low Rank
3332
/**
@@ -40,24 +39,22 @@ int Conv1D_Depth(float *output_signal, const float *input_signal,
4039
* @var B pointer to the bias vector for the convolution
4140
*/
4241
/**
 * Parameters for a low-rank 1D convolution layer.
 * The effective filter W is reconstructed at run time from the two
 * low-rank factors W1 and W2 (see prepareLowRankConvMat in the .c file).
 *
 * @var W1   first low-rank factor of the filter
 * @var W2   second low-rank factor of the filter
 * @var B    pointer to the bias vector for the convolution
 * @var rank rank of the low-rank factorization
 */
typedef struct ConvLayers_LR_Params {
    float* W1;
    float* W2;
    float* B;
    unsigned rank;
} ConvLayers_LR_Params;
5047

51-
/**
 * Low-rank 1D convolution over a single example.
 * params points to a ConvLayers_LR_Params; the full filter is materialized
 * from W1/W2 via prepareLowRankConvMat before convolving (heap temporary,
 * freed before return). Shapes and the padding / activation conventions
 * match Conv1D.
 */
int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
    unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
    const void* params, int activations);

/**
 * Low-rank depth-wise 1D convolution: one filter per input channel,
 * output has in_channels channels. params points to a ConvLayers_LR_Params;
 * padding / activation conventions match Conv1D.
 */
int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_signal,
    unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
    const void* params, int activations);
5855

5956
// Pool
/**
 * 1D pooling over time on a single example.
 * input_signal: [in_T, in_channels]; output_signal: [out_T, in_channels].
 * padding == -1 selects kernel_size/2 zero padding; activations follow the
 * Conv1D convention (1 = sigmoid, 2 = tanh, 3 = relu, else linear).
 * NOTE(review): the visible implementation accumulates a windowed sum —
 * confirm where the division by kernel_size (the "average") happens.
 */
int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned N, unsigned in_T, unsigned in_channels,
    int padding, unsigned kernel_size, int activations);
6259

6360
#endif

c_reference/src/conv1d.c

Lines changed: 35 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -1,32 +1,26 @@
11
#include"conv1d.h"
22
#include"conv_utils.h"
3+
#include<stdlib.h>
4+
#include<math.h>
5+
#include<stdio.h>
36

4-
int Conv1D_LR(float *output_signal, unsigned out_channels, const float *input_signal,
7+
int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
58
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
6-
const void* params,int normalize, int activations){
9+
const void* params, int activations){
710

811
const ConvLayers_LR_Params* tparams= (ConvLayers_LR_Params*) params;
912

1013
if(padding == -1){
1114
padding = kernel_size >> 1;
1215
}
1316
float* tempW = (float*)malloc(out_channels * in_channels * kernel_size * sizeof(float)) ;
14-
prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, out_channels, in_channels * kernel_size)
17+
prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, out_channels, in_channels * kernel_size);
1518

1619
// Perform the Convolution
17-
// input.shape = [N, in_T, in_channels]
18-
// output.shape = [N, out_T, out_channels]
20+
// input.shape = [in_T, in_channels]
21+
// output.shape = [out_T, out_channels]
1922
// filter.shape = [out_channels, in_channels, kernel_size]
2023
for(int t = 0; t < out_T; t++){
21-
if (normalize) {
22-
for (unsigned d = 0; d < in_channels; d++)
23-
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
24-
// v_add(1.0f, input_signal + t * in_channels, -1.0f, tparams->mean,
25-
// in_channels, tbuffers->normFeatures);
26-
// v_div(tparams->stdDev + t * in_channels, tbuffers->normFeatures, in_channels,
27-
// tbuffers->normFeatures);
28-
}
29-
3024
for(int co = 0; co < out_channels ; co++){
3125
float sum = 0;
3226
for(int tf = 0 ; tf < kernel_size ; tf++ ){
@@ -36,52 +30,45 @@ int Conv1D_LR(float *output_signal, unsigned out_channels, const float *input_si
3630
}
3731
}
3832
if(activations == 1){
39-
output_signal[n * out_channels * out_T + t * out_channels + co] = sigmoid(sum + tparams->B[co]);
33+
output_signal[t * out_channels + co] = sigmoid(sum + tparams->B[co]);
4034
}
4135
else if(activations == 2){
42-
output_signal[n * out_channels * out_T + t * out_channels + co] = tanh(sum + tparams->B[co]);
36+
output_signal[t * out_channels + co] = tanh(sum + tparams->B[co]);
4337
}
4438
else if(activations == 3){
45-
output_signal[n * out_channels * out_T + t * out_channels + co] = relu(sum + tparams->B[co]);
39+
output_signal[t * out_channels + co] = relu(sum + tparams->B[co]);
4640
}
4741
else{
48-
output_signal[n * out_channels * out_T + t * out_channels + co] = sum + tparams->B[co];
42+
output_signal[t * out_channels + co] = sum + tparams->B[co];
4943
}
5044
}
5145
}
52-
free(tempW)
46+
free(tempW);
5347
return 0;
5448
}
5549

56-
57-
int Conv1D_Depth_LR(float *output_signal, const float *input_signal,
50+
int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_signal,
5851
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
59-
const void* params,int normalize, int activations){
52+
const void* params, int activations){
6053

6154
const ConvLayers_LR_Params* tparams= (ConvLayers_LR_Params*) params;
6255

63-
if (tempW == 0) return ERR_TEMPW_NOT_INIT;
64-
6556
if(padding == -1){
6657
padding = kernel_size >> 1;
6758
}
6859

69-
float* tempW = (float*)malloc(out_channels * in_channels * kernel_size * sizeof(float)) ;
70-
prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, in_channels, in_channels * kernel_size)
60+
float* tempW = (float*)malloc(in_channels * kernel_size * sizeof(float)) ;
61+
prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, in_channels, in_channels * kernel_size);
7162
// Perform the Convolution
7263
// input.shape = [N, in_T, in_channels]
7364
// output.shape = [N, out_T, in_channels]
7465
// filter.shape = [(out)in_channels, in_channels, kernel_size]
7566
for(int t = 0; t < out_T; t++){
76-
if (normalize) {
77-
for (unsigned d = 0; d < in_channels; d++)
78-
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
79-
}
8067
for(int ci = 0; ci < in_channels ; ci++){
8168
float sum = 0;
8269
for(int tf = 0 ; tf < kernel_size ; tf++ ){
8370
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
84-
sum += (a * tempW[ci * in_channels * kernel_size + ci * kernel_size + tf]);
71+
sum += (a * tempW[ci * kernel_size + tf]);
8572
}
8673
if(activations == 1){
8774
output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
@@ -97,62 +84,57 @@ int Conv1D_Depth_LR(float *output_signal, const float *input_signal,
9784
}
9885
}
9986
}
100-
free(tempW)
87+
free(tempW);
10188
return 0;
10289
}
10390

10491

10592

106-
int Conv1D(float *output_signal, unsigned out_channels, const float *input_signal,
93+
int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
10794
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
108-
const void* params,int normalize, int activations){
109-
95+
const void* params, int activations){
96+
11097
const ConvLayers_Params* tparams= (ConvLayers_Params*) params;
11198

11299
if(padding == -1){
113100
padding = kernel_size >> 1;
114101
}
115-
102+
float sum;
116103
// Perform the Convolution
117104
// input.shape = [N, in_T, in_channels]
118105
// output.shape = [N, out_T, out_channels]
119106
// filter.shape = [out_channels, in_channels, kernel_size]
120107
for(int t = 0; t < out_T; t++){
121-
if (normalize) {
122-
for (unsigned d = 0; d < in_channels; d++)
123-
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
124-
}
125-
126108
for(int co = 0; co < out_channels ; co++){
127-
float sum = 0;
109+
sum = 0;
128110
for(int tf = 0 ; tf < kernel_size ; tf++ ){
129111
for(int ci = 0 ; ci < in_channels ; ci++){
130-
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? continue : input_signal[((tf + t) - padding) * in_channels + ci]);
112+
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
131113
sum += (a * tparams->W[co * in_channels * kernel_size + ci * kernel_size + tf]);
132114
}
133115
}
134116
if(activations == 1){
135-
output_signal[n * out_channels * out_T + t * out_channels + co] = sigmoid(sum + tparams->B[co]);
117+
output_signal[t * out_channels + co] = sigmoid(sum + tparams->B[co]);
136118
}
137119
else if(activations == 2){
138-
output_signal[n * out_channels * out_T + t * out_channels + co] = tanh(sum + tparams->B[co]);
120+
output_signal[t * out_channels + co] = tanh(sum + tparams->B[co]);
139121
}
140122
else if(activations == 3){
141-
output_signal[n * out_channels * out_T + t * out_channels + co] = relu(sum + tparams->B[co]);
123+
output_signal[t * out_channels + co] = relu(sum + tparams->B[co]);
142124
}
143125
else{
144-
output_signal[n * out_channels * out_T + t * out_channels + co] = sum + tparams->B[co];
126+
output_signal[t * out_channels + co] = sum + tparams->B[co];
145127
}
146128
}
147129
}
148130
return 0;
149131
}
150132

151-
int Conv1D_Depth(float *output_signal, const float *input_signal,
133+
int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal,
152134
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
153-
const void* params,int normalize, int activations){
135+
const void* params, int activations){
154136

155-
const ConvLayers_LR_Params* tparams= (ConvLayers_LR_Params*) params;
137+
const ConvLayers_Params* tparams= (ConvLayers_Params*) params;
156138

157139
if(padding == -1){
158140
padding = kernel_size >> 1;
@@ -163,15 +145,11 @@ int Conv1D_Depth(float *output_signal, const float *input_signal,
163145
// output.shape = [N, out_T, in_channels]
164146
// filter.shape = [(out)in_channels, in_channels, kernel_size]
165147
for(int t = 0; t < out_T; t++){
166-
if (normalize) {
167-
for (unsigned d = 0; d < in_channels; d++)
168-
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
169-
}
170148
for(int ci = 0; ci < in_channels ; ci++){
171149
float sum = 0;
172150
for(int tf = 0 ; tf < kernel_size ; tf++ ){
173151
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
174-
sum += (a * tparams->W[ci * in_channels * kernel_size + ci * kernel_size + tf]);
152+
sum += (a * tparams->W[ci * kernel_size + tf]);
175153
}
176154
if(activations == 1){
177155
output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
@@ -198,9 +176,9 @@ int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, u
198176
}
199177

200178
for(int t = 0; t < out_T; t++){
201-
for(int ci = 0 ; ci < in_channels){
179+
for(int ci = 0 ; ci < in_channels; ci++){
202180
float sum = 0;
203-
for(int tf = ; tf < kernel_size ; tf++){
181+
for(int tf = 0; tf < kernel_size ; tf++){
204182
sum += ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
205183
}
206184
if(activations == 1){

0 commit comments

Comments
 (0)