
Commit 664eaa9

Anirudh B H (AnirudhBHarish) authored and committed
Added conv1d layers with tests
1 parent 92f9654 commit 664eaa9

File tree

16 files changed: +550 −3 lines changed


c_reference/include/conv1d.h

Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
#include <stdlib.h>
#ifndef __CONVLAYERS_H__
#define __CONVLAYERS_H__

#define ERR_INTERMIDIATE_NOT_INIT -1
#define ERR_TEMPW_NOT_INIT -2
#define ERR_TEMPLRU_NOT_INIT -3
#define ERR_NORMFEATURES_NOT_INIT -4

/**
 * @brief Model parameters for the 1D Convolution Layer
 * @var W pointer to the convolutional weights W
 * @var B pointer to the bias vector for the convolution
 */
typedef struct ConvLayers_Params {
  float* W;
  float* B;
} ConvLayers_Params;

int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations);

int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations);

// Low Rank
/**
 * @brief Model parameters for the Low Rank 1D Convolution Layer
 * @var W1 pointer to the first low-rank component of the convolutional weight W
 * @var W2 pointer to the second low-rank component of the convolutional weight W
 * @var B pointer to the bias vector for the convolution
 * @var rank rank of the weight matrix W (the full weight is W1 * W2)
 */
typedef struct ConvLayers_LR_Params {
  float* W1;
  float* W2;
  float* B;
  unsigned rank;
} ConvLayers_LR_Params;

int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations);

int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations);

// Pool
int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned in_T, unsigned in_channels,
  int padding, unsigned kernel_size, int activations);

#endif
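
For context, a minimal calling sketch for the Conv1D entry point declared above (not part of this commit; the shapes, values, and the helper name run_conv_example are illustrative). The activation codes follow conv1d.c: 1 = sigmoid, 2 = tanh, 3 = relu, anything else = linear; padding = -1 selects kernel_size/2 ("same") padding.

// Hypothetical usage sketch, assuming the shapes documented in conv1d.c:
// input is [in_T, in_channels], W is [out_channels, in_channels, kernel_size],
// output is [out_T, out_channels].
#include <stdlib.h>
#include "conv1d.h"

int run_conv_example(void) {
  unsigned in_T = 8, in_channels = 2, out_channels = 4, kernel_size = 3;
  unsigned out_T = in_T; // with padding = -1 ("same") and an odd kernel, out_T equals in_T
  float *input  = (float*)calloc(in_T * in_channels, sizeof(float));
  float *W      = (float*)calloc(out_channels * in_channels * kernel_size, sizeof(float));
  float *B      = (float*)calloc(out_channels, sizeof(float));
  float *output = (float*)malloc(out_T * out_channels * sizeof(float));

  ConvLayers_Params params = { .W = W, .B = B };
  // padding = -1 -> kernel_size/2 padding; activations = 3 -> relu
  Conv1D(output, out_T, out_channels, input, in_T, in_channels,
         -1, kernel_size, (const void*)&params, 3);

  free(input); free(W); free(B); free(output);
  return 0;
}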

c_reference/include/conv_utils.h

Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
#ifndef __CONVLAYER_UTILS__
#define __CONVLAYER_UTILS__

#include <math.h>
#include <float.h>

int prepareLowRankConvMat(float* out, float* W1, float* W2, unsigned rank, unsigned I, unsigned J);
float sigmoid(float x);
float relu(float x);

#endif

c_reference/src/Makefile

Lines changed: 7 additions & 1 deletion
@@ -6,7 +6,13 @@ include ../config.mk
 INCLUDE_DIR=../include
 IFLAGS = -I $(INCLUDE_DIR)

-all: utils.o fastgrnn.o classifier.o rnnpool.o quantized_utils.o quantized_fastgrnn.o quantized_rnnpool.o quantized_mbconv.o
+all: conv1d.o conv_utils.o utils.o fastgrnn.o classifier.o rnnpool.o quantized_utils.o quantized_fastgrnn.o quantized_rnnpool.o quantized_mbconv.o
+
+conv_utils.o : conv_utils.c
+	$(CC) -o $@ $(IFLAGS) $(CFLAGS) -c $^
+
+conv1d.o : conv1d.c
+	$(CC) -o $@ $(IFLAGS) $(CFLAGS) -c $^

 utils.o: utils.c
 	$(CC) -o $@ $(IFLAGS) $(CFLAGS) -c $^

c_reference/src/conv1d.c

Lines changed: 211 additions & 0 deletions
@@ -0,0 +1,211 @@
#include "conv1d.h"
#include "conv_utils.h"
#include <stdlib.h>
#include <math.h>
#include <stdio.h>

int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations){

  const ConvLayers_LR_Params* tparams = (ConvLayers_LR_Params*) params;

  if(padding == -1){
    padding = kernel_size >> 1;
  }

  float* tempW = (float*)malloc(out_channels * in_channels * kernel_size * sizeof(float));
  prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, out_channels, in_channels * kernel_size);
  // Perform the Convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, out_channels]
  // filter.shape = [out_channels, in_channels, kernel_size]
  for(int t = 0; t < out_T; t++){
    for(int co = 0; co < out_channels; co++){
      float sum = 0;
      for(int tf = 0; tf < kernel_size; tf++){
        for(int ci = 0; ci < in_channels; ci++){
          if(((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
            continue;
          else
            sum += (input_signal[((tf + t) - padding) * in_channels + ci] * tempW[co * in_channels * kernel_size + ci * kernel_size + tf]);
        }
      }
      if(activations == 1){
        output_signal[t * out_channels + co] = sigmoid(sum + tparams->B[co]);
      }
      else if(activations == 2){
        output_signal[t * out_channels + co] = tanh(sum + tparams->B[co]);
      }
      else if(activations == 3){
        output_signal[t * out_channels + co] = relu(sum + tparams->B[co]);
      }
      else{
        output_signal[t * out_channels + co] = sum + tparams->B[co];
      }
    }
  }
  free(tempW);
  return 0;
}

int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations){

  const ConvLayers_LR_Params* tparams = (ConvLayers_LR_Params*) params;

  if(padding == -1){
    padding = kernel_size >> 1;
  }

  float* tempW = (float*)malloc(in_channels * kernel_size * sizeof(float));
  prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, in_channels, kernel_size);
  // Perform the Convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, in_channels]
  // filter.shape = [in_channels, kernel_size]
  for(int t = 0; t < out_T; t++){
    for(int ci = 0; ci < in_channels; ci++){
      float sum = 0;
      for(int tf = 0; tf < kernel_size; tf++){
        if(((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
          continue;
        else
          sum += (input_signal[((tf + t) - padding) * in_channels + ci] * tempW[ci * kernel_size + tf]);
      }
      if(activations == 1){
        output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
      }
      else if(activations == 2){
        output_signal[t * in_channels + ci] = tanh(sum + tparams->B[ci]);
      }
      else if(activations == 3){
        output_signal[t * in_channels + ci] = relu(sum + tparams->B[ci]);
      }
      else{
        output_signal[t * in_channels + ci] = sum + tparams->B[ci];
      }
    }
  }
  free(tempW);
  return 0;
}

int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations){

  const ConvLayers_Params* tparams = (ConvLayers_Params*) params;

  if(padding == -1){
    padding = kernel_size >> 1;
  }
  float sum;
  // Perform the Convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, out_channels]
  // filter.shape = [out_channels, in_channels, kernel_size]
  for(int t = 0; t < out_T; t++){
    for(int co = 0; co < out_channels; co++){
      sum = 0;
      for(int tf = 0; tf < kernel_size; tf++){
        for(int ci = 0; ci < in_channels; ci++){
          if(((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
            continue;
          else
            sum += (input_signal[((tf + t) - padding) * in_channels + ci] * tparams->W[co * in_channels * kernel_size + ci * kernel_size + tf]);
        }
      }
      if(activations == 1){
        output_signal[t * out_channels + co] = sigmoid(sum + tparams->B[co]);
      }
      else if(activations == 2){
        output_signal[t * out_channels + co] = tanh(sum + tparams->B[co]);
      }
      else if(activations == 3){
        output_signal[t * out_channels + co] = relu(sum + tparams->B[co]);
      }
      else{
        output_signal[t * out_channels + co] = sum + tparams->B[co];
      }
    }
  }
  return 0;
}

int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void* params, int activations){

  const ConvLayers_Params* tparams = (ConvLayers_Params*) params;

  if(padding == -1){
    padding = kernel_size >> 1;
  }

  // Perform the Convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, in_channels]
  // filter.shape = [in_channels, kernel_size]
  for(int t = 0; t < out_T; t++){
    for(int ci = 0; ci < in_channels; ci++){
      float sum = 0;
      for(int tf = 0; tf < kernel_size; tf++){
        if(((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
          continue;
        else
          sum += (input_signal[((tf + t) - padding) * in_channels + ci] * tparams->W[ci * kernel_size + tf]);
      }
      if(activations == 1){
        output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
      }
      else if(activations == 2){
        output_signal[t * in_channels + ci] = tanh(sum + tparams->B[ci]);
      }
      else if(activations == 3){
        output_signal[t * in_channels + ci] = relu(sum + tparams->B[ci]);
      }
      else{
        output_signal[t * in_channels + ci] = sum + tparams->B[ci];
      }
    }
  }
  return 0;
}

int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned in_T, unsigned in_channels,
  int padding, unsigned kernel_size, int activations){

  if(padding == -1){
    padding = kernel_size >> 1;
  }

  for(int t = 0; t < out_T; t++){
    for(int ci = 0; ci < in_channels; ci++){
      float sum = 0;
      for(int tf = 0; tf < kernel_size; tf++){
        if(((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
          continue;
        else
          sum += (input_signal[((tf + t) - padding) * in_channels + ci]);
      }
      if(activations == 1){
        output_signal[t * in_channels + ci] = sigmoid(sum / (float)kernel_size);
      }
      else if(activations == 2){
        output_signal[t * in_channels + ci] = tanh(sum / (float)kernel_size);
      }
      else if(activations == 3){
        output_signal[t * in_channels + ci] = relu(sum / (float)kernel_size);
      }
      else{
        output_signal[t * in_channels + ci] = sum / (float)kernel_size;
      }
    }
  }
  return 0;
}
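
Note that all of these functions take out_T from the caller rather than computing it. As a sketch only (not part of this commit, and an assumption about how callers size their buffers): for a stride-1 convolution the conventional relation is out_T = in_T + 2*padding - kernel_size + 1, which reduces to out_T = in_T when padding = kernel_size/2 and kernel_size is odd.

// Hypothetical helper, mirroring the padding handling of the layer functions above.
static unsigned conv1d_out_T(unsigned in_T, int padding, unsigned kernel_size) {
  if (padding == -1) {
    padding = kernel_size >> 1;
  }
  return in_T + 2 * (unsigned)padding - kernel_size + 1;
}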

c_reference/src/conv_utils.c

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
#include "conv_utils.h"
#include <math.h>
#include <float.h>

int prepareLowRankConvMat(float* out, float* W1, float* W2, unsigned rank, unsigned I, unsigned J){
  for(int i = 0; i < I; i++){
    for(int j = 0; j < J; j++){
      float sum = 0;
      for(int k = 0; k < rank; k++){
        sum += (W1[i * rank + k] * W2[k * J + j]);
      }
      out[i * J + j] = sum;
    }
  }
  return 0;
}

float relu(float x) {
  if (x < 0.0f) return 0.0f;
  else return x;
}

float sigmoid(float x) {
  return 1.0f / (1.0f + expf(-1.0f * x));
}
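
prepareLowRankConvMat materializes the factorized weight as a dense matrix, out = W1 x W2, where W1 is I x rank and W2 is rank x J; the low-rank convolution functions call it with I = out_channels (or in_channels for the depthwise case) and J = in_channels * kernel_size (or kernel_size). A tiny sketch, not part of this commit, with illustrative values and a hypothetical function name:

// Hypothetical check: materialize a rank-1 factorized weight and inspect one entry.
#include <stdio.h>
#include "conv_utils.h"

int low_rank_example(void) {
  unsigned I = 2, J = 3, rank = 1;
  float W1[] = {1.0f, 2.0f};         // I x rank
  float W2[] = {0.5f, 0.5f, 0.5f};   // rank x J
  float W[6];                        // I x J, the materialized weight
  prepareLowRankConvMat(W, W1, W2, rank, I, J);
  printf("W[1][2] = %f (expected 1.0)\n", W[1 * J + 2]); // 2.0 * 0.5
  return 0;
}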

c_reference/tests/Makefile

Lines changed: 14 additions & 2 deletions
@@ -8,7 +8,19 @@ MODEL_DIR=../models
 SRC_DIR=../src
 IFLAGS = -I $(INCLUDE_DIR) -I $(MODEL_DIR)

-all: test_fastgrnn_lr test_rnnpool test_quantized_utils test_quantized_fastgrnn test_quantized_rnnpool test_quantized_mbconv test_quantized_face_detection test_quantized_face_detection_fast
+all: test_avg_pool test_conv1d test_conv1d_depth test_conv1d_lr test_conv1d_lr_depth test_fastgrnn_lr test_rnnpool test_quantized_utils test_quantized_fastgrnn test_quantized_rnnpool test_quantized_mbconv test_quantized_face_detection test_quantized_face_detection_fast
+
+CONV1D_DIR=conv1d
+test_conv1d: $(CONV1D_DIR)/conv1d_regular/test_conv1d.c $(SRC_DIR)/conv_utils.o $(SRC_DIR)/conv1d.o
+	$(CC) -o $@ $^ $(IFLAGS) $(CFLAGS) -lm
+test_conv1d_depth: $(CONV1D_DIR)/conv1d_depthwise/test_conv1d_depth.c $(SRC_DIR)/conv_utils.o $(SRC_DIR)/conv1d.o
+	$(CC) -o $@ $^ $(IFLAGS) $(CFLAGS) -lm
+test_conv1d_lr: $(CONV1D_DIR)/conv1d_lr/test_conv1d_lr.c $(SRC_DIR)/conv_utils.o $(SRC_DIR)/conv1d.o
+	$(CC) -o $@ $^ $(IFLAGS) $(CFLAGS) -lm
+test_conv1d_lr_depth: $(CONV1D_DIR)/conv1d_lr_depthwise/test_conv1d_lr_depth.c $(SRC_DIR)/conv_utils.o $(SRC_DIR)/conv1d.o
+	$(CC) -o $@ $^ $(IFLAGS) $(CFLAGS) -lm
+test_avg_pool: $(CONV1D_DIR)/avg_pool/test_avg_pool.c $(SRC_DIR)/conv_utils.o $(SRC_DIR)/conv1d.o
+	$(CC) -o $@ $^ $(IFLAGS) $(CFLAGS) -lm

 FASTGRNN_DIR=fastgrnn
 test_fastgrnn_lr: $(FASTGRNN_DIR)/test_fastgrnn_lr.c $(SRC_DIR)/utils.o $(SRC_DIR)/fastgrnn.o $(SRC_DIR)/classifier.o

@@ -39,7 +51,7 @@ test_quantized_face_detection_fast: $(FACE_DETECTION_DIR)/test_quantized_face_de
 .PHONY: clean cleanest

 clean:
-	rm -f *.o *.gch test_fastgrnn_lr test_rnnpool test_quantized_utils test_quantized_fastgrnn test_quantized_rnnpool test_quantized_mbconv test_quantized_face_detection test_quantized_face_detection_fast
+	rm -f *.o *.gch test_avg_pool test_conv1d test_conv1d_depth test_conv1d_lr test_conv1d_lr_depth test_fastgrnn_lr test_rnnpool test_quantized_utils test_quantized_fastgrnn test_quantized_rnnpool test_quantized_mbconv test_quantized_face_detection test_quantized_face_detection_fast

 cleanest: clean
 	rm *~

c_reference/tests/conv1d/avg_pool/avg_io.h

Lines changed: 13 additions & 0 deletions
Large diffs are not rendered by default.

c_reference/tests/conv1d/avg_pool/test_avg_pool.c

Lines changed: 23 additions & 0 deletions

@@ -0,0 +1,23 @@
#include <stdio.h>
#include <stdlib.h>

#include "avg_io.h"
#include "conv1d.h"
#include "conv_utils.h"

int main(){

  float pred[O_T * O_F] = {0};
  AvgPool1D(pred, O_T, INPUT, I_T, I_F, PAD, FILT, ACT);
  float error = 0;
  for(int t = 0; t < O_T; t++){
    for(int d = 0; d < O_F; d++){
      error += ((pred[t * O_F + d] - OUTPUT[t * O_F + d]) * (pred[t * O_F + d] - OUTPUT[t * O_F + d]));
    }
  }
  float avg_error = error / (O_T * O_F);
  printf("Testing Average Pool\n");
  printf("Squared Error : %f \t ; MSE : %f \n", error, avg_error);

  return 0;
}
