Skip to content

Commit 3bf6668

Browse files
author
Anirudh B H
committed
1D convolutional layers with norm and activation
1 parent d85b7ab commit 3bf6668

File tree

2 files changed

+285
-0
lines changed

2 files changed

+285
-0
lines changed

c_reference/include/conv1d.h

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
#ifndef __CONVLAYERS_H__
2+
#define __CONVLAYERS_H__
3+
4+
#define ERR_INTERMIDIATE_NOT_INIT -1
5+
#define ERR_TEMPW_NOT_INIT -2
6+
#define ERR_TEMPLRU_NOT_INIT -3
7+
#define ERR_NORMFEATURES_NOT_INIT -4
8+
9+
/**
10+
* @brief Model paramters for the 1D Convolution Layer
11+
* @var mean pointer to mean of input vector for normalization, size inputDims
12+
* @var stdDev pointer to standard dev of input for normalization, size inputDims*steps
13+
* @var W pointer to convolutional weights W
14+
* @var B pointer to the bias vector for the convolution
15+
*/
16+
typedef struct ConvLayers_Params{
17+
float* mean;
18+
float* stdDev;
19+
float* W;
20+
float* B;
21+
} ConvLayers_Params;
22+
23+
24+
int Conv1D(float *output_signal, unsigned out_channels, const float *input_signal,
25+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
26+
const void* params,int normalize, int activations);
27+
28+
int Conv1D_Depth(float *output_signal, const float *input_signal,
29+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
30+
const void* params,int normalize, int activations);
31+
32+
// Low Rank
33+
/**
34+
* @brief Model paramters for the Low Rank 1D Convolution Layer
35+
* @var mean pointer to mean of input vector for normalization, size inputDims
36+
* @var stdDev pointer to standard dev of input for normalization, size inputDims
37+
* @var W1 pointer to first low-rank component of the convolutional weight W
38+
* @var W2 pointer to second low-rank component of the convolutional weight W
39+
* @var Rank rank of W matrix
40+
* @var B pointer to the bias vector for the convolution
41+
*/
42+
typedef struct ConvLayers_LR_Params{
43+
float* mean;
44+
float* stdDev;
45+
float* W1;
46+
float* W2;
47+
float* B;
48+
unsigned rank;
49+
} ConvLayers_LR_Params;
50+
51+
int Conv1D_LR(float *output_signal, unsigned out_channels, const float *input_signal,
52+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
53+
const void* params,int normalize, int activations);
54+
55+
int Conv1D_Depth_LR(float *output_signal, const float *input_signal,
56+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
57+
const void* params,int normalize, int activations);
58+
59+
//Pool
60+
AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned N, unsigned in_T, unsigned in_channels,
61+
int padding, unsigned kernel_size, int activations);
62+
63+
#endif

c_reference/src/conv1d.c

Lines changed: 222 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,222 @@
1+
#include <math.h>
#include <stdlib.h>

#include "conv1d.h"
#include "conv_utils.h"
3+
4+
int Conv1D_LR(float *output_signal, unsigned out_channels, const float *input_signal,
5+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
6+
const void* params,int normalize, int activations){
7+
8+
const ConvLayers_LR_Params* tparams= (ConvLayers_LR_Params*) params;
9+
10+
if(padding == -1){
11+
padding = kernel_size >> 1;
12+
}
13+
float* tempW = (float*)malloc(out_channels * in_channels * kernel_size * sizeof(float)) ;
14+
prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, out_channels, in_channels * kernel_size)
15+
16+
// Perform the Convolution
17+
// input.shape = [N, in_T, in_channels]
18+
// output.shape = [N, out_T, out_channels]
19+
// filter.shape = [out_channels, in_channels, kernel_size]
20+
for(int t = 0; t < out_T; t++){
21+
if (normalize) {
22+
for (unsigned d = 0; d < in_channels; d++)
23+
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
24+
// v_add(1.0f, input_signal + t * in_channels, -1.0f, tparams->mean,
25+
// in_channels, tbuffers->normFeatures);
26+
// v_div(tparams->stdDev + t * in_channels, tbuffers->normFeatures, in_channels,
27+
// tbuffers->normFeatures);
28+
}
29+
30+
for(int co = 0; co < out_channels ; co++){
31+
float sum = 0;
32+
for(int tf = 0 ; tf < kernel_size ; tf++ ){
33+
for(int ci = 0 ; ci < in_channels ; ci++){
34+
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
35+
sum += (a * tempW[co * in_channels * kernel_size + ci * kernel_size + tf]);
36+
}
37+
}
38+
if(activations == 1){
39+
output_signal[n * out_channels * out_T + t * out_channels + co] = sigmoid(sum + tparams->B[co]);
40+
}
41+
else if(activations == 2){
42+
output_signal[n * out_channels * out_T + t * out_channels + co] = tanh(sum + tparams->B[co]);
43+
}
44+
else if(activations == 3){
45+
output_signal[n * out_channels * out_T + t * out_channels + co] = relu(sum + tparams->B[co]);
46+
}
47+
else{
48+
output_signal[n * out_channels * out_T + t * out_channels + co] = sum + tparams->B[co];
49+
}
50+
}
51+
}
52+
free(tempW)
53+
return 0;
54+
}
55+
56+
57+
int Conv1D_Depth_LR(float *output_signal, const float *input_signal,
58+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
59+
const void* params,int normalize, int activations){
60+
61+
const ConvLayers_LR_Params* tparams= (ConvLayers_LR_Params*) params;
62+
63+
if (tempW == 0) return ERR_TEMPW_NOT_INIT;
64+
65+
if(padding == -1){
66+
padding = kernel_size >> 1;
67+
}
68+
69+
float* tempW = (float*)malloc(out_channels * in_channels * kernel_size * sizeof(float)) ;
70+
prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, in_channels, in_channels * kernel_size)
71+
// Perform the Convolution
72+
// input.shape = [N, in_T, in_channels]
73+
// output.shape = [N, out_T, in_channels]
74+
// filter.shape = [(out)in_channels, in_channels, kernel_size]
75+
for(int t = 0; t < out_T; t++){
76+
if (normalize) {
77+
for (unsigned d = 0; d < in_channels; d++)
78+
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
79+
}
80+
for(int ci = 0; ci < in_channels ; ci++){
81+
float sum = 0;
82+
for(int tf = 0 ; tf < kernel_size ; tf++ ){
83+
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
84+
sum += (a * tempW[ci * in_channels * kernel_size + ci * kernel_size + tf]);
85+
}
86+
if(activations == 1){
87+
output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
88+
}
89+
else if(activations == 2){
90+
output_signal[t * in_channels + ci] = tanh(sum + tparams->B[ci]);
91+
}
92+
else if(activations == 3){
93+
output_signal[t * in_channels + ci] = relu(sum + tparams->B[ci]);
94+
}
95+
else{
96+
output_signal[t * in_channels + ci] = sum + tparams->B[ci];
97+
}
98+
}
99+
}
100+
free(tempW)
101+
return 0;
102+
}
103+
104+
105+
106+
int Conv1D(float *output_signal, unsigned out_channels, const float *input_signal,
107+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
108+
const void* params,int normalize, int activations){
109+
110+
const ConvLayers_Params* tparams= (ConvLayers_Params*) params;
111+
112+
if(padding == -1){
113+
padding = kernel_size >> 1;
114+
}
115+
116+
// Perform the Convolution
117+
// input.shape = [N, in_T, in_channels]
118+
// output.shape = [N, out_T, out_channels]
119+
// filter.shape = [out_channels, in_channels, kernel_size]
120+
for(int t = 0; t < out_T; t++){
121+
if (normalize) {
122+
for (unsigned d = 0; d < in_channels; d++)
123+
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
124+
}
125+
126+
for(int co = 0; co < out_channels ; co++){
127+
float sum = 0;
128+
for(int tf = 0 ; tf < kernel_size ; tf++ ){
129+
for(int ci = 0 ; ci < in_channels ; ci++){
130+
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? continue : input_signal[((tf + t) - padding) * in_channels + ci]);
131+
sum += (a * tparams->W[co * in_channels * kernel_size + ci * kernel_size + tf]);
132+
}
133+
}
134+
if(activations == 1){
135+
output_signal[n * out_channels * out_T + t * out_channels + co] = sigmoid(sum + tparams->B[co]);
136+
}
137+
else if(activations == 2){
138+
output_signal[n * out_channels * out_T + t * out_channels + co] = tanh(sum + tparams->B[co]);
139+
}
140+
else if(activations == 3){
141+
output_signal[n * out_channels * out_T + t * out_channels + co] = relu(sum + tparams->B[co]);
142+
}
143+
else{
144+
output_signal[n * out_channels * out_T + t * out_channels + co] = sum + tparams->B[co];
145+
}
146+
}
147+
}
148+
return 0;
149+
}
150+
151+
int Conv1D_Depth(float *output_signal, const float *input_signal,
152+
unsigned N, unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
153+
const void* params,int normalize, int activations){
154+
155+
const ConvLayers_LR_Params* tparams= (ConvLayers_LR_Params*) params;
156+
157+
if(padding == -1){
158+
padding = kernel_size >> 1;
159+
}
160+
161+
// Perform the Convolution
162+
// input.shape = [N, in_T, in_channels]
163+
// output.shape = [N, out_T, in_channels]
164+
// filter.shape = [(out)in_channels, in_channels, kernel_size]
165+
for(int t = 0; t < out_T; t++){
166+
if (normalize) {
167+
for (unsigned d = 0; d < in_channels; d++)
168+
input_signal[t * in_channels + d] = ((input_signal[t * in_channels + d] - tparams->mean[d]) / tparams->stdDev[d]);
169+
}
170+
for(int ci = 0; ci < in_channels ; ci++){
171+
float sum = 0;
172+
for(int tf = 0 ; tf < kernel_size ; tf++ ){
173+
float a = ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
174+
sum += (a * tparams->W[ci * in_channels * kernel_size + ci * kernel_size + tf]);
175+
}
176+
if(activations == 1){
177+
output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
178+
}
179+
else if(activations == 2){
180+
output_signal[t * in_channels + ci] = tanh(sum + tparams->B[ci]);
181+
}
182+
else if(activations == 3){
183+
output_signal[t * in_channels + ci] = relu(sum + tparams->B[ci]);
184+
}
185+
else{
186+
output_signal[t * in_channels + ci] = sum + tparams->B[ci];
187+
}
188+
}
189+
}
190+
return 0;
191+
}
192+
193+
int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned N, unsigned in_T, unsigned in_channels,
194+
int padding, unsigned kernel_size, int activations){
195+
196+
if(padding == -1){
197+
padding = kernel_size >> 1;
198+
}
199+
200+
for(int t = 0; t < out_T; t++){
201+
for(int ci = 0 ; ci < in_channels){
202+
float sum = 0;
203+
for(int tf = ; tf < kernel_size ; tf++){
204+
sum += ((((t + tf) < padding) || ((t + tf) >= (in_T + padding))) ? 0 : input_signal[((tf + t) - padding) * in_channels + ci]);
205+
}
206+
if(activations == 1){
207+
output_signal[t * in_channels + ci] = sigmoid(sum);
208+
}
209+
else if(activations == 2){
210+
output_signal[t * in_channels + ci] = tanh(sum);
211+
}
212+
else if(activations == 3){
213+
output_signal[t * in_channels + ci] = relu(sum);
214+
}
215+
else{
216+
output_signal[t * in_channels + ci] = sum;
217+
}
218+
219+
}
220+
}
221+
return 0;
222+
}

0 commit comments

Comments
 (0)