#include "conv1d.h"
#include "conv_utils.h"
#include <stdlib.h>
#include <math.h>
#include <stdio.h>

int Conv1D_LR(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void *params, int activations) {

  const ConvLayers_LR_Params *tparams = (ConvLayers_LR_Params *)params;

  // padding == -1 selects symmetric padding of kernel_size / 2 ("same" padding for odd kernels)
  if (padding == -1) {
    padding = kernel_size >> 1;
  }

  // Reconstruct the full convolution filter from its low-rank factors W1 and W2
  float *tempW = (float *)malloc(out_channels * in_channels * kernel_size * sizeof(float));
  prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, out_channels, in_channels * kernel_size);
  // Perform the convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, out_channels]
  // filter.shape = [out_channels, in_channels, kernel_size]
  for (int t = 0; t < out_T; t++) {
    for (int co = 0; co < out_channels; co++) {
      float sum = 0;
      for (int tf = 0; tf < kernel_size; tf++) {
        for (int ci = 0; ci < in_channels; ci++) {
          // Skip taps that fall in the (implicitly zero-valued) padded region
          if (((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
            continue;
          else
            sum += (input_signal[((tf + t) - padding) * in_channels + ci] *
              tempW[co * in_channels * kernel_size + ci * kernel_size + tf]);
        }
      }
      // activations: 1 = sigmoid, 2 = tanh, 3 = ReLU, any other value = linear (bias only)
      if (activations == 1) {
        output_signal[t * out_channels + co] = sigmoid(sum + tparams->B[co]);
      }
      else if (activations == 2) {
        output_signal[t * out_channels + co] = tanh(sum + tparams->B[co]);
      }
      else if (activations == 3) {
        output_signal[t * out_channels + co] = relu(sum + tparams->B[co]);
      }
      else {
        output_signal[t * out_channels + co] = sum + tparams->B[co];
      }
    }
  }
  free(tempW);
  return 0;
}
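
// Illustrative usage sketch for Conv1D_LR, not part of the library itself.
// Assumptions (inferred from the usage above, not guaranteed by conv1d.h):
//   * ConvLayers_LR_Params exposes the W1, W2, B and rank members used here,
//     with W1 of shape [out_channels, rank] and W2 of shape
//     [rank, in_channels * kernel_size];
//   * with padding == -1 and an odd kernel, the caller sizes out_T == in_T.
// Guarded out of the normal build; define CONV1D_USAGE_EXAMPLES to compile it.
#ifdef CONV1D_USAGE_EXAMPLES
static void conv1d_lr_usage_example(void) {
  enum { IN_T = 8, IN_CHANNELS = 4, OUT_CHANNELS = 6, KERNEL_SIZE = 3, RANK = 2 };
  float input[IN_T * IN_CHANNELS] = { 0 };             // [in_T, in_channels]
  float w1[OUT_CHANNELS * RANK] = { 0 };                // low-rank factor 1
  float w2[RANK * IN_CHANNELS * KERNEL_SIZE] = { 0 };   // low-rank factor 2
  float bias[OUT_CHANNELS] = { 0 };
  float output[IN_T * OUT_CHANNELS];                    // [out_T, out_channels]

  ConvLayers_LR_Params params = {
    .W1 = w1, .W2 = w2, .B = bias, .rank = RANK
  };

  // padding = -1 selects kernel_size >> 1; activations = 3 applies ReLU
  Conv1D_LR(output, IN_T, OUT_CHANNELS, input, IN_T, IN_CHANNELS,
    -1, KERNEL_SIZE, &params, 3);
}
#endif  // CONV1D_USAGE_EXAMPLES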

int Conv1D_Depth_LR(float *output_signal, unsigned out_T, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void *params, int activations) {

  const ConvLayers_LR_Params *tparams = (ConvLayers_LR_Params *)params;

  // padding == -1 selects symmetric padding of kernel_size / 2 ("same" padding for odd kernels)
  if (padding == -1) {
    padding = kernel_size >> 1;
  }

  // Reconstruct the depthwise filter (one kernel per channel) from the low-rank factors
  float *tempW = (float *)malloc(in_channels * kernel_size * sizeof(float));
  prepareLowRankConvMat(tempW, tparams->W1, tparams->W2, tparams->rank, in_channels, kernel_size);
  // Perform the depthwise convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, in_channels]
  // filter.shape = [in_channels, kernel_size] (depthwise: out_channels == in_channels)
  for (int t = 0; t < out_T; t++) {
    for (int ci = 0; ci < in_channels; ci++) {
      float sum = 0;
      for (int tf = 0; tf < kernel_size; tf++) {
        // Skip taps that fall in the (implicitly zero-valued) padded region
        if (((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
          continue;
        else
          sum += (input_signal[((tf + t) - padding) * in_channels + ci] * tempW[ci * kernel_size + tf]);
      }
      // activations: 1 = sigmoid, 2 = tanh, 3 = ReLU, any other value = linear (bias only)
      if (activations == 1) {
        output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
      }
      else if (activations == 2) {
        output_signal[t * in_channels + ci] = tanh(sum + tparams->B[ci]);
      }
      else if (activations == 3) {
        output_signal[t * in_channels + ci] = relu(sum + tparams->B[ci]);
      }
      else {
        output_signal[t * in_channels + ci] = sum + tparams->B[ci];
      }
    }
  }
  free(tempW);
  return 0;
}

int Conv1D(float *output_signal, unsigned out_T, unsigned out_channels, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void *params, int activations) {

  const ConvLayers_Params *tparams = (ConvLayers_Params *)params;

  // padding == -1 selects symmetric padding of kernel_size / 2 ("same" padding for odd kernels)
  if (padding == -1) {
    padding = kernel_size >> 1;
  }

  // Perform the convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, out_channels]
  // filter.shape = [out_channels, in_channels, kernel_size]
  for (int t = 0; t < out_T; t++) {
    for (int co = 0; co < out_channels; co++) {
      float sum = 0;
      for (int tf = 0; tf < kernel_size; tf++) {
        for (int ci = 0; ci < in_channels; ci++) {
          // Skip taps that fall in the (implicitly zero-valued) padded region
          if (((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
            continue;
          else
            sum += (input_signal[((tf + t) - padding) * in_channels + ci] *
              tparams->W[co * in_channels * kernel_size + ci * kernel_size + tf]);
        }
      }
      // activations: 1 = sigmoid, 2 = tanh, 3 = ReLU, any other value = linear (bias only)
      if (activations == 1) {
        output_signal[t * out_channels + co] = sigmoid(sum + tparams->B[co]);
      }
      else if (activations == 2) {
        output_signal[t * out_channels + co] = tanh(sum + tparams->B[co]);
      }
      else if (activations == 3) {
        output_signal[t * out_channels + co] = relu(sum + tparams->B[co]);
      }
      else {
        output_signal[t * out_channels + co] = sum + tparams->B[co];
      }
    }
  }
  return 0;
}
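
// Usage sketch for the full-rank variant (illustrative, not a prescribed API):
// assuming tparams->W is the flattened [out_channels, in_channels, kernel_size]
// filter and tparams->B holds one bias per output channel, as the indexing
// above implies, a "same"-padded, activation-free call could look like
//
//   ConvLayers_Params p = { .W = weights, .B = bias };
//   Conv1D(output, in_T, out_channels, input, in_T, in_channels,
//     -1, kernel_size, &p, 0);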

int Conv1D_Depth(float *output_signal, unsigned out_T, const float *input_signal,
  unsigned in_T, unsigned in_channels, int padding, unsigned kernel_size,
  const void *params, int activations) {

  const ConvLayers_Params *tparams = (ConvLayers_Params *)params;

  // padding == -1 selects symmetric padding of kernel_size / 2 ("same" padding for odd kernels)
  if (padding == -1) {
    padding = kernel_size >> 1;
  }

  // Perform the depthwise convolution
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, in_channels]
  // filter.shape = [in_channels, kernel_size] (depthwise: out_channels == in_channels)
  for (int t = 0; t < out_T; t++) {
    for (int ci = 0; ci < in_channels; ci++) {
      float sum = 0;
      for (int tf = 0; tf < kernel_size; tf++) {
        // Skip taps that fall in the (implicitly zero-valued) padded region
        if (((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
          continue;
        else
          sum += (input_signal[((tf + t) - padding) * in_channels + ci] * tparams->W[ci * kernel_size + tf]);
      }
      // activations: 1 = sigmoid, 2 = tanh, 3 = ReLU, any other value = linear (bias only)
      if (activations == 1) {
        output_signal[t * in_channels + ci] = sigmoid(sum + tparams->B[ci]);
      }
      else if (activations == 2) {
        output_signal[t * in_channels + ci] = tanh(sum + tparams->B[ci]);
      }
      else if (activations == 3) {
        output_signal[t * in_channels + ci] = relu(sum + tparams->B[ci]);
      }
      else {
        output_signal[t * in_channels + ci] = sum + tparams->B[ci];
      }
    }
  }
  return 0;
}

int AvgPool1D(float *output_signal, unsigned out_T, const float *input_signal, unsigned in_T, unsigned in_channels,
  int padding, unsigned kernel_size, int activations) {

  // padding == -1 selects symmetric padding of kernel_size / 2 ("same" padding for odd kernels)
  if (padding == -1) {
    padding = kernel_size >> 1;
  }

  // Average pooling over the time axis, one channel at a time
  // input.shape  = [in_T, in_channels]
  // output.shape = [out_T, in_channels]
  for (int t = 0; t < out_T; t++) {
    for (int ci = 0; ci < in_channels; ci++) {
      float sum = 0;
      for (int tf = 0; tf < kernel_size; tf++) {
        // Skip taps that fall in the (implicitly zero-valued) padded region
        if (((t + tf) < padding) || ((t + tf) >= (in_T + padding)))
          continue;
        else
          sum += (input_signal[((tf + t) - padding) * in_channels + ci]);
      }
      // activations: 1 = sigmoid, 2 = tanh, 3 = ReLU, any other value = plain average
      if (activations == 1) {
        output_signal[t * in_channels + ci] = sigmoid(sum / (float)kernel_size);
      }
      else if (activations == 2) {
        output_signal[t * in_channels + ci] = tanh(sum / (float)kernel_size);
      }
      else if (activations == 3) {
        output_signal[t * in_channels + ci] = relu(sum / (float)kernel_size);
      }
      else {
        output_signal[t * in_channels + ci] = sum / (float)kernel_size;
      }
    }
  }
  return 0;
}
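
// Usage sketch for AvgPool1D (illustrative): pooling takes no parameter struct,
// so a "same"-padded average over a 3-sample window with no activation could be
//
//   AvgPool1D(output, in_T, input, in_T, in_channels, -1, 3, 0);
//
// assuming the caller sizes output as [out_T, in_channels] with out_T == in_T.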