@@ -10,7 +10,7 @@ for (front_name, backend) in (
         @timeit_debug to function $(Symbol("$(front_name)$(backend)!"))(
                         out::Array{T1,4}, in1::Array{T2,4}, in2::Array{T3,4},
                         cdims::ConvDims; kwargs...) where {T1, T2, T3}
-            @warn "Automatically converting $(size(in1)) input tensor to Float32" maxlog=1
+            @warn "Automatically converting input tensor to Float32. This will have performance implications" maxlog=1
             # Output must be of the same type as in the function signature
             T1.($(Symbol("$(front_name)$(backend)!"))(Float32.(out), Float32.(in1),
                                                       Float32.(in2), cdims; kwargs...))
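
For context, a minimal sketch (not part of the diff) of how this wrapper behaves on non-Float32 data. It assumes the NNPACK-enabled NNlib version this diff targets; `conv_nnpack` is the front end defined further down in this file, and the sizes are purely illustrative.

using NNlib

# Hypothetical Float64 inputs in WHCN layout.
x = rand(Float64, 8, 8, 3, 1)
w = rand(Float64, 3, 3, 3, 4)
cdims = NNlib.DenseConvDims(x, w)

# The generic method above converts everything to Float32, warns once
# (maxlog=1), runs the Float32 NNPACK kernel, and converts the result back
# to the element type from the signature (Float64 here).
y = NNlib.conv_nnpack(x, w, cdims)
eltype(y)   # Float64
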
@@ -20,26 +20,26 @@
 
 
 function conv_nnpack(x::Array{T1, 4}, w::Array{T2, 4}, cdims::ConvDims; kwargs...) where {T1, T2}
-    y = similar(x, output_size(cdims), channels_out(cdims), size(x, 4))
+    y = similar(x, output_size(cdims)..., channels_out(cdims), size(x, 4))
     return conv_nnpack!(y, x, w, cdims; kwargs...)
 end
 
 
 function ∇conv_data(dy::Array{T1, 4}, w::Array{T2, 4}, cdims::ConvDims; kwargs...) where {T1, T2}
-    dx = similar(dy, input_size(cdims), channels_in(cdims), size(dy, 4))
+    dx = similar(dy, input_size(cdims)..., channels_in(cdims), size(dy, 4))
     return ∇conv_data!(dx, dy, w, cdims; kwargs...)
 end
 
 
 function ∇conv_filter(x::Array{T1, 4}, dy::Array{T2, 4}, cdims::ConvDims; kwargs...) where {T1, T2}
-    dw = similar(x, kernel_size(cdims), channels_in(cdims), channels_out(cdims))
+    dw = similar(x, kernel_size(cdims)..., channels_in(cdims), channels_out(cdims))
     return ∇conv_filter!(dw, x, dy, cdims; kwargs...)
 end
 
 
 function maxpool_nnpack!(y::Array{T1, 4}, x::Array{T2, 4}, pdims::PoolDims;
                          kwargs...) where {T1, T2}
-    @warn "Automatically converting $(size(x)) input tensor to Float32" maxlog=1
+    @warn "Automatically converting input tensor to Float32. This will have performance implications" maxlog=1
     # We want the output to be of the same type as desired
     T1.(maxpool_nnpack!(Float32.(y), Float32.(x), pdims; kwargs...))
 end
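
A minimal sketch (not part of the diff) of why the added `...` splat matters: `output_size(cdims)` returns a tuple of spatial sizes, and `similar` needs them as individual dimension arguments. The helpers are the same NNlib ones used in the diff; the sizes below are illustrative.

using NNlib

x = rand(Float32, 5, 5, 1, 1)
w = rand(Float32, 3, 3, 1, 1)
cdims = NNlib.DenseConvDims(x, w)

NNlib.output_size(cdims)    # (3, 3) -- a tuple, not two separate dimensions

# Old form: the tuple is passed as a single "dimension" argument, which
# `similar` has no method for:
# similar(x, NNlib.output_size(cdims), NNlib.channels_out(cdims), size(x, 4))

# Fixed form: splatting spreads the spatial sizes into separate arguments.
y = similar(x, NNlib.output_size(cdims)..., NNlib.channels_out(cdims), size(x, 4))
size(y)     # (3, 3, 1, 1)
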
@@ -49,3 +49,26 @@ function maxpool_nnpack(x::Array{T, 4}, pdims::PoolDims; kwargs...) where {T}
     y = similar(x, output_size(pdims)..., channels_out(pdims), size(x, 4))
     return maxpool_nnpack!(y, x, pdims; kwargs...)
 end
+
+
+"""
+    check_supported_operation(x::Array, cdims::DenseConvDims)
+
+Returns `true` if nnpack supports the convolution operation for the given input.
+"""
+function check_supported_operation(x::Array{T, 4}, cdims::DenseConvDims{2, K, C_in,
+                                   C_out, S, P, (1, 1), F}) where {T, K, C_in, C_out, S, P, F}
+    val = size(x)[1:2] .+ (P[1] + P[2], P[3] + P[4]) .- K
+    return val .% S == (0, 0) ? true : false
+end
+
+
+"""
+    check_supported_operation(x::Array, pdims::PoolDims)
+
+Returns `true` if nnpack supports the pooling operation for the given input.
+"""
+function check_supported_operation(x::Array{T, 4}, pdims::PoolDims{2, K, S, P, (1, 1)}) where {T, K, S, P}
+    val = size(x)[1:2] .+ (P[1] + P[2], P[3] + P[4]) .- K
+    return val .% S == (0, 0) ? true : false
+end
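
Finally, a minimal usage sketch (not part of the diff), written as if appended to this file. `conv_with_fallback` is a hypothetical name; the new check only has methods for unit dilation, and it tests whether, given the stride, the kernel tiles the padded input evenly along both spatial dimensions.

# Hypothetical dispatcher: take the NNPACK path only when the geometry check
# passes, otherwise fall back to NNlib's generic conv.
function conv_with_fallback(x::Array{Float32, 4}, w::Array{Float32, 4},
                            cdims::DenseConvDims)
    if check_supported_operation(x, cdims)
        return conv_nnpack(x, w, cdims)
    end
    return conv(x, w, cdims)    # generic fallback path
end

x = rand(Float32, 32, 32, 3, 1)
w = rand(Float32, 3, 3, 3, 8)
y = conv_with_fallback(x, w, DenseConvDims(x, w))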