Commit 480754a

Merge pull request #153 from FluxML/sf/nnpack_fix
Fix asymmetric padding arguments to NNPACK's `conv()`
2 parents: 9cfd08a + 9df3631

7 files changed: +26 −15 lines
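In brief: NNlib carries 2-d padding as a 4-tuple of per-side amounts, but the NNPACK binding collapsed it to two values and fed them to the wrong axes, so asymmetrically padded convolutions came out wrong. This commit routes all four sides through to `nnp_padding`, generalizes the pooling-only `check_supported_operation` into an `nnpack_supported_operation` dispatch covering both convolution and pooling, tightens a few `alpha`/`beta` keyword defaults, and adds `conv_nnpack` to the shared convolution test loop.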

src/conv.jl

Lines changed: 1 addition & 1 deletion

```diff
@@ -159,7 +159,7 @@ end
 if is_nnpack_available()
     function conv(x::Array{xT, 4}, w::Array{wT, 4},
                   cdims::DenseConvDims{2, K, C_in, C_out, (1, 1), P, (1, 1), F};
-                  kwargs...) where {xT, wT, K, C_in, C_out, S, P, F}
+                  kwargs...) where {xT, wT, K, C_in, C_out, P, F}
        return conv_nnpack(x, w, cdims; kwargs...)
    end
end
```
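Why `S` goes away: the stride slot of the `DenseConvDims` signature is already the literal `(1, 1)` (unchanged context line above), so `S` is never bound anywhere, and Julia warns about type variables that a method declares but does not use. A minimal sketch of the pitfall, independent of NNlib:

```julia
# Declaring a type variable the signature never uses draws a warning at
# definition time, and the variable is undefined inside the method body:
f(x::Vector) where {T} = length(x)
# WARNING: method definition for f ... declares type variable T but does not use it.
```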

src/impl/depthwiseconv_direct.jl

Lines changed: 3 additions & 3 deletions

```diff
@@ -20,7 +20,7 @@ See the docstring for `conv_direct!()` for more on the optional parameters.
 """
 function depthwiseconv_direct!(y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
                                w::AbstractArray{wT,5}, cdims::DepthwiseConvDims;
-                               alpha::yT = yT(1), beta = false) where {yT, xT, wT}
+                               alpha::yT=yT(1), beta=false) where {yT, xT, wT}
     check_dims(size(x), size(w), size(y), cdims)

     width, height, depth = input_size(cdims)
@@ -135,7 +135,7 @@ for each batch and channel independently.
 function ∇depthwiseconv_data_direct!(
                 dx::AbstractArray{xT,5}, dy::AbstractArray{yT,5},
                 w::AbstractArray{wT,5}, cdims::DepthwiseConvDims;
-                alpha::xT=xT(1), beta::xT=xT(0)) where {xT, yT, wT}
+                alpha::xT=xT(1), beta=false) where {xT, yT, wT}
     # We do a separate convolution for each channel in x
     @inbounds for cidx in 1:channels_in(cdims)
         # For this batch and in-channel, we have a normal transposed convolution
@@ -168,7 +168,7 @@ Calculate the gradient imposed upon `w` in the depthwise convolution `y = x * w`
 function ∇depthwiseconv_filter_direct!(
                 dw::AbstractArray{wT,5}, x::AbstractArray{xT,5},
                 dy::AbstractArray{yT,5}, cdims::DepthwiseConvDims;
-                alpha::wT=wT(1),beta::wT=wT(0)) where {xT, yT, wT}
+                alpha::wT=wT(1),beta=false) where {xT, yT, wT}
     # We do a separate convolution for each channel in x
     @inbounds for cidx in 1:channels_in(cdims)
         # For this batch and in-channel, we have a normal transposed convolution
```
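The `beta::xT=xT(0)` → `beta=false` change is more than cosmetic: in Julia, `false` is a "strong zero" under `*`, so `beta * y` reliably wipes an uninitialized output buffer even if it holds `NaN`s, and the keyword no longer forces callers to match the output element type. A quick illustration:

```julia
# `false` annihilates NaN; a typed zero does not:
0.0 * NaN    # NaN  -- garbage in a freshly allocated buffer would leak through
false * NaN  # 0.0  -- beta = false guarantees the output is overwritten
```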

src/impl/depthwiseconv_im2col.jl

Lines changed: 3 additions & 3 deletions

```diff
@@ -14,7 +14,7 @@ function depthwiseconv_im2col!(
         y::AbstractArray{T,5}, x::AbstractArray{T,5},
         w::AbstractArray{T,5}, cdims::DepthwiseConvDims;
         col::AbstractArray{T,2} = similar(x, im2col_dims(cdims)),
-        alpha=T(1), beta=T(0)) where T
+        alpha::T=T(1), beta::T=T(0)) where T
     check_dims(size(x), size(w), size(y), cdims)

     # This functions exactly the same as conv_im2col!(), except that we shard the
@@ -56,7 +56,7 @@ function ∇depthwiseconv_filter_im2col!(
         dw::AbstractArray{T,5}, x::AbstractArray{T,5},
         dy::AbstractArray{T,5}, cdims::DepthwiseConvDims;
         col::AbstractArray{T,2} = similar(dw, im2col_dims(cdims)),
-        alpha=T(1), beta=T(0)) where T
+        alpha::T=T(1), beta::T=T(0)) where T
     check_dims(size(x), size(dw), size(dy), cdims)

     M = prod(kernel_size(cdims))
@@ -96,7 +96,7 @@ function ∇depthwiseconv_data_im2col!(
         dx::AbstractArray{T,5}, dy::AbstractArray{T,5},
         w::AbstractArray{T,5}, cdims::DepthwiseConvDims;
         col::AbstractArray{T,2} = similar(dx, im2col_dims(cdims)),
-        alpha=T(1), beta=T(0)) where T
+        alpha::T=T(1), beta::T=T(0)) where T
     check_dims(size(dx), size(w), size(dy), cdims)

     M = prod(output_size(cdims))
```
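Here the annotations go the other way: the im2col path bottoms out in a BLAS `gemm!` call, where the scalar multipliers must match the matrix element type, so `alpha::T`/`beta::T` surfaces a mismatch at the NNlib boundary rather than deep inside the GEMM call. A small sketch of the underlying constraint, using LinearAlgebra's wrapper:

```julia
using LinearAlgebra

A = rand(Float32, 4, 4); B = rand(Float32, 4, 4); C = zeros(Float32, 4, 4)
BLAS.gemm!('N', 'N', 1.0f0, A, B, 0.0f0, C)  # ok: Float32 scalars, Float32 matrices
# BLAS.gemm!('N', 'N', 1.0, A, B, 0.0, C)    # MethodError: Float64 scalars don't match
```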

src/nnpack/interface.jl

Lines changed: 12 additions & 4 deletions

```diff
@@ -52,11 +52,19 @@ end


 """
-    check_supported_operation(x::Array, pdims::PoolDims)
+    nnpack_supported_operation(cdims::ConvDims)
+    nnpack_supported_operation(pdims::PoolDims)

-Returns `true` if nnpack supports the pooling operation for the given input.
+Returns `true` if nnpack supports the convolution/pooling operation for the given parameters.
 """
-function check_supported_operation(x::Array{T, 4}, pdims::PoolDims{2, K, S, P, (1, 1)}) where {T, K, S, P}
-    val = size(x)[1:2] .+ (P[1] + P[2], P[3] + P[4]) .- K
+function nnpack_supported_operation(pdims::PoolDims{2, K, S, P, (1, 1)}) where {K, S, P}
+    val = input_size(pdims)[1:2] .+ (P[1] + P[2], P[3] + P[4]) .- K
     return val .% S == (0, 0) ? true : false
 end
+
+function nnpack_supported_operation(cdims::ConvDims{2, K, (1, 1), P, (1, 1)}) where {K, S, P}
+    return true
+end
+
+# Return false for everything else
+nnpack_supported_operation(dims) = false
```
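The pooling predicate checks that the padded input tiles evenly by the stride, which NNPACK's max pooling requires, while any 2-d convolution with unit stride and dilation is accepted. A worked instance of the divisibility check, with illustrative numbers:

```julia
# 6×6 input, 2×2 window, stride 2, no padding:
insz = (6, 6)
K, S, P = (2, 2), (2, 2), (0, 0, 0, 0)
val = insz .+ (P[1] + P[2], P[3] + P[4]) .- K  # (4, 4)
val .% S == (0, 0)                             # true => the NNPACK path is usable
```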

src/nnpack/libnnpack.jl

Lines changed: 1 addition & 1 deletion

```diff
@@ -127,7 +127,7 @@ end
 function nnp_convolution_output(y::Array{Float32,4}, x::Array{Float32,4}, w::Array{Float32,4}, b::Array{Float32,1}; algo::nnp_convolution_algorithm = UInt32(0), workspace_buffer = nothing, workspace_size = 0, padding = 0, stride = 1, threadpool = C_NULL, profile = nothing)
     input_size = nnp_size(Csize_t.((size(x,1), size(x,2)))...)
     kernel_size = nnp_size(Csize_t.((size(w,1),size(w,2)))...)
-    input_padding = nnp_padding(Csize_t(padding[2]), Csize_t(padding[1]), Csize_t(padding[2]), Csize_t(padding[1]))
+    input_padding = nnp_padding(Csize_t(padding[3]), Csize_t(padding[2]), Csize_t(padding[4]), Csize_t(padding[1]))
     profile = profile == nothing ? nnp_profile() : profile
     workspace_buffer = workspace_buffer === nothing ? C_NULL : workspace_buffer
     nnp_convolution_output(UInt32(algo), size(x,4), size(x,3), size(w,4), input_size, input_padding, kernel_size, x, w, b, y, workspace_buffer, workspace_size, UInt32(0), C_NULL, threadpool, profile)
```
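This line is the heart of the fix. NNlib orders a 2-d padding tuple as `(w_begin, w_end, h_begin, h_end)`, while NNPACK's `nnp_padding` struct takes its four sides in the order top, right, bottom, left. The old call reused `padding[1]`/`padding[2]` for both members of each pair, silently symmetrizing (and cross-wiring) asymmetric padding; the new call routes each side individually. A sketch of the mapping, with illustrative values:

```julia
padding = (1, 2, 3, 4)  # NNlib order: (w_begin, w_end, h_begin, h_end)
top, right, bottom, left = padding[3], padding[2], padding[4], padding[1]
(top, right, bottom, left) == (3, 2, 4, 1)  # each side now gets its own amount
```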

src/pooling.jl

Lines changed: 1 addition & 1 deletion

```diff
@@ -132,7 +132,7 @@ end
 # Use NNPACK if it is available and operation is supported
 if is_nnpack_available()
     function maxpool(x::Array{T, 4}, pdims::PoolDims{2, K, S, P, (1, 1)}; kwargs...) where {T, K, S, P}
-        func = check_supported_operation(x, pdims) ? maxpool_nnpack : maxpool_direct
+        func = nnpack_supported_operation(pdims) ? maxpool_nnpack : maxpool_direct
        return func(x, pdims; kwargs...)
    end
end
```
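A hedged usage sketch of the dispatch above: when NNPACK is available and the predicate passes, `maxpool` takes the `maxpool_nnpack` path, otherwise it falls back to `maxpool_direct`. Shapes here are illustrative:

```julia
using NNlib

x = rand(Float32, 6, 6, 3, 1)  # (6 + 0 - 2) % 2 == 0, so NNPACK-eligible
pdims = PoolDims(x, 2)         # 2×2 window; stride defaults to the window size
size(maxpool(x, pdims))        # (3, 3, 3, 1)
```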

test/conv.jl

Lines changed: 5 additions & 2 deletions

```diff
@@ -274,10 +274,13 @@ conv_answer_dict = Dict(
     # A "drop channels and batch dimension" helper
     ddims(x) = dropdims(x, dims=(rank+1, rank+2))

-    for conv in (NNlib.conv, NNlib.conv_im2col, NNlib.conv_direct)
+    for conv in (NNlib.conv, NNlib.conv_im2col, NNlib.conv_direct, NNlib.conv_nnpack)
+        if conv == NNlib.conv_nnpack && !NNlib.nnpack_supported_operation(DenseConvDims(x, w))
+            continue
+        end
         @testset "$(conv)" begin
-            # First, your basic convolution with no parameters
             cdims = DenseConvDims(x, w)
+            # First, your basic convolution with no parameters
             @test isapprox(ddims(conv(x, w, cdims)), y_plain, rtol = 1.0e-7)

             # Next, test convolution on views and alternate datatypes:
```
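The new guard keeps `conv_nnpack` in the shared test loop while skipping parameter combinations the backend cannot run. A minimal sketch of consulting the predicate directly (array sizes are illustrative; this should hold whenever NNPACK support is compiled in):

```julia
using NNlib

x = rand(Float32, 5, 5, 1, 1)
w = rand(Float32, 3, 3, 1, 1)
cdims = DenseConvDims(x, w)              # defaults: stride (1, 1), no dilation
NNlib.nnpack_supported_operation(cdims)  # true for this unit-stride 2-d case
```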
