Commit 3847a8c

Remove _old!() methods
1 parent 6fd47e0 commit 3847a8c

2 files changed: 0 additions & 127 deletions

2 files changed

+0
-127
lines changed

src/impl/conv_direct.jl

Lines changed: 0 additions & 62 deletions
@@ -45,68 +45,6 @@ wrapper methods are available.
 """
 conv_direct!
 
-function conv_direct_old!(y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
-                          w::AbstractArray{wT,5}, cdims::DenseConvDims;
-                          alpha::yT = yT(1), beta = false) where {yT, xT, wT}
-    check_dims(size(x), size(w), size(y), cdims)
-
-    width, height, depth = input_size(cdims)
-    kernel_w, kernel_h, kernel_d = kernel_size(cdims)
-    out_c = channels_out(cdims)
-    pad_w_lo, pad_w_hi, pad_h_lo, pad_h_hi, pad_d_lo, pad_d_hi = padding(cdims)
-    dil_w, dil_h, dil_d = dilation(cdims)
-    stride_w, stride_h, stride_d = stride(cdims)
-    out_width, out_height, out_depth = output_size(cdims)
-
-    # If we're doing crosscorr instead of conv, then don't bother to flip `w`
-    if !flipkernel(cdims)
-        w = w[end:-1:1, end:-1:1, end:-1:1, :, :]
-    end
-
-    # A helper function to project from output (w, h) to input (input_w, input_h)
-    @inline project(idx, stride, pad) = (idx - 1)*stride - pad + 1
-
-    # explicit formulation of convolution.  Oh hoisting gods, hear my plea.
-    @inbounds for batch in 1:size(x)[end],
-        c_out in 1:out_c,
-        d_idx in 1:out_depth,
-        h_idx in 1:out_height,
-        w_idx in 1:out_width
-
-        # Starting points of the window of x we're going to grab
-        x_w = project(w_idx, stride_w, pad_w_lo)
-        x_h = project(h_idx, stride_h, pad_h_lo)
-        x_d = project(d_idx, stride_d, pad_d_lo)
-
-        # Grow that starting point into ranges
-        x_widxs = x_w .+ (0:dil_w:(dil_w*kernel_w-1))
-        x_hidxs = x_h .+ (0:dil_h:(dil_h*kernel_h-1))
-        x_didxs = x_d .+ (0:dil_d:(dil_d*kernel_d-1))
-        w_widxs = 1:kernel_w
-        w_hidxs = 1:kernel_h
-        w_didxs = 1:kernel_d
-
-        # Clamp the ranges to simulate padding
-        x_widxs, w_widxs = clamp_lo(x_widxs, w_widxs)
-        x_widxs, w_widxs = clamp_hi(x_widxs, w_widxs, width)
-        x_hidxs, w_hidxs = clamp_lo(x_hidxs, w_hidxs)
-        x_hidxs, w_hidxs = clamp_hi(x_hidxs, w_hidxs, height)
-        x_didxs, w_didxs = clamp_lo(x_didxs, w_didxs)
-        x_didxs, w_didxs = clamp_hi(x_didxs, w_didxs, depth)
-
-        # Grab our slices
-        x_slice = view(x, x_widxs, x_hidxs, x_didxs, :, batch)
-        w_slice = view(w, w_widxs, w_hidxs, w_didxs, :, c_out)
-
-        # Do the dotproduct dance, then weight by alpha/beta and git 'er done
-        dotprod = sum(x_slice .* w_slice)
-        y[w_idx, h_idx, d_idx, c_out, batch] = alpha*convert(yT, dotprod) +
-                                               beta*y[w_idx, h_idx, d_idx, c_out, batch]
-    end
-
-    return y
-end
-
 function conv_direct!(y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
                       w::AbstractArray{wT,5}, cdims::DenseConvDims;
                       alpha::yT = yT(1), beta = false) where {yT, xT, wT}
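
For reference, the surviving conv_direct! entry point keeps the same calling convention the removed conv_direct_old! had: a preallocated 5-d output y, 5-d input x and kernel w, and a DenseConvDims describing stride, padding, and dilation. The snippet below is a minimal usage sketch, not part of this commit; it assumes these files belong to NNlib.jl, where the DenseConvDims(x, w) constructor and the output_size/channels_out accessors used in the diff are defined.

    # Hypothetical usage sketch (assumes NNlib.jl); conv_direct! is an internal, unexported kernel.
    using NNlib: DenseConvDims, output_size, channels_out
    using NNlib: conv_direct!

    x = rand(Float32, 8, 8, 8, 3, 2)   # width × height × depth × channels_in × batch
    w = rand(Float32, 3, 3, 3, 3, 4)   # kernel_w × kernel_h × kernel_d × channels_in × channels_out
    cdims = DenseConvDims(x, w)        # defaults: stride 1, no padding, no dilation

    # Allocate the output from the dims object, then fill it in place.
    y = similar(x, output_size(cdims)..., channels_out(cdims), size(x, 5))
    conv_direct!(y, x, w, cdims)       # alpha = 1, beta = false: overwrite y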

src/impl/depthwiseconv_direct.jl

Lines changed: 0 additions & 65 deletions
@@ -18,71 +18,6 @@ channels in `x` is the last, not the second-to-last, as in a normal dense convol
 
 See the docstring for `conv_direct!()` for more on the optional parameters.
 """
-function depthwiseconv_direct_old!(
-                y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
-                w::AbstractArray{wT,5}, cdims::DepthwiseConvDims;
-                alpha::yT = yT(1), beta::yT = yT(0)) where {yT, xT, wT}
-    check_dims(size(x), size(w), size(y), cdims)
-
-    width, height, depth = input_size(cdims)
-    kernel_w, kernel_h, kernel_d = kernel_size(cdims)
-    out_c = channels_out(cdims)
-    pad_w_lo, pad_w_hi, pad_h_lo, pad_h_hi, pad_d_lo, pad_d_hi = padding(cdims)
-    dil_w, dil_h, dil_d = dilation(cdims)
-    stride_w, stride_h, stride_d = stride(cdims)
-    out_width, out_height, out_depth = output_size(cdims)
-
-    # If we're doing crosscorr instead of conv, then don't bother to flip `w`
-    if !flipkernel(cdims)
-        w = w[end:-1:1, end:-1:1, end:-1:1, :, :]
-    end
-
-    # A helper function to project from output (w, h) to input (input_w, input_h)
-    @inline project(idx, stride, pad) = (idx - 1)*stride - pad + 1
-
-    # explicit formulation of convolution.  Oh hoisting gods, hear my plea.
-    @inbounds for batch in 1:size(x)[end],
-        c_mult in 1:channel_multiplier(cdims),
-        c_in in 1:channels_in(cdims),
-        h_idx in 1:out_height,
-        w_idx in 1:out_width,
-        d_idx in 1:out_depth
-
-        # Starting points of the window of x we're going to grab
-        x_w = project(w_idx, stride_w, pad_w_lo)
-        x_h = project(h_idx, stride_h, pad_h_lo)
-        x_d = project(d_idx, stride_d, pad_d_lo)
-
-        # Grow that starting point into ranges
-        x_widxs = x_w .+ (0:dil_w:(dil_w*kernel_w-1))
-        x_hidxs = x_h .+ (0:dil_h:(dil_h*kernel_h-1))
-        x_didxs = x_d .+ (0:dil_d:(dil_d*kernel_d-1))
-        w_widxs = 1:kernel_w
-        w_hidxs = 1:kernel_h
-        w_didxs = 1:kernel_d
-
-        # Clamp the ranges to simulate padding
-        x_widxs, w_widxs = clamp_lo(x_widxs, w_widxs)
-        x_widxs, w_widxs = clamp_hi(x_widxs, w_widxs, width)
-        x_hidxs, w_hidxs = clamp_lo(x_hidxs, w_hidxs)
-        x_hidxs, w_hidxs = clamp_hi(x_hidxs, w_hidxs, height)
-        x_didxs, w_didxs = clamp_lo(x_didxs, w_didxs)
-        x_didxs, w_didxs = clamp_hi(x_didxs, w_didxs, depth)
-
-        # Grab our slices (for a single channel pairing, as this is depthwise)
-        c_out = (c_in - 1)*channel_multiplier(cdims) + c_mult
-        x_slice = view(x, x_widxs, x_hidxs, x_didxs, c_in, batch)
-        w_slice = view(w, w_widxs, w_hidxs, w_didxs, c_mult, c_in)
-
-        # Do the dotproduct dance, then weight by alpha/beta and git 'er done
-        dotprod = sum(x_slice .* w_slice)
-        prev_yval::yT = beta*y[w_idx, h_idx, d_idx, c_out, batch]
-        y[w_idx, h_idx, d_idx, c_out, batch] = alpha*convert(yT, dotprod) + prev_yval
-    end
-
-    return y
-end
-
 function depthwiseconv_direct!(y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
                                w::AbstractArray{wT,5}, cdims::DepthwiseConvDims;
                                alpha::yT = yT(1), beta = false) where {yT, xT, wT}
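
The depthwise path follows the same pattern. The one signature change visible in this hunk is that the removed depthwiseconv_direct_old! defaulted beta::yT = yT(0), while the surviving depthwiseconv_direct! takes beta = false, Julia's "strong zero", so beta*y[...] contributes nothing even when y starts out uninitialized. Below is a minimal usage sketch, again assuming NNlib.jl and its DepthwiseConvDims(x, w) constructor; it is not part of this commit.

    # Hypothetical usage sketch (assumes NNlib.jl); depthwiseconv_direct! is an internal, unexported kernel.
    using NNlib: DepthwiseConvDims, output_size, channels_out
    using NNlib: depthwiseconv_direct!

    x = rand(Float32, 8, 8, 8, 3, 2)   # width × height × depth × channels_in × batch
    w = rand(Float32, 3, 3, 3, 4, 3)   # kernel dims × channel_multiplier × channels_in
    cdims = DepthwiseConvDims(x, w)

    # channels_out(cdims) == channel_multiplier * channels_in (12 here).
    y = similar(x, output_size(cdims)..., channels_out(cdims), size(x, 5))
    depthwiseconv_direct!(y, x, w, cdims)   # alpha = 1, beta = false: overwrite y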
