@@ -18,71 +18,6 @@ channels in `x` is the last, not the second-to-last, as in a normal dense convolution.

See the docstring for `conv_direct!()` for more on the optional parameters.
"""
-function depthwiseconv_direct_old!(
-        y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
-        w::AbstractArray{wT,5}, cdims::DepthwiseConvDims;
-        alpha::yT = yT(1), beta::yT = yT(0)) where {yT, xT, wT}
-    check_dims(size(x), size(w), size(y), cdims)
-
-    width, height, depth = input_size(cdims)
-    kernel_w, kernel_h, kernel_d = kernel_size(cdims)
-    out_c = channels_out(cdims)
-    pad_w_lo, pad_w_hi, pad_h_lo, pad_h_hi, pad_d_lo, pad_d_hi = padding(cdims)
-    dil_w, dil_h, dil_d = dilation(cdims)
-    stride_w, stride_h, stride_d = stride(cdims)
-    out_width, out_height, out_depth = output_size(cdims)
-
-    # If we're doing crosscorr instead of conv, then don't bother to flip `w`
-    if !flipkernel(cdims)
-        w = w[end:-1:1, end:-1:1, end:-1:1, :, :]
-    end
-
-    # A helper function to project from output (w, h) to input (input_w, input_h)
-    @inline project(idx, stride, pad) = (idx - 1)*stride - pad + 1
-
-    # explicit formulation of convolution. Oh hoisting gods, hear my plea.
-    @inbounds for batch in 1:size(x)[end],
-        c_mult in 1:channel_multiplier(cdims),
-        c_in in 1:channels_in(cdims),
-        h_idx in 1:out_height,
-        w_idx in 1:out_width,
-        d_idx in 1:out_depth
-
-        # Starting points of the window of x we're going to grab
-        x_w = project(w_idx, stride_w, pad_w_lo)
-        x_h = project(h_idx, stride_h, pad_h_lo)
-        x_d = project(d_idx, stride_d, pad_d_lo)
-
-        # Grow that starting point into ranges
-        x_widxs = x_w .+ (0:dil_w:(dil_w*kernel_w - 1))
-        x_hidxs = x_h .+ (0:dil_h:(dil_h*kernel_h - 1))
-        x_didxs = x_d .+ (0:dil_d:(dil_d*kernel_d - 1))
-        w_widxs = 1:kernel_w
-        w_hidxs = 1:kernel_h
-        w_didxs = 1:kernel_d
-
-        # Clamp the ranges to simulate padding
-        x_widxs, w_widxs = clamp_lo(x_widxs, w_widxs)
-        x_widxs, w_widxs = clamp_hi(x_widxs, w_widxs, width)
-        x_hidxs, w_hidxs = clamp_lo(x_hidxs, w_hidxs)
-        x_hidxs, w_hidxs = clamp_hi(x_hidxs, w_hidxs, height)
-        x_didxs, w_didxs = clamp_lo(x_didxs, w_didxs)
-        x_didxs, w_didxs = clamp_hi(x_didxs, w_didxs, depth)
-
-        # Grab our slices (for a single channel pairing, as this is depthwise)
-        c_out = (c_in - 1)*channel_multiplier(cdims) + c_mult
-        x_slice = view(x, x_widxs, x_hidxs, x_didxs, c_in, batch)
-        w_slice = view(w, w_widxs, w_hidxs, w_didxs, c_mult, c_in)
-
-        # Do the dotproduct dance, then weight by alpha/beta and git 'er done
-        dotprod = sum(x_slice .* w_slice)
-        prev_yval::yT = beta*y[w_idx, h_idx, d_idx, c_out, batch]
-        y[w_idx, h_idx, d_idx, c_out, batch] = alpha*convert(yT, dotprod) + prev_yval
-    end
-
-    return y
-end
-
 function depthwiseconv_direct!(y::AbstractArray{yT,5}, x::AbstractArray{xT,5},
                                w::AbstractArray{wT,5}, cdims::DepthwiseConvDims;
                                alpha::yT = yT(1), beta = false) where {yT, xT, wT}
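
The padding trick in the deleted kernel is worth spelling out: rather than materializing a zero-padded copy of `x`, each output index is projected back to the first input index it reads, the dilated tap positions are grown into ranges, and any taps that land in the padding are dropped from both the input range and the kernel range, since they would only multiply against zeros. Below is a minimal, self-contained 1-D sketch of that idea; `project` matches the helper above, while `clamp_lo`/`clamp_hi` are hypothetical stand-ins written for illustration, not NNlib's actual helpers.

    # Project output index `idx` back to the first input index it reads.
    project(idx, stride, pad) = (idx - 1) * stride - pad + 1

    # Hypothetical stand-in: drop taps that fall into the low padding
    # (input indices below 1), removing the same number of entries from
    # the kernel range so the two ranges stay paired up.
    function clamp_lo(x_idxs::AbstractRange, w_idxs::AbstractRange)
        off = count(<(1), x_idxs)
        return x_idxs[off+1:end], w_idxs[off+1:end]
    end

    # Hypothetical stand-in: drop taps that run past the end of the input
    # (indices above `limit`), trimming the kernel range to match.
    function clamp_hi(x_idxs::AbstractRange, w_idxs::AbstractRange, limit)
        off = count(>(limit), x_idxs)
        return x_idxs[1:end-off], w_idxs[1:end-off]
    end

    # Worked example: input width 5, kernel 3, stride 1, dilation 1, pad 1.
    # At the first output position the leftmost tap lands in the padding:
    x_widxs = project(1, 1, 1) .+ (0:1:2)              # == 0:1:2; index 0 is out of bounds
    w_widxs = 1:3
    x_widxs, w_widxs = clamp_lo(x_widxs, w_widxs)      # -> (1:2, 2:3)
    x_widxs, w_widxs = clamp_hi(x_widxs, w_widxs, 5)   # -> unchanged, nothing overhangs

The dot product over the clamped slices equals the zero-padded result at that position, because the dropped taps would have contributed nothing.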
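For orientation, here is a rough usage sketch of the surviving `depthwiseconv_direct!`, not a test from this repository. It assumes NNlib's array-based `DepthwiseConvDims` constructor and qualifies the internal accessors (`output_size`, `channels_out`) that the kernel itself uses; the data layout is `(width, height, depth, channels, batch)`, and a depthwise kernel is shaped `(kernel_w, kernel_h, kernel_d, channel_multiplier, channels_in)`.

    using NNlib

    x = rand(Float32, 8, 8, 8, 3, 2)   # 3 input channels, batch of 2
    w = rand(Float32, 3, 3, 3, 4, 3)   # channel multiplier 4 for each of 3 channels

    # Assumed constructor signature; kernel 3 with padding 1 keeps spatial size 8.
    cdims = DepthwiseConvDims(x, w; stride=1, padding=1, dilation=1)

    y = zeros(Float32, NNlib.output_size(cdims)...,    # (8, 8, 8)
              NNlib.channels_out(cdims), size(x, 5))   # 12 output channels, 2 batches
    NNlib.depthwiseconv_direct!(y, x, w, cdims)

    # Output channels follow the pairing computed in the kernel:
    #   c_out = (c_in - 1) * channel_multiplier(cdims) + c_mult
    # so channels 1:4 of y come from input channel 1, 5:8 from channel 2, and so on.

Note the new default `beta = false` in the signature above: in Julia, `false` is a strong zero under multiplication, so `beta * y[...]` discards whatever was in `y` (even `NaN`s) instead of scaling it, the same convention `LinearAlgebra.mul!` uses; pass `beta = yT(1)` to accumulate into the existing contents of `y`.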