 """
-    lpnormpool(x, p::Number, k::NTuple{N, Integer}; pad=0, stride=k)
+    lpnormpool(x, p::Real, k::NTuple{N, Integer}; pad=0, stride=k)

 Perform Lp pool operation with value of the Lp norm `p` and window size `k` on input tensor `x`, also known as LPPool in pytorch.
 This pooling operator from [Learned-Norm Pooling for Deep Feedforward and Recurrent Neural Networks](https://arxiv.org/abs/1311.1780).
@@ -201,12 +201,11 @@ For all elements `x` in a size `k` window, lpnormpool computes `(∑ᵢ xᵢ^p)^

 Thus `lpnormpool(x, 1, k) ./ prod(k) ≈ meanpool(x, k)` and `lpnormpool(x, 2, k).^2 ./ prod(k) ≈ meanpool(x.^2, k)`.
 """
-function lpnormpool(x, p::Number, k::NTuple{N, Integer}; pad=0, stride=k) where N
-    (isinf(p) || p < 0) && error("p value of Lp norm pool expects `0 < p < Inf`, but p is $(p) now.")
-    pad = expand(Val(N), pad)
-    stride = expand(Val(N), stride)
-    pdims = PoolDims(x, k; padding=pad, stride=stride)
-    return lpnormpool(x, pdims; p=p)
+function lpnormpool(x, p::Real, k::NTuple{N, Integer}; pad=0, stride=k) where {N}
+    pow = p isa Integer ? p : convert(float(eltype(x)), p)
+    (isinf(pow) || pow < 0) && error("p value of Lp norm pool expects `0 < p < Inf`, but p is $(pow) now.")
+    pdims = PoolDims(x, k; padding=expand(Val(N), pad), stride=expand(Val(N), stride))
+    return lpnormpool(x, pdims; p=pow)
 end
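
A minimal usage sketch (not part of the patch), assuming only NNlib's exported `lpnormpool` and `meanpool`: an `Integer` exponent is forwarded unchanged, a non-integer exponent is converted to `float(eltype(x))`, and the identities from the docstring hold.

```julia
# Sketch exercising the new p::Real method, assuming NNlib exports
# lpnormpool and meanpool.
using NNlib

x = rand(Float32, 8, 8, 3, 2)   # W×H×C×N input tensor
k = (2, 2)                      # 2×2 pooling window

# An Integer exponent is passed through as-is; a non-integer exponent is
# converted to float(eltype(x)), so a Float32 input keeps a Float32 exponent
# instead of the computation being promoted through a Float64 literal like 2.0.
y_int   = lpnormpool(x, 2, k)
y_float = lpnormpool(x, 2.0, k)
@assert y_int ≈ y_float

# Identities stated in the docstring:
@assert lpnormpool(x, 1, k) ./ prod(k) ≈ meanpool(x, k)
@assert lpnormpool(x, 2, k) .^ 2 ./ prod(k) ≈ meanpool(x .^ 2, k)
```

Converting the exponent to the array's float element type appears to be the motivation for the added `pow = p isa Integer ? p : convert(float(eltype(x)), p)` line: it avoids silently promoting a Float32 pipeline to Float64 when the caller writes a plain float literal for `p`.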