
Commit 23155cf

Merge pull request #47 from FluxML/julia-0.7
Julia 0.7
2 parents 0609258 + f0993bb commit 23155cf

File tree: 9 files changed (+54 −55 lines)

.travis.yml

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ os:
   - linux
   - osx
 julia:
-  - 0.6
+  - nightly
 notifications:
   email: false
 git:

REQUIRE

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
-julia 0.6
+julia 0.7-
 Requires
 MacroTools

src/NNlib.jl

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 module NNlib

-using Requires
+using Requires, Libdl

 export σ, sigmoid, relu, leakyrelu, elu, swish, selu, softplus, softsign, logσ, logsigmoid,
   softmax, logsoftmax, maxpool, meanpool
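Note: in Julia 0.7, `dlopen`, `find_library`, and related functions moved out of `Base` into the `Libdl` standard library, which is why the module now loads it alongside Requires. A minimal standalone sketch of the pattern (the library name `libfoo` is hypothetical, not something NNlib ships):

using Libdl

# Look up a hypothetical shared library and open it if present.
libfoo = Libdl.find_library(["libfoo"])
if libfoo != ""
    handle = Libdl.dlopen(libfoo)
    Libdl.dlclose(handle)
end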

src/conv.jl

Lines changed: 5 additions & 5 deletions
@@ -6,7 +6,7 @@ include("impl/conv.jl")
 # Convolutions

 function cdims(x::NTuple{N}, w::NTuple{N}, pad, stride) where N
-  ntuple(Val{N}) do i
+  ntuple(Val(N)) do i
     if i < N-1
       1 + div(x[i] - w[i] + 2*pad[i], stride[i])
     elseif i == N-1
@@ -31,10 +31,10 @@ function conv(x::A, w::A; pad = 0, stride = 1, dilation = 1) where A<:AbstractAr
 end

 ∇conv_data(dy::A, x::A, w::A; pad = 0, stride = 1, dilation = 1) where A<:AbstractArray =
-  ∇conv_data!(zeros(x), dy, x, w; pad = pad, stride = stride, dilation = dilation)
+  ∇conv_data!(zero(x), dy, x, w; pad = pad, stride = stride, dilation = dilation)

 ∇conv_filter(dy::A, x::A, w::A; pad = 0, stride = 1, dilation = 1) where A<:AbstractArray =
-  ∇conv_filter!(zeros(w), dy, x, w; pad = pad, stride = stride, dilation = dilation)
+  ∇conv_filter!(zero(w), dy, x, w; pad = pad, stride = stride, dilation = dilation)

 # N-D dispatch

@@ -88,7 +88,7 @@ conv!(y::AbstractArray{T,5}, x::AbstractArray{T,5}, w::AbstractArray{T,5};
 # Pooling

 function pdims(dims::Dims{N}, window, padding, stride) where N
-  ntuple(Val{N}) do i
+  ntuple(Val(N)) do i
     if i < N-1
       1 + (dims[i] + 2*padding[i] - window[i])÷stride[i]
     else
@@ -97,7 +97,7 @@ function pdims(dims::Dims{N}, window, padding, stride) where N
   end
 end

-expand(::Type{Val{N}}, i::Integer) where N = ntuple(_ -> i, Val{N})
+expand(::Type{Val{N}}, i::Integer) where N = ntuple(_ -> i, Val(N))
 expand(::Type{Val{N}}, i::NTuple{N, Integer}) where N = i

 # Interface
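Note: both changes in this file are mechanical Julia 0.7 ports. `ntuple` now takes a `Val(N)` instance rather than the `Val{N}` type, and `zeros(x::AbstractArray)` was deprecated in favour of `zero(x)`, which returns a zero-filled array with the same shape and element type. A standalone sketch of both idioms (not NNlib code):

t = ntuple(i -> 2i, Val(3))      # (2, 4, 6); Val(3) is an instance, Val{3} was the 0.6 spelling
x = rand(Float32, 2, 2)
y = zero(x)                      # zero-filled array, same size and eltype as x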

src/impl/pool.jl

Lines changed: 2 additions & 2 deletions
@@ -32,7 +32,7 @@ function max_pooling2d_bwd!(x::AbstractArray{T,4}, y::AbstractArray{T,4},
   channels::Int, num::Int, pooled_width::Int, pooled_height::Int, kernel_w::Int,
   kernel_h::Int, pad_w::Int, pad_h::Int, stride_w::Int, stride_h::Int) where T

-  grad_input[:, :, :, :] = 0
+  grad_input .= 0
   #pragma omp parallel for
   for n = 1:num, c = 1:channels, ph = 1:pooled_height, pw = 1:pooled_width
     hstart = (ph - 1) * stride_h - pad_h
@@ -168,7 +168,7 @@ function max_pooling3d_bwd!(x::AbstractArray{T,5}, y::AbstractArray{T,5},
   kernel_w::Int, kernel_h::Int, kernel_d::Int, pad_w::Int, pad_h::Int, pad_d::Int,
   stride_w::Int, stride_h::Int, stride_d::Int) where T

-  grad_input[:, :, :, :, :] = 0
+  grad_input .= 0

   #pragma omp parallel for
   for n = 1:num, c = 1:channels, pd = 1:pooled_depth, ph = 1:pooled_height, pw = 1:pooled_width
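Note: Julia 0.7 deprecates assigning a scalar to an indexed slice (`A[:, :] = 0`), and 1.0 removes it; filling an array in place is spelled with broadcast assignment instead. A standalone sketch:

grad = rand(3, 3, 1, 1)
grad .= 0          # fill every element with zero, in place
# fill!(grad, 0)   # equivalent alternative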

src/linalg.jl

Lines changed: 9 additions & 9 deletions
@@ -1,8 +1,8 @@
 ## Low level gemm! call with pointers
 ## Borrowed from Knet.jl

-using Base.LinAlg
-using Base.LinAlg.BLAS: libblas, BlasInt, @blasfunc
+using LinearAlgebra
+using LinearAlgebra.BLAS: libblas, BlasInt, @blasfunc

 # C := alpha*op(A)*op(B) + beta*C, where:
 # op(X) is one of op(X) = X, or op(X) = XT, or op(X) = XH,
@@ -18,13 +18,13 @@ for (gemm, elty) in ((:dgemm_,:Float64), (:sgemm_,:Float32))
     if transA=='N'; lda=M; else; lda=K; end
     if transB=='N'; ldb=K; else; ldb=N; end
     ldc = M;
-    ccall((@blasfunc($gemm), libblas), Void,
-          (Ptr{UInt8}, Ptr{UInt8}, Ptr{BlasInt}, Ptr{BlasInt},
-           Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt},
-           Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty},
-           Ptr{BlasInt}),
-          &transA, &transB, &M, &N, &K,
-          &alpha, A, &lda, B, &ldb, &beta, C, &ldc)
+    ccall((@blasfunc(dgemm_), libblas), Nothing,
+          (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt},
+           Ref{BlasInt}, Ref{Float64}, Ptr{Float64}, Ref{BlasInt},
+           Ptr{Float64}, Ref{BlasInt}, Ref{Float64}, Ptr{Float64},
+           Ref{BlasInt}),
+          transA, transB, M, N, K,
+          alpha, A, lda, B, ldb, beta, C, ldc)
   end
  end
 end
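Note: the `ccall` rewrite bundles three Julia 0.7 changes: the C `void` return type is written `Nothing` (or `Cvoid`) instead of `Void`, scalar arguments passed by reference are declared as `Ref{T}`, and the old `&x` argument syntax is gone, so values are passed directly. A minimal sketch against BLAS `dscal_`, reusing the same imports this file relies on (a standalone illustration, not part of NNlib):

using LinearAlgebra
using LinearAlgebra.BLAS: libblas, BlasInt, @blasfunc

x = ones(4)
n, alpha, incx = BlasInt(length(x)), 2.0, BlasInt(1)
# Scalars go through Ref{T} with no `&`; the array itself stays a Ptr.
ccall((@blasfunc(dscal_), libblas), Nothing,
      (Ref{BlasInt}, Ref{Float64}, Ptr{Float64}, Ref{BlasInt}),
      n, alpha, x, incx)
# x is now [2.0, 2.0, 2.0, 2.0]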

test/activation.jl

Lines changed: 14 additions & 14 deletions
@@ -22,19 +22,19 @@ function test_value_int_input_forces_float64(a)
   end
 end

-if Base.find_in_path("ForwardDiff") ≠ nothing
-  using ForwardDiff
-  function test_value_duals(a)
-    @testset "$(a): " begin
-      for T in [Float32, Float64, Int32, Int64]
-        val = @inferred a(ForwardDiff.Dual(float(T(1)), one(float(T))))
-        @test typeof(val) == ForwardDiff.Dual{Void,float(T),1}
-      end
-    end
-  end
-
-  test_value_duals.(ACTIVATION_FUNCTIONS)
-end
+# if Base.find_in_path("ForwardDiff") ≠ nothing
+#   using ForwardDiff
+#   function test_value_duals(a)
+#     @testset "$(a): " begin
+#       for T in [Float32, Float64, Int32, Int64]
+#         val = @inferred a(ForwardDiff.Dual(float(T(1)), one(float(T))))
+#         @test typeof(val) == ForwardDiff.Dual{Nothing,float(T),1}
+#       end
+#     end
+#   end
+#
+#   test_value_duals.(ACTIVATION_FUNCTIONS)
+# end

 @testset "Activation Functions" begin

@@ -82,7 +82,7 @@ end

 xs = rand(5,5)

-@test all(sum(softmax(xs), 1) .≈ 1)
+@test all(sum(softmax(xs), dims = 1) .≈ 1)

 @test sum(softmax(vec(xs))) ≈ 1
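Note: the reduction API also changed in Julia 0.7: the dimension argument to `sum`, `maximum`, `mean`, and friends became a `dims` keyword. A standalone sketch of the assertion being ported here (assuming `NNlib.softmax` is available):

using NNlib

xs = rand(5, 5)
colsums = sum(softmax(xs), dims = 1)   # 1×5 array; sum(xs, 1) was the 0.6 spelling
@assert all(colsums .≈ 1)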

test/conv.jl

Lines changed: 16 additions & 16 deletions
@@ -4,17 +4,17 @@ using NNlib: conv, ∇conv_filter, ∇conv_data, ∇maxpool, maxpool
 x = reshape(Float64[1:20;], 5, 4, 1, 1)
 w = reshape(Float64[1:4;], 2, 2, 1, 1)

-@test squeeze(conv(x, w),(3,4)) == [
+@test squeeze(conv(x, w), dims = (3,4)) == [
   29 79 129;
   39 89 139;
   49 99 149;
   59 109 159.]

-@test squeeze(conv(x, w; stride=2),(3,4)) == [
+@test squeeze(conv(x, w; stride=2), dims = (3,4)) == [
   29 129;
   49 149.]

-@test squeeze(conv(x, w; pad=1),(3,4)) == [
+@test squeeze(conv(x, w; pad=1), dims = (3,4)) == [
   1.0 9.0 29.0 49.0 48.0;
   4.0 29.0 79.0 129.0 115.0;
   7.0 39.0 89.0 139.0 122.0;
@@ -23,15 +23,15 @@ using NNlib: conv, ∇conv_filter, ∇conv_data, ∇maxpool, maxpool
   10.0 40.0 70.0 100.0 80.0
 ]

-@test squeeze(conv(x, w; dilation=2),(3,4)) == [
+@test squeeze(conv(x, w; dilation=2), dims = (3,4)) == [
   48 98;
   58 108;
   68 118.]

 # NaN tests for dilation forward pass

 ys = []
-for idx in 1:1000
+for idx in 1:1000
   push!(ys, conv(x, w; dilation=2))
 end
 @test !any([any(isnan.(ys[idx])) for idx in 1:1000])
@@ -72,9 +72,9 @@ end

 x = reshape(Float64[1:20;], 5, 4, 1, 1)

-@test squeeze(maxpool(x, (2,2)), (3,4)) == [7 17; 9 19]
-@test squeeze(maxpool(x, (2,2); stride=(2,2)), (3,4)) == [7 17; 9 19]
-@test squeeze(maxpool(x, (2,2); pad=(1,1)), (3,4)) == [
+@test squeeze(maxpool(x, (2,2)), dims = (3,4)) == [7 17; 9 19]
+@test squeeze(maxpool(x, (2,2); stride=(2,2)), dims = (3,4)) == [7 17; 9 19]
+@test squeeze(maxpool(x, (2,2); pad=(1,1)), dims = (3,4)) == [
   1.0 11.0 16.0;
   3.0 13.0 18.0;
   5.0 15.0 20.0;
@@ -106,9 +106,9 @@ end
   1078.0 1258.0 1438.0;
   1114.0 1294.0 1474.0;
   1150.0 1330.0 1510.0]
-@test squeeze(conv(x, w),(4,5)) == res
+@test squeeze(conv(x, w), dims = (4,5)) == res

-@test squeeze(conv(x, w; stride=2),(3,4,5)) == [
+@test squeeze(conv(x, w; stride=2), dims = (3,4,5)) == [
   322.0 682.0;
   394.0 754.0]

@@ -141,9 +141,9 @@ end
   478.0 1185.0 1315.0 1445.0 877.0;
   489.0 1211.0 1341.0 1471.0 892.0;
   270.0 660.0 730.0 800.0 480.0]
-@test squeeze(conv(x, w; pad=1),(4,5)) == res
+@test squeeze(conv(x, w; pad=1), dims = (4,5)) == res

-@test squeeze(conv(x, w; dilation=2),(3,4,5)) == [
+@test squeeze(conv(x, w; dilation=2), dims = (3,4,5)) == [
   608 788;
   644 824;
   680 860.
@@ -152,7 +152,7 @@ end
 # NaN tests for dilation forward pass

 ys = []
-for idx in 1:1000
+for idx in 1:1000
   push!(ys, conv(x, w; dilation=2))
 end
 @test !any([any(isnan.(ys[idx])) for idx in 1:1000])
@@ -187,8 +187,8 @@ end

 x = reshape(Float64[1:60;], 5, 4, 3, 1, 1)

-@test squeeze(maxpool(x, (2,2,2)), (3,4,5)) == [27 37; 29 39.]
-@test squeeze(maxpool(x, (2,2,2); stride=(2,2,2)), (3,4,5)) == [27 37; 29 39.]
+@test squeeze(maxpool(x, (2,2,2)), dims = (3,4,5)) == [27 37; 29 39.]
+@test squeeze(maxpool(x, (2,2,2); stride=(2,2,2)), dims = (3,4,5)) == [27 37; 29 39.]
 res = zeros(3,3,2)
 res[:, :, 1] = [
   1.0 11.0 16.0;
@@ -198,7 +198,7 @@ end
   41.0 51.0 56.0;
   43.0 53.0 58.0;
   45.0 55.0 60.0]
-@test squeeze(maxpool(x, (2,2,2), pad=(1,1,1)), (4,5)) == res
+@test squeeze(maxpool(x, (2,2,2), pad=(1,1,1)), dims = (4,5)) == res

 # for gradients, check only size
 # correctness of gradients is cross-checked with CUDNN.jl
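Note: every edit in this test file is the same keyword migration: the positional `squeeze(A, dims)` form became `squeeze(A, dims = dims)` in Julia 0.7 (the function was later renamed `dropdims` in 1.0). A standalone sketch:

A = reshape(collect(1.0:4.0), 2, 2, 1, 1)
B = squeeze(A, dims = (3, 4))   # 2×2 matrix; squeeze(A, (3, 4)) was the 0.6 form
@assert size(B) == (2, 2)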

test/runtests.jl

Lines changed: 5 additions & 6 deletions
@@ -1,13 +1,12 @@
-using NNlib
-using Base.Test
+using NNlib, Test

 @testset "NNlib" begin

 include("activation.jl")
 include("conv.jl")
-if Base.find_in_path("CuArrays") ≠ nothing
-  include("cubroadcast.jl")
-end
+# if Base.find_in_path("CuArrays") ≠ nothing
+#   include("cubroadcast.jl")
+# end

 xs = [-100_000, -100_000.]
 @test softmax(xs) ≈ [0.5, 0.5]
@@ -19,7 +18,7 @@ xs = rand(5)
 @test logsigmoid.(xs) ≈ log.(sigmoid.(xs))

 xs = rand(5,10)
-@test softmax(xs) ≈ exp.(xs) ./ sum(exp.(xs),1)
+@test softmax(xs) ≈ exp.(xs) ./ sum(exp.(xs), dims = 1)
 @test logsoftmax(xs) ≈ log.(softmax(xs))
 @test logsigmoid.(xs) ≈ log.(sigmoid.(xs))
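Note: on Julia 0.7 the test framework lives in the `Test` standard library (`using Base.Test` no longer works), and `Base.find_in_path` was removed, which is presumably why the CuArrays check is commented out rather than ported; `Base.find_package` is the closest 0.7 replacement. A standalone sketch of that pattern (the package name is only an example):

using Test

@testset "optional dependency" begin
    # Base.find_in_path is gone in 0.7; Base.find_package is the replacement.
    if Base.find_package("CuArrays") !== nothing
        @test true            # GPU-specific tests would be included here
    else
        @test true            # CuArrays not installed, nothing extra to run
    end
end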
