Skip to content

Commit 2efbb74

Browse files
author
Avik Pal
committed
Incorrect force push
1 parent e5e3964 commit 2efbb74

File tree

3 files changed

+46
-40
lines changed

3 files changed

+46
-40
lines changed

test/activation.jl

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -82,9 +82,9 @@ end
8282

8383
xs = rand(5,5)
8484

85-
@test all(sum(softmax(xs), dims = 1) .≈ 1)
85+
@test all(sum(softmax(xs), dims = 1) .≈ Float32(1))
8686

87-
@test sum(softmax(vec(xs))) ≈ 1
87+
@test sum(softmax(vec(xs))) ≈ Float32(1)
8888

8989
@testset "elu" begin
9090
@test elu(42) == 42

test/conv.jl

Lines changed: 38 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -6,37 +6,40 @@ using NNlib: conv, ∇conv_filter, ∇conv_data, ∇maxpool, maxpool, depthwisec
66
w = reshape(Float32[1:9;], 3, 3, 1, 1)
77

88
@test dropdims(conv(x, w, pad=1), dims=(3,4)) ≈ Float32.([
9-
29.0 99.0 207.0 263.0
10-
62.0 192.0 372.0 446.0
11-
83.0 237.0 417.0 485.0
12-
75.0 198.0 330.0 365.0])
9+
29 99 207 263
10+
62 192 372 446
11+
83 237 417 485
12+
75 198 330 365])
1313

1414
x = reshape(Float64[1:20;], 5, 4, 1, 1)
1515
w = reshape(Float64[1:4;], 2, 2, 1, 1)
1616

17-
@test dropdims(conv(x, w), dims = (3,4)) == [
18-
29 79 129;
19-
39 89 139;
20-
49 99 149;
21-
59 109 159.]
17+
@test dropdims(conv(x, w), dims = (3,4)) ≈ Float32.([
18+
29 79 129;
19+
39 89 139;
20+
49 99 149;
21+
59 109 159
22+
])
2223

23-
@test dropdims(conv(x, w; stride=2), dims = (3,4)) == [
24+
@test dropdims(conv(x, w; stride=2), dims = (3,4)) ≈ Float32.([
2425
29 129;
25-
49 149.]
26+
49 149
27+
])
2628

27-
@test dropdims(conv(x, w; pad=1), dims = (3,4)) == [
28-
1.0 9.0 29.0 49.0 48.0;
29-
4.0 29.0 79.0 129.0 115.0;
30-
7.0 39.0 89.0 139.0 122.0;
31-
10.0 49.0 99.0 149.0 129.0;
32-
13.0 59.0 109.0 159.0 136.0;
33-
10.0 40.0 70.0 100.0 80.0
34-
]
29+
@test dropdims(conv(x, w; pad=1), dims = (3,4)) ≈ Float32.([
30+
1 9 29 49 48;
31+
4 29 79 129 115;
32+
7 39 89 139 122;
33+
10 49 99 149 129;
34+
13 59 109 159 136;
35+
10 40 70 100 80
36+
])
3537

36-
@test dropdims(conv(x, w; dilation=2), dims = (3,4)) == [
37-
48 98;
38+
@test dropdims(conv(x, w; dilation=2), dims = (3,4)) ≈ Float32.([
39+
48 98;
3840
58 108;
39-
68 118.]
41+
68 118
42+
])
4043

4144
# NaN tests for dilation forward pass
4245

@@ -99,7 +102,7 @@ end
99102
W = copy(permutedims(w[:,:,:,i:i],[1,2,4,3]));
100103
DY = copy(dy[:,:,2i-1:2i,:]);
101104
res = ∇conv_data(DY,X,W)
102-
@test dropdims(z[:,:,i:i,:], dims=(3,4)) == dropdims(res, dims=(3,4))
105+
@test dropdims(z[:,:,i:i,:], dims=(3,4)) ≈ Float32.(dropdims(res, dims=(3,4)))
103106
end
104107

105108
z = ∇depthwiseconv_filter(dy, x, w)
@@ -108,7 +111,7 @@ end
108111
W = copy(permutedims(w[:,:,:,i:i],[1,2,4,3]))
109112
DY = copy(dy[:,:,2i-1:2i,:])
110113
res = ∇conv_filter(DY,X,W)
111-
@test dropdims(z[:,:,:,i:i]; dims=(4)) == dropdims(res; dims=(3))
114+
@test dropdims(z[:,:,:,i:i]; dims=(4)) ≈ Float32.(dropdims(res; dims=(3)))
112115
end
113116

114117
@test size(∇depthwiseconv_filter(rand(2,2,4,1), x, w)) == size(w)
@@ -125,17 +128,20 @@ end
125128

126129
x = reshape(Float32[1:16;], 4, 4, 1, 1)
127130

128-
@test dropdims(maxpool(x, (2,2)), dims = (3,4)) == Float32.([6.0 14.0; 8.0 16.0])
131+
@test dropdims(maxpool(x, (2,2)), dims = (3,4)) ≈ Float32.([
132+
6.0 14.0;
133+
8.0 16.0
134+
])
129135

130136
x = reshape(Float64[1:20;], 5, 4, 1, 1)
131137

132-
@test dropdims(maxpool(x, (2,2)), dims = (3,4)) == [7 17; 9 19]
133-
@test dropdims(maxpool(x, (2,2); stride=(2,2)), dims = (3,4)) == [7 17; 9 19]
134-
@test dropdims(maxpool(x, (2,2); pad=(1,1)), dims = (3,4)) == [
135-
1.0 11.0 16.0;
136-
3.0 13.0 18.0;
137-
5.0 15.0 20.0;
138-
]
138+
@test dropdims(maxpool(x, (2,2)), dims = (3,4)) ≈ Float32.([7 17; 9 19])
139+
@test dropdims(maxpool(x, (2,2); stride=(2,2)), dims = (3,4)) ≈ Float32.([7 17; 9 19])
140+
@test dropdims(maxpool(x, (2,2); pad=(1,1)), dims = (3,4)) ≈ Float32.([
141+
1 11 16;
142+
3 13 18;
143+
5 15 20;
144+
])
139145

140146
# for gradients, check only size
141147
# correctness of gradients is cross-checked with CUDNN.jl

test/runtests.jl

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,14 @@ xs = [-100_000, -100_000.]
1010
@test logsoftmax(xs) ≈ log.([0.5, 0.5])
1111

1212
xs = rand(5)
13-
@test softmax(xs) ≈ exp.(xs) ./ sum(exp.(xs))
14-
@test logsoftmax(xs) ≈ log.(softmax(xs))
15-
@test logsigmoid.(xs) ≈ log.(sigmoid.(xs))
13+
@test softmax(xs) ≈ Float32.(exp.(xs) ./ sum(exp.(xs)))
14+
@test logsoftmax(xs) ≈ Float32.(log.(softmax(xs)))
15+
@test logsigmoid.(xs) ≈ Float32.(log.(sigmoid.(xs)))
1616

1717
xs = rand(5,10)
18-
@test softmax(xs) ≈ exp.(xs) ./ sum(exp.(xs), dims = 1)
19-
@test logsoftmax(xs) ≈ log.(softmax(xs))
20-
@test logsigmoid.(xs) ≈ log.(sigmoid.(xs))
18+
@test softmax(xs) ≈ Float32.(exp.(xs) ./ sum(exp.(xs), dims = 1))
19+
@test logsoftmax(xs) ≈ Float32.(log.(softmax(xs)))
20+
@test logsigmoid.(xs) ≈ Float32.(log.(sigmoid.(xs)))
2121

2222
for T in [:Float32, :Float64]
2323
@eval @test logsigmoid.($T[-100_000, 100_000.]) ≈ $T[-100_000, 0.]

0 commit comments

Comments
 (0)