|
# Download (or reuse a cached copy of) the SVHN2 dataset without an
# interactive prompt: DataDeps.jl checks the DATADEPS_ALWAYS_ACCEPT
# environment variable before asking for confirmation.
# NOTE: the variable name was previously misspelled
# ("DATADEPS_ALWAY_ACCEPT"), so the prompt was never suppressed and a
# non-interactive test run could hang waiting for input.
data_dir = withenv("DATADEPS_ALWAYS_ACCEPT" => "true") do
    datadep"SVHN2"
end
|
| 60 | + |
@testset "Images" begin
    train_imgs = @inferred SVHN2.traintensor()
    test_imgs  = @inferred SVHN2.testtensor()
    extra_imgs = @inferred SVHN2.extratensor()
    @test size(train_imgs, 4) == 73_257
    @test size(test_imgs,  4) == 26_032
    @test size(extra_imgs, 4) == 531_131

    # Sanity check that the first train image differs from both the
    # first test image and the first extra image
    @test train_imgs[:,:,:,1] != test_imgs[:,:,:,1]
    @test train_imgs[:,:,:,1] != extra_imgs[:,:,:,1]
    @test test_imgs[:,:,:,1]  != extra_imgs[:,:,:,1]
    # Make sure other integer types work as indices
    @test SVHN2.testtensor(0xBAE) == SVHN2.testtensor(2990)

    # Spot-check a small patch of raw bytes from each set against
    # known-good values
    @test reinterpret(UInt8, train_imgs)[11:13, 12:14, 1, 1] == [
        0x5a 0x5c 0x5b
        0x5c 0x5b 0x5d
        0x5d 0x57 0x59
    ]
    @test reinterpret(UInt8, test_imgs)[11:13, 12:14, 1, 1] == [
        0x28 0x2f 0x33
        0x2e 0x38 0x3b
        0x2d 0x37 0x3b
    ]
    @test reinterpret(UInt8, extra_imgs)[11:13, 12:14, 1, 1] == [
        0x51 0x51 0x50
        0x53 0x4e 0x4c
        0x52 0x4c 0x49
    ]

    # These tests check that the functions return internally
    # consistent results for different parameters (e.g. an index
    # given as an integer or as a vector): no matter how an index
    # is specified, the result for that index is always the same.
    for (image_fun, T, nimages) in (
            (SVHN2.testtensor, UInt8,   26_032),
            (SVHN2.testtensor, Int,     26_032),
            (SVHN2.testtensor, Float64, 26_032),
            (SVHN2.testtensor, Float32, 26_032),
            (SVHN2.testtensor, N0f8,    26_032),
        )
        @testset "$image_fun with T=$T" begin
            # whole image set
            full = @inferred image_fun(T)
            @test typeof(full) <: Array{T,4}
            @test size(full) == (32,32,3,nimages)

            # out-of-bounds indices must be rejected
            @test_throws BoundsError image_fun(T,-1)
            @test_throws BoundsError image_fun(T,0)
            @test_throws BoundsError image_fun(T,nimages+1)

            @testset "load single images" begin
                # Sample a few random images to compare
                for idx = rand(1:nimages, 3)
                    single = @inferred image_fun(T,idx)
                    @test typeof(single) <: Array{T,3}
                    @test size(single) == (32,32,3)
                    @test single == full[:,:,:,idx]
                end
            end

            @testset "load multiple images" begin
                imgs_5_10 = @inferred image_fun(T,5:10)
                @test typeof(imgs_5_10) <: Array{T,4}
                @test size(imgs_5_10) == (32,32,3,6)
                for k = 1:6
                    @test imgs_5_10[:,:,:,k] == full[:,:,:,k+4]
                end

                # also test the edge cases `1` and `nimages`
                edge_idx = [10,3,9,1,nimages]
                picked   = image_fun(T,edge_idx)
                picked32 = image_fun(T,Vector{Int32}(edge_idx))
                @test typeof(picked)   <: Array{T,4}
                @test typeof(picked32) <: Array{T,4}
                @test size(picked)   == (32,32,3,5)
                @test size(picked32) == (32,32,3,5)
                for k in 1:5
                    @test picked[:,:,:,k] == full[:,:,:,edge_idx[k]]
                    @test picked[:,:,:,k] == picked32[:,:,:,k]
                end
            end
        end
    end
end
| 149 | + |
@testset "Labels" begin
    # Sanity check that the first train label differs from both the
    # first test label and the first extra label
    @test SVHN2.trainlabels(1) != SVHN2.testlabels(1)
    @test SVHN2.trainlabels(1) != SVHN2.extralabels(1)
    @test SVHN2.testlabels(1)  != SVHN2.extralabels(1)

    # Check a few hand-picked examples. Both the pictures and the
    # native output were inspected to make sure these values
    # correspond to the image at the same index.
    @test SVHN2.trainlabels(1) === 1
    @test SVHN2.trainlabels(2) === 9
    @test SVHN2.trainlabels(1337) === 2
    @test SVHN2.trainlabels(0xCAF) === 3
    @test SVHN2.trainlabels(73_257) === 9
    @test SVHN2.testlabels(1) === 5
    @test SVHN2.testlabels(4) === 10
    @test SVHN2.testlabels(0xDAD) === 4
    @test SVHN2.testlabels(26_032) === 7
    @test SVHN2.extralabels(1) === 4
    @test SVHN2.extralabels(3) === 8
    @test SVHN2.extralabels(531_131) === 4

    # These tests check that the functions return internally
    # consistent results for different parameters (e.g. an index
    # given as an integer or as a vector): no matter how an index
    # is specified, the result for that index is always the same.
    # -- However, technically these tests do not check if
    # these are the actual SVHN labels of that index!
    for (label_fun, nlabels) in
            ((SVHN2.trainlabels, 73_257),
             (SVHN2.testlabels,  26_032),
             (SVHN2.extralabels, 531_131))
        @testset "$label_fun" begin
            # whole label set
            full = @inferred label_fun()
            @test typeof(full) <: Vector{Int64}
            @test size(full) == (nlabels,)

            @testset "load single label" begin
                # Sample a few random labels to compare
                for idx = rand(1:nlabels, 10)
                    lbl = @inferred label_fun(idx)
                    @test typeof(lbl) <: Int64
                    @test lbl == full[idx]
                end
            end

            @testset "load multiple labels" begin
                lbls_5_10 = @inferred label_fun(5:10)
                @test typeof(lbls_5_10) <: Vector{Int64}
                @test size(lbls_5_10) == (6,)
                for k = 1:6
                    @test lbls_5_10[k] == full[k+4]
                end

                # also test the edge cases `1` and `nlabels`
                edge_idx = [10,3,9,1,nlabels]
                picked   = @inferred label_fun(edge_idx)
                picked32 = @inferred label_fun(Vector{Int32}(edge_idx))
                @test typeof(picked)   <: Vector{Int64}
                @test typeof(picked32) <: Vector{Int64}
                @test size(picked)   == (5,)
                @test size(picked32) == (5,)
                for k in 1:5
                    @test picked[k] == full[edge_idx[k]]
                    @test picked[k] == picked32[k]
                end
            end
        end
    end
end
| 223 | + |
# Check against the already tested tensor and labels functions
@testset "Data" begin
    for (data_fun, feature_fun, label_fun, nobs) in
            ((SVHN2.testdata, SVHN2.testtensor, SVHN2.testlabels, 26_032),)
        @testset "check $data_fun against $feature_fun and $label_fun" begin
            # full dataset: features and labels must match the
            # dedicated accessor functions
            data, labels = @inferred data_fun()
            @test data == @inferred feature_fun()
            @test labels == @inferred label_fun()

            # a few randomly chosen single observations
            for idx = rand(1:nobs, 10)
                obs, lbl = @inferred data_fun(idx)
                @test obs == @inferred feature_fun(idx)
                @test lbl == @inferred label_fun(idx)
            end

            # a contiguous range of observations
            data, labels = @inferred data_fun(5:10)
            @test data == @inferred feature_fun(5:10)
            @test labels == @inferred label_fun(5:10)

            # same range with an explicit element type
            data, labels = @inferred data_fun(Int, 5:10)
            @test data == @inferred feature_fun(Int, 5:10)
            @test labels == @inferred label_fun(5:10)

            # arbitrary indices, including the edge cases `1` and `nobs`
            edge_idx = [10,3,9,1,nobs]
            data, labels = @inferred data_fun(edge_idx)
            @test data == @inferred feature_fun(edge_idx)
            @test labels == @inferred label_fun(edge_idx)
        end
    end
end
60 | 254 | end
|
61 | 255 |
|
62 | 256 | end
|
0 commit comments