
Commit 29a4d45

Merge pull request #18 from alan-turing-institute/compat
Update compatibility and bump version
2 parents 8acf6e0 + be8ade4

File tree: 5 files changed, +24 -55 lines

.github/workflows/CompatHelper.yml

Lines changed: 4 additions & 7 deletions
@@ -1,19 +1,16 @@
 name: CompatHelper
-
 on:
   schedule:
     - cron: '00 00 * * *'
-
+  workflow_dispatch:
 jobs:
   CompatHelper:
     runs-on: ubuntu-latest
     steps:
-      - uses: julia-actions/setup-julia@latest
-        with:
-          version: 1.3
      - name: Pkg.add("CompatHelper")
        run: julia -e 'using Pkg; Pkg.add("CompatHelper")'
-      - name: CompatHelper.main
+      - name: CompatHelper.main()
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: julia -e 'using CompatHelper; CompatHelper.main(; master_branch = "master")'
+          COMPATHELPER_PRIV: ${{ secrets.COMPATHELPER_PRIV }} # optional
+        run: julia -e 'using CompatHelper; CompatHelper.main()'

Project.toml

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
 name = "MLJMultivariateStatsInterface"
 uuid = "1b6a4a23-ba22-4f51-9698-8599985d3728"
 authors = ["Anthony D. Blaom <[email protected]>", "Thibaut Lienart <[email protected]>", "Okon Samuel <[email protected]>"]
-version = "0.1.6"
+version = "0.1.7"
 
 [deps]
 Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
@@ -12,7 +12,7 @@ StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
 
 [compat]
 Distances = "^0.9,^0.10"
-MLJModelInterface = "0.3.5"
+MLJModelInterface = "^0.3.5,^0.4"
 MultivariateStats = "0.7, 0.8"
 StatsBase = "0.32, 0.33"
 julia = "1"
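
The widened [compat] entry MLJModelInterface = "^0.3.5,^0.4" accepts any release in the caret ranges [0.3.5, 0.4.0) or [0.4.0, 0.5.0), instead of the 0.3.5-0.3.x series only. As a rough illustration (not part of the commit), the admitted range can be inspected with the compat parser shipped in Julia's Pkg standard library; Pkg.Types.semver_spec is assumed available as documented for Pkg:

    # Sketch only: which versions does the new [compat] entry admit?
    using Pkg

    spec = Pkg.Types.semver_spec("^0.3.5, ^0.4")   # same ranges as "^0.3.5,^0.4"

    @assert v"0.3.6" in spec     # 0.3.x releases from 0.3.5 upwards are accepted
    @assert v"0.4.2" in spec     # any 0.4.x release is accepted
    @assert !(v"0.3.4" in spec)  # below the lower bound
    @assert !(v"0.5.0" in spec)  # outside both caret ranges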

test/models/discriminant_analysis.jl

Lines changed: 15 additions & 31 deletions
@@ -10,7 +10,7 @@
 ytest = selectrows(y, test)
 
 lda_model = LDA()
-
+
 ## Check model `fit`
 fitresult, = fit(lda_model, 1, Xtrain, ytrain)
 class_means, projection_matrix = fitted_params(lda_model, fitresult)
@@ -28,10 +28,6 @@
 tlda_mlj = transform(lda_model, fitresult, X)
 @test tlda_mlj == tlda_ms
 ## Check model traits
-d = info_dict(LDA)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:target_scitype] == AbstractVector{<:Finite}
-@test d[:name] == "LDA"
 end
 
 @testset "MLDA-2" begin
@@ -55,7 +51,7 @@ end
 ytrain = selectrows(y, train)
 Xtest = selectrows(X, test)
 ytest = selectrows(y, test)
-
+
 lda_model = LDA()
 ## Check model `fit`/`predict`
 fitresult, = fit(lda_model, 1, Xtrain, ytrain)
@@ -74,7 +70,7 @@ end
 ytrain = selectrows(y, train)
 Xtest = selectrows(X, test)
 ytest = selectrows(y, test)
-
+
 BLDA_model = BayesianLDA()
 ## Check model `fit`
 fitresult, = fit(BLDA_model, 1, Xtrain, ytrain)
@@ -85,14 +81,10 @@ end
 mce = cross_entropy(preds, ytest) |> mean
 @test 0.685 ≤ mce ≤ 0.695
 ## Check model traits
-d = info_dict(BayesianLDA)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:target_scitype] == AbstractVector{<:Finite}
-@test d[:name] == "BayesianLDA"
 end
 
 @testset "BayesianSubspaceLDA" begin
-## Data
+## Data
 X, y = @load_iris
 LDA_model = BayesianSubspaceLDA()
 ## Check model `fit`
@@ -113,27 +105,23 @@
 abs.(
 projection_matrix .- [
 0.0675721 0.00271023;
-0.127666 0.177718;
--0.180211 -0.0767255;
+0.127666 0.177718;
+-0.180211 -0.0767255;
 -0.235382 0.231435
 ]
 )
 ) < 0.05
 @test round.(prior_probabilities, sigdigits=7) == [0.3333333, 0.3333333, 0.3333333]
 @test round.(report.explained_variance_ratio, digits=4) == [0.9915, 0.0085]
-
+
 ## Check model `predict`
 preds=predict(LDA_model, fitresult, X)
 predicted_class = predict_mode(LDA_model, fitresult, X)
 mcr = misclassification_rate(predicted_class, y)
 mce = cross_entropy(preds, y) |> mean
-@test round.(mcr, sigdigits=1) == 0.02
+@test round.(mcr, sigdigits=1) == 0.02
 @test 0.04 ≤ mce ≤ 0.045
 ## Check model traits
-d = info_dict(BayesianSubspaceLDA)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:target_scitype] == AbstractVector{<:Finite}
-@test d[:name] == "BayesianSubspaceLDA"
 end
 
 @testset "SubspaceLDA" begin
@@ -157,7 +145,7 @@ end
 ytrain = selectrows(y, train)
 Xtest = selectrows(X, test)
 ytest = selectrows(y, test)
-
+
 lda_model = SubspaceLDA()
 ## Check model `fit`/ `transform`
 fitresult, = fit(lda_model, 1, Xtrain, ytrain)
@@ -174,10 +162,6 @@ end
 tlda_mlj = transform(lda_model, fitresult, X)
 @test tlda_mlj == tlda_ms
 ## Check model traits
-d = info_dict(SubspaceLDA)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:target_scitype] == AbstractVector{<:Finite}
-@test d[:name] == "SubspaceLDA"
 end
 
 @testset "discriminant models checks" begin
@@ -187,7 +171,7 @@ X = (x1 =rand(4), x2 = collect(1:4))
 
 ## Note: The following test depend on the order in which they are written.
 ## Hence do not change the ordering of the tests.
-
+
 ## Check to make sure error is thrown if we only have a single
 ## unique class during training.
 model = LDA()
@@ -199,23 +183,23 @@ y1 = y[[1,1,1,1]]
 ## than unique classes during training.
 @test_throws ArgumentError fit(model, 1, X, y)
 
-## Check to make sure error is thrown if `out_dim` exceeds the number of features in
+## Check to make sure error is thrown if `out_dim` exceeds the number of features in
 ## sample matrix used in training.
 model = LDA(out_dim=3)
 # categorical array with same pool as y but only containing "apples" & "oranges"
-y2 = y[[1,2,1,2]]
+y2 = y[[1,2,1,2]]
 @test_throws ArgumentError fit(model, 1, X, y2)
 
-## Check to make sure error is thrown if length(`priors`) != number of classes
+## Check to make sure error is thrown if length(`priors`) != number of classes
 ## in common pool of target vector used in training.
 model = BayesianLDA(priors=[0.1, 0.5, 0.4])
 @test_throws ArgumentError fit(model, 1, X, y)
 
-## Check to make sure error is thrown if sum(`priors`) isn't approximately equal to 1.
+## Check to make sure error is thrown if sum(`priors`) isn't approximately equal to 1.
 model = BayesianLDA(priors=[0.1, 0.5, 0.4, 0.2])
 @test_throws ArgumentError fit(model, 1, X, y)
 
-## Check to make sure error is thrown if `priors .< 0` or `priors .> 1`.
+## Check to make sure error is thrown if `priors .< 0` or `priors .> 1`.
 model = BayesianLDA(priors=[-0.1, 0.0, 1.0, 0.1])
 @test_throws ArgumentError fit(model, 1, X, y)
 model = BayesianLDA(priors=[1.1, 0.0, 0.0, -0.1])
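
The testsets above drive the models through MLJ's low-level model API (fit, predict, predict_mode, transform, fitted_params) rather than through machines. A minimal sketch of that pattern, not part of the commit and assuming MLJBase and MLJMultivariateStatsInterface are installed:

    # Sketch only: the fit/predict/transform pattern exercised by the LDA testsets.
    using MLJBase, MLJMultivariateStatsInterface

    X, y = @load_iris                          # feature table and categorical target

    lda = LDA()                                # default hyperparameters
    fitresult, _, report = fit(lda, 1, X, y)   # low-level fit; `1` is the verbosity level

    probs = predict(lda, fitresult, X)         # probabilistic predictions
    labels = predict_mode(lda, fitresult, X)   # most probable class per observation
    Z = transform(lda, fitresult, X)           # projection onto the discriminant subspace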

test/models/linear_models.jl

Lines changed: 3 additions & 11 deletions
@@ -12,10 +12,6 @@
 # Get the true intercept?
 @test abs(fr.intercept) < 1e-10
 # Check metadata
-d = info_dict(linear)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:target_scitype] == Union{Table(Continuous), AbstractVector{Continuous}}
-@test d[:name] == "LinearRegressor"
 end
 
 @testset "Multi-response Linear" begin
@@ -29,7 +25,7 @@ end
 linear = LinearRegressor()
 Yhat, fr = test_regression(linear, X, Y)
 Yhat_mat = MLJBase.matrix(Yhat)
-# Check if the column names is same after predict
+# Check if the column names is same after predict
 MLJBase.schema(Yhat).names == MLJBase.schema(Y).names
 # Training error
 @test norm(Yhat_mat - Y_mat)/sqrt(n) < 1e-12
@@ -52,10 +48,6 @@ end
 # Get the true intercept?
 @test abs(fr.intercept) < 1e-10
 # Check metadata
-d = info_dict(ridge)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:target_scitype] == Union{Table(Continuous), AbstractVector{Continuous}}
-@test d[:name] == "RidgeRegressor"
 end
 
 @testset "Multi-response Ridge" begin
@@ -65,12 +57,12 @@ end
 rng = StableRNG(1234)
 X, Y = make_regression2(n, 3, noise=0, intercept=false, rng=rng)
 Y_mat = MLJBase.matrix(Y)
-# Train model with intercept on all data with no regularization
+# Train model with intercept on all data with no regularization
 # and no standardization of target.
 ridge = RidgeRegressor(lambda=0.0)
 Yhat, fr = test_regression(ridge, X, Y)
 Yhat_mat = MLJBase.matrix(Yhat)
-# Check if the column names is same after predict
+# Check if the column names is same after predict
 MLJBase.schema(Yhat).names == MLJBase.schema(Y).names
 # Training error
 @test norm(Yhat_mat - Y_mat)/sqrt(n) < 1e-12
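
The regression testsets rely on the local test_regression and make_regression2 helpers defined in the test suite. A rough stand-alone sketch of the single-response case, not part of the commit and substituting MLJBase's make_regression generator for the local helper:

    # Sketch only: with lambda = 0, RidgeRegressor reduces to ordinary least squares,
    # so the training error on noiseless synthetic data should be near zero.
    using MLJBase, MLJMultivariateStatsInterface
    using LinearAlgebra: norm

    n = 100
    X, y = make_regression(n, 3; noise=0.0, rng=1234)

    ridge = RidgeRegressor(lambda=0.0)
    fitresult, _, _ = fit(ridge, 1, X, y)
    yhat = predict(ridge, fitresult, X)

    @assert norm(yhat - y) / sqrt(n) < 1e-8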

test/testutils.jl

Lines changed: 0 additions & 4 deletions
@@ -30,10 +30,6 @@ function test_composition_model(ms_model, mlj_model, X, X_array ; test_inverse=t
 # Compare MLJ and MultivariateStats transformed matrices
 @test Xtr_mlj ≈ Xtr_ms
 # test metadata
-d = info_dict(mlj_model_type)
-@test d[:input_scitype] == Table(Continuous)
-@test d[:output_scitype] == Table(Continuous)
-@test d[:name] == string(mlj_model_type)
 
 if test_inverse
 Xinv_ms = permutedims(
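
The test_composition_model helper compares an MLJ-wrapped transformer against the underlying MultivariateStats fit and, when test_inverse is set, also checks the inverse transform. A rough sketch, not part of the commit, of the round trip it exercises; the PCA model, its maxoutdim parameter, and inverse_transform come from the package's interface rather than from this diff:

    # Sketch only: fit/transform/inverse_transform round trip for a transformer model.
    using MLJBase, MLJMultivariateStatsInterface

    X, _ = @load_iris
    pca = PCA(maxoutdim=2)                         # illustrative choice of output dimension
    fitresult, _, _ = fit(pca, 1, X)               # unsupervised fit: no target
    Xtr = transform(pca, fitresult, X)             # reduced representation (a table)
    Xre = inverse_transform(pca, fitresult, Xtr)   # approximate reconstruction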
