@@ -828,23 +828,24 @@ def test_gof_iv(self):
         with pytest.raises(TypeError, match=message):
             goodness_of_fit(dist, x, n_mc_samples=1000.5)
 
-        message = "'herring' cannot be used to seed a "
-        with pytest.raises(ValueError, match=message):
-            goodness_of_fit(dist, x, random_state='herring')
+        message = "SeedSequence expects int or sequence "
+        with pytest.raises(TypeError, match=message):
+            goodness_of_fit(dist, x, rng='herring')
 
     def test_against_ks(self):
         rng = np.random.default_rng(8517426291317196949)
         x = examgrades
         known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
         res = goodness_of_fit(stats.norm, x, known_params=known_params,
-                              statistic='ks', random_state=rng)
+                              statistic='ks', rng=rng)
         ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
         assert_allclose(res.statistic, ref.statistic)  # ~0.0848
         assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)  # ~0.335
 
     def test_against_lilliefors(self):
         rng = np.random.default_rng(2291803665717442724)
         x = examgrades
+        # preserve use of old random_state during SPEC 7 transition
         res = goodness_of_fit(stats.norm, x, statistic='ks', random_state=rng)
         known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
         ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
@@ -856,7 +857,7 @@ def test_against_cvm(self):
         x = examgrades
         known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
         res = goodness_of_fit(stats.norm, x, known_params=known_params,
-                              statistic='cvm', random_state=rng)
+                              statistic='cvm', rng=rng)
         ref = stats.cramervonmises(x, stats.norm(**known_params).cdf)
         assert_allclose(res.statistic, ref.statistic)  # ~0.090
         assert_allclose(res.pvalue, ref.pvalue, atol=5e-3)  # ~0.636
@@ -868,7 +869,7 @@ def test_against_anderson_case_0(self):
         # loc that produced critical value of statistic found w/ root_scalar
         known_params = {'loc': 45.01575354024957, 'scale': 30}
         res = goodness_of_fit(stats.norm, x, known_params=known_params,
-                              statistic='ad', random_state=rng)
+                              statistic='ad', rng=rng)
         assert_allclose(res.statistic, 2.492)  # See [1] Table 1A 1.0
         assert_allclose(res.pvalue, 0.05, atol=5e-3)
 
@@ -879,7 +880,7 @@ def test_against_anderson_case_1(self):
         # scale that produced critical value of statistic found w/ root_scalar
         known_params = {'scale': 29.957112639101933}
         res = goodness_of_fit(stats.norm, x, known_params=known_params,
-                              statistic='ad', random_state=rng)
+                              statistic='ad', rng=rng)
         assert_allclose(res.statistic, 0.908)  # See [1] Table 1B 1.1
         assert_allclose(res.pvalue, 0.1, atol=5e-3)
 
@@ -890,7 +891,7 @@ def test_against_anderson_case_2(self):
         # loc that produced critical value of statistic found w/ root_scalar
         known_params = {'loc': 44.5680212261933}
         res = goodness_of_fit(stats.norm, x, known_params=known_params,
-                              statistic='ad', random_state=rng)
+                              statistic='ad', rng=rng)
         assert_allclose(res.statistic, 2.904)  # See [1] Table 1B 1.2
         assert_allclose(res.pvalue, 0.025, atol=5e-3)
 
@@ -900,7 +901,7 @@ def test_against_anderson_case_3(self):
         # c that produced critical value of statistic found w/ root_scalar
         x = stats.skewnorm.rvs(1.4477847789132101, loc=1, scale=2, size=100,
                                random_state=rng)
-        res = goodness_of_fit(stats.norm, x, statistic='ad', random_state=rng)
+        res = goodness_of_fit(stats.norm, x, statistic='ad', rng=rng)
         assert_allclose(res.statistic, 0.559)  # See [1] Table 1B 1.2
         assert_allclose(res.pvalue, 0.15, atol=5e-3)
 
@@ -911,7 +912,7 @@ def test_against_anderson_gumbel_r(self):
         x = stats.genextreme(0.051896837188595134, loc=0.5,
                              scale=1.5).rvs(size=1000, random_state=rng)
         res = goodness_of_fit(stats.gumbel_r, x, statistic='ad',
-                              random_state=rng)
+                              rng=rng)
         ref = stats.anderson(x, dist='gumbel_r')
         assert_allclose(res.statistic, ref.critical_values[0])
         assert_allclose(res.pvalue, ref.significance_level[0]/100, atol=5e-3)
@@ -922,7 +923,7 @@ def test_against_filliben_norm(self):
         y = [6, 1, -4, 8, -2, 5, 0]
         known_params = {'loc': 0, 'scale': 1}
         res = stats.goodness_of_fit(stats.norm, y, known_params=known_params,
-                                    statistic="filliben", random_state=rng)
+                                    statistic="filliben", rng=rng)
         # Slight discrepancy presumably due to roundoff in Filliben's
         # calculation. Using exact order statistic medians instead of
         # Filliben's approximation doesn't account for it.
@@ -944,10 +945,10 @@ def test_filliben_property(self):
         rng = np.random.default_rng(8535677809395478813)
         x = rng.normal(loc=10, scale=0.5, size=100)
         res = stats.goodness_of_fit(stats.norm, x,
-                                    statistic="filliben", random_state=rng)
+                                    statistic="filliben", rng=rng)
         known_params = {'loc': 0, 'scale': 1}
         ref = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
-                                    statistic="filliben", random_state=rng)
+                                    statistic="filliben", rng=rng)
         assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
 
     @pytest.mark.parametrize('case', [(25, [.928, .937, .950, .958, .966]),
@@ -960,7 +961,7 @@ def test_against_filliben_norm_table(self, case):
         x = rng.random(n)
         known_params = {'loc': 0, 'scale': 1}
         res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
-                                    statistic="filliben", random_state=rng)
+                                    statistic="filliben", rng=rng)
         percentiles = np.array([0.005, 0.01, 0.025, 0.05, 0.1])
         res = stats.scoreatpercentile(res.null_distribution, percentiles*100)
         assert_allclose(res, ref, atol=2e-3)
@@ -980,7 +981,7 @@ def test_against_ppcc(self, case):
         rng = np.random.default_rng(7777775561439803116)
         x = rng.normal(size=n)
         res = stats.goodness_of_fit(stats.rayleigh, x, statistic="filliben",
-                                    random_state=rng)
+                                    rng=rng)
         assert_allclose(res.statistic, ref_statistic, rtol=1e-4)
         assert_allclose(res.pvalue, ref_pvalue, atol=1.5e-2)
 
@@ -1000,7 +1001,7 @@ def test_params_effects(self):
         res1 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
                                guessed_params=guessed_params,
                                fit_params=fit_params,
-                               known_params=known_params, random_state=rng)
+                               known_params=known_params, rng=rng)
         assert not np.allclose(res1.fit_result.params.c, 13.4)
         assert_equal(res1.fit_result.params.scale, 13.73)
         assert_equal(res1.fit_result.params.loc, -13.85)
@@ -1012,7 +1013,7 @@ def test_params_effects(self):
         res2 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
                                guessed_params=guessed_params,
                                fit_params=fit_params,
-                               known_params=known_params, random_state=rng)
+                               known_params=known_params, rng=rng)
         assert not np.allclose(res2.fit_result.params.c,
                                res1.fit_result.params.c, rtol=1e-8)
         assert not np.allclose(res2.null_distribution,
@@ -1028,7 +1029,7 @@ def test_params_effects(self):
         res3 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
                                guessed_params=guessed_params,
                                fit_params=fit_params,
-                               known_params=known_params, random_state=rng)
+                               known_params=known_params, rng=rng)
         assert_equal(res3.fit_result.params.c, 13.4)
         assert_equal(res3.fit_result.params.scale, 13.73)
         assert_equal(res3.fit_result.params.loc, -13.85)
@@ -1058,7 +1059,7 @@ def greenwood(dist, data, *, axis):
         data = stats.expon.rvs(size=5, random_state=rng)
         result = goodness_of_fit(stats.expon, data,
                                  known_params={'loc': 0, 'scale': 1},
-                                 statistic=greenwood, random_state=rng)
+                                 statistic=greenwood, rng=rng)
         p = [.01, .05, .1, .2, .3, .4, .5, .6, .7, .8, .9, .95, .99]
         exact_quantiles = [
             .183863, .199403, .210088, .226040, .239947, .253677, .268422,
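
Note (not part of the patch): a minimal sketch of the calling convention these tests migrate to under SPEC 7. The `rng` argument is normalized with `np.random.default_rng`, so it accepts an int seed, a `SeedSequence`, or a `Generator` (which is why `rng='herring'` now raises `TypeError` from `SeedSequence`), while `random_state` is still exercised during the transition. The `statistic` argument may also be a callable with the `(dist, data, *, axis)` signature seen in the last hunk; the `greenwood` body below is a guess (sum of squared spacings of the fitted CDF values), since only its signature and the call site appear in the diff, and the seed and data here are illustrative.

import numpy as np
from scipy import stats

rng = np.random.default_rng(885985556597116446)    # illustrative seed
data = stats.expon.rvs(size=5, random_state=rng)   # illustrative data

# SPEC 7 style: seed the Monte Carlo null distribution via `rng=`.
res = stats.goodness_of_fit(stats.norm, data, statistic='ad', rng=rng)

# A custom statistic must be vectorized over `axis`; this body is hypothetical.
def greenwood(dist, data, *, axis):
    y = np.sort(dist.cdf(data), axis=axis)
    d = np.diff(y, prepend=0, append=1, axis=axis)  # spacings incl. 0 and 1
    return np.sum(d**2, axis=axis)

res = stats.goodness_of_fit(stats.expon, data,
                            known_params={'loc': 0, 'scale': 1},
                            statistic=greenwood, rng=rng)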