@@ -231,31 +231,32 @@ def cases_test_fit_mle():
231
231
# These fail default test or hang
232
232
skip_basic_fit = {'argus' , 'irwinhall' , 'foldnorm' , 'truncpareto' ,
233
233
'truncweibull_min' , 'ksone' , 'levy_stable' ,
234
- 'studentized_range' , 'kstwo' , 'arcsine' ,
234
+ 'studentized_range' , 'kstwo' ,
235
+ 'beta' , 'nakagami' , 'truncnorm' , # don't meet tolerance
235
236
'poisson_binom' } # vector-valued shape parameter
236
237
237
238
# Please keep this list in alphabetical order...
238
- slow_basic_fit = {'alpha' , 'betaprime' , 'binom' , 'bradford' , 'burr12' ,
239
+ slow_basic_fit = {'alpha' , 'arcsine' , ' betaprime' , 'binom' , 'bradford' , 'burr12' ,
239
240
'chi' , 'crystalball' , 'dweibull' , 'erlang' , 'exponnorm' ,
240
241
'exponpow' , 'f' , 'fatiguelife' , 'fisk' , 'foldcauchy' , 'gamma' ,
241
242
'genexpon' , 'genextreme' , 'gennorm' , 'genpareto' ,
242
243
'gompertz' , 'halfgennorm' , 'invgamma' , 'invgauss' , 'invweibull' ,
243
244
'jf_skew_t' , 'johnsonsb' , 'johnsonsu' , 'kappa3' ,
244
245
'kstwobign' , 'loglaplace' , 'lognorm' , 'lomax' , 'mielke' ,
245
- 'nakagami' , 'nbinom' , 'norminvgauss' ,
246
+ 'nbinom' , 'norminvgauss' ,
246
247
'pareto' , 'pearson3' , 'powerlaw' , 'powernorm' ,
247
248
'randint' , 'rdist' , 'recipinvgauss' , 'rice' , 'skewnorm' ,
248
249
't' , 'uniform' , 'weibull_max' , 'weibull_min' , 'wrapcauchy' }
249
250
250
251
# Please keep this list in alphabetical order...
251
- xslow_basic_fit = {'beta' , 'betabinom' , 'betanbinom' , 'burr' , 'exponweib' ,
252
+ xslow_basic_fit = {'betabinom' , 'betanbinom' , 'burr' , 'exponweib' ,
252
253
'gausshyper' , 'gengamma' , 'genhalflogistic' ,
253
254
'genhyperbolic' , 'geninvgauss' ,
254
255
'hypergeom' , 'kappa4' , 'loguniform' ,
255
256
'ncf' , 'nchypergeom_fisher' , 'nchypergeom_wallenius' ,
256
257
'nct' , 'ncx2' , 'nhypergeom' ,
257
258
'powerlognorm' , 'reciprocal' , 'rel_breitwigner' ,
258
- 'skellam' , 'trapezoid' , 'triang' , 'truncnorm' ,
259
+ 'skellam' , 'trapezoid' , 'triang' ,
259
260
'tukeylambda' , 'vonmises' , 'zipfian' }
260
261
261
262
for dist in dict (distdiscrete + distcont ):
@@ -282,12 +283,12 @@ def cases_test_fit_mse():
282
283
'gausshyper' , 'genhyperbolic' , # integration warnings
283
284
'tukeylambda' , # close, but doesn't meet tolerance
284
285
'vonmises' , # can have negative CDF; doesn't play nice
285
- 'argus' , # doesn't meet tolerance; tested separately
286
+ 'arcsine' , 'argus' , 'powerlaw' , # don't meet tolerance
286
287
'poisson_binom' , # vector-valued shape parameter
287
288
}
288
289
289
290
# Please keep this list in alphabetical order...
290
- slow_basic_fit = {'alpha' , 'anglit' , 'arcsine' , 'betabinom' , 'bradford' ,
291
+ slow_basic_fit = {'alpha' , 'anglit' , 'betabinom' , 'bradford' ,
291
292
'chi' , 'chi2' , 'crystalball' , 'dweibull' ,
292
293
'erlang' , 'exponnorm' , 'exponpow' , 'exponweib' ,
293
294
'fatiguelife' , 'fisk' , 'foldcauchy' , 'foldnorm' ,
@@ -311,7 +312,7 @@ def cases_test_fit_mse():
311
312
'johnsonsb' , 'kappa4' , 'loguniform' , 'mielke' ,
312
313
'nakagami' , 'ncf' , 'nchypergeom_fisher' ,
313
314
'nchypergeom_wallenius' , 'nct' , 'ncx2' ,
314
- 'pearson3' , 'powerlaw' , 'powerlognorm' ,
315
+ 'pearson3' , 'powerlognorm' ,
315
316
'rdist' , 'reciprocal' , 'rel_breitwigner' , 'rice' ,
316
317
'trapezoid' , 'truncnorm' , 'truncweibull_min' ,
317
318
'vonmises_line' , 'zipfian' }
@@ -375,8 +376,8 @@ class TestFit:
375
376
rtol = 1e-2
376
377
tols = {'atol' : atol , 'rtol' : rtol }
377
378
378
- def opt (self , * args , ** kwds ):
379
- return differential_evolution (* args , rng = 1 , ** kwds )
379
+ def opt (self , * args , rng = 1 , ** kwds ):
380
+ return differential_evolution (* args , rng = rng , ** kwds )
380
381
381
382
def test_dist_iv (self ):
382
383
message = "`dist` must be an instance of..."
@@ -494,7 +495,7 @@ def test_guess_iv(self):
494
495
with pytest .warns (RuntimeWarning , match = message ):
495
496
stats .fit (self .dist , self .data , self .shape_bounds_d , guess = guess )
496
497
497
- def basic_fit_test (self , dist_name , method ):
498
+ def basic_fit_test (self , dist_name , method , rng = 1 ):
498
499
499
500
N = 5000
500
501
dist_data = dict (distcont + distdiscrete )
@@ -530,12 +531,13 @@ def basic_fit_test(self, dist_name, method):
530
531
531
532
@pytest .mark .parametrize ("dist_name" , cases_test_fit_mle ())
532
533
def test_basic_fit_mle (self , dist_name ):
533
- self .basic_fit_test (dist_name , "mle" )
534
+ self .basic_fit_test (dist_name , "mle" , rng = 5 )
534
535
535
536
@pytest .mark .parametrize ("dist_name" , cases_test_fit_mse ())
536
537
def test_basic_fit_mse (self , dist_name ):
537
- self .basic_fit_test (dist_name , "mse" )
538
+ self .basic_fit_test (dist_name , "mse" , rng = 2 )
538
539
540
+ @pytest .mark .slow
539
541
def test_arcsine (self ):
540
542
# Can't guarantee that all distributions will fit all data with
541
543
# arbitrary bounds. This distribution just happens to fail above.
@@ -546,8 +548,9 @@ def test_arcsine(self):
546
548
shapes = (1. , 2. )
547
549
data = dist .rvs (* shapes , size = N , random_state = rng )
548
550
shape_bounds = {'loc' : (0.1 , 10 ), 'scale' : (0.1 , 10 )}
549
- res = stats .fit (dist , data , shape_bounds , optimizer = self .opt )
550
- assert_nlff_less_or_close (dist , data , res .params , shapes , ** self .tols )
551
+ res = stats .fit (dist , data , shape_bounds , method = 'mse' , optimizer = self .opt )
552
+ assert_nlff_less_or_close (dist , data , res .params , shapes ,
553
+ nlff_name = '_penalized_nlpsf' , ** self .tols )
551
554
552
555
@pytest .mark .parametrize ("method" , ('mle' , 'mse' ))
553
556
def test_argus (self , method ):
@@ -561,8 +564,25 @@ def test_argus(self, method):
561
564
data = dist .rvs (* shapes , size = N , random_state = rng )
562
565
shape_bounds = {'chi' : (0.1 , 10 ), 'loc' : (0.1 , 10 ), 'scale' : (0.1 , 10 )}
563
566
res = stats .fit (dist , data , shape_bounds , optimizer = self .opt , method = method )
567
+ nlff_name = {'mle' : 'nnlf' , 'mse' : '_penalized_nlpsf' }[method ]
568
+ assert_nlff_less_or_close (dist , data , res .params , shapes , ** self .tols ,
569
+ nlff_name = nlff_name )
564
570
565
- assert_nlff_less_or_close (dist , data , res .params , shapes , ** self .tols )
571
+ @pytest .mark .xslow
572
+ def test_beta (self ):
573
+ # Can't guarantee that all distributions will fit all data with
574
+ # arbitrary bounds. This distribution just happens to fail above.
575
+ # Try something slightly different.
576
+ N = 1000
577
+ rng = np .random .default_rng (self .seed )
578
+ dist = stats .beta
579
+ shapes = (2.3098496451481823 , 0.62687954300963677 , 1. , 2. )
580
+ data = dist .rvs (* shapes , size = N , random_state = rng )
581
+ shape_bounds = {'a' : (0.1 , 10 ), 'b' :(0.1 , 10 ),
582
+ 'loc' : (0.1 , 10 ), 'scale' : (0.1 , 10 )}
583
+ res = stats .fit (dist , data , shape_bounds , method = 'mle' , optimizer = self .opt )
584
+ assert_nlff_less_or_close (dist , data , res .params , shapes ,
585
+ nlff_name = 'nnlf' , ** self .tols )
566
586
567
587
def test_foldnorm (self ):
568
588
# Can't guarantee that all distributions will fit all data with
@@ -578,6 +598,35 @@ def test_foldnorm(self):
578
598
579
599
assert_nlff_less_or_close (dist , data , res .params , shapes , ** self .tols )
580
600
601
+ def test_nakagami (self ):
602
+ # Can't guarantee that all distributions will fit all data with
603
+ # arbitrary bounds. This distribution just happens to fail above.
604
+ # Try something slightly different.
605
+ N = 1000
606
+ rng = np .random .default_rng (self .seed )
607
+ dist = stats .nakagami
608
+ shapes = (4.9673794866666237 , 1. , 2. )
609
+ data = dist .rvs (* shapes , size = N , random_state = rng )
610
+ shape_bounds = {'nu' :(0.1 , 10 ), 'loc' : (0.1 , 10 ), 'scale' : (0.1 , 10 )}
611
+ res = stats .fit (dist , data , shape_bounds , method = 'mle' , optimizer = self .opt )
612
+ assert_nlff_less_or_close (dist , data , res .params , shapes ,
613
+ nlff_name = 'nnlf' , ** self .tols )
614
+
615
+ @pytest .mark .slow
616
+ def test_powerlaw (self ):
617
+ # Can't guarantee that all distributions will fit all data with
618
+ # arbitrary bounds. This distribution just happens to fail above.
619
+ # Try something slightly different.
620
+ N = 1000
621
+ rng = np .random .default_rng (self .seed )
622
+ dist = stats .powerlaw
623
+ shapes = (1.6591133289905851 , 1. , 2. )
624
+ data = dist .rvs (* shapes , size = N , random_state = rng )
625
+ shape_bounds = {'a' : (0.1 , 10 ), 'loc' : (0.1 , 10 ), 'scale' : (0.1 , 10 )}
626
+ res = stats .fit (dist , data , shape_bounds , method = 'mse' , optimizer = self .opt )
627
+ assert_nlff_less_or_close (dist , data , res .params , shapes ,
628
+ nlff_name = '_penalized_nlpsf' , ** self .tols )
629
+
581
630
def test_truncpareto (self ):
582
631
# Can't guarantee that all distributions will fit all data with
583
632
# arbitrary bounds. This distribution just happens to fail above.
@@ -592,7 +641,7 @@ def test_truncpareto(self):
592
641
593
642
assert_nlff_less_or_close (dist , data , res .params , shapes , ** self .tols )
594
643
595
- @pytest .mark .fail_slow ( 5 )
644
+ @pytest .mark .slow
596
645
def test_truncweibull_min (self ):
597
646
# Can't guarantee that all distributions will fit all data with
598
647
# arbitrary bounds. This distribution just happens to fail above.
0 commit comments