@@ -632,15 +632,15 @@ def test_chained_transform():


 def test_exp_transform_rv():
-    base_rv = at.random.normal(0, 1, size=2, name="base_rv")
+    base_rv = at.random.normal(0, 1, size=3, name="base_rv")
     y_rv = at.exp(base_rv)
     y_rv.name = "y"

     y_vv = y_rv.clone()
     logp = joint_logprob({y_rv: y_vv}, sum=False)
     logp_fn = pytensor.function([y_vv], logp)

-    y_val = [0.1, 0.3]
+    y_val = [-2.0, 0.1, 0.3]
     np.testing.assert_allclose(
         logp_fn(y_val),
         sp.stats.lognorm(s=1).logpdf(y_val),
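
The lognormal reference used above follows from the change of variables for Y = exp(X) with X ~ Normal(0, 1): p_Y(y) = p_X(log y) / y for y > 0, and zero density (logpdf of -inf) for y <= 0, which is why the added test value -2.0 is expected to produce -inf. A minimal standalone sketch of that identity, using only numpy and scipy with illustrative values and independent of the pytensor machinery in the test:

import numpy as np
from scipy import stats

y = np.array([-2.0, 0.1, 0.3])
with np.errstate(divide="ignore", invalid="ignore"):
    # change of variables: logp_Y(y) = logp_X(log y) - log y on the support y > 0
    manual = np.where(y > 0, stats.norm(0, 1).logpdf(np.log(y)) - np.log(y), -np.inf)
assert np.allclose(manual, stats.lognorm(s=1).logpdf(y))  # -inf entries compare equal
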
@@ -794,28 +794,28 @@ def test_invalid_broadcasted_transform_rv_fails():
 def test_reciprocal_rv_transform(numerator):
     shape = 3
     scale = 5
-    x_rv = numerator / at.random.gamma(shape, scale)
+    x_rv = numerator / at.random.gamma(shape, scale, size=(2,))
     x_rv.name = "x"

     x_vv = x_rv.clone()
-    x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}))
+    x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}, sum=False))

-    x_test_val = 1.5
-    assert np.isclose(
+    x_test_val = np.r_[-0.5, 1.5]
+    assert np.allclose(
         x_logp_fn(x_test_val),
         sp.stats.invgamma(shape, scale=scale * numerator).logpdf(x_test_val),
     )


 def test_sqr_transform():
     # The square of a unit normal is a chi-square with 1 df
-    x_rv = at.random.normal(0, 1, size=(3,)) ** 2
+    x_rv = at.random.normal(0, 1, size=(4,)) ** 2
     x_rv.name = "x"

     x_vv = x_rv.clone()
     x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}, sum=False))

-    x_test_val = np.r_[0.5, 1, 2.5]
+    x_test_val = np.r_[-0.5, 0.5, 1, 2.5]
     assert np.allclose(
         x_logp_fn(x_test_val),
         sp.stats.chi2(df=1).logpdf(x_test_val),
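
Both reference densities in this hunk are standard change-of-variables results: if X ~ Gamma(a, scale=s) then c / X ~ InvGamma(a, scale=c / s), and the square of a standard normal is chi-square with 1 df, so any negative test value lies outside the support and gets a logpdf of -inf. A short standalone check of the two pdf identities with scipy only (a, s, c and the evaluation points are illustrative, and the Gamma/InvGamma statement is written in scipy's scale parameterization, not necessarily pytensor's):

import numpy as np
from scipy import stats

a, s, c = 3.0, 0.2, 5.0
y = np.array([0.5, 1.5, 4.0])
# density of Y = c / X via p_Y(y) = p_X(c / y) * c / y**2
recip = stats.gamma(a, scale=s).pdf(c / y) * c / y**2
assert np.allclose(recip, stats.invgamma(a, scale=c / s).pdf(y))

# density of Y = X**2 via the two branches x = +/- sqrt(y)
sq = 2 * stats.norm(0, 1).pdf(np.sqrt(y)) / (2 * np.sqrt(y))
assert np.allclose(sq, stats.chi2(df=1).pdf(y))
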
@@ -824,19 +824,58 @@ def test_sqr_transform():

 def test_sqrt_transform():
     # The sqrt of a chisquare with n df is a chi distribution with n df
-    x_rv = at.sqrt(at.random.chisquare(df=3, size=(3,)))
+    x_rv = at.sqrt(at.random.chisquare(df=3, size=(4,)))
     x_rv.name = "x"

     x_vv = x_rv.clone()
     x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}, sum=False))

-    x_test_val = np.r_[0.5, 1, 2.5]
+    x_test_val = np.r_[-2.5, 0.5, 1, 2.5]
     assert np.allclose(
         x_logp_fn(x_test_val),
         sp.stats.chi(df=3).logpdf(x_test_val),
     )


+@pytest.mark.parametrize("power", (-3, -1, 1, 5, 7))
+def test_negative_value_odd_power_transform(power):
+    # check that negative values and odd powers evaluate to a finite logp
+    x_rv = at.random.normal() ** power
+    x_rv.name = "x"
+
+    x_vv = x_rv.clone()
+    x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}, sum=False))
+
+    assert np.isfinite(x_logp_fn(1))
+    assert np.isfinite(x_logp_fn(-1))
+
+
+@pytest.mark.parametrize("power", (-2, 2, 4, 6, 8))
+def test_negative_value_even_power_transform(power):
+    # check that negative values and even powers evaluate to -inf logp
+    x_rv = at.random.normal() ** power
+    x_rv.name = "x"
+
+    x_vv = x_rv.clone()
+    x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}, sum=False))
+
+    assert np.isfinite(x_logp_fn(1))
+    assert np.isneginf(x_logp_fn(-1))
+
+
+@pytest.mark.parametrize("power", (-1 / 3, -1 / 2, 1 / 2, 1 / 3))
+def test_negative_value_frac_power_transform(power):
+    # check that negative values and fractional powers evaluate to -inf logp
+    x_rv = at.random.normal() ** power
+    x_rv.name = "x"
+
+    x_vv = x_rv.clone()
+    x_logp_fn = pytensor.function([x_vv], joint_logprob({x_rv: x_vv}, sum=False))
+
+    assert np.isfinite(x_logp_fn(2.5))
+    assert np.isneginf(x_logp_fn(-2.5))
+
+
 def test_negated_rv_transform():
     x_rv = -at.random.halfnormal()
     x_rv.name = "x"
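
The three new power tests reflect how x -> x**power maps the real line. For odd integer powers (positive or negative) the map is one-to-one over the reals apart from a measure-zero set, so negative values keep a finite log-density; for even powers the image is the non-negative reals, and for fractional powers the real-valued power is only defined for non-negative inputs, so in both of those cases negative values fall outside the support and are expected to evaluate to -inf. A standalone sketch of the odd-power case for Y = X**3 with X ~ Normal(0, 1), using the signed cube root (numpy/scipy only, evaluation points illustrative):

import numpy as np
from scipy import stats

def logp_cube(y):
    # x = sign(y) * |y|**(1/3) and |dx/dy| = |y|**(-2/3) / 3, finite for y != 0
    y = np.asarray(y, dtype=float)
    x = np.sign(y) * np.abs(y) ** (1.0 / 3.0)
    return stats.norm(0, 1).logpdf(x) + np.log(np.abs(y) ** (-2.0 / 3.0) / 3.0)

assert np.isfinite(logp_cube(1.0)) and np.isfinite(logp_cube(-1.0))
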