@@ -419,8 +419,9 @@ def setup_class(cls):
                 def test_method(self):
                     pass
         """)
+        from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+        should_fail = _is_unittest_unexpected_success_a_failure()
         result = testdir.runpytest("-rxs")
-        assert result.ret == 0
         result.stdout.fnmatch_lines_random([
             "*XFAIL*test_trial_todo*",
             "*trialselfskip*",
@@ -429,8 +430,9 @@ def test_method(self):
             "*i2wanto*",
             "*sys.version_info*",
             "*skip_in_method*",
-            "*4 skipped*3 xfail*1 xpass*",
+            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
         ])
+        assert result.ret == (1 if should_fail else 0)

     def test_trial_error(self, testdir):
         testdir.makepyfile("""
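Note on the new helper: _is_unittest_unexpected_success_a_failure() lives in _pytest.skipping and its body is not shown in this diff. As a rough sketch of the idea (an assumption, not the verbatim implementation): Python 3.4 changed unittest.TestResult.wasSuccessful() to return False when a test decorated with @unittest.expectedFailure passes, so the helper plausibly reduces to a version check:

    import sys

    def _is_unittest_unexpected_success_a_failure():
        # Assumption: mirrors unittest's own behavior change -- since
        # Python 3.4, an unexpected success makes the whole suite fail.
        return sys.version_info >= (3, 4)

That is why the expectations above fork on should_fail: on older interpreters an unexpected success is reported as an xpass with exit status 0, on newer ones as a failure with exit status 1.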
@@ -587,39 +589,62 @@ def test_hello(self, arg1):
     assert "TypeError" in result.stdout.str()
     assert result.ret == 1

+
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_expected_failure_for_failing_test_is_xfail(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
             def test_failing_test_is_xfail(self):
                 assert False
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
-        "*1 xfailed*",
-    ])
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
+            "*1 xfailed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*OK*(expected failures=1)*",
+        ])
     assert result.ret == 0

+
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_expected_failure_for_passing_test_is_fail(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
             def test_passing_test_is_fail(self):
                 assert True
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*FAILURES*",
-        "*MyTestCase*test_passing_test_is_fail*",
-        "*Unexpected success*",
-        "*1 failed*",
-    ])
-    assert result.ret == 1
+    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+    should_fail = _is_unittest_unexpected_success_a_failure()
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*MyTestCase*test_passing_test_is_fail*",
+            "*1 failed*" if should_fail else "*1 xpassed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*(unexpected successes=1)*",
+        ])
+
+    assert result.ret == (1 if should_fail else 0)


 @pytest.mark.parametrize('fix_type, stmt', [
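For a feel of the unittest-runner side of the parametrization, here is a standalone script (illustrative only; it mirrors the makepyfile bodies above and can be run directly with python), assuming Python 3.4+ semantics for expectedFailure:

    import unittest

    class MyTestCase(unittest.TestCase):
        @unittest.expectedFailure
        def test_failing_test_is_xfail(self):
            assert False   # fails as expected -> counted as an expected failure

        @unittest.expectedFailure
        def test_passing_test_is_fail(self):
            assert True    # passes unexpectedly -> counted as an unexpected success

    if __name__ == '__main__':
        unittest.main()
        # On 3.4+ the run ends with:
        #   FAILED (expected failures=1, unexpected successes=1)
        # and a nonzero exit status, matching should_fail above.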