diff --git a/doc/src/configuration.rst b/doc/src/configuration.rst
index 151a1ec..cb30c8a 100644
--- a/doc/src/configuration.rst
+++ b/doc/src/configuration.rst
@@ -36,6 +36,12 @@ automark_dependency
 
     .. versionadded:: 0.3
 
+accept_xfail
+    This is a flag. If set to `True`, xfailing dependencies will be
+    treated as successful. Defaults to `False`.
+
+    .. versionadded:: 0.6
+
 Command line options
 --------------------
 
diff --git a/src/pytest_dependency.py b/src/pytest_dependency.py
index 43224ee..e03ca64 100644
--- a/src/pytest_dependency.py
+++ b/src/pytest_dependency.py
@@ -7,6 +7,7 @@
 
 logger = logging.getLogger(__name__)
 
+_accept_xfail = False
 _automark = False
 _ignore_unknown = False
 
@@ -24,8 +25,12 @@ def __str__(self):
         l = ["%s: %s" % (w, self.results[w]) for w in self.Phases]
         return "Status(%s)" % ", ".join(l)
 
+    def _accept_xfail(self, rep):
+        '''Take xfail and accept_xfail into account.'''
+        return _accept_xfail and (rep.when == 'call') and (rep.outcome == 'skipped') and hasattr(rep, 'wasxfail')
+
     def addResult(self, rep):
-        self.results[rep.when] = rep.outcome
+        self.results[rep.when] = 'passed' if self._accept_xfail(rep) else rep.outcome
 
     def isSuccess(self):
         return list(self.results.values()) == ['passed', 'passed', 'passed']
@@ -129,13 +134,17 @@ def pytest_addoption(parser):
     parser.addini("automark_dependency",
                   "Add the dependency marker to all tests automatically",
                   type="bool", default=False)
+    parser.addini("accept_xfail",
+                  "Consider xfailing dependencies as successful dependencies.",
+                  type="bool", default=False)
     parser.addoption("--ignore-unknown-dependency",
                      action="store_true", default=False,
                      help="ignore dependencies whose outcome is not known")
 
 
 def pytest_configure(config):
-    global _automark, _ignore_unknown
+    global _accept_xfail, _automark, _ignore_unknown
+    _accept_xfail = config.getini("accept_xfail")
     _automark = config.getini("automark_dependency")
     _ignore_unknown = config.getoption("--ignore-unknown-dependency")
     config.addinivalue_line("markers",
diff --git a/tests/test_10_accept_xfail.py b/tests/test_10_accept_xfail.py
new file mode 100644
index 0000000..fb01ae7
--- /dev/null
+++ b/tests/test_10_accept_xfail.py
@@ -0,0 +1,101 @@
+"""Test the accept_xfail option.
+"""
+
+import pytest
+
+
+def test_accept_xfail_not_set(ctestdir):
+    """No pytest.ini file, therefore accept_xfail is not set.
+
+    Since accept_xfail defaults to False and test_a is marked as xfail,
+    the xfailed outcome of test_a is treated as skipped. As a result,
+    test_b is skipped because its dependency did not succeed.
+    """
+    ctestdir.makepyfile("""
+        import pytest
+
+        @pytest.mark.dependency()
+        @pytest.mark.xfail()
+        def test_a():
+            assert False
+
+        @pytest.mark.dependency(depends=["test_a"])
+        def test_b():
+            pass
+    """)
+    result = ctestdir.runpytest("--verbose", "-rs")
+    result.assert_outcomes(xfailed=1, skipped=1)
+    result.stdout.re_match_lines(r"""
+        .*::test_a XFAIL
+        .*::test_b SKIPPED(?:\s+\(.*\))?
+    """)
+
+
+@pytest.mark.parametrize(
+    "false_value", ["0", "no", "n", "False", "false", "f", "off"]
+)
+def test_accept_xfail_set_false(ctestdir, false_value):
+    """A pytest.ini is present that sets accept_xfail to False.
+
+    Since accept_xfail is set to False and test_a is marked as xfail,
+    the xfailed outcome of test_a is treated as skipped. As a result,
+    test_b is skipped because its dependency did not succeed.
+    """
+    ctestdir.makefile('.ini', pytest="""
+        [pytest]
+        accept_xfail = %s
+        console_output_style = classic
+    """ % false_value)
+    ctestdir.makepyfile("""
+        import pytest
+
+        @pytest.mark.dependency()
+        @pytest.mark.xfail()
+        def test_a():
+            assert False
+
+        @pytest.mark.dependency(depends=["test_a"])
+        def test_b():
+            pass
+    """)
+    result = ctestdir.runpytest("--verbose", "-rs")
+    result.assert_outcomes(xfailed=1, skipped=1)
+    result.stdout.re_match_lines(r"""
+        .*::test_a XFAIL
+        .*::test_b SKIPPED(?:\s+\(.*\))?
+    """)
+
+
+@pytest.mark.parametrize(
+    "true_value", ["1", "yes", "y", "True", "true", "t", "on"]
+)
+def test_accept_xfail_set_true(ctestdir, true_value):
+    """A pytest.ini is present that sets accept_xfail to True.
+
+    Since accept_xfail is set to True and test_a is marked as xfail,
+    the xfailed outcome of test_a is treated as passed. As a result,
+    test_b is executed because its dependency succeeded.
+    """
+    ctestdir.makefile('.ini', pytest="""
+        [pytest]
+        accept_xfail = %s
+        console_output_style = classic
+    """ % true_value)
+    ctestdir.makepyfile("""
+        import pytest
+
+        @pytest.mark.dependency()
+        @pytest.mark.xfail()
+        def test_a():
+            assert False
+
+        @pytest.mark.dependency(depends=["test_a"])
+        def test_b():
+            pass
+    """)
+    result = ctestdir.runpytest("--verbose", "-rs")
+    result.assert_outcomes(xfailed=1, passed=1, skipped=0)
+    result.stdout.re_match_lines(r"""
+        .*::test_a XFAIL
+        .*::test_b PASSED
+    """)