add setting to accept xfail test dependencies #67

Open
wants to merge 1 commit into base: develop
6 changes: 6 additions & 0 deletions doc/src/configuration.rst
@@ -36,6 +36,12 @@ automark_dependency

    .. versionadded:: 0.3

accept_xfail
    This is a flag. If set to `True`, xfailing dependencies are treated as
    successful. Defaults to `False`.

    .. versionadded:: 0.6
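A minimal sketch of how this setting could be used in a project, assuming the ini key and default described above; the file layout and test names are illustrative, not part of this diff:

# pytest.ini (assumed project configuration)
#   [pytest]
#   accept_xfail = True

# test_example.py
import pytest

@pytest.mark.dependency()
@pytest.mark.xfail()
def test_a():
    assert False        # expected failure, reported as XFAIL

@pytest.mark.dependency(depends=["test_a"])
def test_b():
    pass                # still runs: the xfailed dependency counts as passed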

Command line options
--------------------

13 changes: 11 additions & 2 deletions src/pytest_dependency.py
@@ -7,6 +7,7 @@

logger = logging.getLogger(__name__)

_accept_xfail = False
_automark = False
_ignore_unknown = False

@@ -24,8 +25,12 @@ def __str__(self):
        l = ["%s: %s" % (w, self.results[w]) for w in self.Phases]
        return "Status(%s)" % ", ".join(l)

    def _accept_xfail(self, rep):
        '''Take xfail and accept_xfail into account.'''
        return _accept_xfail and (rep.when == 'call') and (rep.outcome == 'skipped') and (hasattr(rep, 'wasxfail'))

    def addResult(self, rep):
        self.results[rep.when] = rep.outcome
        self.results[rep.when] = 'passed' if self._accept_xfail(rep) else rep.outcome

    def isSuccess(self):
        return list(self.results.values()) == ['passed', 'passed', 'passed']
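The condition above relies on how pytest reports an expected failure: the call-phase report carries outcome 'skipped' together with a wasxfail attribute. A standalone sketch of that decision, using stand-in report objects instead of pytest's real reports (counts_as_passed is an illustrative name, not part of the plugin):

from types import SimpleNamespace

_accept_xfail = True  # in the plugin this value comes from the ini file

def counts_as_passed(rep):
    # An xfailed call phase is recorded as 'passed' so dependents are not skipped.
    return (_accept_xfail and rep.when == 'call'
            and rep.outcome == 'skipped' and hasattr(rep, 'wasxfail'))

xfailed = SimpleNamespace(when='call', outcome='skipped', wasxfail='expected failure')
plain_skip = SimpleNamespace(when='call', outcome='skipped')

print(counts_as_passed(xfailed))     # True
print(counts_as_passed(plain_skip))  # False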
@@ -129,13 +134,17 @@ def pytest_addoption(parser):
    parser.addini("automark_dependency",
                  "Add the dependency marker to all tests automatically",
                  type="bool", default=False)
    parser.addini("accept_xfail",
                  "Consider xfailing dependencies as successful dependencies.",
                  type="bool", default=False)
    parser.addoption("--ignore-unknown-dependency",
                     action="store_true", default=False,
                     help="ignore dependencies whose outcome is not known")


def pytest_configure(config):
    global _automark, _ignore_unknown
    global _accept_xfail, _automark, _ignore_unknown
    _accept_xfail = config.getini("accept_xfail")
    _automark = config.getini("automark_dependency")
    _ignore_unknown = config.getoption("--ignore-unknown-dependency")
    config.addinivalue_line("markers",
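The two hooks above follow pytest's usual lifecycle for ini settings: the key is declared in pytest_addoption, read back once with config.getini() in pytest_configure, and cached in a module-level global that the report hooks consult later. A stripped-down sketch of that pattern, shown in isolation as a conftest-style plugin:

# conftest.py -- minimal sketch of the register-then-read pattern
_accept_xfail = False

def pytest_addoption(parser):
    # Declare the boolean ini key so pytest knows how to parse it.
    parser.addini("accept_xfail",
                  "Consider xfailing dependencies as successful dependencies.",
                  type="bool", default=False)

def pytest_configure(config):
    global _accept_xfail
    # Read the parsed value once; later hooks only use the cached global.
    _accept_xfail = config.getini("accept_xfail")

Like any ini option, the value can also be overridden for a single run with pytest's -o flag (for example -o accept_xfail=True) instead of editing pytest.ini.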
101 changes: 101 additions & 0 deletions tests/test_10_accept_xfail.py
@@ -0,0 +1,101 @@
"""Test the accept_xfail option.
"""

import pytest


def test_accept_xfail_not_set(ctestdir):
"""No pytest.ini file, therefore accept_xfail is not set.

Since accept_xfail defaults to False and test_a is marked as xfail,
the xfailed outcome of test_a will be considered as skipped. As a result,
test_b will be skipped since its dependency was not successful.
"""
ctestdir.makepyfile("""
import pytest

@pytest.mark.dependency()
@pytest.mark.xfail()
def test_a():
assert False

@pytest.mark.dependency(depends=["test_a"])
def test_b():
pass
""")
result = ctestdir.runpytest("--verbose", "-rs")
result.assert_outcomes(xfailed=1, skipped=1)
result.stdout.re_match_lines(r"""
.*::test_a XFAIL
.*::test_b SKIPPED(?:\s+\(.*\))?
""")


@pytest.mark.parametrize(
"false_value", ["0", "no", "n", "False", "false", "f", "off"]
)
def test_accept_xfail_set_false(ctestdir, false_value):
"""A pytest.ini is present, accept_xfail is set to False.

Since accept_xfail is set to False and test_a is marked as xfail,
the xfailed outcome of test_a will be considered as skipped. As a result,
test_b will be skipped since its dependency was not successful.
"""
ctestdir.makefile('.ini', pytest="""
[pytest]
accept_xfail = %s
console_output_style = classic
""" % false_value)
ctestdir.makepyfile("""
import pytest

@pytest.mark.dependency()
@pytest.mark.xfail()
def test_a():
assert False

@pytest.mark.dependency(depends=["test_a"])
def test_b():
pass
""")
result = ctestdir.runpytest("--verbose", "-rs")
result.assert_outcomes(xfailed=1, skipped=1)
result.stdout.re_match_lines(r"""
.*::test_a XFAIL
.*::test_b SKIPPED(?:\s+\(.*\))?
""")


@pytest.mark.parametrize(
"true_value", ["1", "yes", "y", "True", "true", "t", "on"]
)
def test_accept_xfail_set_true(ctestdir, true_value):
"""A pytest.ini is present, accept_xfail is set to True.

Since accept_xfail is set to True and test_a is marked as xfail,
the xfailed outcome of test_a will be considered as passing. As a result,
test_b will be executed since its dependency was successful.
"""
ctestdir.makefile('.ini', pytest="""
[pytest]
accept_xfail = %s
console_output_style = classic
""" % true_value)
ctestdir.makepyfile("""
import pytest

@pytest.mark.dependency()
@pytest.mark.xfail()
def test_a():
assert False

@pytest.mark.dependency(depends=["test_a"])
def test_b():
pass
""")
result = ctestdir.runpytest("--verbose", "-rs")
result.assert_outcomes(xfailed=1, passed=1, skipped=0)
result.stdout.re_match_lines(r"""
.*::test_a XFAIL
.*::test_b PASSED
""")