
Commit 8e61910

changing marker name llm -> qualitative
1 parent 89b4834 commit 8e61910

2 files changed: +3 additions, -25 deletions

pyproject.toml (1 addition, 1 deletion)

@@ -164,5 +164,5 @@ python_version = "3.10"
 
 [tool.pytest.ini_options]
 markers = [
-    "llm: Marks the test as needing an exact output from an LLM (deselect with '-m \" not llm\"'); this depends on the session.backend.model_id"
+    "qualitative: Marks the test as needing an exact output from an LLM; set by an ENV variable for GITHUB_ACTIONS. All tests marked with this will xfail in CI/CD"
 ]
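
For illustration, a minimal sketch (not part of this commit) of how a test would opt into the renamed marker; the test name and body here are hypothetical:

import pytest

# Hypothetical test: marking it "qualitative" means the pytest_runtest_setup
# hook in test/conftest.py will xfail it when the GITHUB_ACTION environment
# variable is set to 1 (i.e. in CI/CD runs).
@pytest.mark.qualitative
def test_exact_llm_output():
    ...

Such tests can still be deselected locally with the standard pytest option -m "not qualitative".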

test/conftest.py (2 additions, 24 deletions)

@@ -15,34 +15,12 @@ def gh_run() -> int:
 
 def pytest_runtest_setup(item):
     # Runs tests *not* marked with `@pytest.mark.llm` to run normally.
-    if not item.get_closest_marker("llm"):
+    if not item.get_closest_marker("qualitative"):
         return
 
     gh_run = int(os.environ.get("GITHUB_ACTION", 0))
 
     if gh_run == 1:
         pytest.xfail(
-            reason="Skipping LLM test: got env variable GITHUB_ACTION == 1. Used only in gh workflows."
+            reason="Skipping qualitative test: got env variable GITHUB_ACTION == 1. Used only in gh workflows."
         )
-
-    # # Check if there is a session fixture.
-    # try:
-    #     session: MelleaSession = item._request.getfixturevalue("m_session")
-    # except Exception:
-    #     # Skip test cause all llm marked tests need a session fixture.
-    #     pytest.skip("`llm` marked tests requires a `m_session` fixture.")
-    # # Get the Ollama name.
-    # if isinstance(session.backend, OllamaModelBackend) or isinstance(session.backend, OpenAIBackend):
-    #     model_id = session.backend.model_id.ollama_name
-    #     # Skip tests of the model name is llama 1b
-    #     if model_id == "llama3.2:1b":
-    #         pytest.skip(
-    #             "Skipping LLM test: got model_id == llama3.2:1b in ollama. Used only in gh workflows."
-    #         )
-    # elif isinstance(session.backend, LocalHFBackend):
-    #     model_id = session.backend.model_id.hf_model_name
-    #     # Skip tests of the model name is llama 1b
-    #     if model_id == "unsloth/Llama-3.2-1B":
-    #         pytest.skip(
-    #             "Skipping LLM test: got model_id == unsloth/Llama-3.2-1B in hf. Used only in gh workflows."
-    #         )
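
For completeness, a hedged sketch (not from this repository) of how one might confirm the new behavior locally by reproducing the CI environment; the reporting flags are just one reasonable choice:

import os
import subprocess

# Simulate the CI/CD environment: pytest_runtest_setup reads GITHUB_ACTION
# (note the singular name used in conftest.py) and xfails qualitative tests
# when it equals 1.
env = dict(os.environ, GITHUB_ACTION="1")

# Run only the qualitative-marked tests; -rx lists the expected failures.
subprocess.run(["pytest", "-m", "qualitative", "-rx"], env=env, check=False)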
