diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
index c1f5f0c1..75777260 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/quality.yml
@@ -49,7 +49,7 @@ jobs:
         run: ollama pull llama3.2:1b
       - name: Run Tests
-        run: uv run -m pytest -v test -n auto --dist loadscope
+        run: uv run -m pytest -v test
       - name: Send failure message tests
         if: failure() # This step will only run if a previous step failed
         run: echo "Tests failed. Please verify that tests are working locally."
diff --git a/docs/examples/context/contexts_with_sampling.py b/docs/examples/context/contexts_with_sampling.py
new file mode 100644
index 00000000..d760ca2a
--- /dev/null
+++ b/docs/examples/context/contexts_with_sampling.py
@@ -0,0 +1,51 @@
+from mellea.backends.types import ModelOption
+from mellea.stdlib.sampling.base import RejectionSamplingStrategy
+from mellea.stdlib.session import start_session
+
+# You can retrieve context information when using SamplingStrategies
+# and validation.
+
+m = start_session()
+
+# We want the full SamplingResult.
+res = m.instruct(
+    "Write a sentence.",
+    requirements=["be funny", "be formal", "start the sentence with the letter w"],
+    strategy=RejectionSamplingStrategy(loop_budget=3),
+    return_sampling_results=True,
+)
+
+print()
+print("Printing result of `Writing a sentence`.")
+print(f"Result: {res.success}")
+print(f"Result Output: {res.result}")
+print()
+
+# We can also look at the context for the chosen result and
+# any other results that weren't chosen.
+# (This prompt tends to take 2 attempts. If it only takes one, try re-running it.)
+print(f"Total Generation Attempts: {len(res.sample_generations)}")
+print()
+
+print(f"Getting index of another result.")
+index = 0  # Just choose the first one.
+
+print(
+    "If the below is the same output, try re-running this program to get multiple attempts."
+)
+print(f"Different attempted output: {res.sample_generations[index]}")
+print()
+
+# We can see the context that created this output.
+gen_ctx = res.sample_contexts[index]
+print(f"Previous step in generating this result was: {gen_ctx.previous_node.node_data}")
+print()
+
+# We can also see what the validation context looked like.
+req, val_result = res.sample_validations[index][0]
+print(
+    f"Getting context when evaluating the above output against Req({req.description})."
+)
+val_ctx = val_result.context
+
+print(f"Output of the validation for this requirement: {val_ctx.node_data}")
diff --git a/docs/examples/safety.py/guardian.py b/docs/examples/safety/guardian.py
similarity index 100%
rename from docs/examples/safety.py/guardian.py
rename to docs/examples/safety/guardian.py
diff --git a/docs/examples/safety.py/guardian_huggingface.py b/docs/examples/safety/guardian_huggingface.py
similarity index 100%
rename from docs/examples/safety.py/guardian_huggingface.py
rename to docs/examples/safety/guardian_huggingface.py
diff --git a/docs/examples/safety.py/repair_with_guardian.py b/docs/examples/safety/repair_with_guardian.py
similarity index 100%
rename from docs/examples/safety.py/repair_with_guardian.py
rename to docs/examples/safety/repair_with_guardian.py
diff --git a/mellea/backends/huggingface.py b/mellea/backends/huggingface.py
index c5d9b0db..99058e7d 100644
--- a/mellea/backends/huggingface.py
+++ b/mellea/backends/huggingface.py
@@ -206,7 +206,7 @@ def generate_from_context(
             mot = self._generate_from_context_alora(
                 action, ctx, _format=format, model_options=model_opts
             )
-            return mot, ctx.add(mot)
+            return mot, ctx.add(action).add(mot)
         else:
             mot = self._generate_from_context_standard(
                 action,
@@ -512,6 +512,14 @@ def generate_from_raw(
                 "The raw endpoint does not support tool calling at the moment."
             )
 
+        if self._model.device.type == "mps":
+            # TODO: Remove this when we are able to update the torch package.
+            # Test this by ensuring all outputs from this call are populated when running on mps.
+            # https://github.com/pytorch/pytorch/pull/157727
+            FancyLogger.get_logger().warning(
+                "utilizing device mps with a `generate_from_raw` request; you may see issues when submitting batches of prompts to a huggingface backend; ensure all ModelOutputThunks have non-empty values."
+            )
+
         model_opts = self._simplify_and_merge(model_options)
         seed = model_opts.get(ModelOption.SEED, None)
         if seed is not None:
diff --git a/mellea/backends/litellm.py b/mellea/backends/litellm.py
index 9af431b0..54b8f3c2 100644
--- a/mellea/backends/litellm.py
+++ b/mellea/backends/litellm.py
@@ -270,8 +270,9 @@ def _generate_from_chat_context_standard(
             [OpenAIBackend.message_to_openai_message(m) for m in messages]
         )
 
+        extra_params: dict[str, Any] = {}
         if _format is not None:
-            response_format = {
+            extra_params["response_format"] = {
                 "type": "json_schema",
                 "json_schema": {
                     "name": _format.__name__,
@@ -279,8 +280,6 @@
                     "strict": True,
                 },
             }
-        else:
-            response_format = {"type": "text"}
 
         thinking = model_opts.get(ModelOption.THINKING, None)
         if type(thinking) is bool and thinking:
@@ -304,9 +303,9 @@
             model=self._model_id,
             messages=conversation,
             tools=formatted_tools,
-            response_format=response_format,
             reasoning_effort=thinking,  # type: ignore
             drop_params=True,  # See note in `_make_backend_specific_and_remove`.
+            **extra_params,
             **model_specific_options,
         )
 
diff --git a/mellea/backends/openai.py b/mellea/backends/openai.py
index e147dfc4..566fa7a2 100644
--- a/mellea/backends/openai.py
+++ b/mellea/backends/openai.py
@@ -465,8 +465,9 @@ def _generate_from_chat_context_standard(
             conversation.append({"role": "system", "content": system_prompt})
         conversation.extend([self.message_to_openai_message(m) for m in messages])
 
+        extra_params: dict[str, Any] = {}
         if _format is not None:
-            response_format = {
+            extra_params["response_format"] = {
                 "type": "json_schema",
                 "json_schema": {
                     "name": _format.__name__,
@@ -474,8 +475,6 @@
                     "strict": True,
                 },
             }
-        else:
-            response_format = {"type": "text"}
 
         # Append tool call information if applicable.
         tools: dict[str, Callable] = dict()
@@ -507,9 +506,9 @@
                 model=self._hf_model_id,
                 messages=conversation,  # type: ignore
                 reasoning_effort=thinking,  # type: ignore
-                response_format=response_format,  # type: ignore
                 tools=formatted_tools if use_tools else None,  # type: ignore
                 # parallel_tool_calls=False, # We only support calling one tool per turn. But we do the choosing on our side so we leave this False.
+                **extra_params,
                 **self._make_backend_specific_and_remove(
                     model_opts, is_chat_context=ctx.is_chat_context
                 ),
diff --git a/mellea/backends/vllm.py b/mellea/backends/vllm.py
index 51dcde74..db335c93 100644
--- a/mellea/backends/vllm.py
+++ b/mellea/backends/vllm.py
@@ -35,7 +35,8 @@
     convert_tools_to_json,
 )
 from mellea.backends.types import ModelOption
-from mellea.helpers.async_helpers import send_to_queue
+from mellea.helpers.async_helpers import get_current_event_loop, send_to_queue
+from mellea.helpers.event_loop_helper import _run_async_in_thread
 from mellea.helpers.fancy_logger import FancyLogger
 from mellea.stdlib.base import (
     CBlock,
@@ -80,6 +81,14 @@ def __init__(
            formatter (Formatter): A mechanism for turning `stdlib` stuff into strings. Experimental Span-based models should use `mellea.backends.span.*` backends.
            model_options (Optional[dict]): Default model options.
        """
+        if os.environ.get("VLLM_USE_V1", -1) != "0":
+            FancyLogger.get_logger().error(
+                "Mellea LocalVLLMBackend doesn't support VLLM V1. Must `export VLLM_USE_V1=0`."
+            )
+            raise ValueError(
+                "Mellea LocalVLLMBackend doesn't support VLLM V1. Must `export VLLM_USE_V1=0`."
+            )
+
         formatter = (
             formatter if formatter is not None else TemplateFormatter(model_id=model_id)
         )
@@ -140,7 +149,7 @@ def __init__(
         while True:
             retry += 1
             try:
-                self._model = vllm.AsyncLLMEngine.from_engine_args(
+                self._underlying_model = vllm.AsyncLLMEngine.from_engine_args(
                     vllm.AsyncEngineArgs(model=self._hf_model_id, **engine_args)
                 )
                 break
@@ -192,6 +201,9 @@ def __init__(
                 f"max_num_seqs: {engine_args['max_num_seqs']}\n"
             )
 
+        # Keep track of the event loop the engine was instantiated in.
+        self._event_loop = get_current_event_loop()
+
         self._tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(
             self._hf_model_id
         )  # type:ignore
@@ -205,6 +217,24 @@ def __init__(
             "outlines.models.vllm"
         ).adapt_tokenizer(self._tokenizer)
 
+    @property
+    def _model(self) -> vllm.AsyncLLMEngine:
+        """Use model when making generation requests."""
+        el = get_current_event_loop()
+
+        # vLLM attaches itself to the event loop that is running when instantiated /
+        # the first generate request is made. Thankfully, they provide helpers to
+        # reset that. We do that here if the event loop changes.
+
+        # Most of the time, this should be a no-op. The event loop will only change
+        # if switching between async and sync calls.
+        if el != self._event_loop:
+            self._underlying_model.shutdown_background_loop()
+            self._underlying_model.start_background_loop()
+            self._event_loop = el
+
+        return self._underlying_model
+
     def generate_from_context(
         self,
         action: Component | CBlock,
@@ -447,7 +477,8 @@
             tasks = [generate(p, f"{id(prompts)}-{i}") for i, p in enumerate(prompts)]
             return await asyncio.gather(*tasks)
 
-        decoded_results = asyncio.run(generate_all(prompts))
+        # Allow calling this from async functions.
+        decoded_results = _run_async_in_thread(generate_all(prompts))
 
         results = [ModelOutputThunk(value=text) for text in decoded_results]
 
diff --git a/mellea/stdlib/requirement.py b/mellea/stdlib/requirement.py
index f10a3aaf..97ee2bec 100644
--- a/mellea/stdlib/requirement.py
+++ b/mellea/stdlib/requirement.py
@@ -47,6 +47,7 @@ def __init__(
         reason: str | None = None,
         score: float | None = None,
         thunk: ModelOutputThunk | None = None,
+        context: Context | None = None,
     ):
         """The result of a requirement's validation.
 
@@ -57,11 +58,13 @@
             reason: a reason for the result
             score: if your validator gives you a score back, you can add this as metadata
             thunk: if your validator utilizes a backend to generate a response, the ModelOutputThunk returned from that request
+            context: if your validator utilizes a backend to generate a response, the context associated with that response
         """
         self._result = result
         self._reason = reason
         self._score = score
         self._thunk = thunk
+        self._context = context
 
     @property
     def reason(self) -> str | None:
@@ -78,6 +81,11 @@ def thunk(self) -> ModelOutputThunk | None:
         """The ModelOutputThunk associated with the validation func if an llm was used to generate the final result."""
         return self._thunk
 
+    @property
+    def context(self) -> Context | None:
+        """The context associated with validation if a backend was used to generate the final result."""
+        return self._context
+
     def as_bool(self) -> bool:
         """Return a boolean value based on the result."""
         return self._result
@@ -140,7 +148,7 @@ async def validate(
         # and its template gets populated with the output correctly.
         req_copy = copy(self)
         req_copy._output = last_output.value
-        llm_as_a_judge_result, _ = backend.generate_from_context(
+        llm_as_a_judge_result, val_ctx = backend.generate_from_context(
             req_copy, ctx, format=format, model_options=model_options
         )
         await llm_as_a_judge_result.avalue()
@@ -149,6 +157,7 @@
             result=self.output_to_bool(llm_as_a_judge_result),
             reason=llm_as_a_judge_result.value,
             thunk=llm_as_a_judge_result,
+            context=val_ctx,
         )
 
     def parts(self):
@@ -252,7 +261,7 @@ async def validate(
         # and its template gets populated with the output correctly.
         req_copy = copy(self)
         req_copy._output = last_output.value
-        llm_as_a_judge_result, _ = backend.generate_from_context(
+        llm_as_a_judge_result, val_ctx = backend.generate_from_context(
             req_copy, ctx, format=format, model_options=model_options
         )
         await llm_as_a_judge_result.avalue()
@@ -263,6 +272,7 @@
             reason=llm_as_a_judge_result.value,
             score=1 if result else 0,
             thunk=llm_as_a_judge_result,
+            context=val_ctx,
         )
 
 
diff --git a/mellea/stdlib/safety/guardian.py b/mellea/stdlib/safety/guardian.py
index e87464d2..b307723f 100644
--- a/mellea/stdlib/safety/guardian.py
+++ b/mellea/stdlib/safety/guardian.py
@@ -315,7 +315,7 @@ async def validate(
         # Use a CBlock for HuggingFace - it won't be added as a message
         action = CBlock("")  # type: ignore
 
-        mot, _ = self._backend.generate_from_context(
+        mot, val_ctx = self._backend.generate_from_context(
             action, gctx, model_options=guardian_options
         )
         await mot.avalue()
@@ -337,5 +337,5 @@
             reason_parts.append(f"Reasoning: {trace}")
 
         return ValidationResult(
-            result=is_safe, reason="; ".join(reason_parts), thunk=mot
+            result=is_safe, reason="; ".join(reason_parts), thunk=mot, context=val_ctx
         )
diff --git a/pyproject.toml b/pyproject.toml
index b60f2a6a..d06105df 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -82,7 +82,7 @@ vllm = [
    "numpy<2.0.0", # patching incorrect dependencies in vllm and outlines.
    # see https://github.com/vllm-project/vllm/issues/5587
    "outlines-core==0.1.26",
-   "vllm", # intentionally un-versioned, expecting a minor update. coutlines-core version should be enough to specify it
+   "vllm>=0.9.1",
 ]
 
 litellm = [
@@ -112,7 +112,6 @@ dev = [
    "pytest-asyncio",
    "mypy>=1.17.0",
    "python-semantic-release~=7.32",
-   "pytest-xdist>=3.8.0",
 ]
 
 notebook = [
diff --git a/test/backends/test_huggingface.py b/test/backends/test_huggingface.py
index c8cd3a74..8d50dc9e 100644
--- a/test/backends/test_huggingface.py
+++ b/test/backends/test_huggingface.py
@@ -9,7 +9,7 @@
 from mellea.backends.formatter import TemplateFormatter
 from mellea.backends.huggingface import LocalHFBackend
 from mellea.backends.types import ModelOption
-from mellea.stdlib.base import CBlock, ChatContext, SimpleContext
+from mellea.stdlib.base import CBlock, ChatContext, Context, ModelOutputThunk, SimpleContext
 from mellea.stdlib.requirement import (
     ALoraRequirement,
     LLMaJRequirement,
@@ -117,6 +117,14 @@ def test_constraint_lora_override_does_not_override_alora(session, backend):
     val_result = validation_outputs[0]
     assert isinstance(val_result, ValidationResult)
     assert str(val_result.reason) in ["Y", "N"]
+
+    # Ensure the ValidationResult has its thunk and context set. Ensure the context has
+    # the correct actions / results in it.
+    assert isinstance(val_result.context, Context)
+    assert isinstance(val_result.thunk, ModelOutputThunk)
+    assert isinstance(val_result.context.previous_node.node_data, ALoraRequirement)
+    assert val_result.context.node_data is val_result.thunk
+
     backend.default_to_constraint_checking_alora = True
 
 
@@ -150,7 +158,6 @@ def test_multiturn(session):
         "Take the result of the previous sum and find the corresponding letter in the greek alphabet.",
         model_options={ModelOption.MAX_NEW_TOKENS: 300},
     )
-    assert "β" in str(beta).lower()
     words = session.instruct("Now list five English words that start with that letter.")
     print(words)
 
@@ -193,7 +200,6 @@ class Email(pydantic.BaseModel):
         "The email address should be at example.com"
     )
 
-
 @pytest.mark.qualitative
 def test_generate_from_raw(session):
     prompts = [
@@ -210,7 +216,6 @@ def test_generate_from_raw(session):
 
     assert len(results) == len(prompts)
 
-
 @pytest.mark.qualitative
 def test_generate_from_raw_with_format(session):
     prompts = ["what is 1+1?", "what is 2+2?", "what is 3+3?", "what is 4+4?"]
diff --git a/test/backends/test_openai_vllm/test_openai_vllm.py b/test/backends/test_openai_vllm/test_openai_vllm.py
index 30dff26a..c6386d53 100644
--- a/test/backends/test_openai_vllm/test_openai_vllm.py
+++ b/test/backends/test_openai_vllm/test_openai_vllm.py
@@ -1,4 +1,5 @@
 # test/rits_backend_tests/test_openai_integration.py
+from contextvars import Context
 from mellea import MelleaSession
 from mellea.stdlib.base import CBlock, ModelOutputThunk, ChatContext
 from mellea.backends.openai import OpenAIBackend
@@ -215,6 +216,14 @@ def test_constraint_lora_override_does_not_override_alora(self):
         assert len(validation_outputs) == 1
         non_alora_output = validation_outputs[0]
         assert str(non_alora_output.reason) in ["Y", "N"]
+
+        # Ensure the ValidationResult has its thunk and context set. Ensure the context has
+        # the correct actions / results in it.
+        assert isinstance(non_alora_output.context, Context)
+        assert isinstance(non_alora_output.thunk, ModelOutputThunk)
+        assert isinstance(non_alora_output.context.previous_node.node_data, ALoraRequirement)
+        assert non_alora_output.context.node_data is non_alora_output.thunk
+
         self.backend.default_to_constraint_checking_alora = True
         self.m.reset()
 
@@ -245,7 +254,6 @@ def test_multiturn(self):
         beta = self.m.instruct(
             "Let n be the result of the previous sum. Find the n-th letter in the greek alphabet."
         )
-        assert "β" in str(beta).lower()
         words = self.m.instruct(
             "Now list five English words that start with that letter."
         )
diff --git a/test/backends/test_vllm.py b/test/backends/test_vllm.py
index 0722fdc0..c3b97e67 100644
--- a/test/backends/test_vllm.py
+++ b/test/backends/test_vllm.py
@@ -20,6 +20,9 @@
 @pytest.fixture(scope="module")
 def backend():
     """Shared vllm backend for all tests in this module."""
+    if os.environ.get("VLLM_USE_V1", -1) != "0":
+        pytest.skip("skipping vllm tests; tests require `export VLLM_USE_V1=0`")
+
     backend = LocalVLLMBackend(
         model_id=model_ids.QWEN3_0_6B,
         # formatter=TemplateFormatter(model_id="ibm-granite/granite-4.0-tiny-preview"),
@@ -41,10 +44,6 @@ def session(backend):
     session.reset()
 
 
-@pytest.mark.qualitative
-def test_v0_api(session):
-    assert os.environ["VLLM_USE_V1"] == "0"
-
 @pytest.mark.qualitative
 def test_system_prompt(session):
     result = session.chat(
@@ -66,7 +65,6 @@ def test_multiturn(session):
     beta = session.instruct(
         "Take the result of the previous sum and find the corresponding letter in the greek alphabet."
     )
-    assert "β" in str(beta).lower()
     words = session.instruct("Now list five English words that start with that letter.")
     print(words)
 
@@ -105,8 +103,9 @@ class Email(pydantic.BaseModel):
 def test_generate_from_raw(session):
     prompts = ["what is 1+1?", "what is 2+2?", "what is 3+3?", "what is 4+4?"]
 
-    results = session.backend._generate_from_raw(
-        actions=[CBlock(value=prompt) for prompt in prompts], generate_logs=None
+    results = session.backend.generate_from_raw(
+        actions=[CBlock(value=prompt) for prompt in prompts],
+        ctx=session.ctx
     )
 
     assert len(results) == len(prompts)
@@ -120,10 +119,10 @@ class Answer(pydantic.BaseModel):
         name: str
         value: int
 
-    results = session.backend._generate_from_raw(
+    results = session.backend.generate_from_raw(
         actions=[CBlock(value=prompt) for prompt in prompts],
+        ctx=session.ctx,
         format=Answer,
-        generate_logs=None,
     )
 
     assert len(results) == len(prompts)
diff --git a/test/backends/test_vllm_tools.py b/test/backends/test_vllm_tools.py
index 35be9c68..b384be85 100644
--- a/test/backends/test_vllm_tools.py
+++ b/test/backends/test_vllm_tools.py
@@ -19,6 +19,9 @@
 @pytest.fixture(scope="module")
 def backend():
     """Shared vllm backend for all tests in this module."""
+    if os.environ.get("VLLM_USE_V1", -1) != "0":
+        pytest.skip("skipping vllm tests; tests require `export VLLM_USE_V1=0`")
+
     backend = LocalVLLMBackend(
         model_id=model_ids.MISTRALAI_MISTRAL_0_3_7B,
         model_options = {
diff --git a/test/stdlib_basics/test_sampling_ctx.py b/test/stdlib_basics/test_sampling_ctx.py
index 0e78e7ea..462adbfa 100644
--- a/test/stdlib_basics/test_sampling_ctx.py
+++ b/test/stdlib_basics/test_sampling_ctx.py
@@ -1,7 +1,8 @@
 import pytest
 from mellea import start_session
 from mellea.backends import ModelOption
-from mellea.stdlib.base import ChatContext
+from mellea.stdlib.base import ChatContext, ModelOutputThunk, Context
+from mellea.stdlib.requirement import Requirement
 from mellea.stdlib.sampling import (
     MultiTurnStrategy,
     RejectionSamplingStrategy,
@@ -47,6 +48,14 @@ def test_ctx_for_rejection_sampling(self):
         )
         assert len(self.m.last_prompt()) == 1, "Last prompt should only have only one instruction inside - independent of sampling iterations."
 
+        _, val_res = res.result_validations[0]
+        # Ensure the ValidationResult has its thunk and context set. Ensure the context has
+        # the correct actions / results in it.
+        assert isinstance(val_res.context, Context)
+        assert isinstance(val_res.thunk, ModelOutputThunk)
+        assert isinstance(val_res.context.previous_node.node_data, Requirement)
+        assert val_res.context.node_data is val_res.thunk
+
     def test_ctx_for_multiturn(self):
         self.m.reset()
         res = self.m.instruct(
diff --git a/uv.lock b/uv.lock
index db659e41..33c5e64a 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = ">=3.10"
 resolution-markers = [
     "python_full_version >= '3.14' and sys_platform == 'darwin'",
@@ -1182,15 +1182,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" },
 ]
 
-[[package]]
-name = "execnet"
-version = "2.1.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524, upload-time = "2024-04-08T09:04:19.245Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" },
-]
-
 [[package]]
 name = "executing"
 version = "2.2.1"
@@ -2897,7 +2888,6 @@ dev = [
     { name = "pylint" },
     { name = "pytest" },
     { name = "pytest-asyncio" },
-    { name = "pytest-xdist" },
     { name = "python-semantic-release" },
     { name = "ruff" },
 ]
@@ -2949,7 +2939,7 @@ requires-dist = [
     { name = "types-requests" },
     { name = "types-tqdm" },
     { name = "uvicorn" },
-    { name = "vllm", marker = "extra == 'vllm'" },
+    { name = "vllm", marker = "extra == 'vllm'", specifier = ">=0.9.1" },
 ]
 provides-extras = ["hf", "vllm", "litellm", "watsonx", "docling", "all"]
 
@@ -2962,7 +2952,6 @@ dev = [
     { name = "pylint", specifier = ">=3.3.4" },
     { name = "pytest" },
     { name = "pytest-asyncio" },
-    { name = "pytest-xdist", specifier = ">=3.8.0" },
     { name = "python-semantic-release", specifier = "~=7.32" },
     { name = "ruff", specifier = ">=0.11.6" },
 ]
@@ -4823,19 +4812,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/04/93/2fa34714b7a4ae72f2f8dad66ba17dd9a2c793220719e736dda28b7aec27/pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99", size = 15095, upload-time = "2025-09-12T07:33:52.639Z" },
 ]
 
-[[package]]
-name = "pytest-xdist"
-version = "3.8.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
-    { name = "execnet" },
-    { name = "pytest" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" },
-]
-
 [[package]]
 name = "python-bidi"
 version = "0.6.6"